diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000000..55c746a753 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,8 @@ +[run] +branch = True +source = octavia +omit = octavia/tests/* +concurrency = multiprocessing,thread + +[report] +ignore_errors = True diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..c9781e5e0a --- /dev/null +++ b/.gitignore @@ -0,0 +1,42 @@ +AUTHORS +build/* +build-stamp +ChangeLog +cover/ +.coverage +.coverage.* +covhtml/ +dist/ +doc/build +doc/source/configuration/_static/octavia.policy.yaml.sample +doc/source/contributor/devref/erd.svg +doc/source/contributor/devref/flow_diagrams/ +doc/source/contributor/devref/flow_diagrams_v2/ +doc/source/contributor/modules +etc/octavia/*.sample +api-ref/build +.idea/* +.vscode/* +*.DS_Store +*.pyc +*.egg-info/ +pbr*.egg/ +run_tests.err.log +run_tests.log +setuptools*.egg/ +tempest.log +.testrepository +.stestr +.tox/ +.venv/ +*.mo +*.sw? +*~ +.eggs/ +.ropeproject/ +*.qcow2 +*.orig + +# Files created by releasenotes build +releasenotes/build +api-ref/build diff --git a/.gitreview b/.gitreview index efc8d60b03..75c6506ca4 100644 --- a/.gitreview +++ b/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 -project=stackforge/octavia.git +project=openstack/octavia.git diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..6e39e71627 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,37 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0 # v4.3.0 + hooks: + - id: trailing-whitespace + # Replaces or checks mixed line ending + - id: mixed-line-ending + args: ['--fix', 'lf'] + exclude: '.*\.(svg)$' + - id: end-of-file-fixer + # Forbid files which have a UTF-8 byte-order marker + - id: check-byte-order-marker + # Checks that non-binary executables have a proper shebang + - id: check-executables-have-shebangs + # Check for files that contain merge conflict strings. + - id: check-merge-conflict + - id: debug-statements + - id: check-yaml + files: .*\.(yaml|yml)$ + - id: check-added-large-files +- repo: local + hooks: + - id: flake8 + name: flake8 + additional_dependencies: + - hacking>=6.1.0,<6.2.0 + language: python + entry: flake8 + files: '^.*\.py$' + exclude: '^(doc|releasenotes|tools)/.*$' +- repo: https://github.com/asottile/pyupgrade + rev: v3.13.0 + hooks: + - id: pyupgrade + args: [--py38-plus] diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000000..fae8b7b34b --- /dev/null +++ b/.pylintrc @@ -0,0 +1,100 @@ +# The format of this file isn't really documented; just use --generate-rcfile +[MASTER] +# Add to the black list. It should be a base name, not a +# path. You may set this option multiple times. +ignore=.git,tests + +[MESSAGES CONTROL] +# NOTE: The options which do not need to be suppressed can be removed. 
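+# Illustrative usage note (an assumption, not part of the upstream config): +# pylint reads this file automatically when run from the repository root; the +# equivalent explicit invocation is: pylint --rcfile=.pylintrc octavia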
+disable= +# "F" Fatal errors that prevent further processing +# "I" Informational noise + c-extension-no-member, + locally-disabled, +# "E" Error for important programming issues (likely bugs) + import-error, + not-callable, + no-member, +# "W" Warnings for stylistic problems or minor programming issues + abstract-method, + anomalous-backslash-in-string, + arguments-differ, + attribute-defined-outside-init, + broad-except, + fixme, + global-statement, + pointless-string-statement, + protected-access, + redefined-builtin, + redefined-outer-name, + signature-differs, + unidiomatic-typecheck, + unused-argument, + unused-variable, + useless-super-delegation, + # TODO(gthiemonge) Re-enable this checker and fix too general exceptions + broad-exception-raised, +# "C" Coding convention violations + invalid-name, + line-too-long, + missing-docstring, + consider-using-f-string, +# "R" Refactor recommendations + duplicate-code, + too-few-public-methods, + too-many-ancestors, + too-many-arguments, + too-many-branches, + too-many-instance-attributes, + too-many-lines, + too-many-locals, + too-many-public-methods, + too-many-return-statements, + too-many-statements, + multiple-statements, + duplicate-except, + keyword-arg-before-vararg, + useless-object-inheritance, + arguments-renamed, + consider-using-enumerate, + too-many-positional-arguments + +[BASIC] +# Variable names can be 1 to 31 characters long, with lowercase and underscores +variable-rgx=[a-z_][a-z0-9_]{0,30}$ + +# Argument names can be 2 to 31 characters long, with lowercase and underscores +argument-rgx=[a-z_][a-z0-9_]{1,30}$ + +# Method names should be at least 3 characters long +# and be lowercased with underscores +method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ + +# Module names matching +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Don't require docstrings on tests. +no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ + +[FORMAT] +# Maximum number of characters on a single line. +max-line-length=79 + +[VARIABLES] +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +[CLASSES] + +[IMPORTS] +# Deprecated modules which should not be used, separated by a comma +deprecated-modules= + +[TYPECHECK] +# List of module names for which member attributes should not be checked +ignored-modules=six.moves,_MovedItems + +[REPORTS] +# Tells whether to display a full report or only the messages +reports=no diff --git a/.stestr.conf b/.stestr.conf new file mode 100644 index 0000000000..75ac582e74 --- /dev/null +++ b/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=${OS_TEST_PATH:-./octavia/tests/unit} +top_dir=./ diff --git a/CONSTITUTION.rst b/CONSTITUTION.rst new file mode 100644 index 0000000000..7a349481fb --- /dev/null +++ b/CONSTITUTION.rst @@ -0,0 +1,54 @@ +==================== +Octavia Constitution +==================== + +This document defines the guiding principles that project leadership will be +following in creating, improving and maintaining the Octavia project. + +Octavia is an OpenStack project +------------------------------- +This means we try to run things the same way other "canonized" OpenStack +projects operate from a procedural perspective. This is because we hope that +Octavia will eventually become a standard part of any OpenStack deployment. 
+ +Octavia is as open as OpenStack +------------------------------- +Octavia tries to follow the same standards for openness that the OpenStack +project also strives to follow: https://wiki.openstack.org/wiki/Open +We are committed to open design, development, and community. + +Octavia is "free" +----------------- +We mean that both in the "beer" and in the "speech" sense. That is to say, the +reference implementation for Octavia should be made up only of open source +components that share the same kind of unencumbered licensing that OpenStack +uses. + +Note that this does not mean we are against having vendors develop products +which can replace some of the components within Octavia. (For example, the +Octavia VM images might be replaced by a vendor's proprietary VM image.) +Rather, it means that: + +* The reference implementation should always be open source and unencumbered. +* We are typically not interested in making design compromises in order to work + with a vendor's proprietary product. If a vendor wants to develop a component + for Octavia, then the vendor should bend to Octavia's needs, not the other + way around. + +Octavia is a load balancer for large operators +---------------------------------------------- +That's not to say that small operators can't use it. (In fact, we expect it to +work well for small deployments, too.) But what we mean here is that if in +creating, improving or maintaining Octavia we somehow make it unable to meet +the needs of a typical large operator (or that operator's users), then we have +failed. + +Octavia follows the best coding and design conventions +------------------------------------------------------ +For the most part, Octavia tries to follow the coding standards set forth for +the OpenStack project in general: https://docs.openstack.org/hacking/latest +More specific additional standards can be found in the HACKING.rst file in the +same directory as this constitution. + +Any exceptions should be well justified and documented. (Comments in or near +the breach in coding standards are usually sufficient documentation.) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000000..1ae9a648de --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,19 @@ +The source repository for this project can be found at: + + https://opendev.org/openstack/octavia + +Pull requests submitted through GitHub are not monitored. + +To start contributing to OpenStack, follow the steps in the contribution guide +to set up and use Gerrit: + + https://docs.openstack.org/contributors/code-and-documentation/quick-start.html + +Bugs should be filed on Launchpad: + + https://launchpad.net/octavia + +For more specific information about contributing to this repository, see the +Octavia contributor guide: + + https://docs.openstack.org/octavia/latest/contributor/contributing.html diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 0000000000..f0bcfe93eb --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,159 @@ +.. _octavia-style-commandments: + +Octavia Style Commandments +========================== +This project was ultimately spawned from work done on the Neutron project. +As such, we tend to follow Neutron conventions regarding coding style. + +- We follow the OpenStack Style Commandments: + https://docs.openstack.org/hacking/latest + +Octavia Specific Commandments +----------------------------- +- [O319] Validate that debug level logs are not translated. 
+- [O321] Validate that jsonutils module is used instead of json +- [O322] Don't use author tags +- [O323] Change assertEqual(True, A) or assertEqual(False, A) to the more + specific assertTrue(A) or assertFalse(A) +- [O324] Method's default argument shouldn't be mutable +- [O339] LOG.warn() is not allowed. Use LOG.warning() +- [O340] Don't use xrange() +- [O341] Don't translate logs. +- [O342] Exception messages should be translated +- [O343] Python 3: do not use basestring. +- [O344] Python 3: do not use dict.iteritems. +- [O345] Usage of Python eventlet module not allowed +- [O346] Don't use backslashes for line continuation. +- [O347] Taskflow revert methods must have \*\*kwargs. + +Creating Unit Tests +------------------- +For every new feature, unit tests should be created that both test and +(implicitly) document the usage of said feature. If submitting a patch for a +bug that had no unit test, a new passing unit test should be added. If a +submitted bug fix does have a unit test, be sure to add a new one that fails +without the patch and passes with the patch. + +Everything is python +-------------------- +Although OpenStack apparently allows either python or C++ code, at this time +we don't envision needing anything other than python (and standard, supported +open source modules) for anything we intend to do in Octavia. + +Idempotency +----------- +With as much as is going on inside Octavia, it's likely that certain messages +and commands will be repeatedly processed. It's important that this doesn't +break the functionality of the load balancing service. Therefore, algorithms +and interfaces should be made as idempotent as possible. + +Centralize intelligence, de-centralize workload +----------------------------------------------- +This means that tasks which need to be done relatively infrequently but require +either additional knowledge about the state of other components in the Octavia +system, advanced logic behind decisions, or otherwise a high degree of +intelligence should be done by centralized components (e.g. controllers) within +the Octavia system. Examples of this might include: + +* Generating haproxy configuration files +* Managing the lifecycle of Octavia amphorae +* Moving a load balancer instance from one Octavia amphora to another. + +On the other hand, tasks done extremely often, or which entail a significant +load on the system, should be pushed as far out to the most horizontally +scalable components as possible. Examples of this might include: + +* Serving actual client requests to end-users (i.e. running haproxy) +* Monitoring pool members for failure and sending notifications about this +* Processing log files + +There will often be a balance that needs to be struck between these two design +considerations for any given task for which an algorithm needs to be designed. +In considering how to strike this balance, always consider the conditions +that will be present in a large operator environment. + +Also, as a secondary benefit of centralizing intelligence, minor feature +additions and bugfixes can often be accomplished in a large operator +environment without having to touch every Octavia amphora running in said +environment. + +All APIs are versioned +---------------------- +This includes "internal" APIs between Octavia components. Experience coding in +the Neutron LBaaS project has taught us that in a large project with many +heterogeneous parts, different parts will evolve at different rates +throughout the project's lifecycle.
It is important that these components are +allowed to do so without hindering or being hindered by parallel development +in other components. + +It is also likely that in very large deployments, there might be tens or +hundreds of thousands of individual instances of a given component deployed +(most likely, the Octavia amphorae). It is unreasonable to expect a large +operator to update all of these components at once. Therefore, it is likely +that for a significant amount of time during a roll-out of a new version, both +the old and new versions of a given component must be controllable by, or able +to interface with, the new components. + +Both of the above considerations can be accommodated if we version the APIs +through which components interact with each other. + +Octavia must also keep in mind Neutron LBaaS API versions. Octavia must have +the ability to support multiple simultaneous Neutron LBaaS API versions in an +effort to allow for Neutron LBaaS API deprecation of URIs. The rationale is +that Neutron LBaaS API users should have the ability to transition from one +version to the next easily. + +Scalability and resilience are as important as functionality +------------------------------------------------------------ +Octavia is meant to be an *operator scale* load balancer. As such, it's usually +not enough just to get something working: it also needs to be scalable. For +most components, "scalable" implies horizontally scalable. + +In any large operational environment, resilience to failures is a necessity. +Practically speaking, this means that all components that make up the Octavia +system should be monitored in one way or another, and that where possible +automatic recovery from the most common kinds of failures should become a +standard feature. Where automatic recovery is not an option, some form +of notification about the failure should be implemented. + +Avoid premature optimization +---------------------------- +Understand that being "high performance" is often not the same thing as being +"scalable." First get the thing to work in an intelligent way. Only worry about +making it fast if speed becomes an issue. + +Don't repeat yourself +--------------------- +Octavia strives to follow DRY principles. There should be one source of truth, +and repetition of code should be avoided. + +Security is not an afterthought +------------------------------- +The load balancer is often the most visible public interface to a given user +application, and load balancers themselves often have direct access to +sensitive components and data within the application environment. Security bugs +will happen, but in general we should not approve designs which have known +significant security problems, or which could be made more secure by better +design. + +Octavia should follow industry standards +---------------------------------------- +By "industry standards" we mean either RFCs or well-established best practices. +We are generally not interested in defining new standards if a prior open +standard already exists. We should also avoid doing things which directly +or indirectly contradict established standards. + +Use of pre-commit checks +------------------------ +`pre-commit`_ is a software tool that allows us to manage pre-commit checks as +part of the Git repository's configuration +and to run checks as Git pre-commit hooks (or other types of Git hooks) +automatically on developer machines. +It helps to catch and fix common issues before they get pushed to the server.
+After the installation of the tool (e.g. on Fedora via +`sudo dnf install pre-commit`) simply `cd` to the Git repository and run +`pre-commit install` to let the tool install its Git pre-commit hook. +From now on these predefined checks will run on files that you change in new +Git commits. + +.. _pre-commit: https://pre-commit.com/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..68c771a099 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/README.rst b/README.rst new file mode 100644 index 0000000000..5e9d4485f6 --- /dev/null +++ b/README.rst @@ -0,0 +1,58 @@ +======================== +Team and repository tags +======================== + +.. image:: https://governance.openstack.org/tc/badges/octavia.svg + :target: https://governance.openstack.org/tc/reference/tags/index.html + +.. Change things from this point on + +======= +Octavia +======= + +.. 
image:: https://img.shields.io/pypi/v/octavia.svg + :target: https://pypi.org/project/octavia/ + :alt: Latest Version + +Octavia is an operator-grade, open-source, scalable load balancer for use in +large OpenStack deployments. + +Octavia provides the load balancing API for OpenStack. It supports multiple +"provider drivers" that implement load balancing, including the "amphora" +reference driver included with Octavia. + +Octavia is distributed under the terms of the Apache License, Version 2.0. +The full terms and conditions of this license are detailed in the LICENSE +file. + +Project resources +~~~~~~~~~~~~~~~~~ + +Developer documentation for the Octavia project is available at +https://docs.openstack.org/octavia/latest/ + +Release notes for the Octavia project are available at +https://docs.openstack.org/releasenotes/octavia/ + +The project source code repository is located at +https://opendev.org/openstack/octavia + +Project status, bugs, and requests for feature enhancements are tracked on +https://launchpad.net/octavia + +For more information on project direction and guiding principles for +contributors, please see the CONSTITUTION.rst file in this directory, or +specifications in the specs/ sub-directory. + +The project roadmap is available at +https://wiki.openstack.org/wiki/Octavia/Roadmap + +External Resources +~~~~~~~~~~~~~~~~~~ + +* Octavia Wiki: https://wiki.openstack.org/wiki/Octavia + +* For help on usage and hacking of Octavia, please send an email to the + OpenStack-dev Mailing List + with the **[Octavia]** tag. diff --git a/TESTING.rst b/TESTING.rst new file mode 100644 index 0000000000..c057490b34 --- /dev/null +++ b/TESTING.rst @@ -0,0 +1,119 @@ +==================== +Testing with Octavia +==================== + + +Unit Testing +------------ + +Octavia uses tox to manage the virtual environments for running test cases. + +Install python-tox: + +.. code-block:: bash + + $ pip install tox + +To run the full suite of tests maintained within Octavia: + +.. code-block:: bash + + $ tox + +.. NOTE:: + + The first time you run ``tox``, it will take additional time to build + virtualenvs. You can later use the ``-r`` option with ``tox`` to rebuild + your virtualenv in a similar manner. + + +To run tests for one or more specific test environments (for example, the most +common configuration of Python 3 and PEP-8), list the environments with the +``-e`` option, separated by commas: + +.. code-block:: bash + + $ tox -e py3,pep8 + +See ``tox -l`` for the full list of available test environments. + +Structure of the Unit Test Tree +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The structure of the unit test tree should match the structure of the +code tree, e.g. :: + + - target module: octavia.common.utils + + - test module: octavia.tests.unit.common.test_utils + +Unit test modules should have the same path under octavia/tests/unit/ +as the module they target has under octavia/, and their name should be +the name of the target module prefixed by `test_`. This requirement +is intended to make it easier for developers to find the unit tests +for a given module. + +Similarly, when a test module targets a package, that module's name +should be the name of the package prefixed by `test_` with the same +path as when a test targets a module, e.g. 
:: + + - target package: octavia.hacking + + - test module: octavia.tests.unit.test_hacking + +The following command can be used to validate whether the unit test +tree is structured according to the above requirements: :: + + ./tools/check_unit_test_structure.sh + +Where appropriate, exceptions can be added to the above script. If +code is not part of the Octavia namespace, for example, it's probably +reasonable to exclude its unit tests from the check. + +Functional Testing +------------------ + +Octavia creates a simulated API and handler for its functional tests. +The tests then run requests against the mocked-up API. + +To run the entire suite of functional tests: + +.. code-block:: bash + + $ tox -e functional + +To run a specific functional test: + +.. code-block:: bash + + $ tox -e functional octavia.tests.functional.api.v2.test_load_balancer + +Tests can also be run using partial matching; for example, to run all v2 API +tests: + +.. code-block:: bash + + $ tox -e functional api.v2 + +Additional options can be used while running tests. Two useful options are +``-- --until-failure``, which runs the tests in a loop until the first failure +is hit, and ``-- --failing``, which, when used after an initial run, runs only +the tests that failed in the previous run. + +Scenario Testing +---------------- + +Octavia uses Tempest to cover the scenario tests for the project. +These tests are run against actual cloud deployments. + +To run the entire suite of scenario tests: + +.. code-block:: bash + + $ tox -e scenario + +.. NOTE:: + + The first time you run the Tempest scenario tests, export the + Tempest configuration directory + (e.g. ``TEMPEST_CONFIG_DIR=/opt/stack/tempest/etc``) + diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py new file mode 100644 index 0000000000..ac19486dcc --- /dev/null +++ b/api-ref/source/conf.py @@ -0,0 +1,206 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# octavia documentation build configuration file, created by +# sphinx-quickstart on Sat May 1 15:17:47 2010. +# +# This file is execfile()d with the current directory set to +# its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +import sys + +extensions = [ + 'os_api_ref', + 'openstackdocstheme' +] + + +html_theme = 'openstackdocs' +html_theme_options = { + "sidebar_dropdown": "api_ref", + "sidebar_mode": "toc" +} +openstackdocs_repo_name = 'openstack/octavia' +openstackdocs_bug_project = 'octavia' +openstackdocs_bug_tag = '' + + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, as shown here.
+sys.path.insert(0, os.path.abspath('../../')) +sys.path.insert(0, os.path.abspath('../')) +sys.path.insert(0, os.path.abspath('./')) + +# -- General configuration ---------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# +# source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +copyright = '2017-present, OpenStack Foundation' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# The reST default role (used for this markup: `text`) to use +# for all documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = False + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'native' + +# -- Options for man page output ---------------------------------------------- + +# Grouping the document tree for man pages. +# List of tuples 'sourcefile', 'target', 'title', 'Authors name', 'manual' + + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = ['_static'] + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names.
+# html_additional_pages = {} + +# If false, no module index is generated. +# html_use_modindex = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'octaviadoc' + + +# -- Options for LaTeX output ------------------------------------------------- + +# The paper size ('letter' or 'a4'). +# latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +# latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). +latex_documents = [ + ('index', 'Octavia.tex', 'OpenStack Octavia API Documentation', + 'OpenStack Foundation', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +# latex_preamble = '' + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_use_modindex = True diff --git a/api-ref/source/examples/versions-get-resp.json b/api-ref/source/examples/versions-get-resp.json new file mode 100644 index 0000000000..1c67d67cf6 --- /dev/null +++ b/api-ref/source/examples/versions-get-resp.json @@ -0,0 +1,19 @@ +{ + "versions": [{ + "status": "SUPPORTED", + "updated": "2016-12-11T00:00:00Z", + "id": "v2.0", + "links": [{ + "href": "http://10.21.21.53/load-balancer/v2", + "rel": "self" + }] + }, { + "status": "CURRENT", + "updated": "2018-04-20T00:00:00Z", + "id": "v2.1", + "links": [{ + "href": "http://10.21.21.53/load-balancer/v2", + "rel": "self" + }] + }] +} diff --git a/api-ref/source/http-status.yaml b/api-ref/source/http-status.yaml new file mode 100644 index 0000000000..f9ae08c372 --- /dev/null +++ b/api-ref/source/http-status.yaml @@ -0,0 +1,55 @@ +200: + default: | + Request was successful. +201: + default: | + Request has been fulfilled and a new resource created. +202: + default: | + Request is accepted, but processing may take some time. +203: + default: | + Returned information is not the full set, but a subset. +204: + default: | + Request fulfilled but the service does not return anything. +300: + default: | + The resource corresponds to more than one representation. +400: + default: | + Some content in the request was invalid. +401: + default: | + Access is denied due to invalid credentials. +403: + default: | + Policy does not allow the current user to do this operation. +404: + default: | + The requested resource could not be found. +405: + default: | + Method is not valid for this endpoint and resource. +409: + default: | + This resource has an action in progress that would conflict with this + request. +413: + default: | + The request entity is larger than the server is willing or able to + process.
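+# Illustrative note (an addition for clarity, not part of the upstream file): +# entries in this file are rendered into the API reference by the os-api-ref +# ``rest_status_code`` directive, as used in api-ref/source/index.rst: +# .. rest_status_code:: success http-status.yaml +# +# - 200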
+415: + default: | + The entity of the request is in a format not supported by the requested + resource for the method. +500: + default: | + Something went wrong with the service, which prevents it from fulfilling + the request. +501: + default: | + The service does not have the functionality required to fulfill this + request. +503: + default: | + The service cannot handle the request right now. diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst new file mode 100644 index 0000000000..933de610c1 --- /dev/null +++ b/api-ref/source/index.rst @@ -0,0 +1,86 @@ +:tocdepth: 2 + +============= + Octavia API +============= + +This is a reference for the OpenStack Load Balancing API, which is provided by +the Octavia project. + +**Current** API version + + :doc:`Octavia API v2 <v2/index>` + +**Supported** API version + + None + +.. toctree:: + :hidden: + + v2/index + +Octavia API minor releases are additive to the API major revision and share +the same URL path. Minor revision changes to the API are called out in the API +reference in the section in which the change occurred. Subsequent minor +versions are a superset of the previous versions of the same major revision. + +The API status reflects the state of the endpoint on the service. + +* *Current* indicates a stable version that is up-to-date, recent, and might + receive future versions. This endpoint should be prioritized over all + others. +* *Supported* is a stable version that is available on the server. However, it + is not likely the most recent available and might not be updated or might + be deprecated at some time in the future. +* *Deprecated* is a stable version that is still available but is being + deprecated and might be removed in the future. +* *Experimental* is not a stable version. This version is under development or + contains features that are otherwise subject to change. For more + information about API status values and version information, see + `Version Discovery <https://wiki.openstack.org/wiki/VersionDiscovery>`__. + +.. rest_expand_all:: + +------------- +API Discovery +------------- + +List All Versions +======================= + +.. rest_method:: GET / + +This fetches all the information about all known API versions in the +deployment. + +Response codes +-------------- + +.. rest_status_code:: success http-status.yaml + + - 200 + +.. rest_status_code:: error http-status.yaml + + - 500 + +Response +-------- + +.. rest_parameters:: parameters.yaml + + - id: api_version_id + - links: links + - status: api_version_status + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/versions-get-resp.json + :language: javascript + +.. note:: + This is just an example output and does not represent the current API + versions available. diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml new file mode 100644 index 0000000000..ee4de99809 --- /dev/null +++ b/api-ref/source/parameters.yaml @@ -0,0 +1,1887 @@ +############################################################################### +# Path fields +############################################################################### +path-amphora-id: + description: | + The ID of the amphora to query. + in: path + required: true + type: uuid +path-availability-zone-name: + description: | + The name of the availability zone to query. + in: path + required: true + type: string +path-availability-zone-profile-id: + description: | + The ID of the availability zone profile to query.
in: path + required: true + type: uuid +path-flavor-id: + description: | + The ID of the flavor to query. + in: path + required: true + type: uuid +path-flavorprofile-id: + description: | + The ID of the flavor profile to query. + in: path + required: true + type: uuid +path-healthmonitor-id: + description: | + The ID of the health monitor to query. + in: path + required: true + type: uuid +path-l7policy-id: + description: | + The ID of the L7 policy to query. + in: path + required: true + type: uuid +path-l7rule-id: + description: | + The ID of the L7 rule to query. + in: path + required: true + type: uuid +path-listener-id: + description: | + The ID of the listener to query. + in: path + required: true + type: uuid +path-loadbalancer-id: + description: | + The ID of the load balancer to query. + in: path + required: true + type: uuid +path-member-id: + description: | + The ID of the member to query. + in: path + required: true + type: uuid +path-pool-id: + description: | + The ID of the pool to query. + in: path + required: true + type: uuid +path-project-id: + description: | + The ID of the project to query. + in: path + required: true + type: string +path-provider: + description: | + The provider to query. + in: path + required: true + type: string +############################################################################### +# Query fields +############################################################################### +additive-only: + description: | + If ``true``, no members will be deleted during the batch operation. + in: query + min_version: 2.11 + required: false + type: boolean +cascade-delete: + description: | + If ``true``, all child objects of the load balancer will also be deleted. + in: query + required: false + type: boolean +fields: + description: | + The fields that you want the server to return. + If no ``fields`` query parameter is specified, + the Octavia API returns all attributes allowed by the policy settings. + By using the ``fields`` parameter, the API returns only the requested set + of attributes. The ``fields`` parameter can be specified multiple times. + For example, if you specify ``fields=id&fields=name`` in the request URL, + only the ``id`` and ``name`` attributes will be returned. + in: query + required: false + type: string +project_id_query: + description: | + The ID of the project to query. + in: query + required: false + type: string + +############################################################################### +# Body fields +############################################################################### +action: + description: | + The action associated with the resource. + in: body + required: true + type: string +active_connections: + description: | + The currently active connections. + in: body + required: true + type: integer +additional_vips: + description: | + A list of JSON objects defining "additional VIPs". The format for these + is ``{"subnet_id": <subnet_id>, "ip_address": <ip_address>}``, where + the ``subnet_id`` field is mandatory and the ``ip_address`` field is + optional. Additional VIP subnets must all belong to the same network as + the primary VIP. + in: body + required: false + type: array + min_version: 2.26 +address: + description: | + The IP address of the resource. + in: body + required: true + type: string +address-member: + description: | + The IP address of the backend member server.
in: body + required: true + type: string +admin_state_up: + description: | + The administrative state of the resource, which is + up (``true``) or down (``false``). + in: body + required: true + type: boolean +admin_state_up-default-optional: + description: | + The administrative state of the resource, which is + up (``true``) or down (``false``). Default is ``true``. + in: body + required: false + type: boolean +admin_state_up-optional: + description: | + The administrative state of the resource, which is + up (``true``) or down (``false``). + in: body + required: false + type: boolean +allowed_cidrs: + description: | + A list of IPv4, IPv6 or a mix of both CIDRs. + in: body + min_version: 2.12 + required: true + type: array +allowed_cidrs-optional: + description: | + A list of IPv4, IPv6 or a mix of both CIDRs. The default is all allowed. + When a list of CIDRs is provided, the default switches to deny all. + in: body + min_version: 2.12 + required: false + type: array +alpn_protocols-listener: + description: | + A list of ALPN protocols. + Available protocols: http/1.0, http/1.1, h2 + in: body + min_version: 2.20 + required: true + type: array +alpn_protocols-listener-optional: + description: | + A list of ALPN protocols. + Available protocols: http/1.0, http/1.1, h2 + in: body + min_version: 2.20 + required: false + type: array +alpn_protocols-pool: + description: | + A list of ALPN protocols. + Available protocols: http/1.0, http/1.1, h2 + in: body + min_version: 2.24 + required: true + type: array +alpn_protocols-pool-optional: + description: | + A list of ALPN protocols. + Available protocols: http/1.0, http/1.1, h2 + in: body + min_version: 2.24 + required: false + type: array +amphora-id: + description: | + The associated amphora ID. + in: body + required: true + type: uuid +amphora-role: + description: | + The role of the amphora. One of ``STANDALONE``, ``MASTER``, ``BACKUP``. + in: body + required: true + type: string +amphora-stats: + description: | + A list of amphora statistics objects, one per listener. + in: body + min_version: 2.3 + required: true + type: array +amphora-status: + description: | + The status of the amphora. One of: ``BOOTING``, ``ALLOCATED``, ``READY``, + ``PENDING_CREATE``, ``PENDING_DELETE``, ``DELETED``, ``ERROR``. + in: body + required: true + type: string +api_links: + description: | + Links to the resources in question. + in: body + required: true + type: array +api_version_id: + description: | + A common name for the version. + in: body + required: true + type: string +api_version_status: + description: | + The status of this API version. This can be one of: + + - ``CURRENT``: This is the preferred version of the API to use. + - ``SUPPORTED``: This is an older, but still supported version of the API. + - ``DEPRECATED``: A deprecated version of the API that is slated for + removal. + in: body + required: true + type: string +availability-zone-capabilities: + description: | + The provider availability zone capabilities dictionary object. + in: body + required: true + type: object +availability-zone-capability-description: + description: | + The provider availability zone capability description. + in: body + required: true + type: string +availability-zone-capability-name: + description: | + The provider availability zone capability name. + in: body + required: true + type: string +availability-zone-data: + description: | + The JSON string containing the availability zone metadata.
+ in: body + required: true + type: string +availability-zone-data-optional: + description: | + The JSON string containing the availability zone metadata. + in: body + required: false + type: string +availability-zone-name: + description: | + An availability zone name. + in: body + required: true + type: object +availability-zone-name-optional: + description: | + An availability zone name. + in: body + required: false + type: object +availability-zone-profile: + description: | + An ``availability zone profile`` object. + in: body + required: true + type: object +availability-zone-profile-id: + description: | + The ID of the availability zone profile. + in: body + required: true + type: uuid +availability-zone-profiles: + description: | + A list of ``availability zone profile`` objects. + in: body + required: true + type: array +availability-zones: + description: | + A list of ``availability zone`` objects. + in: body + required: true + type: array +availability_zone: + description: | + An availability zone object. + in: body + required: true + type: object +backup: + description: | + Is the member a backup? Backup members only receive traffic when all + non-backup members are down. + in: body + min_version: 2.1 + required: true + type: boolean +backup-optional: + description: | + Is the member a backup? Backup members only receive traffic when all + non-backup members are down. + in: body + min_version: 2.1 + required: false + type: boolean +bytes_in: + description: | + The total bytes received. + in: body + required: true + type: integer +bytes_out: + description: | + The total bytes sent. + in: body + required: true + type: integer +ca_tls_container_ref: + description: | + The reference of the `key manager service + `__ secret containing a + PEM format CA certificate bundle for ``tls_enabled`` pools. + in: body + min_version: 2.8 + required: true + type: string +ca_tls_container_ref-optional: + description: | + The reference of the `key manager service + `__ secret containing a + PEM format CA certificate bundle for ``tls_enabled`` pools. + in: body + min_version: 2.8 + required: false + type: string +cached-zone: + description: | + The availability zone of a compute instance, cached at create time. This + is not guaranteed to be current. May be an empty-string if the compute + service does not use zones. + in: body + required: true + type: string +cert-busy: + description: | + Whether the certificate is in the process of being replaced. + in: body + required: true + type: string +cert-expiration: + description: | + The date the certificate for the amphora expires. + in: body + required: true + type: string +client_authentication: + description: | + The TLS client authentication mode. One of the options ``NONE``, + ``OPTIONAL`` or ``MANDATORY``. + in: body + min_version: 2.8 + required: true + type: string +client_authentication-optional: + description: | + The TLS client authentication mode. One of the options ``NONE``, + ``OPTIONAL`` or ``MANDATORY``. + in: body + min_version: 2.8 + required: false + type: string +client_ca_tls_container_ref: + description: | + The ref of the `key manager service + `__ secret containing a + PEM format client CA certificate bundle for ``TERMINATED_HTTPS`` + listeners. + in: body + min_version: 2.8 + required: true + type: string +client_ca_tls_container_ref-optional: + description: | + The ref of the `key manager service + `__ secret containing a + PEM format client CA certificate bundle for ``TERMINATED_HTTPS`` + listeners. 
+ in: body + min_version: 2.8 + required: false + type: string +client_crl_container_ref: + description: | + The URI of the `key manager service + `__ secret containing a + PEM format CA revocation list file for ``TERMINATED_HTTPS`` listeners. + in: body + min_version: 2.8 + required: true + type: string +client_crl_container_ref-optional: + description: | + The URI of the `key manager service + `__ secret containing a + PEM format CA revocation list file for ``TERMINATED_HTTPS`` listeners. + in: body + min_version: 2.8 + required: false + type: string +compute-flavor: + description: | + The ID of the compute flavor used for the amphora. + in: body + min_version: 2.3 + required: true + type: string +compute-id: + description: | + The ID of the amphora resource in the compute system. + in: body + required: true + type: uuid +connection_limit: + description: | + The maximum number of connections permitted for this listener. Default + value is -1 which represents infinite connections or a default value + defined by the provider driver. + in: body + required: true + type: integer +connection_limit-optional: + description: | + The maximum number of connections permitted for this listener. Default + value is -1 which represents infinite connections or a default value + defined by the provider driver. + in: body + required: false + type: integer +created_at: + description: | + The UTC date and timestamp when the resource was created. + in: body + required: true + type: string +crl_container_ref: + description: | + The reference of the `key manager service + `__ secret containing a + PEM format CA revocation list file for ``tls_enabled`` pools. + in: body + required: true + type: string +crl_container_ref-optional: + description: | + The reference of the `key manager service + `__ secret containing a + PEM format CA revocation list file for ``tls_enabled`` pools. + in: body + required: false + type: string +default_pool_id: + description: | + The ID of the pool used by the listener if no L7 policies match. The pool + has some restrictions. See :ref:`valid_protocol`. + in: body + required: true + type: uuid +default_pool_id-optional: + description: | + The ID of the pool used by the listener if no L7 policies match. The pool + has some restrictions. See :ref:`valid_protocol`. + in: body + required: false + type: uuid +default_tls_container_ref: + description: | + The URI of the `key manager service + `__ secret containing a + PKCS12 format certificate/key bundle for ``TERMINATED_HTTPS`` listeners. + DEPRECATED: A secret container of type "certificate" containing the + certificate and key for ``TERMINATED_HTTPS`` listeners. + in: body + required: true + type: string +default_tls_container_ref-optional: + description: | + The URI of the `key manager service + `__ secret containing a + PKCS12 format certificate/key bundle for ``TERMINATED_HTTPS`` listeners. + DEPRECATED: A secret container of type "certificate" containing the + certificate and key for ``TERMINATED_HTTPS`` listeners. + in: body + required: false + type: string +description: + description: | + A human-readable description for the resource. + in: body + required: true + type: string +description-optional: + description: | + A human-readable description for the resource. + in: body + required: false + type: string +enabled: + description: | + If the resource is available for use. + in: body + required: true + type: boolean +enabled-optional: + description: | + If the resource is available for use. The default is True. 
+ in: body + required: false + type: boolean +flavor: + description: | + A flavor object. + in: body + required: true + type: object +flavor-capabilities: + description: | + The provider flavor capabilities dictionary object. + in: body + required: true + type: object +flavor-capability-description: + description: | + The provider flavor capability description. + in: body + required: true + type: string +flavor-capability-name: + description: | + The provider flavor capability name. + in: body + required: true + type: string +flavor-data: + description: | + The JSON string containing the flavor metadata. + in: body + required: true + type: string +flavor-data-optional: + description: | + The JSON string containing the flavor metadata. + in: body + required: false + type: string +flavor-id: + description: | + The ID of the flavor. + in: body + required: true + type: uuid +flavor-id-optional: + description: | + The ID of the flavor. + in: body + required: false + type: uuid +flavor-profile-id: + description: | + The ID of the flavor profile. + in: body + required: true + type: uuid +flavorprofile: + description: | + A ``flavorprofile`` object. + in: body + required: true + type: object +flavorprofiles: + description: | + A list of ``flavorprofile`` objects. + in: body + required: true + type: array +flavors: + description: | + A list of ``flavor`` objects. + in: body + required: true + type: array +healthmonitor-delay: + description: | + The time, in seconds, between sending probes to members. + in: body + required: true + type: integer +healthmonitor-delay-optional: + description: | + The time, in seconds, between sending probes to members. + in: body + required: false + type: integer +healthmonitor-domain_name: + description: | + The domain name that will be injected into the HTTP Host header sent to + the backend server during HTTP health checks. + in: body + min_version: 2.10 + required: true + type: string +healthmonitor-domain_name-optional: + description: | + The domain name that will be injected into the HTTP Host header sent to + the backend server during HTTP health checks. + in: body + min_version: 2.10 + required: false + type: string +healthmonitor-expected_codes: + description: | + The list of HTTP status codes expected in response from the member to + declare it healthy. Specify one of the following values: + + - A single value, such as ``200`` + - A list, such as ``200, 202`` + - A range, such as ``200-204`` + in: body + required: true + type: string +healthmonitor-expected_codes-optional: + description: | + The list of HTTP status codes expected in response from the member to + declare it healthy. Specify one of the following values: + + - A single value, such as ``200`` + - A list, such as ``200, 202`` + - A range, such as ``200-204`` + + The default is 200. + in: body + required: false + type: string +healthmonitor-http_method: + description: | + The HTTP method that the health monitor uses for requests. One of + ``CONNECT``, ``DELETE``, ``GET``, ``HEAD``, ``OPTIONS``, ``PATCH``, + ``POST``, ``PUT``, or ``TRACE``. + in: body + required: true + type: string +healthmonitor-http_method-optional: + description: | + The HTTP method that the health monitor uses for requests. One of + ``CONNECT``, ``DELETE``, ``GET``, ``HEAD``, ``OPTIONS``, ``PATCH``, + ``POST``, ``PUT``, or ``TRACE``. The default is ``GET``. + in: body + required: false + type: string +healthmonitor-http_version: + description: | + The HTTP version. One of ``1.0`` or ``1.1``. The default is ``1.0``.
+ in: body + min_version: 2.10 + required: true + type: float +healthmonitor-http_version-optional: + description: | + The HTTP version. One of ``1.0`` or ``1.1``. The default is ``1.0``. + in: body + min_version: 2.10 + required: false + type: float +healthmonitor-id: + description: | + The associated health monitor ID. + in: body + required: true + type: uuid +healthmonitor-max-retries: + description: | + The number of successful checks before changing the ``operating status`` + of the member to ``ONLINE``. A valid value is from ``1`` to ``10``. + in: body + required: true + type: integer +healthmonitor-max-retries-down: + description: | + The number of allowed check failures before changing the ``operating + status`` of the member to ``ERROR``. A valid value is from ``1`` to ``10``. + in: body + required: true + type: integer +healthmonitor-max-retries-down-optional: + description: | + The number of allowed check failures before changing the ``operating + status`` of the member to ``ERROR``. A valid value is from ``1`` to ``10``. + The default is ``3``. + in: body + required: false + type: integer +healthmonitor-max-retries-optional: + description: | + The number of successful checks before changing the ``operating status`` + of the member to ``ONLINE``. A valid value is from ``1`` to ``10``. + in: body + required: false + type: integer +healthmonitor-status: + description: | + The associated health monitor status object. + in: body + required: true + type: object +healthmonitor-timeout: + description: | + The maximum time, in seconds, that a monitor waits to connect before it + times out. This value must be less than the delay value. + in: body + required: true + type: integer +healthmonitor-timeout-optional: + description: | + The maximum time, in seconds, that a monitor waits to connect before it + times out. This value must be less than the delay value. + in: body + required: false + type: integer +healthmonitor-type: + description: | + The type of health monitor. One of ``HTTP``, ``HTTPS``, ``PING``, + ``SCTP``, ``TCP``, ``TLS-HELLO``, or ``UDP-CONNECT``. + in: body + required: true + type: string +healthmonitor-url_path: + description: | + The HTTP URL path of the request sent by the monitor to test the health of + a backend member. Must be a string that begins with a forward slash + (``/``). + in: body + required: true + type: string +healthmonitor-url_path-optional: + description: | + The HTTP URL path of the request sent by the monitor to test the health of + a backend member. Must be a string that begins with a forward slash + (``/``). The default URL path is ``/``. + in: body + required: false + type: string +hsts_include_subdomains: + description: | + Defines whether the ``includeSubDomains`` directive should be + added to the Strict-Transport-Security HTTP response + header. + in: body + min_version: 2.27 + required: true + type: bool +hsts_include_subdomains-optional: + description: | + Defines whether the ``includeSubDomains`` directive should be + added to the Strict-Transport-Security HTTP response + header. This requires setting the ``hsts_max_age`` option as well in + order to become effective. + in: body + min_version: 2.27 + required: false + type: bool +hsts_max_age: + description: | + The value of the ``max_age`` directive for the + Strict-Transport-Security HTTP response header. 
+ in: body + min_version: 2.27 + required: true + type: integer +hsts_max_age-optional: + description: | + The value of the ``max_age`` directive for the + Strict-Transport-Security HTTP response header. + Setting this enables HTTP Strict Transport + Security (HSTS) for the TLS-terminated listener. + in: body + min_version: 2.27 + required: false + type: integer +hsts_preload: + description: | + Defines whether the ``preload`` directive should be + added to the Strict-Transport-Security HTTP response + header. + in: body + min_version: 2.27 + required: true + type: bool +hsts_preload-optional: + description: | + Defines whether the ``preload`` directive should be + added to the Strict-Transport-Security HTTP response + header. This requires setting the ``hsts_max_age`` option as well in + order to become effective. + in: body + min_version: 2.27 + required: false + type: bool +id: + description: | + The ID of the resource. + in: body + required: true + type: uuid +image-id: + description: | + The ID of the glance image used for the amphora. + in: body + min_version: 2.1 + required: true + type: uuid +insert_headers: + description: | + A dictionary of optional headers to insert into the request before it is + sent to the backend ``member``. See :ref:`header_insertions`. Both keys + and values are always specified as strings. + in: body + required: true + type: object +insert_headers-optional: + description: | + A dictionary of optional headers to insert into the request before it is + sent to the backend ``member``. See :ref:`header_insertions`. Both keys + and values are always specified as strings. + in: body + required: false + type: object +l7policies-optional: + description: | + A list of L7 policy objects. + in: body + required: false + type: array +l7policies-status-object-list: + description: | + A list of L7 policy status objects. + in: body + required: true + type: array +l7policy-action: + description: | + The L7 policy action. One of ``REDIRECT_PREFIX``, ``REDIRECT_TO_POOL``, + ``REDIRECT_TO_URL``, or ``REJECT``. + in: body + required: true + type: string +l7policy-action-optional: + description: | + The L7 policy action. One of ``REDIRECT_PREFIX``, ``REDIRECT_TO_POOL``, + ``REDIRECT_TO_URL``, or ``REJECT``. + in: body + required: false + type: string +l7policy-id: + description: | + The ID of the L7 policy. + in: body + required: true + type: uuid +l7policy-ids: + description: | + A list of L7 policy IDs. + in: body + required: true + type: array +l7policy-position: + description: | + The position of this policy on the listener. Positions start at 1. + in: body + required: true + type: integer +l7policy-position-optional: + description: | + The position of this policy on the listener. Positions start at 1. + in: body + required: false + type: integer +l7policy-redirect-http-code: + description: | + Requests matching this policy will be redirected to the specified URL or + Prefix URL with the HTTP response code. Valid if ``action`` is + ``REDIRECT_TO_URL`` or ``REDIRECT_PREFIX``. Valid options are: 301, 302, + 303, 307, or 308. Default is 302. + in: body + min_version: 2.9 + required: true + type: integer +l7policy-redirect-http-code-optional: + description: | + Requests matching this policy will be redirected to the specified URL or + Prefix URL with the HTTP response code. Valid if ``action`` is + ``REDIRECT_TO_URL`` or ``REDIRECT_PREFIX``. Valid options are: 301, 302, + 303, 307, or 308. Default is 302. 
+ in: body + min_version: 2.9 + required: false + type: integer +l7policy-redirect-pool_id: + description: | + Requests matching this policy will be redirected to the pool with this ID. + Only valid if ``action`` is ``REDIRECT_TO_POOL``. The pool has some + restrictions. See :ref:`valid_protocol`. + in: body + required: true + type: uuid +l7policy-redirect-pool_id-optional: + description: | + Requests matching this policy will be redirected to the pool with this ID. + Only valid if ``action`` is ``REDIRECT_TO_POOL``. The pool has some + restrictions. See :ref:`valid_protocol`. + in: body + required: false + type: uuid +l7policy-redirect-prefix: + description: | + Requests matching this policy will be redirected to this Prefix URL. + Only valid if ``action`` is ``REDIRECT_PREFIX``. + in: body + required: true + type: string +l7policy-redirect-prefix-optional: + description: | + Requests matching this policy will be redirected to this Prefix URL. + Only valid if ``action`` is ``REDIRECT_PREFIX``. + in: body + required: false + type: string +l7policy-redirect-url: + description: | + Requests matching this policy will be redirected to this URL. + Only valid if ``action`` is ``REDIRECT_TO_URL``. + in: body + required: true + type: string +l7policy-redirect-url-optional: + description: | + Requests matching this policy will be redirected to this URL. + Only valid if ``action`` is ``REDIRECT_TO_URL``. + in: body + required: false + type: string +l7policy-rule-ids: + description: | + List of associated L7 rule IDs. + in: body + required: true + type: array +l7rule-compare_type: + description: | + The comparison type for the L7 rule. One of ``CONTAINS``, ``ENDS_WITH``, + ``EQUAL_TO``, ``REGEX``, or ``STARTS_WITH``. + in: body + required: true + type: string +l7rule-compare_type-optional: + description: | + The comparison type for the L7 rule. One of ``CONTAINS``, ``ENDS_WITH``, + ``EQUAL_TO``, ``REGEX``, or ``STARTS_WITH``. + in: body + required: false + type: string +l7rule-id: + description: | + The ID of the L7 rule. + in: body + required: true + type: uuid +l7rule-invert: + description: | + When ``true``, the logic of the rule is inverted. For example, with + invert ``true``, `equal to` would become `not equal to`. + in: body + required: true + type: boolean +l7rule-invert-optional: + description: | + When ``true``, the logic of the rule is inverted. For example, with + invert ``true``, `equal to` would become `not equal to`. + Default is ``false``. + in: body + required: false + type: boolean +l7rule-key: + description: | + The key to use for the comparison. For example, the name of the cookie + to evaluate. + in: body + required: true + type: string +l7rule-key-optional: + description: | + The key to use for the comparison. For example, the name of the cookie + to evaluate. + in: body + required: false + type: string +l7rule-type: + description: | + The L7 rule type. One of ``COOKIE``, ``FILE_TYPE``, ``HEADER``, + ``HOST_NAME``, ``PATH``, ``SSL_CONN_HAS_CERT``, ``SSL_VERIFY_RESULT``, + or ``SSL_DN_FIELD``. + in: body + required: true + type: string +l7rule-type-optional: + description: | + The L7 rule type. One of ``COOKIE``, ``FILE_TYPE``, ``HEADER``, + ``HOST_NAME``, ``PATH``, ``SSL_CONN_HAS_CERT``, ``SSL_VERIFY_RESULT``, + or ``SSL_DN_FIELD``. + in: body + required: false + type: string +l7rule-value: + description: | + The value to use for the comparison. For example, the file type to compare.
+ in: body + required: true + type: string +l7rule-value-optional: + description: | + The value to use for the comparison. For example, the file type to compare. + in: body + required: false + type: string +l7rules-status-object-list: + description: | + A list of L7 rule status objects. + in: body + required: true + type: array +lb-algorithm: + description: | + The load balancing algorithm for the pool. One of ``LEAST_CONNECTIONS``, + ``ROUND_ROBIN``, ``SOURCE_IP``, or ``SOURCE_IP_PORT``. + in: body + required: true + type: string +lb-algorithm-optional: + description: | + The load balancing algorithm for the pool. One of ``LEAST_CONNECTIONS``, + ``ROUND_ROBIN``, ``SOURCE_IP``, or ``SOURCE_IP_PORT``. + in: body + required: false + type: string +lb-network-ip: + description: | + The management IP of the amphora. + in: body + required: true + type: string +links: + description: | + A list of relative links. Includes the self link for the API. + in: body + required: true + type: array +listener: + description: | + A listener object. + in: body + required: true + type: object +listener-id: + description: | + The ID of the listener. + in: body + required: true + type: uuid +listener-id-pool-optional: + description: | + The ID of the listener for the pool. Either ``listener_id`` or + ``loadbalancer_id`` must be specified. The listener has some restrictions. + See :ref:`valid_protocol`. + in: body + required: false + type: uuid +listener-ids: + description: | + A list of listener IDs. + in: body + required: true + type: array +listeners: + description: | + The associated listener IDs, if any. + in: body + required: true + type: array +listeners-optional: + description: | + The associated listener IDs, if any. + in: body + required: false + type: array +listeners-status-object-list: + description: | + A list of listener status objects. + in: body + required: true + type: array +loadbalancer: + description: | + A load balancer object. + in: body + required: true + type: object +loadbalancer-id: + description: | + The ID of the load balancer. + in: body + required: true + type: uuid +loadbalancer-id-pool-optional: + description: | + The ID of the load balancer for the pool. Either ``listener_id`` or + ``loadbalancer_id`` must be specified. + in: body + required: false + type: uuid +loadbalancer-ids: + description: | + A list of load balancer IDs. + in: body + required: true + type: array +loadbalancer-status: + description: | + A load balancer status object. + in: body + required: true + type: object +loadbalancers: + description: | + A list of ``loadbalancer`` objects. + in: body + required: true + type: array +member-id: + description: | + The ID of the member. + in: body + required: true + type: uuid +member-ids: + description: | + A list of member IDs. + in: body + required: true + type: array +member_vnic_type: + description: | + The member vNIC type used for the member port. One of ``normal`` or + ``direct``. + in: body + required: true + type: string + min_version: 2.29 +members-status-object-list: + description: | + A list of member status objects. + in: body + required: true + type: array +monitor_address: + description: | + An alternate IP address used for health monitoring a backend member. + Default is ``null`` which monitors the member ``address``. + in: body + required: true + type: string +monitor_address-optional: + description: | + An alternate IP address used for health monitoring a backend member. + Default is ``null`` which monitors the member ``address``.
+ in: body + required: false + type: string +monitor_port: + description: | + An alternate protocol port used for health monitoring a backend member. + Default is ``null`` which monitors the member ``protocol_port``. + in: body + required: true + type: integer +monitor_port-optional: + description: | + An alternate protocol port used for health monitoring a backend member. + Default is ``null`` which monitors the member ``protocol_port``. + in: body + required: false + type: integer +name: + description: | + Human-readable name of the resource. + in: body + required: true + type: string +name-optional: + description: | + Human-readable name of the resource. + in: body + required: false + type: string +operating_status: + description: | + The operating status of the resource. See :ref:`op_status`. + in: body + required: true + type: string +pool-id: + description: | + The ID of the pool. + in: body + required: true + type: uuid +pool-optional: + description: | + A pool object. + in: body + required: false + type: object +pools-status-list: + description: | + The list of pools status objects. + in: body + required: true + type: array +pools_ids: + description: | + The associated pool IDs, if any. + in: body + required: true + type: array +project_id: + description: | + The ID of the project owning this resource. + in: body + required: true + type: string +project_id-optional: + description: | + The ID of the project owning this resource. + in: body + required: false + type: string +project_id-optional-deprecated: + description: | + The ID of the project owning this resource. (deprecated) + in: body + required: false + type: string +protocol: + description: | + The protocol for the resource. One of ``HTTP``, ``HTTPS``, ``SCTP``, + ``PROMETHEUS``, ``TCP``, ``TERMINATED_HTTPS``, or ``UDP``. + in: body + required: true + type: string +protocol-pools: + description: | + The protocol for the resource. One of ``HTTP``, ``HTTPS``, ``PROXY``, + ``PROXYV2``, ``SCTP``, ``TCP``, or ``UDP``. + in: body + required: true + type: string +protocol_port: + description: | + The protocol port number for the resource. + in: body + required: true + type: integer +protocol_port-member: + description: | + The protocol port number the backend member server is listening on. + in: body + required: true + type: integer +provider: + description: | + Provider name for the load balancer. + in: body + required: true + type: string +provider-description: + description: | + Provider description. + in: body + required: true + type: string +provider-name: + description: | + Provider name. + in: body + required: true + type: string +provider-name-optional: + description: | + Provider name. + in: body + required: false + type: string +provider-optional: + description: | + Provider name for the load balancer. Default is ``octavia``. + in: body + required: false + type: string +provisioning_status: + description: | + The provisioning status of the resource. See :ref:`prov_status`. + in: body + required: true + type: string +quota-health_monitor: + description: | + The configured health monitor quota limit. A setting of ``null`` means it + is using the deployment default quota. A setting of ``-1`` + means unlimited. + in: body + required: true + type: integer +quota-health_monitor-optional: + description: | + The configured health monitor quota limit. A setting of ``null`` means it + is using the deployment default quota. A setting of ``-1`` means + unlimited. 
+ in: body + required: false + type: integer +quota-l7policy: + description: | + The configured l7policy quota limit. A setting of ``null`` means it is + using the deployment default quota. A setting of ``-1`` means unlimited. + in: body + required: true + type: integer +quota-l7policy-optional: + description: | + The configured l7policy quota limit. A setting of ``null`` means it is + using the deployment default quota. A setting of ``-1`` means unlimited. + in: body + required: false + type: integer +quota-l7rule: + description: | + The configured l7rule quota limit. A setting of ``null`` means it is + using the deployment default quota. A setting of ``-1`` means unlimited. + in: body + required: true + type: integer +quota-l7rule-optional: + description: | + The configured l7rule quota limit. A setting of ``null`` means it is + using the deployment default quota. A setting of ``-1`` means unlimited. + in: body + required: false + type: integer +quota-listener: + description: | + The configured listener quota limit. A setting of ``null`` means it is + using the deployment default quota. A setting of ``-1`` means unlimited. + in: body + required: true + type: integer +quota-listener-optional: + description: | + The configured listener quota limit. A setting of ``null`` means it is + using the deployment default quota. A setting of ``-1`` means unlimited. + in: body + required: false + type: integer +quota-load_balancer: + description: | + The configured load balancer quota limit. A setting of ``null`` means it + is using the deployment default quota. A setting of ``-1`` means + unlimited. + in: body + required: true + type: integer +quota-load_balancer-optional: + description: | + The configured load balancer quota limit. A setting of ``null`` means it + is using the deployment default quota. A setting of ``-1`` means + unlimited. + in: body + required: false + type: integer +quota-member: + description: | + The configured member quota limit. A setting of ``null`` means it is using + the deployment default quota. A setting of ``-1`` means unlimited. + in: body + required: true + type: integer +quota-member-optional: + description: | + The configured member quota limit. A setting of ``null`` means it is using + the deployment default quota. A setting of ``-1`` means unlimited. + in: body + required: false + type: integer +quota-pool: + description: | + The configured pool quota limit. A setting of ``null`` means it is using + the deployment default quota. A setting of ``-1`` means unlimited. + in: body + required: true + type: integer +quota-pool-optional: + description: | + The configured pool quota limit. A setting of ``null`` means it is using + the deployment default quota. A setting of ``-1`` means unlimited. + in: body + required: false + type: integer +request_errors: + description: | + The total requests that were unable to be fulfilled. + in: body + required: true + type: integer +request_sriov: + description: | + Request that an SR-IOV VF be used for the member network port. Defaults to + ``false``. + in: body + required: false + type: boolean + min_version: 2.29 +session_persistence: + description: | + A JSON object specifying the session persistence for the pool or ``null`` + for no session persistence. See :ref:`session_persistence`. Default is + ``null``. + in: body + required: true + type: object +session_persistence-optional: + description: | + A JSON object specifying the session persistence for the pool or ``null`` + for no session persistence. See :ref:`session_persistence`.
Default is + ``null``. + in: body + required: false + type: object +session_persistence_cookie: + description: | + The name of the cookie to use for session persistence. Only applicable to + the ``APP_COOKIE`` session persistence type where it is required. + in: body + required: false + type: string +session_persistence_granularity: + description: | + The netmask used to determine SCTP or UDP session persistence. Currently + only valid for SCTP or UDP pools with session persistence of SOURCE_IP. + The default netmask is 255.255.255.255, meaning full per-client IP + persistence. + in: body + min_version: 2.2 + required: false + type: string +session_persistence_timeout: + description: | + The timeout, in seconds, after which an SCTP or UDP flow may be + rescheduled to a different member. Currently only applies to SCTP or UDP + pools with session persistence of SOURCE_IP. Default is 360. + in: body + min_version: 2.2 + required: false + type: integer +session_persistence_type: + description: | + Session persistence type for the pool. One of ``APP_COOKIE``, + ``HTTP_COOKIE``, or ``SOURCE_IP``. + in: body + required: true + type: string +sni_container_refs: + description: | + A list of URIs to the `key manager service + <https://docs.openstack.org/castellan/latest/>`__ secrets containing + PKCS12 format certificate/key bundles for ``TERMINATED_HTTPS`` listeners. + (DEPRECATED) Secret containers of type "certificate" containing the + certificates and keys for ``TERMINATED_HTTPS`` listeners. + in: body + required: true + type: array +sni_container_refs-optional: + description: | + A list of URIs to the `key manager service + <https://docs.openstack.org/castellan/latest/>`__ secrets containing + PKCS12 format certificate/key bundles for ``TERMINATED_HTTPS`` listeners. + (DEPRECATED) Secret containers of type "certificate" containing the + certificates and keys for ``TERMINATED_HTTPS`` listeners. + in: body + required: false + type: array +stats: + description: | + A statistics object. + in: body + required: true + type: object +statuses: + description: | + The status tree of a load balancer object contains all provisioning and + operating statuses for its children. + in: body + required: true + type: object +subnet_id: + description: | + The subnet ID the member service is accessible from. + in: body + required: true + type: uuid +subnet_id-optional: + description: | + The subnet ID the member service is accessible from. + in: body + required: false + type: uuid +tags: + description: | + A list of simple strings assigned to the resource. + in: body + min_version: 2.5 + required: true + type: array +tags-optional: + description: | + A list of simple strings assigned to the resource. + in: body + min_version: 2.5 + required: false + type: array +timeout_client_data: + description: | + Frontend client inactivity timeout in milliseconds. Default: 50000. + in: body + min_version: 2.1 + required: true + type: integer +timeout_client_data-optional: + description: | + Frontend client inactivity timeout in milliseconds. Default: 50000. + in: body + min_version: 2.1 + required: false + type: integer +timeout_member_connect: + description: | + Backend member connection timeout in milliseconds. Default: 5000. + in: body + min_version: 2.1 + required: true + type: integer +timeout_member_connect-optional: + description: | + Backend member connection timeout in milliseconds. Default: 5000. + in: body + min_version: 2.1 + required: false + type: integer +timeout_member_data: + description: | + Backend member inactivity timeout in milliseconds. Default: 50000.
+ in: body + min_version: 2.1 + required: true + type: integer +timeout_member_data-optional: + description: | + Backend member inactivity timeout in milliseconds. Default: 50000. + in: body + min_version: 2.1 + required: false + type: integer +timeout_tcp_inspect: + description: | + Time, in milliseconds, to wait for additional TCP packets for content + inspection. Default: 0. + in: body + min_version: 2.1 + required: true + type: integer +timeout_tcp_inspect-optional: + description: | + Time, in milliseconds, to wait for additional TCP packets for content + inspection. Default: 0. + in: body + min_version: 2.1 + required: false + type: integer +tls_ciphers: + description: | + List of ciphers in OpenSSL format (colon-separated). + See https://www.openssl.org/docs/man1.1.1/man1/ciphers.html + in: body + min_version: 2.15 + required: true + type: string +tls_ciphers-optional: + description: | + List of ciphers in OpenSSL format (colon-separated). + See https://www.openssl.org/docs/man1.1.1/man1/ciphers.html + in: body + min_version: 2.15 + required: false + type: string +tls_container_ref: + description: | + The reference to the `key manager service + <https://docs.openstack.org/castellan/latest/>`__ secret containing a + PKCS12 format certificate/key bundle for ``tls_enabled`` pools for + TLS client authentication to the member servers. + in: body + min_version: 2.8 + required: true + type: string +tls_container_ref-optional: + description: | + The reference to the `key manager service + <https://docs.openstack.org/castellan/latest/>`__ secret containing a + PKCS12 format certificate/key bundle for ``tls_enabled`` pools for + TLS client authentication to the member servers. + in: body + min_version: 2.8 + required: false + type: string +tls_enabled: + description: | + When ``true``, connections to backend member servers will use TLS + encryption. Default is ``false``. + in: body + min_version: 2.8 + required: true + type: boolean +tls_enabled-optional: + description: | + When ``true``, connections to backend member servers will use TLS + encryption. Default is ``false``. + in: body + min_version: 2.8 + required: false + type: boolean +tls_versions: + description: | + A list of TLS protocol versions. + Available versions: SSLv3, TLSv1, TLSv1.1, TLSv1.2, TLSv1.3 + in: body + min_version: 2.17 + required: true + type: array +tls_versions-optional: + description: | + A list of TLS protocol versions. + Available versions: SSLv3, TLSv1, TLSv1.1, TLSv1.2, TLSv1.3 + in: body + min_version: 2.17 + required: false + type: array +total_connections: + description: | + The total connections handled. + in: body + required: true + type: integer +type: + description: | + The type associated with the resource. + in: body + required: true + type: string +updated_at: + description: | + The UTC date and timestamp when the resource was last updated. + in: body + required: true + type: string +vip_address: + description: | + The IP address of the Virtual IP (VIP). + in: body + required: true + type: string +vip_address-optional: + description: | + The IP address of the Virtual IP (VIP). + in: body + required: false + type: string +vip_network_id: + description: | + The ID of the network for the Virtual IP (VIP). + in: body + required: true + type: uuid +vip_network_id-optional: + description: | + The ID of the network for the Virtual IP (VIP). One of ``vip_network_id``, + ``vip_port_id``, or ``vip_subnet_id`` must be specified. + in: body + required: false + type: uuid +vip_port_id: + description: | + The ID of the Virtual IP (VIP) port.
+ in: body + required: true + type: uuid +vip_port_id-optional: + description: | + The ID of the Virtual IP (VIP) port. One of ``vip_network_id``, + ``vip_port_id``, or ``vip_subnet_id`` must be specified. + in: body + required: false + type: uuid +vip_qos_policy_id: + description: | + The ID of the QoS Policy which will apply to the Virtual IP (VIP). + in: body + required: true + type: uuid +vip_qos_policy_id-optional: + description: | + The ID of the QoS Policy which will apply to the Virtual IP (VIP). + in: body + required: false + type: uuid +vip_sg_ids: + description: | + The list of Security Group IDs of the Virtual IP (VIP) port of the Load + Balancer. + in: body + required: true + type: array + min_version: 2.29 +vip_sg_ids-optional: + description: | + The list of Security Group IDs of the Virtual IP (VIP) port of the Load + Balancer. + in: body + required: false + type: array + min_version: 2.29 +vip_subnet_id: + description: | + The ID of the subnet for the Virtual IP (VIP). + in: body + required: true + type: uuid +vip_subnet_id-optional: + description: | + The ID of the subnet for the Virtual IP (VIP). One of ``vip_network_id``, + ``vip_port_id``, or ``vip_subnet_id`` must be specified. + in: body + required: false + type: uuid +vip_vnic_type: + description: | + The VIP vNIC type used for the load balancer. One of ``normal`` or + ``direct``. + in: body + required: true + type: string + min_version: 2.28 +vrrp-id: + description: | + The VRRP group's ID for the amphora. + in: body + required: true + type: string +vrrp-interface: + description: | + The bound interface name of the VRRP port on the amphora. + in: body + required: true + type: string +vrrp-ip: + description: | + The address of the VRRP port on the amphora. + in: body + required: true + type: string +vrrp-port-id: + description: | + The VRRP port's ID in the networking system. + in: body + required: true + type: uuid +vrrp-priority: + description: | + The priority of the amphora in the VRRP group. + in: body + required: true + type: string +weight: + description: | + The weight of a member determines the portion of requests or connections it + services compared to the other members of the pool. For example, a member + with a weight of 10 receives five times as many requests as a member with a + weight of 2. A value of 0 means the member does not receive new connections + but continues to service existing connections. A valid value is + from ``0`` to ``256``. Default is ``1``. + in: body + required: true + type: integer +weight-optional: + description: | + The weight of a member determines the portion of requests or connections it + services compared to the other members of the pool. For example, a member + with a weight of 10 receives five times as many requests as a member with a + weight of 2. A value of 0 means the member does not receive new connections + but continues to service existing connections. A valid value is + from ``0`` to ``256``. Default is ``1``. + in: body + required: false + type: integer diff --git a/api-ref/source/v2/amphora.inc b/api-ref/source/v2/amphora.inc new file mode 100644 index 0000000000..2e867522d6 --- /dev/null +++ b/api-ref/source/v2/amphora.inc @@ -0,0 +1,347 @@ +.. -*- rst -*- + +List Amphora +============ + +.. rest_method:: GET /v2/octavia/amphorae + +Lists all amphorae for the project. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code.
+ +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +The list might be empty. + +.. NOTE:: + + The field `cached_zone` should be used for quick filtering and reference + only, as it may be out of date. If an up-to-date zone is vital, we + recommend retrieving details directly from the compute service. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + +Curl Example +------------ + +.. literalinclude:: examples/amphora-list-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - id: amphora-id + - loadbalancer_id: loadbalancer-id + - compute_id: compute-id + - lb_network_ip: lb-network-ip + - vrrp_ip: vrrp-ip + - ha_ip: vip_address + - vrrp_port_id: vrrp-port-id + - ha_port_id: vip_port_id + - cert_expiration: cert-expiration + - cert_busy: cert-busy + - role: amphora-role + - status: amphora-status + - vrrp_interface: vrrp-interface + - vrrp_id: vrrp-id + - vrrp_priority: vrrp-priority + - cached_zone: cached-zone + - created_at: created_at + - updated_at: updated_at + - image_id: image-id + - compute_flavor: compute-flavor + +Response Example +---------------- + +.. literalinclude:: examples/amphora-list-response.json + :language: javascript + +Show Amphora details +==================== + +.. rest_method:: GET /v2/octavia/amphorae/{amphora_id} + +Shows the details of an amphora. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +This operation does not require a request body. + +.. NOTE:: + + The field `cached_zone` should be used for quick filtering and reference + only, as it may be out of date. If an up-to-date zone is vital, we + recommend retrieving details directly from the compute service. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - amphora_id: path-amphora-id + +Curl Example +------------ + +.. literalinclude:: examples/amphora-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - id: amphora-id + - loadbalancer_id: loadbalancer-id + - compute_id: compute-id + - lb_network_ip: lb-network-ip + - vrrp_ip: vrrp-ip + - ha_ip: vip_address + - vrrp_port_id: vrrp-port-id + - ha_port_id: vip_port_id + - cert_expiration: cert-expiration + - cert_busy: cert-busy + - role: amphora-role + - status: amphora-status + - vrrp_interface: vrrp-interface + - vrrp_id: vrrp-id + - vrrp_priority: vrrp-priority + - cached_zone: cached-zone + - created_at: created_at + - updated_at: updated_at + - image_id: image-id + - compute_flavor: compute-flavor + +Response Example +---------------- + +.. literalinclude:: examples/amphora-show-response.json + :language: javascript + +Show Amphora Statistics +======================= + +.. rest_method:: GET /v2/octavia/amphorae/{amphora_id}/stats + +Show the statistics for an amphora. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code.
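+
+A minimal request sketch, for illustration only (it assumes an administrative
+token stored in a ``TOKEN`` shell variable and reuses the amphora ID from the
+examples below):
+
+.. code-block:: bash
+
+   # Request only the byte counters for one amphora; the ``fields``
+   # query parameter is described in the following paragraph.
+   curl -X GET -H "X-Auth-Token: $TOKEN" \
+     "http://198.51.100.10:9876/v2/octavia/amphorae/63d8349e-c4d7-4156-bc94-29260607b04f/stats?fields=bytes_in&fields=bytes_out"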
+ +Use the ``fields`` query parameter to control which fields are +returned in the response body. + +**New in version 2.3** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - amphora_id: path-amphora-id + - fields: fields + +Curl Example +------------ + +.. literalinclude:: examples/amphora-show-stats-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - active_connections: active_connections + - amphora_stats: amphora-stats + - bytes_in: bytes_in + - bytes_out: bytes_out + - id: amphora-id + - listener_id: listener-id + - loadbalancer_id: loadbalancer-id + - request_errors: request_errors + - total_connections: total_connections + +Response Example +---------------- + +.. literalinclude:: examples/amphora-show-stats-response.json + :language: javascript + +Configure Amphora +================= + +.. rest_method:: PUT /v2/octavia/amphorae/{amphora_id}/config + +Update the amphora agent configuration. This will push the new configuration +to the amphora agent and will update the configuration options that are +mutable. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +This operation does not require a request body. + +**New in version 2.7** + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - amphora_id: path-amphora-id + +Curl Example +------------ + +.. literalinclude:: examples/amphora-config-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful PUT request. + +Failover Amphora +================ + +.. rest_method:: PUT /v2/octavia/amphorae/{amphora_id}/failover + +Force an amphora to fail over. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +This operation does not require a request body. + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - amphora_id: path-amphora-id + +Curl Example +------------ + +.. literalinclude:: examples/amphora-failover-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful PUT request. + +Remove an Amphora +================= + +.. rest_method:: DELETE /v2/octavia/amphorae/{amphora_id} + +Removes an amphora and its associated configuration. + +The API immediately purges any and all configuration data, depending on the +configuration settings. You cannot recover it. + +**New in version 2.20** + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - amphora_id: path-amphora-id + +Curl Example +------------ + +.. literalinclude:: examples/amphora-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request.
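+
+.. note::
+
+   The following is an illustrative sketch only, not part of the formal API
+   reference. It assumes an administrative token in a ``TOKEN`` shell
+   variable, the example endpoint used throughout this document, and the
+   availability of ``jq`` for JSON parsing.
+
+.. code-block:: bash
+
+   # Find the MASTER amphora of a load balancer, then force it to
+   # fail over using the failover call documented above.
+   LB_ID=09eedfc6-2c55-41a8-a75c-2cd4e95212ca
+   AMP_ID=$(curl -s -H "X-Auth-Token: $TOKEN" \
+     "http://198.51.100.10:9876/v2/octavia/amphorae?loadbalancer_id=$LB_ID" \
+     | jq -r '.amphorae[] | select(.role == "MASTER") | .id')
+   curl -X PUT -H "X-Auth-Token: $TOKEN" \
+     "http://198.51.100.10:9876/v2/octavia/amphorae/$AMP_ID/failover"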
diff --git a/api-ref/source/v2/availabilityzone.inc b/api-ref/source/v2/availabilityzone.inc new file mode 100644 index 0000000000..9bdf2d1894 --- /dev/null +++ b/api-ref/source/v2/availabilityzone.inc @@ -0,0 +1,290 @@ +.. -*- rst -*- + +List Availability Zones +======================= + +.. rest_method:: GET /v2.0/lbaas/availabilityzones + +List all available availability zones. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +The list might be empty. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + +Curl Example +------------ + +.. literalinclude:: examples/availabilityzone-list-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - description: description + - enabled: enabled + - availability_zone_profile_id: availability-zone-profile-id + - availability_zones: availability-zones + - name: name + +Response Example +---------------- + +.. literalinclude:: examples/availabilityzone-list-response.json + :language: javascript + +Create Availability Zone +======================== + +.. rest_method:: POST /v2.0/lbaas/availabilityzones + +Creates an availability zone. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - description: description-optional + - enabled: enabled-optional + - availability_zone: availability_zone + - availability_zone_profile_id: availability-zone-profile-id + - name: name + +Request Example +--------------- + +.. literalinclude:: examples/availabilityzone-create-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/availabilityzone-create-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - description: description + - enabled: enabled + - availability_zone_profile_id: availability-zone-profile-id + - availability_zone: availability_zone + - name: name + +Response Example +---------------- + +.. literalinclude:: examples/availabilityzone-create-response.json + :language: javascript + + +Show Availability Zone Details +============================== + +.. rest_method:: GET /v2.0/lbaas/availabilityzones/{availability_zone_name} + +Shows the details of an availability zone. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +This operation does not require a request body. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +..
rest_status_code:: error ../http-status.yaml + + - 401 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - availability_zone_name: path-availability-zone-name + +Curl Example +------------ + +.. literalinclude:: examples/availabilityzone-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - description: description + - enabled: enabled + - availability_zone_profile_id: availability-zone-profile-id + - availability_zone: availability_zone + - name: name + +Response Example +---------------- + +.. literalinclude:: examples/availabilityzone-show-response.json + :language: javascript + +Update an Availability Zone +=========================== + +.. rest_method:: PUT /v2.0/lbaas/availabilityzones/{availability_zone_name} + +Update an availability zone. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - description: description-optional + - enabled: enabled-optional + - availability_zone: availability_zone + - availability_zone_name: path-availability-zone-name + +Request Example +--------------- + +.. literalinclude:: examples/availabilityzone-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/availabilityzone-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - description: description + - enabled: enabled + - availability_zone_profile_id: availability-zone-profile-id + - availability_zone: availability_zone + - name: name + +Response Example +---------------- + +.. literalinclude:: examples/availabilityzone-update-response.json + :language: javascript + +Remove an Availability Zone +=========================== + +.. rest_method:: DELETE /v2.0/lbaas/availabilityzones/{availability_zone_name} + +Remove an availability zone and its associated configuration. + +If any load balancers are using this availability zone, the service returns +the HTTP ``Conflict (409)`` response code. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - availability_zone_name: path-availability-zone-name + +Curl Example +------------ + +.. literalinclude:: examples/availabilityzone-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request. diff --git a/api-ref/source/v2/availabilityzoneprofile.inc b/api-ref/source/v2/availabilityzoneprofile.inc new file mode 100644 index 0000000000..632e1ec01a --- /dev/null +++ b/api-ref/source/v2/availabilityzoneprofile.inc @@ -0,0 +1,297 @@ +.. -*- rst -*- + +List Availability Zone Profiles +=============================== + +.. rest_method:: GET /v2.0/lbaas/availabilityzoneprofiles + +List all available Availability Zone Profiles. + +Use the ``fields`` query parameter to control which fields are +returned in the response body.
Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +The list might be empty. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + +Curl Example +------------ + +.. literalinclude:: examples/availabilityzoneprofile-list-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - availability_zone_data: availability-zone-data + - availability_zone_profiles: availability-zone-profiles + - id: availability-zone-profile-id + - name: name + - provider_name: provider-name + +Response Example +---------------- + +.. literalinclude:: examples/availabilityzoneprofile-list-response.json + :language: javascript + +Create Availability Zone Profile +================================ + +.. rest_method:: POST /v2.0/lbaas/availabilityzoneprofiles + +Creates an Availability Zone Profile. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - availability_zone_data: availability-zone-data + - availability_zone_profile: availability-zone-profile + - name: name + - provider_name: provider-name + +Request Example +--------------- + +.. literalinclude:: examples/availabilityzoneprofile-create-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/availabilityzoneprofile-create-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - availability_zone_data: availability-zone-data + - availability_zone_profile: availability-zone-profile + - id: availability-zone-profile-id + - name: name + - provider_name: provider-name + +Response Example +---------------- + +.. literalinclude:: examples/availabilityzoneprofile-create-response.json + :language: javascript + +Show Availability Zone Profile Details +====================================== + +.. rest_method:: GET /v2.0/lbaas/availabilityzoneprofiles/{availability_zone_profile_id} + +Shows the details of an Availability Zone Profile. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +This operation does not require a request body. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +..
rest_parameters:: ../parameters.yaml + + - fields: fields + - availability_zone_profile_id: path-availability-zone-profile-id + +Curl Example +------------ + +.. literalinclude:: examples/availabilityzoneprofile-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - availability_zone_data: availability-zone-data + - availability_zone_profile: availability-zone-profile + - id: availability-zone-profile-id + - name: name + - provider_name: provider-name + +Response Example +---------------- + +.. literalinclude:: examples/availabilityzoneprofile-show-response.json + :language: javascript + +Update an Availability Zone Profile +=================================== + +.. rest_method:: PUT /v2.0/lbaas/availabilityzoneprofiles/{availability_zone_profile_id} + +Update an Availability Zone Profile. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - availability_zone_data: availability-zone-data-optional + - availability_zone_profile: availability-zone-profile + - availability_zone_profile_id: path-availability-zone-profile-id + - name: name-optional + - provider_name: provider-name-optional + +Request Example +--------------- + +.. literalinclude:: examples/availabilityzoneprofile-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/availabilityzoneprofile-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - availability_zone_data: availability-zone-data + - availability_zone_profile: availability-zone-profile + - id: availability-zone-profile-id + - name: name + - provider_name: provider-name + +Response Example +---------------- + +.. literalinclude:: examples/availabilityzoneprofile-update-response.json + :language: javascript + +Remove an Availability Zone Profile +=================================== + +.. rest_method:: DELETE /v2.0/lbaas/availabilityzoneprofiles/{availability_zone_profile_id} + +Remove an Availability Zone Profile and its associated configuration. + +If any availability zone is using this Availability Zone Profile, the service +returns the HTTP ``Conflict (409)`` response code. + +If you are not an administrative user, the service returns the HTTP +``Forbidden (403)`` response code. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - availability_zone_profile_id: path-availability-zone-profile-id + +Curl Example +------------ + +.. literalinclude:: examples/availabilityzoneprofile-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request.
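+
+.. note::
+
+   Illustrative sketch only, not part of the formal reference: an
+   Availability Zone Profile is typically created first and its ID is then
+   referenced when creating the availability zone. This assumes an
+   administrative token in ``TOKEN`` and the availability of ``jq``; the
+   ``availability_zone_data`` content is a provider-specific example only.
+
+.. code-block:: bash
+
+   # 1. Create the profile and capture its ID from the response.
+   PROFILE_ID=$(curl -s -X POST -H "Content-Type: application/json" \
+     -H "X-Auth-Token: $TOKEN" \
+     -d '{"availability_zone_profile": {"name": "some_profile", "provider_name": "amphora", "availability_zone_data": "{\"compute_zone\": \"az1\"}"}}' \
+     http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles \
+     | jq -r '.availability_zone_profile.id')
+
+   # 2. Create an availability zone that references the new profile.
+   curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: $TOKEN" \
+     -d "{\"availability_zone\": {\"name\": \"my_az\", \"availability_zone_profile_id\": \"$PROFILE_ID\"}}" \
+     http://198.51.100.10:9876/v2.0/lbaas/availabilityzones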
diff --git a/api-ref/source/v2/examples/amphora-config-curl b/api-ref/source/v2/examples/amphora-config-curl new file mode 100644 index 0000000000..a0fc0a921f --- /dev/null +++ b/api-ref/source/v2/examples/amphora-config-curl @@ -0,0 +1 @@ +curl -X PUT -H "X-Auth-Token: <token>" http://198.51.100.10:9876/v2/octavia/amphorae/6bd55cd3-802e-447e-a518-1e74e23bb106/config diff --git a/api-ref/source/v2/examples/amphora-delete-curl b/api-ref/source/v2/examples/amphora-delete-curl new file mode 100644 index 0000000000..ad5208c8f3 --- /dev/null +++ b/api-ref/source/v2/examples/amphora-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: <token>" http://198.51.100.10:9876/v2/octavia/amphorae/1a032adb-d6ac-4dbb-a04a-c1126bc547c7 diff --git a/api-ref/source/v2/examples/amphora-failover-curl b/api-ref/source/v2/examples/amphora-failover-curl new file mode 100644 index 0000000000..c42f41ea14 --- /dev/null +++ b/api-ref/source/v2/examples/amphora-failover-curl @@ -0,0 +1 @@ +curl -X PUT -H "X-Auth-Token: <token>" http://198.51.100.10:9876/v2/octavia/amphorae/6bd55cd3-802e-447e-a518-1e74e23bb106/failover diff --git a/api-ref/source/v2/examples/amphora-list-curl b/api-ref/source/v2/examples/amphora-list-curl new file mode 100644 index 0000000000..b9070f88d2 --- /dev/null +++ b/api-ref/source/v2/examples/amphora-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: <token>" http://198.51.100.10:9876/v2/octavia/amphorae?loadbalancer_id=09eedfc6-2c55-41a8-a75c-2cd4e95212ca diff --git a/api-ref/source/v2/examples/amphora-list-response.json b/api-ref/source/v2/examples/amphora-list-response.json new file mode 100644 index 0000000000..14a5d01638 --- /dev/null +++ b/api-ref/source/v2/examples/amphora-list-response.json @@ -0,0 +1,48 @@ +{ + "amphorae": [ + { + "id": "6bd55cd3-802e-447e-a518-1e74e23bb106", + "load_balancer_id": "09eedfc6-2c55-41a8-a75c-2cd4e95212ca", + "compute_id": "f0f79f90-733d-417a-8d70-cc6be62cd54d", + "lb_network_ip": "192.168.1.2", + "vrrp_ip": "192.168.1.5", + "ha_ip": "192.168.1.10", + "vrrp_port_id": "ab2a8add-76a9-44bb-89f8-88430193cc83", + "ha_port_id": "19561fd3-5da5-46cc-bdd3-99bbdf7246e6", + "cert_expiration": "2019-09-19 00:34:51", + "cert_busy": 0, + "role": "MASTER", + "status": "ALLOCATED", + "vrrp_interface": "eth1", + "vrrp_id": 1, + "vrrp_priority": 100, + "cached_zone": "zone1", + "created_at": "2017-05-10T18:14:44", + "updated_at": "2017-05-10T23:08:12", + "image_id": "c1c2ad6f-1c1e-4744-8d1a-d0ef36289e74", + "compute_flavor": "5446a14a-abec-4455-bc0e-a34e5ff001a3" + }, + { + "id": "89c186a3-cb16-497b-b099-c4bd40316642", + "load_balancer_id": "09eedfc6-2c55-41a8-a75c-2cd4e95212ca", + "compute_id": "24b1cb54-122d-4960-9035-083642f5c2bb", + "lb_network_ip": "192.168.1.3", + "vrrp_ip": "192.168.1.6", + "ha_ip": "192.168.1.10", + "vrrp_port_id": "cae421f6-dcf0-4866-9438-d0c682645799", + "ha_port_id": "19561fd3-5da5-46cc-bdd3-99bbdf7246e6", + "cert_expiration": "2019-09-19 00:34:51", + "cert_busy": 0, + "role": "BACKUP", + "status": "ALLOCATED", + "vrrp_interface": "eth1", + "vrrp_id": 1, + "vrrp_priority": 200, + "cached_zone": "zone2", + "created_at": "2017-06-11T19:15:45", + "updated_at": "2017-06-11T23:09:13", + "image_id": "1014292d-cbaa-4ad6-b38b-2e138389f87f", + "compute_flavor": "5446a14a-abec-4455-bc0e-a34e5ff001a3" + } + ] +} diff --git a/api-ref/source/v2/examples/amphora-show-curl b/api-ref/source/v2/examples/amphora-show-curl new file mode 100644 index 0000000000..e95a0bf74c --- /dev/null +++ b/api-ref/source/v2/examples/amphora-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: <token>"
http://198.51.100.10:9876/v2/octavia/amphorae/6bd55cd3-802e-447e-a518-1e74e23bb106 diff --git a/api-ref/source/v2/examples/amphora-show-response.json b/api-ref/source/v2/examples/amphora-show-response.json new file mode 100644 index 0000000000..cb0ec083fc --- /dev/null +++ b/api-ref/source/v2/examples/amphora-show-response.json @@ -0,0 +1,24 @@ +{ + "amphora": { + "id": "6bd55cd3-802e-447e-a518-1e74e23bb106", + "load_balancer_id": "09eedfc6-2c55-41a8-a75c-2cd4e95212ca", + "compute_id": "f0f79f90-733d-417a-8d70-cc6be62cd54d", + "lb_network_ip": "192.168.1.2", + "vrrp_ip": "192.168.1.5", + "ha_ip": "192.168.1.10", + "vrrp_port_id": "ab2a8add-76a9-44bb-89f8-88430193cc83", + "ha_port_id": "19561fd3-5da5-46cc-bdd3-99bbdf7246e6", + "cert_expiration": "2019-09-19 00:34:51", + "cert_busy": 0, + "role": "MASTER", + "status": "ALLOCATED", + "vrrp_interface": "eth1", + "vrrp_id": 1, + "vrrp_priority": 100, + "cached_zone": "zone1", + "created_at": "2017-05-10T18:14:44", + "updated_at": "2017-05-10T23:08:12", + "image_id": "c1c2ad6f-1c1e-4744-8d1a-d0ef36289e74", + "compute_flavor": "5446a14a-abec-4455-bc0e-a34e5ff001a3" + } +} diff --git a/api-ref/source/v2/examples/amphora-show-stats-curl b/api-ref/source/v2/examples/amphora-show-stats-curl new file mode 100644 index 0000000000..3f9f5d1702 --- /dev/null +++ b/api-ref/source/v2/examples/amphora-show-stats-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/octavia/amphorae/63d8349e-c4d7-4156-bc94-29260607b04f/stats diff --git a/api-ref/source/v2/examples/amphora-show-stats-response.json b/api-ref/source/v2/examples/amphora-show-stats-response.json new file mode 100644 index 0000000000..33317f11a6 --- /dev/null +++ b/api-ref/source/v2/examples/amphora-show-stats-response.json @@ -0,0 +1,24 @@ +{ + "amphora_stats": [ + { + "active_connections": 48629, + "bytes_in": 65671420, + "bytes_out": 774771186, + "id": "63d8349e-c4d7-4156-bc94-29260607b04f", + "listener_id": "bbe44114-cda2-4fe0-b192-d9e24ce661db", + "loadbalancer_id": "65b5a7c3-1437-4909-84cf-cec9f7e371ea", + "request_errors": 0, + "total_connections": 26189172 + }, + { + "active_connections": 0, + "bytes_in": 5, + "bytes_out": 100, + "id": "63d8349e-c4d7-4156-bc94-29260607b04f", + "listener_id": "af45a658-4eeb-4ce9-8b7e-16b0e5676f87", + "loadbalancer_id": "65b5a7c3-1437-4909-84cf-cec9f7e371ea", + "request_errors": 0, + "total_connections": 1 + } + ] +} diff --git a/api-ref/source/v2/examples/availabilityzone-create-curl b/api-ref/source/v2/examples/availabilityzone-create-curl new file mode 100644 index 0000000000..afe4680c66 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzone-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"availability_zone":{"name":"my_az","description":"My availability zone.","enabled":true,"availability_zone_profile_id":"5712097e-0092-45dc-bff0-ab68b61ad51a"}}' http://198.51.100.10:9876/v2.0/lbaas/availabilityzones diff --git a/api-ref/source/v2/examples/availabilityzone-create-request.json b/api-ref/source/v2/examples/availabilityzone-create-request.json new file mode 100644 index 0000000000..42a5f596a4 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzone-create-request.json @@ -0,0 +1,8 @@ +{ + "availability_zone": { + "name": "my_az", + "description": "My availability zone.", + "enabled": true, + "availability_zone_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" + } +} diff --git a/api-ref/source/v2/examples/availabilityzone-create-response.json 
b/api-ref/source/v2/examples/availabilityzone-create-response.json new file mode 100644 index 0000000000..42a5f596a4 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzone-create-response.json @@ -0,0 +1,8 @@ +{ + "availability_zone": { + "name": "my_az", + "description": "My availability zone.", + "enabled": true, + "availability_zone_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" + } +} diff --git a/api-ref/source/v2/examples/availabilityzone-delete-curl b/api-ref/source/v2/examples/availabilityzone-delete-curl new file mode 100644 index 0000000000..fcaad926a0 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzone-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzones/my_az diff --git a/api-ref/source/v2/examples/availabilityzone-list-curl b/api-ref/source/v2/examples/availabilityzone-list-curl new file mode 100644 index 0000000000..fcca159a35 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzone-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzones diff --git a/api-ref/source/v2/examples/availabilityzone-list-response.json b/api-ref/source/v2/examples/availabilityzone-list-response.json new file mode 100644 index 0000000000..eac99f04cc --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzone-list-response.json @@ -0,0 +1,10 @@ +{ + "availability_zones": [ + { + "name": "my_az", + "description": "My availability zone.", + "enabled": true, + "availability_zone_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" + } + ] +} diff --git a/api-ref/source/v2/examples/availabilityzone-show-curl b/api-ref/source/v2/examples/availabilityzone-show-curl new file mode 100644 index 0000000000..baa2854daa --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzone-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzones/my_az diff --git a/api-ref/source/v2/examples/availabilityzone-show-response.json b/api-ref/source/v2/examples/availabilityzone-show-response.json new file mode 100644 index 0000000000..42a5f596a4 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzone-show-response.json @@ -0,0 +1,8 @@ +{ + "availability_zone": { + "name": "my_az", + "description": "My availability zone.", + "enabled": true, + "availability_zone_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" + } +} diff --git a/api-ref/source/v2/examples/availabilityzone-update-curl b/api-ref/source/v2/examples/availabilityzone-update-curl new file mode 100644 index 0000000000..063bd2732c --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzone-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"availability_zone":{"description":"My availability zone.","enabled":false}}' http://198.51.100.10:9876/v2.0/lbaas/availabilityzones/my_az diff --git a/api-ref/source/v2/examples/availabilityzone-update-request.json b/api-ref/source/v2/examples/availabilityzone-update-request.json new file mode 100644 index 0000000000..32b69dad5a --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzone-update-request.json @@ -0,0 +1,6 @@ +{ + "availability_zone": { + "description": "My availability zone.", + "enabled": false + } +} diff --git a/api-ref/source/v2/examples/availabilityzone-update-response.json b/api-ref/source/v2/examples/availabilityzone-update-response.json new file mode 100644 index 0000000000..5d7bbedbb1 --- /dev/null +++ 
b/api-ref/source/v2/examples/availabilityzone-update-response.json @@ -0,0 +1,8 @@ +{ + "availability_zone": { + "name": "my_az", + "description": "My availability zone.", + "enabled": false, + "availability_zone_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" + } +} diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-create-curl b/api-ref/source/v2/examples/availabilityzoneprofile-create-curl new file mode 100644 index 0000000000..98acd6ed82 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"availability_zone_profile":{"name":"some_az","provider_name":"amphora","availability_zone_data":"{\"compute_zone\": \"az1\"}"}}' http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-create-request.json b/api-ref/source/v2/examples/availabilityzoneprofile-create-request.json new file mode 100644 index 0000000000..3bfb81d2e5 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-create-request.json @@ -0,0 +1,8 @@ +{ + "availability_zone_profile": + { + "name": "some_az", + "provider_name": "amphora", + "availability_zone_data": "{\"compute_zone\": \"az1\", \"volume_zone\": \"az1\"}" + } +} diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-create-response.json b/api-ref/source/v2/examples/availabilityzoneprofile-create-response.json new file mode 100644 index 0000000000..6ca6cd68fd --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-create-response.json @@ -0,0 +1,9 @@ +{ + "availability_zone_profile": + { + "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", + "name": "some_az", + "provider_name": "amphora", + "availability_zone_data": "{\"compute_zone\": \"az1\", \"volume_zone\": \"az1\"}" + } +} diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-delete-curl b/api-ref/source/v2/examples/availabilityzoneprofile-delete-curl new file mode 100644 index 0000000000..c864b8fdd0 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-list-curl b/api-ref/source/v2/examples/availabilityzoneprofile-list-curl new file mode 100644 index 0000000000..a811706035 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-list-response.json b/api-ref/source/v2/examples/availabilityzoneprofile-list-response.json new file mode 100644 index 0000000000..6541e1b4bd --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-list-response.json @@ -0,0 +1,10 @@ +{ + "availability_zone_profiles": [ + { + "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", + "name": "some_az", + "provider_name": "amphora", + "availability_zone_data": "{\"compute_zone\": \"az1\", \"volume_zone\": \"az2\"}" + } + ] +} diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-show-curl b/api-ref/source/v2/examples/availabilityzoneprofile-show-curl new file mode 100644 index 0000000000..a768324033 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " 
http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-show-response.json b/api-ref/source/v2/examples/availabilityzoneprofile-show-response.json new file mode 100644 index 0000000000..6ca6cd68fd --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-show-response.json @@ -0,0 +1,9 @@ +{ + "availability_zone_profile": + { + "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", + "name": "some_az", + "provider_name": "amphora", + "availability_zone_data": "{\"compute_zone\": \"az1\", \"volume_zone\": \"az1\"}" + } +} diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-update-curl b/api-ref/source/v2/examples/availabilityzoneprofile-update-curl new file mode 100644 index 0000000000..4d39fb8c56 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"availability_zone_profile":{"name":"other_az","provider_name":"amphora","availability_zone_data":"{\"compute_zone\": \"az2\"}"}}' http://198.51.100.10:9876/v2.0/lbaas/availabilityzoneprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-update-request.json b/api-ref/source/v2/examples/availabilityzoneprofile-update-request.json new file mode 100644 index 0000000000..eed7b0a7e0 --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-update-request.json @@ -0,0 +1,8 @@ +{ + "availability_zone_profile": + { + "name": "other_az", + "provider_name": "amphora", + "availability_zone_data": "{\"compute_zone\": \"az2\", \"volume_zone\": \"az2\"}" + } +} diff --git a/api-ref/source/v2/examples/availabilityzoneprofile-update-response.json b/api-ref/source/v2/examples/availabilityzoneprofile-update-response.json new file mode 100644 index 0000000000..e92ebe2afb --- /dev/null +++ b/api-ref/source/v2/examples/availabilityzoneprofile-update-response.json @@ -0,0 +1,9 @@ +{ + "availability_zone_profile": + { + "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", + "name": "other_az", + "provider_name": "amphora", + "availability_zone_data": "{\"compute_zone\": \"az2\", \"volume_zone\": \"az2\"}" + } +} diff --git a/api-ref/source/v2/examples/flavor-create-curl b/api-ref/source/v2/examples/flavor-create-curl new file mode 100644 index 0000000000..841f2b70e7 --- /dev/null +++ b/api-ref/source/v2/examples/flavor-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"flavor":{"name":"Basic","description":"A basic standalone Octavia load balancer.","enabled":true,"flavor_profile_id":"5712097e-0092-45dc-bff0-ab68b61ad51a"}}' http://198.51.100.10:9876/v2.0/lbaas/flavors diff --git a/api-ref/source/v2/examples/flavor-create-request.json b/api-ref/source/v2/examples/flavor-create-request.json new file mode 100644 index 0000000000..5dcdd39831 --- /dev/null +++ b/api-ref/source/v2/examples/flavor-create-request.json @@ -0,0 +1,8 @@ +{ + "flavor": { + "name": "Basic", + "description": "A basic standalone Octavia load balancer.", + "enabled": true, + "flavor_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" + } +} diff --git a/api-ref/source/v2/examples/flavor-create-response.json b/api-ref/source/v2/examples/flavor-create-response.json new file mode 100644 index 0000000000..443639b074 --- /dev/null +++ b/api-ref/source/v2/examples/flavor-create-response.json @@ -0,0 +1,9 @@ +{ + "flavor": { + "id": 
"8f94060c-8d5b-4472-9cfd-e8a2b909481d", + "name": "Basic", + "description": "A basic standalone Octavia load balancer.", + "enabled": true, + "flavor_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" + } +} diff --git a/api-ref/source/v2/examples/flavor-delete-curl b/api-ref/source/v2/examples/flavor-delete-curl new file mode 100644 index 0000000000..a64b486fb5 --- /dev/null +++ b/api-ref/source/v2/examples/flavor-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavors/8f94060c-8d5b-4472-9cfd-e8a2b909481d diff --git a/api-ref/source/v2/examples/flavor-list-curl b/api-ref/source/v2/examples/flavor-list-curl new file mode 100644 index 0000000000..62702ff643 --- /dev/null +++ b/api-ref/source/v2/examples/flavor-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavors diff --git a/api-ref/source/v2/examples/flavor-profile-list-curl b/api-ref/source/v2/examples/flavor-profile-list-curl new file mode 100644 index 0000000000..cfbe2bec4e --- /dev/null +++ b/api-ref/source/v2/examples/flavor-profile-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles diff --git a/api-ref/source/v2/examples/flavor-show-curl b/api-ref/source/v2/examples/flavor-show-curl new file mode 100644 index 0000000000..7b04f1ffa6 --- /dev/null +++ b/api-ref/source/v2/examples/flavor-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavors/8f94060c-8d5b-4472-9cfd-e8a2b909481d diff --git a/api-ref/source/v2/examples/flavor-show-response.json b/api-ref/source/v2/examples/flavor-show-response.json new file mode 100644 index 0000000000..443639b074 --- /dev/null +++ b/api-ref/source/v2/examples/flavor-show-response.json @@ -0,0 +1,9 @@ +{ + "flavor": { + "id": "8f94060c-8d5b-4472-9cfd-e8a2b909481d", + "name": "Basic", + "description": "A basic standalone Octavia load balancer.", + "enabled": true, + "flavor_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" + } +} diff --git a/api-ref/source/v2/examples/flavor-update-curl b/api-ref/source/v2/examples/flavor-update-curl new file mode 100644 index 0000000000..99c583926c --- /dev/null +++ b/api-ref/source/v2/examples/flavor-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"flavor":{"name":"Basic","description":"A basic standalone Octavia load balancer.","enabled":false}}' http://198.51.100.10:9876/v2.0/lbaas/flavors/8f94060c-8d5b-4472-9cfd-e8a2b909481d diff --git a/api-ref/source/v2/examples/flavor-update-request.json b/api-ref/source/v2/examples/flavor-update-request.json new file mode 100644 index 0000000000..000b531f3a --- /dev/null +++ b/api-ref/source/v2/examples/flavor-update-request.json @@ -0,0 +1,7 @@ +{ + "flavor": { + "name": "Basic", + "description": "A basic standalone Octavia load balancer.", + "enabled": false + } +} diff --git a/api-ref/source/v2/examples/flavor-update-response.json b/api-ref/source/v2/examples/flavor-update-response.json new file mode 100644 index 0000000000..8d8fb791c8 --- /dev/null +++ b/api-ref/source/v2/examples/flavor-update-response.json @@ -0,0 +1,9 @@ +{ + "flavor": { + "id": "8f94060c-8d5b-4472-9cfd-e8a2b909481d", + "name": "Basic", + "description": "A basic standalone Octavia load balancer.", + "enabled": false, + "flavor_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" + } +} diff --git a/api-ref/source/v2/examples/flavorprofile-create-curl 
b/api-ref/source/v2/examples/flavorprofile-create-curl new file mode 100644 index 0000000000..c949289332 --- /dev/null +++ b/api-ref/source/v2/examples/flavorprofile-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"flavorprofile":{"name":"amphora-act-stdby","provider_name":"amphora","flavor_data":"{\"loadbalancer_topology\": \"ACTIVE_STANDBY\"}"}}' http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles diff --git a/api-ref/source/v2/examples/flavorprofile-create-request.json b/api-ref/source/v2/examples/flavorprofile-create-request.json new file mode 100644 index 0000000000..7f72850f3a --- /dev/null +++ b/api-ref/source/v2/examples/flavorprofile-create-request.json @@ -0,0 +1,8 @@ +{ + "flavorprofile": + { + "name": "amphora-act-stdby", + "provider_name": "amphora", + "flavor_data": "{\"loadbalancer_topology\": \"ACTIVE_STANDBY\"}" + } +} diff --git a/api-ref/source/v2/examples/flavorprofile-create-response.json b/api-ref/source/v2/examples/flavorprofile-create-response.json new file mode 100644 index 0000000000..a98f72bad4 --- /dev/null +++ b/api-ref/source/v2/examples/flavorprofile-create-response.json @@ -0,0 +1,9 @@ +{ + "flavorprofile": + { + "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", + "name": "amphora-act-stdby", + "provider_name": "amphora", + "flavor_data": "{\"loadbalancer_topology\": \"ACTIVE_STANDBY\"}" + } +} diff --git a/api-ref/source/v2/examples/flavorprofile-delete-curl b/api-ref/source/v2/examples/flavorprofile-delete-curl new file mode 100644 index 0000000000..ca461f4e71 --- /dev/null +++ b/api-ref/source/v2/examples/flavorprofile-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a diff --git a/api-ref/source/v2/examples/flavorprofile-show-curl b/api-ref/source/v2/examples/flavorprofile-show-curl new file mode 100644 index 0000000000..db3f8ce27f --- /dev/null +++ b/api-ref/source/v2/examples/flavorprofile-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a diff --git a/api-ref/source/v2/examples/flavorprofile-show-response.json b/api-ref/source/v2/examples/flavorprofile-show-response.json new file mode 100644 index 0000000000..a98f72bad4 --- /dev/null +++ b/api-ref/source/v2/examples/flavorprofile-show-response.json @@ -0,0 +1,9 @@ +{ + "flavorprofile": + { + "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", + "name": "amphora-act-stdby", + "provider_name": "amphora", + "flavor_data": "{\"loadbalancer_topology\": \"ACTIVE_STANDBY\"}" + } +} diff --git a/api-ref/source/v2/examples/flavorprofile-update-curl b/api-ref/source/v2/examples/flavorprofile-update-curl new file mode 100644 index 0000000000..26d5e348d2 --- /dev/null +++ b/api-ref/source/v2/examples/flavorprofile-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"flavorprofile":{"name":"amphora-standalone","provider_name":"amphora","flavor_data":"{\"loadbalancer_topology\": \"SINGLE\"}"}}' http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles/5712097e-0092-45dc-bff0-ab68b61ad51a diff --git a/api-ref/source/v2/examples/flavorprofile-update-request.json b/api-ref/source/v2/examples/flavorprofile-update-request.json new file mode 100644 index 0000000000..c9b2a62218 --- /dev/null +++ b/api-ref/source/v2/examples/flavorprofile-update-request.json @@ -0,0 +1,8 @@ +{ + "flavorprofile": + { + "name": "amphora-standalone", + 
"provider_name": "amphora", + "flavor_data": "{\"loadbalancer_topology\": \"SINGLE\"}" + } +} diff --git a/api-ref/source/v2/examples/flavorprofile-update-response.json b/api-ref/source/v2/examples/flavorprofile-update-response.json new file mode 100644 index 0000000000..fa245412b5 --- /dev/null +++ b/api-ref/source/v2/examples/flavorprofile-update-response.json @@ -0,0 +1,9 @@ +{ + "flavorprofile": + { + "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", + "name": "amphora-standalone", + "provider_name": "amphora", + "flavor_data": "{\"loadbalancer_topology\": \"SINGLE\"}" + } +} diff --git a/api-ref/source/v2/examples/flavorprofiles-list-response.json b/api-ref/source/v2/examples/flavorprofiles-list-response.json new file mode 100644 index 0000000000..79caee2e0b --- /dev/null +++ b/api-ref/source/v2/examples/flavorprofiles-list-response.json @@ -0,0 +1,10 @@ +{ + "flavorprofiles": [ + { + "id": "5712097e-0092-45dc-bff0-ab68b61ad51a", + "name": "amphora-act-stdby", + "provider_name": "amphora", + "flavor_data": "{\"loadbalancer_topology\": \"ACTIVE_STANDBY\"}" + } + ] +} diff --git a/api-ref/source/v2/examples/flavors-list-response.json b/api-ref/source/v2/examples/flavors-list-response.json new file mode 100644 index 0000000000..f7a68c6676 --- /dev/null +++ b/api-ref/source/v2/examples/flavors-list-response.json @@ -0,0 +1,11 @@ +{ + "flavors": [ + { + "id": "8f94060c-8d5b-4472-9cfd-e8a2b909481d", + "name": "Basic", + "description": "A basic standalone Octavia load balancer.", + "enabled": true, + "flavor_profile_id": "5712097e-0092-45dc-bff0-ab68b61ad51a" + } + ] +} diff --git a/api-ref/source/v2/examples/healthmonitor-create-curl b/api-ref/source/v2/examples/healthmonitor-create-curl new file mode 100644 index 0000000000..d2003d1e0e --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitor-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"healthmonitor":{"name":"super-pool-health-monitor","admin_state_up":true,"pool_id":"4029d267-3983-4224-a3d0-afb3fe16a2cd","delay":"10","expected_codes":"200","max_retries":"1","http_method":"GET","timeout":"5","url_path":"/","type":"HTTP","max_retries_down":3,"tags":["test_tag"],"http_version":1.1,"domain_name":"testlab.com"}}' http://198.51.100.10:9876/v2/lbaas/healthmonitors diff --git a/api-ref/source/v2/examples/healthmonitor-create-request.json b/api-ref/source/v2/examples/healthmonitor-create-request.json new file mode 100644 index 0000000000..9859835e72 --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitor-create-request.json @@ -0,0 +1,18 @@ +{ + "healthmonitor": { + "name": "super-pool-health-monitor", + "admin_state_up": true, + "pool_id": "4029d267-3983-4224-a3d0-afb3fe16a2cd", + "delay": "10", + "expected_codes": "200", + "max_retries": "1", + "http_method": "GET", + "timeout": "5", + "url_path": "/", + "type": "HTTP", + "max_retries_down": 3, + "tags": ["test_tag"], + "http_version": 1.1, + "domain_name": "testlab.com" + } +} diff --git a/api-ref/source/v2/examples/healthmonitor-create-response.json b/api-ref/source/v2/examples/healthmonitor-create-response.json new file mode 100644 index 0000000000..b2d95722b5 --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitor-create-response.json @@ -0,0 +1,28 @@ +{ + "healthmonitor": { + "project_id": "e3cd678b11784734bc366148aa37580e", + "name": "super-pool-health-monitor", + "admin_state_up": true, + "pools": [ + { + "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd" + } + ], + "created_at": "2017-05-11T23:53:47", + "provisioning_status": 
"ACTIVE", + "updated_at": "2017-05-11T23:53:47", + "delay": 10, + "expected_codes": "200", + "max_retries": 1, + "http_method": "GET", + "timeout": 5, + "max_retries_down": 3, + "url_path": "/", + "type": "HTTP", + "id": "8ed3c5ac-6efa-420c-bedb-99ba14e58db5", + "operating_status": "ONLINE", + "tags": ["test_tag"], + "http_version": 1.1, + "domain_name": "testlab.com" + } +} diff --git a/api-ref/source/v2/examples/healthmonitor-delete-curl b/api-ref/source/v2/examples/healthmonitor-delete-curl new file mode 100644 index 0000000000..84da67e2cc --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitor-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/healthmonitors/8ed3c5ac-6efa-420c-bedb-99ba14e58db5 diff --git a/api-ref/source/v2/examples/healthmonitor-list-curl b/api-ref/source/v2/examples/healthmonitor-list-curl new file mode 100644 index 0000000000..0297d5aa9e --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitor-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/healthmonitors?project_id=e3cd678b11784734bc366148aa37580e diff --git a/api-ref/source/v2/examples/healthmonitor-show-curl b/api-ref/source/v2/examples/healthmonitor-show-curl new file mode 100644 index 0000000000..86d0029438 --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitor-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/healthmonitors/8ed3c5ac-6efa-420c-bedb-99ba14e58db5 diff --git a/api-ref/source/v2/examples/healthmonitor-show-response.json b/api-ref/source/v2/examples/healthmonitor-show-response.json new file mode 100644 index 0000000000..f8c4c40009 --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitor-show-response.json @@ -0,0 +1,28 @@ +{ + "healthmonitor": { + "project_id": "e3cd678b11784734bc366148aa37580e", + "name": "super-pool-health-monitor", + "admin_state_up": true, + "pools": [ + { + "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd" + } + ], + "created_at": "2017-05-11T23:53:47", + "provisioning_status": "ACTIVE", + "updated_at": "2017-05-11T23:53:47", + "delay": 10, + "expected_codes": "200", + "max_retries": 1, + "http_method": "GET", + "timeout": 5, + "max_retries_down": 3, + "url_path": "/", + "type": "HTTP", + "id": "8ed3c5ac-6efa-420c-bedb-99ba14e58db5", + "operating_status": "ONLINE", + "tags": ["test_tag"], + "http_version": 1.0, + "domain_name": null + } +} diff --git a/api-ref/source/v2/examples/healthmonitor-update-curl b/api-ref/source/v2/examples/healthmonitor-update-curl new file mode 100644 index 0000000000..7b9aef3004 --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitor-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"healthmonitor":{"name":"super-pool-health-monitor-updated","admin_state_up":true,"delay":5,"expected_codes":"200","http_method":"HEAD","timeout":2,"url_path":"/index.html","max_retries":2,"max_retries_down":2,"tags":["updated_tag"],"http_version":1.1}}' http://198.51.100.10:9876/v2/lbaas/healthmonitors/8ed3c5ac-6efa-420c-bedb-99ba14e58db5 diff --git a/api-ref/source/v2/examples/healthmonitor-update-request.json b/api-ref/source/v2/examples/healthmonitor-update-request.json new file mode 100644 index 0000000000..73b8d1bed3 --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitor-update-request.json @@ -0,0 +1,15 @@ +{ + "healthmonitor": { + "name": "super-pool-health-monitor-updated", + "admin_state_up": true, + "delay": 5, + "expected_codes": "200", + "http_method": 
"HEAD", + "timeout": 2, + "url_path": "/index.html", + "max_retries": 2, + "max_retries_down": 2, + "tags": ["updated_tag"], + "http_version": 1.1 + } +} diff --git a/api-ref/source/v2/examples/healthmonitor-update-response.json b/api-ref/source/v2/examples/healthmonitor-update-response.json new file mode 100644 index 0000000000..416eb15dfe --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitor-update-response.json @@ -0,0 +1,28 @@ +{ + "healthmonitor": { + "project_id": "e3cd678b11784734bc366148aa37580e", + "name": "super-pool-health-monitor-updated", + "admin_state_up": true, + "pools": [ + { + "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd" + } + ], + "created_at": "2017-05-11T23:53:47", + "provisioning_status": "PENDING_UPDATE", + "updated_at": "2017-05-11T23:53:47", + "delay": 5, + "expected_codes": "200", + "max_retries": 2, + "http_method": "HEAD", + "timeout": 2, + "max_retries_down": 2, + "url_path": "/index.html", + "type": "HTTP", + "id": "8ed3c5ac-6efa-420c-bedb-99ba14e58db5", + "operating_status": "ONLINE", + "tags": ["updated_tag"], + "http_version": 1.1, + "domain_name": null + } +} diff --git a/api-ref/source/v2/examples/healthmonitors-list-response.json b/api-ref/source/v2/examples/healthmonitors-list-response.json new file mode 100644 index 0000000000..1f045f198e --- /dev/null +++ b/api-ref/source/v2/examples/healthmonitors-list-response.json @@ -0,0 +1,30 @@ +{ + "healthmonitors": [ + { + "project_id": "e3cd678b11784734bc366148aa37580e", + "name": "super-pool-health-monitor", + "admin_state_up": true, + "pools": [ + { + "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd" + } + ], + "created_at": "2017-05-11T23:53:47", + "provisioning_status": "ACTIVE", + "updated_at": "2017-05-11T23:53:47", + "delay": 10, + "expected_codes": "200", + "max_retries": 1, + "http_method": "GET", + "timeout": 5, + "max_retries_down": 3, + "url_path": "/", + "type": "HTTP", + "id": "8ed3c5ac-6efa-420c-bedb-99ba14e58db5", + "operating_status": "ONLINE", + "tags": ["test_tag"], + "http_version": 1.0, + "domain_name": null + } + ] +} diff --git a/api-ref/source/v2/examples/http-header-insertion-obj.json b/api-ref/source/v2/examples/http-header-insertion-obj.json new file mode 100644 index 0000000000..f59d4e6252 --- /dev/null +++ b/api-ref/source/v2/examples/http-header-insertion-obj.json @@ -0,0 +1,6 @@ +{ + "insert_headers": { + "X-Forwarded-For": "true", + "X-Forwarded-Port": "true" + } +} diff --git a/api-ref/source/v2/examples/l7policies-list-curl b/api-ref/source/v2/examples/l7policies-list-curl new file mode 100644 index 0000000000..4bf1dbde7f --- /dev/null +++ b/api-ref/source/v2/examples/l7policies-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies?project_id=e3cd678b11784734bc366148aa37580e diff --git a/api-ref/source/v2/examples/l7policies-list-response.json b/api-ref/source/v2/examples/l7policies-list-response.json new file mode 100644 index 0000000000..0a12e2bac4 --- /dev/null +++ b/api-ref/source/v2/examples/l7policies-list-response.json @@ -0,0 +1,28 @@ +{ + "l7policies": [ + { + "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", + "description": "Redirect requests to example.com", + "admin_state_up": true, + "rules": [ + { + "id": "efd6a3f8-73bf-47f0-8ae6-503ebda57372" + } + ], + "created_at": "2017-06-24T23:25:14", + "provisioning_status": "ACTIVE", + "updated_at": "2017-06-24T23:30:05", + "redirect_http_code": 302, + "redirect_pool_id": null, + "redirect_prefix": null, + "redirect_url": "http://www.example.com/", + 
"action": "REDIRECT_TO_URL", + "position": 1, + "project_id": "e3cd678b11784734bc366148aa37580e", + "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", + "operating_status": "ONLINE", + "name": "redirect-example.com", + "tags": ["test_tag"] + } + ] +} diff --git a/api-ref/source/v2/examples/l7policy-create-curl b/api-ref/source/v2/examples/l7policy-create-curl new file mode 100644 index 0000000000..95c74fd773 --- /dev/null +++ b/api-ref/source/v2/examples/l7policy-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"l7policy":{"description":"Redirect requests to example.com","admin_state_up":true,"listener_id":"023f2e34-7806-443b-bfae-16c324569a3d","redirect_http_code":301,"redirect_url":"http://www.example.com/","name":"redirect-example.com","action":"REDIRECT_TO_URL","position":1,"tags":["test_tag"]}}' http://198.51.100.10:9876/v2/lbaas/l7policies diff --git a/api-ref/source/v2/examples/l7policy-create-request.json b/api-ref/source/v2/examples/l7policy-create-request.json new file mode 100644 index 0000000000..0d28471d8b --- /dev/null +++ b/api-ref/source/v2/examples/l7policy-create-request.json @@ -0,0 +1,13 @@ +{ + "l7policy": { + "description": "Redirect requests to example.com", + "admin_state_up": true, + "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", + "redirect_url": "http://www.example.com/", + "redirect_http_code": 301, + "name": "redirect-example.com", + "action": "REDIRECT_TO_URL", + "position": 1, + "tags": ["test_tag"] + } +} diff --git a/api-ref/source/v2/examples/l7policy-create-response.json b/api-ref/source/v2/examples/l7policy-create-response.json new file mode 100644 index 0000000000..8f3f429f44 --- /dev/null +++ b/api-ref/source/v2/examples/l7policy-create-response.json @@ -0,0 +1,28 @@ +{ + "l7policy": [ + { + "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", + "description": "Redirect requests to example.com", + "admin_state_up": true, + "rules": [ + { + "id": "efd6a3f8-73bf-47f0-8ae6-503ebda57372" + } + ], + "created_at": "2017-06-24T23:25:14", + "provisioning_status": "PENDING_CREATE", + "updated_at": "2017-06-24T23:30:05", + "redirect_http_code": 301, + "redirect_pool_id": null, + "redirect_prefix": null, + "redirect_url": "http://www.example.com/", + "action": "REDIRECT_TO_URL", + "position": 1, + "project_id": "e3cd678b11784734bc366148aa37580e", + "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", + "operating_status": "OFFLINE", + "name": "redirect-example.com", + "tags": ["test_tag"] + } + ] +} diff --git a/api-ref/source/v2/examples/l7policy-delete-curl b/api-ref/source/v2/examples/l7policy-delete-curl new file mode 100644 index 0000000000..7392468704 --- /dev/null +++ b/api-ref/source/v2/examples/l7policy-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd diff --git a/api-ref/source/v2/examples/l7policy-show-curl b/api-ref/source/v2/examples/l7policy-show-curl new file mode 100644 index 0000000000..5335d62abe --- /dev/null +++ b/api-ref/source/v2/examples/l7policy-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd diff --git a/api-ref/source/v2/examples/l7policy-show-response.json b/api-ref/source/v2/examples/l7policy-show-response.json new file mode 100644 index 0000000000..c6cebb4c42 --- /dev/null +++ b/api-ref/source/v2/examples/l7policy-show-response.json @@ -0,0 +1,27 @@ +{ + "l7policy": + { + 
"listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", + "description": "Redirect requests to example.com", + "admin_state_up": true, + "rules": [ + { + "id": "efd6a3f8-73bf-47f0-8ae6-503ebda57372" + } + ], + "created_at": "2017-06-24T23:25:14", + "provisioning_status": "ACTIVE", + "updated_at": "2017-06-24T23:30:05", + "redirect_http_code": 302, + "redirect_pool_id": null, + "redirect_prefix": null, + "redirect_url": "http://www.example.com/", + "action": "REDIRECT_TO_URL", + "position": 1, + "project_id": "e3cd678b11784734bc366148aa37580e", + "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", + "operating_status": "ONLINE", + "name": "redirect-example.com", + "tags": ["test_tag"] + } +} diff --git a/api-ref/source/v2/examples/l7policy-update-curl b/api-ref/source/v2/examples/l7policy-update-curl new file mode 100644 index 0000000000..528ef8a4d3 --- /dev/null +++ b/api-ref/source/v2/examples/l7policy-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"l7policy":{"description":"Redirect requests to images.example.com","admin_state_up":true,"redirect_http_code":301,"redirect_url":"http://images.example.com/","name":"redirect-images.example.com","action":"REDIRECT_TO_URL","position":1,"tags":["updated_tag"]}}' http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd diff --git a/api-ref/source/v2/examples/l7policy-update-request.json b/api-ref/source/v2/examples/l7policy-update-request.json new file mode 100644 index 0000000000..e831358372 --- /dev/null +++ b/api-ref/source/v2/examples/l7policy-update-request.json @@ -0,0 +1,12 @@ +{ + "l7policy": { + "description": "Redirect requests to images.example.com", + "admin_state_up": true, + "redirect_http_code": 301, + "redirect_url": "http://images.example.com/", + "name": "redirect-images.example.com", + "action": "REDIRECT_TO_URL", + "position": 1, + "tags": ["updated_tag"] + } +} diff --git a/api-ref/source/v2/examples/l7policy-update-response.json b/api-ref/source/v2/examples/l7policy-update-response.json new file mode 100644 index 0000000000..3451aae937 --- /dev/null +++ b/api-ref/source/v2/examples/l7policy-update-response.json @@ -0,0 +1,27 @@ +{ + "l7policy": + { + "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", + "description": "Redirect requests to example.com", + "admin_state_up": true, + "rules": [ + { + "id": "efd6a3f8-73bf-47f0-8ae6-503ebda57372" + } + ], + "created_at": "2017-06-24T23:25:14", + "provisioning_status": "PENDING_UPDATE", + "updated_at": "2017-06-24T23:30:05", + "redirect_http_code": 301, + "redirect_pool_id": null, + "redirect_prefix": null, + "redirect_url": "http://www.example.com/", + "action": "REDIRECT_TO_URL", + "position": 1, + "project_id": "e3cd678b11784734bc366148aa37580e", + "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", + "operating_status": "ONLINE", + "name": "redirect-example.com", + "tags": ["updated_tag"] + } +} diff --git a/api-ref/source/v2/examples/l7rule-create-curl b/api-ref/source/v2/examples/l7rule-create-curl new file mode 100644 index 0000000000..e9a62d3038 --- /dev/null +++ b/api-ref/source/v2/examples/l7rule-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"rule":{"compare_type":"REGEX","invert":false,"type":"PATH","value":"/images*","admin_state_up":true,"tags":["test_tag"]}}' http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules diff --git 
a/api-ref/source/v2/examples/l7rule-create-request.json b/api-ref/source/v2/examples/l7rule-create-request.json new file mode 100644 index 0000000000..8aa8555035 --- /dev/null +++ b/api-ref/source/v2/examples/l7rule-create-request.json @@ -0,0 +1,10 @@ +{ + "rule": { + "compare_type": "REGEX", + "invert": false, + "type": "PATH", + "value": "/images*", + "admin_state_up": true, + "tags": ["test_tag"] + } +} diff --git a/api-ref/source/v2/examples/l7rule-create-response.json b/api-ref/source/v2/examples/l7rule-create-response.json new file mode 100644 index 0000000000..847b1ee05c --- /dev/null +++ b/api-ref/source/v2/examples/l7rule-create-response.json @@ -0,0 +1,18 @@ +{ + "rule": + { + "created_at": "2017-06-27T15:52:27", + "compare_type": "REGEX", + "provisioning_status": "PENDING_CREATE", + "invert": false, + "admin_state_up": true, + "updated_at": "2017-06-27T15:52:28", + "value": "/images*", + "key": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "type": "PATH", + "id": "16621dbb-a736-4888-a57a-3ecd53df784c", + "operating_status": "OFFLINE", + "tags": ["test_tag"] + } +} diff --git a/api-ref/source/v2/examples/l7rule-delete-curl b/api-ref/source/v2/examples/l7rule-delete-curl new file mode 100644 index 0000000000..33e5c052b2 --- /dev/null +++ b/api-ref/source/v2/examples/l7rule-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules/16621dbb-a736-4888-a57a-3ecd53df784c diff --git a/api-ref/source/v2/examples/l7rule-show-curl b/api-ref/source/v2/examples/l7rule-show-curl new file mode 100644 index 0000000000..44dea9d8a8 --- /dev/null +++ b/api-ref/source/v2/examples/l7rule-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules/16621dbb-a736-4888-a57a-3ecd53df784c diff --git a/api-ref/source/v2/examples/l7rule-show-response.json b/api-ref/source/v2/examples/l7rule-show-response.json new file mode 100644 index 0000000000..803b1cc5f3 --- /dev/null +++ b/api-ref/source/v2/examples/l7rule-show-response.json @@ -0,0 +1,18 @@ +{ + "rule": + { + "created_at": "2017-06-27T15:52:27", + "compare_type": "REGEX", + "provisioning_status": "ACTIVE", + "invert": false, + "admin_state_up": true, + "updated_at": "2017-06-27T15:52:28", + "value": "/images*", + "key": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "type": "PATH", + "id": "16621dbb-a736-4888-a57a-3ecd53df784c", + "operating_status": "ONLINE", + "tags": ["test_tag"] + } +} diff --git a/api-ref/source/v2/examples/l7rule-update-curl b/api-ref/source/v2/examples/l7rule-update-curl new file mode 100644 index 0000000000..1eead8c509 --- /dev/null +++ b/api-ref/source/v2/examples/l7rule-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"rule":{"compare_type":"REGEX","invert":true,"type":"PATH","value":"/images/special*","admin_state_up":true,"tags":["updated_tag"]}}' http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules/16621dbb-a736-4888-a57a-3ecd53df784c diff --git a/api-ref/source/v2/examples/l7rule-update-request.json b/api-ref/source/v2/examples/l7rule-update-request.json new file mode 100644 index 0000000000..db3e821dff --- /dev/null +++ b/api-ref/source/v2/examples/l7rule-update-request.json @@ -0,0 +1,10 @@ +{ + "rule": { + "compare_type": "REGEX", + "invert": true, + "type": "PATH", + "value": "/images/special*", + "admin_state_up": true, + 
"tags": ["updated_tag"] + } +} diff --git a/api-ref/source/v2/examples/l7rule-update-response.json b/api-ref/source/v2/examples/l7rule-update-response.json new file mode 100644 index 0000000000..dd4caf8e75 --- /dev/null +++ b/api-ref/source/v2/examples/l7rule-update-response.json @@ -0,0 +1,18 @@ +{ + "rule": + { + "created_at": "2017-06-27T15:52:27", + "compare_type": "REGEX", + "provisioning_status": "PENDING_UPDATE", + "invert": true, + "admin_state_up": true, + "updated_at": "2017-06-27T15:58:28", + "value": "/images/special*", + "key": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "type": "PATH", + "id": "16621dbb-a736-4888-a57a-3ecd53df784c", + "operating_status": "ONLINE", + "tags": ["updated_tag"] + } +} diff --git a/api-ref/source/v2/examples/l7rules-list-curl b/api-ref/source/v2/examples/l7rules-list-curl new file mode 100644 index 0000000000..e33ffd0f3f --- /dev/null +++ b/api-ref/source/v2/examples/l7rules-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules diff --git a/api-ref/source/v2/examples/l7rules-list-response.json b/api-ref/source/v2/examples/l7rules-list-response.json new file mode 100644 index 0000000000..8a94adf4ff --- /dev/null +++ b/api-ref/source/v2/examples/l7rules-list-response.json @@ -0,0 +1,19 @@ +{ + "rules": [ + { + "created_at": "2017-06-27T15:52:27", + "compare_type": "REGEX", + "provisioning_status": "ACTIVE", + "invert": false, + "admin_state_up": true, + "updated_at": "2017-06-27T15:52:28", + "value": "/images*", + "key": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "type": "PATH", + "id": "16621dbb-a736-4888-a57a-3ecd53df784c", + "operating_status": "ONLINE", + "tags": ["test_tag"] + } + ] +} diff --git a/api-ref/source/v2/examples/listener-create-curl b/api-ref/source/v2/examples/listener-create-curl new file mode 100644 index 0000000000..aa74e8e7ba --- /dev/null +++ b/api-ref/source/v2/examples/listener-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"listener": {"protocol": "TERMINATED_HTTPS", "description": "A great TLS listener", "admin_state_up": true, "connection_limit": 200, "protocol_port": "443", "loadbalancer_id": "607226db-27ef-4d41-ae89-f2a800e9c2db", "name": "great_tls_listener", "insert_headers": {"X-Forwarded-For": "true", "X-Forwarded-Port": "true"}, "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "sni_container_refs": ["http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee"], "timeout_client_data": 50000, "timeout_member_connect": 5000, "timeout_member_data": 50000, "timeout_tcp_inspect": 0, "tags": ["test_tag"], "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", "client_authentication": "MANDATORY", "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", "allowed_cidrs": ["192.0.2.0/24", "198.51.100.0/24"], "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", "tls_versions": ["TLSv1.2", "TLSv1.3"], "alpn_protocols": ["http/1.1", "http/1.0"], "hsts_include_subdomains": true, "hsts_max_age": 31536000, "hsts_preload": true}}' http://198.51.100.10:9876/v2/lbaas/listeners diff --git 
a/api-ref/source/v2/examples/listener-create-request.json b/api-ref/source/v2/examples/listener-create-request.json new file mode 100644 index 0000000000..dda54ebff1 --- /dev/null +++ b/api-ref/source/v2/examples/listener-create-request.json @@ -0,0 +1,38 @@ +{ + "listener": { + "protocol": "TERMINATED_HTTPS", + "description": "A great TLS listener", + "admin_state_up": true, + "connection_limit": 200, + "protocol_port": "443", + "loadbalancer_id": "607226db-27ef-4d41-ae89-f2a800e9c2db", + "name": "great_tls_listener", + "insert_headers": { + "X-Forwarded-For": "true", + "X-Forwarded-Port": "true" + }, + "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "sni_container_refs": [ + "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": ["test_tag"], + "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", + "client_authentication": "MANDATORY", + "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", + "allowed_cidrs": [ + "192.0.2.0/24", + "198.51.100.0/24" + ], + "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": ["TLSv1.2", "TLSv1.3"], + "alpn_protocols": ["http/1.1", "http/1.0"], + "hsts_include_subdomains": true, + "hsts_max_age": 31536000, + "hsts_preload": true + } +} diff --git a/api-ref/source/v2/examples/listener-create-response.json b/api-ref/source/v2/examples/listener-create-response.json new file mode 100644 index 0000000000..b24dc8a636 --- /dev/null +++ b/api-ref/source/v2/examples/listener-create-response.json @@ -0,0 +1,59 @@ +{ + "listener": { + "description": "A great TLS listener", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "protocol": "TERMINATED_HTTPS", + "protocol_port": 443, + "provisioning_status": "PENDING_CREATE", + "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "loadbalancers": [ + { + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" + } + ], + "insert_headers": { + "X-Forwarded-Port": "true", + "X-Forwarded-For": "true" + }, + "created_at": "2017-02-28T00:42:44", + "updated_at": "2017-02-28T00:44:30", + "id": "023f2e34-7806-443b-bfae-16c324569a3d", + "operating_status": "OFFLINE", + "default_pool_id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", + "sni_container_refs": [ + "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" + ], + "l7policies": [ + { + "id": "5e618272-339d-4a80-8d14-dbc093091bb1" + } + ], + "name": "great_tls_listener", + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": ["test_tag"], + "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", + "client_authentication": "MANDATORY", + "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", + "allowed_cidrs": [ + "192.0.2.0/24", + "198.51.100.0/24" + ], + "tls_ciphers": 
"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": [ + "TLSv1.2", + "TLSv1.3" + ], + "alpn_protocols": [ + "http/1.1", + "http/1.0" + ], + "hsts_include_subdomains": true, + "hsts_max_age": 31536000, + "hsts_preload": true + } +} diff --git a/api-ref/source/v2/examples/listener-delete-curl b/api-ref/source/v2/examples/listener-delete-curl new file mode 100644 index 0000000000..02e5991bcb --- /dev/null +++ b/api-ref/source/v2/examples/listener-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/listeners/023f2e34-7806-443b-bfae-16c324569a3d diff --git a/api-ref/source/v2/examples/listener-show-curl b/api-ref/source/v2/examples/listener-show-curl new file mode 100644 index 0000000000..ee85b4c4fd --- /dev/null +++ b/api-ref/source/v2/examples/listener-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/listeners/023f2e34-7806-443b-bfae-16c324569a3d diff --git a/api-ref/source/v2/examples/listener-show-response.json b/api-ref/source/v2/examples/listener-show-response.json new file mode 100644 index 0000000000..0168d2e165 --- /dev/null +++ b/api-ref/source/v2/examples/listener-show-response.json @@ -0,0 +1,59 @@ +{ + "listener": { + "description": "A great TLS listener", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "protocol": "TERMINATED_HTTPS", + "protocol_port": 443, + "provisioning_status": "ACTIVE", + "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "loadbalancers": [ + { + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" + } + ], + "insert_headers": { + "X-Forwarded-Port": "true", + "X-Forwarded-For": "true" + }, + "created_at": "2017-02-28T00:42:44", + "updated_at": "2017-02-28T00:44:30", + "id": "023f2e34-7806-443b-bfae-16c324569a3d", + "operating_status": "ONLINE", + "default_pool_id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", + "sni_container_refs": [ + "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" + ], + "l7policies": [ + { + "id": "5e618272-339d-4a80-8d14-dbc093091bb1" + } + ], + "name": "great_tls_listener", + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": ["test_tag"], + "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", + "client_authentication": "MANDATORY", + "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", + "allowed_cidrs": [ + "192.0.2.0/24", + "198.51.100.0/24" + ], + "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": [ + "TLSv1.2", + "TLSv1.3" + ], + "alpn_protocols": [ + "http/1.1", + "http/1.0" + ], + "hsts_include_subdomains": true, + "hsts_max_age": 31536000, + "hsts_preload": true + } +} diff --git a/api-ref/source/v2/examples/listener-stats-curl b/api-ref/source/v2/examples/listener-stats-curl new file mode 100644 index 0000000000..b4eb2c147d --- /dev/null +++ b/api-ref/source/v2/examples/listener-stats-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/listeners/023f2e34-7806-443b-bfae-16c324569a3d/stats diff --git a/api-ref/source/v2/examples/listener-stats-response.json b/api-ref/source/v2/examples/listener-stats-response.json new 
file mode 100644 index 0000000000..da25929b52 --- /dev/null +++ b/api-ref/source/v2/examples/listener-stats-response.json @@ -0,0 +1,9 @@ +{ + "stats": { + "bytes_in": 65671420, + "total_connections": 26189172, + "active_connections": 48629, + "bytes_out": 774771186, + "request_errors": 0 + } +} diff --git a/api-ref/source/v2/examples/listener-update-curl b/api-ref/source/v2/examples/listener-update-curl new file mode 100644 index 0000000000..01d0d98aeb --- /dev/null +++ b/api-ref/source/v2/examples/listener-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"listener": {"description": "An updated great TLS listener", "admin_state_up": true, "connection_limit": 200, "name": "great_updated_tls_listener", "insert_headers": {"X-Forwarded-For": "false", "X-Forwarded-Port": "true"}, "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "sni_container_refs": ["http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee"], "timeout_client_data": 100000, "timeout_member_connect": 1000, "timeout_member_data": 100000, "timeout_tcp_inspect": 5, "tags": ["updated_tag"], "client_ca_tls_container_ref": null, "allowed_cidrs": ["192.0.2.0/24", "198.51.100.0/24"], "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", "tls_versions": ["TLSv1.2", "TLSv1.3"], "alpn_protocols": ["http/1.1", "http/1.0"], "hsts_include_subdomains": true, "hsts_max_age": 31536000, "hsts_preload": true}}' http://198.51.100.10:9876/v2/lbaas/listeners/023f2e34-7806-443b-bfae-16c324569a3d diff --git a/api-ref/source/v2/examples/listener-update-request.json b/api-ref/source/v2/examples/listener-update-request.json new file mode 100644 index 0000000000..435e9826a6 --- /dev/null +++ b/api-ref/source/v2/examples/listener-update-request.json @@ -0,0 +1,42 @@ +{ + "listener": { + "description": "An updated great TLS listener", + "admin_state_up": true, + "connection_limit": 200, + "name": "great_updated_tls_listener", + "default_pool_id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", + "insert_headers": { + "X-Forwarded-For": "false", + "X-Forwarded-Port": "true" + }, + "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "sni_container_refs": [ + "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" + ], + "timeout_client_data": 100000, + "timeout_member_connect": 1000, + "timeout_member_data": 100000, + "timeout_tcp_inspect": 5, + "tags": [ + "updated_tag" + ], + "client_ca_tls_container_ref": null, + "allowed_cidrs": [ + "192.0.2.0/24", + "198.51.100.0/24" + ], + "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": [ + "TLSv1.2", + "TLSv1.3" + ], + "alpn_protocols": [ + "http/1.1", + "http/1.0" + ], + "hsts_include_subdomains": true, + "hsts_max_age": 31536000, + "hsts_preload": true + } +} diff --git a/api-ref/source/v2/examples/listener-update-response.json b/api-ref/source/v2/examples/listener-update-response.json new file mode 100644 index 0000000000..bd231f8fd0 --- /dev/null +++ b/api-ref/source/v2/examples/listener-update-response.json @@ -0,0 +1,59 @@ +{ + "listener": { + "description": "An updated great TLS listener", + "admin_state_up": true, + "project_id": 
"e3cd678b11784734bc366148aa37580e", + "protocol": "TERMINATED_HTTPS", + "protocol_port": 443, + "provisioning_status": "PENDING_UPDATE", + "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "loadbalancers": [ + { + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" + } + ], + "insert_headers": { + "X-Forwarded-Port": "true", + "X-Forwarded-For": "false" + }, + "created_at": "2017-02-28T00:42:44", + "updated_at": "2017-02-28T00:44:30", + "id": "023f2e34-7806-443b-bfae-16c324569a3d", + "operating_status": "OFFLINE", + "default_pool_id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", + "sni_container_refs": [ + "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" + ], + "l7policies": [ + { + "id": "5e618272-339d-4a80-8d14-dbc093091bb1" + } + ], + "name": "great_updated_tls_listener", + "timeout_client_data": 100000, + "timeout_member_connect": 1000, + "timeout_member_data": 100000, + "timeout_tcp_inspect": 5, + "tags": ["updated_tag"], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": [ + "192.0.2.0/24", + "198.51.100.0/24" + ], + "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": [ + "TLSv1.2", + "TLSv1.3" + ], + "alpn_protocols": [ + "http/1.1", + "http/1.0" + ], + "hsts_include_subdomains": true, + "hsts_max_age": 31536000, + "hsts_preload": true + } +} diff --git a/api-ref/source/v2/examples/listeners-list-curl b/api-ref/source/v2/examples/listeners-list-curl new file mode 100644 index 0000000000..e29c057e49 --- /dev/null +++ b/api-ref/source/v2/examples/listeners-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/listeners?project_id=e3cd678b11784734bc366148aa37580e diff --git a/api-ref/source/v2/examples/listeners-list-response.json b/api-ref/source/v2/examples/listeners-list-response.json new file mode 100644 index 0000000000..51d3689e19 --- /dev/null +++ b/api-ref/source/v2/examples/listeners-list-response.json @@ -0,0 +1,64 @@ +{ + "listeners": [ + { + "description": "A great TLS listener", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "protocol": "TERMINATED_HTTPS", + "protocol_port": 443, + "provisioning_status": "ACTIVE", + "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "loadbalancers": [ + { + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" + } + ], + "insert_headers": { + "X-Forwarded-Port": "true", + "X-Forwarded-For": "true" + }, + "created_at": "2017-02-28T00:42:44", + "updated_at": "2017-02-28T00:44:30", + "id": "023f2e34-7806-443b-bfae-16c324569a3d", + "operating_status": "ONLINE", + "default_pool_id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", + "sni_container_refs": [ + "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", + "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee" + ], + "l7policies": [ + { + "id": "58284ac9-673e-47ff-9dcb-09871a1956c4", + "id": "5e618272-339d-4a80-8d14-dbc093091bb1" + } + ], + "name": "great_tls_listener", + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [ + "test_tag" + ], + "client_ca_tls_container_ref": 
"http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", + "client_authentication": "NONE", + "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", + "allowed_cidrs": [ + "192.0.2.0/24", + "198.51.100.0/24" + ], + "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": [ + "TLSv1.2", + "TLSv1.3" + ], + "alpn_protocols": [ + "http/1.1", + "http/1.0" + ], + "hsts_include_subdomains": true, + "hsts_max_age": 31536000, + "hsts_preload": true + } + ] +} diff --git a/api-ref/source/v2/examples/loadbalancer-create-curl b/api-ref/source/v2/examples/loadbalancer-create-curl new file mode 100644 index 0000000000..12bc2d5a35 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"loadbalancer": {"description": "My favorite load balancer", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "flavor_id": "a7ae5d5a-d855-4f9a-b187-af66b53f4d04", "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "vip_address": "203.0.113.50", "additional_vips": [{"subnet_id": "3ca40b2e-c286-4e53-bdb9-dd01c8a0ad6d", "ip_address": "2001:db8::b33f"}, {"subnet_id": "44d92b92-510f-4c05-8058-bf5a17b4d41c"}], "provider": "octavia", "name": "best_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "availability_zone": "my_az", "tags": ["test_tag"]}}' http://198.51.100.10:9876/v2/lbaas/loadbalancers diff --git a/api-ref/source/v2/examples/loadbalancer-create-request.json b/api-ref/source/v2/examples/loadbalancer-create-request.json new file mode 100644 index 0000000000..51a8c07ffa --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-create-request.json @@ -0,0 +1,18 @@ +{ + "loadbalancer": { + "description": "My favorite load balancer", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", + "vip_address": "203.0.113.50", + "additional_vips": [ + {"subnet_id": "3ca40b2e-c286-4e53-bdb9-dd01c8a0ad6d", "ip_address": "2001:db8::b33f"}, + {"subnet_id": "44d92b92-510f-4c05-8058-bf5a17b4d41c"} + ], + "provider": "octavia", + "name": "best_load_balancer", + "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", + "availability_zone": "my_az", + "tags": ["test_tag"] + } +} diff --git a/api-ref/source/v2/examples/loadbalancer-create-response.json b/api-ref/source/v2/examples/loadbalancer-create-response.json new file mode 100644 index 0000000000..7f4b58d707 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-create-response.json @@ -0,0 +1,27 @@ +{ + "loadbalancer": { + "description": "My favorite load balancer", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "provisioning_status": "PENDING_CREATE", + "flavor_id": "", + "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", + "vip_address": "203.0.113.50", + "vip_network_id": "d0d217df-3958-4fbf-a3c2-8dad2908c709", + "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342", + "additional_vips": [ + {"subnet_id": "3ca40b2e-c286-4e53-bdb9-dd01c8a0ad6d", "ip_address": "2001:db8::b33f"}, + {"subnet_id": "44d92b92-510f-4c05-8058-bf5a17b4d41c", "ip_address": "198.51.100.4"} + ], + "provider": "octavia", + "created_at": "2017-02-28T00:41:44", + "updated_at": "2017-02-28T00:43:30", + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db", + "operating_status": "OFFLINE", + "name": 
"best_load_balancer", + "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", + "availability_zone": "my_az", + "tags": ["test_tag"], + "vip_vnic_type": "normal" + } +} diff --git a/api-ref/source/v2/examples/loadbalancer-delete-curl b/api-ref/source/v2/examples/loadbalancer-delete-curl new file mode 100644 index 0000000000..a2ecd6bc98 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers/4b9b652c-537a-44bf-bbe8-85a690625597 diff --git a/api-ref/source/v2/examples/loadbalancer-failover-curl b/api-ref/source/v2/examples/loadbalancer-failover-curl new file mode 100644 index 0000000000..8319dffdff --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-failover-curl @@ -0,0 +1 @@ +curl -X PUT -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers/4a13c573-623c-4d23-8a9c-581dc17ceb1f/failover diff --git a/api-ref/source/v2/examples/loadbalancer-full-create-request.json b/api-ref/source/v2/examples/loadbalancer-full-create-request.json new file mode 100644 index 0000000000..3ff6961078 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-full-create-request.json @@ -0,0 +1,91 @@ +{ + "loadbalancer": { + "description": "My favorite load balancer", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "flavor_id": "", + "listeners": [ + { + "name": "http_listener", + "protocol": "HTTP", + "protocol_port": 80, + "default_pool": { + "name": "rr_pool", + "protocol": "HTTP", + "lb_algorithm": "ROUND_ROBIN", + "healthmonitor": { + "type": "HTTP", + "delay": "3", + "expected_codes": "200,201,202", + "http_method": "GET", + "max_retries": 2, + "timeout": 1, + "url_path": "/index.html" + }, + "members": [ + { + "address": "192.0.2.16", + "protocol_port": 80 + }, + { + "address": "192.0.2.19", + "protocol_port": 80 + } + ] + } + }, + { + "name": "https_listener", + "protocol": "HTTPS", + "protocol_port": 443, + "default_pool": { + "name": "https_pool" + }, + "tags": ["test_tag"] + }, + { + "name": "redirect_listener", + "protocol": "HTTP", + "protocol_port": 8080, + "l7policies": [ + { + "action": "REDIRECT_TO_URL", + "name": "redirect_policy", + "redirect_url": "https://www.example.com/", + "admin_state_up": true + } + ] + } + ], + "pools": [ + { + "name": "https_pool", + "protocol": "HTTPS", + "lb_algorithm": "ROUND_ROBIN", + "healthmonitor": { + "type": "HTTPS", + "delay": "3", + "max_retries": 2, + "timeout": 1 + }, + "members": [ + { + "address": "192.0.2.51", + "protocol_port": 80 + }, + { + "address": "192.0.2.52", + "protocol_port": 80 + } + ] + } + ], + "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", + "vip_address": "203.0.113.50", + "provider": "octavia", + "name": "best_load_balancer", + "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", + "availability_zone": "my_az", + "tags": ["test_tag"] + } +} diff --git a/api-ref/source/v2/examples/loadbalancer-full-create-response.json b/api-ref/source/v2/examples/loadbalancer-full-create-response.json new file mode 100644 index 0000000000..2d39f02d68 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-full-create-response.json @@ -0,0 +1,183 @@ +{ + "loadbalancer": { + "description": "My favorite load balancer", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "provisioning_status": "ACTIVE", + "flavor_id": "", + "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", + "listeners": [ + { + "l7policies": [], + 
"protocol": "HTTP", + "description": "", + "default_tls_container_ref": null, + "admin_state_up": true, + "default_pool": { + "id": "c8cec227-410a-4a5b-af13-ecf38c2b0abb" + }, + "project_id": "e3cd678b11784734bc366148aa37580e", + "default_tls_container_id": null, + "connection_limit": -1, + "sni_container_refs": [], + "protocol_port": 80, + "id": "a99995c6-4f04-4ed3-a37f-ae58f6e7e5e1", + "name": "http_listener" + }, + { + "l7policies": [], + "protocol": "HTTPS", + "description": "", + "default_tls_container_ref": null, + "admin_state_up": true, + "default_pool": { + "id": "b0577aff-c1f9-40c6-9a3b-7b1d2a669136" + }, + "project_id": "e3cd678b11784734bc366148aa37580e", + "default_tls_container_id": null, + "connection_limit": -1, + "sni_container_refs": [], + "protocol_port": 443, + "id": "73c6c564-f215-48e9-91d6-f10bb3454954", + "name": "https_listener", + "tags": ["test_tag"] + }, + { + "l7policies": [ + { + "description": "", + "admin_state_up": true, + "rules": [], + "project_id": "e3cd678b11784734bc366148aa37580e", + "listener_id": "95de30ec-67f4-437b-b3f3-22c5d9ef9828", + "redirect_url": "https://www.example.com/", + "action": "REDIRECT_TO_URL", + "position": 1, + "id": "d0553837-f890-4981-b99a-f7cbd6a76577", + "name": "redirect_policy" + } + ], + "protocol": "HTTP", + "description": "", + "default_tls_container_ref": null, + "admin_state_up": true, + "default_pool": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "default_tls_container_id": null, + "connection_limit": -1, + "sni_container_refs": [], + "protocol_port": 8080, + "id": "95de30ec-67f4-437b-b3f3-22c5d9ef9828", + "name": "redirect_listener" + } + ], + "vip_address": "203.0.113.50", + "vip_network_id": "d0d217df-3958-4fbf-a3c2-8dad2908c709", + "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342", + "additional_vips": [], + "provider": "octavia", + "pools": [ + { + "lb_algorithm": "ROUND_ROBIN", + "protocol": "HTTP", + "description": "", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "session_persistence": null, + "healthmonitor": { + "name": "", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "delay": 3, + "expected_codes": "200,201,202", + "max_retries": 2, + "http_method": "GET", + "timeout": 1, + "max_retries_down": 3, + "url_path": "/index.html", + "type": "HTTP", + "id": "a8a2aa3f-d099-4752-8265-e6472f8147f9" + }, + "members": [ + { + "name": "", + "weight": 1, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "project_id": "e3cd678b11784734bc366148aa37580e", + "address": "192.0.2.16", + "protocol_port": 80, + "id": "7d19ad6c-d549-453e-a5cd-05382c6be96a" + }, + { + "name": "", + "weight": 1, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "project_id": "e3cd678b11784734bc366148aa37580e", + "address": "192.0.2.19", + "protocol_port": 80, + "id": "a167402b-caa6-41d5-b4d4-bde7f2cbfa5e" + } + ], + "id": "c8cec227-410a-4a5b-af13-ecf38c2b0abb", + "name": "rr_pool" + }, + { + "lb_algorithm": "ROUND_ROBIN", + "protocol": "HTTPS", + "description": "", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "session_persistence": null, + "healthmonitor": { + "name": "", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "delay": 3, + "expected_codes": "200,201,202", + "max_retries": 2, + "http_method": "GET", + "timeout": 1, + "max_retries_down": 3, + "url_path": "/index.html", + "type": "HTTPS", + "id": 
"d5bb7712-26b7-4809-8c14-3b407c0cb00d" + }, + "members": [ + { + "name": "", + "weight": 1, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "project_id": "e3cd678b11784734bc366148aa37580e", + "address": "192.0.2.51", + "protocol_port": 80, + "id": "f83832d5-1f22-45fa-866a-4abea36e0886" + }, + { + "name": "", + "weight": 1, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "project_id": "e3cd678b11784734bc366148aa37580e", + "address": "192.0.2.52", + "protocol_port": 80, + "id": "f83832d5-1f22-45fa-866a-4abea36e0886" + } + ], + "id": "b0577aff-c1f9-40c6-9a3b-7b1d2a669136", + "name": "https_pool" + } + ], + "created_at": "2017-02-28T00:41:44", + "updated_at": "2017-02-28T00:43:30", + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db", + "operating_status": "ONLINE", + "name": "best_load_balancer", + "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", + "availability_zone": "my_az", + "tags": ["test_tag"], + "vip_vnic_type": "normal" + } +} diff --git a/api-ref/source/v2/examples/loadbalancer-show-curl b/api-ref/source/v2/examples/loadbalancer-show-curl new file mode 100644 index 0000000000..ea5d55b49b --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers/8a562351-f0fb-424c-a0af-513461424ea5 diff --git a/api-ref/source/v2/examples/loadbalancer-show-response.json b/api-ref/source/v2/examples/loadbalancer-show-response.json new file mode 100644 index 0000000000..b5c3728d89 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-show-response.json @@ -0,0 +1,24 @@ +{ + "loadbalancer": { + "description": "My favorite load balancer", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "provisioning_status": "PENDING_CREATE", + "flavor_id": "", + "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", + "vip_address": "203.0.113.50", + "vip_network_id": "d0d217df-3958-4fbf-a3c2-8dad2908c709", + "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342", + "additional_vips": [], + "provider": "octavia", + "created_at": "2017-02-28T00:41:44", + "updated_at": "2017-02-28T00:43:30", + "id": "8a562351-f0fb-424c-a0af-513461424ea5", + "operating_status": "ONLINE", + "name": "best_load_balancer", + "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", + "availability_zone": "my_az", + "tags": [], + "vip_vnic_type": "normal" + } +} diff --git a/api-ref/source/v2/examples/loadbalancer-stats-curl b/api-ref/source/v2/examples/loadbalancer-stats-curl new file mode 100644 index 0000000000..9b796c65c6 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-stats-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers/4a13c573-623c-4d23-8a9c-581dc17ceb1f/stats diff --git a/api-ref/source/v2/examples/loadbalancer-stats-response.json b/api-ref/source/v2/examples/loadbalancer-stats-response.json new file mode 100644 index 0000000000..68c0f6710c --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-stats-response.json @@ -0,0 +1,9 @@ +{ + "stats": { + "bytes_in": 131342840, + "total_connections": 52378345, + "active_connections": 97258, + "bytes_out": 1549542372, + "request_errors": 0 + } +} diff --git a/api-ref/source/v2/examples/loadbalancer-status-curl b/api-ref/source/v2/examples/loadbalancer-status-curl new file mode 100644 index 0000000000..57072f187f --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-status-curl @@ -0,0 +1 @@ +curl -X 
GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers/bda6f032-80d3-414a-b395-e79c374e3929/status diff --git a/api-ref/source/v2/examples/loadbalancer-status-response.json b/api-ref/source/v2/examples/loadbalancer-status-response.json new file mode 100644 index 0000000000..99cacf8103 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-status-response.json @@ -0,0 +1,113 @@ +{ + "statuses": { + "loadbalancer": { + "name": "excellent_load_balancer", + "provisioning_status": "ACTIVE", + "listeners": [ + { + "name": "HTTP_listener", + "provisioning_status": "ACTIVE", + "pools": [ + { + "name": "HTTP_pool", + "provisioning_status": "ACTIVE", + "healthmonitor": { + "type": "HTTP", + "id": "0b608787-ea2d-48c7-89a1-8b8c24fa3b17", + "name": "HTTP_healthmonitor", + "provisioning_status": "ACTIVE" + }, + "members": [ + { + "name": "", + "provisioning_status": "ACTIVE", + "address": "192.0.2.20", + "protocol_port": 80, + "id": "3c6857f4-057a-405a-9134-bdeaa8796c8a", + "operating_status": "ERROR" + }, + { + "name": "", + "provisioning_status": "ACTIVE", + "address": "192.0.2.21", + "protocol_port": 80, + "id": "f7495909-1706-4c91-83b4-641dab6962ac", + "operating_status": "ONLINE" + } + ], + "id": "89a47f78-cf81-480b-ad74-bba4177eeb81", + "operating_status": "DEGRADED" + } + ], + "l7policies": [], + "id": "78febaf6-1e63-47c6-af5f-7b5e23fd7094", + "operating_status": "DEGRADED" + }, + { + "name": "redirect_listener", + "provisioning_status": "ACTIVE", + "pools": [], + "l7policies": [ + { + "action": "REDIRECT_TO_URL", + "rules": [ + { + "type": "PATH", + "id": "27f3007a-a1cb-4e17-9696-0e578d617715", + "provisioning_status": "ACTIVE" + } + ], + "id": "2e8f3139-0673-43f9-aae4-c7a9460e3233", + "name": "redirect_policy", + "provisioning_status": "ACTIVE" + } + ], + "id": "1341fbaf-ad4f-4cfe-a943-ad5e14e664cb", + "operating_status": "ONLINE" + } + ], + "pools": [ + { + "name": "HTTP_pool", + "provisioning_status": "ACTIVE", + "healthmonitor": { + "type": "HTTP", + "id": "0b608787-ea2d-48c7-89a1-8b8c24fa3b17", + "name": "HTTP_healthmonitor", + "provisioning_status": "ACTIVE" + }, + "members": [ + { + "name": "", + "provisioning_status": "ACTIVE", + "address": "192.0.2.20", + "protocol_port": 80, + "id": "3c6857f4-057a-405a-9134-bdeaa8796c8a", + "operating_status": "ERROR" + }, + { + "name": "", + "provisioning_status": "ACTIVE", + "address": "192.0.2.21", + "protocol_port": 80, + "id": "f7495909-1706-4c91-83b4-641dab6962ac", + "operating_status": "ONLINE" + } + ], + "id": "89a47f78-cf81-480b-ad74-bba4177eeb81", + "operating_status": "DEGRADED" + }, + { + "name": "source_ip_pool", + "provisioning_status": "ACTIVE", + "healthmonitor": {}, + "members": [], + "id": "8189d6a9-646e-4d23-b742-548dab991951", + "operating_status": "ONLINE" + } + ], + "id": "84faceee-cb97-48d0-93df-9e41d40d4cb4", + "operating_status": "DEGRADED" + } + } +} diff --git a/api-ref/source/v2/examples/loadbalancer-update-curl b/api-ref/source/v2/examples/loadbalancer-update-curl new file mode 100644 index 0000000000..4ff48e4a17 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"loadbalancer": {"description": "Temporarily disabled load balancer", "admin_state_up": false, "name": "disabled_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", "tags": ["updated_tag"]}}' http://198.51.100.10:9876/v2/lbaas/loadbalancers/8b6fc468-07d5-4d8b-a0b9-695060e72c31 diff --git 
a/api-ref/source/v2/examples/loadbalancer-update-request.json b/api-ref/source/v2/examples/loadbalancer-update-request.json new file mode 100644 index 0000000000..b66dac4371 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-update-request.json @@ -0,0 +1,9 @@ +{ + "loadbalancer": { + "description": "Temporarily disabled load balancer", + "admin_state_up": false, + "name": "disabled_load_balancer", + "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", + "tags": ["updated_tag"] + } +} diff --git a/api-ref/source/v2/examples/loadbalancer-update-response.json b/api-ref/source/v2/examples/loadbalancer-update-response.json new file mode 100644 index 0000000000..39ca86e748 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancer-update-response.json @@ -0,0 +1,23 @@ +{ + "loadbalancer": { + "description": "Temporarily disabled load balancer", + "admin_state_up": false, + "project_id": "e3cd678b11784734bc366148aa37580e", + "provisioning_status": "PENDING_UPDATE", + "flavor_id": "", + "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", + "vip_address": "203.0.113.50", + "vip_network_id": "d0d217df-3958-4fbf-a3c2-8dad2908c709", + "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342", + "additional_vips": [], + "provider": "octavia", + "created_at": "2017-02-28T00:41:44", + "updated_at": "2017-02-28T00:43:30", + "id": "8b6fc468-07d5-4d8b-a0b9-695060e72c31", + "operating_status": "ONLINE", + "name": "disabled_load_balancer", + "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", + "tags": ["updated_tag"], + "vip_vnic_type": "normal" + } +} diff --git a/api-ref/source/v2/examples/loadbalancers-list-curl b/api-ref/source/v2/examples/loadbalancers-list-curl new file mode 100644 index 0000000000..a6a5025f29 --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancers-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/loadbalancers?project_id=e3cd678b11784734bc366148aa37580e diff --git a/api-ref/source/v2/examples/loadbalancers-list-response.json b/api-ref/source/v2/examples/loadbalancers-list-response.json new file mode 100644 index 0000000000..82d12a091b --- /dev/null +++ b/api-ref/source/v2/examples/loadbalancers-list-response.json @@ -0,0 +1,36 @@ +{ + "loadbalancers": [ + { + "description": "My favorite load balancer", + "admin_state_up": true, + "project_id": "e3cd678b11784734bc366148aa37580e", + "provisioning_status": "ACTIVE", + "flavor_id": "", + "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", + "listeners": [ + { + "id": "023f2e34-7806-443b-bfae-16c324569a3d" + } + ], + "vip_address": "203.0.113.50", + "vip_network_id": "d0d217df-3958-4fbf-a3c2-8dad2908c709", + "vip_port_id": "b4ca07d1-a31e-43e2-891a-7d14f419f342", + "additional_vips": [], + "provider": "octavia", + "pools": [ + { + "id": "9aa16cdc-8d18-47b9-aba9-ec044531a79f" + } + ], + "created_at": "2017-02-28T00:41:44", + "updated_at": "2017-02-28T00:43:30", + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db", + "operating_status": "ONLINE", + "name": "best_load_balancer", + "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3", + "availability_zone": "my_az", + "tags": [], + "vip_vnic_type": "normal" + } + ] +} diff --git a/api-ref/source/v2/examples/member-batch-update-curl b/api-ref/source/v2/examples/member-batch-update-curl new file mode 100644 index 0000000000..75cbe8f272 --- /dev/null +++ b/api-ref/source/v2/examples/member-batch-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d 
'{"members":[{"name":"web-server-1","weight":"20","admin_state_up":true,"subnet_id":"bbb35f84-35cc-4b2f-84c2-a6a29bba68aa","address":"192.0.2.16","protocol_port":"80","monitor_port":8080,"tags":["updated_tag"]},{"name":"web-server-2","weight":"10","admin_state_up":true,"subnet_id":"bbb35f84-35cc-4b2f-84c2-a6a29bba68aa","address":"192.0.2.17","protocol_port":"80","monitor_port":8080,"tags":["updated_tag"]}]}' http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd/members diff --git a/api-ref/source/v2/examples/member-batch-update-request.json b/api-ref/source/v2/examples/member-batch-update-request.json new file mode 100644 index 0000000000..681b9bc055 --- /dev/null +++ b/api-ref/source/v2/examples/member-batch-update-request.json @@ -0,0 +1,24 @@ +{ + "members": [ + { + "name": "web-server-1", + "weight": 20, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "address": "192.0.2.16", + "protocol_port": 80, + "monitor_port": 8080, + "tags": ["updated_tag"] + }, + { + "name": "web-server-2", + "weight": 10, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "address": "192.0.2.17", + "protocol_port": 80, + "monitor_port": 8080, + "tags": ["updated_tag"] + } + ] +} diff --git a/api-ref/source/v2/examples/member-create-curl b/api-ref/source/v2/examples/member-create-curl new file mode 100644 index 0000000000..69d587810f --- /dev/null +++ b/api-ref/source/v2/examples/member-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"member":{"name":"web-server-1","weight":"20","admin_state_up":true,"subnet_id":"bbb35f84-35cc-4b2f-84c2-a6a29bba68aa","address":"192.0.2.16","protocol_port":"80","monitor_port":8080,"backup":false,"tags":["test_tag"]}}' http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd/members diff --git a/api-ref/source/v2/examples/member-create-request.json b/api-ref/source/v2/examples/member-create-request.json new file mode 100644 index 0000000000..ce6f9cc48c --- /dev/null +++ b/api-ref/source/v2/examples/member-create-request.json @@ -0,0 +1,13 @@ +{ + "member": { + "name": "web-server-1", + "weight": "20", + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "address": "192.0.2.16", + "protocol_port": "80", + "monitor_port": 8080, + "backup": false, + "tags": ["test_tag"] + } +} diff --git a/api-ref/source/v2/examples/member-create-response.json b/api-ref/source/v2/examples/member-create-response.json new file mode 100644 index 0000000000..1c2a08145c --- /dev/null +++ b/api-ref/source/v2/examples/member-create-response.json @@ -0,0 +1,21 @@ +{ + "member": { + "monitor_port": 8080, + "project_id": "e3cd678b11784734bc366148aa37580e", + "name": "web-server-1", + "weight": 20, + "backup": false, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "created_at": "2017-05-11T17:21:34", + "provisioning_status": "ACTIVE", + "monitor_address": null, + "updated_at": "2017-05-11T17:21:37", + "address": "192.0.2.16", + "protocol_port": 80, + "id": "957a1ace-1bd2-449b-8455-820b6e4b63f3", + "operating_status": "NO_MONITOR", + "tags": ["test_tag"], + "vnic_type": "normal" + } +} diff --git a/api-ref/source/v2/examples/member-delete-curl b/api-ref/source/v2/examples/member-delete-curl new file mode 100644 index 0000000000..eaf1a0126a --- /dev/null +++ b/api-ref/source/v2/examples/member-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " 
http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd/members/957a1ace-1bd2-449b-8455-820b6e4b63f3 diff --git a/api-ref/source/v2/examples/member-show-curl b/api-ref/source/v2/examples/member-show-curl new file mode 100644 index 0000000000..dcd7856fd9 --- /dev/null +++ b/api-ref/source/v2/examples/member-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/pools/24a43e68-36de-45f6-89cf-c03df583131d/members/957a1ace-1bd2-449b-8455-820b6e4b63f3 diff --git a/api-ref/source/v2/examples/member-show-response.json b/api-ref/source/v2/examples/member-show-response.json new file mode 100644 index 0000000000..1c2a08145c --- /dev/null +++ b/api-ref/source/v2/examples/member-show-response.json @@ -0,0 +1,21 @@ +{ + "member": { + "monitor_port": 8080, + "project_id": "e3cd678b11784734bc366148aa37580e", + "name": "web-server-1", + "weight": 20, + "backup": false, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "created_at": "2017-05-11T17:21:34", + "provisioning_status": "ACTIVE", + "monitor_address": null, + "updated_at": "2017-05-11T17:21:37", + "address": "192.0.2.16", + "protocol_port": 80, + "id": "957a1ace-1bd2-449b-8455-820b6e4b63f3", + "operating_status": "NO_MONITOR", + "tags": ["test_tag"], + "vnic_type": "normal" + } +} diff --git a/api-ref/source/v2/examples/member-update-curl b/api-ref/source/v2/examples/member-update-curl new file mode 100644 index 0000000000..87d430b7e5 --- /dev/null +++ b/api-ref/source/v2/examples/member-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"member":{"name":"web-server-1-2","weight":0,"admin_state_up":true,"monitor_address":"192.0.2.40","monitor_port":8888,"backup":false,"tags":["updated_tag"]}}' http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd/members/957a1ace-1bd2-449b-8455-820b6e4b63f3 diff --git a/api-ref/source/v2/examples/member-update-request.json b/api-ref/source/v2/examples/member-update-request.json new file mode 100644 index 0000000000..6ebf1e8763 --- /dev/null +++ b/api-ref/source/v2/examples/member-update-request.json @@ -0,0 +1,11 @@ +{ + "member": { + "name": "web-server-1-2", + "weight": 0, + "admin_state_up": true, + "monitor_address": "192.0.2.40", + "monitor_port": 8888, + "backup": false, + "tags": ["updated_tag"] + } +} diff --git a/api-ref/source/v2/examples/member-update-response.json b/api-ref/source/v2/examples/member-update-response.json new file mode 100644 index 0000000000..949b842735 --- /dev/null +++ b/api-ref/source/v2/examples/member-update-response.json @@ -0,0 +1,21 @@ +{ + "member": { + "monitor_port": 8080, + "project_id": "e3cd678b11784734bc366148aa37580e", + "name": "web-server-1", + "weight": 20, + "backup": false, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "created_at": "2017-05-11T17:21:34", + "provisioning_status": "PENDING_UPDATE", + "monitor_address": null, + "updated_at": "2017-05-11T17:21:37", + "address": "192.0.2.16", + "protocol_port": 80, + "id": "957a1ace-1bd2-449b-8455-820b6e4b63f3", + "operating_status": "NO_MONITOR", + "tags": ["updated_tag"], + "vnic_type": "normal" + } +} diff --git a/api-ref/source/v2/examples/members-list-curl b/api-ref/source/v2/examples/members-list-curl new file mode 100644 index 0000000000..d88e2cbabb --- /dev/null +++ b/api-ref/source/v2/examples/members-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: "
http://198.51.100.10:9876/v2/lbaas/pools/24a43e68-36de-45f6-89cf-c03df583131d/members?project_id=e3cd678b11784734bc366148aa37580e diff --git a/api-ref/source/v2/examples/members-list-response.json b/api-ref/source/v2/examples/members-list-response.json new file mode 100644 index 0000000000..5b463939c4 --- /dev/null +++ b/api-ref/source/v2/examples/members-list-response.json @@ -0,0 +1,23 @@ +{ + "members": [ + { + "monitor_port": 8080, + "project_id": "e3cd678b11784734bc366148aa37580e", + "name": "web-server-1", + "weight": 20, + "backup": false, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "created_at": "2017-05-11T17:21:34", + "provisioning_status": "ACTIVE", + "monitor_address": null, + "updated_at": "2017-05-11T17:21:37", + "address": "192.0.2.16", + "protocol_port": 80, + "id": "957a1ace-1bd2-449b-8455-820b6e4b63f3", + "operating_status": "NO_MONITOR", + "tags": ["test_tag"], + "vnic_type": "normal" + } + ] +} diff --git a/api-ref/source/v2/examples/pool-create-curl b/api-ref/source/v2/examples/pool-create-curl new file mode 100644 index 0000000000..1c5a2d9ba9 --- /dev/null +++ b/api-ref/source/v2/examples/pool-create-curl @@ -0,0 +1 @@ +curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"pool":{"lb_algorithm":"ROUND_ROBIN","protocol":"HTTP","description":"Super Round Robin Pool","admin_state_up":true,"session_persistence":{"cookie_name":"ChocolateChip","type":"APP_COOKIE"},"listener_id":"023f2e34-7806-443b-bfae-16c324569a3d","name":"super-pool","tags":["test_tag"],"tls_container_ref":"http://198.51.100.10:9311/v1/containers/4073846f-1d5e-42e1-a4cf-a7046419d0e6","ca_tls_container_ref":"http://198.51.100.10:9311/v1/containers/5f0d5540-fae6-4646-85d6-8a84883807fb","crl_container_ref":"http://198.51.100.10:9311/v1/containers/6faf0a01-6892-454c-aaac-650282820c0b","tls_enabled":true,"tls_ciphers":"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", "tls_versions": ["TLSv1.2", "TLSv1.3"], "alpn_protocols": ["http/1.1", "http/1.0"]}}' http://198.51.100.10:9876/v2/lbaas/pools diff --git a/api-ref/source/v2/examples/pool-create-request.json b/api-ref/source/v2/examples/pool-create-request.json new file mode 100644 index 0000000000..baf5e3db59 --- /dev/null +++ b/api-ref/source/v2/examples/pool-create-request.json @@ -0,0 +1,22 @@ +{ + "pool": { + "lb_algorithm": "ROUND_ROBIN", + "protocol": "HTTP", + "description": "Super Round Robin Pool", + "admin_state_up": true, + "session_persistence": { + "cookie_name": "ChocolateChip", + "type": "APP_COOKIE" + }, + "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", + "name": "super-pool", + "tags": ["test_tag"], + "tls_container_ref": "http://198.51.100.10:9311/v1/containers/4073846f-1d5e-42e1-a4cf-a7046419d0e6", + "ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/5f0d5540-fae6-4646-85d6-8a84883807fb", + "crl_container_ref": "http://198.51.100.10:9311/v1/containers/6faf0a01-6892-454c-aaac-650282820c0b", + "tls_enabled": true, + "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": ["TLSv1.2", "TLSv1.3"], + "alpn_protocols": ["http/1.1", "http/1.0"] + } +} diff --git a/api-ref/source/v2/examples/pool-create-response.json b/api-ref/source/v2/examples/pool-create-response.json new file mode 100644 index 0000000000..f706348a17 --- /dev/null +++ b/api-ref/source/v2/examples/pool-create-response.json @@ -0,0 +1,39 @@ +{ + "pool": { + "lb_algorithm": "ROUND_ROBIN", + "protocol": 
"HTTP", + "description": "Super Round Robin Pool", + "admin_state_up": true, + "loadbalancers": [ + { + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" + } + ], + "created_at": "2017-05-10T18:14:44", + "provisioning_status": "ACTIVE", + "updated_at": "2017-05-10T23:08:12", + "session_persistence": { + "cookie_name": "ChocolateChip", + "type": "APP_COOKIE" + }, + "listeners": [ + { + "id": "023f2e34-7806-443b-bfae-16c324569a3d" + } + ], + "members": [], + "healthmonitor_id": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd", + "operating_status": "ONLINE", + "name": "super-pool", + "tags": ["test_tag"], + "tls_container_ref": "http://198.51.100.10:9311/v1/containers/4073846f-1d5e-42e1-a4cf-a7046419d0e6", + "ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/5f0d5540-fae6-4646-85d6-8a84883807fb", + "crl_container_ref": "http://198.51.100.10:9311/v1/containers/6faf0a01-6892-454c-aaac-650282820c0b", + "tls_enabled": true, + "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": ["TLSv1.2", "TLSv1.3"], + "alpn_protocols": ["http/1.1", "http/1.0"] + } +} diff --git a/api-ref/source/v2/examples/pool-delete-curl b/api-ref/source/v2/examples/pool-delete-curl new file mode 100644 index 0000000000..5024e56c0c --- /dev/null +++ b/api-ref/source/v2/examples/pool-delete-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd diff --git a/api-ref/source/v2/examples/pool-session-persistence-obj.json b/api-ref/source/v2/examples/pool-session-persistence-obj.json new file mode 100644 index 0000000000..385be47c80 --- /dev/null +++ b/api-ref/source/v2/examples/pool-session-persistence-obj.json @@ -0,0 +1 @@ +{"cookie_name": "my_app_cookie", "type": "APP_COOKIE"} diff --git a/api-ref/source/v2/examples/pool-show-curl b/api-ref/source/v2/examples/pool-show-curl new file mode 100644 index 0000000000..c7db80ad4b --- /dev/null +++ b/api-ref/source/v2/examples/pool-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/pools/24a43e68-36de-45f6-89cf-c03df583131d diff --git a/api-ref/source/v2/examples/pool-show-response.json b/api-ref/source/v2/examples/pool-show-response.json new file mode 100644 index 0000000000..8165e8eb30 --- /dev/null +++ b/api-ref/source/v2/examples/pool-show-response.json @@ -0,0 +1,39 @@ +{ + "pool": { + "lb_algorithm": "ROUND_ROBIN", + "protocol": "HTTP", + "description": "Super Round Robin Pool", + "admin_state_up": true, + "loadbalancers": [ + { + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" + } + ], + "created_at": "2017-05-10T18:14:44", + "provisioning_status": "ACTIVE", + "updated_at": "2017-05-10T23:08:12", + "session_persistence": { + "cookie_name": "ChocolateChip", + "type": "APP_COOKIE" + }, + "listeners": [ + { + "id": "023f2e34-7806-443b-bfae-16c324569a3d" + } + ], + "members": [], + "healthmonitor_id": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd", + "operating_status": "ONLINE", + "name": "super-pool", + "tags": ["test_tag"], + "tls_container_ref": "http://198.51.100.10:9311/v1/containers/4073846f-1d5e-42e1-a4cf-a7046419d0e6", + "ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/5f0d5540-fae6-4646-85d6-8a84883807fb", + "crl_container_ref": "http://198.51.100.10:9311/v1/containers/6faf0a01-6892-454c-aaac-650282820c0b", + "tls_enabled": false, 
+ "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": ["TLSv1.2", "TLSv1.3"], + "alpn_protocols": ["http/1.1", "http/1.0"] + } +} diff --git a/api-ref/source/v2/examples/pool-update-curl b/api-ref/source/v2/examples/pool-update-curl new file mode 100644 index 0000000000..157ca4c81f --- /dev/null +++ b/api-ref/source/v2/examples/pool-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"pool":{"lb_algorithm":"LEAST_CONNECTIONS","session_persistence":{"type":"SOURCE_IP"},"description":"second description","name":"second_name","tags":["updated_tag"],"tls_container_ref":"http://198.51.100.10:9311/v1/containers/c1cd501d-3cf9-4873-a11b-a74bebcde929","ca_tls_container_ref":null,"crl_container_ref":null,"tls_enabled":false,"tls_ciphers":"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", "tls_versions": ["TLSv1.2", "TLSv1.3"], "alpn_protocols": ["http/1.1", "http/1.0"]}}' http://198.51.100.10:9876/v2/lbaas/pools/4029d267-3983-4224-a3d0-afb3fe16a2cd diff --git a/api-ref/source/v2/examples/pool-update-request.json b/api-ref/source/v2/examples/pool-update-request.json new file mode 100644 index 0000000000..ee62860239 --- /dev/null +++ b/api-ref/source/v2/examples/pool-update-request.json @@ -0,0 +1,18 @@ +{ + "pool": { + "lb_algorithm": "LEAST_CONNECTIONS", + "session_persistence": { + "type": "SOURCE_IP" + }, + "description": "Super Least Connections Pool", + "name": "super-least-conn-pool", + "tags": ["updated_tag"], + "tls_container_ref": "http://198.51.100.10:9311/v1/containers/c1cd501d-3cf9-4873-a11b-a74bebcde929", + "ca_tls_container_ref": null, + "crl_container_ref": null, + "tls_enabled": false, + "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": ["TLSv1.2", "TLSv1.3"], + "alpn_protocols": ["http/1.1", "http/1.0"] + } +} diff --git a/api-ref/source/v2/examples/pool-update-response.json b/api-ref/source/v2/examples/pool-update-response.json new file mode 100644 index 0000000000..dad0203f21 --- /dev/null +++ b/api-ref/source/v2/examples/pool-update-response.json @@ -0,0 +1,39 @@ +{ + "pool": { + "lb_algorithm": "LEAST_CONNECTIONS", + "protocol": "HTTP", + "description": "Super Least Connections Pool", + "admin_state_up": true, + "loadbalancers": [ + { + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" + } + ], + "created_at": "2017-05-10T18:14:44", + "provisioning_status": "PENDING_UPDATE", + "updated_at": "2017-05-10T23:08:12", + "session_persistence": { + "cookie_name": null, + "type": "SOURCE_IP" + }, + "listeners": [ + { + "id": "023f2e34-7806-443b-bfae-16c324569a3d" + } + ], + "members": [], + "healthmonitor_id": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "id": "4029d267-3983-4224-a3d0-afb3fe16a2cd", + "operating_status": "ONLINE", + "name": "super-least-conn-pool", + "tags": ["updated_tag"], + "tls_container_ref": "http://198.51.100.10:9311/v1/containers/c1cd501d-3cf9-4873-a11b-a74bebcde929", + "ca_tls_container_ref": null, + "crl_container_ref": null, + "tls_enabled": false, + "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": ["TLSv1.2", "TLSv1.3"], + "alpn_protocols": ["http/1.1", "http/1.0"] + } +} diff --git a/api-ref/source/v2/examples/pools-list-curl b/api-ref/source/v2/examples/pools-list-curl new file mode 100644 index 0000000000..4246a28b6b --- /dev/null +++ b/api-ref/source/v2/examples/pools-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " 
http://198.51.100.10:9876/v2/lbaas/pools?project_id=e3cd678b11784734bc366148aa37580e diff --git a/api-ref/source/v2/examples/pools-list-response.json b/api-ref/source/v2/examples/pools-list-response.json new file mode 100644 index 0000000000..1532cd0fc6 --- /dev/null +++ b/api-ref/source/v2/examples/pools-list-response.json @@ -0,0 +1,48 @@ +{ + "pools": [ + { + "lb_algorithm": "ROUND_ROBIN", + "protocol": "HTTP", + "description": "My round robin pool", + "admin_state_up": true, + "loadbalancers": [ + { + "id": "607226db-27ef-4d41-ae89-f2a800e9c2db" + } + ], + "created_at": "2017-04-13T18:14:44", + "provisioning_status": "ACTIVE", + "updated_at": "2017-04-13T23:08:12", + "session_persistence": { + "cookie_name": null, + "type": "SOURCE_IP" + }, + "listeners": [ + { + "id": "023f2e34-7806-443b-bfae-16c324569a3d" + } + ], + "members": [ + { + "id": "5bc73753-348f-4b5a-8f9c-10bd7b30dc35" + }, + { + "id": "692e8358-f8fd-4b92-bbca-6e4b97c75571" + } + ], + "healthmonitor_id": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "id": "ddb2b28f-89e9-45d3-a329-a359c3e39e4a", + "operating_status": "ONLINE", + "name": "round_robin_pool", + "tags": ["test_tag"], + "tls_container_ref": "http://198.51.100.10:9311/v1/containers/4073846f-1d5e-42e1-a4cf-a7046419d0e6", + "ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/5f0d5540-fae6-4646-85d6-8a84883807fb", + "crl_container_ref": "http://198.51.100.10:9311/v1/containers/6faf0a01-6892-454c-aaac-650282820c0b", + "tls_enabled": true, + "tls_ciphers": "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256", + "tls_versions": ["TLSv1.2", "TLSv1.3"], + "alpn_protocols": ["http/1.1", "http/1.0"] + } + ] +} diff --git a/api-ref/source/v2/examples/provider-availability-zone-capability-show-curl b/api-ref/source/v2/examples/provider-availability-zone-capability-show-curl new file mode 100644 index 0000000000..c89a067c00 --- /dev/null +++ b/api-ref/source/v2/examples/provider-availability-zone-capability-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/providers/amphora/availability_zone_capabilities diff --git a/api-ref/source/v2/examples/provider-availability-zone-capability-show-response.json b/api-ref/source/v2/examples/provider-availability-zone-capability-show-response.json new file mode 100644 index 0000000000..2517eeec4f --- /dev/null +++ b/api-ref/source/v2/examples/provider-availability-zone-capability-show-response.json @@ -0,0 +1,12 @@ +{ + "availability_zone_capabilities": [ + { + "name": "compute_zone", + "description": "The compute availability zone." + }, + { + "name": "volume_zone", + "description": "The volume availability zone." + } + ] +} diff --git a/api-ref/source/v2/examples/provider-flavor-capability-show-curl b/api-ref/source/v2/examples/provider-flavor-capability-show-curl new file mode 100644 index 0000000000..79f076add4 --- /dev/null +++ b/api-ref/source/v2/examples/provider-flavor-capability-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/providers/amphora/flavor_capabilities diff --git a/api-ref/source/v2/examples/provider-flavor-capability-show-response.json b/api-ref/source/v2/examples/provider-flavor-capability-show-response.json new file mode 100644 index 0000000000..5fae3c4e59 --- /dev/null +++ b/api-ref/source/v2/examples/provider-flavor-capability-show-response.json @@ -0,0 +1,8 @@ +{ + "flavor_capabilities": [ + { + "name": "loadbalancer_topology", + "description": "The load balancer topology.
One of: SINGLE - One amphora per load balancer. ACTIVE_STANDBY - Two amphora per load balancer." + } + ] +} diff --git a/api-ref/source/v2/examples/provider-list-curl b/api-ref/source/v2/examples/provider-list-curl new file mode 100644 index 0000000000..f1e875f357 --- /dev/null +++ b/api-ref/source/v2/examples/provider-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/providers diff --git a/api-ref/source/v2/examples/provider-list-response.json b/api-ref/source/v2/examples/provider-list-response.json new file mode 100644 index 0000000000..d23a924c7b --- /dev/null +++ b/api-ref/source/v2/examples/provider-list-response.json @@ -0,0 +1,12 @@ +{ + "providers": [ + { + "name": "amphora", + "description": "The Octavia Amphora driver." + }, + { + "name": "octavia", + "description": "Deprecated alias of the Octavia Amphora driver." + } + ] +} diff --git a/api-ref/source/v2/examples/quota-reset-curl b/api-ref/source/v2/examples/quota-reset-curl new file mode 100644 index 0000000000..34fd1a4627 --- /dev/null +++ b/api-ref/source/v2/examples/quota-reset-curl @@ -0,0 +1 @@ +curl -X DELETE -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/quotas/e3cd678b11784734bc366148aa37580e diff --git a/api-ref/source/v2/examples/quota-show-curl b/api-ref/source/v2/examples/quota-show-curl new file mode 100644 index 0000000000..b182892e52 --- /dev/null +++ b/api-ref/source/v2/examples/quota-show-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/quotas/e3cd678b11784734bc366148aa37580e diff --git a/api-ref/source/v2/examples/quota-update-curl b/api-ref/source/v2/examples/quota-update-curl new file mode 100644 index 0000000000..dc348632d4 --- /dev/null +++ b/api-ref/source/v2/examples/quota-update-curl @@ -0,0 +1 @@ +curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: " -d '{"quota":{"loadbalancer":10,"listener":-1,"member":50,"pool":-1,"healthmonitor":-1,"l7policy":15,"l7rule":25}}' http://198.51.100.10:9876/v2/lbaas/quotas/e3cd678b11784734bc366148aa37580e diff --git a/api-ref/source/v2/examples/quota-update-request.json b/api-ref/source/v2/examples/quota-update-request.json new file mode 100644 index 0000000000..56387b880c --- /dev/null +++ b/api-ref/source/v2/examples/quota-update-request.json @@ -0,0 +1,11 @@ +{ + "quota": { + "loadbalancer": 10, + "listener": -1, + "member": 50, + "pool": -1, + "healthmonitor": -1, + "l7policy": 15, + "l7rule": 25 + } +} diff --git a/api-ref/source/v2/examples/quota-update-response.json b/api-ref/source/v2/examples/quota-update-response.json new file mode 100644 index 0000000000..56387b880c --- /dev/null +++ b/api-ref/source/v2/examples/quota-update-response.json @@ -0,0 +1,11 @@ +{ + "quota": { + "loadbalancer": 10, + "listener": -1, + "member": 50, + "pool": -1, + "healthmonitor": -1, + "l7policy": 15, + "l7rule": 25 + } +} diff --git a/api-ref/source/v2/examples/quotas-defaults-curl b/api-ref/source/v2/examples/quotas-defaults-curl new file mode 100644 index 0000000000..b6a327b28a --- /dev/null +++ b/api-ref/source/v2/examples/quotas-defaults-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/quotas/defaults diff --git a/api-ref/source/v2/examples/quotas-defaults-response.json b/api-ref/source/v2/examples/quotas-defaults-response.json new file mode 100644 index 0000000000..fd2a432acf --- /dev/null +++ b/api-ref/source/v2/examples/quotas-defaults-response.json @@ -0,0 +1,11 @@ +{ + "quota": { + "loadbalancer": 50, + "listener": -1, + "member": 
-1, + "pool": -1, + "healthmonitor": -1, + "l7policy": -1, + "l7rule": -1 + } +} diff --git a/api-ref/source/v2/examples/quotas-list-curl b/api-ref/source/v2/examples/quotas-list-curl new file mode 100644 index 0000000000..8bef1d7719 --- /dev/null +++ b/api-ref/source/v2/examples/quotas-list-curl @@ -0,0 +1 @@ +curl -X GET -H "X-Auth-Token: " http://198.51.100.10:9876/v2/lbaas/quotas?project_id=e3cd678b11784734bc366148aa37580e diff --git a/api-ref/source/v2/examples/quotas-list-response.json b/api-ref/source/v2/examples/quotas-list-response.json new file mode 100644 index 0000000000..ae1f615d98 --- /dev/null +++ b/api-ref/source/v2/examples/quotas-list-response.json @@ -0,0 +1,14 @@ +{ + "quotas": [ + { + "loadbalancer": 5, + "member": 50, + "healthmonitor": -1, + "listener": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "pool": null, + "l7policy": 3, + "l7rule": null + } + ] +} diff --git a/api-ref/source/v2/examples/quotas-show-response.json b/api-ref/source/v2/examples/quotas-show-response.json new file mode 100644 index 0000000000..a03731dd4b --- /dev/null +++ b/api-ref/source/v2/examples/quotas-show-response.json @@ -0,0 +1,11 @@ +{ + "quota": { + "loadbalancer": 5, + "listener": -1, + "member": 50, + "pool": -1, + "healthmonitor": -1, + "l7policy": 20, + "l7rule": -1 + } +} diff --git a/api-ref/source/v2/flavor.inc b/api-ref/source/v2/flavor.inc new file mode 100644 index 0000000000..d13370b6ee --- /dev/null +++ b/api-ref/source/v2/flavor.inc @@ -0,0 +1,295 @@ +.. -*- rst -*- + +List Flavors +============ + +.. rest_method:: GET /v2.0/lbaas/flavors + +List all available flavors. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +The list might be empty. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + +Curl Example +------------ + +.. literalinclude:: examples/flavor-list-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - description: description + - enabled: enabled + - flavor_profile_id: flavor-profile-id + - flavors: flavors + - id: flavor-id + - name: name + +Response Example +---------------- + +.. literalinclude:: examples/flavors-list-response.json + :language: javascript + +Create Flavor +============= + +.. rest_method:: POST /v2.0/lbaas/flavors + +Creates a flavor. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +If you are not an administrative user the service returns the HTTP ``Forbidden +(403)`` response code. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - description: description-optional + - enabled: enabled-optional + - flavor: flavor + - flavor_profile_id: flavor-profile-id + - name: name + +Request Example +--------------- + +.. 
literalinclude:: examples/flavor-create-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/flavor-create-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - description: description + - enabled: enabled + - flavor_profile_id: flavor-profile-id + - flavor: flavor + - id: flavor-id + - name: name + +Response Example +---------------- + +.. literalinclude:: examples/flavor-create-response.json + :language: javascript + + +Show Flavor Details +=================== + +.. rest_method:: GET /v2.0/lbaas/flavors/{flavor_id} + +Shows the details of a flavor. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +This operation does not require a request body. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - flavor_id: path-flavor-id + +Curl Example +------------ + +.. literalinclude:: examples/flavor-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - description: description + - enabled: enabled + - flavor_profile_id: flavor-profile-id + - flavor: flavor + - id: flavor-id + - name: name + +Response Example +---------------- + +.. literalinclude:: examples/flavor-show-response.json + :language: javascript + +Update a Flavor +=============== + +.. rest_method:: PUT /v2.0/lbaas/flavors/{flavor_id} + +Update a flavor. + +If you are not an administrative user the service returns the HTTP ``Forbidden +(403)`` response code. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - description: description-optional + - enabled: enabled-optional + - flavor: flavor + - flavor_id: path-flavor-id + - name: name-optional + +Request Example +--------------- + +.. literalinclude:: examples/flavor-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/flavor-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - description: description + - enabled: enabled + - flavor_profile_id: flavor-profile-id + - flavor: flavor + - id: flavor-id + - name: name + +Response Example +---------------- + +.. literalinclude:: examples/flavor-update-response.json + :language: javascript + +Remove a Flavor +=============== + +.. rest_method:: DELETE /v2.0/lbaas/flavors/{flavor_id} + +Remove a flavor and its associated configuration. + +If any load balancers are using this flavor the service returns the HTTP +``Conflict (409)`` response code. + +If you are not an administrative user the service returns the HTTP ``Forbidden +(403)`` response code. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - flavor_id: path-flavor-id + +Curl Example +------------ + +.. 
literalinclude:: examples/flavor-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request. diff --git a/api-ref/source/v2/flavorprofile.inc b/api-ref/source/v2/flavorprofile.inc new file mode 100644 index 0000000000..c544e80d59 --- /dev/null +++ b/api-ref/source/v2/flavorprofile.inc @@ -0,0 +1,297 @@ +.. -*- rst -*- + +List Flavor Profiles +==================== + +.. rest_method:: GET /v2.0/lbaas/flavorprofiles + +List all available flavor profiles. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +If you are not an administrative user the service returns the HTTP ``Forbidden +(403)`` response code. + +The list might be empty. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + +Curl Example +------------ + +.. literalinclude:: examples/flavor-profile-list-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - flavor_data: flavor-data + - flavorprofiles: flavorprofiles + - id: flavor-profile-id + - name: name + - provider_name: provider-name + +Response Example +---------------- + +.. literalinclude:: examples/flavorprofiles-list-response.json + :language: javascript + +Create Flavor Profile +===================== + +.. rest_method:: POST /v2.0/lbaas/flavorprofiles + +Creates a flavor profile. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +If you are not an administrative user the service returns the HTTP ``Forbidden +(403)`` response code. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - flavor_data: flavor-data + - flavorprofile: flavorprofile + - name: name + - provider_name: provider-name + +Request Example +--------------- + +.. literalinclude:: examples/flavorprofile-create-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/flavorprofile-create-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - flavor_data: flavor-data + - flavorprofile: flavorprofile + - id: flavor-profile-id + - name: name + - provider_name: provider-name + +Response Example +---------------- + +.. literalinclude:: examples/flavorprofile-create-response.json + :language: javascript + +Show Flavor Profile Details +=========================== + +.. rest_method:: GET /v2.0/lbaas/flavorprofiles/{flavorprofile_id} + +Shows the details of a flavor profile. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +If you are not an administrative user the service returns the HTTP ``Forbidden +(403)`` response code. 
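+A quick sketch of the ``fields`` parameter described above in use: a request
+of the following form would return only the ``id`` and ``name`` of the flavor
+profile. The blank token header and the ``{flavorprofile_id}`` placeholder
+follow the conventions of the other curl examples in this patch.
+
+.. code::
+
+   curl -X GET -H "X-Auth-Token: " "http://198.51.100.10:9876/v2.0/lbaas/flavorprofiles/%7Bflavorprofile_id%7D?fields=id&fields=name"
+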
+ +This operation does not require a request body. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - flavorprofile_id: path-flavorprofile-id + +Curl Example +------------ + +.. literalinclude:: examples/flavorprofile-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - flavor_data: flavor-data + - flavorprofile: flavorprofile + - id: flavor-profile-id + - name: name + - provider_name: provider-name + +Response Example +---------------- + +.. literalinclude:: examples/flavorprofile-show-response.json + :language: javascript + +Update a Flavor Profile +======================= + +.. rest_method:: PUT /v2.0/lbaas/flavorprofiles/{flavorprofile_id} + +Update a flavor profile. + +If you are not an administrative user the service returns the HTTP ``Forbidden +(403)`` response code. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - flavor_data: flavor-data-optional + - flavorprofile: flavorprofile + - flavorprofile_id: path-flavorprofile-id + - name: name-optional + - provider_name: provider-name-optional + +Request Example +--------------- + +.. literalinclude:: examples/flavorprofile-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/flavorprofile-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - flavor_data: flavor-data + - flavorprofile: flavorprofile + - id: flavor-profile-id + - name: name + - provider_name: provider-name + +Response Example +---------------- + +.. literalinclude:: examples/flavorprofile-update-response.json + :language: javascript + +Remove a Flavor Profile +======================= + +.. rest_method:: DELETE /v2.0/lbaas/flavorprofiles/{flavorprofile_id} + +Remove a flavor profile and its associated configuration. + +If any flavors are using this flavor profile the service returns the HTTP +``Conflict (409)`` response code. + +If you are not an administrative user the service returns the HTTP ``Forbidden +(403)`` response code. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - flavorprofile_id: path-flavorprofile-id + +Curl Example +------------ + +.. literalinclude:: examples/flavorprofile-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request. diff --git a/api-ref/source/v2/general.inc b/api-ref/source/v2/general.inc new file mode 100644 index 0000000000..ebd7d1f57c --- /dev/null +++ b/api-ref/source/v2/general.inc @@ -0,0 +1,675 @@ +.. -*- rst -*- + +This section introduces readers to the OpenStack Octavia v2 RESTful HTTP API +and provides guidelines on how to use it. + +.. note:: + To clarify the Octavia API versioning, we have updated the endpoint to + support both the previously documented /v2.0 and the new path of /v2.
+ They are exactly the same API and /v2.0 will be a supported alias for the + life of the v2 API. + +Service Endpoints +================= + +All API calls described throughout the rest of this document require +authentication with the `OpenStack Identity service +`_. After authentication, the +base ``endpoint URL`` for the ``service type`` of ``load-balancer`` and +``service name`` of ``octavia`` can be extracted from the service catalog +returned with the identity token. + +**Example token snippet with service catalog** + +.. code:: + + { + "token": { + "catalog": [ + { + "endpoints": [ + { + "url": "http://198.51.100.10:9876/", + "interface": "public", + "region": "RegionOne", + "region_id": "RegionOne", + "id": "cd1c3c2dc6434c739ed0a12015373754" + } + ], + "type": "load-balancer", + "id": "1209701aecd3453e9803119cd28cb013", + "name": "octavia" + } + ] + } + } + + +For instance, if the ``endpoint URL`` is ``http://198.51.100.10:9876/`` then +the full API call for ``/v2/lbaas/loadbalancers`` is +``http://198.51.100.10:9876/v2/lbaas/loadbalancers``. + +Depending on the deployment, the ``load-balancer`` ``endpoint URL`` might be +http or https, a custom port, a custom path, and include your project id. The +only way to know the URLs for your deployment is by using the service catalog. +The ``load-balancer`` ``endpoint URL`` should never be hard coded in +applications, even if they are only expected to work at a single site. It +should always be discovered from the Identity token. + +As such, for the rest of this document we will be using short hand where ``GET +/v2/lbaas/loadbalancers`` really means ``GET +{your_load-balancer_endpoint_URL}/v2/lbaas/loadbalancers``. + +Neutron-lbaas and Octavia v2 APIs +================================= + +The Octavia v2 API is fully backward compatible with the neutron-lbaas v2 API +and is a superset of the neutron-lbaas v2 API. This is intended to provide a +simple migration path for deployments currently using the neutron-lbaas v2 API. +You can update the endpoint your application is using from the keystone +service catalog to use the ``octavia`` endpoint instead of the ``neutron`` +endpoint for load balancer activities. + +During the neutron-lbaas deprecation period a pass-through proxy will be +included in neutron to allow requests via neutron and the neutron-lbaas v2 API +to continue to function. Users are strongly encouraged to update their +applications to access load balancing via the Octavia v2 API. + +.. warning:: + + Load balancing functions accessed via the neutron endpoint are deprecated + and will be removed in a future release. Users are strongly encouraged to + migrate to using the octavia endpoint. + +Authentication and authorization +================================ + +The Octavia API v2 uses the `OpenStack Identity service +`_ as the default authentication +service. When Keystone is enabled, users that submit requests to the Octavia +service must provide an authentication token in **X-Auth-Token** request +header. You obtain the token by authenticating to the Keystone endpoint. + +When Keystone is enabled, the ``project_id`` attribute is not required in create +requests because the project ID is derived from the authentication token. + +The default authorization settings allow only administrative users to create +resources on behalf of a different project. + +Octavia uses information received from Keystone to authorize user requests. 
+Octavia handles the following types of authorization policies: + +- **Operation-based policies** specify access criteria for specific + operations, possibly with fine-grained control over specific attributes. + +- **Resource-based policies** control access to a specific resource. + Permissions might or might not be granted depending on the permissions + configured for the resource. Currently available for only the network + resource. + +The actual authorization policies enforced in Octavia might vary from +deployment to deployment. + +Request and response formats +============================ + +The Octavia API v2 supports JSON data serialization request and response +formats only. + +Request format +-------------- + +The Octavia API v2 only accepts requests with the JSON data serialization +format. The request must have no ``Accept`` header or an ``Accept`` header that +is compatible with ``application/json``. The one exception is the Oslo +middleware healthcheck endpoint. + +Response format +--------------- + +The Octavia API v2 always responds with the JSON data serialization +format. The one exception is the Oslo middleware healthcheck endpoint. + +Query extension + A ``.json`` extension can be added to the request URI. For example, the + following two requests are equivalent: + + - **GET** *publicURL*/loadbalancers + + - **GET** *publicURL*/loadbalancers.json + +.. _filtering: + +Filtering and column selection +============================== + +The Octavia API v2 supports filtering based on all top level attributes of +a resource. Filters are applicable to all list requests. + +For example, the following request returns all loadbalancers named ``foobar``: + +.. code:: + + GET /v2/lbaas/loadbalancers?name=foobar + +When you specify multiple filters, the Octavia API v2 returns only objects +that meet all filtering criteria. The operation applies an AND condition among +the filters. + +Note +---- + +Octavia does not offer an OR mechanism for filters. + +Alternatively, you can issue a distinct request for each filter and build a +response set from the received responses on the client-side. + +Filtering by Tags +----------------- + +**New in version 2.5** + +Most Octavia resources support adding tags to the resource attributes. +Octavia supports advanced filtering using these tags. The following tag +filters are supported by the Octavia API: + +- ``tags`` - Return the list of entities that have this tag or tags. +- ``tags-any`` - Return the list of entities that have one or more of the + given tags. +- ``not-tags`` - Return the list of entities that do not have one or more + of the given tags. +- ``not-tags-any`` - Return the list of entities that do not have at least + one of the given tags. + +When supplying a list of tags, the tags should be provided in a comma-separated +list. + +For example, if you would like to get the list of load balancers with both the +"red" and "blue" tags you would request: + +.. code:: + + GET /v2/lbaas/loadbalancers?tags=red,blue + +To get a list of load balancers that have the "red" or "blue" tag, you would +request: + +.. code:: + + GET /v2/lbaas/loadbalancers?tags-any=red,blue + +For a list of load balancers that do not have the "red" tag, you would request: + +.. code:: + + GET /v2/lbaas/loadbalancers?not-tags=red + +To get a list of load balancers that don't have either the "red" or "blue" tag +you would request: + +..
code:: + + GET /v2/lbaas/loadbalancers?not-tags-any=red,blue + +Tag filters can also be combined in the same request: + +.. code:: + + GET /v2/lbaas/loadbalancers?tags=red,blue&tags-any=green,orange + +Column Selection +---------------- + +By default, Octavia returns all attributes for any show or list call. The +Octavia API v2 has a mechanism to limit the set of attributes returned; for +example, you can request that only ``id`` be returned. + +You can use the ``fields`` query parameter to control the attributes returned +from the Octavia API v2. + +For example, the following request returns only ``id`` and ``name`` for each +load balancer: + +.. code:: + + GET /v2/lbaas/loadbalancers.json?fields=id&fields=name + +Synchronous versus asynchronous plug-in behavior +================================================ + +The Octavia API v2 presents a logical model of load balancers consisting +of listeners, pools, and members. It is up to the OpenStack Octavia plug-in +to communicate with the underlying infrastructure to ensure load balancing +is consistent with the logical model. A plug-in might perform these +operations asynchronously. + +When an API client modifies the logical model by issuing an HTTP **POST**, +**PUT**, or **DELETE** request, the API call might return before the plug-in +modifies underlying virtual and physical load balancing devices. However, an +API client is guaranteed that all subsequent API calls properly reflect the +changed logical model. + +For example, if a client issues an HTTP **PUT** request to set the weight +of a member, there is no guarantee that the new weight will be in effect when +the HTTP call returns. This is indicated by an HTTP response code of 202. + +You can use the ``provisioning_status`` attribute to determine whether the +Octavia plug-in has successfully completed the configuration of the resource. + +Bulk-create +=========== + +The Octavia v2 API does not support bulk create. You cannot create more than +one load balancer per API call. + +The Octavia v2 API does support single call create which allows you to +create a fully populated load balancer in one API call. This is discussed +in the load balancer create section of this reference. + +Pagination +========== + +To reduce load on the service, list operations will return a maximum number of +items at a time. To navigate the collection, the parameters limit, marker and +page\_reverse can be set in the URI. For example: + +.. code:: + + ?limit=100&marker=1234&page_reverse=False + +The ``marker`` parameter is the ID of the last item in the previous list. The +``limit`` parameter sets the page size. The ``page_reverse`` parameter sets +the page direction. These parameters are optional. If the client requests a +limit beyond the maximum limit configured by the deployment, the server returns +the maximum limit number of items. + +For convenience, list responses contain atom "next" links and "previous" links. +The last page in the list requested with 'page\_reverse=False' will not contain +a "next" link, and the last page in the list requested with 'page\_reverse=True' +will not contain a "previous" link. The following examples illustrate two pages +with three items. The first page was retrieved through: + +.. code:: + + GET http://198.51.100.10:9876/v2/lbaas/loadbalancers.json?limit=2 + +If a particular plug-in does not support pagination operations the Octavia API +v2 will emulate the pagination behavior so that users can expect the same +behavior regardless of the particular plug-in running in the background.
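+As a sketch of how a client might walk a whole collection by following these
+"next" links (``curl`` and ``jq`` are assumed to be available; the token and
+endpoint are placeholders), before the worked request and response examples
+that follow:
+
+.. code:: bash
+
+   URL="http://198.51.100.10:9876/v2/lbaas/loadbalancers?limit=100"
+   while [ -n "$URL" ]; do
+       PAGE=$(curl -s -H "X-Auth-Token: $TOKEN" "$URL")
+       echo "$PAGE" | jq -r '.loadbalancers[].id'
+       # Follow the "next" link if one is present; the last page has none.
+       URL=$(echo "$PAGE" | jq -r '.loadbalancers_links[]? | select(.rel == "next") | .href')
+   done
+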
+ +**Example load balancer list, first page: JSON request** + +.. code:: + + GET /v2/lbaas/loadbalancers.json?limit=2 HTTP/1.1 + Host: 198.51.100.10:9876 + Content-Type: application/json + Accept: application/json + + +**Example load balancer list, first page: JSON response** + +.. code:: + + { + "loadbalancers": [ + { + "admin_state_up": true, + "listeners": [], + "vip_subnet_id": "08dce793-daef-411d-a896-d389cd45b1ea", + "pools": [], + "provider": "octavia", + "description": "Best App load balancer 1", + "name": "bestapplb1", + "operating_status": "ONLINE", + "id": "34d5f4a5-cbbc-43a0-878f-b8a26370e6e7", + "provisioning_status": "ACTIVE", + "vip_port_id": "1e20d91d-8df9-4c15-9778-28bc89226c19", + "vip_address": "203.0.113.10", + "project_id": "bf325b04-e7b1-4002-9b10-f4984630367f" + }, + { + "admin_state_up": true, + "listeners": [], + "vip_subnet_id": "08dce793-daef-411d-a896-d389cd45b1ea", + "pools": [], + "provider": "octavia", + "description": "Second Best App load balancer 1", + "name": "2ndbestapplb1", + "operating_status": "ONLINE", + "id": "0fdb0ca7-0a38-4aea-891c-daaed40bcafe", + "provisioning_status": "ACTIVE", + "vip_port_id": "21f7ac04-6824-4222-93cf-46e0d70607f9", + "vip_address": "203.0.113.20", + "project_id": "bf325b04-e7b1-4002-9b10-f4984630367f" + } + ], + "loadbalancers_links": [ + { + "href": "http://198.51.100.10:9876/v2/lbaas/loadbalancers.json?limit=2&marker=0fdb0ca7-0a38-4aea-891c-daaed40bcafe", + "rel": "next" + }, + { + "href": "http://198.51.100.10:9876/v2/lbaas/loadbalancers.json?limit=2&marker=34d5f4a5-cbbc-43a0-878f-b8a26370e6e7&page_reverse=True", + "rel": "previous" + } + ] + } + + +The last page won't show the "next" links + +**Example load balancer list, last page: JSON request** + +.. code:: + + GET /v2/lbaas/loadbalancers.json?limit=2&marker=4ef465f3-0233-44af-b93d-9d3eae4daf85 HTTP/1.1 + Host: 198.51.100.10:9876 + Content-Type: application/json + Accept: application/json + + + +**Example load balancer list, last page: JSON response** + +.. code:: + + { + "loadbalancers": [ + { + "admin_state_up": true, + "listeners": [], + "vip_subnet_id": "08dce793-daef-411d-a896-d389cd45b1ea", + "pools": [], + "provider": "octavia", + "description": "Other App load balancer 1", + "name": "otherapplb1", + "operating_status": "ONLINE", + "id": "4ef465f3-0233-44af-b93d-9d3eae4daf85", + "provisioning_status": "ACTIVE", + "vip_port_id": "f777a1c7-7f59-4a36-ad34-24dfebaf19e6", + "vip_address": "203.0.113.50", + "project_id": "bf325b04-e7b1-4002-9b10-f4984630367f" + } + ], + "loadbalancers_links": [ + { + "href": "http://198.51.100.10:9876/v2/lbaas/loadbalancers.json?limit=2&marker=4ef465f3-0233-44af-b93d-9d3eae4daf85&page_reverse=True", + "rel": "previous" + } + ] + } + + +Sorting +======= + +Sorting is determined through the use of the 'sort' query string parameter. The +value of this parameter is a comma-separated list of sort keys. Sort directions +can optionally be appended to each sort key, separated by the ':' character. + +The supported sort directions are either 'asc' for ascending or 'desc' for +descending. + +The caller may (but is not required to) specify a sort direction for each key. +If a sort direction is not specified for a key, then a default is set by the +server. + +For example: + +- Only sort keys specified: + + + ``sort=key1,key2,key3`` + + 'key1' is the first key, 'key2' is the second key, etc. 
+ + Sort directions are defaulted by the server + +- Some sort directions specified: + + + ``sort=key1:asc,key2,key3`` + + Any sort key without a corresponding direction is defaulted + + 'key1' is the first key (ascending order), 'key2' is the second key + (direction defaulted by the server), etc. + +- Equal number of sort keys and directions specified: + + + ``sort=key1:asc,key2:desc,key3:asc`` + + Each key is paired with the corresponding direction + + 'key1' is the first key (ascending order), 'key2' is the second key + (descending order), etc. + +You can also use the ``sort_key`` and ``sort_dir`` parameters to sort the +results of list operations. Currently sorting does not work with extended +attributes of a resource. The ``sort_key`` and ``sort_dir`` can be repeated, +and the number of ``sort_key`` and ``sort_dir`` provided must be the same. The +``sort_dir`` parameter indicates in which direction to sort. Acceptable +values are ``asc`` (ascending) and ``desc`` (descending). + +If a particular plug-in does not support sorting operations the Octavia API +v2 emulates the sorting behavior so that users can expect the same behavior +regardless of the particular plug-in that runs in the background. + + +Response Codes +============== + +The following HTTP response status codes are used by the Octavia v2 API. + +Success +------- + ++------+----------------------------------------------------------------+ +| Code | Description | ++======+================================================================+ +| 200 | - The synchronous request was successful | ++------+----------------------------------------------------------------+ +| 202 | - The asynchronous request was accepted and is being processed | ++------+----------------------------------------------------------------+ +| 204 | - The request was successful, no content to return | +| | - The entity was successfully deleted | ++------+----------------------------------------------------------------+ + +Faults +------ + +The Octavia API v2 returns an error response if a failure occurs while +processing a request. Octavia uses only standard HTTP error codes. +4\ *nn* errors indicate problems in the particular request being sent from +the client.
+ ++------+----------------------------------------------------------------+ +| Code | Description | ++======+================================================================+ +| 400 | - Bad request | +| | - Malformed request URI or body requested | +| | - The request could not be understood | +| | - Invalid values entered | +| | - Bulk operations disallowed | +| | - Validation failed | +| | - Method not allowed for request body (such as trying to | +| | update attributes that can be specified at create-time only) | ++------+----------------------------------------------------------------+ +| 401 | - Unauthorized: Access is denied due to invalid credentials | ++------+----------------------------------------------------------------+ +| 403 | - Policy does not allow current user to do this operation | +| | - The project is over quota for the request | ++------+----------------------------------------------------------------+ +| 404 | - Not Found | +| | - Non existent URI | +| | - Resource not found | ++------+----------------------------------------------------------------+ +| 409 | - Conflict | +| | - The resource is in an immutable state | ++------+----------------------------------------------------------------+ +| 500 | - Internal server error | ++------+----------------------------------------------------------------+ +| 503 | - Service unavailable | +| | - The project is busy with other requests, try again later | ++------+----------------------------------------------------------------+ + + +Status Codes +============ + +Octavia API v2 entities have two status codes present in the response body. +The ``provisioning_status`` describes the lifecycle status of the entity while +the ``operating_status`` provides the observed status of the entity. + +For example, a member may be in a ``provisioning_status`` of ``PENDING_UPDATE`` +and have an ``operating_status`` of ``ONLINE``. This would indicate that an +update operation is occurring on this member and it is in an immutable state +but it is healthy and able to service requests. This situation could occur if +the user made a request to update the weight of the member. + +.. _op_status: + +Operating Status Codes +---------------------- + ++------------+--------------------------------------------------------------+ +| Code | Description | ++============+==============================================================+ +| ONLINE | - Entity is operating normally | +| | - All pool members are healthy | ++------------+--------------------------------------------------------------+ +| DRAINING | - The member is not accepting new connections | ++------------+--------------------------------------------------------------+ +| OFFLINE | - Entity is administratively disabled | ++------------+--------------------------------------------------------------+ +| DEGRADED | - One or more of the entity's components are in ERROR | ++------------+--------------------------------------------------------------+ +| ERROR | - The entity has failed | +| | - The member is failing its health monitoring checks | +| | - All of the pool members are in ERROR | ++------------+--------------------------------------------------------------+ +| NO_MONITOR | - No health monitor is configured for this entity and its | +| | status is unknown | ++------------+--------------------------------------------------------------+ + +..
_prov_status: + +Provisioning Status Codes +------------------------- + ++----------------+----------------------------------------------------------+ +| Code | Description | ++================+==========================================================+ +| ACTIVE | - The entity was provisioned successfully | ++----------------+----------------------------------------------------------+ +| DELETED | - The entity has been successfully deleted | ++----------------+----------------------------------------------------------+ +| ERROR | - Provisioning failed | ++----------------+----------------------------------------------------------+ +| PENDING_CREATE | - The entity is being created | ++----------------+----------------------------------------------------------+ +| PENDING_UPDATE | - The entity is being updated | ++----------------+----------------------------------------------------------+ +| PENDING_DELETE | - The entity is being deleted | ++----------------+----------------------------------------------------------+ + +Entities in a ``PENDING_*`` state are immutable and cannot be modified until +the requested operation completes. The entity will return to the ``ACTIVE`` +provisioning status once the asynchronous operation completes. + +An entity in ``ERROR`` has failed provisioning. The entity may be deleted and +recreated. + + +.. _valid_protocol: + +Protocol Combinations (Listener/Pool) +===================================== + +The listener and pool can be associated through the listener's +``default_pool_id`` or l7policy's ``redirect_pool_id``. Both the listener and +the pool must set the protocol parameter, but the association between the +listener and the pool isn't arbitrary; only certain protocol combinations are +valid. + +Valid protocol combinations +--------------------------- + +.. |1| unicode:: U+2002 .. nut ( ) +.. |2| unicode:: U+2003 .. mutton ( ) +.. |listener| replace:: |2| |2| Listener +.. |1Y| replace:: |1| Y +.. |1N| replace:: |1| N +.. |2Y| replace:: |2| Y +.. |2N| replace:: |2| N +.. |8Y| replace:: |2| |2| |2| |2| Y +.. |8N| replace:: |2| |2| |2| |2| N + ++-------------+-------+--------+-------+------+-------------------+------+ +|| |listener| || HTTP || HTTPS || SCTP || TCP || TERMINATED_HTTPS || UDP | +|| Pool || || || || || || | ++=============+=======+========+=======+======+===================+======+ +| HTTP | |2Y| | |2N| | |2N| | |1Y| | |8Y| | |1N| | ++-------------+-------+--------+-------+------+-------------------+------+ +| HTTPS | |2N| | |2Y| | |2N| | |1Y| | |8N| | |1N| | ++-------------+-------+--------+-------+------+-------------------+------+ +| PROXY | |2Y| | |2Y| | |2N| | |1Y| | |8Y| | |1N| | ++-------------+-------+--------+-------+------+-------------------+------+ +| PROXYV2 | |2Y| | |2Y| | |2N| | |1Y| | |8Y| | |1N| | ++-------------+-------+--------+-------+------+-------------------+------+ +| SCTP | |2N| | |2N| | |2Y| | |1N| | |8N| | |1N| | ++-------------+-------+--------+-------+------+-------------------+------+ +| TCP | |2N| | |2Y| | |2N| | |1Y| | |8N| | |1N| | ++-------------+-------+--------+-------+------+-------------------+------+ +| UDP | |2N| | |2N| | |2N| | |1N| | |8N| | |1Y| | ++-------------+-------+--------+-------+------+-------------------+------+ + +"Y" means the combination is valid and "N" means invalid. + +The HTTPS protocol is HTTPS pass-through. For most providers, this is treated +as a TCP protocol. Some advanced providers may support HTTPS session +persistence features by using the session ID.
The Amphora provider treats +HTTPS as a TCP flow, but currently does not support HTTPS session persistence +using the session ID. + +The pool protocol of PROXY will use the listener protocol as the pool protocol +but will wrap that protocol in the proxy protocol. In the case of listener +protocol TERMINATED_HTTPS, a pool protocol of PROXY will be HTTP wrapped in the +proxy protocol. + +Protocol Combinations (Pool/Health Monitor) +=========================================== + +Pools and health monitors are also related with regard to protocol. Pools set +the protocol parameter for the real member connections, and the health monitor +sets a type for health checks. Health check types are limited based on the +protocol of the pool. + +Valid protocol combinations +--------------------------- + +.. |Health Monitor| replace:: |2| |2| Health Monitor +.. |UDPCONNECT| replace:: UDP-CONNECT +.. |4Y| replace:: |2| |2| Y +.. |4N| replace:: |2| |2| N +.. |5Y| replace:: |2| |2| |1| Y +.. |5N| replace:: |2| |2| |1| N + ++-------------------+-------+--------+-------+-------+------+------------+---------------+ +|| |Health Monitor| || HTTP || HTTPS || PING || SCTP || TCP || TLS-HELLO || |UDPCONNECT| | +|| Pool || || || || || || || | ++===================+=======+========+=======+=======+======+============+===============+ +| HTTP | |2Y| | |2Y| | |1Y| | |1N| | |1Y| | |4Y| | |5N| | ++-------------------+-------+--------+-------+-------+------+------------+---------------+ +| HTTPS | |2Y| | |2Y| | |1Y| | |1N| | |1Y| | |4Y| | |5N| | ++-------------------+-------+--------+-------+-------+------+------------+---------------+ +| PROXY | |2Y| | |2Y| | |1Y| | |1N| | |1Y| | |4Y| | |5N| | ++-------------------+-------+--------+-------+-------+------+------------+---------------+ +| PROXYV2 | |2Y| | |2Y| | |1Y| | |1N| | |1Y| | |4Y| | |5N| | ++-------------------+-------+--------+-------+-------+------+------------+---------------+ +| SCTP | |2Y| | |2N| | |1N| | |1Y| | |1Y| | |4N| | |5Y| | ++-------------------+-------+--------+-------+-------+------+------------+---------------+ +| TCP | |2Y| | |2Y| | |1Y| | |1N| | |1Y| | |4Y| | |5N| | ++-------------------+-------+--------+-------+-------+------+------------+---------------+ +| UDP | |2Y| | |2N| | |1N| | |1Y| | |1Y| | |4N| | |5Y| | ++-------------------+-------+--------+-------+-------+------+------------+---------------+ + +"Y" means the combination is valid and "N" means invalid. + +These combinations are mostly as you'd expect for all non-UDP/SCTP pool +protocols: non-UDP/SCTP pools can have health monitors with any check type +besides UDP-CONNECT and SCTP. +For UDP or SCTP pools however, things are a little more complicated. UDP and +SCTP Pools support UDP-CONNECT and SCTP but also HTTP and TCP checks. HTTPS +checks are technically feasible but have not yet been implemented. diff --git a/api-ref/source/v2/healthmonitor.inc b/api-ref/source/v2/healthmonitor.inc new file mode 100644 index 0000000000..6d7090b66e --- /dev/null +++ b/api-ref/source/v2/healthmonitor.inc @@ -0,0 +1,420 @@ +.. -*- rst -*- + +List Health Monitors +==================== + +.. rest_method:: GET /v2/lbaas/healthmonitors + +Lists all health monitors for the project. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. 
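+For example, a filtered list request might combine field selection with a
+tag filter, as in this sketch (the endpoint and token are placeholders, and
+the ``production`` tag is an assumption; see also the Curl Example below):
+
+.. code:: bash
+
+   # List only the IDs and names of health monitors tagged "production".
+   curl -H "X-Auth-Token: $TOKEN" \
+       "http://198.51.100.10:9876/v2/lbaas/healthmonitors?fields=id&fields=name&tags=production"
+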
+ +Administrative users can specify a project ID that is different than their own +to list health monitors for other projects. + +The list might be empty. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - project_id: project_id_query + +Curl Example +------------ + +.. literalinclude:: examples/healthmonitor-list-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - created_at: created_at + - delay: healthmonitor-delay + - domain_name: healthmonitor-domain_name + - expected_codes: healthmonitor-expected_codes + - http_method: healthmonitor-http_method + - http_version: healthmonitor-http_version + - id: healthmonitor-id + - max_retries: healthmonitor-max-retries + - max_retries_down: healthmonitor-max-retries-down + - name: name + - operating_status: operating_status + - pool_id: pool-id + - project_id: project_id + - provisioning_status: provisioning_status + - tags: tags + - timeout: healthmonitor-timeout + - type: healthmonitor-type + - updated_at: updated_at + - url_path: healthmonitor-url_path + +Response Example +---------------- + +.. literalinclude:: examples/healthmonitors-list-response.json + :language: javascript + +Create Health Monitor +===================== + +.. rest_method:: POST /v2/lbaas/healthmonitors + +Creates a health monitor on a pool. + +Health monitors define how the load balancer monitors backend servers +to determine if they are available to service requests. + +This operation provisions a new health monitor by using the configuration that +you define in the request object. After the API validates the request and +starts the provisioning process, the API returns a response object that +contains a unique ID and the status of provisioning the health monitor. + +In the response, the health monitor :ref:`provisioning status` is +``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``. + +If the status is ``PENDING_CREATE``, issue GET +``/v2/lbaas/healthmonitors/{healthmonitor_id}`` to view the progress of +the provisioning operation. When the health monitor status changes +to ``ACTIVE``, the health monitor is successfully provisioned and +is ready for further configuration. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +Specifying a project_id is deprecated. The health monitor will inherit +the project_id of the parent load balancer. + +At a minimum, you must specify these health monitor attributes: + +- ``delay`` The interval, in seconds, between health checks. + +- ``max_retries`` The number of successful checks before changing the + operating status of the member to ONLINE. + +- ``pool_id`` The pool to monitor. + +- ``timeout`` The time, in seconds, after which a health check + times out. + +- ``type`` The type of health monitor. One of ``HTTP``, ``HTTPS``, ``PING``, + ``SCTP``, ``TCP``, ``TLS-HELLO``, or ``UDP-CONNECT``. + +Some attributes receive default values if you omit them from the request: + +- ``admin_state_up`` The default is ``true``. + +- ``expected_codes`` The expected HTTP status codes to get from a + successful health check. 
The default is ``200``. + +- ``http_method`` The default is ``GET``. + +- ``http_version`` The default is ``1.0``. + +- ``max_retries_down`` The default is ``3``. + +- ``url_path`` The default is ``/``. + +To create a health monitor, the parent load balancer must have an ``ACTIVE`` +provisioning status. + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + - 503 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up-default-optional + - delay: healthmonitor-delay + - domain_name: healthmonitor-domain_name-optional + - expected_codes: healthmonitor-expected_codes-optional + - http_method: healthmonitor-http_method-optional + - http_version: healthmonitor-http_version-optional + - name: name-optional + - max_retries: healthmonitor-max-retries + - max_retries_down: healthmonitor-max-retries-down-optional + - pool_id: pool-id + - project_id: project_id-optional-deprecated + - tags: tags-optional + - timeout: healthmonitor-timeout + - type: healthmonitor-type + - url_path: healthmonitor-url_path-optional + +Request Example +---------------- + +.. literalinclude:: examples/healthmonitor-create-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/healthmonitor-create-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - created_at: created_at + - delay: healthmonitor-delay + - domain_name: healthmonitor-domain_name + - expected_codes: healthmonitor-expected_codes + - http_method: healthmonitor-http_method + - http_version: healthmonitor-http_version + - id: healthmonitor-id + - max_retries: healthmonitor-max-retries + - max_retries_down: healthmonitor-max-retries-down + - name: name + - operating_status: operating_status + - pool_id: pool-id + - project_id: project_id + - provisioning_status: provisioning_status + - tags: tags + - timeout: healthmonitor-timeout + - type: healthmonitor-type + - updated_at: updated_at + - url_path: healthmonitor-url_path + +Response Example +---------------- + +.. literalinclude:: examples/healthmonitor-create-response.json + :language: javascript + +Show Health Monitor details +=========================== + +.. rest_method:: GET /v2/lbaas/healthmonitors/{healthmonitor_id} + +Shows the details of a health monitor. + +If you are not an administrative user and the parent load balancer does not +belong to your project, the service returns the HTTP ``Forbidden (403)`` +response code. + +This operation does not require a request body. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - healthmonitor_id: path-healthmonitor-id + +Curl Example +------------ + +.. literalinclude:: examples/healthmonitor-show-curl + :language: bash + +Response Parameters +------------------- + +.. 
rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - created_at: created_at + - delay: healthmonitor-delay + - domain_name: healthmonitor-domain_name + - expected_codes: healthmonitor-expected_codes + - http_method: healthmonitor-http_method + - http_version: healthmonitor-http_version + - id: healthmonitor-id + - max_retries: healthmonitor-max-retries + - max_retries_down: healthmonitor-max-retries-down + - name: name + - operating_status: operating_status + - pool_id: pool-id + - project_id: project_id + - provisioning_status: provisioning_status + - tags: tags + - timeout: healthmonitor-timeout + - type: healthmonitor-type + - updated_at: updated_at + - url_path: healthmonitor-url_path + +Response Example +---------------- + +.. literalinclude:: examples/healthmonitor-show-response.json + :language: javascript + +Update a Health Monitor +======================= + +.. rest_method:: PUT /v2/lbaas/healthmonitors/{healthmonitor_id} + +Update an existing health monitor. + +If the request is valid, the service returns the ``Accepted (202)`` +response code. To confirm the update, check that the health monitor +provisioning status is ``ACTIVE``. If the status is ``PENDING_UPDATE``, +use a GET operation to poll the health monitor object for changes. + +This operation returns the updated health monitor object with the +``ACTIVE``, ``PENDING_UPDATE``, or ``ERROR`` provisioning status. + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up-default-optional + - delay: healthmonitor-delay-optional + - domain_name: healthmonitor-domain_name-optional + - expected_codes: healthmonitor-expected_codes-optional + - healthmonitor_id: path-healthmonitor-id + - http_method: healthmonitor-http_method-optional + - http_version: healthmonitor-http_version-optional + - max_retries: healthmonitor-max-retries-optional + - max_retries_down: healthmonitor-max-retries-down-optional + - name: name-optional + - tags: tags-optional + - timeout: healthmonitor-timeout-optional + - url_path: healthmonitor-url_path-optional + +Request Example +--------------- + +.. literalinclude:: examples/healthmonitor-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/healthmonitor-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - created_at: created_at + - delay: healthmonitor-delay + - domain_name: healthmonitor-domain_name + - expected_codes: healthmonitor-expected_codes + - http_method: healthmonitor-http_method + - http_version: healthmonitor-http_version + - id: healthmonitor-id + - max_retries: healthmonitor-max-retries + - max_retries_down: healthmonitor-max-retries-down + - name: name + - operating_status: operating_status + - pool_id: pool-id + - project_id: project_id + - provisioning_status: provisioning_status + - tags: tags + - timeout: healthmonitor-timeout + - type: healthmonitor-type + - updated_at: updated_at + - url_path: healthmonitor-url_path + +Response Example +---------------- + +.. literalinclude:: examples/healthmonitor-update-response.json + :language: javascript + +Remove a Health Monitor +======================= + +.. 
rest_method:: DELETE /v2/lbaas/healthmonitors/{healthmonitor_id} + +Removes a health monitor and its associated configuration from the project. + +The API immediately purges any and all configuration data, depending on the +configuration settings. You cannot recover it. + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - healthmonitor_id: path-healthmonitor-id + +Curl Example +------------ + +.. literalinclude:: examples/healthmonitor-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request. diff --git a/api-ref/source/v2/index.rst b/api-ref/source/v2/index.rst new file mode 100644 index 0000000000..a081c4b19a --- /dev/null +++ b/api-ref/source/v2/index.rst @@ -0,0 +1,82 @@ +:tocdepth: 3 + +======================== +Octavia API v2 (Current) +======================== + +.. rest_expand_all:: + +-------------------- +General API Overview +-------------------- +.. include:: general.inc + +-------------- +Load Balancers +-------------- +.. include:: loadbalancer.inc + +--------- +Listeners +--------- +.. include:: listener.inc + +----- +Pools +----- +.. include:: pool.inc + +------- +Members +------- +.. include:: member.inc + +-------------- +Health Monitor +-------------- +.. include:: healthmonitor.inc + +----------- +L7 Policies +----------- +.. include:: l7policy.inc + +-------- +L7 Rules +-------- +.. include:: l7rule.inc + +------ +Quotas +------ +.. include:: quota.inc + +--------- +Providers +--------- +.. include:: provider.inc + +------- +Flavors +------- +.. include:: flavor.inc + +--------------- +Flavor Profiles +--------------- +.. include:: flavorprofile.inc + +------------------ +Availability Zones +------------------ +.. include:: availabilityzone.inc + +-------------------------- +Availability Zone Profiles +-------------------------- +.. include:: availabilityzoneprofile.inc + +-------- +Amphorae +-------- +.. include:: amphora.inc diff --git a/api-ref/source/v2/l7policy.inc b/api-ref/source/v2/l7policy.inc new file mode 100644 index 0000000000..30bbd78251 --- /dev/null +++ b/api-ref/source/v2/l7policy.inc @@ -0,0 +1,395 @@ +.. -*- rst -*- + +List L7 Policies +================ + +.. rest_method:: GET /v2/lbaas/l7policies + +Lists all L7 policies for the project. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +Administrative users can specify a project ID that is different than their own +to list L7 policies for other projects. + +The list might be empty. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - project_id: project_id_query + +Curl Example +------------ + +.. literalinclude:: examples/l7policies-list-curl + :language: bash + +Response Parameters +------------------- + +.. 
rest_parameters:: ../parameters.yaml + + - action: l7policy-action + - admin_state_up: admin_state_up + - created_at: created_at + - description: description + - id: l7policy-id + - listener_id: listener-id + - name: name + - operating_status: operating_status + - position: l7policy-position + - project_id: project_id + - provisioning_status: provisioning_status + - redirect_http_code: l7policy-redirect-http-code + - redirect_pool_id: l7policy-redirect-pool_id + - redirect_prefix: l7policy-redirect-prefix + - redirect_url: l7policy-redirect-url + - rules: l7policy-rule-ids + - tags: tags + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/l7policies-list-response.json + :language: javascript + +Create an L7 Policy +=================== + +.. rest_method:: POST /v2/lbaas/l7policies + +Creates a L7 policy. + +This operation provisions a new L7 policy by using the +configuration that you define in the request object. After the API +validates the request and starts the provisioning process, the API +returns a response object that contains a unique ID and the status +of provisioning the L7 policy. + +In the response, the L7 policy :ref:`provisioning status` is +``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``. + +If the status is ``PENDING_CREATE``, issue GET +``/v2/lbaas/l7policies/{l7policy_id}`` to view the progress of +the provisioning operation. When the L7 policy status changes +to ``ACTIVE``, the L7 policy is successfully provisioned and +is ready for further configuration. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +All the rules associated with a given policy are logically ANDed together. A +request must match all the policy's rules to match the policy. + +If you need to express a logical OR operation between rules, then do this by +creating multiple policies with the same action. + +If a new policy is created with a position that matches that of an existing +policy, then the new policy is inserted at the given position. + +L7 policies with ``action`` of ``REDIRECT_TO_URL`` will return the default HTTP +``Found (302)`` response code with the ``redirect_url``. Also, specify +``redirect_http_code`` to configure the needed HTTP response code, such as, +301, 302, 303, 307 and 308. + +L7 policies with ``action`` of ``REJECT`` will return a ``Forbidden (403)`` +response code to the requester. + +.. note:: + Pools of type ``SCTP``, ``TCP`` or ``UDP`` cannot be used in L7 + policies at this time. + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + - 503 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - action: l7policy-action + - admin_state_up: admin_state_up-default-optional + - description: description-optional + - listener_id: listener-id + - name: name-optional + - position: l7policy-position-optional + - project_id: project_id-optional + - redirect_http_code: l7policy-redirect-http-code-optional + - redirect_pool_id: l7policy-redirect-pool_id-optional + - redirect_prefix: l7policy-redirect-prefix-optional + - redirect_url: l7policy-redirect-url-optional + - tags: tags-optional + +Request Example +---------------- + +.. 
literalinclude:: examples/l7policy-create-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/l7policy-create-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - action: l7policy-action + - admin_state_up: admin_state_up + - created_at: created_at + - description: description + - id: l7policy-id + - listener_id: listener-id + - name: name + - operating_status: operating_status + - position: l7policy-position + - project_id: project_id + - provisioning_status: provisioning_status + - redirect_http_code: l7policy-redirect-http-code + - redirect_pool_id: l7policy-redirect-pool_id + - redirect_prefix: l7policy-redirect-prefix + - redirect_url: l7policy-redirect-url + - rules: l7policy-rule-ids + - tags: tags + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/l7policy-create-response.json + :language: javascript + +Show L7 Policy details +========================== + +.. rest_method:: GET /v2/lbaas/l7policies/{l7policy_id} + +Shows the details of a L7 policy. + +If you are not an administrative user and the L7 policy object does not +belong to your project, the service returns the HTTP ``Forbidden (403)`` +response code. + +This operation does not require a request body. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - l7policy_id: path-l7policy-id + +Curl Example +------------ + +.. literalinclude:: examples/l7policy-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - action: l7policy-action + - admin_state_up: admin_state_up + - created_at: created_at + - description: description + - id: l7policy-id + - listener_id: listener-id + - name: name + - operating_status: operating_status + - position: l7policy-position + - project_id: project_id + - provisioning_status: provisioning_status + - redirect_http_code: l7policy-redirect-http-code + - redirect_pool_id: l7policy-redirect-pool_id + - redirect_prefix: l7policy-redirect-prefix + - redirect_url: l7policy-redirect-url + - rules: l7policy-rule-ids + - tags: tags + - updated_at: updated_at + + +Response Example +---------------- + +.. literalinclude:: examples/l7policy-show-response.json + :language: javascript + +Update a L7 Policy +================== + +.. rest_method:: PUT /v2/lbaas/l7policies/{l7policy_id} + +Updates a L7 policy. + +If the request is valid, the service returns the ``Accepted (202)`` +response code. To confirm the update, check that the L7 policy +provisioning status is ``ACTIVE``. If the status is +``PENDING_UPDATE``, use a GET operation to poll the L7 policy +object for changes. + +This operation returns the updated L7 policy object with the +``ACTIVE``, ``PENDING_UPDATE``, or ``ERROR`` provisioning status. + +If a policy is updated with a position that matches that of an existing +policy, then the updated policy is inserted at the given position. + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. 
rest_parameters:: ../parameters.yaml + + - action: l7policy-action-optional + - admin_state_up: admin_state_up-default-optional + - description: description-optional + - l7policy_id: path-l7policy-id + - name: name-optional + - position: l7policy-position-optional + - redirect_http_code: l7policy-redirect-http-code-optional + - redirect_pool_id: l7policy-redirect-pool_id-optional + - redirect_prefix: l7policy-redirect-prefix-optional + - redirect_url: l7policy-redirect-url-optional + - tags: tags-optional + +Request Example +--------------- + +.. literalinclude:: examples/l7policy-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/l7policy-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - action: l7policy-action + - admin_state_up: admin_state_up + - created_at: created_at + - description: description + - id: l7policy-id + - listener_id: listener-id + - name: name + - operating_status: operating_status + - position: l7policy-position + - project_id: project_id + - provisioning_status: provisioning_status + - redirect_http_code: l7policy-redirect-http-code + - redirect_pool_id: l7policy-redirect-pool_id + - redirect_prefix: l7policy-redirect-prefix + - redirect_url: l7policy-redirect-url + - rules: l7policy-rule-ids + - tags: tags + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/l7policy-update-response.json + :language: javascript + +Remove a L7 Policy +================== + +.. rest_method:: DELETE /v2/lbaas/l7policies/{l7policy_id} + +Removes a L7 policy and its associated configuration from the project. + +The API immediately purges any and all configuration data, depending on the +configuration settings. You cannot recover it. + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - l7policy_id: path-l7policy-id + +Curl Example +------------ + +.. literalinclude:: examples/l7policy-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request. diff --git a/api-ref/source/v2/l7rule.inc b/api-ref/source/v2/l7rule.inc new file mode 100644 index 0000000000..70d3d5e0a1 --- /dev/null +++ b/api-ref/source/v2/l7rule.inc @@ -0,0 +1,354 @@ +.. -*- rst -*- + +List L7 Rules +============= + +.. rest_method:: GET /v2/lbaas/l7policies/{l7policy_id}/rules + +Lists all L7 rules for the project. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +Administrative users can specify a project ID that is different than their own +to list L7 policies for other projects. + +The list might be empty. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - l7policy_id: path-l7policy-id + - project_id: project_id_query + +Curl Example +------------ + +.. literalinclude:: examples/l7rules-list-curl + :language: bash + +Response Parameters +------------------- + +.. 
rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - compare_type: l7rule-compare_type + - created_at: created_at + - id: l7rule-id + - invert: l7rule-invert + - key: l7rule-key + - operating_status: operating_status + - project_id: project_id + - provisioning_status: provisioning_status + - tags: tags + - type: l7rule-type + - updated_at: updated_at + - value: l7rule-value + +Response Example +---------------- + +.. literalinclude:: examples/l7rules-list-response.json + :language: javascript + +Create an L7 Rule +================= + +.. rest_method:: POST /v2/lbaas/l7policies/{l7policy_id}/rules + +Creates a L7 rule. + +This operation provisions a new L7 rule by using the +configuration that you define in the request object. After the API +validates the request and starts the provisioning process, the API +returns a response object that contains a unique ID and the status +of provisioning the L7 rule. + +In the response, the L7 rule :ref:`provisioning status` is +``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``. + +If the status is ``PENDING_CREATE``, issue GET +``/v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}`` to view the +progress of the provisioning operation. When the L7 rule status changes +to ``ACTIVE``, the L7 rule is successfully provisioned and +is ready for further configuration. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +All the rules associated with a given policy are logically ANDed together. A +request must match all the policy's rules to match the policy. + +If you need to express a logical OR operation between rules, then do this by +creating multiple policies with the same action. + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + - 503 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up-default-optional + - compare_type: l7rule-compare_type + - invert: l7rule-invert-optional + - key: l7rule-key-optional + - l7policy_id: path-l7policy-id + - project_id: project_id-optional + - tags: tags-optional + - type: l7rule-type + - value: l7rule-value + +Request Example +---------------- + +.. literalinclude:: examples/l7rule-create-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/l7rule-create-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - compare_type: l7rule-compare_type + - created_at: created_at + - id: l7rule-id + - invert: l7rule-invert + - key: l7rule-key + - operating_status: operating_status + - project_id: project_id + - provisioning_status: provisioning_status + - tags: tags + - type: l7rule-type + - updated_at: updated_at + - value: l7rule-value + +Response Example +---------------- + +.. literalinclude:: examples/l7rule-create-response.json + :language: javascript + +Show L7 Rule details +========================== + +.. rest_method:: GET /v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id} + +Shows the details of a L7 rule. 
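+A sketch of this call with curl (the token and both IDs are placeholders;
+the canonical request is in the Curl Example below):
+
+.. code:: bash
+
+   # Show one rule that belongs to a given L7 policy.
+   curl -H "X-Auth-Token: $TOKEN" \
+       "http://198.51.100.10:9876/v2/lbaas/l7policies/$L7POLICY_ID/rules/$L7RULE_ID"
+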
+ +If you are not an administrative user and the L7 rule object does not +belong to your project, the service returns the HTTP ``Forbidden (403)`` +response code. + +This operation does not require a request body. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - l7policy_id: path-l7policy-id + - l7rule_id: path-l7rule-id + +Curl Example +------------ + +.. literalinclude:: examples/l7rule-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - compare_type: l7rule-compare_type + - created_at: created_at + - id: l7rule-id + - invert: l7rule-invert + - key: l7rule-key + - operating_status: operating_status + - project_id: project_id + - provisioning_status: provisioning_status + - tags: tags + - type: l7rule-type + - updated_at: updated_at + - value: l7rule-value + +Response Example +---------------- + +.. literalinclude:: examples/l7rule-show-response.json + :language: javascript + +Update a L7 Rule +================ + +.. rest_method:: PUT /v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id} + +Updates a L7 rule. + +If the request is valid, the service returns the ``Accepted (202)`` +response code. To confirm the update, check that the L7 rule +provisioning status is ``ACTIVE``. If the status is +``PENDING_UPDATE``, use a GET operation to poll the L7 rule +object for changes. + +This operation returns the updated L7 rule object with the +``ACTIVE``, ``PENDING_UPDATE``, or ``ERROR`` provisioning status. + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up-default-optional + - compare_type: l7rule-compare_type-optional + - invert: l7rule-invert-optional + - key: l7rule-key-optional + - l7policy_id: path-l7policy-id + - l7rule_id: path-l7rule-id + - tags: tags-optional + - type: l7rule-type-optional + - value: l7rule-value-optional + +Request Example +--------------- + +.. literalinclude:: examples/l7rule-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/l7rule-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - compare_type: l7rule-compare_type + - created_at: created_at + - id: l7rule-id + - invert: l7rule-invert + - key: l7rule-key + - operating_status: operating_status + - project_id: project_id + - provisioning_status: provisioning_status + - tags: tags + - type: l7rule-type + - updated_at: updated_at + - value: l7rule-value + +Response Example +---------------- + +.. literalinclude:: examples/l7rule-update-response.json + :language: javascript + +Remove a L7 Rule +================ + +.. rest_method:: DELETE /v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id} + +Removes a L7 rule and its associated configuration from the project. + +The API immediately purges any and all configuration data, depending on the +configuration settings. You cannot recover it. + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. 
rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - l7policy_id: path-l7policy-id + - l7rule_id: path-l7rule-id + +Curl Example +------------ + +.. literalinclude:: examples/l7rule-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request. diff --git a/api-ref/source/v2/listener.inc b/api-ref/source/v2/listener.inc new file mode 100644 index 0000000000..d741095029 --- /dev/null +++ b/api-ref/source/v2/listener.inc @@ -0,0 +1,613 @@ +.. -*- rst -*- + +List Listeners +============== + +.. rest_method:: GET /v2/lbaas/listeners + +Lists all listeners for the project. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +Administrative users can specify a project ID that is different than their own +to list listeners for other projects. + +The list might be empty. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - project_id: project_id_query + +Curl Example +------------ + +.. literalinclude:: examples/listeners-list-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - allowed_cidrs: allowed_cidrs + - alpn_protocols: alpn_protocols-listener + - client_authentication: client_authentication + - client_ca_tls_container_ref: client_ca_tls_container_ref + - client_crl_container_ref: client_crl_container_ref + - connection_limit: connection_limit + - created_at: created_at + - default_pool_id: default_pool_id + - default_tls_container_ref: default_tls_container_ref + - description: description + - hsts_include_subdomains: hsts_include_subdomains + - hsts_max_age: hsts_max_age + - hsts_preload: hsts_preload + - id: listener-id + - insert_headers: insert_headers + - l7policies: l7policy-ids + - listener: listener + - loadbalancers: loadbalancer-ids + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol: protocol + - protocol_port: protocol_port + - provisioning_status: provisioning_status + - sni_container_refs: sni_container_refs + - tags: tags + - timeout_client_data: timeout_client_data + - timeout_member_connect: timeout_member_connect + - timeout_member_data: timeout_member_data + - timeout_tcp_inspect: timeout_tcp_inspect + - tls_ciphers: tls_ciphers + - tls_versions: tls_versions + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/listeners-list-response.json + :language: javascript + +Create Listener +=============== + +.. rest_method:: POST /v2/lbaas/listeners + +Creates a listener for a load balancer. + +The listener configures a port and protocol for the load balancer to listen +on for incoming requests. A load balancer may have zero or more listeners +configured. + +This operation provisions a new listener by using the configuration that +you define in the request object. After the API validates the request and +starts the provisioning process, the API returns a response object that +contains a unique ID and the status of provisioning the listener. 
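+ +As a minimal sketch, a create request needs only a protocol, a port, and the +parent load balancer, wrapped in the standard ``listener`` root element (the +UUID below is a placeholder; the request example referenced below shows the +remaining attributes): + +.. code-block:: javascript + + { + "listener": { + "protocol": "HTTP", + "protocol_port": 80, + "loadbalancer_id": "8a562351-f0fb-424c-a0af-513461424ea5" + } + }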
+ +In the response, the listener :ref:`provisioning status` is +``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``. + +If the status is ``PENDING_CREATE``, issue GET +``/v2/lbaas/listeners/{listener_id}`` to view the progress of +the provisioning operation. When the listener status changes +to ``ACTIVE``, the listener is successfully provisioned and +is ready for further configuration. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +Specifying a ``project_id`` is deprecated. The listener will inherit +the ``project_id`` of the parent load balancer. + +You can configure all documented features of the listener at creation time by +specifying the additional elements or attributes in the request. + +To create a listener, the parent load balancer must have an ``ACTIVE`` +provisioning status. + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + - 503 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up-default-optional + - allowed_cidrs: allowed_cidrs-optional + - alpn_protocols: alpn_protocols-listener-optional + - client_authentication: client_authentication-optional + - client_ca_tls_container_ref: client_ca_tls_container_ref-optional + - client_crl_container_ref: client_crl_container_ref-optional + - connection_limit: connection_limit-optional + - default_pool: pool-optional + - default_pool_id: default_pool_id-optional + - default_tls_container_ref: default_tls_container_ref-optional + - description: description-optional + - hsts_include_subdomains: hsts_include_subdomains-optional + - hsts_max_age: hsts_max_age-optional + - hsts_preload: hsts_preload-optional + - insert_headers: insert_headers-optional + - l7policies: l7policies-optional + - listeners: listener + - loadbalancer_id: loadbalancer-id + - name: name-optional + - project_id: project_id-optional-deprecated + - protocol: protocol + - protocol_port: protocol_port + - sni_container_refs: sni_container_refs-optional + - tags: tags-optional + - timeout_client_data: timeout_client_data-optional + - timeout_member_connect: timeout_member_connect-optional + - timeout_member_data: timeout_member_data-optional + - timeout_tcp_inspect: timeout_tcp_inspect-optional + - tls_ciphers: tls_ciphers-optional + - tls_versions: tls_versions-optional + +.. _header_insertions: + +Supported HTTP Header Insertions +-------------------------------- + +.. note:: + Both the keys and the values are always specified as strings when specifying + header insertions. + + ++-------------------------+--------+------------------------------------------------+ +| Key | Value | Description | ++=========================+========+================================================+ +| X-Forwarded-For | string | When "``true``" an ``X-Forwarded-For`` header | +| | | is inserted into the request to the backend | +| | | ``member`` that specifies the client IP | +| | | address. | ++-------------------------+--------+------------------------------------------------+ +| X-Forwarded-Port | string | When "``true``" an ``X-Forwarded-Port`` header | +| | | is inserted into the request to the backend | +| | | ``member`` that specifies the listener port.
| ++-------------------------+--------+------------------------------------------------+ +| X-Forwarded-Proto | string | When "``true``" an ``X-Forwarded-Proto`` header| +| | | is inserted into the request to the backend | +| | | ``member``. HTTP for the HTTP listener | +| | | protocol type, HTTPS for the TERMINATED_HTTPS | +| | | listener protocol type. | +| | | **New in version 2.1** | ++-------------------------+--------+------------------------------------------------+ +| X-SSL-Client-Verify | string | When "``true``" an ``X-SSL-Client-Verify`` | +| | | header is inserted into the request to the | +| | | backend ``member`` that contains 0 if the | +| | | client authentication was successful, or a | +| | | result error number greater than 0 that aligns | +| | | with the OpenSSL verify error codes. | ++-------------------------+--------+------------------------------------------------+ +| X-SSL-Client-Has-Cert | string | When "``true``" an ``X-SSL-Client-Has-Cert`` | +| | | header is inserted into the request to the | +| | | backend ``member`` that is ``true`` if a client| +| | | authentication certificate was presented, and | +| | | ``false`` if not. Does not indicate validity. | ++-------------------------+--------+------------------------------------------------+ +| X-SSL-Client-DN | string | When "``true``" an ``X-SSL-Client-DN`` header | +| | | is inserted into the request to the backend | +| | | ``member`` that contains the full | +| | | Distinguished Name of the certificate | +| | | presented by the client. | ++-------------------------+--------+------------------------------------------------+ +| X-SSL-Client-CN | string | When "``true``" an ``X-SSL-Client-CN`` header | +| | | is inserted into the request to the backend | +| | | ``member`` that contains the Common Name from | +| | | the full Distinguished Name of the certificate | +| | | presented by the client. | ++-------------------------+--------+------------------------------------------------+ +| X-SSL-Issuer | string | When "``true``" an ``X-SSL-Issuer`` header is | +| | | inserted into the request to the backend | +| | | ``member`` that contains the full | +| | | Distinguished Name of the client certificate | +| | | issuer. | ++-------------------------+--------+------------------------------------------------+ +| X-SSL-Client-SHA1 | string | When "``true``" an ``X-SSL-Client-SHA1`` header| +| | | is inserted into the request to the backend | +| | | ``member`` that contains the SHA-1 fingerprint | +| | | of the certificate presented by the client in | +| | | hex string format. | ++-------------------------+--------+------------------------------------------------+ +| X-SSL-Client-Not-Before | string | When "``true``" an ``X-SSL-Client-Not-Before`` | +| | | header is inserted into the request to the | +| | | backend ``member`` that contains the start | +| | | date presented by the client as a formatted | +| | | string YYMMDDhhmmss[Z]. | ++-------------------------+--------+------------------------------------------------+ +| X-SSL-Client-Not-After | string | When "``true``" an ``X-SSL-Client-Not-After`` | +| | | header is inserted into the request to the | +| | | backend ``member`` that contains the end date | +| | | presented by the client as a formatted string | +| | | YYMMDDhhmmss[Z]. | ++-------------------------+--------+------------------------------------------------+ + +Request Example +--------------- + +.. literalinclude:: examples/listener-create-request.json + :language: javascript + +Curl Example +------------ + +..
literalinclude:: examples/listener-create-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - allowed_cidrs: allowed_cidrs + - alpn_protocols: alpn_protocols-listener + - client_authentication: client_authentication + - client_ca_tls_container_ref: client_ca_tls_container_ref + - client_crl_container_ref: client_crl_container_ref + - connection_limit: connection_limit + - created_at: created_at + - default_pool_id: default_pool_id + - default_tls_container_ref: default_tls_container_ref + - description: description + - hsts_include_subdomains: hsts_include_subdomains + - hsts_max_age: hsts_max_age + - hsts_preload: hsts_preload + - id: listener-id + - insert_headers: insert_headers + - l7policies: l7policy-ids + - listener: listener + - loadbalancers: loadbalancer-ids + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol: protocol + - protocol_port: protocol_port + - provisioning_status: provisioning_status + - sni_container_refs: sni_container_refs + - tags: tags + - timeout_client_data: timeout_client_data + - timeout_member_connect: timeout_member_connect + - timeout_member_data: timeout_member_data + - timeout_tcp_inspect: timeout_tcp_inspect + - tls_ciphers: tls_ciphers + - tls_versions: tls_versions + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/listener-create-response.json + :language: javascript + +Show Listener details +===================== + +.. rest_method:: GET /v2/lbaas/listeners/{listener_id} + +Shows the details of a listener. + +If you are not an administrative user and the parent load balancer does not +belong to your project, the service returns the HTTP ``Forbidden (403)`` +response code. + +This operation does not require a request body. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - listener_id: path-listener-id + +Curl Example +------------ + +.. literalinclude:: examples/listener-show-curl + :language: bash + +Response Parameters +------------------- + +.. 
rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - allowed_cidrs: allowed_cidrs + - alpn_protocols: alpn_protocols-listener + - client_authentication: client_authentication + - client_ca_tls_container_ref: client_ca_tls_container_ref + - client_crl_container_ref: client_crl_container_ref + - connection_limit: connection_limit + - created_at: created_at + - default_pool_id: default_pool_id + - default_tls_container_ref: default_tls_container_ref + - description: description + - hsts_include_subdomains: hsts_include_subdomains + - hsts_max_age: hsts_max_age + - hsts_preload: hsts_preload + - id: listener-id + - insert_headers: insert_headers + - l7policies: l7policy-ids + - listener: listener + - loadbalancers: loadbalancer-ids + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol: protocol + - protocol_port: protocol_port + - provisioning_status: provisioning_status + - sni_container_refs: sni_container_refs + - tags: tags + - timeout_client_data: timeout_client_data + - timeout_member_connect: timeout_member_connect + - timeout_member_data: timeout_member_data + - timeout_tcp_inspect: timeout_tcp_inspect + - tls_ciphers: tls_ciphers + - tls_versions: tls_versions + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/listener-show-response.json + :language: javascript + +Update a Listener +================= + +.. rest_method:: PUT /v2/lbaas/listeners/{listener_id} + +Update an existing listener. + +If the request is valid, the service returns the ``Accepted (202)`` +response code. To confirm the update, check that the listener provisioning +status is ``ACTIVE``. If the status is ``PENDING_UPDATE``, use a GET +operation to poll the listener object for changes. + +This operation returns the updated listener object with the +``ACTIVE``, ``PENDING_UPDATE``, or ``ERROR`` provisioning status. + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up-default-optional + - allowed_cidrs: allowed_cidrs-optional + - alpn_protocols: alpn_protocols-listener-optional + - client_authentication: client_authentication-optional + - client_ca_tls_container_ref: client_ca_tls_container_ref-optional + - client_crl_container_ref: client_crl_container_ref-optional + - connection_limit: connection_limit-optional + - default_pool_id: default_pool_id-optional + - default_tls_container_ref: default_tls_container_ref-optional + - description: description-optional + - hsts_include_subdomains: hsts_include_subdomains-optional + - hsts_max_age: hsts_max_age-optional + - hsts_preload: hsts_preload-optional + - insert_headers: insert_headers-optional + - listener_id: path-listener-id + - name: name-optional + - sni_container_refs: sni_container_refs-optional + - tags: tags-optional + - timeout_client_data: timeout_client_data-optional + - timeout_member_connect: timeout_member_connect-optional + - timeout_member_data: timeout_member_data-optional + - timeout_tcp_inspect: timeout_tcp_inspect-optional + - tls_ciphers: tls_ciphers-optional + - tls_versions: tls_versions-optional + +Request Example +--------------- + +.. literalinclude:: examples/listener-update-request.json + :language: javascript + +Curl Example +------------ + +.. 
literalinclude:: examples/listener-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - allowed_cidrs: allowed_cidrs + - alpn_protocols: alpn_protocols-listener + - client_authentication: client_authentication + - client_ca_tls_container_ref: client_ca_tls_container_ref + - client_crl_container_ref: client_crl_container_ref + - connection_limit: connection_limit + - created_at: created_at + - default_pool_id: default_pool_id + - default_tls_container_ref: default_tls_container_ref + - description: description + - hsts_include_subdomains: hsts_include_subdomains + - hsts_max_age: hsts_max_age + - hsts_preload: hsts_preload + - id: listener-id + - insert_headers: insert_headers + - l7policies: l7policy-ids + - listener: listener + - loadbalancers: loadbalancer-ids + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol: protocol + - protocol_port: protocol_port + - provisioning_status: provisioning_status + - sni_container_refs: sni_container_refs + - tags: tags + - timeout_client_data: timeout_client_data + - timeout_member_connect: timeout_member_connect + - timeout_member_data: timeout_member_data + - timeout_tcp_inspect: timeout_tcp_inspect + - tls_ciphers: tls_ciphers + - tls_versions: tls_versions + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/listener-update-response.json + :language: javascript + +Remove a Listener +================= + +.. rest_method:: DELETE /v2/lbaas/listeners/{listener_id} + +Removes a listener and its associated configuration from the project. + +The API immediately purges any and all configuration data, depending on the +configuration settings. You cannot recover it. + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - listener_id: path-listener-id + +Curl Example +------------ + +.. literalinclude:: examples/listener-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request. + +Get Listener statistics +======================= + +.. rest_method:: GET /v2/lbaas/listeners/{listener_id}/stats + +Shows the current statistics for a listener. + +This operation returns the statistics of a listener object identified +by listener_id. + +If you are not an administrative user and the parent load balancer does not +belong to your project, the service returns the HTTP ``Forbidden (403)`` +response code. + +This operation does not require a request body. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - listener_id: path-listener-id + +Curl Example +------------ + +.. literalinclude:: examples/listener-stats-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - stats: stats + - active_connections: active_connections + - bytes_in: bytes_in + - bytes_out: bytes_out + - request_errors: request_errors + - total_connections: total_connections + +Response Example +---------------- + +.. 
literalinclude:: examples/listener-stats-response.json + :language: javascript diff --git a/api-ref/source/v2/loadbalancer.inc b/api-ref/source/v2/loadbalancer.inc new file mode 100644 index 0000000000..feb5627211 --- /dev/null +++ b/api-ref/source/v2/loadbalancer.inc @@ -0,0 +1,649 @@ +.. -*- rst -*- + +List Load Balancers +=================== + +.. rest_method:: GET /v2/lbaas/loadbalancers + +Lists all load balancers for the project. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +Administrative users can specify a project ID that is different than their own +to list load balancers for other projects. + +The list might be empty. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - project_id: project_id_query + +Curl Example +------------ + +.. literalinclude:: examples/loadbalancers-list-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - additional_vips: additional_vips + - admin_state_up: admin_state_up + - availability_zone: availability-zone-name + - created_at: created_at + - description: description + - flavor_id: flavor-id + - id: loadbalancer-id + - listeners: listeners + - loadbalancers: loadbalancers + - name: name + - operating_status: operating_status + - pools: pools_ids + - project_id: project_id + - provider: provider + - provisioning_status: provisioning_status + - tags: tags + - updated_at: updated_at + - vip_address: vip_address + - vip_network_id: vip_network_id + - vip_port_id: vip_port_id + - vip_qos_policy_id: vip_qos_policy_id + - vip_subnet_id: vip_subnet_id + - vip_sg_ids: vip_sg_ids + - vip_vnic_type: vip_vnic_type + +Response Example +---------------- + +.. literalinclude:: examples/loadbalancers-list-response.json + :language: javascript + +Create a Load Balancer +====================== + +.. rest_method:: POST /v2/lbaas/loadbalancers + +Creates a load balancer. + +This operation provisions a new load balancer by using the +configuration that you define in the request object. After the API +validates the request and starts the provisioning process, the API +returns a response object that contains a unique ID and the status +of provisioning the load balancer. + +In the response, the load balancer :ref:`provisioning status` is +``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``. + +If the status is ``PENDING_CREATE``, issue GET +``/v2/lbaas/loadbalancers/{loadbalancer_id}`` to view the progress of +the provisioning operation. When the load balancer status changes +to ``ACTIVE``, the load balancer is successfully provisioned and +is ready for further configuration. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +Administrative users can specify a project ID that is different than +their own to create load balancers for other projects. + +An optional ``flavor_id`` attribute can be used to create the load balancer +using a pre-configured octavia flavor. 
Flavors are created by the operator +to allow custom load balancer configurations, such as allocating more +memory for the load balancer. + +An optional ``vip_qos_policy_id`` attribute from Neutron can be used to +apply a QoS policy on a load balancer VIP; passing a ``null`` value +removes an existing QoS policy. + +You can also specify the ``provider`` attribute when you create a +load balancer. The ``provider`` attribute specifies which backend should +be used to create the load balancer. This could be the default provider +(``octavia``) or a vendor supplied ``provider`` if one has been installed. +Setting both a ``flavor_id`` and a ``provider`` will result in a conflict +error if the provider does not match the provider of the configured flavor +profiles. + +Specifying a Virtual IP (VIP) is mandatory. There are three ways to specify +a VIP network for the load balancer: + +1. Provide a ``vip_port_id``. + + Providing a neutron port ID for the ``vip_port_id`` tells + octavia to use this port for the VIP. Some port settings may be changed or + removed as required by octavia, but the IP address will be retained. If the + port has more than one subnet you must specify either the ``vip_subnet_id`` + or ``vip_address`` to clarify which address should be used for the VIP. + +2. Provide a ``vip_network_id``. + + When a ``vip_network_id`` is specified and neither ``vip_subnet_id`` + nor ``vip_address`` is specified, octavia will select a subnet from + the network, preferring IPv4 over IPv6 subnets. + +3. Provide a ``vip_subnet_id``. + + Specifying a neutron subnet ID will tell octavia to create a neutron port + on this subnet and allocate an IP address from the subnet if the + ``vip_address`` was not specified. If ``vip_address`` was specified, + octavia will attempt to allocate the ``vip_address`` from the subnet for + the VIP address. + +Additional VIPs may also be specified in the ``additional_vips`` field, by +providing a list of JSON objects containing a ``subnet_id`` and optionally +an ``ip_address``. All additional subnets must be part of the same network +as the primary VIP. + +An optional ``vip_sg_ids`` attribute can be used to set custom Neutron Security +Groups that are applied on the VIP port of the Load Balancer. When this option +is used, Octavia does not manage the security of the listeners; the user +must set Security Group Rules to allow the network traffic on the VIP port. +``vip_sg_ids`` are incompatible with SR-IOV load balancers and cannot be set if +the load balancer has a listener that uses ``allowed_cidrs``. + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 500 + - 503 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - additional_vips: additional_vips + - admin_state_up: admin_state_up-default-optional + - availability_zone: availability-zone-name-optional + - description: description-optional + - flavor_id: flavor-id-optional + - listeners: listeners-optional + - loadbalancer: loadbalancer + - name: name-optional + - project_id: project_id-optional + - provider: provider-optional + - tags: tags-optional + - vip_address: vip_address-optional + - vip_network_id: vip_network_id-optional + - vip_port_id: vip_port_id-optional + - vip_qos_policy_id: vip_qos_policy_id-optional + - vip_subnet_id: vip_subnet_id-optional + - vip_sg_ids: vip_sg_ids-optional + +Request Example +--------------- + +..
literalinclude:: examples/loadbalancer-create-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/loadbalancer-create-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - additional_vips: additional_vips + - admin_state_up: admin_state_up + - availability_zone: availability-zone-name + - created_at: created_at + - description: description + - flavor_id: flavor-id + - id: loadbalancer-id + - listeners: listeners + - loadbalancer: loadbalancer + - name: name + - operating_status: operating_status + - pools: pools_ids + - project_id: project_id + - provider: provider + - provisioning_status: provisioning_status + - tags: tags + - updated_at: updated_at + - vip_address: vip_address + - vip_network_id: vip_network_id + - vip_port_id: vip_port_id + - vip_qos_policy_id: vip_qos_policy_id + - vip_subnet_id: vip_subnet_id + - vip_sg_ids: vip_sg_ids + - vip_vnic_type: vip_vnic_type + +Response Example +---------------- + +.. literalinclude:: examples/loadbalancer-create-response.json + :language: javascript + +Creating a Fully Populated Load Balancer +---------------------------------------- + +You can configure all documented features of the load balancer at +creation time by specifying the additional elements or attributes +in the request. + +Note: all pools must have names, and must only be fully defined once. To +reference a pool from multiple objects, supply the pool name only for all +subsequent references. + +Request Example +--------------- + +.. literalinclude:: examples/loadbalancer-full-create-request.json + :language: javascript + +Response Example +---------------- + +.. literalinclude:: examples/loadbalancer-full-create-response.json + :language: javascript + +Show Load Balancer details +========================== + +.. rest_method:: GET /v2/lbaas/loadbalancers/{loadbalancer_id} + +Shows the details of a load balancer. + +If you are not an administrative user and the load balancer object does not +belong to your project, the service returns the HTTP ``Forbidden (403)`` +response code. + +This operation does not require a request body. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - loadbalancer_id: path-loadbalancer-id + +Curl Example +------------ + +.. literalinclude:: examples/loadbalancer-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - additional_vips: additional_vips + - admin_state_up: admin_state_up + - availability_zone: availability-zone-name + - created_at: created_at + - description: description + - flavor_id: flavor-id + - id: loadbalancer-id + - loadbalancer: loadbalancer + - listeners: listeners + - name: name + - operating_status: operating_status + - pools: pools_ids + - project_id: project_id + - provider: provider + - provisioning_status: provisioning_status + - tags: tags + - updated_at: updated_at + - vip_address: vip_address + - vip_network_id: vip_network_id + - vip_port_id: vip_port_id + - vip_qos_policy_id: vip_qos_policy_id + - vip_subnet_id: vip_subnet_id + - vip_sg_ids: vip_sg_ids + - vip_vnic_type: vip_vnic_type + +Response Example +---------------- + +.. literalinclude:: examples/loadbalancer-show-response.json + :language: javascript + +Update a Load Balancer +====================== + +.. 
rest_method:: PUT /v2/lbaas/loadbalancers/{loadbalancer_id} + +Updates a load balancer. + +If the request is valid, the service returns the ``Accepted (202)`` +response code. To confirm the update, check that the load balancer +provisioning status is ``ACTIVE``. If the status is +``PENDING_UPDATE``, use a GET operation to poll the load balancer +object for changes. + +This operation returns the updated load balancer object with the +``ACTIVE``, ``PENDING_UPDATE``, or ``ERROR`` provisioning status. + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up-optional + - description: description-optional + - loadbalancer: loadbalancer + - loadbalancer_id: path-loadbalancer-id + - name: name-optional + - tags: tags-optional + - vip_qos_policy_id: vip_qos_policy_id-optional + +Request Example +--------------- + +.. literalinclude:: examples/loadbalancer-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/loadbalancer-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - additional_vips: additional_vips + - admin_state_up: admin_state_up + - created_at: created_at + - description: description + - flavor_id: flavor-id + - id: loadbalancer-id + - listeners: listeners + - loadbalancer: loadbalancer + - name: name + - operating_status: operating_status + - pools: pools_ids + - project_id: project_id + - provider: provider + - provisioning_status: provisioning_status + - tags: tags + - updated_at: updated_at + - vip_address: vip_address + - vip_network_id: vip_network_id + - vip_port_id: vip_port_id + - vip_qos_policy_id: vip_qos_policy_id + - vip_subnet_id: vip_subnet_id + - vip_sg_ids: vip_sg_ids + - vip_vnic_type: vip_vnic_type + +Response Example +---------------- + +.. literalinclude:: examples/loadbalancer-update-response.json + :language: javascript + +Remove a Load Balancer +====================== + +.. rest_method:: DELETE /v2/lbaas/loadbalancers/{loadbalancer_id} + +Removes a load balancer and its associated configuration from the project. + +The optional parameter ``cascade`` when defined as ``true`` will delete all +child objects of the load balancer. + +The API immediately purges any and all configuration data, depending on the +configuration settings. You cannot recover it. + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - cascade: cascade-delete + - loadbalancer_id: path-loadbalancer-id + +Curl Example +------------ + +.. literalinclude:: examples/loadbalancer-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request. + +Get Load Balancer statistics +============================ + +.. rest_method:: GET /v2/lbaas/loadbalancers/{loadbalancer_id}/stats + +Shows the current statistics for a load balancer. + +This operation returns the statistics of a load balancer object identified +by loadbalancer_id. + +If you are not an administrative user and the load balancer object does not +belong to your project, the service returns the HTTP ``Forbidden (403)`` +response code. + +This operation does not require a request body. 
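+ +The statistics are returned wrapped in a ``stats`` object. As an illustrative +sketch of the response shape (the counter values here are arbitrary): + +.. code-block:: javascript + + { + "stats": { + "active_connections": 0, + "bytes_in": 65671420, + "bytes_out": 774771186, + "request_errors": 0, + "total_connections": 26189172 + } + }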
+ +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - loadbalancer_id: path-loadbalancer-id + +Curl Example +------------ + +.. literalinclude:: examples/loadbalancer-stats-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - stats: stats + - active_connections: active_connections + - bytes_in: bytes_in + - bytes_out: bytes_out + - request_errors: request_errors + - total_connections: total_connections + +Response Example +---------------- + +.. literalinclude:: examples/loadbalancer-stats-response.json + :language: javascript + +Get the Load Balancer status tree +================================= + +.. rest_method:: GET /v2/lbaas/loadbalancers/{loadbalancer_id}/status + +Shows the status tree for a load balancer. + +This operation returns a status tree for a load balancer object, by load +balancer ID. + +``provisioning_status`` is the status associated with the lifecycle of the +resource. See :ref:`prov_status` for descriptions of the status codes. + +``operating_status`` is the observed status of the resource. +See :ref:`op_status` for descriptions of the status codes. + +If you are not an administrative user and the load balancer object does not +belong to your project, the service returns the HTTP ``Forbidden (403)`` +response code. + +If the operation succeeds, the returned element is a status tree that contains +the load balancer and all provisioning and operating statuses for its children. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - loadbalancer_id: path-loadbalancer-id + +Curl Example +------------ + +.. literalinclude:: examples/loadbalancer-status-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - action: action + - address: address + - healthmonitor: healthmonitor-status + - id: id + - l7policies: l7policies-status-object-list + - l7rules: l7rules-status-object-list + - listeners: listeners-status-object-list + - loadbalancer: loadbalancer-status + - members: members-status-object-list + - name: name + - operating_status: operating_status + - pools: pools-status-list + - protocol_port: protocol_port + - provisioning_status: provisioning_status + - statuses: statuses + - type: type + +Response Example +---------------- + +.. literalinclude:: examples/loadbalancer-status-response.json + :language: javascript + +Failover a load balancer +======================== + +.. rest_method:: PUT /v2/lbaas/loadbalancers/{loadbalancer_id}/failover + +Performs a failover of a load balancer. + +This operation is only available to users with load balancer administrative +rights. + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - loadbalancer_id: path-loadbalancer-id + +Curl Example +------------ + +.. literalinclude:: examples/loadbalancer-failover-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful failover request.
diff --git a/api-ref/source/v2/member.inc b/api-ref/source/v2/member.inc new file mode 100644 index 0000000000..291ca27414 --- /dev/null +++ b/api-ref/source/v2/member.inc @@ -0,0 +1,484 @@ +.. -*- rst -*- + +List Members +============ + +.. rest_method:: GET /v2/lbaas/pools/{pool_id}/members + +Lists all members for the project. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +Administrative users can specify a project ID that is different than their own +to list members for other projects. + +The list might be empty. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - pool_id: path-pool-id + - project_id: project_id_query + +Curl Example +------------ + +.. literalinclude:: examples/members-list-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - address: address-member + - admin_state_up: admin_state_up + - backup: backup + - created_at: created_at + - id: member-id + - monitor_address: monitor_address + - monitor_port: monitor_port + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol_port: protocol_port-member + - provisioning_status: provisioning_status + - subnet_id: subnet_id + - tags: tags + - updated_at: updated_at + - weight: weight + - vnic_type: member_vnic_type + +Response Example +---------------- + +.. literalinclude:: examples/members-list-response.json + :language: javascript + +Create Member +============= + +.. rest_method:: POST /v2/lbaas/pools/{pool_id}/members + +This operation provisions a member and adds it to a pool by using +the configuration that you define in the request object. After the +API validates the request and starts the provisioning process, it +returns a response object, which contains a unique ID. + +In the response, the member :ref:`provisioning status` is +``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``. + +If the status is ``PENDING_CREATE``, issue GET +``/v2/lbaas/pools/{pool_id}/members/{member_id}`` to view the progress of +the provisioning operation. When the member status changes +to ``ACTIVE``, the member is successfully provisioned and +is ready for further configuration. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +At a minimum, you must specify these member attributes: + +- ``address``. The IP address of the backend member to receive traffic from + the load balancer. + +- ``protocol_port``. The port on which the backend member listens for + traffic. + +Some attributes receive default values if you omit them from the +request: + +- ``admin_state_up``. Default is ``true``. + +- ``backup``. Default is ``false``. + +- ``weight``. Default is ``1``. + +If you omit the ``subnet_id`` parameter, the ``vip_subnet_id`` for the parent +load balancer will be used for the member subnet UUID. + +The member ``address`` does not necessarily need to be a member of the +``subnet_id`` subnet.
Members can be routable from the subnet specified +either via the default route or by using ``host_routes`` defined on the subnet. + +Administrative users can specify a project ID that is different than +their own to create members for other projects. + +``monitor_address`` and/or ``monitor_port`` can be used to have the health +monitor, if one is configured for the pool, connect to an alternate IP address +and port when executing a health check on the member. + +To create a member, the load balancer must have an ``ACTIVE`` +provisioning status. + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + - 503 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up-default-optional + - address: address + - backup: backup-optional + - monitor_address: monitor_address-optional + - monitor_port: monitor_port-optional + - name: name-optional + - pool_id: path-pool-id + - project_id: project_id-optional-deprecated + - protocol_port: protocol_port + - request_sriov: request_sriov + - subnet_id: subnet_id-optional + - tags: tags-optional + - weight: weight-optional + +Request Example +--------------- + +.. literalinclude:: examples/member-create-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/member-create-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - address: address-member + - admin_state_up: admin_state_up + - backup: backup + - created_at: created_at + - id: member-id + - monitor_address: monitor_address + - monitor_port: monitor_port + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol_port: protocol_port-member + - provisioning_status: provisioning_status + - subnet_id: subnet_id + - tags: tags + - updated_at: updated_at + - weight: weight + - vnic_type: member_vnic_type + +Response Example +---------------- + +.. literalinclude:: examples/member-create-response.json + :language: javascript + +Show Member details +=================== + +.. rest_method:: GET /v2/lbaas/pools/{pool_id}/members/{member_id} + +Shows the details of a pool member. + +If you are not an administrative user and the parent load balancer does not +belong to your project, the service returns the HTTP ``Forbidden (403)`` +response code. + +This operation does not require a request body. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - member_id: path-member-id + - pool_id: path-pool-id + +Curl Example +------------ + +.. literalinclude:: examples/member-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - address: address-member + - admin_state_up: admin_state_up + - backup: backup + - created_at: created_at + - id: member-id + - monitor_address: monitor_address + - monitor_port: monitor_port + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol_port: protocol_port-member + - provisioning_status: provisioning_status + - subnet_id: subnet_id + - tags: tags + - updated_at: updated_at + - weight: weight + - vnic_type: member_vnic_type + +Response Example +---------------- + +..
literalinclude:: examples/member-show-response.json + :language: javascript + +Update a Member +=============== + +.. rest_method:: PUT /v2/lbaas/pools/{pool_id}/members/{member_id} + +Update an existing member. + +If the request is valid, the service returns the ``Accepted (202)`` +response code. To confirm the update, check that the member provisioning +status is ``ACTIVE``. If the status is ``PENDING_UPDATE``, use a GET +operation to poll the member object for changes. + +Setting the member weight to ``0`` means that the member will not receive +new requests but will finish any existing connections. This "drains" the +backend member of active connections. + +This operation returns the updated member object with the +``ACTIVE``, ``PENDING_UPDATE``, or ``ERROR`` provisioning status. + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up-default-optional + - backup: backup-optional + - member_id: path-member-id + - monitor_address: monitor_address-optional + - monitor_port: monitor_port-optional + - name: name-optional + - pool_id: path-pool-id + - tags: tags-optional + - weight: weight-optional + +Request Example +--------------- + +.. literalinclude:: examples/member-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/member-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - address: address-member + - admin_state_up: admin_state_up + - backup: backup + - created_at: created_at + - id: member-id + - monitor_address: monitor_address + - monitor_port: monitor_port + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol_port: protocol_port-member + - provisioning_status: provisioning_status + - subnet_id: subnet_id + - tags: tags + - updated_at: updated_at + - weight: weight + - vnic_type: member_vnic_type + +Response Example +---------------- + +.. literalinclude:: examples/member-update-response.json + :language: javascript + +Batch Update Members +==================== + +.. rest_method:: PUT /v2/lbaas/pools/{pool_id}/members + +Set the state of members for a pool in one API call. This may include +creating new members, deleting old members, and updating existing members. +Existing members are matched based on address/port combination. + +For example, assume a pool currently has two members. These members have the +following address/port combinations: '192.0.2.15:80' and '192.0.2.16:80'. +Now assume a PUT request is made that includes members with address/port +combinations: '192.0.2.16:80' and '192.0.2.17:80'. + +The member '192.0.2.15:80' will be deleted, because it was not in the request. + +The member '192.0.2.16:80' will be updated to match the request data for that +member, because it was matched. + +The member '192.0.2.17:80' will be created, because no such member existed. + +The optional parameter ``additive_only`` when defined as ``true`` will skip +deletions for members missing from the provided list. If this were set in the +above example, the member '192.0.2.15:80' would have remained in the pool. + +If the request is valid, the service returns the ``Accepted (202)`` +response code. 
To confirm the updates, check that the member provisioning +statuses are ``ACTIVE`` for new or updated members, and that any unspecified +members were correctly deleted. If the statuses are ``PENDING_UPDATE`` or +``PENDING_DELETE``, use GET to poll the member objects for changes. + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + - 503 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - additive_only: additive-only + - admin_state_up: admin_state_up-default-optional + - address: address + - backup: backup-optional + - monitor_address: monitor_address-optional + - monitor_port: monitor_port-optional + - name: name-optional + - pool_id: path-pool-id + - project_id: project_id-optional-deprecated + - protocol_port: protocol_port + - request_sriov: request_sriov + - subnet_id: subnet_id-optional + - tags: tags-optional + - weight: weight-optional + +Request Example +--------------- + +.. literalinclude:: examples/member-batch-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/member-batch-update-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful PUT request. + +Remove a Member +=============== + +.. rest_method:: DELETE /v2/lbaas/pools/{pool_id}/members/{member_id} + +Removes a member and its associated configuration from the pool. + +The API immediately purges any and all configuration data, depending on the +configuration settings. You cannot recover it. + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - member_id: path-member-id + - pool_id: path-pool-id + +Curl Example +------------ + +.. literalinclude:: examples/member-delete-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request. diff --git a/api-ref/source/v2/pool.inc b/api-ref/source/v2/pool.inc new file mode 100644 index 0000000000..fe48b5e44e --- /dev/null +++ b/api-ref/source/v2/pool.inc @@ -0,0 +1,491 @@ +.. -*- rst -*- + +List Pools +========== + +.. rest_method:: GET /v2/lbaas/pools + +Lists all pools for the project. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +Administrative users can specify a project ID that is different than their own +to list pools for other projects. + +The list might be empty. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - project_id: project_id_query + +Curl Example +------------ + +.. literalinclude:: examples/pools-list-curl + :language: bash + +Response Parameters +------------------- + +.. 
rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - alpn_protocols: alpn_protocols-pool + - ca_tls_container_ref: ca_tls_container_ref + - created_at: created_at + - crl_container_ref: crl_container_ref + - description: description + - healthmonitor_id: healthmonitor-id + - id: pool-id + - lb_algorithm: lb-algorithm + - listeners: listener-ids + - loadbalancers: loadbalancer-ids + - members: member-ids + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol: protocol-pools + - provisioning_status: provisioning_status + - session_persistence: session_persistence + - tags: tags + - tls_ciphers: tls_ciphers + - tls_container_ref: tls_container_ref + - tls_enabled: tls_enabled + - tls_versions: tls_versions + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/pools-list-response.json + :language: javascript + +Create Pool +=========== + +.. rest_method:: POST /v2/lbaas/pools + +Creates a pool for a load balancer. + +The pool defines how requests should be balanced across the backend +member servers. + +This operation provisions a pool by using the configuration that +you define in the request object. After the API validates the +request and starts the provisioning process, the API returns a +response object, which contains a unique ID. + +In the response, the pool :ref:`provisioning status` is +``ACTIVE``, ``PENDING_CREATE``, or ``ERROR``. + +If the status is ``PENDING_CREATE``, issue GET +``/v2/lbaas/pools/{pool_id}`` to view the progress of +the provisioning operation. When the pool status changes +to ``ACTIVE``, the pool is successfully provisioned and +is ready for further configuration. + +At a minimum, you must specify these pool attributes: + +- ``protocol`` The protocol for which this pool and its members + listen. A valid value is ``HTTP``, ``HTTPS``, ``PROXY``, ``PROXYV2``, + ``SCTP``, ``TCP``, or ``UDP``. + +- ``lb_algorithm`` The load-balancer algorithm, such as + ``ROUND_ROBIN``, ``LEAST_CONNECTIONS``, ``SOURCE_IP``, and + ``SOURCE_IP_PORT``, that distributes traffic to the pool members. The + load-balancer provider must support this algorithm. + +- ``listener_id`` The ID of the listener in which this pool + becomes the default pool. Each listener has only one default + pool. + + ---OR--- + +- ``loadbalancer_id`` The ID of the load balancer under which this + pool will be created. Each load balancer can have zero or more pools + associated with it. These pools can be used for L7 policies. + +.. note:: + Either ``listener_id`` or ``loadbalancer_id`` must be specified. + +Some attributes receive default values if you omit them from the +request: + +- ``admin_state_up`` Default is ``true``. + +- ``name`` Default is an empty string. + +- ``description`` Default is an empty string. + +If the API cannot fulfill the request due to insufficient data or +data that is not valid, the service returns the HTTP ``Bad Request +(400)`` response code with information about the failure in the +response body. Validation errors require that you correct the error +and submit the request again. + +Specifying a ``project_id`` is deprecated. The pool will inherit the +``project_id`` of the parent load balancer. + +You can configure all documented features of the pool at creation time by +specifying the additional elements or attributes in the request. + +To create a pool, the parent load balancer must have an ``ACTIVE`` +provisioning status. + +The ``SOURCE_IP_PORT`` algorithm is available from version 2.13.
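+ +Putting the minimum together, a sketch of a create request, wrapped in the +standard ``pool`` root element (the listener ID is a placeholder; the request +example below shows the remaining attributes): + +.. code-block:: javascript + + { + "pool": { + "lb_algorithm": "ROUND_ROBIN", + "protocol": "HTTP", + "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d" + } + }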
+ + +.. rest_status_code:: success ../http-status.yaml + + - 201 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + - 503 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up-default-optional + - alpn_protocols: alpn_protocols-pool-optional + - ca_tls_container_ref: ca_tls_container_ref-optional + - crl_container_ref: crl_container_ref-optional + - description: description-optional + - lb_algorithm: lb-algorithm + - listener_id: listener-id-pool-optional + - loadbalancer_id: loadbalancer-id-pool-optional + - name: name-optional + - project_id: project_id-optional-deprecated + - protocol: protocol-pools + - session_persistence: session_persistence-optional + - tags: tags-optional + - tls_enabled: tls_enabled-optional + - tls_ciphers: tls_ciphers-optional + - tls_container_ref: tls_container_ref-optional + - tls_versions: tls_versions-optional + +.. _session_persistence: + +Pool Session Persistence +------------------------ + +Pool session persistence tells the load balancer to attempt to send future +requests from a client to the same backend member as the initial request. + +When the pool has no session persistence, the session persistence object is +``null``. + +Octavia currently supports three session persistence methods: + ++-----------------+----------------------------------------------------------+ +| Method | Description | ++=================+==========================================================+ +| ``APP_COOKIE`` | Use the specified ``cookie_name`` to send future | +| | requests to the same member. | ++-----------------+----------------------------------------------------------+ +| ``HTTP_COOKIE`` | The load balancer will generate a cookie that is | +| | inserted into the response. This cookie will be used to | +| | send future requests to the same member. | ++-----------------+----------------------------------------------------------+ +| ``SOURCE_IP`` | The source IP address on the request will be hashed to | +| | send future requests to the same member. | ++-----------------+----------------------------------------------------------+ + +Pool Session Persistence Object +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. rest_parameters:: ../parameters.yaml + + - type: session_persistence_type + - cookie_name: session_persistence_cookie + - persistence_timeout: session_persistence_timeout + - persistence_granularity: session_persistence_granularity + +Pool Session Persistence Object Example +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. literalinclude:: examples/pool-session-persistence-obj.json + :language: javascript + +Request Example +--------------- + +.. literalinclude:: examples/pool-create-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/pool-create-curl + :language: bash + +Response Parameters +------------------- + +..
rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - alpn_protocols: alpn_protocols-pool + - ca_tls_container_ref: ca_tls_container_ref + - created_at: created_at + - crl_container_ref: crl_container_ref + - description: description + - healthmonitor_id: healthmonitor-id + - id: pool-id + - lb_algorithm: lb-algorithm + - listeners: listener-ids + - loadbalancers: loadbalancer-ids + - members: member-ids + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol: protocol-pools + - provisioning_status: provisioning_status + - session_persistence: session_persistence + - tags: tags + - tls_enabled: tls_enabled + - tls_ciphers: tls_ciphers + - tls_container_ref: tls_container_ref + - tls_versions: tls_versions + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/pool-create-response.json + :language: javascript + +Show Pool details +================= + +.. rest_method:: GET /v2/lbaas/pools/{pool_id} + +Shows the details of a pool. + +If you are not an administrative user and the parent load balancer does not +belong to your project, the service returns the HTTP ``Forbidden (403)`` +response code. + +This operation does not require a request body. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 401 + - 403 + - 404 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - pool_id: path-pool-id + +Curl Example +------------ + +.. literalinclude:: examples/pool-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - admin_state_up: admin_state_up + - alpn_protocols: alpn_protocols-pool + - ca_tls_container_ref: ca_tls_container_ref + - created_at: created_at + - crl_container_ref: crl_container_ref + - description: description + - healthmonitor_id: healthmonitor-id + - id: pool-id + - lb_algorithm: lb-algorithm + - listeners: listener-ids + - loadbalancers: loadbalancer-ids + - members: member-ids + - name: name + - operating_status: operating_status + - project_id: project_id + - protocol: protocol-pools + - provisioning_status: provisioning_status + - session_persistence: session_persistence + - tags: tags + - tls_enabled: tls_enabled + - tls_ciphers: tls_ciphers + - tls_container_ref: tls_container_ref + - tls_versions: tls_versions + - updated_at: updated_at + +Response Example +---------------- + +.. literalinclude:: examples/pool-show-response.json + :language: javascript + +Update a Pool +============= + +.. rest_method:: PUT /v2/lbaas/pools/{pool_id} + +Update an existing pool. + +If the request is valid, the service returns the ``Accepted (202)`` +response code. To confirm the update, check that the pool provisioning +status is ``ACTIVE``. If the status is ``PENDING_UPDATE``, use a GET +operation to poll the pool object for changes. + +This operation returns the updated pool object with the +``ACTIVE``, ``PENDING_UPDATE``, or ``ERROR`` provisioning status. + +.. rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. 
rest_parameters:: ../parameters.yaml
+
+   - admin_state_up: admin_state_up-default-optional
+   - alpn_protocols: alpn_protocols-pool-optional
+   - ca_tls_container_ref: ca_tls_container_ref-optional
+   - crl_container_ref: crl_container_ref-optional
+   - description: description-optional
+   - lb_algorithm: lb-algorithm-optional
+   - name: name-optional
+   - pool_id: path-pool-id
+   - session_persistence: session_persistence-optional
+   - tags: tags-optional
+   - tls_enabled: tls_enabled-optional
+   - tls_ciphers: tls_ciphers-optional
+   - tls_container_ref: tls_container_ref-optional
+   - tls_versions: tls_versions-optional
+
+Request Example
+---------------
+
+.. literalinclude:: examples/pool-update-request.json
+   :language: javascript
+
+Curl Example
+------------
+
+.. literalinclude:: examples/pool-update-curl
+   :language: bash
+
+Response Parameters
+-------------------
+
+.. rest_parameters:: ../parameters.yaml
+
+   - admin_state_up: admin_state_up
+   - alpn_protocols: alpn_protocols-pool
+   - ca_tls_container_ref: ca_tls_container_ref
+   - created_at: created_at
+   - crl_container_ref: crl_container_ref
+   - description: description
+   - healthmonitor_id: healthmonitor-id
+   - id: pool-id
+   - lb_algorithm: lb-algorithm
+   - listeners: listener-ids
+   - loadbalancers: loadbalancer-ids
+   - members: member-ids
+   - name: name
+   - operating_status: operating_status
+   - project_id: project_id
+   - protocol: protocol-pools
+   - provisioning_status: provisioning_status
+   - session_persistence: session_persistence
+   - tags: tags
+   - tls_enabled: tls_enabled
+   - tls_ciphers: tls_ciphers
+   - tls_container_ref: tls_container_ref
+   - tls_versions: tls_versions
+   - updated_at: updated_at
+
+Response Example
+----------------
+
+.. literalinclude:: examples/pool-update-response.json
+   :language: javascript
+
+Remove a Pool
+=============
+
+.. rest_method:: DELETE /v2/lbaas/pools/{pool_id}
+
+Removes a pool and its associated configuration from the load balancer.
+
+Depending on the deployment's configuration settings, the API immediately
+purges any and all of the pool's configuration data. You cannot recover it.
+
+.. rest_status_code:: success ../http-status.yaml
+
+   - 204
+
+.. rest_status_code:: error ../http-status.yaml
+
+   - 400
+   - 401
+   - 403
+   - 404
+   - 409
+   - 500
+
+Request
+-------
+
+.. rest_parameters:: ../parameters.yaml
+
+   - pool_id: path-pool-id
+
+Curl Example
+------------
+
+.. literalinclude:: examples/pool-delete-curl
+   :language: bash
+
+Response
+--------
+
+There is no body content for the response of a successful DELETE request.
diff --git a/api-ref/source/v2/provider.inc b/api-ref/source/v2/provider.inc
new file mode 100644
index 0000000000..c785e50613
--- /dev/null
+++ b/api-ref/source/v2/provider.inc
@@ -0,0 +1,159 @@
+.. -*- rst -*-
+
+List Providers
+==============
+
+.. rest_method:: GET /v2/lbaas/providers
+
+Lists all enabled provider drivers.
+
+Use the ``fields`` query parameter to control which fields are
+returned in the response body.
+
+The list might be empty.
+
+.. rest_status_code:: success ../http-status.yaml
+
+   - 200
+
+.. rest_status_code:: error ../http-status.yaml
+
+   - 400
+   - 401
+   - 403
+   - 500
+
+Request
+-------
+
+.. rest_parameters:: ../parameters.yaml
+
+   - fields: fields
+
+Curl Example
+------------
+
+.. literalinclude:: examples/provider-list-curl
+   :language: bash
+
+Response Parameters
+-------------------
+
+.. rest_parameters:: ../parameters.yaml
+
+   - name: provider-name
+   - description: provider-description
+
+Response Example
+----------------
+
+..
literalinclude:: examples/provider-list-response.json + :language: javascript + +Show Provider Flavor Capabilities +================================= + +.. rest_method:: GET /v2/lbaas/providers/{provider}/flavor_capabilities + +Shows the provider driver flavor capabilities. These are the features of the +provider driver that can be configured in an Octavia flavor. This API returns +a list of dictionaries with the name and description of each flavor capability +of the provider. + +The list might be empty and a provider driver may not implement this feature. + +**New in version 2.6** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - provider: path-provider + +Curl Example +------------ + +.. literalinclude:: examples/provider-flavor-capability-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - flavor_capabilities: flavor-capabilities + - name: flavor-capability-name + - description: flavor-capability-description + +Response Example +---------------- + +.. literalinclude:: examples/provider-flavor-capability-show-response.json + :language: javascript + +Show Provider Availability Zone Capabilities +============================================ + +.. rest_method:: GET /v2/lbaas/providers/{provider}/availability_zone_capabilities + +Shows the provider driver availability zone capabilities. These are the +features of the provider driver that can be configured in an Octavia +availability zone. This API returns a list of dictionaries with the name and +description of each availability zone capability of the provider. + +The list might be empty and a provider driver may not implement this feature. + +**New in version 2.14** + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - provider: path-provider + +Curl Example +------------ + +.. literalinclude:: examples/provider-availability-zone-capability-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - availability_zone_capabilities: availability-zone-capabilities + - name: availability-zone-capability-name + - description: availability-zone-capability-description + +Response Example +---------------- + +.. literalinclude:: examples/provider-availability-zone-capability-show-response.json + :language: javascript diff --git a/api-ref/source/v2/quota.inc b/api-ref/source/v2/quota.inc new file mode 100644 index 0000000000..16929ca864 --- /dev/null +++ b/api-ref/source/v2/quota.inc @@ -0,0 +1,291 @@ +.. -*- rst -*- + +List Quota +========== + +.. rest_method:: GET /v2/lbaas/quotas + +Lists all quotas for the project. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +Administrative users can specify a project ID that is different than their own +to list quotas for other projects. + +If the quota is listed as ``null`` the quota is using the deployment default +quota settings. + +A quota of ``-1`` means the quota is unlimited. + +The list might be empty. + +.. 
rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - project_id: project_id_query + +Curl Example +------------ + +.. literalinclude:: examples/quotas-list-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - healthmonitor: quota-health_monitor + - l7policy: quota-l7policy + - l7rule: quota-l7rule + - listener: quota-listener + - loadbalancer: quota-load_balancer + - member: quota-member + - pool: quota-pool + - project_id: project_id + +Response Example +---------------- + +.. literalinclude:: examples/quotas-list-response.json + :language: javascript + +Show Quota Defaults +=================== + +.. rest_method:: GET /v2/lbaas/quotas/defaults + +Show the quota defaults configured for the deployment. + +A quota of ``-1`` means the quota is unlimited. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +There are no request parameters for the show quota defaults API. + +Curl Example +------------ + +.. literalinclude:: examples/quotas-defaults-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - healthmonitor: quota-health_monitor + - l7policy: quota-l7policy + - l7rule: quota-l7rule + - listener: quota-listener + - loadbalancer: quota-load_balancer + - member: quota-member + - pool: quota-pool + +Response Example +---------------- + +.. literalinclude:: examples/quotas-defaults-response.json + :language: javascript + +Show Project Quota +================== + +.. rest_method:: GET /v2/lbaas/quotas/{project_id} + +Show the quota for the project. + +Use the ``fields`` query parameter to control which fields are +returned in the response body. Additionally, you can filter results +by using query string parameters. For information, see :ref:`filtering`. + +Administrative users can specify a project ID that is different than their own +to show quota for other projects. + +A quota of ``-1`` means the quota is unlimited. + +.. rest_status_code:: success ../http-status.yaml + + - 200 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - fields: fields + - project_id: path-project-id + +Curl Example +------------ + +.. literalinclude:: examples/quota-show-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - healthmonitor: quota-health_monitor + - l7policy: quota-l7policy + - l7rule: quota-l7rule + - listener: quota-listener + - loadbalancer: quota-load_balancer + - member: quota-member + - pool: quota-pool + +Response Example +---------------- + +.. literalinclude:: examples/quotas-show-response.json + :language: javascript + +Update a Quota +============== + +.. rest_method:: PUT /v2/lbaas/quotas/{project_id} + +Updates a quota for a project. + +If the request is valid, the service returns the ``Accepted (202)`` +response code. + +This operation returns the updated quota object. + +If the quota is specified as ``null`` the quota will use the deployment default +quota settings. + +Specifying a quota of ``-1`` means the quota is unlimited. + +Specifying a quota of ``0`` means the project cannot create any of the +resource. + +.. 
rest_status_code:: success ../http-status.yaml + + - 202 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - healthmonitor: quota-health_monitor-optional + - l7policy: quota-l7policy-optional + - l7rule: quota-l7rule-optional + - listener: quota-listener-optional + - loadbalancer: quota-load_balancer-optional + - member: quota-member-optional + - pool: quota-pool-optional + - project_id: path-project-id + +Request Example +--------------- + +.. literalinclude:: examples/quota-update-request.json + :language: javascript + +Curl Example +------------ + +.. literalinclude:: examples/quota-update-curl + :language: bash + +Response Parameters +------------------- + +.. rest_parameters:: ../parameters.yaml + + - healthmonitor: quota-health_monitor + - l7policy: quota-l7policy + - l7rule: quota-l7rule + - listener: quota-listener + - loadbalancer: quota-load_balancer + - member: quota-member + - pool: quota-pool + +Response Example +---------------- + +.. literalinclude:: examples/quota-update-response.json + :language: javascript + +Reset a Quota +============= + +.. rest_method:: DELETE /v2/lbaas/quotas/{project_id} + +Resets a project quota to use the deployment default quota. + +.. rest_status_code:: success ../http-status.yaml + + - 204 + +.. rest_status_code:: error ../http-status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + - 500 + +Request +------- + +.. rest_parameters:: ../parameters.yaml + + - project_id: path-project-id + +Curl Example +------------ + +.. literalinclude:: examples/quota-reset-curl + :language: bash + +Response +-------- + +There is no body content for the response of a successful DELETE request. + diff --git a/bin/create_dual_intermediate_CA.sh b/bin/create_dual_intermediate_CA.sh new file mode 100755 index 0000000000..ae1435cb8d --- /dev/null +++ b/bin/create_dual_intermediate_CA.sh @@ -0,0 +1,155 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +echo "!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!" +echo "Please use the Octavia Certificate Configuration guide:" +echo "https://docs.openstack.org/octavia/latest/admin/guides/certificates.html" +echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + +# This script produces weak security PKI to save resources in the test gates. +# It should be modified to use stronger encryption (aes256), better pass +# phrases, and longer keys (4096). 
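+#
+# As a sketch only (not used by this script), a hardened key-generation step
+# along those lines would look like the following; the passphrase placeholder
+# is illustrative:
+#
+#   openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:4096 \
+#       -aes-256-cbc -pass pass:<strong-passphrase> -out private/ca.key.pem
+#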
+# Please see the Octavia Certificate Configuration guide:
+# https://docs.openstack.org/octavia/latest/admin/guides/certificates.html
+
+set -x -e
+
+CA_PATH=dual_ca
+
+rm -rf $CA_PATH
+mkdir $CA_PATH
+chmod 700 $CA_PATH
+cd $CA_PATH
+
+mkdir -p etc/octavia/certs
+chmod 700 etc/octavia/certs
+
+###### Client Root CA
+mkdir client_ca
+cd client_ca
+mkdir certs crl newcerts private
+chmod 700 private
+touch index.txt
+echo 1000 > serial
+
+# Create the client CA private key
+openssl genpkey -algorithm RSA -out private/ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
+chmod 400 private/ca.key.pem
+
+# Create the client CA root certificate
+openssl req -config ../../openssl.cnf -key private/ca.key.pem -new -x509 -sha256 -extensions v3_ca -days 7300 -out certs/ca.cert.pem -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientRootCA" -passin pass:not-secure-passphrase
+
+###### Client Intermediate CA
+mkdir intermediate_ca
+mkdir intermediate_ca/certs intermediate_ca/crl intermediate_ca/newcerts intermediate_ca/private
+chmod 700 intermediate_ca/private
+touch intermediate_ca/index.txt
+echo 1000 > intermediate_ca/serial
+
+# Create the client intermediate CA private key
+openssl genpkey -algorithm RSA -out intermediate_ca/private/intermediate.ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
+chmod 400 intermediate_ca/private/intermediate.ca.key.pem
+
+# Create the client intermediate CA certificate signing request
+openssl req -config ../../openssl.cnf -key intermediate_ca/private/intermediate.ca.key.pem -new -sha256 -out intermediate_ca/client_intermediate.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientIntermediateCA" -passin pass:not-secure-passphrase
+
+# Create the client intermediate CA certificate
+openssl ca -config ../../openssl.cnf -name CA_intermediate -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in intermediate_ca/client_intermediate.csr -out intermediate_ca/certs/intermediate.cert.pem -passin pass:not-secure-passphrase -batch
+
+# Create the client CA certificate chain
+cat intermediate_ca/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate_ca/ca-chain.cert.pem
+
+###### Create the client key and certificate
+openssl genpkey -algorithm RSA -out intermediate_ca/private/controller.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
+chmod 400 intermediate_ca/private/controller.key.pem
+
+# Create the client controller certificate signing request
+openssl req -config ../../openssl.cnf -key intermediate_ca/private/controller.key.pem -new -sha256 -out intermediate_ca/controller.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=OctaviaController" -passin pass:not-secure-passphrase
+
+# Create the client controller certificate
+openssl ca -config ../../openssl.cnf -name CA_intermediate -extensions usr_cert -days 1825 -notext -md sha256 -in intermediate_ca/controller.csr -out intermediate_ca/certs/controller.cert.pem -passin pass:not-secure-passphrase -batch
+
+# Build the concatenated client cert and key
+openssl rsa -in intermediate_ca/private/controller.key.pem -out intermediate_ca/private/client.cert-and-key.pem -passin pass:not-secure-passphrase
+
+cat intermediate_ca/certs/controller.cert.pem >> intermediate_ca/private/client.cert-and-key.pem
+
+# We are done with the client CA
+cd ..
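+
+# Optional spot check (a sketch, not part of the original flow): confirm the
+# controller certificate chains back to the client root CA built above.
+#   openssl verify -CAfile client_ca/intermediate_ca/ca-chain.cert.pem \
+#       client_ca/intermediate_ca/certs/controller.cert.pem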
+ +###### Stash the octavia default client CA cert files +cp client_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/client_ca.cert.pem +chmod 444 etc/octavia/certs/client_ca.cert.pem +cp client_ca/intermediate_ca/private/client.cert-and-key.pem etc/octavia/certs/client.cert-and-key.pem +chmod 600 etc/octavia/certs/client.cert-and-key.pem + +###### Server Root CA +mkdir server_ca +cd server_ca +mkdir certs crl newcerts private +chmod 700 private +touch index.txt +echo 1000 > serial + +# Create the server CA private key +openssl genpkey -algorithm RSA -out private/ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase +chmod 400 private/ca.key.pem + +# Create the server CA root certificate +openssl req -config ../../openssl.cnf -key private/ca.key.pem -new -x509 -sha256 -extensions v3_ca -days 7300 -out certs/ca.cert.pem -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ServerRootCA" -passin pass:not-secure-passphrase + +###### Server Intermediate CA +mkdir intermediate_ca +mkdir intermediate_ca/certs intermediate_ca/crl intermediate_ca/newcerts intermediate_ca/private +chmod 700 intermediate_ca/private +touch intermediate_ca/index.txt +echo 1000 > intermediate_ca/serial + +# Create the server intermediate CA private key +openssl genpkey -algorithm RSA -out intermediate_ca/private/intermediate.ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase +chmod 400 intermediate_ca/private/intermediate.ca.key.pem + +# Create the server intermediate CA certificate signing request +openssl req -config ../../openssl.cnf -key intermediate_ca/private/intermediate.ca.key.pem -new -sha256 -out intermediate_ca/server_intermediate.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ServerIntermediateCA" -passin pass:not-secure-passphrase + +# Create the server intermediate CA certificate +openssl ca -config ../../openssl.cnf -name CA_intermediate -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in intermediate_ca/server_intermediate.csr -out intermediate_ca/certs/intermediate.cert.pem -passin pass:not-secure-passphrase -batch + +# Create the server CA certificate chain +cat intermediate_ca/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate_ca/ca-chain.cert.pem + +# We are done with the server CA +cd .. + +###### Stash the octavia default server CA cert files +cp server_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/server_ca-chain.cert.pem +chmod 444 etc/octavia/certs/server_ca-chain.cert.pem +cp server_ca/intermediate_ca/certs/intermediate.cert.pem etc/octavia/certs/server_ca.cert.pem +chmod 400 etc/octavia/certs/server_ca.cert.pem +cp server_ca/intermediate_ca/private/intermediate.ca.key.pem etc/octavia/certs/server_ca.key.pem +chmod 400 etc/octavia/certs/server_ca.key.pem + +##### Validate the Octavia PKI files +set +x +echo "################# Verifying the Octavia files ###########################" +openssl verify -CAfile etc/octavia/certs/client_ca.cert.pem etc/octavia/certs/client.cert-and-key.pem +openssl verify -CAfile etc/octavia/certs/server_ca-chain.cert.pem etc/octavia/certs/server_ca.cert.pem + +# We are done, stop enforcing shell errexit +set +e + +echo "!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!" +echo "Please use the Octavia Certificate Configuration guide:" +echo "https://docs.openstack.org/octavia/latest/admin/guides/certificates.html" +echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
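+
+# For a quick, optional look at what this script produced (a sketch; paths
+# are relative to the directory the script was launched from):
+#   openssl x509 -in dual_ca/etc/octavia/certs/server_ca.cert.pem \
+#       -noout -subject -issuer -enddate
+#   openssl crl2pkcs7 -nocrl -certfile dual_ca/etc/octavia/certs/client_ca.cert.pem \
+#       | openssl pkcs7 -print_certs -noout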
diff --git a/bin/create_single_CA_intermediate_CA.sh b/bin/create_single_CA_intermediate_CA.sh
new file mode 100755
index 0000000000..5b91e7fffa
--- /dev/null
+++ b/bin/create_single_CA_intermediate_CA.sh
@@ -0,0 +1,114 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+echo "!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!"
+echo "Single CA mode is insecure, do not use this! It is for testing only."
+echo "Please use the Octavia Certificate Configuration guide:"
+echo "https://docs.openstack.org/octavia/latest/admin/guides/certificates.html"
+echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+
+# This script produces weak security PKI to save resources in the test gates.
+# A single CA should never be used in a production deployment. This script
+# exists purely to test legacy migrations / deployments where someone
+# accidentally used a single CA.
+
+set -x -e
+
+CA_PATH=single_ca
+
+rm -rf $CA_PATH
+mkdir $CA_PATH
+chmod 700 $CA_PATH
+cd $CA_PATH
+
+mkdir -p etc/octavia/certs
+chmod 700 etc/octavia/certs
+
+###### Client Root CA
+mkdir client_ca
+cd client_ca
+mkdir certs crl newcerts private
+chmod 700 private
+touch index.txt
+echo 1000 > serial
+
+# Create the client CA private key
+openssl genpkey -algorithm RSA -out private/ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
+chmod 400 private/ca.key.pem
+
+# Create the client CA root certificate
+openssl req -config ../../openssl.cnf -key private/ca.key.pem -new -x509 -sha256 -extensions v3_ca -days 7300 -out certs/ca.cert.pem -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientRootCA" -passin pass:not-secure-passphrase
+
+###### Client Intermediate CA
+mkdir intermediate_ca
+mkdir intermediate_ca/certs intermediate_ca/crl intermediate_ca/newcerts intermediate_ca/private
+chmod 700 intermediate_ca/private
+touch intermediate_ca/index.txt
+echo 1000 > intermediate_ca/serial
+
+# Create the client intermediate CA private key
+openssl genpkey -algorithm RSA -out intermediate_ca/private/intermediate.ca.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
+chmod 400 intermediate_ca/private/intermediate.ca.key.pem
+
+# Create the client intermediate CA certificate signing request
+openssl req -config ../../openssl.cnf -key intermediate_ca/private/intermediate.ca.key.pem -new -sha256 -out intermediate_ca/client_intermediate.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=ClientIntermediateCA" -passin pass:not-secure-passphrase
+
+# Create the client intermediate CA certificate
+openssl ca -config ../../openssl.cnf -name CA_intermediate -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in intermediate_ca/client_intermediate.csr -out intermediate_ca/certs/intermediate.cert.pem -passin pass:not-secure-passphrase -batch
+
+# Create the client CA certificate chain
+cat intermediate_ca/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate_ca/ca-chain.cert.pem
+
+###### Create the client key and certificate
+openssl genpkey -algorithm RSA -out intermediate_ca/private/controller.key.pem -aes-128-cbc -pass pass:not-secure-passphrase
+chmod 400 intermediate_ca/private/controller.key.pem
+
+# Create the client controller certificate signing request
+openssl req -config ../../openssl.cnf -key intermediate_ca/private/controller.key.pem -new -sha256 -out intermediate_ca/controller.csr -subj "/C=US/ST=Oregon/L=Corvallis/O=OpenStack/OU=Octavia/CN=OctaviaController" -passin pass:not-secure-passphrase
+
+# Create the client controller certificate
+openssl ca -config ../../openssl.cnf -name CA_intermediate -extensions usr_cert -days 1825 -notext -md sha256 -in intermediate_ca/controller.csr -out intermediate_ca/certs/controller.cert.pem -passin pass:not-secure-passphrase -batch
+
+# Build the concatenated client cert and key
+openssl rsa -in intermediate_ca/private/controller.key.pem -out intermediate_ca/private/client.cert-and-key.pem -passin pass:not-secure-passphrase
+
+cat intermediate_ca/certs/controller.cert.pem >> intermediate_ca/private/client.cert-and-key.pem
+
+# We are done with the client CA
+cd ..
+
+###### Stash the octavia default cert files
+cp client_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/client_ca.cert.pem
+chmod 444 etc/octavia/certs/client_ca.cert.pem
+cp client_ca/intermediate_ca/private/client.cert-and-key.pem etc/octavia/certs/client.cert-and-key.pem
+chmod 600 etc/octavia/certs/client.cert-and-key.pem
+cp client_ca/intermediate_ca/ca-chain.cert.pem etc/octavia/certs/server_ca.cert.pem
+chmod 444 etc/octavia/certs/server_ca.cert.pem
+cp client_ca/intermediate_ca/private/intermediate.ca.key.pem etc/octavia/certs/server_ca.key.pem
+chmod 600 etc/octavia/certs/server_ca.key.pem
+
+##### Validate the Octavia PKI files
+set +x
+echo "################# Verifying the Octavia files ###########################"
+openssl verify -CAfile etc/octavia/certs/client_ca.cert.pem etc/octavia/certs/client.cert-and-key.pem
+openssl verify -CAfile etc/octavia/certs/server_ca.cert.pem etc/octavia/certs/server_ca.cert.pem
+
+# We are done, stop enforcing shell errexit
+set +e
+
+echo "!!!!!!!!!!!!!!!Do not use this script for deployments!!!!!!!!!!!!!"
+echo "Single CA mode is insecure, do not use this! It is for testing only."
+echo "Please use the Octavia Certificate Configuration guide:"
+echo "https://docs.openstack.org/octavia/latest/admin/guides/certificates.html"
+echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
diff --git a/bin/openssl.cnf b/bin/openssl.cnf
new file mode 100644
index 0000000000..fe4cdb4334
--- /dev/null
+++ b/bin/openssl.cnf
@@ -0,0 +1,144 @@
+# OpenSSL root CA configuration file.
+
+[ ca ]
+# `man ca`
+default_ca = CA_default
+
+[ CA_default ]
+# Directory and file locations.
+dir = ./
+certs = $dir/certs
+crl_dir = $dir/crl
+new_certs_dir = $dir/newcerts
+database = $dir/index.txt
+serial = $dir/serial
+RANDFILE = $dir/private/.rand
+
+# The root key and root certificate.
+private_key = $dir/private/ca.key.pem
+certificate = $dir/certs/ca.cert.pem
+
+# For certificate revocation lists.
+crlnumber = $dir/crlnumber
+crl = $dir/crl/ca.crl.pem
+crl_extensions = crl_ext
+default_crl_days = 30
+
+# SHA-1 is deprecated, so use SHA-2 instead.
+default_md = sha256
+
+name_opt = ca_default
+cert_opt = ca_default
+# 10 years
+default_days = 7300
+preserve = no
+policy = policy_strict
+
+[ CA_intermediate ]
+# Directory and file locations.
+dir = ./intermediate_ca
+certs = $dir/certs
+crl_dir = $dir/crl
+new_certs_dir = $dir/newcerts
+database = $dir/index.txt
+serial = $dir/serial
+RANDFILE = $dir/private/.rand
+
+# The root key and root certificate.
+private_key = ./private/ca.key.pem
+certificate = ./certs/ca.cert.pem
+
+# For certificate revocation lists.
+crlnumber = $dir/crlnumber
+crl = $dir/crl/ca.crl.pem
+crl_extensions = crl_ext
+default_crl_days = 30
+
+# SHA-1 is deprecated, so use SHA-2 instead.
+default_md = sha256
+
+name_opt = ca_default
+cert_opt = ca_default
+# 5 years
+default_days = 3650
+preserve = no
+policy = policy_strict
+
+[ policy_strict ]
+# The root CA should only sign intermediate certificates that match.
+# See the POLICY FORMAT section of `man ca`.
+countryName = match
+stateOrProvinceName = match
+organizationName = match
+organizationalUnitName = optional
+commonName = supplied
+emailAddress = optional
+
+[ req ]
+# Options for the `req` tool (`man req`).
+default_bits = 2048
+distinguished_name = req_distinguished_name
+string_mask = utf8only
+
+# SHA-1 is deprecated, so use SHA-2 instead.
+default_md = sha256
+
+# Extension to add when the -x509 option is used.
+x509_extensions = v3_ca
+
+[ req_distinguished_name ]
+# See `man req` for the meaning of these fields.
+countryName = Country Name (2 letter code)
+stateOrProvinceName = State or Province Name
+localityName = Locality Name
+0.organizationName = Organization Name
+organizationalUnitName = Organizational Unit Name
+commonName = Common Name
+emailAddress = Email Address
+
+# Optionally, specify some defaults.
+countryName_default = US
+stateOrProvinceName_default = Oregon
+localityName_default = Corvallis
+0.organizationName_default = OpenStack
+organizationalUnitName_default = Octavia
+emailAddress_default =
+commonName_default = example.org
+
+[ v3_ca ]
+# Extensions for a typical CA (`man x509v3_config`).
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always,issuer
+basicConstraints = critical, CA:true
+keyUsage = critical, digitalSignature, cRLSign, keyCertSign
+
+[ v3_intermediate_ca ]
+# Extensions for a typical intermediate CA (`man x509v3_config`).
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always,issuer
+basicConstraints = critical, CA:true, pathlen:0
+keyUsage = critical, digitalSignature, cRLSign, keyCertSign
+
+[ usr_cert ]
+# Extensions for client certificates (`man x509v3_config`).
+basicConstraints = CA:FALSE
+nsCertType = client, email
+nsComment = "OpenSSL Generated Client Certificate"
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer
+keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth, emailProtection
+
+[ server_cert ]
+# Extensions for server certificates (`man x509v3_config`).
+basicConstraints = CA:FALSE
+nsCertType = server
+nsComment = "OpenSSL Generated Server Certificate"
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer:always
+keyUsage = critical, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
+
+[ crl_ext ]
+# Extension for CRLs (`man x509v3_config`).
+authorityKeyIdentifier=keyid:always
diff --git a/bindep.txt b/bindep.txt
new file mode 100644
index 0000000000..6226f9082d
--- /dev/null
+++ b/bindep.txt
@@ -0,0 +1,27 @@
+# Docs package dependencies
+graphviz [doc]
+# PDF Docs package dependencies
+make [doc]
+fonts-freefont-otf [doc platform:dpkg]
+fonts-liberation [doc platform:dpkg]
+texlive-latex-base [doc platform:dpkg]
+texlive-latex-extra [doc platform:dpkg]
+texlive-xetex [doc platform:dpkg]
+texlive-fonts-recommended [doc platform:dpkg]
+xindy [doc platform:dpkg]
+latexmk [doc platform:dpkg]
+texlive [doc platform:rpm]
+texlive-fncychap [doc platform:rpm]
+texlive-titlesec [doc platform:rpm]
+texlive-tabulary [doc platform:rpm]
+texlive-framed [doc platform:rpm]
+texlive-wrapfig [doc platform:rpm]
+texlive-upquote [doc platform:rpm]
+texlive-capt-of [doc platform:rpm]
+texlive-needspace [doc platform:rpm]
+texlive-polyglossia [doc platform:rpm]
+texlive-xindy [doc platform:rpm]
+latexmk [doc platform:rpm]
+python3-sphinxcontrib-svg2pdfconverter-common [doc platform:rpm]
+librsvg2-tools [doc platform:rpm]
+librsvg2-bin [doc platform:dpkg]
diff --git a/devstack/README.md b/devstack/README.md
new file mode 100644
index 0000000000..fd1e34bfef
--- /dev/null
+++ b/devstack/README.md
@@ -0,0 +1,29 @@
+This directory contains the octavia devstack plugin. To configure octavia,
+you will need to enable the octavia devstack plugin and enable the octavia
+services by editing the [[local|localrc]] section of your local.conf file.
+
+1) Enable the plugin
+
+To enable the octavia plugin, add a line of the form:
+
+    enable_plugin octavia <GITURL> [GITREF]
+
+where
+
+    <GITURL> is the URL of an octavia repository
+    [GITREF] is an optional git ref (branch/ref/tag).  The default is
+    master.
+
+For example
+
+    enable_plugin octavia https://opendev.org/openstack/octavia master
+
+2) Enable the Octavia services
+
+For example
+
+    ENABLED_SERVICES+=,octavia,o-api,o-cw,o-hk,o-hm,o-da
+
+For more information, see the "Externally Hosted Plugins" section of
+https://docs.openstack.org/devstack/latest/plugins.html
diff --git a/devstack/contrib/new-octavia-devstack.sh b/devstack/contrib/new-octavia-devstack.sh
new file mode 100755
index 0000000000..ec2e52ae16
--- /dev/null
+++ b/devstack/contrib/new-octavia-devstack.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+#
+# These instructions assume an Ubuntu-based host or VM for running devstack.
+# Please note that if you are running this in a VM, it is vitally important
+# that the underlying hardware have nested virtualization enabled or you will
+# experience very poor amphora performance.
+
+# Set up the packages we need. Ubuntu package manager is assumed.
+apt-get update
+apt-get install git vim -y
+
+# TODO(sbalukoff): Add prerequisites for other distributions.
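+# (As an untested sketch for that TODO, an RPM-based equivalent of the
+# apt-get step above would be something like: dnf install -y git vim)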
+
+# Clone the devstack repo
+git clone https://github.com/openstack-dev/devstack.git $HOME/devstack
+
+cat <<EOF > $HOME/devstack/localrc
+enable_plugin barbican https://opendev.org/openstack/barbican
+enable_plugin neutron https://opendev.org/openstack/neutron
+enable_plugin octavia https://opendev.org/openstack/octavia
+LIBS_FROM_GIT+=python-octaviaclient
+
+KEYSTONE_TOKEN_FORMAT=fernet
+
+DATABASE_PASSWORD=secretdatabase
+RABBIT_PASSWORD=secretrabbit
+ADMIN_PASSWORD=secretadmin
+SERVICE_PASSWORD=secretservice
+SERVICE_TOKEN=111222333444
+# Enable Logging
+LOGFILE=/opt/stack/logs/stack.sh.log
+VERBOSE=True
+LOG_COLOR=True
+# Pre-requisite
+ENABLED_SERVICES=key,rabbit,mysql
+# Nova
+ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch
+# Placement service needed for Nova
+ENABLED_SERVICES+=,placement-api,placement-client
+# Glance
+ENABLED_SERVICES+=,g-api
+# Neutron
+ENABLED_SERVICES+=,neutron-api,neutron-agent,neutron-dhcp,neutron-l3
+ENABLED_SERVICES+=,neutron-metadata-agent,neutron-qos
+# Tempest (optional)
+#ENABLED_SERVICES+=,tempest
+# Octavia
+ENABLED_SERVICES+=,octavia,o-api,o-cw,o-hm,o-hk,o-da
+EOF
+
+# Create the stack user
+$HOME/devstack/tools/create-stack-user.sh
+
+# Move everything into place
+mv $HOME/devstack /opt/stack/
+chown -R stack:stack /opt/stack/devstack/
+
+# Fix permissions on current tty so screens can attach
+chmod go+rw `tty`
+
+# Stack that stack!
+su - stack -c /opt/stack/devstack/stack.sh
+
+# Add environment variables for auth/endpoints
+echo 'source /opt/stack/devstack/openrc admin admin' >> /opt/stack/.bashrc
+
+# Drop into a shell
+exec su - stack
diff --git a/devstack/etc/octavia/haproxy.cfg b/devstack/etc/octavia/haproxy.cfg
new file mode 100644
index 0000000000..54fcf651fb
--- /dev/null
+++ b/devstack/etc/octavia/haproxy.cfg
@@ -0,0 +1,30 @@
+global
+    daemon
+    log /dev/log local0
+    log /dev/log local1 notice
+
+defaults
+    log global
+    retries 3
+    option redispatch
+    timeout connect 5000
+    timeout client 50000
+    timeout server 50000
+
+
+
+frontend octavia-frontend-api
+    option httplog
+    bind 0.0.0.0:OCTAVIA_PORT
+    mode http
+    default_backend octavia-backend-api
+
+backend octavia-backend-api
+    mode http
+    balance roundrobin
+
+# the devstack plugin will add entries here looking like:
+# server octavia-main <ip-address>:<port> weight 1
+# server octavia-second <ip-address>:<port> weight 1
+#
+
diff --git a/devstack/etc/rsyslog/10-octavia-log-offloading.conf b/devstack/etc/rsyslog/10-octavia-log-offloading.conf
new file mode 100644
index 0000000000..7d02757b1d
--- /dev/null
+++ b/devstack/etc/rsyslog/10-octavia-log-offloading.conf
@@ -0,0 +1,16 @@
+# Work around CentOS/RHEL umask override of file permissions
+# Note: This is a global rsyslog setting; you probably do not want to set
+# this outside of testing!
+$umask 0000 + +# provides UDP syslog reception +module(load="imudp") +input(type="imudp" port=["%ADMIN_PORT%", "%TENANT_PORT%"]) + +if ($inputname == "imudp" and $syslogfacility-text == "local0" and $syslogseverity-text == "info") then { + action(type="omfile" FileCreateMode="0644" File="/var/log/octavia/octavia-tenant-traffic.log")&stop +} + +if ($inputname == "imudp" and $syslogfacility-text != "local0") then { + action(type="omfile" FileCreateMode="0644" File="/var/log/octavia/octavia-amphora.log")&stop +} diff --git a/devstack/files/debs/octavia b/devstack/files/debs/octavia new file mode 100644 index 0000000000..f6cd3c98e9 --- /dev/null +++ b/devstack/files/debs/octavia @@ -0,0 +1,3 @@ +golang +debootstrap +rsyslog diff --git a/devstack/files/rpms-suse/octavia b/devstack/files/rpms-suse/octavia new file mode 100644 index 0000000000..611a0f45a1 --- /dev/null +++ b/devstack/files/rpms-suse/octavia @@ -0,0 +1,4 @@ +debootstrap +dpkg +go +rsyslog diff --git a/devstack/files/rpms/octavia b/devstack/files/rpms/octavia new file mode 100644 index 0000000000..50d122e160 --- /dev/null +++ b/devstack/files/rpms/octavia @@ -0,0 +1,3 @@ +golang +debootstrap # not:rhel9 +rsyslog diff --git a/devstack/files/wsgi/octavia-api.template b/devstack/files/wsgi/octavia-api.template new file mode 100644 index 0000000000..0dac32a18b --- /dev/null +++ b/devstack/files/wsgi/octavia-api.template @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This is a template Apache2 configuration file for using the +# Octavia API through mod_wsgi. This version assumes you are +# running devstack to configure the software. 
+
+Listen %OCTAVIA_SERVICE_PORT%
+
+<VirtualHost *:%OCTAVIA_SERVICE_PORT%>
+
+    WSGIDaemonProcess octavia-wsgi processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup octavia-wsgi
+    WSGIScriptAlias / /usr/local/bin/octavia-wsgi
+    WSGIApplicationGroup %{GLOBAL}
+
+    ErrorLog /var/log/%APACHE_NAME%/octavia_error.log
+    <IfVersion >= 2.4>
+        ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    CustomLog /var/log/%APACHE_NAME%/octavia_access.log combined
+
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+
+    <Directory /usr/local/bin>
+        WSGIProcessGroup octavia-wsgi
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
new file mode 100644
index 0000000000..b8cb2e24af
--- /dev/null
+++ b/devstack/plugin.sh
@@ -0,0 +1,843 @@
+#!/usr/bin/env bash
+
+saveenv=$-
+set -ex
+
+
+# devstack plugin for octavia
+
+GET_PIP_CACHE_LOCATION=/opt/stack/cache/files/get-pip.py
+
+function octavia_install {
+    if [[ ${OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD} == True ]]; then
+        setup_develop $OCTAVIA_DIR ${OCTAVIA_JOBBOARD_BACKEND}
+    else
+        setup_develop $OCTAVIA_DIR
+    fi
+}
+
+function octaviaclient_install {
+    if use_library_from_git "python-octaviaclient"; then
+        git_clone_by_name "python-octaviaclient"
+        setup_dev_lib "python-octaviaclient"
+    else
+        pip_install_gr python-octaviaclient
+    fi
+}
+
+function octavia_lib_install {
+    if use_library_from_git "octavia-lib"; then
+        git_clone_by_name "octavia-lib"
+        setup_dev_lib "octavia-lib"
+        export DIB_REPOLOCATION_octavia_lib=${GITDIR["octavia-lib"]}
+        export DIB_REPOREF_octavia_lib=$(git --git-dir="${GITDIR["octavia-lib"]}/.git" log -1 --pretty="format:%H")
+    else
+        pip_install_gr octavia-lib
+    fi
+}
+
+function install_diskimage_builder {
+    if use_library_from_git "diskimage-builder"; then
+        GITREPO["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_URL
+        GITDIR["diskimage-builder"]=$DISKIMAGE_BUILDER_DIR
+        GITBRANCH["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_REF
+        git_clone_by_name "diskimage-builder"
+        setup_dev_lib -bindep "diskimage-builder"
+    else
+        pip_install -r $OCTAVIA_DIR/diskimage-create/requirements.txt
+    fi
+}
+
+function set_octavia_worker_image_owner_id {
+    image_id=$(openstack image list --property name=${OCTAVIA_AMP_IMAGE_NAME} -f value -c ID)
+    owner_id=$(openstack image show ${image_id} -c owner -f value)
+    iniset $OCTAVIA_CONF controller_worker amp_image_owner_id ${owner_id}
+}
+
+function build_octavia_worker_image {
+    # set up diskimage-builder if we need to
+    install_diskimage_builder
+
+    # Pull in DIB local elements if they are defined in devstack
+    if [ -n "$DIB_LOCAL_ELEMENTS" ]; then
+        export DIB_LOCAL_ELEMENTS=$DIB_LOCAL_ELEMENTS
+    fi
+
+    # Pull in the option to install nftables in the amphora
+    if [ -n "$DIB_OCTAVIA_AMP_USE_NFTABLES" ]; then
+        export DIB_OCTAVIA_AMP_USE_NFTABLES=$DIB_OCTAVIA_AMP_USE_NFTABLES
+    fi
+
+    # pull the agent code from the current code zuul has a reference to
+    if [ -n "$DIB_REPOLOCATION_pip_and_virtualenv" ]; then
+        export DIB_REPOLOCATION_pip_and_virtualenv=$DIB_REPOLOCATION_pip_and_virtualenv
+    elif [ -f $GET_PIP_CACHE_LOCATION ] ; then
+        export DIB_REPOLOCATION_pip_and_virtualenv=file://$GET_PIP_CACHE_LOCATION
+    fi
+    export DIB_REPOLOCATION_amphora_agent=$OCTAVIA_DIR
+    export DIB_REPOREF_amphora_agent=$(git --git-dir="$OCTAVIA_DIR/.git" log -1 --pretty="format:%H")
+
+    TOKEN=$(openstack token issue -f value -c id)
+    die_if_not_set $LINENO TOKEN "Keystone failed to get token."
+ + octavia_dib_tracing_arg= + if [ "$OCTAVIA_DIB_TRACING" != "0" ]; then + octavia_dib_tracing_arg="-x" + fi + if [[ ${OCTAVIA_AMP_BASE_OS:+1} ]] ; then + export PARAM_OCTAVIA_AMP_BASE_OS='-i '$OCTAVIA_AMP_BASE_OS + fi + if [[ ${OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID:+1} ]] ; then + export PARAM_OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID='-d '$OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID + fi + if [[ ${OCTAVIA_AMP_IMAGE_SIZE:+1} ]] ; then + export PARAM_OCTAVIA_AMP_IMAGE_SIZE='-s '$OCTAVIA_AMP_IMAGE_SIZE + fi + if [[ ${OCTAVIA_AMP_IMAGE_ARCH:+1} ]] ; then + export PARAM_OCTAVIA_AMP_IMAGE_ARCH='-a '$OCTAVIA_AMP_IMAGE_ARCH + fi + if [[ "$(trueorfalse False OCTAVIA_AMP_DISABLE_TMP_FS)" == "True" ]]; then + export PARAM_OCTAVIA_AMP_DISABLE_TMP_FS='-f' + fi + if [[ "$(trueorfalse False OCTAVIA_AMP_ENABLE_FIPS)" == "True" ]]; then + export PARAM_OCTAVIA_AMP_ENABLE_FIPS='-y' + fi + + # Use the infra pypi mirror if it is available + if [[ -e /etc/ci/mirror_info.sh ]]; then + source /etc/ci/mirror_info.sh + fi + if [[ ${NODEPOOL_PYPI_MIRROR:+1} ]]; then + if [[ ${DIB_LOCAL_ELEMENTS:+1} ]]; then + export DIB_LOCAL_ELEMENTS="${DIB_LOCAL_ELEMENTS} pypi" + else + export DIB_LOCAL_ELEMENTS='pypi' + fi + export DIB_PYPI_MIRROR_URL=$NODEPOOL_PYPI_MIRROR + export DIB_PYPI_MIRROR_URL_1=$NODEPOOL_WHEEL_MIRROR + export DIB_PIP_RETRIES=0 + fi + + if ! [ -f $OCTAVIA_AMP_IMAGE_FILE ]; then + local dib_logs=/var/log/dib-build + if [[ -e ${dib_logs} ]]; then + sudo rm -rf ${dib_logs} + fi + sudo mkdir -m755 ${dib_logs} + sudo chown $STACK_USER ${dib_logs} + # Workaround for rockylinux images + export DIB_CONTAINERFILE_RUNTIME_ROOT=1 + $OCTAVIA_DIR/diskimage-create/diskimage-create.sh -l ${dib_logs}/$(basename $OCTAVIA_AMP_IMAGE_FILE).log $octavia_dib_tracing_arg -o $OCTAVIA_AMP_IMAGE_FILE ${PARAM_OCTAVIA_AMP_BASE_OS:-} ${PARAM_OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID:-} ${PARAM_OCTAVIA_AMP_IMAGE_SIZE:-} ${PARAM_OCTAVIA_AMP_IMAGE_ARCH:-} ${PARAM_OCTAVIA_AMP_DISABLE_TMP_FS:-} ${PARAM_OCTAVIA_AMP_ENABLE_FIPS:-} + fi + + if ! [ -f $OCTAVIA_AMP_IMAGE_FILE ]; then + echo "Diskimage-builder failed to create the amphora image. Aborting." 
+        exit 1
+    fi
+
+    upload_image file://${OCTAVIA_AMP_IMAGE_FILE} $TOKEN
+}
+
+function _configure_octavia_apache_uwsgi {
+    write_uwsgi_config "$OCTAVIA_UWSGI_CONF" "$OCTAVIA_UWSGI_APP" "/$OCTAVIA_SERVICE_TYPE" "" "octavia-wsgi"
+}
+
+function _cleanup_octavia_apache_wsgi {
+    remove_uwsgi_config "$OCTAVIA_UWSGI_CONF" "$OCTAVIA_UWSGI_APP"
+    restart_apache_server
+}
+
+function _start_octavia_apache_wsgi {
+    run_process o-api "$(which uwsgi) --ini $OCTAVIA_UWSGI_CONF"
+    enable_apache_site octavia-wsgi
+    restart_apache_server
+}
+
+function _stop_octavia_apache_wsgi {
+    disable_apache_site octavia-wsgi
+    stop_process o-api
+    restart_apache_server
+}
+
+function create_octavia_accounts {
+    create_service_user $OCTAVIA
+
+    # Increase the octavia account secgroups quota
+    # This is important for concurrent tempest testing
+    openstack quota set --secgroups 100 $OCTAVIA_PROJECT_NAME
+    openstack quota set --secgroup-rules 1000 $OCTAVIA_PROJECT_NAME
+
+    octavia_service=$(get_or_create_service "octavia" \
+        $OCTAVIA_SERVICE_TYPE "Octavia Load Balancing Service")
+
+    if [[ "$OCTAVIA_NODE" == "main" ]] ; then
+        get_or_create_endpoint $octavia_service \
+            "$REGION_NAME" \
+            "$OCTAVIA_PROTOCOL://$SERVICE_HOST:$OCTAVIA_PORT/$OCTAVIA_SERVICE_TYPE"
+    else
+        get_or_create_endpoint $octavia_service \
+            "$REGION_NAME" \
+            "$OCTAVIA_PROTOCOL://$SERVICE_HOST/$OCTAVIA_SERVICE_TYPE"
+    fi
+}
+
+function install_redis {
+    if is_fedora; then
+        install_package redis
+    elif is_ubuntu; then
+        install_package redis-server
+    elif is_suse; then
+        install_package redis
+    else
+        exit_distro_not_supported "redis installation"
+    fi
+
+    start_service redis
+    redis-cli del octavia_jobboard:listings
+}
+
+function stop_redis {
+    stop_service redis || true
+}
+
+function uninstall_redis {
+    if is_fedora; then
+        uninstall_package redis
+    elif is_ubuntu; then
+        uninstall_package redis-server
+    elif is_suse; then
+        uninstall_package redis
+    fi
+}
+
+function octavia_configure {
+
+    sudo mkdir -m 755 -p $OCTAVIA_CONF_DIR
+    safe_chown $STACK_USER $OCTAVIA_CONF_DIR
+
+    sudo mkdir -m 700 -p $OCTAVIA_RUN_DIR
+    safe_chown $STACK_USER $OCTAVIA_RUN_DIR
+
+    if !
[ -e $OCTAVIA_AUDIT_MAP ] ; then + cp $OCTAVIA_DIR/etc/audit/octavia_api_audit_map.conf.sample $OCTAVIA_AUDIT_MAP + fi + + # Use devstack logging configuration + setup_logging $OCTAVIA_CONF + iniset $OCTAVIA_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + + # Change bind host + iniset $OCTAVIA_CONF api_settings bind_host $(ipv6_unquote $SERVICE_HOST) + iniset $OCTAVIA_CONF api_settings api_handler queue_producer + + iniset $OCTAVIA_CONF database connection "mysql+pymysql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_HOST}:3306/octavia" + if [[ ${OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD} == True ]]; then + iniset $OCTAVIA_CONF task_flow persistence_connection "mysql+pymysql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_HOST}:3306/octavia_persistence" + iniset $OCTAVIA_CONF task_flow jobboard_expiration_time ${OCTAVIA_JOBBOARD_EXPIRATION_TIME} + iniset $OCTAVIA_CONF task_flow jobboard_enabled True + if [[ ${OCTAVIA_JOBBOARD_BACKEND} == "etcd" ]]; then + iniset $OCTAVIA_CONF task_flow jobboard_backend_driver etcd_taskflow_driver + iniset $OCTAVIA_CONF task_flow jobboard_backend_port 2379 + iniset $OCTAVIA_CONF task_flow jobboard_backend_hosts ${SERVICE_HOST} + fi + fi + # Configure keystone auth_token for all users + configure_keystone_authtoken_middleware $OCTAVIA_CONF octavia + + # Ensure config is set up properly for authentication as admin + iniset $OCTAVIA_CONF service_auth auth_url $KEYSTONE_SERVICE_URI + iniset $OCTAVIA_CONF service_auth auth_type password + iniset $OCTAVIA_CONF service_auth username $OCTAVIA_USERNAME + iniset $OCTAVIA_CONF service_auth password $OCTAVIA_PASSWORD + iniset $OCTAVIA_CONF service_auth user_domain_name $OCTAVIA_USER_DOMAIN_NAME + iniset $OCTAVIA_CONF service_auth project_name $OCTAVIA_PROJECT_NAME + iniset $OCTAVIA_CONF service_auth project_domain_name $OCTAVIA_PROJECT_DOMAIN_NAME + iniset $OCTAVIA_CONF service_auth cafile $SSL_BUNDLE_FILE + iniset $OCTAVIA_CONF service_auth memcached_servers $SERVICE_HOST:11211 + + # neutron + iniset $OCTAVIA_CONF neutron auth_url $KEYSTONE_SERVICE_URI + iniset $OCTAVIA_CONF neutron auth_type password + iniset $OCTAVIA_CONF neutron username $OCTAVIA_USERNAME + iniset $OCTAVIA_CONF neutron password $OCTAVIA_PASSWORD + iniset $OCTAVIA_CONF neutron user_domain_name $OCTAVIA_USER_DOMAIN_NAME + iniset $OCTAVIA_CONF neutron project_name $OCTAVIA_PROJECT_NAME + iniset $OCTAVIA_CONF neutron project_domain_name $OCTAVIA_PROJECT_DOMAIN_NAME + iniset $OCTAVIA_CONF neutron cafile $SSL_BUNDLE_FILE + + # Setting other required default options + iniset $OCTAVIA_CONF controller_worker amphora_driver ${OCTAVIA_AMPHORA_DRIVER} + iniset $OCTAVIA_CONF controller_worker compute_driver ${OCTAVIA_COMPUTE_DRIVER} + iniset $OCTAVIA_CONF controller_worker volume_driver ${OCTAVIA_VOLUME_DRIVER} + iniset $OCTAVIA_CONF controller_worker network_driver ${OCTAVIA_NETWORK_DRIVER} + iniset $OCTAVIA_CONF controller_worker image_driver ${OCTAVIA_IMAGE_DRIVER} + iniset $OCTAVIA_CONF controller_worker amp_image_tag ${OCTAVIA_AMP_IMAGE_TAG} + iniset $OCTAVIA_CONF controller_worker amp_timezone $(readlink -e /etc/localtime | sed "s/\/usr\/share\/zoneinfo\///") + + iniuncomment $OCTAVIA_CONF health_manager heartbeat_key + iniset $OCTAVIA_CONF health_manager heartbeat_key ${OCTAVIA_HEALTH_KEY} + + iniset $OCTAVIA_CONF house_keeping amphora_expiry_age ${OCTAVIA_AMP_EXPIRY_AGE} + iniset $OCTAVIA_CONF house_keeping load_balancer_expiry_age ${OCTAVIA_LB_EXPIRY_AGE} + + iniset_rpc_backend octavia $OCTAVIA_CONF + + iniset $OCTAVIA_CONF oslo_messaging 
rpc_thread_pool_size 2 + iniset $OCTAVIA_CONF oslo_messaging topic octavia_prov + + # Uncomment other default options + iniuncomment $OCTAVIA_CONF haproxy_amphora base_path + iniuncomment $OCTAVIA_CONF haproxy_amphora base_cert_dir + iniuncomment $OCTAVIA_CONF haproxy_amphora connection_max_retries + iniuncomment $OCTAVIA_CONF haproxy_amphora connection_retry_interval + iniuncomment $OCTAVIA_CONF haproxy_amphora rest_request_conn_timeout + iniuncomment $OCTAVIA_CONF haproxy_amphora rest_request_read_timeout + iniuncomment $OCTAVIA_CONF controller_worker amp_active_retries + iniuncomment $OCTAVIA_CONF controller_worker amp_active_wait_sec + iniuncomment $OCTAVIA_CONF controller_worker workers + iniuncomment $OCTAVIA_CONF controller_worker loadbalancer_topology + + iniset $OCTAVIA_CONF controller_worker loadbalancer_topology ${OCTAVIA_LB_TOPOLOGY} + + # devstack optimizations for tempest runs + iniset $OCTAVIA_CONF haproxy_amphora connection_max_retries 1500 + iniset $OCTAVIA_CONF haproxy_amphora connection_retry_interval 1 + iniset $OCTAVIA_CONF haproxy_amphora rest_request_conn_timeout ${OCTAVIA_AMP_CONN_TIMEOUT} + iniset $OCTAVIA_CONF haproxy_amphora rest_request_read_timeout ${OCTAVIA_AMP_READ_TIMEOUT} + iniset $OCTAVIA_CONF controller_worker amp_active_retries 100 + iniset $OCTAVIA_CONF controller_worker amp_active_wait_sec 2 + iniset $OCTAVIA_CONF controller_worker workers 2 + + if [[ -a $OCTAVIA_SSH_DIR ]] ; then + rm -rf $OCTAVIA_SSH_DIR + fi + + mkdir -m755 $OCTAVIA_SSH_DIR + + if [[ "$(trueorfalse False OCTAVIA_USE_PREGENERATED_SSH_KEY)" == "True" ]]; then + cp -fp ${OCTAVIA_PREGENERATED_SSH_KEY_PATH} ${OCTAVIA_AMP_SSH_KEY_PATH} + cp -fp ${OCTAVIA_PREGENERATED_SSH_KEY_PATH}.pub ${OCTAVIA_AMP_SSH_KEY_PATH}.pub + chmod 0600 ${OCTAVIA_AMP_SSH_KEY_PATH} + else + ssh-keygen -b $OCTAVIA_AMP_SSH_KEY_BITS -t $OCTAVIA_AMP_SSH_KEY_TYPE -N "" -f ${OCTAVIA_AMP_SSH_KEY_PATH} + fi + iniset $OCTAVIA_CONF controller_worker amp_ssh_key_name ${OCTAVIA_AMP_SSH_KEY_NAME} + + if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] || [ $OCTAVIA_NODE == 'api' ]; then + recreate_database_mysql octavia + octavia-db-manage upgrade head + + if [[ ${OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD} == True ]]; then + recreate_database_mysql octavia_persistence + octavia-db-manage upgrade_persistence + fi + fi + + if [[ -a $OCTAVIA_CERTS_DIR ]] ; then + rm -rf $OCTAVIA_CERTS_DIR + fi + + # amphorav2 required redis installation + if [[ ${OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD} == True ]]; then + install_redis + fi + + if [[ "$(trueorfalse False OCTAVIA_USE_PREGENERATED_CERTS)" == "True" ]]; then + cp -rfp ${OCTAVIA_PREGENERATED_CERTS_DIR} ${OCTAVIA_CERTS_DIR} + else + pushd $OCTAVIA_DIR/bin + source create_dual_intermediate_CA.sh + mkdir -p ${OCTAVIA_CERTS_DIR}/private + chmod 700 ${OCTAVIA_CERTS_DIR}/private + cp -p etc/octavia/certs/server_ca.cert.pem ${OCTAVIA_CERTS_DIR}/ + cp -p etc/octavia/certs/server_ca-chain.cert.pem ${OCTAVIA_CERTS_DIR}/ + cp -p etc/octavia/certs/server_ca.key.pem ${OCTAVIA_CERTS_DIR}/private/ + cp -p etc/octavia/certs/client_ca.cert.pem ${OCTAVIA_CERTS_DIR}/ + cp -p etc/octavia/certs/client.cert-and-key.pem ${OCTAVIA_CERTS_DIR}/private/ + popd + fi + + iniset $OCTAVIA_CONF certificates ca_certificate ${OCTAVIA_CERTS_DIR}/server_ca.cert.pem + iniset $OCTAVIA_CONF certificates ca_private_key ${OCTAVIA_CERTS_DIR}/private/server_ca.key.pem + iniset $OCTAVIA_CONF certificates ca_private_key_passphrase not-secure-passphrase + iniset $OCTAVIA_CONF controller_worker client_ca 
${OCTAVIA_CERTS_DIR}/client_ca.cert.pem
+    iniset $OCTAVIA_CONF haproxy_amphora client_cert ${OCTAVIA_CERTS_DIR}/private/client.cert-and-key.pem
+    iniset $OCTAVIA_CONF haproxy_amphora server_ca ${OCTAVIA_CERTS_DIR}/server_ca-chain.cert.pem
+
+    # Controller side symmetric encryption, not used for PKI
+    iniset $OCTAVIA_CONF certificates server_certs_key_passphrase insecure-key-do-not-use-this-key
+
+    if [[ "$OCTAVIA_USE_ADVANCED_RBAC" == "True" ]]; then
+        cp $OCTAVIA_DIR/etc/policy/octavia-advanced-rbac-policy.yaml $OCTAVIA_CONF_DIR/policy.yaml
+        iniset $OCTAVIA_CONF oslo_policy policy_file $OCTAVIA_CONF_DIR/policy.yaml
+    fi
+    if [[ "$OCTAVIA_USE_LEGACY_RBAC" == "True" ]]; then
+        cp $OCTAVIA_DIR/etc/policy/admin_or_owner-policy.yaml $OCTAVIA_CONF_DIR/policy.yaml
+        iniset $OCTAVIA_CONF oslo_policy policy_file $OCTAVIA_CONF_DIR/policy.yaml
+    fi
+    if [[ "$OCTAVIA_USE_KEYSTONE_DEFAULT_ROLES" == "True" ]]; then
+        cp $OCTAVIA_DIR/etc/policy/keystone_default_roles-policy.yaml $OCTAVIA_CONF_DIR/policy.yaml
+        iniset $OCTAVIA_CONF oslo_policy policy_file $OCTAVIA_CONF_DIR/policy.yaml
+    fi
+
+    _configure_octavia_apache_uwsgi
+
+    if [ $OCTAVIA_NODE == 'main' ]; then
+        configure_octavia_api_haproxy
+        # make sure octavia is reachable from haproxy
+        iniset $OCTAVIA_CONF api_settings bind_port ${OCTAVIA_HA_PORT}
+        iniset $OCTAVIA_CONF api_settings bind_host 0.0.0.0
+    fi
+    if [ $OCTAVIA_NODE != 'main' ] && [ $OCTAVIA_NODE != 'standalone' ] ; then
+        # make sure octavia is reachable from haproxy from main node
+        iniset $OCTAVIA_CONF api_settings bind_port ${OCTAVIA_HA_PORT}
+        iniset $OCTAVIA_CONF api_settings bind_host 0.0.0.0
+    fi
+
+    # set default graceful_shutdown_timeout to 300 sec (5 minutes)
+    # TODO(gthiemonge) update this value after persistent taskflow commits are
+    # merged
+    iniset $OCTAVIA_CONF DEFAULT graceful_shutdown_timeout 300
+}
+
+function create_mgmt_network_interface {
+    if [ $OCTAVIA_MGMT_PORT_IP != 'auto' ]; then
+        SUBNET_ID=$(openstack subnet show lb-mgmt-subnet -f value -c id)
+        PORT_FIXED_IP="--fixed-ip subnet=$SUBNET_ID,ip-address=$OCTAVIA_MGMT_PORT_IP"
+    fi
+
+    MGMT_PORT_ID=$(openstack port create --security-group lb-health-mgr-sec-grp --device-owner Octavia:health-mgr --host=$(hostname) -c id -f value --network lb-mgmt-net $PORT_FIXED_IP octavia-health-manager-$OCTAVIA_NODE-listen-port)
+    MGMT_PORT_MAC=$(openstack port show -c mac_address -f value $MGMT_PORT_ID)
+
+    MGMT_PORT_IP=$(openstack port show -f yaml -c fixed_ips $MGMT_PORT_ID | awk -v IP_VER=$SERVICE_IP_VERSION '{FS=",|";gsub(",","");gsub("'\''","");for(line = 1; line <= NF; ++line) {if ($line ~ /^.*- ip_address:/) {split($line, word, " ");if ((IP_VER == "4" || IP_VER == "") && word[3] ~ /\./) print word[3];if (IP_VER == "6" && word[3] ~ /:/) print word[3];} else {split($line, word, " ");for(ind in word) {if (word[ind] ~ /^ip_address=/) {split(word[ind], token, "=");if ((IP_VER == "4" || IP_VER == "") && token[2] ~ /\./) print token[2];if (IP_VER == "6" && token[2] ~ /:/) print token[2];}}}}}')
+
+    if function_exists octavia_create_network_interface_device ; then
+        octavia_create_network_interface_device o-hm0 $MGMT_PORT_ID $MGMT_PORT_MAC
+    else
+        die "Unknown network controller. Please define octavia_create_network_interface_device"
+    fi
+    sudo ip link set dev o-hm0 address $MGMT_PORT_MAC
+
+    function _get_firewall () {
+        # The devstack CI forces the use of iptables; the presence of the
+        # openstack-INPUT chain can be used to detect it.
+ if sudo iptables -L -n -v | grep openstack-INPUT; then + echo "iptables" + elif [[ -x $(which nft 2> /dev/null) ]]; then + echo "nft" + else + echo "iptables" + fi + } + + # Check if the host is using nftables, an alternative to iptables + if [[ $(_get_firewall) == "nft" ]]; then + sudo nft add table inet octavia + sudo nft add chain inet octavia o-hm0-incoming { type filter hook input priority 0\;} + sudo nft flush chain inet octavia o-hm0-incoming + # Note: Order is important here; each rule is inserted ahead of the + # drop rule, and counters are enabled because this is devstack for + # testing. + sudo nft insert rule inet octavia o-hm0-incoming iifname "o-hm0" counter log drop + sudo nft insert rule inet octavia o-hm0-incoming iifname "o-hm0" meta l4proto ipv6-icmp counter accept + sudo nft insert rule inet octavia o-hm0-incoming iifname "o-hm0" udp dport $OCTAVIA_HM_LISTEN_PORT counter accept + sudo nft insert rule inet octavia o-hm0-incoming iifname "o-hm0" udp dport $OCTAVIA_AMP_LOG_ADMIN_PORT counter accept + sudo nft insert rule inet octavia o-hm0-incoming iifname "o-hm0" udp dport $OCTAVIA_AMP_LOG_TENANT_PORT counter accept + sudo nft insert rule inet octavia o-hm0-incoming iifname "o-hm0" ct state related,established accept + else + if [ $SERVICE_IP_VERSION == '6' ] ; then + # Allow the required IPv6 ICMP messages + sudo ip6tables -I INPUT -i o-hm0 -p ipv6-icmp -j ACCEPT + sudo ip6tables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_HM_LISTEN_PORT -j ACCEPT + sudo ip6tables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_AMP_LOG_ADMIN_PORT -j ACCEPT + sudo ip6tables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_AMP_LOG_TENANT_PORT -j ACCEPT + else + sudo iptables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_HM_LISTEN_PORT -j ACCEPT + sudo iptables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_AMP_LOG_ADMIN_PORT -j ACCEPT + sudo iptables -I INPUT -i o-hm0 -p udp --dport $OCTAVIA_AMP_LOG_TENANT_PORT -j ACCEPT + fi + fi + + + if [ $OCTAVIA_CONTROLLER_IP_PORT_LIST == 'auto' ] ; then + iniset $OCTAVIA_CONF health_manager controller_ip_port_list $MGMT_PORT_IP:$OCTAVIA_HM_LISTEN_PORT + else + iniset $OCTAVIA_CONF health_manager controller_ip_port_list $OCTAVIA_CONTROLLER_IP_PORT_LIST + fi + + iniset $OCTAVIA_CONF health_manager bind_ip $MGMT_PORT_IP + iniset $OCTAVIA_CONF health_manager bind_port $OCTAVIA_HM_LISTEN_PORT + + iniset $OCTAVIA_CONF amphora_agent admin_log_targets "${MGMT_PORT_IP}:${OCTAVIA_AMP_LOG_ADMIN_PORT}" + iniset $OCTAVIA_CONF amphora_agent tenant_log_targets "${MGMT_PORT_IP}:${OCTAVIA_AMP_LOG_TENANT_PORT}" + # Setting these here as the devstack rsyslog configuration expects + # these values.
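+ # Note (illustrative, assumption about the mapping): the facility numbers + # set below select syslog LOG_LOCAL facilities, i.e. tenant flow logs are + # emitted to local0 and amphora administrative logs to local1, which the + # rsyslog offloading configuration installed by configure_rsyslog can then + # split into separate files.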
+ iniset $OCTAVIA_CONF amphora_agent user_log_facility 0 + iniset $OCTAVIA_CONF amphora_agent administrative_log_facility 1 + +} + +function build_mgmt_network { + # Create network and attach a subnet + openstack network create lb-mgmt-net + if [ $SERVICE_IP_VERSION == '6' ] ; then + openstack subnet create --subnet-range $OCTAVIA_MGMT_SUBNET_IPV6 --allocation-pool start=$OCTAVIA_MGMT_SUBNET_IPV6_START,end=$OCTAVIA_MGMT_SUBNET_IPV6_END --network lb-mgmt-net --ip-version 6 --gateway none --no-dhcp lb-mgmt-subnet + else + openstack subnet create --subnet-range $OCTAVIA_MGMT_SUBNET --allocation-pool start=$OCTAVIA_MGMT_SUBNET_START,end=$OCTAVIA_MGMT_SUBNET_END --network lb-mgmt-net --gateway none --no-dhcp lb-mgmt-subnet + fi + + # Create security group and rules + # Used for the amphora lb-mgmt-net ports + openstack security group create lb-mgmt-sec-grp + if [ $SERVICE_IP_VERSION == '6' ] ; then + openstack security group rule create --protocol ipv6-icmp --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp + openstack security group rule create --protocol tcp --dst-port 22 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp + openstack security group rule create --protocol tcp --dst-port 9443 --ethertype IPv6 --remote-ip ::/0 lb-mgmt-sec-grp + else + openstack security group rule create --protocol icmp lb-mgmt-sec-grp + openstack security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp + openstack security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp + fi + + # Create security group and rules + # Used for the health manager port + openstack security group create lb-health-mgr-sec-grp + if [ $SERVICE_IP_VERSION == '6' ] ; then + openstack security group rule create --protocol ipv6-icmp --ethertype IPv6 --remote-ip ::/0 lb-health-mgr-sec-grp + openstack security group rule create --protocol udp --dst-port $OCTAVIA_HM_LISTEN_PORT --ethertype IPv6 --remote-ip ::/0 lb-health-mgr-sec-grp + openstack security group rule create --protocol udp --dst-port $OCTAVIA_AMP_LOG_ADMIN_PORT --ethertype IPv6 --remote-ip ::/0 lb-health-mgr-sec-grp + openstack security group rule create --protocol udp --dst-port $OCTAVIA_AMP_LOG_TENANT_PORT --ethertype IPv6 --remote-ip ::/0 lb-health-mgr-sec-grp + else + openstack security group rule create --protocol udp --dst-port $OCTAVIA_HM_LISTEN_PORT lb-health-mgr-sec-grp + openstack security group rule create --protocol udp --dst-port $OCTAVIA_AMP_LOG_ADMIN_PORT lb-health-mgr-sec-grp + openstack security group rule create --protocol udp --dst-port $OCTAVIA_AMP_LOG_TENANT_PORT lb-health-mgr-sec-grp + fi +} + +function configure_lb_mgmt_sec_grp { + OCTAVIA_MGMT_SEC_GRP_ID=$(openstack security group show lb-mgmt-sec-grp -f value -c id) + iniset ${OCTAVIA_CONF} controller_worker amp_secgroup_list ${OCTAVIA_MGMT_SEC_GRP_ID} +} + +function create_amphora_flavor { + disk_size=${OCTAVIA_AMP_IMAGE_SIZE:-2} + if [[ "$OCTAVIA_AMP_IMAGE_ARCH" =~ (aarch64|arm64) ]]; then + # DIB produces images larger than size specified, add another GB to the flavor disk + # See https://bugs.launchpad.net/diskimage-builder/+bug/1918461 + disk_size=$((disk_size + 1)) + fi + # Pass even if it exists to avoid race condition on multinode + openstack flavor create --ram 1024 --disk $disk_size --vcpus 1 --private m1.amphora -f value -c id --property hw_rng:allowed=True || true + amp_flavor_id=$(openstack flavor show m1.amphora -f value -c id) + iniset $OCTAVIA_CONF controller_worker amp_flavor_id $amp_flavor_id +} + +function configure_octavia_api_haproxy { + + install_package 
haproxy + + cp ${OCTAVIA_DIR}/devstack/etc/octavia/haproxy.cfg ${OCTAVIA_CONF_DIR}/haproxy.cfg + + sed -i.bak "s/OCTAVIA_PORT/${OCTAVIA_PORT}/" ${OCTAVIA_CONF_DIR}/haproxy.cfg + + NODES=(${OCTAVIA_NODES//,/ }) + + for NODE in ${NODES[@]}; do + DATA=(${NODE//:/ }) + NAME=$(echo -e "${DATA[0]}" | tr -d '[[:space:]]') + IP=$(echo -e "${DATA[1]}" | tr -d '[[:space:]]') + echo " server octavia-${NAME} ${IP}:80 weight 1" >> ${OCTAVIA_CONF_DIR}/haproxy.cfg + done + +} + +function configure_rsyslog { + sudo mkdir -pm 775 /var/log/octavia + sudo chgrp syslog /var/log/octavia + + sudo cp ${OCTAVIA_DIR}/devstack/etc/rsyslog/10-octavia-log-offloading.conf /etc/rsyslog.d/ + sudo sed -e " + s|%ADMIN_PORT%|${OCTAVIA_AMP_LOG_ADMIN_PORT}|g; + s|%TENANT_PORT%|${OCTAVIA_AMP_LOG_TENANT_PORT}|g; + " -i /etc/rsyslog.d/10-octavia-log-offloading.conf + + # Temporary backward compatibility symbolic link. + # Remove in the next "I" cycle + sudo touch /var/log/octavia/octavia-tenant-traffic.log + sudo chmod 664 /var/log/octavia/octavia-tenant-traffic.log + sudo chgrp syslog /var/log/octavia/octavia-tenant-traffic.log + sudo ln -fs /var/log/octavia/octavia-tenant-traffic.log /var/log/octavia-tenant-traffic.log + + sudo touch /var/log/octavia/octavia-amphora.log + sudo chmod 664 /var/log/octavia/octavia-amphora.log + sudo chgrp syslog /var/log/octavia/octavia-amphora.log + sudo ln -fs /var/log/octavia/octavia-amphora.log /var/log/octavia-amphora.log +} + +function octavia_start { + # octavia-specific start actions + if [ $OCTAVIA_NODE != 'api' ] ; then + # This is probably out of scope here? Load it from config + MGMT_PORT_IP=$(iniget $OCTAVIA_CONF health_manager bind_ip) + + if [ $SERVICE_IP_VERSION == '6' ] ; then + MGMT_SUBNET_ARRAY=(${OCTAVIA_MGMT_SUBNET_IPV6//// }) + else + MGMT_SUBNET_ARRAY=(${OCTAVIA_MGMT_SUBNET//// }) + fi + MGMT_SUBNET_MASK=${MGMT_SUBNET_ARRAY[1]} + + sudo ip addr add $MGMT_PORT_IP/$MGMT_SUBNET_MASK dev o-hm0 + sudo ip link set o-hm0 up + fi + + if [ $OCTAVIA_NODE == 'main' ]; then + run_process $OCTAVIA_API_HAPROXY "/usr/sbin/haproxy -db -V -f ${OCTAVIA_CONF_DIR}/haproxy.cfg" + fi + + _start_octavia_apache_wsgi + + run_process $OCTAVIA_DRIVER_AGENT "$OCTAVIA_DRIVER_AGENT_BINARY $OCTAVIA_DRIVER_AGENT_ARGS" + run_process $OCTAVIA_CONSUMER "$OCTAVIA_CONSUMER_BINARY $OCTAVIA_CONSUMER_ARGS" + run_process $OCTAVIA_HOUSEKEEPER "$OCTAVIA_HOUSEKEEPER_BINARY $OCTAVIA_HOUSEKEEPER_ARGS" + run_process $OCTAVIA_HEALTHMANAGER "$OCTAVIA_HEALTHMANAGER_BINARY $OCTAVIA_HEALTHMANAGER_ARGS" + + restart_service rsyslog +} + +function octavia_stop { + # octavia-specific stop actions + _stop_octavia_apache_wsgi + + stop_process $OCTAVIA_DRIVER_AGENT + stop_process $OCTAVIA_CONSUMER + stop_process $OCTAVIA_HOUSEKEEPER + stop_process $OCTAVIA_HEALTHMANAGER + + # TODO(johnsom) Remove this in 2025.2 release so upgrades will stop this + # process. + # Kill dhclient process started for o-hm0 interface + pids=$(ps aux | awk '/[o]-hm0/ { print $2 }') + [ ! 
-z "$pids" ] && sudo kill $pids + + if function_exists octavia_delete_network_interface_device ; then + octavia_delete_network_interface_device o-hm0 + fi + + # Grenade upgrades need the IP address removed here + MGMT_PORT_IP=$(iniget $OCTAVIA_CONF health_manager bind_ip) + if [ $SERVICE_IP_VERSION == '6' ] ; then + MGMT_SUBNET_ARRAY=(${OCTAVIA_MGMT_SUBNET_IPV6//// }) + else + MGMT_SUBNET_ARRAY=(${OCTAVIA_MGMT_SUBNET//// }) + fi + MGMT_SUBNET_MASK=${MGMT_SUBNET_ARRAY[1]} + sudo ip addr del $MGMT_PORT_IP/$MGMT_SUBNET_MASK dev o-hm0 + + if [[ $NEUTRON_AGENT == "linuxbridge" || $Q_AGENT == "linuxbridge" ]]; then + # This elif can go away in the X cycle, needed for grenade old/new logic + if ip link show o-hm0 ; then + sudo ip link del o-hm0 + fi + fi + + if [[ ${OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD} == True ]]; then + stop_redis + fi +} + +function octavia_cleanup { + + if [ ${OCTAVIA_AMP_IMAGE_NAME}x != x ] ; then + rm -rf ${OCTAVIA_AMP_IMAGE_NAME}* + fi + if [ ${OCTAVIA_AMP_SSH_KEY_NAME}x != x ] ; then + rm -f ${OCTAVIA_AMP_SSH_KEY_NAME}* + fi + if [ ${OCTAVIA_SSH_DIR}x != x ] ; then + rm -rf ${OCTAVIA_SSH_DIR} + fi + if [ ${OCTAVIA_CONF_DIR}x != x ] ; then + sudo rm -rf ${OCTAVIA_CONF_DIR} + fi + if [ ${OCTAVIA_RUN_DIR}x != x ] ; then + sudo rm -rf ${OCTAVIA_RUN_DIR} + fi + if [ ${OCTAVIA_AMP_SSH_KEY_PATH}x != x ] ; then + rm -f ${OCTAVIA_AMP_SSH_KEY_PATH} ${OCTAVIA_AMP_SSH_KEY_PATH}.pub + fi + if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then + if [ ${OCTAVIA_AMP_SSH_KEY_NAME}x != x ] ; then + openstack keypair delete ${OCTAVIA_AMP_SSH_KEY_NAME} || true + fi + fi + + _cleanup_octavia_apache_wsgi + + sudo rm -rf $OCTAVIA_DIR/bin/dual_ca + sudo rm -rf $OCTAVIA_DIR/bin/single_ca + + sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR + + if [[ ${OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD} == True ]]; then + uninstall_redis + fi + + sudo rm -f /etc/rsyslog.d/10-octavia-log-offloading.conf + restart_service rsyslog + + # Remove compatibility symbolic links + sudo rm -f /var/log/octavia-tenant-traffic.log + sudo rm -f /var/log/octavia-amphora.log +} + +function add_load-balancer_roles { + get_or_create_role load-balancer_observer + get_or_create_role load-balancer_global_observer + get_or_create_role load-balancer_member + get_or_create_role load-balancer_admin + get_or_create_role load-balancer_quota_admin + get_or_add_user_project_role load-balancer_member demo demo +} + +function octavia_init { + if [ $OCTAVIA_NODE != 'main' ] && [ $OCTAVIA_NODE != 'standalone' ] && [ $OCTAVIA_NODE != 'api' ]; then + # without the other services enabled apparently we don't have + # credentials at this point +# TOP_DIR=$(cd $(dirname "$0") && pwd) + source ${TOP_DIR}/openrc admin admin + OCTAVIA_AMP_NETWORK_ID=$(openstack network show lb-mgmt-net -f value -c id) + iniset $OCTAVIA_CONF controller_worker amp_boot_network_list ${OCTAVIA_AMP_NETWORK_ID} + fi + + if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then + # things that should only happen on the ha main node / or once + if ! 
openstack keypair show ${OCTAVIA_AMP_SSH_KEY_NAME} ; then + openstack keypair create --public-key ${OCTAVIA_AMP_SSH_KEY_PATH}.pub ${OCTAVIA_AMP_SSH_KEY_NAME} + fi + + # Check if an amphora image is already loaded + AMPHORA_IMAGE_NAME=$(openstack image list --property name=${OCTAVIA_AMP_IMAGE_NAME} -f value -c Name) + export AMPHORA_IMAGE_NAME + + if [ "$AMPHORA_IMAGE_NAME" == ${OCTAVIA_AMP_IMAGE_NAME} ]; then + echo "Found existing amphora image: $AMPHORA_IMAGE_NAME" + echo "Skipping amphora image build" + export DISABLE_AMP_IMAGE_BUILD=True + fi + + if ! [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then + build_octavia_worker_image + fi + + OCTAVIA_AMP_IMAGE_ID=$(openstack image list -f value --property name=${OCTAVIA_AMP_IMAGE_NAME} -c ID) + + if [ -n "$OCTAVIA_AMP_IMAGE_ID" ]; then + # Normalize architecture + # https://docs.openstack.org/nova/latest/configuration/config.html#filter_scheduler.image_properties_default_architecture + hw_arch=${OCTAVIA_AMP_IMAGE_ARCH:-x86_64} + if [[ "$OCTAVIA_AMP_IMAGE_ARCH" == "amd64" ]]; then + hw_arch="x86_64" + elif [[ "$OCTAVIA_AMP_IMAGE_ARCH" == "arm64" ]]; then + hw_arch="aarch64" + fi + openstack image set --tag ${OCTAVIA_AMP_IMAGE_TAG} --property hw_architecture=${hw_arch} --property hw_rng_model=virtio ${OCTAVIA_AMP_IMAGE_ID} + fi + + # Create a management network. + build_mgmt_network + OCTAVIA_AMP_NETWORK_ID=$(openstack network show lb-mgmt-net -f value -c id) + iniset $OCTAVIA_CONF controller_worker amp_boot_network_list ${OCTAVIA_AMP_NETWORK_ID} + + create_octavia_accounts + + add_load-balancer_roles + elif [ $OCTAVIA_NODE == 'api' ] ; then + create_octavia_accounts + + add_load-balancer_roles + fi + + if [ $OCTAVIA_NODE != 'api' ] ; then + create_mgmt_network_interface + create_amphora_flavor + configure_lb_mgmt_sec_grp + configure_rsyslog + fi + + if ! [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then + set_octavia_worker_image_owner_id + fi +} + +function _configure_tempest { + iniset $TEMPEST_CONFIG service_available octavia "True" +} + +# check for service enabled +if is_service_enabled $OCTAVIA; then + if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then # main-ha node stuff only + if ! 
is_service_enabled $NEUTRON_ANY; then + die "The neutron-api/q-svc service must be enabled to use $OCTAVIA" + fi + + if [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then + echo "Found DISABLE_AMP_IMAGE_BUILD == True" + echo "Skipping amphora image build" + fi + + fi + + if [[ "$1" == "stack" && "$2" == "install" ]]; then + # Perform installation of service source + echo_summary "Installing octavia" + octavia_lib_install + octavia_install + octaviaclient_install + + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # Configure after the other layer 1 and 2 services have been configured + # TODO: need to make sure this runs after LBaaS V2 configuration + echo_summary "Configuring octavia" + octavia_configure + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize and start the octavia service + echo_summary "Initializing Octavia" + octavia_init + + echo_summary "Starting Octavia" + octavia_start + elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then + if is_service_enabled tempest; then + # Configure Tempest for Octavia + _configure_tempest + fi + fi +fi + +if [[ "$1" == "unstack" ]]; then + # Shut down Octavia services + if is_service_enabled $OCTAVIA; then + echo_summary "Stopping octavia" + octavia_stop + fi +fi + +if [[ "$1" == "clean" ]]; then + # Remember clean.sh first calls unstack.sh + if is_service_enabled $OCTAVIA; then + echo_summary "Cleaning up octavia" + octavia_cleanup + fi +fi + +if [[ $saveenv =~ e ]]; then + set -e +else + set +e +fi +if [[ $saveenv =~ x ]]; then + set -x +else + set +x +fi diff --git a/devstack/pregenerated/certs/client_ca.cert.pem b/devstack/pregenerated/certs/client_ca.cert.pem new file mode 100644 index 0000000000..0b9e837abf --- /dev/null +++ b/devstack/pregenerated/certs/client_ca.cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDjTCCAnWgAwIBAgIJAPJtDNgcwPTZMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMQ8wDQYDVQQIDAZEZW5pYWwxFDASBgNVBAcMC1NwcmluZ2ZpZWxkMQww +CgYDVQQKDANEaXMxGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbTAgFw0xNjEwMTQx +MzQzNDJaGA8yMDY2MTAwMjEzNDM0MlowXDELMAkGA1UEBhMCVVMxDzANBgNVBAgM +BkRlbmlhbDEUMBIGA1UEBwwLU3ByaW5nZmllbGQxDDAKBgNVBAoMA0RpczEYMBYG +A1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAxptZMcFHFsCXWUxWNOkXXARCvAkZ7MeXDAyKzadWup9Trzn3qdz1h6+e +VbPBYTiJeuvX7RWpfN3lhFqy9Y+Fu0ip98zZE7ZjbvUx13BQBkXiJpqsYIoD6IK1 +Lh4J9Exllzy7bTQ0f/IX1yrRztXkpRM5KvcbfUrGAMEy4SW6Idc6ZI+lwxvVIhqZ +KXAyTBg4f8hMhPO5RYFyaxS2PdNDaTLrvb1aDiuYLqcpDcr4/0YSg0iejklMHovC +oLK/uEFgRGYDSX+Os1CUdtnVzLpkFHZtomtEB0kUug4lZpGQckappLq+dWNTu43O +tJzbEa9lpYT8P/nie94tBQYx5+HgSwIDAQABo1AwTjAdBgNVHQ4EFgQUBpJ+Zoky +aGdQtMu9NzcoqOPc+yMwHwYDVR0jBBgwFoAUBpJ+ZokyaGdQtMu9NzcoqOPc+yMw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAJe8mlfQ69kyrIuIdbTtg +Kl7ndj7MGQnmNfxytBB5gqUFwswEPKs4VTp3Pp+EStJZxJ8qeeG9B+g3oU3Rhpqc +CDhIyCW8shE2ACKLl0zRRk91LDyXASI4UyvjgN71Ti91VZ3oPVvTIefG6CMeI9oD +Spl6TbPzCOl2rFrTWmdwM3qIVpmhGntdWnA6btga6Fz7dRwUPwycJyhzfLmnjRlQ +3+QxmF2T5iIYw4B1Lsiz1uy27egMuq2M4Hvd2pSGhCB9l/3ZmEXvbF1aFVcnoEHH +/aHqOCx2fQTty1M+qnvofs1dNJlyyxq2LuE4r4wocSTRVfexaichhtsSkjQJ60w1 +VA== +-----END CERTIFICATE----- diff --git a/devstack/pregenerated/certs/private/client.cert-and-key.pem b/devstack/pregenerated/certs/private/client.cert-and-key.pem new file mode 100644 index 0000000000..13696aa718 --- /dev/null +++ b/devstack/pregenerated/certs/private/client.cert-and-key.pem @@ -0,0 +1,109 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 1 (0x1) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=Denial,
L=Springfield, O=Dis, CN=www.example.com + Validity + Not Before: Oct 14 13:43:42 2016 GMT + Not After : Oct 2 13:43:42 2066 GMT + Subject: C=US, ST=Denial, O=Dis, CN=www.example.com + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:d3:57:2f:a9:3b:cb:e3:71:ef:db:42:f0:af:c8: + 58:95:39:65:93:48:d7:c0:71:db:6b:11:95:3c:92: + 01:fa:d6:32:ed:83:53:a6:b7:3f:f1:f4:ba:65:42: + f0:b6:53:69:48:94:08:ae:2c:f5:80:53:24:e0:98: + 31:21:74:e3:f5:ef:c8:77:76:80:89:02:52:9f:9d: + 69:f0:b1:e5:83:55:6f:ec:dd:aa:e7:92:09:d1:a1: + 17:e4:cc:42:69:13:82:42:3c:71:e2:d4:e8:22:5f: + b1:74:c9:2c:31:0a:70:5c:42:f7:77:d1:e1:76:83: + 8f:f1:a2:06:20:55:e3:ea:fa:65:5c:83:89:7e:32: + 20:8b:45:2a:51:0b:34:f1:f5:77:15:7b:fc:f0:6d: + e4:34:7d:54:8e:8a:f3:0a:a6:f1:7f:d6:65:2b:b3: + ef:82:17:31:97:f4:71:5f:67:d7:80:11:d5:43:82: + 2f:0f:4e:39:49:45:0c:a8:8e:1a:29:7a:4e:bf:94: + c8:af:42:2f:9f:bb:e9:43:18:f3:a6:9a:e8:c8:ad: + eb:df:2c:94:fe:2b:a7:60:27:fc:b8:1a:3c:2e:6e: + f3:60:51:e7:0c:53:70:de:88:b6:6d:6c:6a:21:17: + 0a:17:d3:e5:94:fc:13:79:33:8f:6d:e7:89:b7:66: + 7e:29 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + OpenSSL Generated Certificate + X509v3 Subject Key Identifier: + 73:7C:07:15:F5:CD:48:57:D5:D2:79:DF:0E:C4:E8:7A:29:1F:03:16 + X509v3 Authority Key Identifier: + keyid:06:92:7E:66:89:32:68:67:50:B4:CB:BD:37:37:28:A8:E3:DC:FB:23 + + Signature Algorithm: sha256WithRSAEncryption + be:89:f1:3e:5e:3b:72:80:96:8b:74:d5:1d:06:14:02:d9:35: + b5:41:ed:6e:43:c1:d6:b9:1d:07:08:4e:c6:93:d0:a2:b8:93: + 81:71:34:d1:a8:f5:1d:d5:48:6f:14:af:14:65:69:1b:4e:9f: + 87:25:a2:62:fa:99:fd:c1:e6:ce:fb:87:44:38:b7:8b:c7:1d: + 88:0e:61:2f:14:d8:61:b2:bd:01:b5:a6:6c:11:76:b6:57:a1: + 03:cb:6a:8e:dc:97:25:33:75:49:a9:44:d7:08:6c:3d:ae:2e: + fe:4e:69:47:c1:3b:43:6a:fe:89:10:9f:3a:7f:7b:28:61:3b: + 4a:62:bb:c3:7f:01:7b:90:e1:38:e2:83:b4:c1:4f:ac:5a:12: + 9b:5e:4b:64:9e:50:d8:6f:79:7f:8f:f1:4e:4b:eb:9e:0e:b7: + 64:36:9c:cf:bc:7c:bd:a4:1e:37:a6:5f:2e:b1:24:88:50:cc: + 68:91:95:b8:9e:2a:00:5a:fb:28:eb:a1:9b:4f:54:cd:01:d3: + 90:34:b4:5d:aa:db:2e:90:37:0b:a6:8c:3c:80:43:c2:88:2d: + 00:b1:a1:5d:fe:4e:98:02:57:5f:fb:fc:78:7d:59:04:96:9c: + 2a:1a:be:ca:5b:87:2b:66:bc:55:6f:14:dd:85:e1:b5:4b:6f: + f7:c7:dd:eb +-----BEGIN CERTIFICATE----- +MIIDmjCCAoKgAwIBAgIBATANBgkqhkiG9w0BAQsFADBcMQswCQYDVQQGEwJVUzEP +MA0GA1UECAwGRGVuaWFsMRQwEgYDVQQHDAtTcHJpbmdmaWVsZDEMMAoGA1UECgwD +RGlzMRgwFgYDVQQDDA93d3cuZXhhbXBsZS5jb20wIBcNMTYxMDE0MTM0MzQyWhgP +MjA2NjEwMDIxMzQzNDJaMEYxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIDAZEZW5pYWwx +DDAKBgNVBAoMA0RpczEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA01cvqTvL43Hv20Lwr8hYlTllk0jXwHHb +axGVPJIB+tYy7YNTprc/8fS6ZULwtlNpSJQIriz1gFMk4JgxIXTj9e/Id3aAiQJS +n51p8LHlg1Vv7N2q55IJ0aEX5MxCaROCQjxx4tToIl+xdMksMQpwXEL3d9HhdoOP +8aIGIFXj6vplXIOJfjIgi0UqUQs08fV3FXv88G3kNH1UjorzCqbxf9ZlK7Pvghcx +l/RxX2fXgBHVQ4IvD045SUUMqI4aKXpOv5TIr0Ivn7vpQxjzpproyK3r3yyU/iun +YCf8uBo8Lm7zYFHnDFNw3oi2bWxqIRcKF9PllPwTeTOPbeeJt2Z+KQIDAQABo3sw +eTAJBgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBD +ZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUc3wHFfXNSFfV0nnfDsToeikfAxYwHwYDVR0j +BBgwFoAUBpJ+ZokyaGdQtMu9NzcoqOPc+yMwDQYJKoZIhvcNAQELBQADggEBAL6J +8T5eO3KAlot01R0GFALZNbVB7W5Dwda5HQcITsaT0KK4k4FxNNGo9R3VSG8UrxRl +aRtOn4clomL6mf3B5s77h0Q4t4vHHYgOYS8U2GGyvQG1pmwRdrZXoQPLao7clyUz +dUmpRNcIbD2uLv5OaUfBO0Nq/okQnzp/eyhhO0piu8N/AXuQ4Tjig7TBT6xaEpte +S2SeUNhveX+P8U5L654Ot2Q2nM+8fL2kHjemXy6xJIhQzGiRlbieKgBa+yjroZtP 
+VM0B05A0tF2q2y6QNwumjDyAQ8KILQCxoV3+TpgCV1/7/Hh9WQSWnCoavspbhytm +vFVvFN2F4bVLb/fH3es= +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDTVy+pO8vjce/b +QvCvyFiVOWWTSNfAcdtrEZU8kgH61jLtg1Omtz/x9LplQvC2U2lIlAiuLPWAUyTg +mDEhdOP178h3doCJAlKfnWnwseWDVW/s3arnkgnRoRfkzEJpE4JCPHHi1OgiX7F0 +ySwxCnBcQvd30eF2g4/xogYgVePq+mVcg4l+MiCLRSpRCzTx9XcVe/zwbeQ0fVSO +ivMKpvF/1mUrs++CFzGX9HFfZ9eAEdVDgi8PTjlJRQyojhopek6/lMivQi+fu+lD +GPOmmujIrevfLJT+K6dgJ/y4GjwubvNgUecMU3DeiLZtbGohFwoX0+WU/BN5M49t +54m3Zn4pAgMBAAECggEAZu5MwUDlYaZJauHkdci/FBa7WQueQRVzB2et5q06F6Ah +d7qBkG4pz78g1VbQBA0F9xpaS/KLs29LQ7P8Ic5bhJm/aiemHJSsBx9UzKzoGpoP +BC9GILjo3Vd3WrD9G04sH/Ruh0qosK0osbeVNWFfLiBThOEMzXrwLYB7OV57viJI +4YAXGOzOgK3aMHF8cYRRgTDIi2dGAMH1EyIIB8gKYlp1PdMmaTOk2LBhechuImRX +4LgvM1fUdJ7utyQKEXMJEg+wzV9BMlX6nvM3vVWdYZy2Hsu9DDyJUFYQk9cDpXNP +RF4jjLUtz6gEZOlotOQgPWqLANJrt/BdVfyeA97psQKBgQD7SeNlQd2bu8GfH0vB +mjzSWmJ3nDnpeaUR9MIYVQ6zNlvYPjM2BMVQtE5+VWK15YOjD5L9SoresNKubrSv +wzNFeqf6Dvq7zJ+6Rkst7GcRV/P3D4C3ZeKeDNjVm4eMRCa5ttIJlLmfqffeLO9M +RSanNjnjwWENgsXCCvlVBfc9ZQKBgQDXTY8X9ug9xVlqBR4TMfzXBadzP+nDqYd9 +MkH3tEltLba0vP4vKyjQa8A9FMzSRr9bv13mNpAbFEDGnhzv1l5OlHTM6tG//Rxq +nnhmFLFWZl8WowP0LiPTafrDjGEX/7iDAJjAtSacBBm6EGaM8igWEQT0WXwsQbTw +rlRolJ5DdQKBgQDgMBJ80x+IAiGC+iPXLOjYbqTsu2d7YfigJXJIzRHZV0Tnjs6X +gfgbwVFKKplvWL1xa8Ki0a9FcBH2Z3QyXv9OHFjiohyWEb/rKy2FYiSt938Dy0P1 +2yMsCKAnKqPqwx6dj3qh65sT1Er8X7B6pjMO+TT6ehtBN4uBS9MYRMNIdQKBgQDU +6UztTOzDUSqn7mGcZ916IYxDK1wXcsmapB2aQD4wanl4aEEREiQtX7DednhKJU5N +A4RvCVweezvHbkp9Xscp/CM5FanQqxPz17yGbkYkg93au+BIE2y4P+CMioDlw6uK +WQe14i5JMMDkQB25mirMD46PuQJTnbK6JBsyxG1xlQKBgGtcSY0AyVq00p0kkxNm +KhzI+17T0j0CuczJ/X+NvCUjLsx3NTJatRkJNYHWG7jUqs1vvtZbHVspQeteMlEi +rNE/xz98iG2eC8AdW+TmZvySmIZgoAoPuopUvBzRiyfLQVh4pPuikbTDghEn+CSG +WSyOd/I4JsH4xQFJC89nnm5M +-----END PRIVATE KEY----- diff --git a/devstack/pregenerated/certs/private/server_ca.key.pem b/devstack/pregenerated/certs/private/server_ca.key.pem new file mode 100644 index 0000000000..757c97881f --- /dev/null +++ b/devstack/pregenerated/certs/private/server_ca.key.pem @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,B6C2D5A9657E9635BE06551CAD6EF969 + +N90cGt5rEntmiPvIAQwbO9W02blpDRZLJYMJeqttqxttnq6+InYQL3M4nJmR8XVz +/bCjWhMQlh5kEKzBtjhu5xFXqYhF3q9UcA6/13VY4gicrSHwwpoVLP0X2IXFp6ub +t4haSggaH6F2ZxF9DJCVG6+GyqOpuTPlGD4QiEf40NTo7x2H+JCEveLsIaSUljTV +W/XZDk1RSo8hMpr+huqCQOZxfhEuM76gSK8wPW3nCzVoBMCk/1RpMcXq8A7FT9gd +0V+2jwucDPOEVrTLmYjh/Aln6ATdte2l/b9XKPnAoVW6psYw83pu2hXtjgfCI+ey +IbRvzJ9djPvx0qhEu/EQIcKLFfNt/+OExm7rce8+O6NcB1x+bFbvCLamPYQxtcjE +xjqOWD0QT+VtIdqnG631jctN2mocmhVWfmp6le1RlkwfKSsbS1lb6Lcj/TasTlai +5c6hfYB83drlJUw0374PuWn8Tb62HGaROK8JEG07CcgNT1l8KXHrCpLzwEQvRtP+ +Bze+mlbjScm21ny280huQz5hiNdDrH9q/YzVHcHEVICAnimEsZeaQCyEt0Um9h56 +gvTZ6Udh/SeetBsL77hQ3EwDYs2nNdacaOIu5tASrfdMXWdSiLiNR8zK7y7x4a0b +GrgrerYJPWdb2axy4rrhzzlPRTHCJL1gA/E3CYC5mObk07tCMoQt7Ak3dofto9jG +1CSRLGqbP31k7tXBOLCwNAYekQkDWRQV4u0vf2aWJdLjxLwiX7424E6p/cvaUi5B +Sv+Iit3Zuee7Tq6DK0rv+5oWZmyfC/rzHcqmAMUhnjfBBlcI1N22BrBEBpfX6zq1 +DnIwiS9ayJMzaExSS+tBuqoHuoLMo2Fn++NpYxIUrwtQBvAD1Qxqx6QacTGFK025 +UpyV/ML+FdENujwU6KYYdciHX3E7nU4UYC/qwT7u9B/k3OiTS37GSlnz4ZkU34cF +UiBcN2gXqYYxsonD37vUX40oTjrQYaQJbWcGgcyNw7Z5U4GV7t1ZFcxNBuE485pE +jqZiDkeP5zmk+r9AB7djUpcowQ0TpPs1SthPsllv/LidusA8DwmeGp063fa1wScv +gH6iJ40HRc7ffwN4ikk409L8awjpSA+HyXC+BsjIaG9uyaoy6XpjjQHrl/kZgeS2 +Nm3wvq00OFKYLi8UgmXlrRNMyNc/osTSAesdJeaiNHUM/+nrdTL1SaOvht/6i07B +bG7Vqv3LtpWvd8fDhSPR/1eiBaYBzDJ+jx25oX5Wbv4/AbsG5/BEgfrBJnMddPyv 
+Y8X6LY3IpUqRx1sf1L3ia3YxWp5r3bfcCQvVL0W6brEKxbw8BTHFrS3qaBOOfLrC +XuiMKEUcSlexxYnYcJr1RnBYQ4HqcAOCbqQAhXqFv5nge+5gSskP8MS/FtGZ0+nm +wi2ak3WmZbpr08mVnjHVhhxnuuVm7esYhNJLwXvSITXfUPPgpjvzYe0ABLdtWVuo +s4NsU/1XG33I4r+gnrHQyFxsgaZ3rr5VpcbTHLzDzBgTRWk06AZB/nxyfAexE67U +VHRL+4FP+ee5CxpWkT8i0/n2PJ/U/42+pglZmxEzIw76PqcT0aqmnpSwsEnnMH0w +-----END RSA PRIVATE KEY----- diff --git a/devstack/pregenerated/certs/server_ca-chain.cert.pem b/devstack/pregenerated/certs/server_ca-chain.cert.pem new file mode 100644 index 0000000000..0b9e837abf --- /dev/null +++ b/devstack/pregenerated/certs/server_ca-chain.cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDjTCCAnWgAwIBAgIJAPJtDNgcwPTZMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMQ8wDQYDVQQIDAZEZW5pYWwxFDASBgNVBAcMC1NwcmluZ2ZpZWxkMQww +CgYDVQQKDANEaXMxGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbTAgFw0xNjEwMTQx +MzQzNDJaGA8yMDY2MTAwMjEzNDM0MlowXDELMAkGA1UEBhMCVVMxDzANBgNVBAgM +BkRlbmlhbDEUMBIGA1UEBwwLU3ByaW5nZmllbGQxDDAKBgNVBAoMA0RpczEYMBYG +A1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAxptZMcFHFsCXWUxWNOkXXARCvAkZ7MeXDAyKzadWup9Trzn3qdz1h6+e +VbPBYTiJeuvX7RWpfN3lhFqy9Y+Fu0ip98zZE7ZjbvUx13BQBkXiJpqsYIoD6IK1 +Lh4J9Exllzy7bTQ0f/IX1yrRztXkpRM5KvcbfUrGAMEy4SW6Idc6ZI+lwxvVIhqZ +KXAyTBg4f8hMhPO5RYFyaxS2PdNDaTLrvb1aDiuYLqcpDcr4/0YSg0iejklMHovC +oLK/uEFgRGYDSX+Os1CUdtnVzLpkFHZtomtEB0kUug4lZpGQckappLq+dWNTu43O +tJzbEa9lpYT8P/nie94tBQYx5+HgSwIDAQABo1AwTjAdBgNVHQ4EFgQUBpJ+Zoky +aGdQtMu9NzcoqOPc+yMwHwYDVR0jBBgwFoAUBpJ+ZokyaGdQtMu9NzcoqOPc+yMw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAJe8mlfQ69kyrIuIdbTtg +Kl7ndj7MGQnmNfxytBB5gqUFwswEPKs4VTp3Pp+EStJZxJ8qeeG9B+g3oU3Rhpqc +CDhIyCW8shE2ACKLl0zRRk91LDyXASI4UyvjgN71Ti91VZ3oPVvTIefG6CMeI9oD +Spl6TbPzCOl2rFrTWmdwM3qIVpmhGntdWnA6btga6Fz7dRwUPwycJyhzfLmnjRlQ +3+QxmF2T5iIYw4B1Lsiz1uy27egMuq2M4Hvd2pSGhCB9l/3ZmEXvbF1aFVcnoEHH +/aHqOCx2fQTty1M+qnvofs1dNJlyyxq2LuE4r4wocSTRVfexaichhtsSkjQJ60w1 +VA== +-----END CERTIFICATE----- diff --git a/devstack/pregenerated/certs/server_ca.cert.pem b/devstack/pregenerated/certs/server_ca.cert.pem new file mode 100644 index 0000000000..0b9e837abf --- /dev/null +++ b/devstack/pregenerated/certs/server_ca.cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDjTCCAnWgAwIBAgIJAPJtDNgcwPTZMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMQ8wDQYDVQQIDAZEZW5pYWwxFDASBgNVBAcMC1NwcmluZ2ZpZWxkMQww +CgYDVQQKDANEaXMxGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbTAgFw0xNjEwMTQx +MzQzNDJaGA8yMDY2MTAwMjEzNDM0MlowXDELMAkGA1UEBhMCVVMxDzANBgNVBAgM +BkRlbmlhbDEUMBIGA1UEBwwLU3ByaW5nZmllbGQxDDAKBgNVBAoMA0RpczEYMBYG +A1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAxptZMcFHFsCXWUxWNOkXXARCvAkZ7MeXDAyKzadWup9Trzn3qdz1h6+e +VbPBYTiJeuvX7RWpfN3lhFqy9Y+Fu0ip98zZE7ZjbvUx13BQBkXiJpqsYIoD6IK1 +Lh4J9Exllzy7bTQ0f/IX1yrRztXkpRM5KvcbfUrGAMEy4SW6Idc6ZI+lwxvVIhqZ +KXAyTBg4f8hMhPO5RYFyaxS2PdNDaTLrvb1aDiuYLqcpDcr4/0YSg0iejklMHovC +oLK/uEFgRGYDSX+Os1CUdtnVzLpkFHZtomtEB0kUug4lZpGQckappLq+dWNTu43O +tJzbEa9lpYT8P/nie94tBQYx5+HgSwIDAQABo1AwTjAdBgNVHQ4EFgQUBpJ+Zoky +aGdQtMu9NzcoqOPc+yMwHwYDVR0jBBgwFoAUBpJ+ZokyaGdQtMu9NzcoqOPc+yMw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAJe8mlfQ69kyrIuIdbTtg +Kl7ndj7MGQnmNfxytBB5gqUFwswEPKs4VTp3Pp+EStJZxJ8qeeG9B+g3oU3Rhpqc +CDhIyCW8shE2ACKLl0zRRk91LDyXASI4UyvjgN71Ti91VZ3oPVvTIefG6CMeI9oD +Spl6TbPzCOl2rFrTWmdwM3qIVpmhGntdWnA6btga6Fz7dRwUPwycJyhzfLmnjRlQ +3+QxmF2T5iIYw4B1Lsiz1uy27egMuq2M4Hvd2pSGhCB9l/3ZmEXvbF1aFVcnoEHH +/aHqOCx2fQTty1M+qnvofs1dNJlyyxq2LuE4r4wocSTRVfexaichhtsSkjQJ60w1 +VA== +-----END CERTIFICATE----- diff --git a/devstack/pregenerated/regenerate-certs.sh 
b/devstack/pregenerated/regenerate-certs.sh new file mode 100755 index 0000000000..bd6e9af95d --- /dev/null +++ b/devstack/pregenerated/regenerate-certs.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +GEN_DIR=/tmp/certs +rm -rf $GEN_DIR +bash ../../bin/create_certificates.sh $GEN_DIR $(pwd)/../../etc/certificates/openssl.cnf +for file in client.key client.pem ca_01.pem private/cakey.pem; do + cp -v $GEN_DIR/$file certs/$file +done + +echo "" +echo Validating client cert with CA: +openssl verify -verbose -CAfile certs/ca_01.pem certs/client.pem + +echo "" +echo CA expiration time: +openssl x509 -enddate -noout -in certs/ca_01.pem + +echo "" +echo Client cert expiration time: +openssl x509 -enddate -noout -in certs/client.pem + diff --git a/devstack/pregenerated/ssh-keys/octavia_ssh_key b/devstack/pregenerated/ssh-keys/octavia_ssh_key new file mode 100644 index 0000000000..52c66a8b7e --- /dev/null +++ b/devstack/pregenerated/ssh-keys/octavia_ssh_key @@ -0,0 +1,28 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA/V449K2GRGBMypMBVkIBZRfDFVDUeJvEebVlCuNW33bmcblS +x8LB3+oCclhhfToCrtHO5Hndk2oMCJyQRojCcuox4Uauq5I+0cIo1mowmdlqFIDP +7YQEJVnJZQah96F468LY3dc9fyp+2Y3XVeeOjY3ChuBSUaQUb58aNH7lSgS/QwHv +/6sYRmej16CBmYK+NQlxgBFShA9M1F+DNVBnk229iP2+uwfmQyCAv188Ts/tDb3e +974QOmv+vJqJo0nnJIYQd0jOlLIiDfHC3+JsWslYGk8YbBeLsxkdljnLHpIY47pb +i6L4Sy993tlb/2XfbCyw+L+dFoZhynNxyt/c4QIDAQABAoIBAFDaqq5aWcikOp1C +wGB4e9148cZxnvxGKTL10iLhXa2+Udfk3iflXN1J3jIDRkkiJA0J405CHZWXd/Of +kuMPbY4icnyDg+Y4q1dg8ItMI+pU2Wdlm/Ud9fy9ZGma7kEKBH6oFXDl6TgVpZlj +jF5boMBHhtZn650mEWd1jHVIMX+m1Z3lA9dA3qsDTLDmh5IPeH4InWumCn59qw3Z +lMu8cKZLpiAJNEx428P0DbOMpTMgmgFIrRFMQeMRHukxf1X6UeHS3UgHUmTnA2jG +IbGJShNQywxI1pAJKR6BgUJqxZZ1ukcWl8gO4bedkaTejJWIp65KwI7xMNPgYQEO +V+8PfGECgYEA/v9r/ypQzkUEsxyNUqKhJ/02rgSSGdzQT52Fi82O1e2j63PbRPBL +izkA9LkDoxz2RDnG6H3BFfj0QrCbDiV2DqtxBp+xu+mua60JysnjoTRQo1rXS/kr +cDLsNL0q3s/dBNwUCwyoveHdX5V72E5ueqY/vhRbjHV66hzNno7ryqUCgYEA/l0p +LIovymkpqG9wAquvyQXLbQk5qx71CXX0yjip5BEcPmQrEIbV0CwUtL3wKmuHx8xR +dyyvTwSYWANHFVzB85itpAnRdJcRz02SU/4Qq2pMXbp/6oBK3CwAW0xp0l3k4Yol ++SnfZkaQ8jcNDSb5oYxjsl0Jj40T7V3MTCd4QI0CgYEAoUYYHqy7qIl8PG+9bdsP +g8QhFhQr9xFx6jidIttiECkZOCvxLPuxO59U3HI7O6lwk5vbEmWeffATRC6AEoVc +0lBZzq+ncEqOFum8vLXNMsJskbQ9YH55m5+JRp2xhHQAvDcYshhSjK1SHkbjqd2J +ACcvP1+Ouxn+IB0RasvHk0UCgYEAgDhd5QHTjWjtguaJxA7fkanGHbSkyUnVo2s+ +diGSIlEtt5Wuz6noZgOSfHmycu+5hlHMTxLLXD2ovdUJJA+aBT1Vanc4ilkMtT8Z +IBXWOVJgJG86w+7fzZSwqVUfkteZ5MdK1Qryfg/cSPzPK24WMAUgzGxxwVcQUHsT +3N+YkpECgYB4fzJ10b4ZuYYQRSUAxcfQXTqAR1LH9WS0axGQhJrpxtUe9Jur1eJV +NF+o9kcAhFqVCuoJXFn/puDqsYNz4MBYHMXd8S7DVbdOyZs0h/F3lLyTmWS99tjt +cG7xtFl7/75WcbgITcJSbeACKGpC6g6U2vFF5IeM4wA0gOwY1G24fw== +-----END RSA PRIVATE KEY----- + diff --git a/devstack/pregenerated/ssh-keys/octavia_ssh_key.pub b/devstack/pregenerated/ssh-keys/octavia_ssh_key.pub new file mode 100644 index 0000000000..2334218725 --- /dev/null +++ b/devstack/pregenerated/ssh-keys/octavia_ssh_key.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD9Xjj0rYZEYEzKkwFWQgFlF8MVUNR4m8R5tWUK41bfduZxuVLHwsHf6gJyWGF9OgKu0c7ked2TagwInJBGiMJy6jHhRq6rkj7RwijWajCZ2WoUgM/thAQlWcllBqH3oXjrwtjd1z1/Kn7ZjddV546NjcKG4FJRpBRvnxo0fuVKBL9DAe//qxhGZ6PXoIGZgr41CXGAEVKED0zUX4M1UGeTbb2I/b67B+ZDIIC/XzxOz+0Nvd73vhA6a/68momjSeckhhB3SM6UsiIN8cLf4mxayVgaTxhsF4uzGR2WOcsekhjjuluLovhLL33e2Vv/Zd9sLLD4v50WhmHKc3HK39zh vagrant@main diff --git a/devstack/samples/README-Vagrant.md b/devstack/samples/README-Vagrant.md new file mode 100644 index 0000000000..d9a4cf51fb --- /dev/null +++ b/devstack/samples/README-Vagrant.md @@ -0,0 +1,86 @@ +This file describes how 
to use Vagrant (http://www.vagrantup.com) to +create a devstack virtual environment that contains two nova instances +running a simple web server and a working Neutron LBaaS Version 2 load +balancer backed by Octavia. + +1) Install vagrant on your host machine. Vagrant is available for +Windows, Mac OS, and most Linux distributions. Download and install +the package appropriate for your system. On Ubuntu, simply type: + + sudo apt-get install vagrant + +2) Copy the contents of this directory to any appropriate directory + + mkdir $HOME/lbaas-octavia-vagrant # or any other appropriate directory + cp -rfp * $HOME/lbaas-octavia-vagrant + +3) Continue with either the single node deployment (6GB RAM minimum) or the +multinode deployment (12GB RAM minimum). + +Single node deployment +~~~~~~~~~~~~~~~~~~~~~~ + +1) Create and deploy the environment VM + + cd $HOME/lbaas-octavia-vagrant/singlenode + vagrant up + + Alternatively, you can specify the number of vcpus or memory: + VM_CPUS=4 VM_MEMORY=8192 vagrant up + +2) Wait for the vagrant VM to boot and install, typically 20-30 minutes + +3) SSH into the vagrant box + + vagrant ssh + +4) Continue with the common section below + +Multinode +~~~~~~~~~ + +This will create an environment where the Octavia services are replicated +across two nodes. An haproxy instance runs in front of the Octavia API to +distribute traffic between both API servers and to provide failure tolerance. + +Please note that the database is a single mysql instance, with no clustering. + +1) Create and deploy the environment VMs + + cd $HOME/lbaas-octavia-vagrant/multinode + vagrant up main + +2) Wait for the main node to be deployed, and then start the second node + + vagrant up second + +3) Log in to the main node, and run local-manual.sh now that everything is + deployed + + vagrant ssh main + cd devstack + ./local-manual.sh + logout + +4) SSH into any of the vagrant boxes: + + vagrant ssh main + vagrant ssh second + +5) Continue with the common section below + +Common to multinode and single node +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1) Determine the load balancer VIP: + + source openrc admin admin + openstack loadbalancer show lb1 -f value -c vip_address + +2) Make HTTP requests to test your load balancer: + + curl <vip_address> + +where <vip_address> is the VIP address for lb1. Subsequent invocations of +"curl <vip_address>" should demonstrate that the load balancer is alternating +between the two member nodes.
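+ +For example, a quick test loop (illustrative; substitute the VIP address +printed by the show command above for <vip_address>): + + for i in 1 2 3 4; do curl http://<vip_address>/; done + +The responses should alternate between the two "Welcome to ..." pages served +by the member instances, confirming round-robin balancing.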
diff --git a/devstack/samples/multinode/Vagrantfile b/devstack/samples/multinode/Vagrantfile new file mode 100644 index 0000000000..20d4b8399c --- /dev/null +++ b/devstack/samples/multinode/Vagrantfile @@ -0,0 +1,50 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +require '../providers.rb' + +Vagrant.configure(2) do |config| + + config.vm.define 'main' do |main| + configure_providers(main.vm) + main.vm.network "private_network", ip:"192.168.42.10" + main.vm.hostname = "main" + main.vm.provision "shell", privileged: false, inline: <<-SHELL + #!/usr/bin/env bash + set -e + + sudo apt-get update + sudo apt-get -y upgrade + sudo apt-get -y install git + + git clone https://opendev.org/openstack/devstack + cp /vagrant/local.conf ~/devstack + cp /vagrant/local.sh ~/devstack/local-manual.sh + cp /vagrant/webserver.sh ~/devstack + cd ~/devstack + ./stack.sh + + SHELL + end + + config.vm.define 'second' do |second| + configure_providers(second.vm) + second.vm.network "private_network", ip:"192.168.42.11" + second.vm.hostname = "second" + second.vm.provision "shell", privileged: false, inline: <<-SHELL + #!/usr/bin/env bash + set -e + + sudo apt-get update + sudo apt-get -y upgrade + sudo apt-get -y install git + + git clone https://opendev.org/openstack/devstack + cp /vagrant/local-2.conf ~/devstack/local.conf + + cd ~/devstack + ./stack.sh + SHELL + end + +end diff --git a/devstack/samples/multinode/local-2.conf b/devstack/samples/multinode/local-2.conf new file mode 100644 index 0000000000..0950b4b831 --- /dev/null +++ b/devstack/samples/multinode/local-2.conf @@ -0,0 +1,65 @@ +[[local|localrc]] + +# The name of the RECLONE environment variable is a bit misleading. It doesn't actually +# reclone repositories, rather it uses git fetch to make sure the repos are current. + +RECLONE=True + +# Load the external LBaaS plugin. + +enable_plugin neutron https://opendev.org/openstack/neutron +enable_plugin octavia https://opendev.org/openstack/octavia + +LIBS_FROM_GIT+=python-octaviaclient +DATABASE_PASSWORD=password +ADMIN_PASSWORD=password +SERVICE_PASSWORD=password +SERVICE_TOKEN=password +RABBIT_PASSWORD=password +# Enable Logging +LOGFILE=$DEST/logs/stack.sh.log +VERBOSE=True +LOG_COLOR=True + +# Nova +enable_service n-cpu + +# Neutron +enable_service neutron +enable_service neutron-agent +enable_service neutron-qos + +# LBaaS V2 and Octavia +enable_service octavia +enable_service o-api +enable_service o-cw +enable_service o-hm +enable_service o-hk + +OCTAVIA_USE_PREGENERATED_CERTS=True +OCTAVIA_USE_PREGENERATED_SSH_KEY=True +OCTAVIA_CONTROLLER_IP_PORT_LIST=192.168.0.3:5555,192.168.0.4:5555 +OCTAVIA_NODE=second + +# we are not enabling the mysql service here, but this is necessary +# to get the connection string constructed +DATABASE_TYPE=mysql + +NEUTRON_CORE_PLUGIN=ml2 +Q_ML2_TENANT_NETWORK_TYPE=vxlan + +LOGFILE=$DEST/logs/stack.sh.log + +# Old log files are automatically removed after 7 days to keep things neat. Change +# the number of days by setting ``LOGDAYS``. 
+LOGDAYS=2 + +HOST_IP=192.168.42.11 +SERVICE_HOST=192.168.42.10 +MULTI_HOST=1 +NEUTRON_SERVICE_HOST=$SERVICE_HOST +MYSQL_HOST=$SERVICE_HOST +RABBIT_HOST=$SERVICE_HOST +GLANCE_HOSTPORT=$SERVICE_HOST:9292 +NOVA_VNC_ENABLED=True +NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" diff --git a/devstack/samples/multinode/local.conf b/devstack/samples/multinode/local.conf new file mode 100644 index 0000000000..5cb7da4cdc --- /dev/null +++ b/devstack/samples/multinode/local.conf @@ -0,0 +1,81 @@ +[[local|localrc]] + +# The name of the RECLONE environment variable is a bit misleading. It doesn't actually +# reclone repositories, rather it uses git fetch to make sure the repos are current. + +RECLONE=True + +# Load the external Octavia plugin. + +enable_plugin barbican https://opendev.org/openstack/barbican +enable_plugin neutron https://opendev.org/openstack/neutron +enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard +enable_plugin octavia https://opendev.org/openstack/octavia + +LIBS_FROM_GIT+=python-octaviaclient +DATABASE_PASSWORD=password +ADMIN_PASSWORD=password +SERVICE_PASSWORD=password +SERVICE_TOKEN=password +RABBIT_PASSWORD=password +# Enable Logging +LOGFILE=$DEST/logs/stack.sh.log +VERBOSE=True +LOG_COLOR=True + +# Pre-requisites +enable_service rabbit +enable_service mysql +enable_service key + +# Horizon +enable_service horizon + +# Nova +enable_service n-api +enable_service n-cpu +enable_service n-cond +enable_service n-sch + +# Placement service needed for Nova +enable_service placement-api +enable_service placement-client + +# Glance +enable_service g-api + +# Neutron +enable_service neutron +enable_service neutron-api +enable_service neutron-agent +enable_service neutron-dhcp +enable_service neutron-l3 +enable_service neutron-metadata-agent +enable_service neutron-qos + +# Octavia +enable_service octavia +enable_service o-cw +enable_service o-hm +enable_service o-hk +enable_service o-api +enable_service o-api-ha +enable_service o-da + +OCTAVIA_USE_PREGENERATED_CERTS=True +OCTAVIA_USE_PREGENERATED_SSH_KEY=True +OCTAVIA_CONTROLLER_IP_PORT_LIST=192.168.0.3:5555,192.168.0.4:5555 +OCTAVIA_NODE=main +OCTAVIA_NODES=main:192.168.42.10,second:192.168.42.11 + +NEUTRON_CORE_PLUGIN=ml2 +Q_ML2_TENANT_NETWORK_TYPE=vxlan + +LOGFILE=$DEST/logs/stack.sh.log + +# Old log files are automatically removed after 7 days to keep things neat. Change +# the number of days by setting ``LOGDAYS``.
+LOGDAYS=2 + +HOST_IP=192.168.42.10 +MULTI_HOST=1 diff --git a/devstack/samples/multinode/local.sh b/devstack/samples/multinode/local.sh new file mode 120000 index 0000000000..7b7badde4f --- /dev/null +++ b/devstack/samples/multinode/local.sh @@ -0,0 +1 @@ +../singlenode/local.sh \ No newline at end of file diff --git a/devstack/samples/multinode/webserver.sh b/devstack/samples/multinode/webserver.sh new file mode 120000 index 0000000000..cc3dfe6db9 --- /dev/null +++ b/devstack/samples/multinode/webserver.sh @@ -0,0 +1 @@ +../singlenode/webserver.sh \ No newline at end of file diff --git a/devstack/samples/providers.rb b/devstack/samples/providers.rb new file mode 100644 index 0000000000..d02876f067 --- /dev/null +++ b/devstack/samples/providers.rb @@ -0,0 +1,22 @@ +# defaults +VM_MEMORY = ENV['VM_MEMORY'] || "8192" +VM_CPUS = ENV['VM_CPUS'] || "1" + +def configure_providers(vm) + + vm.provider "virtualbox" do |vb, config| + config.vm.box = "ubuntu/bionic64" + vb.gui = true + vb.memory = VM_MEMORY + vb.cpus = VM_CPUS + end + + vm.provider "libvirt" do |lb, config| + config.vm.box = "celebdor/bionic64" + config.vm.synced_folder './', '/vagrant', type: 'rsync' + lb.nested = true + lb.memory = VM_MEMORY + lb.cpus = VM_CPUS + lb.suspend_mode = 'managedsave' + end +end diff --git a/devstack/samples/singlenode/Vagrantfile b/devstack/samples/singlenode/Vagrantfile new file mode 100644 index 0000000000..f7b65b1228 --- /dev/null +++ b/devstack/samples/singlenode/Vagrantfile @@ -0,0 +1,96 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. +Vagrant.configure(2) do |config| + + # defaults + VM_MEMORY = ENV['VM_MEMORY'] || "8192" + VM_CPUS = ENV['VM_CPUS'] || "1" + + # The most common configuration options are documented and commented below. + # For a complete reference, please see the online documentation at + # https://docs.vagrantup.com. + + # Every Vagrant development environment requires a box. You can search for + # boxes at https://atlas.hashicorp.com/search. + config.vm.box = "ubuntu/xenial64" + + # Disable automatic box update checking. If you disable this, then + # boxes will only be checked for updates when the user runs + # `vagrant box outdated`. This is not recommended. + # config.vm.box_check_update = false + + # Create a forwarded port mapping which allows access to a specific port + # within the machine from a port on the host machine. In the example below, + # accessing "localhost:8080" will access port 80 on the guest machine. + #config.vm.network "forwarded_port", guest: 80, host: 8080 + + # Create a private network, which allows host-only access to the machine + # using a specific IP. + # config.vm.network "private_network", ip: "192.168.33.10" + + # Create a public network, which generally matched to bridged network. + # Bridged networks make the machine appear as another physical device on + # your network. + # config.vm.network "public_network" + + # Share an additional folder to the guest VM. The first argument is + # the path on the host to the actual folder. The second argument is + # the path on the guest to mount the folder. And the optional third + # argument is a set of non-required options. + # config.vm.synced_folder "../data", "/vagrant_data" + + # Provider-specific configuration so you can fine-tune various + # backing providers for Vagrant. 
These expose provider-specific options. + # Example for VirtualBox: + # + config.vm.provider "virtualbox" do |vb| + # Display the VirtualBox GUI when booting the machine + vb.gui = true + # Customize the amount of memory on the VM: + vb.memory = VM_MEMORY + vb.cpus = VM_CPUS + end + + config.vm.provider "libvirt" do |lb, config| + config.vm.box = "celebdor/xenial64" + config.vm.synced_folder './', '/vagrant', type: 'rsync' + lb.nested = true + lb.memory = VM_MEMORY + lb.cpus = VM_CPUS + lb.suspend_mode = 'managedsave' + end + + # + # View the documentation for the provider you are using for more + # information on available options + # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies + # such as FTP and Heroku are also available. See the documentation at + # https://docs.vagrantup.com/v2/push/atlas.html for more information. + # config.push.define "atlas" do |push| + # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" + # end + + # Enable provisioning with a shell script. Additional provisioners such as + # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the + # documentation for more information about their specific syntax and use. + config.vm.provision "shell", privileged: false, inline: <<-SHELL + + #!/usr/bin/env bash + + sudo apt-get update + sudo apt-get -y upgrade + sudo apt-get -y install git + + git clone https://opendev.org/openstack/devstack + cp /vagrant/local.* /vagrant/webserver.sh ~/devstack + cd ~/devstack + ./stack.sh + + SHELL + +end diff --git a/devstack/samples/singlenode/local.conf b/devstack/samples/singlenode/local.conf new file mode 100644 index 0000000000..b3f6a23d59 --- /dev/null +++ b/devstack/samples/singlenode/local.conf @@ -0,0 +1,88 @@ +# Sample ``local.conf`` that builds a devstack with neutron LBaaS Version 2 + +# NOTE: Copy this file to the root DevStack directory for it to work properly. + +# ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``. +# This gives it the ability to override any variables set in ``stackrc``. +# Also, most of the settings in ``stack.sh`` are written to only be set if no +# value has already been set; this lets ``local.conf`` effectively override the +# default values. + +# The ``localrc`` section replaces the old ``localrc`` configuration file. +# Note that if ``localrc`` is present it will be used in favor of this section. + +[[local|localrc]] + +# The name of the RECLONE environment variable is a bit misleading. It doesn't actually +# reclone repositories, rather it uses git fetch to make sure the repos are current. + +RECLONE=True + +# Load the external Octavia plugin. 
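+# enable_plugin takes a plugin name, a git URL and an optional gitref; for +# example (illustrative, assuming such a branch exists), a stable branch +# could be pinned instead of master: +# enable_plugin octavia https://opendev.org/openstack/octavia stable/2025.1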
+ +enable_plugin barbican https://opendev.org/openstack/barbican +enable_plugin neutron https://opendev.org/openstack/neutron +enable_plugin octavia https://opendev.org/openstack/octavia +enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard + +LIBS_FROM_GIT+=python-octaviaclient +DATABASE_PASSWORD=password +ADMIN_PASSWORD=password +SERVICE_PASSWORD=password +SERVICE_TOKEN=password +RABBIT_PASSWORD=password +# Enable Logging +LOGFILE=$DEST/logs/stack.sh.log +VERBOSE=True +LOG_COLOR=True + +# Pre-requisites +enable_service rabbit +enable_service mysql +enable_service key + +# Horizon +enable_service horizon + +# Nova +enable_service n-api +enable_service n-cpu +enable_service n-cond +enable_service n-sch + +# Placement service needed for Nova +enable_service placement-api +enable_service placement-client + +# Glance +enable_service g-api + +# Neutron +enable_service neutron +enable_service neutron-api +enable_service neutron-agent +enable_service neutron-dhcp +enable_service neutron-l3 +enable_service neutron-metadata-agent +enable_service neutron-qos + +# Octavia +enable_service octavia +enable_service o-cw +enable_service o-hm +enable_service o-hk +enable_service o-api +enable_service o-da + + +# enable DVR + +NEUTRON_CORE_PLUGIN=ml2 +Q_ML2_TENANT_NETWORK_TYPE=vxlan +Q_DVR_MODE=dvr_snat + +LOGFILE=$DEST/logs/stack.sh.log + +# Old log files are automatically removed after 7 days to keep things neat. Change +# the number of days by setting ``LOGDAYS``. +LOGDAYS=2 diff --git a/devstack/samples/singlenode/local.sh b/devstack/samples/singlenode/local.sh new file mode 100755 index 0000000000..bb363ef55f --- /dev/null +++ b/devstack/samples/singlenode/local.sh @@ -0,0 +1,101 @@ +#!/usr/bin/env bash +set -ex + +# Sample ``local.sh`` that configures two simple webserver instances and sets +# up a Neutron LBaaS Version 2 loadbalancer backed by Octavia. 
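+# +# In outline: the script boots two Nova instances running a trivial web +# server, then creates a load balancer, a listener, a pool and two members, +# waiting for lb1 to return to ACTIVE between steps.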
+ +# Keep track of the DevStack directory +TOP_DIR=$(cd $(dirname "$0") && pwd) +BOOT_DELAY=60 + +# Import common functions +source ${TOP_DIR}/functions + +# Use openrc + stackrc for settings +source ${TOP_DIR}/stackrc + +# Destination path for installation ``DEST`` +DEST=${DEST:-/opt/stack} + +# Polling functions +function wait_for_loadbalancer_active { + lb_name=$1 + while [ $(openstack loadbalancer show $lb_name -f value -c provisioning_status) != "ACTIVE" ]; do + sleep 2 + done +} + +if is_service_enabled nova; then + + # Unset DOMAIN env variables that are not needed for keystone v2 and set OpenStack demo user auth + unset OS_USER_DOMAIN_ID + unset OS_PROJECT_DOMAIN_ID + source ${TOP_DIR}/openrc demo demo + + # Create an SSH key to use for the instances + DEVSTACK_LBAAS_SSH_KEY_NAME=DEVSTACK_LBAAS_SSH_KEY_RSA + DEVSTACK_LBAAS_SSH_KEY_DIR=${TOP_DIR} + DEVSTACK_LBAAS_SSH_KEY=${DEVSTACK_LBAAS_SSH_KEY_DIR}/${DEVSTACK_LBAAS_SSH_KEY_NAME} + rm -f ${DEVSTACK_LBAAS_SSH_KEY}.pub ${DEVSTACK_LBAAS_SSH_KEY} + ssh-keygen -b 2048 -t rsa -f ${DEVSTACK_LBAAS_SSH_KEY} -N "" + openstack keypair create --public-key=${DEVSTACK_LBAAS_SSH_KEY}.pub ${DEVSTACK_LBAAS_SSH_KEY_NAME} + + # Add tcp/22,80 and icmp to default security group + openstack security group rule create --protocol tcp --dst-port 22:22 default + openstack security group rule create --protocol tcp --dst-port 80:80 default + openstack security group rule create --protocol icmp default + + # Boot some instances + NOVA_BOOT_ARGS="--key-name ${DEVSTACK_LBAAS_SSH_KEY_NAME} --image $(openstack image show cirros-0.5.1-x86_64-disk -f value -c id) --flavor 1 --nic net-id=$(openstack network show private -f value -c id)" + + openstack server create ${NOVA_BOOT_ARGS} node1 + openstack server create ${NOVA_BOOT_ARGS} node2 + + echo "Waiting ${BOOT_DELAY} seconds for instances to boot" + sleep ${BOOT_DELAY} + + IP1=$(openstack server show node1 | awk '/private/ {ip = substr($4, 9, length($4)-9) ; if (ip ~ "\\.") print ip ; else print $5}') + IP2=$(openstack server show node2 | awk '/private/ {ip = substr($4, 9, length($4)-9) ; if (ip ~ "\\.") print ip ; else print $5}') + + touch ~/.ssh/known_hosts + + ssh-keygen -R ${IP1} + ssh-keygen -R ${IP2} + + + # Get Neutron router namespace details + NAMESPACE_NAME='qrouter-'$(openstack router show router1 -f value -c id) + NAMESPACE_CMD_PREFIX='sudo ip netns exec' + + # Run a simple web server on the instances + chmod 0755 ${TOP_DIR}/webserver.sh + $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME scp -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no ${TOP_DIR}/webserver.sh cirros@${IP1}:webserver.sh + $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME scp -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no ${TOP_DIR}/webserver.sh cirros@${IP2}:webserver.sh + $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME ssh -o UserKnownHostsFile=/dev/null -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no -q cirros@${IP1} "screen -d -m sh webserver.sh" + $NAMESPACE_CMD_PREFIX $NAMESPACE_NAME ssh -o UserKnownHostsFile=/dev/null -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no -q cirros@${IP2} "screen -d -m sh webserver.sh" + +fi + +if is_service_enabled octavia; then + + SUBNET_ID=$(openstack subnet show private-subnet -f value -c id) + openstack loadbalancer create --name lb1 --vip-subnet-id $SUBNET_ID + wait_for_loadbalancer_active lb1 + + openstack loadbalancer listener create lb1 --protocol HTTP --protocol-port 80 --name listener1 + wait_for_loadbalancer_active lb1 + + openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN 
--listener listener1 --protocol HTTP --name pool1 + wait_for_loadbalancer_active lb1 + + openstack loadbalancer member create --subnet-id $SUBNET_ID --address ${IP1} --protocol-port 80 pool1 + wait_for_loadbalancer_active lb1 + + openstack loadbalancer member create --subnet-id $SUBNET_ID --address ${IP2} --protocol-port 80 pool1 + +fi + +echo "How to test load balancing:" +echo "" +echo "${NAMESPACE_CMD_PREFIX} ${NAMESPACE_NAME} curl $(openstack loadbalancer show lb1 -f value -c vip_address)" +echo "" diff --git a/devstack/samples/singlenode/webserver.sh b/devstack/samples/singlenode/webserver.sh new file mode 100644 index 0000000000..724fc85788 --- /dev/null +++ b/devstack/samples/singlenode/webserver.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +MYIP=$(/sbin/ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}'); +OUTPUT_STR="Welcome to $MYIP\r" +OUTPUT_LEN=${#OUTPUT_STR} + +while true; do + echo -e "HTTP/1.0 200 OK\r\nContent-Length: ${OUTPUT_LEN}\r\n\r\n${OUTPUT_STR}" | sudo nc -l -p 80 +done diff --git a/devstack/settings b/devstack/settings new file mode 100644 index 0000000000..b7b425383f --- /dev/null +++ b/devstack/settings @@ -0,0 +1,113 @@ +# settings for octavia devstack plugin + +OCTAVIA=${OCTAVIA:-"octavia"} +OCTAVIA_DIR=${OCTAVIA_DIR:-"${DEST}/octavia"} +DISKIMAGE_BUILDER_REPO_URL=${DISKIMAGE_BUILDER_REPO_URL:-${GIT_BASE}/openstack/diskimage-builder.git} +DISKIMAGE_BUILDER_REPO_REF=${DISKIMAGE_BUILDER_REPO_REF:-master} +DISKIMAGE_BUILDER_DIR=$DEST/diskimage-builder +OCTAVIA_BIN_DIR=${OCTAVIA_BIN_DIR:-$(get_python_exec_prefix)} +OCTAVIA_CONF_DIR=${OCTAVIA_CONF_DIR:-"/etc/octavia"} +OCTAVIA_SSH_DIR=${OCTAVIA_SSH_DIR:-${OCTAVIA_CONF_DIR}/.ssh} +OCTAVIA_CERTS_DIR=${OCTAVIA_CERTS_DIR:-${OCTAVIA_CONF_DIR}/certs} +OCTAVIA_CONF=${OCTAVIA_CONF:-${OCTAVIA_CONF_DIR}/octavia.conf} +OCTAVIA_AUDIT_MAP=${OCTAVIA_AUDIT_MAP:-${OCTAVIA_CONF_DIR}/octavia_api_audit_map.conf} +OCTAVIA_RUN_DIR=${OCTAVIA_RUN_DIR:-"/var/run/octavia"} + +OCTAVIA_AMPHORA_DRIVER=${OCTAVIA_AMPHORA_DRIVER:-"amphora_haproxy_rest_driver"} +OCTAVIA_NETWORK_DRIVER=${OCTAVIA_NETWORK_DRIVER:-"allowed_address_pairs_driver"} +OCTAVIA_COMPUTE_DRIVER=${OCTAVIA_COMPUTE_DRIVER:-"compute_nova_driver"} +OCTAVIA_VOLUME_DRIVER=${OCTAVIA_VOLUME_DRIVER:-"volume_noop_driver"} +OCTAVIA_IMAGE_DRIVER=${OCTAVIA_IMAGE_DRIVER:-"image_glance_driver"} + +OCTAVIA_USERNAME=${OCTAVIA_ADMIN_USER:-"admin"} +OCTAVIA_PASSWORD=${OCTAVIA_PASSWORD:-${ADMIN_PASSWORD}} +OCTAVIA_PROJECT_NAME=${OCTAVIA_PROJECT_NAME:-$OCTAVIA_USERNAME} +OCTAVIA_USER_DOMAIN_NAME=${OCTAVIA_USER_DOMAIN_NAME:-"Default"} +OCTAVIA_PROJECT_DOMAIN_NAME=${OCTAVIA_PROJECT_DOMAIN_NAME:-"Default"} + +OCTAVIA_PROTOCOL=${OCTAVIA_PROTOCOL:-$SERVICE_PROTOCOL} +OCTAVIA_PORT=${OCTAVIA_PORT:-"9876"} +OCTAVIA_HA_PORT=${OCTAVIA_HA_PORT:-"9875"} +OCTAVIA_HM_LISTEN_PORT=${OCTAVIA_HM_LISTEN_PORT:-"5555"} + +OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=${OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD:-False} +OCTAVIA_JOBBOARD_EXPIRATION_TIME=${OCTAVIA_JOBBOARD_EXPIRATION_TIME:-30} +OCTAVIA_JOBBOARD_BACKEND=${OCTAVIA_JOBBOARD_BACKEND:-redis} + +OCTAVIA_MGMT_SUBNET=${OCTAVIA_MGMT_SUBNET:-"192.168.0.0/24"} +OCTAVIA_MGMT_SUBNET_START=${OCTAVIA_MGMT_SUBNET_START:-"192.168.0.2"} +OCTAVIA_MGMT_SUBNET_END=${OCTAVIA_MGMT_SUBNET_END:-"192.168.0.200"} +OCTAVIA_MGMT_SUBNET_IPV6=${OCTAVIA_MGMT_SUBNET_IPV6:-"fd00:0:0:42::/64"} +OCTAVIA_MGMT_SUBNET_IPV6_START=${OCTAVIA_MGMT_SUBNET_IPV6_START:-"fd00:0:0:42:0:0:0:2"} +OCTAVIA_MGMT_SUBNET_IPV6_END=${OCTAVIA_MGMT_SUBNET_IPV6_END:-"fd00:0:0:42:ffff:ffff:ffff:ffff"} + 
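+# Most settings in this file use the shell ${VAR:-default} idiom, so they can +# be overridden by exporting the variable (or setting it in local.conf) +# before the plugin is sourced, e.g. (illustrative value): +# OCTAVIA_MGMT_SUBNET=192.168.100.0/24 +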
+OCTAVIA_AMP_SSH_KEY_BITS=${OCTAVIA_SSH_KEY_BITS:-"2048"} +OCTAVIA_AMP_SSH_KEY_TYPE=${OCTAVIA_SSH_KEY_TYPE:-"rsa"} +OCTAVIA_AMP_SSH_KEY_PATH=${OCTAVIA_SSH_KEY_PATH:-${OCTAVIA_SSH_DIR}/octavia_ssh_key} +OCTAVIA_AMP_SSH_KEY_NAME=${OCTAVIA_AMP_SSH_KEY_NAME:-"octavia_ssh_key"} + +OCTAVIA_AMP_FLAVOR_ID=${OCTAVIA_AMP_FLAVOR_ID:-"10"} +OCTAVIA_AMP_IMAGE_NAME=${OCTAVIA_AMP_IMAGE_NAME:-"amphora-x64-haproxy"} +OCTAVIA_AMP_IMAGE_FILE=${OCTAVIA_AMP_IMAGE_FILE:-${OCTAVIA_DIR}/diskimage-create/${OCTAVIA_AMP_IMAGE_NAME}.qcow2} +OCTAVIA_AMP_IMAGE_TAG="amphora" +OCTAVIA_AMP_IMAGE_ARCH=${OCTAVIA_AMP_IMAGE_ARCH:-"amd64"} + +OCTAVIA_AMP_CONN_TIMEOUT=${OCTAVIA_AMP_CONN_TIMEOUT:-"10"} +OCTAVIA_AMP_READ_TIMEOUT=${OCTAVIA_AMP_READ_TIMEOUT:-"120"} + +OCTAVIA_AMP_LOG_ADMIN_PORT=${OCTAVIA_AMP_LOG_ADMIN_PORT:="10514"} +OCTAVIA_AMP_LOG_TENANT_PORT=${OCTAVIA_AMP_LOG_TENANT_PORT:="20514"} + +OCTAVIA_HEALTH_KEY=${OCTAVIA_HEALTH_KEY:-"insecure"} + +OCTAVIA_LB_TOPOLOGY=${OCTAVIA_LB_TOPOLOGY:-"SINGLE"} + +OCTAVIA_AMP_EXPIRY_AGE=${OCTAVIA_AMP_EXPIRY_AGE:-"3600"} +OCTAVIA_LB_EXPIRY_AGE=${OCTAVIA_LB_EXPIRY_AGE:-"3600"} + +OCTAVIA_CONSUMER_BINARY=${OCTAVIA_CONSUMER_BINARY:-${OCTAVIA_BIN_DIR}/octavia-worker} +OCTAVIA_HOUSEKEEPER_BINARY=${OCTAVIA_HOUSEKEEPER_BINARY:-${OCTAVIA_BIN_DIR}/octavia-housekeeping} +OCTAVIA_HEALTHMANAGER_BINARY=${OCTAVIA_HEALTHMANAGER_BINARY:-${OCTAVIA_BIN_DIR}/octavia-health-manager} +OCTAVIA_DRIVER_AGENT_BINARY=${OCTAVIA_DRIVER_AGENT_BINARY:-${OCTAVIA_BIN_DIR}/octavia-driver-agent} + +OCTAVIA_API_ARGS=${OCTAVIA_API_ARGS:-" --config-file $OCTAVIA_CONF"} +OCTAVIA_CONSUMER_ARGS=${OCTAVIA_CONSUMER_ARGS:-" --config-file $OCTAVIA_CONF"} +OCTAVIA_HOUSEKEEPER_ARGS=${OCTAVIA_HOUSEKEEPER_ARGS:-" --config-file $OCTAVIA_CONF"} +OCTAVIA_HEALTHMANAGER_ARGS=${OCTAVIA_HEALTHMANAGER_ARGS:-" --config-file $OCTAVIA_CONF"} +OCTAVIA_DRIVER_AGENT_ARGS=${OCTAVIA_DRIVER_AGENT_ARGS:-" --config-file $OCTAVIA_CONF"} + +OCTAVIA_API="o-api" +OCTAVIA_CONSUMER="o-cw" +OCTAVIA_HOUSEKEEPER="o-hk" +OCTAVIA_HEALTHMANAGER="o-hm" +OCTAVIA_SERVICE="octavia" +OCTAVIA_API_HAPROXY="o-api-ha" +OCTAVIA_DRIVER_AGENT="o-da" + +# Client settings +GITREPO["python-octaviaclient"]=${OCTAVIACLIENT_REPO:-${GIT_BASE}/openstack/python-octaviaclient.git} +GITBRANCH["python-octaviaclient"]=${OCTAVIACLIENT_BRANCH:-master} +GITDIR["python-octaviaclient"]=$DEST/python-octaviaclient + +# Library settings +GITREPO["octavia-lib"]=${OCTAVIA_LIB_REPO:-${GIT_BASE}/openstack/octavia-lib.git} +GITBRANCH["octavia-lib"]=${OCTAVIA_LIB_BRANCH:-master} +GITDIR["octavia-lib"]=$DEST/octavia-lib + +NEUTRON_ANY=${NEUTRON_ANY:-"q-svc neutron-api"} + +# HA-deployment related settings +OCTAVIA_USE_PREGENERATED_SSH_KEY=${OCTAVIA_USE_PREGENERATED_SSH_KEY:-"False"} +OCTAVIA_PREGENERATED_SSH_KEY_PATH=${OCTAVIA_PREGENERATED_SSH_KEY_PATH:-"${OCTAVIA_DIR}/devstack/pregenerated/ssh-keys/octavia_ssh_key"} + +OCTAVIA_USE_PREGENERATED_CERTS=${OCTAVIA_USE_PREGENERATED_CERTS:-"False"} +OCTAVIA_PREGENERATED_CERTS_DIR=${OCTAVIA_PREGENERATED_CERTS_DIR:-"${OCTAVIA_DIR}/devstack/pregenerated/certs"} +OCTAVIA_NODE=${OCTAVIA_NODE:-"standalone"} +OCTAVIA_CONTROLLER_IP_PORT_LIST=${OCTAVIA_CONTROLLER_IP_PORT_LIST:-"auto"} +OCTAVIA_MGMT_PORT_IP=${OCTAVIA_MGMT_PORT_IP:-"auto"} + +OCTAVIA_DIB_TRACING=${OCTAVIA_DIB_TRACING:-"1"} + +OCTAVIA_SERVICE_TYPE="load-balancer" +OCTAVIA_UWSGI_APP=${OCTAVIA_UWSGI_APP:-octavia.wsgi.api:application} +OCTAVIA_UWSGI_CONF=${OCTAVIA_UWSGI_CONF:-${OCTAVIA_CONF_DIR}/octavia-uwsgi.ini} diff --git a/devstack/upgrade/from-2025.1/upgrade-octavia 
b/devstack/upgrade/from-2025.1/upgrade-octavia new file mode 100644 index 0000000000..9d573b55f8 --- /dev/null +++ b/devstack/upgrade/from-2025.1/upgrade-octavia @@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+# OpenStack moved from using wsgi scripts to directly calling the wsgi module
+# due to changes in setuptools that stop the pbr wsgi_scripts from working.
+# This script will reconfigure uwsgi on upgrade to point to the module instead
+# of the wsgi script that is no longer present in octavia.
+
+function configure_octavia_upgrade {
+    source $GRENADE_DIR/functions
+
+    iniset $OCTAVIA_UWSGI_CONF uwsgi module $OCTAVIA_UWSGI_APP
+}
diff --git a/devstack/upgrade/resources.sh b/devstack/upgrade/resources.sh new file mode 100755 index 0000000000..a6542c40ee --- /dev/null +++ b/devstack/upgrade/resources.sh @@ -0,0 +1,132 @@
+#!/bin/bash
+
+set -o errexit
+
+source $GRENADE_DIR/grenaderc
+source $GRENADE_DIR/functions
+
+source $TOP_DIR/openrc admin demo
+
+set -o xtrace
+
+OCTAVIA_GRENADE_DIR=$(dirname $0)
+INSTANCE_USER_DATA_FILE=$OCTAVIA_GRENADE_DIR/vm_user_data.sh
+DEFAULT_INSTANCE_FLAVOR=${DEFAULT_INSTANCE_FLAVOR:-m1.tiny}
+PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"}
+PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
+PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"}
+
+# $1: desired provisioning_status
+# $2: desired operating_status
+# $3..n: command with arguments and parameters
+# TODO(cgoncalves): set timeout
+function _wait_for_status {
+    while true; do
+        eval $("${@:3}" -f shell -c provisioning_status -c operating_status)
+        [[ $operating_status == "ONLINE" && $provisioning_status == "ACTIVE" ]] && break
+        if [ $provisioning_status == "ERROR" ]; then
+            die $LINENO "ERROR creating load balancer"
+        fi
+        sleep 10
+    done
+}
+
+function create {
+    # TODO(cgoncalves): make create idempotent for resiliency in testing
+
+    # NOTE(cgoncalves): OS_USERNAME=demo is set to overcome security group name collision
+    sc_rule_id=$(OS_USERNAME=demo openstack security group rule create -f value -c id --protocol tcp --ingress --dst-port 80 default)
+    resource_save octavia sc_rule_id $sc_rule_id
+
+    # create VMs
+    vm1_ips=$(openstack server create -f value -c addresses --user-data $INSTANCE_USER_DATA_FILE --flavor $DEFAULT_INSTANCE_FLAVOR --image $DEFAULT_IMAGE_NAME --network $PRIVATE_NETWORK_NAME --wait vm1)
+    vm2_ips=$(openstack server create -f value -c addresses --user-data $INSTANCE_USER_DATA_FILE --flavor $DEFAULT_INSTANCE_FLAVOR --image $DEFAULT_IMAGE_NAME --network $PRIVATE_NETWORK_NAME --wait vm2)
+    vm1_ipv4=$(echo $vm1_ips | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')
+    vm2_ipv4=$(echo $vm2_ips | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id $PUBLIC_SUBNET_NAME
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer show lb1
+
+    openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer listener show listener1
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer show lb1
+
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer pool show pool1
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer show lb1
+
+    openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type HTTP --url-path / --name hm1 pool1
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer healthmonitor show hm1
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer show lb1
+
+    openstack loadbalancer member create --subnet-id $PRIVATE_SUBNET_NAME --address $vm1_ipv4 --protocol-port 80 pool1 --name member1
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer member show pool1 member1
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer show lb1
+
+    openstack loadbalancer member create --subnet-id $PRIVATE_SUBNET_NAME --address $vm2_ipv4 --protocol-port 80 pool1 --name member2
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer member show pool1 member2
+    _wait_for_status "ACTIVE" "ONLINE" openstack loadbalancer show lb1
+
+    lb_vip_ip=$(openstack loadbalancer show -f value -c vip_address lb1)
+    resource_save octavia lb_vip_ip $lb_vip_ip
+
+    echo "Octavia create: SUCCESS"
+}
+
+function verify {
+    # verify control plane
+    openstack loadbalancer show -f value -c operating_status lb1 | grep -q ONLINE
+    openstack loadbalancer listener show -f value -c operating_status listener1 | grep -q ONLINE
+    openstack loadbalancer pool show -f value -c operating_status pool1 | grep -q ONLINE
+    openstack loadbalancer healthmonitor show -f value -c operating_status hm1 | grep -q ONLINE
+    openstack loadbalancer member show -f value -c operating_status pool1 member1 | grep -q ONLINE
+    openstack loadbalancer member show -f value -c operating_status pool1 member2 | grep -q ONLINE
+
+    # verify data plane
+    lb_vip_ip=$(resource_get octavia lb_vip_ip)
+    curl --include -D lb.out $lb_vip_ip
+    grep -q "^HTTP/1.1 200 OK" lb.out
+
+    echo "Octavia verify: SUCCESS"
+}
+
+function verify_noapi {
+    # verify data plane
+    lb_vip_ip=$(resource_get octavia lb_vip_ip)
+    curl --include -D lb.out $lb_vip_ip
+    grep -q "^HTTP/1.1 200 OK" lb.out
+
+    echo "Octavia verify_noapi: SUCCESS"
+}
+
+function destroy {
+    sc_rule_id=$(resource_get octavia sc_rule_id)
+
+    # make destroy idempotent for resiliency in testing
+    openstack loadbalancer show lb1 && openstack loadbalancer delete --cascade lb1
+    openstack server show vm1 && openstack server delete vm1
+    openstack server show vm2 && openstack server delete vm2
+    openstack security group rule show $sc_rule_id && openstack security group rule delete $sc_rule_id
+
+    echo "Octavia destroy: SUCCESS"
+}
+
+# Dispatcher
+case $1 in
+    "create")
+        create
+        ;;
+    "verify_noapi")
+        verify_noapi
+        ;;
+    "verify")
+        verify
+        ;;
+    "destroy")
+        destroy
+        ;;
+    "force_destroy")
+        set +o errexit
+        destroy
+        ;;
+esac
diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings new file mode 100644 index 0000000000..f3246aff4b --- /dev/null +++ b/devstack/upgrade/settings @@ -0,0 +1,5 @@
+register_project_for_upgrade octavia
+register_db_to_save octavia
+
+BASE_RUN_SMOKE=False
+TARGET_RUN_SMOKE=False
diff --git a/devstack/upgrade/shutdown.sh b/devstack/upgrade/shutdown.sh new file mode 100755 index 0000000000..8fe8c10728 --- /dev/null +++ b/devstack/upgrade/shutdown.sh @@ -0,0 +1,26 @@
+#!/bin/bash
+
+set -o errexit
+
+source $GRENADE_DIR/grenaderc
+source $GRENADE_DIR/functions
+
+# We need base DevStack functions for this
+source $BASE_DEVSTACK_DIR/functions
+source $BASE_DEVSTACK_DIR/stackrc # needed for status directory
+source $BASE_DEVSTACK_DIR/lib/tls
+source $BASE_DEVSTACK_DIR/lib/apache
+source $BASE_DEVSTACK_DIR/lib/neutron
+
+OCTAVIA_DEVSTACK_DIR=$(dirname $(dirname $0))
+source $OCTAVIA_DEVSTACK_DIR/settings
+source $OCTAVIA_DEVSTACK_DIR/plugin.sh
+
+source ${GITDIR[neutron]}/devstack/lib/octavia
+
+set -o xtrace
+
+octavia_stop
+
+# sanity check that service is actually down
+ensure_services_stopped o-api o-cw o-hk o-hm
diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh new file mode 100755 index 0000000000..41a2088254 --- /dev/null +++ b/devstack/upgrade/upgrade.sh @@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+# ``upgrade-octavia``
+
+echo "*********************************************************************"
+echo "Begin $0"
+echo "*********************************************************************"
+
+# Clean up any resources that may be in use
+cleanup() {
+    set +o errexit
+
+    echo "********************************************************************"
+    echo "ERROR: Abort $0"
+    echo "********************************************************************"
+
+    # Kill ourselves to signal any calling process
+    trap 2; kill -2 $$
+}
+
+trap cleanup SIGHUP SIGINT SIGTERM
+
+# Keep track of the grenade directory
+RUN_DIR=$(cd $(dirname "$0") && pwd)
+
+# Source params
+source $GRENADE_DIR/grenaderc
+
+# Import common functions
+source $GRENADE_DIR/functions
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Upgrade octavia
+# ============
+
+# Get functions from current DevStack
+source $TARGET_DEVSTACK_DIR/stackrc
+source $TARGET_DEVSTACK_DIR/lib/apache
+source $TARGET_DEVSTACK_DIR/lib/tls
+source $(dirname $(dirname $BASH_SOURCE))/settings
+source $(dirname $(dirname $BASH_SOURCE))/plugin.sh
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+# Save current config files for posterity
+[[ -d $SAVE_DIR/etc.octavia ]] || cp -pr $OCTAVIA_CONF_DIR $SAVE_DIR/etc.octavia
+
+# Install the target octavia
+octavia_lib_install
+octavia_install
+
+# calls upgrade-octavia for specific release
+upgrade_project octavia $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH
+
+# Migrate the database
+$OCTAVIA_BIN_DIR/octavia-db-manage upgrade head || die $LINENO "DB migration error"
+
+octavia_start
+
+# Don't succeed unless the services come up
+ensure_services_started o-api o-cw o-hm o-hk
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End $0"
+echo "*********************************************************************"
diff --git a/devstack/upgrade/vm_user_data.sh b/devstack/upgrade/vm_user_data.sh new file mode 100755 index 0000000000..5f2a447483 --- /dev/null +++ b/devstack/upgrade/vm_user_data.sh @@ -0,0 +1,6 @@
+#!/bin/sh -v
+Body=$(hostname)
+Response="HTTP/1.1 200 OK\r\nContent-Length: ${#Body}\r\n\r\n$Body"
+while true; do
+    echo -e $Response | nc -llp 80
+done
diff --git a/diskimage-create/README.rst b/diskimage-create/README.rst new file mode 100644 index 0000000000..c0822c6bc4 --- /dev/null +++ b/diskimage-create/README.rst @@ -0,0 +1,388 @@
+===============================
+Building Octavia Amphora Images
+===============================
+
+Octavia is an operator-grade reference implementation for Load Balancing as a
+Service (LBaaS) for OpenStack. The component of Octavia that does the load
+balancing is known as an amphora. An amphora may be a virtual machine, a
+container, or may run on bare metal. Creating images for bare metal amphora
+installs is outside the scope of this version but may be added in a
+future release.
+
+Prerequisites
+=============
+
+Python pip should be installed, as well as the Python modules found in the
+requirements.txt file.
+
+To do so, you can use the following command on Ubuntu:
+
+.. code:: bash
+
+    $ # Install python pip
+    $ sudo apt install python-pip
+    $ # Optionally create a virtualenv
+    $ sudo apt install python-virtualenv
+    $ virtualenv octavia_disk_image_create
+    $ source octavia_disk_image_create/bin/activate
+    $ # Install octavia requirements
+    $ cd octavia/diskimage-create
+    $ pip install -r requirements.txt
+
+
+Your cache directory should have at least 1GB available, the working directory
+will need ~1.5GB, and your image destination will need ~500MB.
+
+The script will use the version of diskimage-builder installed on your system,
+but it can be overridden by setting the following environment variables:
+
+.. code-block:: bash
+
+    DIB_REPO_PATH = //diskimage-builder
+    DIB_ELEMENTS = //diskimage-builder/elements
+
+
+The following packages are required on each platform:
+
+Ubuntu
+
+.. code:: bash
+
+    $ sudo apt install qemu-utils git kpartx debootstrap
+
+Fedora, CentOS and Red Hat Enterprise Linux
+
+.. code:: bash
+
+    $ sudo dnf install qemu-img git e2fsprogs policycoreutils-python-utils
+
+Test Prerequisites
+------------------
+The tox image tests require libguestfs-tools 1.24 or newer.
+Libguestfs allows testing the Amphora image without requiring root privileges.
+On Ubuntu systems you also need to give read access to the kernels for the
+user running the tests:
+
+.. code:: bash
+
+    $ sudo chmod 0644 /boot/vmlinuz*
+
+Usage
+=====
+This script and associated elements will build Amphora images. Current support
+is for Ubuntu and CentOS Stream base OS images with HAProxy.
+The script can use RHEL and Fedora
+as a base OS but these will not initially be tested or supported.
+As the project progresses and/or the diskimage-builder project adds support
+for additional base OS options, they may become available for Amphora images.
+This does not mean that they are necessarily supported or tested.
+
+.. note::
+
+   If your cloud has multiple hardware architectures available to nova,
+   remember to set the appropriate hw_architecture property on the
+   image when you load it into glance. For example, when loading an
+   amphora image built for "amd64" you would add
+   "--property hw_architecture='x86_64'" to your "openstack image create"
+   command line.
+
+The script will use environment variables to customize the build beyond the
+Octavia project defaults, such as adding elements.
+
+The supported and tested image is created by using the diskimage-create.sh
+defaults (no command line parameters or environment variables set). As the
+project progresses we may add additional supported configurations.
+
+Command syntax:
+
+
+.. code-block::
+
+   $ diskimage-create.sh
+       [-a **amd64** | armhf | aarch64 | ppc64le ]
+       [-b **haproxy** ]
+       [-c **~/.cache/image-create** | ]
+       [-d **noble**/**9-stream**/**9** | ]
+       [-e]
+       [-f]
+       [-g **repository branch** | stable/train | stable/stein | ... ]
+       [-h]
+       [-i **ubuntu-minimal** | fedora | centos-minimal | rhel | rocky ]
+       [-k ]
+       [-l ]
+       [-m]
+       [-n]
+       [-o **amphora-x64-haproxy** | ]
+       [-p]
+       [-r ]
+       [-s **2** | ]
+       [-t **qcow2** | tar ]
+       [-v]
+       [-w ]
+       [-x]
+       [-y]
+
+   '-a' is the architecture type for the image (default: amd64)
+   '-b' is the backend type (default: haproxy)
+   '-c' is the path to the cache directory (default: ~/.cache/image-create)
+   '-d' distribution release id (default on ubuntu: noble)
+   '-e' enable complete mandatory access control systems when available (default: permissive)
+   '-f' disable tmpfs for build
+   '-g' build the image for a specific OpenStack Git branch (default: current repository branch)
+   '-h' display help message
+   '-i' is the base OS (default: ubuntu-minimal)
+   '-k' is the kernel meta package name, currently only for ubuntu-minimal base OS (default: linux-image-virtual)
+   '-l' is output logfile (default: none)
+   '-m' enable vCPU pinning optimizations (default: disabled)
+   '-n' disable sshd (default: enabled)
+   '-o' is the output image file name
+   '-p' install amphora-agent from distribution packages (default: disabled)
+   '-r' enable the root account in the generated image (default: disabled)
+   '-s' is the image size to produce in gigabytes (default: 2)
+   '-t' is the image type (default: qcow2)
+   '-v' display the script version
+   '-w' working directory for image building (default: .)
+   '-x' enable tracing for diskimage-builder
+   '-y' enable FIPS 140-2 mode in the amphora image
+
+
+Building Images for Alternate Branches
+======================================
+
+By default, the diskimage-create.sh script will build an amphora image using
+the Octavia Git branch of the repository. If you need an image for a specific
+branch, such as "stable/train", you need to specify the "-g" option with the
+branch name. An example for "stable/train" would be:
+
+.. code-block:: bash
+
+   diskimage-create.sh -g stable/train
+
+Advanced Git Branch/Reference Based Images
+------------------------------------------
+
+If you need to build an image from a local repository or with a specific Git
+reference or branch, you will need to set some environment variables for
+diskimage-builder. A combined example is shown at the end of this section.
+
+.. note::
+
+   These advanced settings will override the "-g" diskimage-create.sh setting.
+
+Building From a Local Octavia Repository
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set the DIB_REPOLOCATION_amphora_agent variable to the location of the Git
+repository containing the amphora agent:
+
+.. code-block:: bash
+
+   export DIB_REPOLOCATION_amphora_agent=/opt/stack/octavia
+
+Building With a Specific Git Reference
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set the DIB_REPOREF_amphora_agent variable to point to the Git branch or
+reference of the amphora agent:
+
+.. code-block:: bash
+
+   export DIB_REPOREF_amphora_agent=refs/changes/40/674140/7
+
+See the `Environment Variables`_ section below for additional information and
+examples.
+
+Amphora Agent Upper Constraints
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You may also need to specify which version of the OpenStack
+upper-constraints.txt file will be used to build the image. For example, to
+specify the "stable/train" upper constraints Git branch, set the following
+environment variable:
+
+.. code-block:: bash
+
+   export DIB_REPOLOCATION_upper_constraints=https://opendev.org/openstack/requirements/raw/branch/stable/train/upper-constraints.txt
+
+See `Dependency Management for OpenStack Projects `_ for more information.
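+
+As a combined example, the following sketch builds a "stable/train" amphora
+image from a local Octavia checkout while pinning the matching upper
+constraints. This is only an illustrative combination of the variables
+documented above; the checkout path is an assumption and should be adjusted
+to your environment:
+
+.. code-block:: bash
+
+   # Assumed location of a local Octavia checkout (illustrative path)
+   export DIB_REPOLOCATION_amphora_agent=/opt/stack/octavia
+   # Build the amphora agent from the stable/train branch
+   export DIB_REPOREF_amphora_agent=stable/train
+   # Pin the matching stable/train upper constraints
+   export DIB_REPOLOCATION_upper_constraints=https://opendev.org/openstack/requirements/raw/branch/stable/train/upper-constraints.txt
+   # Run the build with defaults otherwise
+   ./diskimage-create.sh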
+
+Environment Variables
+=====================
+These are optional environment variables that can be set to override the
+script defaults.
+
+DIB_REPOLOCATION_amphora_agent
+  - Location of the amphora-agent code that will be installed in the image.
+  - Default: https://opendev.org/openstack/octavia
+  - Example: /tmp/octavia
+
+DIB_REPOREF_amphora_agent
+  - The Git reference to checkout for the amphora-agent code inside the
+    image.
+  - Default: The current branch
+  - Example: stable/stein
+  - Example: refs/changes/40/674140/7
+
+DIB_REPOLOCATION_octavia_lib
+  - Location of the octavia-lib code that will be installed in the image.
+  - Default: https://opendev.org/openstack/octavia-lib
+  - Example: /tmp/octavia-lib
+
+DIB_REPOREF_octavia_lib
+  - The Git reference to checkout for the octavia-lib code inside the
+    image.
+  - Default: master or stable branch for released OpenStack series installs.
+  - Example: stable/ussuri
+  - Example: refs/changes/19/744519/2
+
+DIB_REPOLOCATION_upper_constraints
+  - Location of the upper-constraints.txt file used for the image.
+  - Default: The upper-constraints.txt for the current branch
+  - Example: https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt
+  - Example: https://opendev.org/openstack/requirements/raw/branch/stable/train/upper-constraints.txt
+
+CLOUD_INIT_DATASOURCES
+  - Comma-separated list of cloud-init datasources
+  - Default: ConfigDrive
+  - Options: NoCloud, ConfigDrive, OVF, MAAS, Ec2,
+  - Reference: https://launchpad.net/cloud-init
+
+DIB_DISTRIBUTION_MIRROR
+  - URL to a mirror for the base OS selected
+  - Default: None
+
+DIB_ELEMENTS
+  - Override the elements used to build the image
+  - Default: None
+
+DIB_LOCAL_ELEMENTS
+  - Elements to add to the build (requires DIB_LOCAL_ELEMENTS_PATH to be
+    specified)
+  - Default: None
+
+DIB_LOCAL_ELEMENTS_PATH
+  - Path to the local elements directory
+  - Default: None
+
+DIB_REPO_PATH
+  - Directory containing diskimage-builder
+  - Default: /diskimage-builder
+  - Reference: https://github.com/openstack/diskimage-builder
+
+OCTAVIA_REPO_PATH
+  - Directory containing octavia
+  - Default:
+  - Reference: https://github.com/openstack/octavia
+
+DIB_OCTAVIA_AMP_USE_NFTABLES
+  - Boolean that configures nftables inside the amphora image
+  - Required for SR-IOV enabled amphora
+  - Default: True
+
+Using distribution packages for amphora agent
+---------------------------------------------
+By default, the amphora agent is installed from the Octavia Git repository.
+To use distribution packages, use the "-p" option.
+
+Note that this needs a base system image with the required repositories
+enabled (for example RDO repositories for CentOS/Fedora). One of these
+variables must be set:
+
+DIB_LOCAL_IMAGE
+  - Path to the locally downloaded image
+  - Default: None
+
+DIB_CLOUD_IMAGES
+  - Directory base URL to download the image from
+  - Default: depends on the distribution
+
+RHEL-specific variables
+------------------------
+Building a RHEL-based image requires:
+  - a Red Hat Enterprise Linux KVM Guest Image, manually downloaded from the
+    Red Hat Customer Portal. Set the DIB_LOCAL_IMAGE variable to point to
+    the file. More details at:
+    /elements/rhel
+
+  - a Red Hat subscription for the matching Red Hat OpenStack Platform
+    repository if you want to install the amphora agent from the official
+    distribution package (requires setting the -p option in
+    diskimage-create.sh).
+    Set the needed registration parameters depending on your configuration.
+ More details at: + /elements/rhel-common + +Here is an example with Customer Portal registration and OSP 15 repository: + +.. code:: bash + + $ export DIB_LOCAL_IMAGE='/tmp/rhel-server-8.0-x86_64-kvm.qcow2' + + $ export REG_METHOD='portal' REG_REPOS='rhel-8-server-openstack-15-rpms' + + $ export REG_USER='' REG_PASSWORD='' REG_AUTO_ATTACH=true + +This example uses registration via a Satellite (the activation key must enable +an OSP repository): + +.. code:: bash + + $ export DIB_LOCAL_IMAGE='/tmp/rhel-server-8.1-x86_64-kvm.qcow2' + + $ export REG_METHOD='satellite' REG_ACTIVATION_KEY="" + + $ export REG_SAT_URL="" REG_ORG="" + +Building in a virtualenv with tox +--------------------------------- +To make use of a virtualenv for Python dependencies you may run ``tox``. Note +that you may still need to install binary dependencies on the host for the +build to succeed. + +If you wish to customize your build modify ``tox.ini`` to pass on relevant +environment variables or command line arguments to the ``diskimage-create.sh`` +script. + +.. code:: bash + + $ tox -e build + + +Container Support +================= +The Docker command line required to import a tar file created with this script +is: + +.. code:: bash + + $ docker import - image:amphora-x64-haproxy < amphora-x64-haproxy.tar + + +References +========== + +This documentation and script(s) leverage prior work by the OpenStack TripleO +and Sahara teams. Thank you to everyone that worked on them for providing a +great foundation for creating Octavia Amphora images. + +* https://opendev.org/openstack/diskimage-builder +* https://opendev.org/openstack/tripleo-image-elements +* https://opendev.org/openstack/sahara-image-elements + +Copyright +========= + +Copyright 2014 Hewlett-Packard Development Company, L.P. + +All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); you may +not use this file except in compliance with the License. You may obtain +a copy of the License at + +* http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. diff --git a/diskimage-create/diskimage-create.sh b/diskimage-create/diskimage-create.sh new file mode 100755 index 0000000000..fc3f9b5b3f --- /dev/null +++ b/diskimage-create/diskimage-create.sh @@ -0,0 +1,551 @@ +#!/bin/bash +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +set -e + +usage() { + echo + echo "Usage: $(basename "$0")" + echo " [-a **amd64** | armhf | aarch64 | ppc64le]" + echo " [-b **haproxy** ]" + echo " [-c **~/.cache/image-create** | ]" + echo " [-d **noble**/**9-stream**/**9** | ]" + echo " [-e]" + echo " [-f]" + echo " [-g **repository branch** | stable/train | stable/stein | ... 
]" + echo " [-h]" + echo " [-i **ubuntu-minimal** | fedora | centos-minimal | rhel | rocky ]" + echo " [-k ]" + echo " [-l ]" + echo " [-m]" + echo " [-n]" + echo " [-o **amphora-x64-haproxy.qcow2** | ]" + echo " [-p]" + echo " [-r ]" + echo " [-s **2** | ]" + echo " [-t **qcow2** | tar | vhd | raw ]" + echo " [-v]" + echo " [-w ]" + echo " [-x]" + echo + echo " '-a' is the architecture type for the image (default: amd64)" + echo " '-b' is the backend type (default: haproxy)" + echo " '-c' is the path to the cache directory (default: ~/.cache/image-create)" + echo " '-d' distribution release id (default on ubuntu: noble)" + echo " '-e' enable complete mandatory access control systems when available (default: permissive)" + echo " '-f' disable tmpfs for build" + echo " '-g' build the image for a specific OpenStack Git branch (default: current repository branch)" + echo " '-h' display this help message" + echo " '-i' is the base OS (default: ubuntu-minimal)" + echo " '-k' is the kernel meta package name, currently only for ubuntu-minimal base OS (default: linux-image-virtual)" + echo " '-l' is output logfile (default: none)" + echo " '-m' enable vCPU pinning optimizations (default: disabled)" + echo " '-n' disable sshd (default: enabled)" + echo " '-o' is the output image file name" + echo " '-p' install amphora-agent from distribution packages (default: disabled)" + echo " '-r' enable the root account in the generated image (default: disabled)" + echo " '-s' is the image size to produce in gigabytes (default: 2)" + echo " '-t' is the image type (default: qcow2)" + echo " '-v' display the script version" + echo " '-w' working directory for image building (default: .)" + echo " '-x' enable tracing for diskimage-builder" + echo " '-y' enable FIPS 140-2 mode in the amphora image" + echo + exit 1 +} + +version() { + echo "Amphora disk image creation script version:"\ + "$(cat "${OCTAVIA_REPO_PATH}/diskimage-create/version.txt")" + exit 1 +} + +find_system_elements() { + # List of possible system installation directories + local system_prefixes="/usr/share /usr/local/share" + for prefix in $system_prefixes; do + if [ -d "$prefix/$1" ]; then + echo "$prefix/$1" + return + fi + done +} + +# Figure out where our directory is located +if [ -z "$OCTAVIA_REPO_PATH" ]; then + AMP_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + OCTAVIA_REPO_PATH=${OCTAVIA_REPO_PATH:-${AMP_DIR%/*}} +fi +dib_enable_tracing= + +AMP_LOGFILE="" + +while getopts "a:b:c:d:efg:hi:k:l:mno:pt:r:s:vw:xy" opt; do + case $opt in + a) + AMP_ARCH=$OPTARG + if [ "$AMP_ARCH" != "amd64" ] && \ + [ "$AMP_ARCH" != "ppc64le" ] && \ + [ "$AMP_ARCH" != "aarch64" ] && \ + [ "$AMP_ARCH" != "armhf" ]; then + echo "Error: Unsupported architecture $AMP_ARCH specified" + exit 3 + fi + ;; + b) + if [ "$OPTARG" == "haproxy" ]; then + AMP_BACKEND=$OPTARG-octavia + else + echo "Error: Unsupported backend type $AMP_BACKEND specified" + exit 3 + fi + ;; + c) + AMP_CACHEDIR=$OPTARG + ;; + d) + AMP_DIB_RELEASE=$OPTARG + ;; + e) + AMP_ENABLE_FULL_MAC_SECURITY=1 + ;; + f) + AMP_DISABLE_TMP_FS='--no-tmpfs' + ;; + g) + if [ -z "$DIB_REPOREF_amphora_agent" ]; then + echo "Building image with amphora agent from $OPTARG." + export DIB_REPOREF_amphora_agent=$OPTARG + else + echo "Environment variable DIB_REPOREF_amphora_agent is set. Building the image with amphora agent $DIB_REPOREF_amphora_agent." 
+ fi + if [ -z "$DIB_REPOLOCATION_upper_constraints" ]; then + echo "Using upper constraints from https://opendev.org/openstack/requirements/raw/branch/$OPTARG/upper-constraints.txt." + export DIB_REPOLOCATION_upper_constraints="https://opendev.org/openstack/requirements/raw/branch/$OPTARG/upper-constraints.txt" + else + echo "Environment variable DIB_REPOLOCATION_upper_constraints is set. Building the image with upper-constraints.txt from $DIB_REPOLOCATION_upper_constraints." + fi + ;; + h) + usage + ;; + i) + AMP_BASEOS=$OPTARG + if [ "$AMP_BASEOS" != "ubuntu" ] && \ + [ "$AMP_BASEOS" != "ubuntu-minimal" ] && \ + [ "$AMP_BASEOS" != "fedora" ] && \ + [ "$AMP_BASEOS" != "centos" ] && \ + [ "$AMP_BASEOS" != "centos-minimal" ] && \ + [ "$AMP_BASEOS" != "rocky" ] && \ + [ "$AMP_BASEOS" != "rhel" ]; then + echo "Error: Unsupported base OS $AMP_BASEOS specified" + exit 3 + fi + if [ "$AMP_BASEOS" == "ubuntu" ]; then + AMP_BASEOS="ubuntu-minimal" + fi + if [ "$AMP_BASEOS" == "centos" ]; then + AMP_BASEOS="centos-minimal" + fi + if [ "$AMP_BASEOS" == "rocky" ]; then + AMP_BASEOS="rocky-container" + fi + ;; + k) + AMP_KERNEL=$OPTARG + ;; + l) + AMP_LOGFILE="--logfile=$OPTARG" + ;; + m) + AMP_ENABLE_CPUPINNING=1 + ;; + n) + AMP_DISABLE_SSHD=1 + ;; + o) + AMP_OUTPUTFILENAME=$(readlink -f "$OPTARG") + amp_dir=$(dirname "$AMP_OUTPUTFILENAME") + if [ ! -d "$amp_dir" ]; then + echo "Error: Directory $amp_dir does not exist" + exit 3 + fi + ;; + p) + AMP_PACKAGE_INSTALL=1 + ;; + r) + AMP_ROOTPW=$OPTARG + ;; + s) + AMP_IMAGESIZE=$OPTARG + if ! [[ $AMP_IMAGESIZE =~ ^[0-9]+$ ]]; then + echo "Error: Invalid image size $AMP_IMAGESIZE specified" + exit 3 + fi + ;; + t) + AMP_IMAGETYPE=$OPTARG + if [ "$AMP_IMAGETYPE" != "qcow2" ] && \ + [ "$AMP_IMAGETYPE" != "tar" ] && \ + [ "$AMP_IMAGETYPE" != "vhd" ] && \ + [ "$AMP_IMAGETYPE" != "raw" ]; then + echo "Error: Unsupported image type $AMP_IMAGETYPE specified" + exit 3 + fi + ;; + v) + version + ;; + w) + AMP_WORKING_DIR=$OPTARG + ;; + x) dib_enable_tracing=1 + ;; + y) AMP_ENABLE_FIPS=1 + ;; + *) + usage + ;; + esac +done + +shift $((OPTIND-1)) +if [ "$1" ]; then + usage +fi + +# Set the Octavia Amphora defaults if they aren't already set +AMP_ARCH=${AMP_ARCH:-"amd64"} + +AMP_BACKEND=${AMP_BACKEND:-"haproxy-octavia"} + +AMP_CACHEDIR=${AMP_CACHEDIR:-"$HOME/.cache/image-create"} +# Make sure we have an absolute path for the cache location +mkdir -p "$AMP_CACHEDIR" +AMP_CACHEDIR="$( cd "$AMP_CACHEDIR" && pwd )" + +AMP_BASEOS=${AMP_BASEOS:-"ubuntu-minimal"} + +if [ "$AMP_BASEOS" = "ubuntu-minimal" ]; then + export DIB_RELEASE=${AMP_DIB_RELEASE:-"noble"} +elif [ "${AMP_BASEOS}" = "rhel" ]; then + export DIB_RELEASE=${AMP_DIB_RELEASE:-"9"} +elif [ "${AMP_BASEOS}" = "centos-minimal" ]; then + export DIB_RELEASE=${AMP_DIB_RELEASE:-"9-stream"} +elif [ "${AMP_BASEOS}" = "fedora" ]; then + export DIB_RELEASE=${AMP_DIB_RELEASE:-"28"} +elif [ "${AMP_BASEOS}" = "rocky-container" ]; then + export DIB_RELEASE=${AMP_DIB_RELEASE:-"9"} +fi + +AMP_OUTPUTFILENAME=${AMP_OUTPUTFILENAME:-"$PWD/amphora-x64-haproxy.qcow2"} + +AMP_IMAGETYPE=${AMP_IMAGETYPE:-"qcow2"} + +AMP_IMAGESIZE=${AMP_IMAGESIZE:-2} + +if [ "$AMP_BASEOS" = "ubuntu-minimal" ]; then + export DIB_UBUNTU_KERNEL=${AMP_KERNEL:-"linux-image-virtual"} +fi + +AMP_ENABLE_CPUPINNING=${AMP_ENABLE_CPUPINNING:-0} + +AMP_DISABLE_SSHD=${AMP_DISABLE_SSHD:-0} + +AMP_PACKAGE_INSTALL=${AMP_PACKAGE_INSTALL:-0} + +AMP_ENABLE_FULL_MAC_SECURITY=${AMP_ENABLE_FULL_MAC_SECURITY:-0} + +AMP_DISABLE_TMP_FS=${AMP_DISABLE_TMP_FS:-""} + 
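+# FIPS 140-2 mode is opt-in via the -y flag and defaults to off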
+AMP_ENABLE_FIPS=${AMP_ENABLE_FIPS:-0} + +if [[ "$AMP_BASEOS" =~ ^(rhel|fedora)$ ]] && [[ "$AMP_IMAGESIZE" -lt 3 ]]; then + echo "RHEL/Fedora based amphora requires an image size of at least 3GB" + exit 1 +fi + +OCTAVIA_ELEMENTS_PATH=$OCTAVIA_REPO_PATH/elements + +if ! [ -d "$OCTAVIA_ELEMENTS_PATH" ]; then + SYSTEM_OCTAVIA_ELEMENTS_PATH=$(find_system_elements octavia-image-elements) + if [ -z "${SYSTEM_OCTAVIA_ELEMENTS_PATH}" ]; then + echo "ERROR: Octavia elements directory not found at: $OCTAVIA_ELEMENTS_PATH Exiting." + exit 1 + fi + OCTAVIA_ELEMENTS_PATH=${SYSTEM_OCTAVIA_ELEMENTS_PATH} +fi + +DIB_REPO_PATH=${DIB_REPO_PATH:-${OCTAVIA_REPO_PATH%/*}/diskimage-builder} + +if [ -d "$DIB_REPO_PATH" ]; then + export PATH=$PATH:$DIB_REPO_PATH/bin +else + if ! disk-image-create --version > /dev/null 2>&1; then + echo "ERROR: diskimage-builder repo directory not found at: $DIB_REPO_PATH or in path. Exiting." + exit 1 + fi +fi + +# For system-wide installs, DIB will automatically find the elements, so we only check local path +if [ "$DIB_LOCAL_ELEMENTS_PATH" ]; then + export ELEMENTS_PATH=$OCTAVIA_ELEMENTS_PATH:$DIB_LOCAL_ELEMENTS_PATH +else + export ELEMENTS_PATH=$OCTAVIA_ELEMENTS_PATH +fi + +# Make sure we have a value set for DIB_OCTAVIA_AMP_USE_NFTABLES +export DIB_OCTAVIA_AMP_USE_NFTABLES=${DIB_OCTAVIA_AMP_USE_NFTABLES:-True} + +export CLOUD_INIT_DATASOURCES=${CLOUD_INIT_DATASOURCES:-"ConfigDrive"} + +# Additional RHEL environment checks +if [ "${AMP_BASEOS}" = "rhel" ]; then + if [ -z "${DIB_LOCAL_IMAGE}" ]; then + echo "DIB_LOCAL_IMAGE variable must be set and point to a RHEL base cloud image. Exiting." + echo "For more information, see the README file in ${DIB_ELEMENTS_PATH}/elements/rhel" + exit 1 + fi +fi + +# Find out what platform we are on +if [ -e /etc/os-release ]; then + platform=$(grep '^NAME=' /etc/os-release | sed -e 's/\(NAME="\)\(.*\)\("\)/\2/g') +else + platform=$(head -1 /etc/system-release | grep -e CentOS -e 'Red Hat Enterprise Linux' || :) + if [ -z "$platform" ]; then + echo -e "Unknown Host OS. Impossible to build images.\nAborting" + exit 2 + fi +fi + +if [[ "$AMP_ROOTPW" ]] && [[ "$platform" != 'Ubuntu' ]] && ! [[ "$platform" =~ "Debian" ]]; then + if [ "$(getenforce)" == "Enforcing" ]; then + echo "A root password cannot be enabled for images built on this platform while SELinux is enabled." + exit 1 + fi +fi + +if [ "$AMP_ROOTPW" ]; then + echo "Warning: Using a root password in the image, NOT FOR PRODUCTION USAGE." +fi + +# Make sure we have the required packages installed +if [[ "$platform" = 'Ubuntu' || "$platform" =~ 'Debian' ]]; then + PKG_LIST="qemu-utils git kpartx debootstrap" + for pkg in $PKG_LIST; do + if ! dpkg --get-selections 2> /dev/null | grep -q "^${pkg}[[:space:]]*install$" >/dev/null; then + echo "Required package $pkg is not installed. Exiting." + echo "Binary dependencies on this platform are: ${PKG_LIST}" + exit 1 + fi + done + + if [[ "$platform" = 'Ubuntu' ]]; then + # Also check if we can build the BASEOS on this Ubuntu version + UBUNTU_VERSION=$(lsb_release -r | awk '{print $2}') + if [[ "$AMP_BASEOS" != "ubuntu-minimal" ]] && \ + [[ 1 -eq "$(echo "$UBUNTU_VERSION < 16.04" | bc)" ]]; then + echo "Ubuntu minimum version 16.04 required to build $AMP_BASEOS." + echo "Earlier versions don't support the extended attributes required." 
+ exit 1 + fi + else + # Check if we can build the BASEOS on this Debian version + DEBIAN_VERSION=$(lsb_release -r | awk '{print $2}') + # As minimal Ubuntu version is 14.04, for debian it is Debian 8 Jessie + if [[ "$AMP_BASEOS" != "ubuntu-minimal" ]] && \ + [[ 1 -eq "$(echo "$DEBIAN_VERSION < 8" | bc)" ]]; then + echo "Debian minimum version 8 required to build $AMP_BASEOS." + echo "Earlier versions don't support the extended attributes required." + exit 1 + fi + fi +elif [[ $platform =~ "SUSE" ]]; then + # OpenSUSE + # use rpm -q to check for qemu-tools and git-core + PKG_LIST="qemu-tools git-core" + for pkg in $PKG_LIST; do + if ! rpm -q "$pkg" &> /dev/null; then + echo "Required package ${pkg/\*} is not installed. Exiting." + echo "Binary dependencies on this platform are: ${PKG_LIST}" + exit 1 + fi + done +elif [[ $platform =~ "Gentoo" ]]; then + # Gentoo + # Check /var/db for dev-vcs/git and app-emulation/[qemu|xen-tools] sys-fs/multipath-tools + PKG_LIST="dev-vcs/git app-emulation/qemu|xen-tools sys-fs/multipath-tools" + for pkg in $PKG_LIST; do + if grep -qs '|' <<< "$pkg"; then + c=$(cut -d / -f 1 <<<"$pkg") + for p in $(cut -d / -f 2 <<<"$pkg" | tr "|" " "); do + if [ -d /var/db/pkg/"$c"/"$p"-* ]; then + continue 2 + fi + done + echo "Required package ${pkg/\*} is not installed. Exiting." + echo "Binary dependencies on this platform are: ${PKG_LIST}" + exit 1 + elif [ ! -d /var/db/pkg/"$pkg"-* ]; then + echo "Required package ${pkg/\*} is not installed. Exiting." + echo "Binary dependencies on this platform are: ${PKG_LIST}" + exit 1 + fi + done +else + # fedora/centos/rhel + # Actual qemu-img name may be qemu-img, qemu-img-ev, qemu-img-rhev, ... + # "dnf|yum install qemu-img" works for all, but search requires wildcard + PKG_LIST="qemu-img* git" + for pkg in $PKG_LIST; do + if ! rpm -qa "$pkg" ; then + echo "Required package ${pkg/\*} is not installed. Exiting." + echo "Binary dependencies on this platform are: ${PKG_LIST}" + exit 1 + fi + done +fi + +if [ "$AMP_WORKING_DIR" ]; then + mkdir -p "$AMP_WORKING_DIR" + TEMP=$(mktemp -d "$AMP_WORKING_DIR/diskimage-create.XXXXXX") +else + TEMP=$(mktemp -d diskimage-create.XXXXXX) +fi +pushd "$TEMP" > /dev/null + +# Setup the elements list + +AMP_element_sequence=${AMP_element_sequence:-"base vm"} +if [ "${AMP_BASEOS}" = "rhel" ] && [ "${DIB_RELEASE}" = "8" ]; then + export DIB_INSTALLTYPE_pip_and_virtualenv=package +fi +AMP_element_sequence="$AMP_element_sequence ${AMP_BASEOS}" + +if [ "$AMP_PACKAGE_INSTALL" -eq 1 ]; then + export DIB_INSTALLTYPE_amphora_agent=package +fi + +# Add our backend element (haproxy, etc.) 
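+# (AMP_BACKEND defaults to haproxy-octavia; it is set via the -b option)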
+AMP_element_sequence="$AMP_element_sequence $AMP_BACKEND" + +if [ "$AMP_ROOTPW" ]; then + AMP_element_sequence="$AMP_element_sequence root-passwd" + export DIB_PASSWORD=$AMP_ROOTPW +fi + +# Add the Amphora Agent and Pyroute elements +AMP_element_sequence="$AMP_element_sequence rebind-sshd" +AMP_element_sequence="$AMP_element_sequence no-resolvconf" +AMP_element_sequence="$AMP_element_sequence amphora-agent" +AMP_element_sequence="$AMP_element_sequence octavia-lib" +AMP_element_sequence="$AMP_element_sequence sos" +AMP_element_sequence="$AMP_element_sequence cloud-init-datasources" +AMP_element_sequence="$AMP_element_sequence remove-default-ints" + +# SELinux systems +if [ "${AMP_BASEOS}" = "centos-minimal" ] || [ "${AMP_BASEOS}" = "fedora" ] || [ "${AMP_BASEOS}" = "rhel" ] || [ "${AMP_BASEOS}" = "rocky-container" ]; then + if [ "$AMP_ENABLE_FULL_MAC_SECURITY" -ne 1 ]; then + AMP_element_sequence="$AMP_element_sequence selinux-permissive" + else + # If SELinux is enforced, the amphora image requires the amphora-selinux policies + AMP_element_sequence="$AMP_element_sequence amphora-selinux" + fi +fi + +# AppArmor systems +if [ "${AMP_BASEOS}" = "ubuntu-minimal" ] || [ "${AMP_BASEOS}" = "ubuntu" ]; then + AMP_element_sequence="$AMP_element_sequence amphora-apparmor" +fi + +# Disable the dnf makecache timer +if [ "${AMP_BASEOS}" = "centos-minimal" ] || [ "${AMP_BASEOS}" = "fedora" ] || [ "${AMP_BASEOS}" = "rhel" ] || [ "${AMP_BASEOS}" = "rocky-container" ]; then + AMP_element_sequence="$AMP_element_sequence disable-makecache" +fi + +if [ "${AMP_BASEOS}" = "centos-minimal" ]; then + export DIB_YUM_MINIMAL_CREATE_INTERFACES=0 +fi + +# Add keepalived-octavia element +AMP_element_sequence="$AMP_element_sequence keepalived-octavia" +AMP_element_sequence="$AMP_element_sequence ipvsadmin" + +# Add pip-cache element +AMP_element_sequence="$AMP_element_sequence pip-cache" + +# Add certificate ramfs element +AMP_element_sequence="$AMP_element_sequence certs-ramfs" + +# Add cpu-pinning element +if [ "$AMP_ENABLE_CPUPINNING" -eq 1 ]; then + AMP_element_sequence="$AMP_element_sequence cpu-pinning" +fi + +# Disable SSHD if requested +if [ "$AMP_DISABLE_SSHD" -eq 1 ]; then + AMP_element_sequence="$AMP_element_sequence remove-sshd" + export DIB_OCTAVIA_AMP_USE_SSH=${DIB_OCTAVIA_AMP_USE_SSH:-False} +else + export DIB_OCTAVIA_AMP_USE_SSH=${DIB_OCTAVIA_AMP_USE_SSH:-True} +fi + +# Enable FIPS if requested +if [ "$AMP_ENABLE_FIPS" -eq 1 ]; then + AMP_element_sequence="$AMP_element_sequence amphora-fips" +fi + +# Allow full elements override +if [ "$DIB_ELEMENTS" ]; then + AMP_element_sequence="$DIB_ELEMENTS" +fi + +if [ "$DIB_LOCAL_ELEMENTS" ]; then + AMP_element_sequence="$AMP_element_sequence $DIB_LOCAL_ELEMENTS" +fi + +# Set Grub timeout to 0 (no timeout) for fast boot times +export DIB_GRUB_TIMEOUT=${DIB_GRUB_TIMEOUT:-0} + +# Build the image + +export DIB_CLOUD_INIT_DATASOURCES=$CLOUD_INIT_DATASOURCES + +dib_trace_arg= +if [ -n "$dib_enable_tracing" ]; then + dib_trace_arg="-x" +fi + +if [ "$USE_PYTHON3" = "False" ]; then + export DIB_PYTHON_VERSION=2 +fi + +disk-image-create "$AMP_LOGFILE" "$dib_trace_arg" -a "$AMP_ARCH" -o "$AMP_OUTPUTFILENAME" -t \ +"$AMP_IMAGETYPE" --image-size "$AMP_IMAGESIZE" --image-cache "$AMP_CACHEDIR" "$AMP_DISABLE_TMP_FS" \ +"$AMP_element_sequence" + +popd > /dev/null # out of $TEMP +rm -rf "$TEMP" + +if [ -z "$DIB_REPOREF_amphora_agent" ]; then + echo "Successfully built the amphora image using amphora-agent from the master branch." 
+else + echo "Successfully built the amphora using the $DIB_REPOREF_amphora_agent amphora-agent." +fi +echo "Amphora image size: `stat -c "%n %s" $AMP_OUTPUTFILENAME`" diff --git a/diskimage-create/image-tests.sh b/diskimage-create/image-tests.sh new file mode 100755 index 0000000000..650c6ddbe1 --- /dev/null +++ b/diskimage-create/image-tests.sh @@ -0,0 +1,107 @@ +#!/bin/bash +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +# This file is necessary because tox cannot handle pipes in commands + +echo "Examining the Amphora image. This will take some time." + +if [ "$1" -a -f "$1" ]; then + AMP_IMAGE_LOCATION=$1 +elif [ "$1" ]; then + AMP_IMAGE_LOCATION=$1/amphora-x64-haproxy.qcow2 +else + AMP_IMAGE_LOCATION=amphora-x64-haproxy.qcow2 +fi + +if ! [ -f $AMP_IMAGE_LOCATION ]; then + echo "ERROR: Amphora image not found at: $AMP_IMAGE_LOCATION" + exit 1 +fi + +# Check the image size (rounded in GB) +AMP_IMAGE_SIZE=$(virt-filesystems --long --csv --blkdevs -a $AMP_IMAGE_LOCATION | \ + awk -F ',' '$1 == "/dev/sda" { print int($3/1024^3 + 0.5)}') +if [ $AMP_IMAGE_SIZE != 2 ]; then + echo "ERROR: Amphora image did not pass the default size test" + echo "On Ubuntu you may need to run 'sudo chmod 0644 /boot/vmlinuz*' for libguestfs" + exit 1 +else + echo "Amphora image size is correct" +fi + +# Get image information +AMP_IMAGE_INFO=$(virt-inspector $AMP_IMAGE_LOCATION) +# Check the kernel +echo $AMP_IMAGE_INFO | \ + virt-inspector --xpath \ + '/operatingsystems/operatingsystem/distro' \ + | grep -q 'ubuntu' +if [ $? != 0 ]; then + echo "ERROR: Amphora image is using the wrong default distribution" + exit 1 +else + echo "Amphora image is using the correct distribution" +fi + +echo $AMP_IMAGE_INFO | \ + virt-inspector --xpath \ + '/operatingsystems/operatingsystem/arch' \ + | grep -q 'x86_64' +if [ $? != 0 ]; then + echo "ERROR: Amphora image is using the wrong default architecture" + exit 1 +else + echo "Amphora image is using the correct architecture" +fi + +echo $AMP_IMAGE_INFO | \ + virt-inspector --xpath \ + '/operatingsystems/operatingsystem/format' \ + | grep -q 'installed' +if [ $? != 0 ]; then + echo "ERROR: Amphora image is in the wrong format (should be installed)" + exit 1 +else + echo "Amphora image is using the correct format" +fi + +# Check for HAProxy +echo $AMP_IMAGE_INFO | \ + virt-inspector --xpath \ + '/operatingsystems/operatingsystem/applications/application/name[text()="haproxy"]' \ + | grep -q 'haproxy' +if [ $? != 0 ]; then + echo "ERROR: Amphora image is missing the haproxy package" + exit 1 +else + echo "HAProxy package found in the Amphora image" +fi + +# Check for KeepAlived +echo $AMP_IMAGE_INFO | \ + virt-inspector --xpath \ + '/operatingsystems/operatingsystem/applications/application/name[text()="keepalived"]' \ + | grep -q 'keepalived' +if [ $? 
!= 0 ]; then + echo "ERROR: Amphora image is missing the keepalived package" + exit 1 +else + echo "keepalived package found in the Amphora image" +fi +echo "Amphora image looks good." + +exit 0 diff --git a/diskimage-create/requirements.txt b/diskimage-create/requirements.txt new file mode 100644 index 0000000000..4cd6936a0b --- /dev/null +++ b/diskimage-create/requirements.txt @@ -0,0 +1 @@ +diskimage-builder>=2.24.0 diff --git a/diskimage-create/test-requirements.txt b/diskimage-create/test-requirements.txt new file mode 100644 index 0000000000..925a68f4ab --- /dev/null +++ b/diskimage-create/test-requirements.txt @@ -0,0 +1,3 @@ +bashate +doc8 +Pygments diff --git a/diskimage-create/tox.ini b/diskimage-create/tox.ini new file mode 100644 index 0000000000..be71faded2 --- /dev/null +++ b/diskimage-create/tox.ini @@ -0,0 +1,52 @@ +[tox] +minversion = 3.18 +envlist = bashate,docs,build,test + +[testenv] +basepython = python3 +setenv = VIRTUAL_ENV={envdir} +passenv = + http_proxy + HTTP_PROXY + https_proxy + HTTPS_PROXY + no_proxy + NO_PROXY + DIB_* + CLOUD_INIT_DATASOURCES + OCTAVIA_REPO_PATH +install_command = pip install -U {opts} {packages} +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +allowlist_externals = + virt-filesystems + virt-inspector + awk + mkdir + rm + +[testenv:docs] +commands = doc8 README.rst ../elements/haproxy-octavia/README.rst ../elements/root-passwd/README.rst + +[testenv:bashate] +commands = + bashate diskimage-create.sh + bashate image-tests.sh + +[testenv:build] +# NOTE: specify cache directory explicitly with -c as the `diskimage-create.sh` +# default is based off of `$HOME` which is not passed on in a `tox` environment. +commands = + ./diskimage-create.sh -o {toxinidir}/amphora-x64-haproxy -w {toxworkdir} -c {toxworkdir}/.cache +allowlist_externals = + ./diskimage-create.sh + + +[testenv:test] +# Work around tox limitations with command pipes +# https://bitbucket.org/hpk42/tox/issue/73/pipe-output-of-command-into-file +commands = + ./image-tests.sh {toxinidir}/.amp_tox_test + rm -rf {toxinidir}/.amp_tox_test +allowlist_externals = + ./image-tests.sh diff --git a/diskimage-create/version.txt b/diskimage-create/version.txt new file mode 100644 index 0000000000..49d59571fb --- /dev/null +++ b/diskimage-create/version.txt @@ -0,0 +1 @@ +0.1 diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000000..a357acb23e --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,20 @@ +# Docs +hacking>=3.0 # Apache-2.0 +os-api-ref>=1.4.0 # Apache-2.0 +sphinxcontrib-apidoc>=0.2.1 # BSD +docutils>=0.11 # OSI-Approved Open Source, Public Domain +sphinx>=2.0.0,!=2.1.0 # BSD +graphviz!=0.5.0,>=0.4 # MIT License +openstackdocstheme>=2.2.1 # Apache-2.0 +sadisplay>=0.4.8 # BSD +reno>=3.1.0 # Apache-2.0 +sphinx-feature-classification>=0.2.0 # Apache-2.0 + +# PDF Docs +sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD + +# This needs to be installed after above modules +pydot>=1.2.4 # MIT License +pydotplus>=2.0.2 # MIT License +pyparsing>=2.1.0 # MIT +networkx>=1.10 # BSD diff --git a/doc/source/_static/.placeholder b/doc/source/_static/.placeholder new file mode 100644 index 0000000000..e69de29bb2 diff --git a/doc/source/admin/amphora-image-build.rst b/doc/source/admin/amphora-image-build.rst new file mode 120000 index 0000000000..7b1b34c5a6 --- /dev/null +++ b/doc/source/admin/amphora-image-build.rst @@ -0,0 +1 @@ +../../../diskimage-create/README.rst \ No newline at end of file diff --git a/doc/source/admin/apache-httpd.rst 
b/doc/source/admin/apache-httpd.rst new file mode 100644 index 0000000000..db8f8cd62b --- /dev/null +++ b/doc/source/admin/apache-httpd.rst @@ -0,0 +1,29 @@
+
+..
+    Copyright 2017 Intel Corporation
+    All Rights Reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may
+    not use this file except in compliance with the License. You may obtain
+    a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+    License for the specific language governing permissions and limitations
+    under the License.
+
+=========================
+Running Octavia in Apache
+=========================
+
+To run Octavia in apache2, copy the ``httpd/octavia-api.conf`` sample
+configuration file to the appropriate location for the Apache server.
+
+On Debian/Ubuntu systems it is::
+
+    /etc/apache2/sites-available/octavia-api.conf
+
+Restart Apache to have it start serving Octavia.
diff --git a/doc/source/admin/api-audit.rst b/doc/source/admin/api-audit.rst new file mode 100644 index 0000000000..162a28d8e4 --- /dev/null +++ b/doc/source/admin/api-audit.rst @@ -0,0 +1,194 @@
+
+====================
+Octavia API Auditing
+====================
+
+The `keystonemiddleware audit middleware`_ supports delivery of Cloud Auditing
+Data Federation (CADF) audit events via the Oslo messaging notifier
+capability. Based on the `notification_driver` configuration, audit events
+can be routed to the messaging infrastructure
+(notification_driver = messagingv2) or to a log file
+(notification_driver = log).
+
+More information about the CADF format can be found on the `DMTF Cloud Auditing Data Federation website `_.
+
+The audit middleware creates two events per REST API interaction. The first
+event has information extracted from the request data and the second one has
+the request outcome (response).
+
+.. _keystonemiddleware audit middleware: https://docs.openstack.org/keystonemiddleware/latest/audit.html
+
+Configuring Octavia API Auditing
+================================
+
+Auditing can be enabled by making the following changes to the Octavia
+configuration file on your Octavia API instance(s).
+
+#. Enable auditing::
+
+       [audit]
+       ...
+       enabled = True
+
+#. Optionally specify the location of the audit map file::
+
+       [audit]
+       ...
+       audit_map_file = /etc/octavia/octavia_api_audit_map.conf
+
+   The default audit map file location is
+   /etc/octavia/octavia_api_audit_map.conf.
+
+#. Copy the audit map file from the octavia/etc/audit directory to the
+   location specified in the previous step. A sample file has been provided
+   in octavia/etc/audit/octavia_api_audit_map.conf.sample.
+
+#. Optionally specify the REST HTTP methods you do not want to audit::
+
+       [audit]
+       ...
+       ignore_req_list =
+
+#. Specify the driver to use for sending the audit notifications::
+
+       [audit_middleware_notifications]
+       ...
+       driver = log
+
+   Driver options are: messaging, messagingv2, routing, log, noop
+
+#. Optionally specify the messaging topic::
+
+       [audit_middleware_notifications]
+       ...
+       topics =
+
+#. Optionally specify the messaging transport URL::
+
+       [audit_middleware_notifications]
+       ...
+       transport_url =
+
+#. Restart your Octavia API processes.
+
+Sample Audit Events
+===================
+
+Request
+-------
+
+..
code-block:: json + + { + "event_type": "audit.http.request", + "timestamp": "2018-10-11 22:42:22.721025", + "payload": { + "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", + "eventTime": "2018-10-11T22:42:22.720112+0000", + "target": { + "id": "octavia", + "typeURI": "service/load-balancer/loadbalancers", + "addresses": [{ + "url": "http://10.21.21.53/load-balancer", + "name": "admin" + }, { + "url": "http://10.21.21.53/load-balancer", + "name": "private" + }, { + "url": "http://10.21.21.53/load-balancer", + "name": "public" + }], + "name": "octavia" + }, + "observer": { + "id": "target" + }, + "tags": ["correlation_id?value=e5b34bc3-4837-54fa-9892-8e65a9a2e73a"], + "eventType": "activity", + "initiator": { + "typeURI": "service/security/account/user", + "name": "admin", + "credential": { + "token": "***", + "identity_status": "Confirmed" + }, + "host": { + "agent": "openstacksdk/0.17.2 keystoneauth1/3.11.0 python-requests/2.19.1 CPython/2.7.12", + "address": "10.21.21.53" + }, + "project_id": "90168d185e504b5580884a235ba31612", + "id": "2af901396a424d5ca9dffa725226e8c7" + }, + "action": "read/list", + "outcome": "pending", + "id": "8cf14af5-246e-5739-a11e-513ca13b7d36", + "requestPath": "/load-balancer/v2.0/lbaas/loadbalancers" + }, + "priority": "INFO", + "publisher_id": "uwsgi", + "message_id": "63264e0e-e60f-4adc-a656-0d87ab5d6329" + } + +Response +-------- + +.. code-block:: json + + { + "event_type": "audit.http.response", + "timestamp": "2018-10-11 22:42:22.853129", + "payload": { + "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", + "eventTime": "2018-10-11T22:42:22.720112+0000", + "target": { + "id": "octavia", + "typeURI": "service/load-balancer/loadbalancers", + "addresses": [{ + "url": "http://10.21.21.53/load-balancer", + "name": "admin" + }, { + "url": "http://10.21.21.53/load-balancer", + "name": "private" + }, { + "url": "http://10.21.21.53/load-balancer", + "name": "public" + }], + "name": "octavia" + }, + "observer": { + "id": "target" + }, + "tags": ["correlation_id?value=e5b34bc3-4837-54fa-9892-8e65a9a2e73a"], + "eventType": "activity", + "initiator": { + "typeURI": "service/security/account/user", + "name": "admin", + "credential": { + "token": "***", + "identity_status": "Confirmed" + }, + "host": { + "agent": "openstacksdk/0.17.2 keystoneauth1/3.11.0 python-requests/2.19.1 CPython/2.7.12", + "address": "10.21.21.53" + }, + "project_id": "90168d185e504b5580884a235ba31612", + "id": "2af901396a424d5ca9dffa725226e8c7" + }, + "reason": { + "reasonCode": "200", + "reasonType": "HTTP" + }, + "reporterchain": [{ + "reporterTime": "2018-10-11T22:42:22.852613+0000", + "role": "modifier", + "reporter": { + "id": "target" + } + }], + "action": "read/list", + "outcome": "success", + "id": "8cf14af5-246e-5739-a11e-513ca13b7d36", + "requestPath": "/load-balancer/v2.0/lbaas/loadbalancers" + }, + "priority": "INFO", + "publisher_id": "uwsgi", + "message_id": "7cd89dce-af6e-40c5-8634-e87d1ed32a3c" + } diff --git a/doc/source/admin/event-notifications.rst b/doc/source/admin/event-notifications.rst new file mode 100644 index 0000000000..074991b790 --- /dev/null +++ b/doc/source/admin/event-notifications.rst @@ -0,0 +1,115 @@ +=========================== +Octavia Event Notifications +=========================== +Octavia uses the oslo messaging notification system to send notifications for +certain events, such as "octavia.loadbalancer.create.end" after the completion +of a loadbalancer create 
operation.
+
+Configuring oslo messaging for event notifications
+==================================================
+By default, the notifications driver in oslo_messaging is set to an empty
+string; therefore, this option must be configured in order for notifications
+to be sent. Valid options are defined in the `oslo.messaging documentation
+`__.
+The example provided below is the format produced by the messagingv2 driver.
+
+You may specify a custom list of topics on which to send notifications.
+A topic is created for each notification level, with a dot and the level
+appended to the value(s) specified in this list, e.g. notifications.info,
+octavia-notifications.info, etc.
+
+Oslo messaging supports separate backends for RPC and notifications. If
+different from the **[DEFAULT]** **transport_url** configuration, you
+must specify the **transport_url** in the
+**[oslo_messaging_notifications]** section of your *octavia.conf*
+configuration.
+
+.. code-block:: ini
+
+    [oslo_messaging_notifications]
+    driver = messagingv2
+    topics = octavia-notifications,notifications
+    transport_url = transport://user:pass@host1:port/virtual_host
+
+
+Event Types
+===========
+Event types supported in Octavia are:
+
+``'octavia.loadbalancer.update.end'``
+
+``'octavia.loadbalancer.create.end'``
+
+``'octavia.loadbalancer.delete.end'``
+
+Example Notification
+====================
+The payload for an oslo.messaging notification for Octavia loadbalancer
+events is the complete loadbalancer dict in JSON format.
+The complete contents of an oslo.messaging notification for a loadbalancer
+event in Octavia follow the format of the following example:
+
+.. code-block:: json
+
+    {
+        "message_id": "d84a3800-06ca-410e-a1a3-b40a02306a97",
+        "publisher_id": null,
+        "event_type": "octavia.loadbalancer.create.end",
+        "priority": "INFO",
+        "payload": {
+            "enabled": true,
+            "availability_zone": null,
+            "created_at": "2022-04-22T23:02:14.000000",
+            "description": "",
+            "flavor_id": null,
+            "id": "8d4c8f66-7ac1-408e-82d5-59f6fcdea9ee",
+            "listeners": [],
+            "name": "my-octavia-loadbalancer",
+            "operating_status": "OFFLINE",
+            "pools": [],
+            "project_id": "qs59p6z696cp9cho8ze96edddvpfyvgz",
+            "provider": "amphora",
+            "provisioning_status": "PENDING_CREATE",
+            "tags": [],
+            "updated_at": null,
+            "vip": {
+                "ip_address": "192.168.100.2",
+                "network_id": "849b08a9-4397-4d6e-929d-90efc055ab8e",
+                "port_id": "303870a4-bbc3-428c-98dd-492f423869d9",
+                "qos_policy_id": null,
+                "subnet_id": "d59311ee-ed3a-42c0-ac97-cebf7945facc"
+            }
+        },
+        "timestamp": "2022-04-22 23:02:15.717375",
+        "_unique_id": "71f03f00c96342328f09dbd92fe0d398",
+        "_context_user": null,
+        "_context_tenant": "qs59p6z696cp9cho8ze96edddvpfyvgz",
+        "_context_system_scope": null,
+        "_context_project": "qs59p6z696cp9cho8ze96edddvpfyvgz",
+        "_context_domain": null,
+        "_context_user_domain": null,
+        "_context_project_domain": null,
+        "_context_is_admin": false,
+        "_context_read_only": false,
+        "_context_show_deleted": false,
+        "_context_auth_token": null,
+        "_context_request_id": "req-072bab53-1b9b-46fa-92b0-7f04305c31bf",
+        "_context_global_request_id": null,
+        "_context_resource_uuid": null,
+        "_context_roles": [],
+        "_context_user_identity": "- qs59p6z696cp9cho8ze96edddvpfyvgz - - -",
+        "_context_is_admin_project": true
+    }
+
+
+Disabling Event Notifications
+=============================
+By default, event notifications are enabled (see the configuring oslo
+messaging section above for additional requirements). To disable this feature, use
To disable this feature, use + the following setting in your Octavia configuration file: + + .. code-block:: ini + + [controller_worker] + event_notifications = False + diff --git a/doc/source/admin/failover-circuit-breaker.rst b/doc/source/admin/failover-circuit-breaker.rst new file mode 100644 index 0000000000..c9049f9715 --- /dev/null +++ b/doc/source/admin/failover-circuit-breaker.rst @@ -0,0 +1,131 @@ +.. + Copyright Red Hat + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + ======================================== + Octavia Amphora Failover Circuit Breaker + ======================================== + + During a large infrastructure outage, the automatic failover of stale + amphorae can lead to a mass failover event and create a considerable + amount of extra load on servers. By using the amphora failover + circuit breaker feature, you can avoid these unwanted failover events. + The circuit breaker is a configurable threshold value; whenever the number + of stale amphorae reaches that threshold, Octavia stops failing them over + automatically. The circuit breaker feature is disabled by default. + + Configuration + ============= + + You define the threshold value for the failover circuit breaker feature + by setting the *failover_threshold* variable. The *failover_threshold* + variable is a member of the *health_manager* group within the + configuration file ``/etc/octavia/octavia.conf``. + + Whenever the number of stale amphorae reaches or surpasses the value + of *failover_threshold*, Octavia performs the following actions: + + * stops automatic failovers of amphorae. + * sets the status of the stale amphorae to *FAILOVER_STOPPED*. + * logs an error message. + + The line below shows a typical error message: + + .. code-block:: bash + + ERROR octavia.db.repositories [-] Stale amphora count reached the threshold (3). 4 amphorae were set into FAILOVER_STOPPED status. + + .. note:: Base the value that you set for *failover_threshold* on the + size of your environment. We recommend that you set the value to a number + greater than the typical number of amphorae that you estimate to run on a + single host, or to a value that reflects between 20% and 30% + of the total number of amphorae. + + Error Recovery + ============== + + Automatic Error Recovery + ------------------------ + + For amphorae whose status is *FAILOVER_STOPPED*, Octavia will + automatically reset their status to *ALLOCATED* after receiving + new updates from these amphorae. + + Manual Error Recovery + --------------------- + + To recover from the *FAILOVER_STOPPED* condition, you must + manually reduce the number of stale amphorae to below the + circuit breaker threshold. + + You can use the ``openstack loadbalancer amphora list`` command + to list the amphorae that are in *FAILOVER_STOPPED* state. + Use the ``openstack loadbalancer amphora failover`` command to + manually trigger a failover of an amphora. + + In this example, *failover_threshold = 3* and an infrastructure + outage caused four amphorae to become unavailable.
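+ This scenario corresponds to the following setting in the + ``/etc/octavia/octavia.conf`` configuration file: + + .. code-block:: ini + + [health_manager] + failover_threshold = 3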
After the +health manager process detects this state, it sets the status +of all stale amphorae to *FAILOVER_STOPPED* as shown below. + +.. code-block:: bash + + openstack loadbalancer amphora list + +--------------------------------------+--------------------------------------+------------------+--------+---------------+------------+ + | id | loadbalancer_id | status | role | lb_network_ip | ha_ip | + +--------------------------------------+--------------------------------------+------------------+--------+---------------+------------+ + | 79f0e06d-446d-448a-9d2b-c3b89d0c700d | 8fd2cac5-cbca-4bb1-bcfc-daba43e097ab | FAILOVER_STOPPED | BACKUP | 192.168.0.108 | 192.0.2.17 | + | 9c0416d7-6293-4f13-8f67-61e5d757b36e | 4b13dda1-296a-400c-8248-1abad5728057 | ALLOCATED | MASTER | 192.168.0.198 | 192.0.2.42 | + | e11208b7-f13d-4db3-9ded-1ee6f70a0502 | 8fd2cac5-cbca-4bb1-bcfc-daba43e097ab | FAILOVER_STOPPED | MASTER | 192.168.0.154 | 192.0.2.17 | + | ceea9fff-71a2-48c8-a968-e51dc440c572 | ab513cb3-8f5d-461e-b7ae-a06b5083a371 | ALLOCATED | MASTER | 192.168.0.149 | 192.0.2.26 | + | a1351933-2270-493c-8201-d8f9f9fe42f7 | 4b13dda1-296a-400c-8248-1abad5728057 | FAILOVER_STOPPED | BACKUP | 192.168.0.103 | 192.0.2.42 | + | 441718e7-0956-436b-9f99-9a476339d7d2 | ab513cb3-8f5d-461e-b7ae-a06b5083a371 | FAILOVER_STOPPED | BACKUP | 192.168.0.148 | 192.0.2.26 | + +--------------------------------------+--------------------------------------+------------------+--------+---------------+------------+ + +After operators have resolved the infrastructure outage, +they might need to manually trigger failovers to return to +normal operation. In this example, two manual failovers are +necessary to get the number of stale amphorae below the +configured threshold of three: + +.. code-block:: bash + + openstack loadbalancer amphora failover --wait 79f0e06d-446d-448a-9d2b-c3b89d0c700d + openstack loadbalancer amphora list + +--------------------------------------+--------------------------------------+------------------+--------+---------------+------------+ + | id | loadbalancer_id | status | role | lb_network_ip | ha_ip | + +--------------------------------------+--------------------------------------+------------------+--------+---------------+------------+ + | 9c0416d7-6293-4f13-8f67-61e5d757b36e | 4b13dda1-296a-400c-8248-1abad5728057 | ALLOCATED | MASTER | 192.168.0.198 | 192.0.2.42 | + | e11208b7-f13d-4db3-9ded-1ee6f70a0502 | 8fd2cac5-cbca-4bb1-bcfc-daba43e097ab | FAILOVER_STOPPED | MASTER | 192.168.0.154 | 192.0.2.17 | + | ceea9fff-71a2-48c8-a968-e51dc440c572 | ab513cb3-8f5d-461e-b7ae-a06b5083a371 | ALLOCATED | MASTER | 192.168.0.149 | 192.0.2.26 | + | a1351933-2270-493c-8201-d8f9f9fe42f7 | 4b13dda1-296a-400c-8248-1abad5728057 | FAILOVER_STOPPED | BACKUP | 192.168.0.103 | 192.0.2.42 | + | 441718e7-0956-436b-9f99-9a476339d7d2 | ab513cb3-8f5d-461e-b7ae-a06b5083a371 | FAILOVER_STOPPED | BACKUP | 192.168.0.148 | 192.0.2.26 | + | cf734b57-6019-4ec0-8437-115f76d1bbb0 | 8fd2cac5-cbca-4bb1-bcfc-daba43e097ab | ALLOCATED | BACKUP | 192.168.0.141 | 192.0.2.17 | + +--------------------------------------+--------------------------------------+------------------+--------+---------------+------------+ + openstack loadbalancer amphora failover --wait e11208b7-f13d-4db3-9ded-1ee6f70a0502 + openstack loadbalancer amphora list + +--------------------------------------+--------------------------------------+-----------+--------+---------------+------------+ + | id | loadbalancer_id | status | role | lb_network_ip | ha_ip | + 
+--------------------------------------+--------------------------------------+-----------+--------+---------------+------------+ + | 9c0416d7-6293-4f13-8f67-61e5d757b36e | 4b13dda1-296a-400c-8248-1abad5728057 | ALLOCATED | MASTER | 192.168.0.198 | 192.0.2.42 | + | ceea9fff-71a2-48c8-a968-e51dc440c572 | ab513cb3-8f5d-461e-b7ae-a06b5083a371 | ALLOCATED | MASTER | 192.168.0.149 | 192.0.2.26 | + | cf734b57-6019-4ec0-8437-115f76d1bbb0 | 8fd2cac5-cbca-4bb1-bcfc-daba43e097ab | ALLOCATED | BACKUP | 192.168.0.141 | 192.0.2.17 | + | d2909051-402e-4e75-86c9-ec6725c814a1 | 8fd2cac5-cbca-4bb1-bcfc-daba43e097ab | ALLOCATED | MASTER | 192.168.0.25 | 192.0.2.17 | + | 5133e01a-fb53-457b-b810-edbb5202437e | 4b13dda1-296a-400c-8248-1abad5728057 | ALLOCATED | BACKUP | 192.168.0.76 | 192.0.2.42 | + | f82eff89-e326-4e9d-86bc-58c720220a3f | ab513cb3-8f5d-461e-b7ae-a06b5083a371 | ALLOCATED | BACKUP | 192.168.0.86 | 192.0.2.26 | + +--------------------------------------+--------------------------------------+-----------+--------+---------------+------------+ + + After the number of stale amphorae falls below the configured + threshold value, normal operation resumes and the automatic + failover process attempts to restore the remaining stale amphorae. diff --git a/doc/source/admin/flavors.rst b/doc/source/admin/flavors.rst new file mode 100644 index 0000000000..f007e7a276 --- /dev/null +++ b/doc/source/admin/flavors.rst @@ -0,0 +1,140 @@ +.. + Copyright 2018 Rackspace, US Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + =============== + Octavia Flavors + =============== + + Octavia flavors are a powerful tool for operators to bring enhanced load + balancing capabilities to their users. An Octavia flavor is a predefined + set of provider configuration options that are created by the operator. + When a user requests a load balancer, they can request that the load balancer + be built with one of the defined flavors. Flavors are defined per provider + driver and expose the unique capabilities of each provider. + + This document is intended to explain the flavors capability for operators + that wish to create flavors for their users. + + There are three steps to creating a new Octavia flavor: + + #. Decide on the provider flavor capabilities that will be configured in the + flavor. + #. Create the flavor profile with the flavor capabilities. + #. Create the user-facing flavor. + + Provider Capabilities + ===================== + + .. _provider driver flavor capabilities: https://docs.openstack.org/api-ref/load-balancer/v2/index.html#show-provider-flavor-capabilities + + To start the process of defining a flavor, you will want to look at the + flavor capabilities that the provider driver exposes. To do this you can use + the `provider driver flavor capabilities`_ API or the OpenStack client. + + .. code-block:: bash + + openstack loadbalancer provider capability list <provider name> + + With the default RBAC policy, this command is only available to administrators. + + This will list all of the flavor capabilities that the provider supports and + that may be configured via a flavor.
+ + As an example, the amphora provider supports the `loadbalancer_topology` + capability, among many others:: + + +-----------------------+---------------------------------------------------+ + | name | description | + +-----------------------+---------------------------------------------------+ + | loadbalancer_topology | The load balancer topology. One of: SINGLE - One | + | | amphora per load balancer. ACTIVE_STANDBY - Two | + | | amphora per load balancer. | + | ... | ... | + +-----------------------+---------------------------------------------------+ + + Flavor Profiles + =============== + + .. _flavor profile: https://docs.openstack.org/api-ref/load-balancer/v2/index.html#create-flavor-profile + + The next step in the process of creating a flavor is to define a flavor + profile. The flavor profile includes the provider and the flavor data. + The flavor capabilities are the supported flavor data settings for a given + provider. A flavor profile can be created using the `flavor profile`_ API or + the OpenStack client. + + For example, to create a flavor for the amphora provider, we would create the + following flavor profile: + + .. code-block:: bash + + openstack loadbalancer flavorprofile create --name amphora-single-profile --provider amphora --flavor-data '{"loadbalancer_topology": "SINGLE"}' + + With the default RBAC policy, this command is only available to administrators. + + This will create a flavor profile for the amphora provider that creates a load + balancer with a single amphora. When you create a flavor profile, the settings + are validated with the provider to make sure the provider can support the + capabilities specified. + + The output of the command above is:: + + +---------------+--------------------------------------+ + | Field | Value | + +---------------+--------------------------------------+ + | id | 72b53ac2-b191-48eb-8f73-ed012caca23a | + | name | amphora-single-profile | + | provider_name | amphora | + | flavor_data | {"loadbalancer_topology": "SINGLE"} | + +---------------+--------------------------------------+ + + Flavors + ======= + + .. _flavor: https://docs.openstack.org/api-ref/load-balancer/v2/index.html#create-flavor + + Finally, we will create the user-facing Octavia flavor. This defines the + information users will see and use to create a load balancer with an Octavia + flavor. The name of the flavor is the term users specify when creating a load + balancer. We encourage you to include a detailed description for users to + clearly understand the capabilities of the flavor you are providing. + + To continue the example above, we create a flavor using the flavor profile + from the previous step: + + .. code-block:: bash + + openstack loadbalancer flavor create --name standalone-lb --flavorprofile amphora-single-profile --description "A non-high availability load balancer for testing." --enable + + This will create a user-visible Octavia flavor that will create a load balancer + that uses one amphora and is not highly available. Users can specify this + flavor when creating a new load balancer. Disabled flavors are still visible + to users, but they will not be able to create a load balancer using the flavor.
+ + The output of the command above is:: + + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | id | 25cda2d8-f735-4744-b936-d30405c05359 | + | name | standalone-lb | + | flavor_profile_id | 72b53ac2-b191-48eb-8f73-ed012caca23a | + | enabled | True | + | description | A non-high availability load | + | | balancer for testing. | + +-------------------+--------------------------------------+ + + At this point, the flavor is available for use by users creating new load + balancers.
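+ A user could then request a load balancer with this flavor. As a sketch, + assuming a VIP subnet named ``public-subnet`` exists in your cloud (the + subnet name is a placeholder): + + .. code-block:: bash + + openstack loadbalancer create --name my-standalone-lb --flavor standalone-lb --vip-subnet-id public-subnet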
diff --git a/doc/source/admin/guides/certificates.rst b/doc/source/admin/guides/certificates.rst new file mode 100644 index 0000000000..982787d3c6 --- /dev/null +++ b/doc/source/admin/guides/certificates.rst @@ -0,0 +1,312 @@ +.. + Copyright 2018 Rackspace, US Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain a + copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + ======================================= + Octavia Certificate Configuration Guide + ======================================= + + This document is intended for Octavia administrators setting up certificate + authorities for the two-way TLS authentication used in Octavia for command + and control of :term:`Amphora`. + + This guide does not apply to the configuration of `TERMINATED_TLS` listeners + on load balancers. See the `Load Balancing Cookbook`_ for instructions on + creating `TERMINATED_TLS` listeners. + + .. _Load Balancing Cookbook: ../../user/guides/basic-cookbook.html#deploy-a-tls-terminated-https-load-balancer + + Two-way TLS Authentication in Octavia + ===================================== + + The Octavia controller processes communicate with the Amphora over + a TLS connection much like an HTTPS connection to a website. However, Octavia + validates that both sides are trusted by performing two-way TLS + authentication. + + .. note:: + + This is a simplification of the full TLS handshake process. See the + `TLS 1.3 RFC 8446 `_ for the full + handshake. + + Phase One + --------- + + When a controller process, such as the Octavia worker process, connects to + an Amphora, the Amphora will present its `server` certificate + to the controller. The controller will then validate it against the `server` + Certificate Authority (CA) certificate stored on the controller. If the + presented certificate is validated against the `server` CA certificate, the + connection goes into phase two of the two-way TLS authentication. + + Phase Two + --------- + + Once phase one is complete, the controller will present its `client` + certificate to the Amphora. The Amphora will then validate the + certificate against the `client` CA certificate stored inside the Amphora. + If this certificate is successfully validated, the rest of the TLS handshake + will continue to establish the secure communication channel between the + controller and the Amphora. + + Certificate Lifecycles + ---------------------- + + The `server` certificates are uniquely generated for each amphora by the + controller using the `server` certificate authority certificates and keys. + These `server` certificates are automatically rotated by the Octavia + housekeeping controller process as they near expiration. + + The `client` certificates are used for the Octavia controller processes. + These are managed by the operator and due to their use on the control plane + of the cloud, typically have a long lifetime. + + See the `Operator Maintenance Guide `_ for more + information about the certificate lifecycles. + + Creating the Certificate Authorities + ==================================== + + As discussed above, this configuration uses two certificate authorities; one + for the `server` certificates, and one for the `client` certificates. + + .. note:: + + Technically Octavia can be run using just one certificate authority by + using it to issue certificates for both roles. However, this weakens the + security as a `server` certificate from an amphora could be used to + impersonate a controller. We recommend you use two certificate authorities + for all deployments outside of testing. + + For this document, we are going to set up simple OpenSSL-based certificate + authorities. However, any standards-compliant certificate authority software + can be used to create the required certificates. + + 1. Create a working directory for the certificate authorities. Make sure to + set the proper permissions on this directory such that others cannot + access the private keys, random bits, etc. being generated here. + + .. code-block:: bash + + $ mkdir certs + $ chmod 700 certs + $ cd certs + + 2. Create the OpenSSL configuration file. This can be shared between the + two certificate authorities. + + .. code-block:: bash + + $ vi openssl.cnf + + .. literalinclude:: sample-configs/openssl.cnf + :language: ini + + 3. Make any locally required configuration changes to the openssl.cnf. Some + settings to consider are: + + * The default certificate lifetime is 10 years. + * The default bit length is 2048. + + 4. Make directories for the two certificate authorities. + + .. code-block:: bash + + $ mkdir client_ca + $ mkdir server_ca + + 5. Starting with the `server` certificate authority, prepare the CA. + + .. code-block:: bash + + $ cd server_ca + $ mkdir certs crl newcerts private + $ chmod 700 private + $ touch index.txt + $ echo 1000 > serial + + 6. Create the `server` CA key. + + * You will need to specify a passphrase to protect the key file. + + .. code-block:: bash + + $ openssl genpkey -algorithm RSA -out private/ca.key.pem -aes-128-cbc -pkeyopt rsa_keygen_bits:4096 + $ chmod 400 private/ca.key.pem + + 7. Create the `server` CA certificate. + + * You will need to specify the passphrase used in step 6. + * You will also be asked to provide details for the certificate. These are + up to you and should be appropriate for your organization. + * You may want to mention this is the `server` CA in the common name field. + * Since this is the CA certificate, you might want to give it a very long + lifetime, such as twenty years shown in this example command. + + .. code-block:: bash + + $ openssl req -config ../openssl.cnf -key private/ca.key.pem -new -x509 -days 7300 -sha256 -extensions v3_ca -out certs/ca.cert.pem + + 8. Moving to the `client` certificate authority, prepare the CA. + + .. code-block:: bash + + $ cd ../client_ca + $ mkdir certs crl csr newcerts private + $ chmod 700 private + $ touch index.txt + $ echo 1000 > serial + + 9. Create the `client` CA key. + + * You will need to specify a passphrase to protect the key file. + + .. code-block:: bash
+ $ openssl genpkey -algorithm RSA -out private/ca.key.pem -aes-128-cbc -pkeyopt rsa_keygen_bits:4096 + $ chmod 400 private/ca.key.pem + + 10. Create the `client` CA certificate. + + * You will need to specify the passphrase used in step 9. + * You will also be asked to provide details for the certificate. These are + up to you and should be appropriate for your organization. + * You may want to mention this is the `client` CA in the common name field. + * Since this is the CA certificate, you might want to give it a very long + lifetime, such as twenty years shown in this example command. + + .. code-block:: bash + + $ openssl req -config ../openssl.cnf -key private/ca.key.pem -new -x509 -days 7300 -sha256 -extensions v3_ca -out certs/ca.cert.pem + + 11. Create a key for the `client` certificate to use. + + * You can create one certificate and key to be used by all of the + controllers or you can create a unique certificate and key for each + controller. + * You will need to specify a passphrase to protect the key file. + + .. code-block:: bash + + $ openssl genpkey -algorithm RSA -out private/client.key.pem -aes-128-cbc -pkeyopt rsa_keygen_bits:2048 + + 12. Create the certificate request for the `client` certificate used on the + controllers. + + * You will need to specify the passphrase used in step 11. + * You will also be asked to provide details for the certificate. These are + up to you and should be appropriate for your organization. + * You must fill in the common name field. + * You may want to mention this is the `client` certificate in the common + name field, or the individual controller information. + + .. code-block:: bash + + $ openssl req -config ../openssl.cnf -new -sha256 -key private/client.key.pem -out csr/client.csr.pem + + 13. Sign the `client` certificate request. + + * You will need to specify the CA passphrase used in step 9. + * Since this certificate is used on the control plane, you might want to + give it a very long lifetime, such as twenty years shown in this example + command. + + .. code-block:: bash + + $ openssl ca -config ../openssl.cnf -extensions usr_cert -days 7300 -notext -md sha256 -in csr/client.csr.pem -out certs/client.cert.pem + + 14. Create a concatenated `client` certificate and key file. + + * You will need to specify the passphrase used in step 11. + + .. code-block:: bash + + $ openssl rsa -in private/client.key.pem -out private/client.cert-and-key.pem + $ cat certs/client.cert.pem >> private/client.cert-and-key.pem
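+ As an optional sanity check (not part of the procedure above), you can + confirm from the ``client_ca`` directory that the signed `client` + certificate validates against the `client` CA before deploying it: + + .. code-block:: bash + + $ openssl verify -CAfile certs/ca.cert.pem certs/client.cert.pem + certs/client.cert.pem: OK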
+ Configuring Octavia + =================== + + In this section we will configure Octavia to use the certificates and keys + created during the `Creating the Certificate Authorities`_ section. + + 1. Copy the required files over to your Octavia controllers. + + * Only the Octavia worker, health manager, and housekeeping processes will + need access to these files. + * The first command should return you to the "certs" directory created in + step 1 of the `Creating the Certificate Authorities`_ section. + * These commands assume you are running the octavia processes under the + "octavia" user. + * Note, some of these steps should be run with "sudo" and are indicated by + the "#" prefix. + + .. code-block:: bash + + $ cd .. + # mkdir /etc/octavia/certs + # chmod 700 /etc/octavia/certs + # cp server_ca/private/ca.key.pem /etc/octavia/certs/server_ca.key.pem + # chmod 700 /etc/octavia/certs/server_ca.key.pem + # cp server_ca/certs/ca.cert.pem /etc/octavia/certs/server_ca.cert.pem + # cp client_ca/certs/ca.cert.pem /etc/octavia/certs/client_ca.cert.pem + # cp client_ca/private/client.cert-and-key.pem /etc/octavia/certs/client.cert-and-key.pem + # chmod 700 /etc/octavia/certs/client.cert-and-key.pem + # chown -R octavia.octavia /etc/octavia/certs + + 2. Configure the [certificates] section of the octavia.conf file. + + * Only the Octavia worker, health manager, and housekeeping processes will + need these settings. + * The "<server CA key passphrase>" should be replaced with the passphrase + that was used in step 6 of the `Creating the Certificate Authorities`_ + section. + + .. code-block:: ini + + [certificates] + cert_generator = local_cert_generator + ca_certificate = /etc/octavia/certs/server_ca.cert.pem + ca_private_key = /etc/octavia/certs/server_ca.key.pem + ca_private_key_passphrase = <server CA key passphrase> + + 3. Configure the [controller_worker] section of the octavia.conf file. + + * Only the Octavia worker, health manager, and housekeeping processes will + need these settings. + + .. code-block:: ini + + [controller_worker] + client_ca = /etc/octavia/certs/client_ca.cert.pem + + 4. Configure the [haproxy_amphora] section of the octavia.conf file. + + * Only the Octavia worker, health manager, and housekeeping processes will + need these settings. + + .. code-block:: ini + + [haproxy_amphora] + client_cert = /etc/octavia/certs/client.cert-and-key.pem + server_ca = /etc/octavia/certs/server_ca.cert.pem + + 5. Start the controller processes. + + .. code-block:: bash + + # systemctl start octavia-worker + # systemctl start octavia-healthmanager + # systemctl start octavia-housekeeping + diff --git a/doc/source/admin/guides/operator-maintenance.rst b/doc/source/admin/guides/operator-maintenance.rst new file mode 100644 index 0000000000..c3fbf79ecb --- /dev/null +++ b/doc/source/admin/guides/operator-maintenance.rst @@ -0,0 +1,391 @@ +.. + Copyright (c) 2017 Rackspace + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + ====================================== + Operator Maintenance Guide + ====================================== + This document is intended for operators. For a developer guide see the + :doc:`../../contributor/guides/dev-quick-start` in this documentation + repository. For an end-user guide, please see the + :doc:`../../user/guides/basic-cookbook` in this documentation repository. + + Monitoring + ========== + + + Monitoring Load Balancer Amphora + -------------------------------- + + Octavia will monitor the load balancing amphorae itself and initiate failovers + and/or replacements if they malfunction. Therefore, most installations won't + need to monitor the amphorae running the load balancer. + + Octavia will log each failover to the corresponding health manager logs.
It is + advisable to use log analytics to monitor failover trends to notice problems in + the OpenStack installation early. We have seen neutron (network) connectivity + issues, Denial of Service attacks, and nova (compute) malfunctions lead to a + higher than normal failover rate. Monitoring of those other services has also + surfaced the same problems, so depending on your overall monitoring + strategy this might be optional. + + If additional monitoring is necessary, review the corresponding calls on + the amphora agent REST interface (see + :doc:`../../contributor/api/haproxy-amphora-api`). + + Monitoring Pool Members + ----------------------- + + Octavia will use the health information from the underlying load balancing + subsystems to determine the health of members. This information will be + streamed to the Octavia database and made available via the status + tree or other API methods. For critical applications, we recommend + polling this information at regular intervals. + + Monitoring Load Balancers + ------------------------- + + You should monitor the provisioning status of a load balancer, and send alerts + if the provisioning status is not ACTIVE. Alerts should not be triggered when + an application is making regular changes to the pool and enters several PENDING + stages. + + The provisioning status of load balancer objects reflects the ability of the + control plane to contact and successfully provision create, + update, and delete requests. The operating status of a load balancer object + reports on the current functional status of the load balancer. + + For example, a load balancer might have a provisioning status of ERROR, but an + operating status of ONLINE. This could be caused by a neutron networking + failure that blocked that last requested update to the load balancer + configuration from successfully completing. In this case, the load balancer + continues to process traffic, but might not have + applied the latest configuration updates yet. + + A load balancer in a PENDING provisioning status is immutable; it cannot be + updated or deleted by another process, because the PENDING status acts as a + lock on the resource. + If a database outage occurs while a load balancer is being created, updated or + deleted, the Octavia control plane will keep trying, for a long period of time + (around 2 hours and 45 minutes with the default settings), to remove the + PENDING status and set it to ERROR, to prevent the resource from remaining + immutable. + + Monitoring load balancer functionality + -------------------------------------- + + You can monitor the operational status of your load balancer using the + `openstack loadbalancer status show` command. It reports the current operation + status of the load balancer and its child objects. + + You might also want to use an external monitoring service that connects to your + load balancer listeners and monitors them from outside of the cloud. This type + of monitoring indicates if there is a failure outside of Octavia that might + impact the functionality of your load balancer, such as router failures, + network connectivity issues, and so on.
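+ For example, a minimal external probe of an HTTP listener could be as simple + as the sketch below, where ``203.0.113.50`` is a placeholder for your load + balancer VIP address: + + .. code-block:: bash + + $ curl --connect-timeout 5 --max-time 10 -sSf -o /dev/null http://203.0.113.50/ && echo "listener OK"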
.. _Monasca Octavia plugin: https://github.com/openstack/monasca-agent/blob/master/monasca_setup/detection/plugins/octavia.py + + Monitoring Octavia Control Plane + -------------------------------- + + To monitor the Octavia control plane we recommend process monitoring of the + main Octavia processes: + + * octavia-api + + * octavia-worker + + * octavia-health-manager + + * octavia-housekeeping + + The Monasca project has a plugin for such monitoring (see + `Monasca Octavia plugin`_). + Please refer to this project for further information. + + Octavia's control plane components follow a shared-nothing architecture and + can be scaled linearly. For high availability of the control plane we + recommend running at least one set of components in each availability zone. + Furthermore, the octavia-api endpoint could be behind a load balancer or other + HA technology. That said, if one or more components fail, the system will + still be available (though potentially degraded). For instance, if you have + installed one set of components in each of three availability zones, Octavia + will remain responsive and available even if you lose a whole zone; only if + you lose the Octavia control plane in all three zones will the service be + unavailable. Please note this only addresses control plane availability; the + availability of the load balancing function depends highly on the chosen + topology and the anti-affinity settings. See our forthcoming HA guide for + more details. + + Additionally, we recommend monitoring the Octavia API endpoint(s). There is + currently no special URL to use, so polling the root URL at regular + intervals is sufficient. + + There is a host of information in the log files which can be used for log + analytics. A few examples of what could be monitored are: + + * Amphora Build Rate - to determine load of the system + + * Amphora Build Time - to determine how long it takes to build an amphora + + * Failures/Errors - to be notified of system problems early + + .. _rotating_amphora: + + Rotating the Amphora Images + =========================== + + Octavia will start load balancers with a pre-built image that contains the + amphora agent and a load balancing application, and that is seeded with + cryptographic certificates through the config drive at start up. + + Rotating the image means making a load balancer amphora that is running with + an old image fail over to an amphora with a new image. This should happen + without any measurable interruption in the load balancing functionality when + using ACTIVE/STANDBY topology. Standalone load balancers might experience a + short outage. + + Here are some reasons you might need to rotate the amphora image: + + * There has been a (security) update to the underlying operating system + + * You want to deploy a new version of the amphora agent or haproxy + + * The cryptographic certificates and/or keys on the amphora have been + compromised. + + * Though not related to rotating images, this procedure might be invoked if you + are switching to a different flavor for the underlying virtual machine. + + Preparing a New Amphora Image + ----------------------------- + + To prepare a new amphora image you will need to use diskimage-create.sh as + described in the README in the diskimage-create directory. + + For instance, in the ``octavia/diskimage-create`` directory, run: + + .. code-block:: bash + + ./diskimage-create.sh + + Once you have created a new image, you will need to upload it into glance. The + following shows how to do this if you have set the image tag in the + Octavia configuration file.
Make sure to use a user with the same tenant as + the Octavia service account: + + .. code-block:: bash + + openstack image create --file amphora-x64-haproxy.qcow2 \ + --disk-format qcow2 --tag <amphora tag> --private \ + --container-format bare /var/lib/octavia/amphora-x64-haproxy.qcow2 + + If you didn't configure image tags and instead configured an image ID, you + will need to update the Octavia configuration file with the new ID and restart + the Octavia services (except octavia-api). + + Generating a List of Load Balancers to Rotate + --------------------------------------------- + + The easiest way to generate a list is to simply list the IDs of all + load balancers: + + .. code-block:: bash + + openstack loadbalancer list -c id -f value + + Take note of the IDs. + + Rotating a Load Balancer + ------------------------ + + Octavia has an API call to initiate the failover of a load balancer: + + .. code-block:: bash + + openstack loadbalancer failover <loadbalancer id> + + You can observe the failover by querying Octavia with ``openstack loadbalancer + show <loadbalancer id>`` until the load balancer goes ``ACTIVE`` again. + + .. _best_practice: + + Best Practices/Optimizations + ---------------------------- + + Since a failover puts significant load on the OpenStack installation by + creating new virtual machines and ports, it should either be done at a very + slow pace, during a time with little load, or with the right throttling + enabled in Octavia. The throttling will prioritize failovers over other + operations; depending on how many failovers are initiated, this might crowd + out other operations.
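+ As a sketch, failovers can be paced with a simple loop. This assumes a client + recent enough to support the ``--wait`` flag; tune the sleep interval to your + environment: + + .. code-block:: bash + + for lb in $(openstack loadbalancer list -c id -f value); do + openstack loadbalancer failover --wait "$lb" + sleep 60  # pause between failovers to limit control plane load + done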
+ Rotating Cryptographic Certificates + =================================== + + Octavia secures the communication between the amphora agent and the control + plane with two-way TLS encryption. To accomplish that, several certificates + are distributed in the system: + + * Control plane: + + * Amphora certificate authority (CA) certificate: Used to validate + amphora certificates if Octavia acts as a Certificate Authority to + issue new amphora certificates + + * Client certificate: Used to authenticate with the amphora + + * Amphora: + + * Client CA certificate: Used to validate control plane + client certificate + + * Amphora certificate: Presented to control plane processes to prove amphora + identity. + + The heartbeat UDP packets emitted from the amphora are secured with a + symmetric encryption key. This is set by the configuration option + `heartbeat_key` in the `health_manager` section. We recommend setting it to a + random string of a sufficient length. + + .. _rotate-amphora-certs: + + Rotating Amphora Certificates + ----------------------------- + + For the `server` certificates, Octavia will act as a certificate authority + itself, issuing amphora certificates to be used by each amphora. Octavia will + also monitor those certificates and refresh them before they expire. + + There are three ways to initiate a rotation manually: + + * Change the expiration date of the certificate in the database. Octavia + will then rotate the amphora certificates with newly issued ones. This + requires the following: + + * Client CA certificate hasn't expired or the + corresponding client certificate on the control plane hasn't been issued by + a different client CA (in case the authority was + compromised) + + * The Amphora CA certificate on the control plane didn't + change in any way which jeopardizes validation of the amphora certificate + (e.g. the certificate was reissued with a new private/public key) + + * If the amphora CA changed in a way which jeopardizes + validation of the amphora certificate, an operator can manually upload newly + issued amphora certificates by switching off validation of the old amphora + certificate. This requires a client certificate which can be validated by the + client CA file on the amphora. Refer to + :doc:`../../contributor/api/haproxy-amphora-api` for more details. + + * If the client certificate on the control plane changed in a way that it can't + be validated by the client certificate authority certificate on the amphora, + a failover (see :ref:`rotate-amphora-certs`) of all amphorae needs to be + initiated. Until the failover is completed, the amphorae can't be controlled + by the control plane. + + Rotating the Certificate Authority Certificates + ----------------------------------------------- + + If there is a compromise of the certificate authorities' certificates, or they + have expired, new ones need to be installed into the system. If Octavia is + not acting as the certificate authority, only the certificate authority's + certificate needs to be changed in the system so amphorae can be authenticated + again. + + * Issue new certificates (see the script in the bin folder of Octavia if + Octavia is acting as the certificate authority) or follow the instructions + of the third-party certificate authority. Copy the certificate and the + private key (if Octavia acts as a certificate authority) where Octavia can + find them. + + * If the previous certificate files haven't been overridden, adjust the paths + to the new certs in the configuration file and restart all Octavia services + (except octavia-api). + + Review :ref:`rotate-amphora-certs` above to determine if and how the + amphora certificates need to be rotated. + + Rotating Client Certificates + ---------------------------- + + If the client certificates have expired, new ones need to be issued and + installed on the system: + + * Issue a new client certificate (see the script in the bin folder of Octavia + if self-signed certificates are used) or use the ones provided to you by + your certificate authority. + + * Copy the new cert where Octavia can find it. + + * If the previous certificate files haven't been overridden, adjust the paths + to the new certs in the configuration file. In all cases, restart all Octavia + services except octavia-api. + + If the client CA certificate has been replaced, then in addition to + rotating the client certificate, the new client CA + certificate needs to be installed in the system. After that initiate a + failover of all amphorae to distribute the new client CA + cert. Until the failover is completed, the amphorae can't be controlled by the + control plane. + + Changing The Heartbeat Encryption Key + ------------------------------------- + + Special caution needs to be taken to replace the heartbeat encryption key. + Once this key is changed, Octavia can't read any heartbeats and will assume + all amphorae are in an error state, initiating an immediate failover. + + In preparation, read the chapter on :ref:`best_practice` in + the Failover section. + + Given the risks involved with changing this key, it should not be changed + during routine maintenance but only when a compromise is strongly suspected. + + .. note:: + For future versions of Octavia an "update amphora" API is planned which + will allow this key to be changed without failover.
At that time there would + be a procedure to halt health monitoring while the keys are rotated and then + resume health monitoring. + +Handling a VM Node Failure +-------------------------- + +If a node fails which is running amphora, Octavia will automatically failover +the amphora to a different node (capacity permitting). In some cases, the +node can be recovered (e.g. through a hard reset) and the hypervisor might +bring back the amphora vms. In this case, an operator should manually delete +all amphora on this specific node since Octavia assumes they have been +deleted as part of the failover and will not touch them again. + +.. note:: + As a safety measure an operator can, prior to deleting, manually check if + the VM is in use. First, use the Amphora API to obtain the current list of + amphorae, then match the nova instance ID to the compute_id column in the + amphora API response (it is not currently possible to filter amphora by + compute_id). If there are any matches where the amphora status is not + 'DELETED', the amphora is still considered to be in use. + +Evacuating a Specific Amphora from a Host +----------------------------------------- + +In some cases an amphora needs to be evacuated either because the host is being +shutdown for maintenance or as part of a failover. Octavia has a rich amphora +API to do that. + +First use the amphora API to find the specific amphora. Then, if not already +performed, disable scheduling to this host in nova. Lastly, initiate a failover +of the specific amphora with the failover command on the amphora API. + +Alternatively, a live migration might also work if it happens quick enough for +Octavia not to notice a stale amphora (the default configuration is 60s). diff --git a/doc/source/admin/guides/sample-configs/openssl.cnf b/doc/source/admin/guides/sample-configs/openssl.cnf new file mode 100644 index 0000000000..01b398549b --- /dev/null +++ b/doc/source/admin/guides/sample-configs/openssl.cnf @@ -0,0 +1,106 @@ +# OpenSSL root CA configuration file. + +[ ca ] +# `man ca` +default_ca = CA_default + +[ CA_default ] +# Directory and file locations. +dir = ./ +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +database = $dir/index.txt +serial = $dir/serial +RANDFILE = $dir/private/.rand + +# The root key and root certificate. +private_key = $dir/private/ca.key.pem +certificate = $dir/certs/ca.cert.pem + +# For certificate revocation lists. +crlnumber = $dir/crlnumber +crl = $dir/crl/ca.crl.pem +crl_extensions = crl_ext +default_crl_days = 30 + +# SHA-1 is deprecated, so use SHA-2 instead. +default_md = sha256 + +name_opt = ca_default +cert_opt = ca_default +default_days = 3650 +preserve = no +policy = policy_strict + +[ policy_strict ] +# The root CA should only sign intermediate certificates that match. +# See the POLICY FORMAT section of `man ca`. +countryName = match +stateOrProvinceName = match +organizationName = match +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ req ] +# Options for the `req` tool (`man req`). +default_bits = 2048 +distinguished_name = req_distinguished_name +string_mask = utf8only + +# SHA-1 is deprecated, so use SHA-2 instead. +default_md = sha256 + +# Extension to add when the -x509 option is used. +x509_extensions = v3_ca + +[ req_distinguished_name ] +# See . 
diff --git a/doc/source/admin/guides/sample-configs/openssl.cnf b/doc/source/admin/guides/sample-configs/openssl.cnf new file mode 100644 index 0000000000..01b398549b --- /dev/null +++ b/doc/source/admin/guides/sample-configs/openssl.cnf @@ -0,0 +1,106 @@ +# OpenSSL root CA configuration file. + + [ ca ] + # `man ca` + default_ca = CA_default + + [ CA_default ] + # Directory and file locations. + dir = ./ + certs = $dir/certs + crl_dir = $dir/crl + new_certs_dir = $dir/newcerts + database = $dir/index.txt + serial = $dir/serial + RANDFILE = $dir/private/.rand + + # The root key and root certificate. + private_key = $dir/private/ca.key.pem + certificate = $dir/certs/ca.cert.pem + + # For certificate revocation lists. + crlnumber = $dir/crlnumber + crl = $dir/crl/ca.crl.pem + crl_extensions = crl_ext + default_crl_days = 30 + + # SHA-1 is deprecated, so use SHA-2 instead. + default_md = sha256 + + name_opt = ca_default + cert_opt = ca_default + default_days = 3650 + preserve = no + policy = policy_strict + + [ policy_strict ] + # The root CA should only sign intermediate certificates that match. + # See the POLICY FORMAT section of `man ca`. + countryName = match + stateOrProvinceName = match + organizationName = match + organizationalUnitName = optional + commonName = supplied + emailAddress = optional + + [ req ] + # Options for the `req` tool (`man req`). + default_bits = 2048 + distinguished_name = req_distinguished_name + string_mask = utf8only + + # SHA-1 is deprecated, so use SHA-2 instead. + default_md = sha256 + + # Extension to add when the -x509 option is used. + x509_extensions = v3_ca + + [ req_distinguished_name ] + # See . + countryName = Country Name (2 letter code) + stateOrProvinceName = State or Province Name + localityName = Locality Name + 0.organizationName = Organization Name + organizationalUnitName = Organizational Unit Name + commonName = Common Name + emailAddress = Email Address + + # Optionally, specify some defaults. + countryName_default = US + stateOrProvinceName_default = Oregon + localityName_default = + 0.organizationName_default = OpenStack + organizationalUnitName_default = Octavia + emailAddress_default = + commonName_default = example.org + + [ v3_ca ] + # Extensions for a typical CA (`man x509v3_config`). + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid:always,issuer + basicConstraints = critical, CA:true + keyUsage = critical, digitalSignature, cRLSign, keyCertSign + + [ usr_cert ] + # Extensions for client certificates (`man x509v3_config`). + basicConstraints = CA:FALSE + nsCertType = client, email + nsComment = "OpenSSL Generated Client Certificate" + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid,issuer + keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment + extendedKeyUsage = clientAuth, emailProtection + + [ server_cert ] + # Extensions for server certificates (`man x509v3_config`). + basicConstraints = CA:FALSE + nsCertType = server + nsComment = "OpenSSL Generated Server Certificate" + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid,issuer:always + keyUsage = critical, digitalSignature, keyEncipherment + extendedKeyUsage = serverAuth + + [ crl_ext ] + # Extension for CRLs (`man x509v3_config`). + authorityKeyIdentifier=keyid:always + diff --git a/doc/source/admin/guides/upgrade.rst b/doc/source/admin/guides/upgrade.rst new file mode 100644 index 0000000000..6fef161d5b --- /dev/null +++ b/doc/source/admin/guides/upgrade.rst @@ -0,0 +1,102 @@ +.. + Copyright 2018 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain a + copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + ==================================== + Load Balancing Service Upgrade Guide + ==================================== + + This document outlines steps and notes for operators for reference when + upgrading their Load Balancing service from previous versions of OpenStack. + + Plan the upgrade + ================ + + Before jumping right into the upgrade process, there are a few considerations + operators should observe: + + * Carefully read the release notes, particularly the upgrade section. + + * Upgrades are only supported between sequential releases. For example, + upgrading from Pike to Queens is supported while upgrading from Pike to + Rocky is not. + + * It is expected that each Load Balancing provider provides its own upgrade + documentation. Please refer to it for upgrade instructions. + + * The Load Balancing service builds on top of other OpenStack services, e.g. + Compute, Networking, Image and Identity. On a staging environment, upgrade + the Load Balancing service and verify it works as expected. For example, a + good indicator would be the successful run of `Octavia Tempest tests + `. One way to invoke them is shown below.
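+ As a sketch, assuming the ``octavia-tempest-plugin`` is installed in the + staging environment's Tempest workspace, a scenario smoke run might be + invoked as: + + .. code-block:: bash + + $ tempest run --regex '^octavia_tempest_plugin.tests.scenario'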
+ Cold upgrade + ============ + + In a cold upgrade (also known as offline upgrade and non-rolling upgrade), the + Load Balancing service is not available because all the control plane services + have to be taken down. No data plane disruption should result during the course + of upgrading. In the case of the Load Balancing service, it means no downtime + or reconfiguration of service-managed resources (e.g. load balancers, + listeners, pools and members). + + #. Run the :ref:`octavia-status upgrade check ` + command to validate that Octavia is ready for upgrade. + + #. Gracefully stop all Octavia processes. We recommend in this order: + Housekeeping, Health manager, API, Worker. + + #. Optional: Make a backup of the database (see the example after this list). + + #. Upgrade all Octavia control plane nodes to the next release. Remember to + also upgrade library dependencies (e.g. octavia-lib). If upgrading Octavia + from distribution packages, your system package manager is expected to + handle this automatically. + + #. Verify that all configuration option names are up-to-date with the latest + Octavia version. For example, pay special attention to deprecated + configurations. + + #. Run ``octavia-db-manage upgrade head`` from any Octavia node to upgrade the + database and run any corresponding database migrations. + + #. Start all Octavia processes. + + #. Build a new image and upload it to the Image service. Do not forget to tag + the image. We recommend updating images frequently to include latest bug + fixes and security issues on installed software (operating system, amphora + agent and its dependencies).
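+ For the optional backup in step 3, a minimal sketch, assuming a MySQL/MariaDB + backend and a database named ``octavia``, might be: + + .. code-block:: bash + + $ mysqldump --single-transaction octavia > octavia-pre-upgrade.sql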
+ Amphorae upgrade + ================ + + Amphorae upgrade may be required in the event of an API incompatibility between + the running amphora agent (old version) and Octavia services (new version). + Octavia will automatically recover by failing over amphorae and thus new + amphora instances will be running on the latest amphora agent code. The + drawback in that case is data plane downtime during failover. API breakage is + a very rare case, and would be highlighted in the release notes if this + scenario occurs. + + Upgrade testing + =============== + + `Grenade `_ is an OpenStack test + harness project that validates upgrade scenarios between releases. It uses + DevStack to initially perform a base OpenStack install and then upgrade to a + target version. + + Octavia has a `Grenade plugin + `_ and + a CI gate job that validates cold upgrades of an OpenStack deployment with + Octavia enabled. The plugin creates load balancing resources and verifies that + resources are still working during and after upgrade. + diff --git a/doc/source/admin/healthcheck.rst b/doc/source/admin/healthcheck.rst new file mode 100644 index 0000000000..e6a67905fb --- /dev/null +++ b/doc/source/admin/healthcheck.rst @@ -0,0 +1,618 @@ +.. + Copyright 2020 Red Hat, Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + ============================= + Octavia API Health Monitoring + ============================= + + The Octavia API provides a health monitoring endpoint that can be used by + external load balancers to manage the Octavia API pool. When properly + configured, the health monitoring endpoint will reflect the full operational + status of the Octavia API. + + The Octavia API health monitoring endpoint extends the `OpenStack Oslo + middleware healthcheck `_ library to test the Octavia Pecan API framework and associated services. + + Oslo Healthcheck Queries + ======================== + + Oslo middleware healthcheck supports HTTP **"GET"** and **"HEAD"** methods. + + The response from Oslo middleware healthcheck can be customized by specifying + the acceptable response type for the request. + + Oslo middleware healthcheck currently supports the following types: + + * text/plain + * text/html + * application/json + + If the requested type is not one of the above, it defaults to text/plain. + + .. note:: + + The content of the response "reasons" will vary based on the backend plugins + enabled in Oslo middleware healthcheck. It is a best practice to only rely + on the HTTP status code for Octavia API health monitoring. + + Example Responses + ----------------- + + Example passing output for text/plain with *detailed* False: + + .. code-block:: bash + + $ curl -i http://198.51.100.10/load-balancer/healthcheck + + HTTP/1.1 200 OK + Date: Mon, 16 Mar 2020 18:10:27 GMT + Server: Apache/2.4.29 (Ubuntu) + Content-Type: text/plain; charset=UTF-8 + Content-Length: 2 + x-openstack-request-id: req-9c6f4303-63a7-4f30-8afc-39340658702f + Connection: close + Vary: Accept-Encoding + + OK + + Example failing output for text/plain with *detailed* False: + + .. code-block:: bash + + $ curl -i http://198.51.100.10/load-balancer/healthcheck + + HTTP/1.1 503 Service Unavailable + Date: Mon, 16 Mar 2020 18:42:12 GMT + Server: Apache/2.4.29 (Ubuntu) + Content-Type: text/plain; charset=UTF-8 + Content-Length: 36 + x-openstack-request-id: req-84024269-2dfb-41ad-bfda-b3e1da138bba + Connection: close + + Example passing output for text/html with *detailed* False: + + .. code-block:: bash + + $ curl -i -H "Accept: text/html" http://198.51.100.10/load-balancer/healthcheck + + HTTP/1.1 200 OK + Date: Mon, 16 Mar 2020 18:25:11 GMT + Server: Apache/2.4.29 (Ubuntu) + Content-Type: text/html; charset=UTF-8 + Content-Length: 239 + x-openstack-request-id: req-b212d619-146f-4b50-91a3-5da16051badc + Connection: close + Vary: Accept-Encoding + + + Healthcheck Status

+ Result of 1 checks:
+ Reason: OK
+ <...>
+ + + + +Example failing output for text/html with *detailed* False: + +.. code-block:: bash + + $ curl -i -H "Accept: text/html" http://198.51.100.10/load-balancer/healthcheck + + HTTP/1.1 503 Service Unavailable + Date: Mon, 16 Mar 2020 18:42:22 GMT + Server: Apache/2.4.29 (Ubuntu) + Content-Type: text/html; charset=UTF-8 + Content-Length: 273 + x-openstack-request-id: req-c91dd214-85ca-4d33-9fa3-2db81566d9e5 + Connection: close + + + Healthcheck Status + + +

+ Result of 1 checks:
+ Reason: The Octavia database is unavailable.
+ <...>
+ + + + +Example passing output for application/json with *detailed* False: + +.. code-block:: bash + + $ curl -i -H "Accept: application/json" http://192.51.100.10/load-balancer/healthcheck + + HTTP/1.1 200 OK + Date: Mon, 16 Mar 2020 18:34:42 GMT + Server: Apache/2.4.29 (Ubuntu) + Content-Type: application/json + Content-Length: 62 + x-openstack-request-id: req-417dc85c-e64e-496e-a461-494a3e6a5479 + Connection: close + + { + "detailed": false, + "reasons": [ + "OK" + ] + } + +Example failing output for application/json with *detailed* False: + +.. code-block:: bash + + $ curl -i -H "Accept: application/json" http://192.51.100.10/load-balancer/healthcheck + + HTTP/1.1 503 Service Unavailable + Date: Mon, 16 Mar 2020 18:46:28 GMT + Server: Apache/2.4.29 (Ubuntu) + Content-Type: application/json + Content-Length: 96 + x-openstack-request-id: req-de50b057-6105-4fca-a758-c872ef28bbfa + Connection: close + + { + "detailed": false, + "reasons": [ + "The Octavia database is unavailable." + ] + } + +Example Detailed Responses +-------------------------- + +Example passing output for text/plain with *detailed* True: + +.. code-block:: bash + + $ curl -i http://198.51.100.10/load-balancer/healthcheck + + HTTP/1.1 200 OK + Date: Mon, 16 Mar 2020 18:10:27 GMT + Server: Apache/2.4.29 (Ubuntu) + Content-Type: text/plain; charset=UTF-8 + Content-Length: 2 + x-openstack-request-id: req-9c6f4303-63a7-4f30-8afc-39340658702f + Connection: close + Vary: Accept-Encoding + + OK + +Example failing output for text/plain with *detailed* True: + +.. code-block:: bash + + $ curl -i http://198.51.100.10/load-balancer/healthcheck + + HTTP/1.1 503 Service Unavailable + Date: Mon, 16 Mar 2020 23:41:23 GMT + Server: Apache/2.4.29 (Ubuntu) + Content-Type: text/plain; charset=UTF-8 + Content-Length: 36 + x-openstack-request-id: req-2cd046cb-3a6c-45e3-921d-5f4a9e65c63e + Connection: close + +Example passing output for text/html with *detailed* True: + +.. code-block:: bash + + $ curl -i -H "Accept: text/html" http://198.51.100.10/load-balancer/healthcheck + + HTTP/1.1 200 OK + Date: Mon, 16 Mar 2020 22:11:54 GMT + Server: Apache/2.4.29 (Ubuntu) + Content-Type: text/html; charset=UTF-8 + Content-Length: 9927 + x-openstack-request-id: req-ae7404c9-b183-46dc-bb1b-e5f4e4984a57 + Connection: close + Vary: Accept-Encoding + + + Healthcheck Status + +

+ Server status
+ Server hostname: devstack2
+ Current time: 2020-03-16 22:11:54.320529
+ Python version: 3.6.9 (default, Nov  7 2019, 10:44:02) [GCC 8.3.0]
+ Platform: Linux-4.15.0-88-generic-x86_64-with-Ubuntu-18.04-bionic
+
+ Garbage collector:
+ Counts: (28, 10, 4)
+ Thresholds: (700, 10, 10)
+
+ Result of 1 checks:
+ Kind: OctaviaDBCheckResult | Reason: OK | Details:
+
+ 1 greenthread(s) active:
+ <...>
+
+ 1 thread(s) active:
+ <...>
+
+Example failing output for text/html with *detailed* True:
+
+.. code-block:: bash
+
+    $ curl -i -H "Accept: text/html" http://198.51.100.10/load-balancer/healthcheck
+
+    HTTP/1.1 503 Service Unavailable
+    Date: Mon, 16 Mar 2020 23:43:52 GMT
+    Server: Apache/2.4.29 (Ubuntu)
+    Content-Type: text/html; charset=UTF-8
+    Content-Length: 10211
+    x-openstack-request-id: req-39b65058-6dc3-4069-a2d5-8a9714dba61d
+    Connection: close
+
+    <HTML>
+    <HEAD><TITLE>Healthcheck Status</TITLE></HEAD>
+    <BODY>
+
+    <H1>Server status</H1>
+    <B>Server hostname:</B><PRE>devstack2</PRE>
+    <B>Current time:</B><PRE>2020-03-16 23:43:52.411127</PRE>
+    <B>Python version:</B><PRE>3.6.9 (default, Nov  7 2019, 10:44:02)
+    [GCC 8.3.0]</PRE>
+    <B>Platform:</B><PRE>Linux-4.15.0-88-generic-x86_64-with-Ubuntu-18.04-bionic</PRE>
+    <HR></HR>
+
+    <H2>Garbage collector:</H2>
+    <B>Counts:</B><PRE>(578, 10, 4)</PRE>
+    <B>Thresholds:</B><PRE>(700, 10, 10)</PRE>
+    <HR></HR>
+
+    <H2>Result of 1 checks:</H2>
+    <TABLE bgcolor="#ffffff" border="1">
+    <TBODY>
+    <TR>
+    <TH>
+    Kind
+    </TH>
+    <TH>
+    Reason
+    </TH>
+    <TH>
+    Details
+    </TH>
+    </TR>
+    <TR>
+    <TD>OctaviaDBCheckResult</TD><TD>The Octavia database is unavailable.</TD><TD>Database health check failed due to: (pymysql.err.OperationalError) (2003, "Can't connect to MySQL server on '127.0.0.1' ([Errno 111] Connection refused)")
+    [SQL: SELECT 1]
+    (Background on this error at: http://sqlalche.me/e/e3q8).</TD>
+    </TR>
+    </TBODY>
+    </TABLE>
+    <HR></HR>
+
+    <H2>1 greenthread(s) active:</H2>
+    <TABLE bgcolor="#ffffff" border="1">
+    <TBODY>
+    <TR>
+    <TD><PRE> <...> </PRE></TD>
+    </TR>
+    </TBODY>
+    </TABLE>
+    <HR></HR>
+
+    <H2>1 thread(s) active:</H2>
+    <TABLE bgcolor="#ffffff" border="1">
+    <TBODY>
+    <TR>
+    <TD><PRE> <...> </PRE></TD>
+    </TR>
+    </TBODY>
+    </TABLE>
+
+    </BODY>
+    </HTML>
+
+Example passing output for application/json with *detailed* True:
+
+.. code-block:: bash
+
+    $ curl -i -H "Accept: application/json" http://198.51.100.10/load-balancer/healthcheck
+
+    HTTP/1.1 200 OK
+    Date: Mon, 16 Mar 2020 22:05:26 GMT
+    Server: Apache/2.4.29 (Ubuntu)
+    Content-Type: application/json
+    Content-Length: 9298
+    x-openstack-request-id: req-d3913655-6e3f-4086-a252-8bb297ea5fd6
+    Connection: close
+
+    {
+        "detailed": true,
+        "gc": {
+            "counts": [
+                27,
+                10,
+                4
+            ],
+            "threshold": [
+                700,
+                10,
+                10
+            ]
+        },
+        "greenthreads": [
+            <...>
+        ],
+        "now": "2020-03-16 22:05:26.431429",
+        "platform": "Linux-4.15.0-88-generic-x86_64-with-Ubuntu-18.04-bionic",
+        "python_version": "3.6.9 (default, Nov 7 2019, 10:44:02) \n[GCC 8.3.0]",
+        "reasons": [
+            {
+                "class": "OctaviaDBCheckResult",
+                "details": "",
+                "reason": "OK"
+            }
+        ],
+        "threads": [
+            <...>
+        ]
+    }
+
+Example failing output for application/json with *detailed* True:
+
+.. code-block:: bash
+
+    $ curl -i -H "Accept: application/json" http://198.51.100.10/load-balancer/healthcheck
+
+    HTTP/1.1 503 Service Unavailable
+    Date: Mon, 16 Mar 2020 23:56:43 GMT
+    Server: Apache/2.4.29 (Ubuntu)
+    Content-Type: application/json
+    Content-Length: 9510
+    x-openstack-request-id: req-3d62ea04-9bdb-4e19-b218-1a81ff7d7337
+    Connection: close
+
+    {
+        "detailed": true,
+        "gc": {
+            "counts": [
+                178,
+                0,
+                5
+            ],
+            "threshold": [
+                700,
+                10,
+                10
+            ]
+        },
+        "greenthreads": [
+            <...>
+        ],
+        "now": "2020-03-16 23:58:23.361209",
+        "platform": "Linux-4.15.0-88-generic-x86_64-with-Ubuntu-18.04-bionic",
+        "python_version": "3.6.9 (default, Nov 7 2019, 10:44:02) \n[GCC 8.3.0]",
+        "reasons": [
+            {
+                "class": "OctaviaDBCheckResult",
+                "details": "(pymysql.err.OperationalError) (2003, \"Can't connect to MySQL server on '127.0.0.1' ([Errno 111] Connection refused)\")\n(Background on this error at: http://sqlalche.me/e/e3q8)",
+                "reason": "The Octavia database is unavailable."
+            }
+        ],
+        "threads": [
+            <...>
+        ]
+    }
+
+Oslo Healthcheck Plugins
+========================
+
+The Octavia API health monitoring endpoint, implemented with Oslo middleware
+healthcheck, is extensible using optional backend plugins. There are currently
+plugins provided by the Oslo middleware library and plugins provided by
+Octavia.
+
+**Oslo middleware provided plugins**
+
+* `disable_by_file <https://docs.openstack.org/oslo.middleware/latest/reference/healthcheck_plugins.html>`_
+* `disable_by_files_ports <https://docs.openstack.org/oslo.middleware/latest/reference/healthcheck_plugins.html>`_
+
+**Octavia provided plugins**
+
+* `octavia_db_check`_
+
+.. warning::
+
+    Some plugins may have long timeouts. It is a best practice to configure
+    your healthcheck query to have connection, read, and/or data timeouts.
+    The appropriate values will be unique to each deployment depending on the
+    cloud performance, number of plugins, etc.
+
+Enabling Octavia API Health Monitoring
+======================================
+
+To enable the Octavia API health monitoring endpoint, the proper configuration
+file settings need to be updated and the Octavia API processes need to be
+restarted.
+
+Start by enabling the endpoint:
+
+.. code-block:: ini
+
+    [api_settings]
+    healthcheck_enabled = True
+
+When the healthcheck_enabled setting is *False*, queries of the /healthcheck
+path will receive an HTTP 404 Not Found response.
+
+You will then need to select the desired monitoring backend plugins:
+
+.. code-block:: ini
+
+    [healthcheck]
+    backends = octavia_db_check
+
+.. note::
+
+    When no plugins are configured, the behavior of Oslo middleware
+    healthcheck changes. Not only does it not run any tests, it will return
+    204 responses instead of 200.
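+
+Once enabled, you can verify the endpoint from a monitoring host. The
+following is a minimal sketch using curl with explicit connection and
+transfer timeouts, per the timeout recommendation in the plugin warning
+above; the address and timeout values are illustrative only:
+
+.. code-block:: bash
+
+    # Print only the HTTP status code; fail fast if the API is unresponsive.
+    $ curl --connect-timeout 2 --max-time 5 -s -o /dev/null \
+          -w "%{http_code}\n" http://198.51.100.10/load-balancer/healthcheck
+    200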
+
+The Octavia API health monitoring endpoint does not require a keystone token
+for access, to allow external load balancers to query the endpoint. For this
+reason we recommend you restrict access to it on your external load balancer
+to prevent abuse.
+
+As an additional protection, the API will cache results for a configurable
+period of time. This means that queries to the health monitoring endpoint
+will return cached results until the refresh interval has expired, at which
+point the health check plugin will rerun the check.
+
+By default, the refresh interval is five seconds. This can be configured by
+adjusting the healthcheck_refresh_interval setting in the Octavia
+configuration file:
+
+.. code-block:: ini
+
+    [api_settings]
+    healthcheck_refresh_interval = 5
+
+Optionally you can enable the "detailed" mode in Oslo middleware healthcheck.
+This will cause Oslo middleware healthcheck to return additional information
+about the API instance. It will also provide exception details if one was
+raised during the health check. This setting is disabled (False) by default
+in the Octavia API.
+
+.. code-block:: ini
+
+    [healthcheck]
+    detailed = True
+
+.. warning::
+
+    Enabling the 'detailed' setting will expose sensitive details about
+    the API process. Do not enable this unless you are sure it will
+    not pose a **security risk** to your API instances.
+    We highly recommend you do not enable this.
+
+Using Octavia API Health Monitoring
+===================================
+
+The Octavia API health monitoring endpoint can be accessed via the
+/healthcheck path on the Octavia API endpoint.
+
+For example, if your Octavia (load-balancer) endpoint in keystone is:
+
+.. code-block:: bash
+
+    https://10.21.21.78/load-balancer
+
+You would access the Octavia API health monitoring endpoint via:
+
+.. code-block:: bash
+
+    https://10.21.21.78/load-balancer/healthcheck
+
+A keystone token is not required to access this endpoint.
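+
+As an illustration of how an external load balancer might consume this
+endpoint, here is a sketch of an HAProxy pool definition that health checks
+two Octavia API instances. The addresses and check timings are illustrative
+assumptions, not recommendations; 9876 is the default Octavia API port:
+
+.. code-block:: text
+
+    # External HAProxy pool health monitoring the Octavia API instances.
+    backend octavia-api
+        option httpchk GET /load-balancer/healthcheck
+        http-check expect status 200
+        server api1 192.0.2.11:9876 check inter 5s fall 3 rise 2
+        server api2 192.0.2.12:9876 check inter 5s fall 3 rise 2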
+
+Octavia Plugins
+===============
+
+octavia_db_check
+----------------
+
+The octavia_db_check plugin validates that the API instance has a working
+connection to the Octavia database. It executes a SQL no-op query,
+'SELECT 1;', against the database.
+
+.. note::
+
+    Many OpenStack services and libraries, such as oslo.db and sqlalchemy,
+    also use the no-op query, 'SELECT 1;', for health checks.
+
+The possible octavia_db_check results are:
+
++---------+--------+-------------+--------------------------------------+
+| Request | Result | Status Code | "reason" Message                     |
++=========+========+=============+======================================+
+| GET     | Pass   | 200         | OK                                   |
++---------+--------+-------------+--------------------------------------+
+| HEAD    | Pass   | 204         |                                      |
++---------+--------+-------------+--------------------------------------+
+| GET     | Fail   | 503         | The Octavia database is unavailable. |
++---------+--------+-------------+--------------------------------------+
+| HEAD    | Fail   | 503         |                                      |
++---------+--------+-------------+--------------------------------------+
+
+When running Oslo middleware healthcheck in "detailed" mode, the "details"
+field will have additional information about the error encountered, including
+the exception details if they were available.
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
new file mode 100644
index 0000000000..6395f024d6
--- /dev/null
+++ b/doc/source/admin/index.rst
@@ -0,0 +1,67 @@
+======================
+Octavia Administration
+======================
+
+
+Getting Started
+---------------
+
+.. toctree::
+   :maxdepth: 1
+
+   ../reference/introduction.rst
+   ../reference/glossary.rst
+   ../contributor/guides/dev-quick-start.rst
+
+Installation and Configuration Guides
+-------------------------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   amphora-image-build
+   guides/certificates.rst
+   ../configuration/configref.rst
+   ../configuration/policy.rst
+
+Optional Installation and Configuration Guides
+----------------------------------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   providers/index.rst
+   log-offloading.rst
+   api-audit.rst
+   healthcheck.rst
+   flavors.rst
+   apache-httpd.rst
+   failover-circuit-breaker.rst
+   sr-iov.rst
+
+Maintenance and Operations
+--------------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   guides/operator-maintenance.rst
+   octavia-status
+   guides/upgrade.rst
+
+Operator Reference
+------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   Octavia API Reference <https://docs.openstack.org/api-ref/load-balancer/>
+   ../contributor/api/haproxy-amphora-api.rst
+   event-notifications.rst
+
+.. only:: html
+
+   Indices and Search
+   ------------------
+
+   * :ref:`search`
diff --git a/doc/source/admin/log-offloading.rst b/doc/source/admin/log-offloading.rst
new file mode 100644
index 0000000000..a8d278d51f
--- /dev/null
+++ b/doc/source/admin/log-offloading.rst
@@ -0,0 +1,298 @@
+..
+   Copyright 2019 Red Hat, Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+==============================
+Octavia Amphora Log Offloading
+==============================
+
+The default logging configuration stores the logs locally, on the amphora
+filesystem, with file rotation.
+
+Octavia Amphorae can offload their log files via the syslog protocol to
+syslog receivers over the load balancer management network (lb-mgmt-net).
+This allows aggregation of both administrative logs and tenant traffic flow
+logs. The syslog receivers can either be local to the load balancer management
+network or routable via the load balancer management network.
+By default, any syslog receiver that supports the UDP or TCP syslog protocol
+can be used. However, the operator also has the option to create an override
+rsyslog configuration template to enable other features or protocols that
+their Amphora image may support.
+
+This guide will discuss the features of :term:`Amphora` log offloading and how
+to configure them.
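+
+As a concrete illustration only, a minimal rsyslog receiver configuration
+might look like the following sketch; the port, facilities, and file paths
+are assumptions that match the default facility settings described below:
+
+.. code-block:: text
+
+    # /etc/rsyslog.d/octavia-receiver.conf -- illustrative sketch only.
+    # Listen for syslog messages over UDP and TCP on port 10514.
+    module(load="imudp")
+    input(type="imudp" port="10514")
+    module(load="imtcp")
+    input(type="imtcp" port="10514")
+
+    # Amphora administrative logs default to facility local1, tenant flow
+    # logs to facility local0 (see the configuration settings below).
+    local1.*  /var/log/octavia-amphora/admin.log
+    local0.*  /var/log/octavia-amphora/tenant-flow.log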
+
+Administrative Logs
+===================
+
+The administrative log offloading feature of the :term:`Amphora` covers all of
+the system logging inside the :term:`Amphora` except for the tenant flow logs.
+Tenant flow logs can be sent to and processed by the same syslog receiver used
+by the administrative logs, but they are configured separately.
+
+All administrative log messages will be sent using the native log format
+for the application sending the message.
+
+Enabling Administrative Log Offloading
+--------------------------------------
+
+One or more syslog receiver endpoints must be configured in the Octavia
+configuration file to enable administrative log offloading. The first endpoint
+will be the primary endpoint to receive the syslog packets.
+Read the :ref:`rsyslog failover considerations` section for information about
+how to use multiple target servers.
+
+To configure administrative log offloading, set the following setting in your
+Octavia configuration file for all of the controllers and restart them:
+
+.. code-block:: ini
+
+    [amphora_agent]
+    admin_log_targets = 192.0.2.1:10514
+
+In this example, the syslog receiver will be 192.0.2.1 on port 10514.
+If *log_protocol* is not specified, UDP will be used.
+
+.. note::
+
+    Make sure your syslog receiver endpoints are accessible from the load
+    balancer management network and you have configured the required
+    security group or firewall rules to allow the traffic. These endpoints
+    can be routable addresses from the load balancer management network.
+
+The load balancer related administrative logs will be sent using a
+LOG_LOCAL[0-7] facility. The facility number defaults to 1, but is
+configurable using the administrative_log_facility setting in the Octavia
+configuration file.
+
+To configure the administrative log facility, set the following setting in
+your Octavia configuration file for all of the controllers and restart them:
+
+.. code-block:: ini
+
+    [amphora_agent]
+    administrative_log_facility = 1
+
+Forwarding All Administrative Logs
+----------------------------------
+
+By default, the Amphorae will only forward load balancer related
+administrative logs, such as the haproxy admin logs, keepalived, and
+:term:`Amphora` agent logs.
+You can optionally configure the Amphorae to send all of the administrative
+logs from the :term:`Amphora`, such as the kernel, system, and security logs.
+Even with this setting, the tenant flow logs will not be included. You can
+configure tenant flow log forwarding in the `Tenant Flow Logs`_ section.
+
+The load balancer related administrative logs will be sent using the
+LOG_LOCAL[0-7] facility configured using the administrative_log_facility
+setting. All other administrative log messages will use their native syslog
+facilities.
+
+To configure the Amphorae to forward all administrative logs, set the
+following setting in your Octavia configuration file for all of the
+controllers and restart them:
+
+.. code-block:: ini
+
+    [amphora_agent]
+    forward_all_logs = True
+
+Tenant Flow Logs
+================
+
+Enabling Tenant Flow Log Offloading
+-----------------------------------
+
+One or more syslog receiver endpoints must be configured in the Octavia
+configuration file to enable tenant flow log offloading. The first endpoint
+will be the primary endpoint to receive the syslog packets.
+The endpoints configured for tenant flow log offloading may be
+the same endpoints as the administrative log offloading configuration.
+Read the :ref:`rsyslog failover considerations` section for information
+about how to use multiple target servers.
+
+.. warning::
+
+    Tenant flow logging can produce a large number of syslog messages
+    depending on how many connections the load balancers are receiving.
+    Tenant flow logging produces one log entry per connection to the
+    load balancer. We recommend you monitor, size, and configure your syslog
+    receivers appropriately based on the expected number of connections your
+    load balancers will be handling.
+
+To configure tenant flow log offloading, set the following setting in your
+Octavia configuration file for all of the controllers and restart them:
+
+.. code-block:: ini
+
+    [amphora_agent]
+    tenant_log_targets = 192.0.2.1:10514
+
+In this example, the syslog receiver will be 192.0.2.1 on port 10514.
+If *log_protocol* is not specified, UDP will be used.
+
+.. note::
+
+    Make sure your syslog receiver endpoints are accessible from the load
+    balancer management network and you have configured the required
+    security group or firewall rules to allow the traffic. These endpoints
+    can be routable addresses from the load balancer management network.
+
+The load balancer related tenant flow logs will be sent using a
+LOG_LOCAL[0-7] facility. The facility number defaults to 0, but is
+configurable using the user_log_facility setting in the Octavia configuration
+file.
+
+To configure the tenant flow log facility, set the following setting in your
+Octavia configuration file for all of the controllers and restart them:
+
+.. code-block:: ini
+
+    [amphora_agent]
+    user_log_facility = 0
+
+Tenant Flow Log Format
+----------------------
+
+The default tenant flow log format is:
+
+.. code-block::
+
+    project_id loadbalancer_id listener_id client_ip client_port date_time
+    request_string http_status bytes_read bytes_uploaded
+    client_certificate_verify(0 or 1) client_certificate_distinguished_name
+    pool_id member_id processing_time(ms) termination_state
+
+Any field that is unknown or not applicable to the connection will have a '-'
+character in its place.
+
+An example log entry when using rsyslog as the syslog receiver is:
+
+.. note::
+
+    The prefix[1] in this example comes from the rsyslog receiver and is not
+    part of the syslog message from the amphora.
+
+    [1] "Jun 12 00:44:13 amphora-3e0239c3-5496-4215-b76c-6abbe18de573 haproxy[1644]:"
+
+.. code-block::
+
+    Jun 12 00:44:13 amphora-3e0239c3-5496-4215-b76c-6abbe18de573 haproxy[1644]: 5408b89aa45b48c69a53dca1aaec58db fd8f23df-960b-4b12-ba62-2b1dff661ee7 261ecfc2-9e8e-4bba-9ec2-3c903459a895 172.24.4.1 41152 12/Jun/2019:00:44:13.030 "GET / HTTP/1.1" 200 76 73 - "" e37e0e04-68a3-435b-876c-cffe4f2138a4 6f2720b3-27dc-4496-9039-1aafe2fee105 4 --
+
+Custom Tenant Flow Log Format
+-----------------------------
+
+You can optionally specify a custom log format for the tenant flow logs.
+This string follows the HAProxy log format variables with the exception of
+the "{{ project_id }}" and "{{ lb_id }}" variables that will be replaced
+by the Octavia :term:`Amphora` driver. These custom variables are optional.
+
+See the HAProxy documentation for `Custom log format
+<https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#8.2.4>`_
+variable definitions.
+
+To configure a custom log format, set the following setting in your
+Octavia configuration file for all of the controllers and restart them:
+
+.. code-block:: ini
+
+    [haproxy_amphora]
+    user_log_format = '{{ project_id }} {{ lb_id }} %f %ci %cp %t %{+Q}r %ST %B %U %[ssl_c_verify] %{+Q}[ssl_c_s_dn] %b %s %Tt %tsc'
+
+.. _rsyslog failover considerations:
+
+Failover Considerations
+=======================
+
+In order to provide protection against potential data loss because of
+downtime of a single syslog server, it may be advisable to
+use multiple log targets.
+In such a configuration, *log_protocol* needs to be set to *TCP*.
+With the UDP syslog protocol, rsyslog is unable
+to detect if the primary endpoint has failed.
+
+Also pay attention to the *log_retry_count* and *log_retry_interval* settings
+when using multiple log targets. You might want to set *log_retry_count* to 0
+and use a higher value for *log_retry_interval*. Values up to 1800 (30
+minutes) are possible.
+That way, the failover will happen immediately after the client detects
+that the server became unavailable. In that case, the failed server won't be
+used again for at least *log_retry_interval* seconds after that event.
+In the following example the primary syslog receiver will be
+192.0.2.1 on port 10514.
+The backup syslog receiver will be 2001:db8:1::10 on port 10514.
+
+.. code-block:: ini
+
+    [amphora_agent]
+    admin_log_targets = 192.0.2.1:10514, 2001:db8:1::10:10514
+    tenant_log_targets = 192.0.2.1:10514, 2001:db8:1::10:10514
+    log_protocol = TCP
+    log_retry_count = 0
+    log_retry_interval = 1800
+
+Disabling Logging
+=================
+
+There may be cases where you need to disable logging inside the
+:term:`Amphora`, such as complying with regulatory standards.
+Octavia provides multiple options for disabling :term:`Amphora` logging.
+
+Disable Local Log Storage
+-------------------------
+
+This setting stops log entries from being written to the disk inside the
+:term:`Amphora`. Logs can still be sent via :term:`Amphora` log offloading if
+log offloading is configured for the Amphorae. Enabling this setting may
+provide a performance benefit to the load balancer.
+
+.. warning::
+
+    This feature disables ALL log storage in the :term:`Amphora`, including
+    kernel, system, and security logging.
+
+.. note::
+
+    If you enable this setting and are not using :term:`Amphora` log
+    offloading, we recommend you also `Disable Tenant Flow Logging`_ to
+    improve load balancing performance.
+
+To disable local log storage in the :term:`Amphora`, set the following setting
+in your Octavia configuration file for all of the controllers and restart
+them:
+
+.. code-block:: ini
+
+    [amphora_agent]
+    disable_local_log_storage = True
+
+Disable Tenant Flow Logging
+---------------------------
+
+This setting allows you to disable tenant flow logging irrespective of the
+other logging configuration settings. It will take precedence over the other
+settings. When this setting is enabled, no tenant flow (connection) logs will
+be written to the disk inside the :term:`Amphora` or be sent via the
+:term:`Amphora` log offloading.
+
+.. note::
+
+    Disabling tenant flow logging can also improve the load balancing
+    performance of the amphora. Due to the potential performance improvement,
+    we recommend you enable this setting when using the
+    `Disable Local Log Storage`_ setting.
+
+To disable tenant flow logging, set the following setting in your Octavia
+configuration file for all of the controllers and restart them:
+
+.. code-block:: ini
+
+    [haproxy_amphora]
+    connection_logging = False
diff --git a/doc/source/admin/octavia-status.rst b/doc/source/admin/octavia-status.rst
new file mode 100644
index 0000000000..109dce4582
--- /dev/null
+++ b/doc/source/admin/octavia-status.rst
@@ -0,0 +1,83 @@
+==============
+octavia-status
+==============
+
+-----------------------------------------
+CLI interface for Octavia status commands
+-----------------------------------------
+
+Synopsis
+========
+
+::
+
+  octavia-status <category> <command> [<args>]
+
+Description
+===========
+
+:program:`octavia-status` is a tool that provides routines for checking the
+status of an Octavia deployment.
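+
+For example, running the upgrade readiness checks described below might
+produce output like the following sketch (the set of checks and the exact
+table formatting vary by release):
+
+.. code-block:: bash
+
+    $ octavia-status upgrade check
+    +--------------------------------+
+    | Upgrade Check Results          |
+    +--------------------------------+
+    | Check: Sample Check            |
+    | Result: Success                |
+    | Details: None                  |
+    +--------------------------------+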
+
+Options
+=======
+
+The standard pattern for executing a :program:`octavia-status` command is::
+
+    octavia-status <category> <command> [<args>]
+
+Run without arguments to see a list of available command categories::
+
+    octavia-status
+
+Categories are:
+
+* ``upgrade``
+
+Detailed descriptions are below:
+
+You can also run with a category argument such as ``upgrade`` to see a list of
+all commands in that category::
+
+    octavia-status upgrade
+
+These sections describe the available categories and arguments for
+:program:`octavia-status`.
+
+Upgrade
+~~~~~~~
+
+.. _octavia-status-checks:
+
+``octavia-status upgrade check``
+  Performs a release-specific readiness check before restarting services with
+  new code. For example, missing or changed configuration options,
+  incompatible object states, or other conditions that could lead to
+  failures while upgrading.
+
+  **Return Codes**
+
+  .. list-table::
+     :widths: 20 80
+     :header-rows: 1
+
+     * - Return code
+       - Description
+     * - 0
+       - All upgrade readiness checks passed successfully and there is
+         nothing to do.
+     * - 1
+       - At least one check encountered an issue and requires further
+         investigation. This is considered a warning but the upgrade may be
+         OK.
+     * - 2
+       - There was an upgrade status check failure that needs to be
+         investigated. This should be considered something that stops an
+         upgrade.
+     * - 255
+       - An unexpected error occurred.
+
+  **History of Checks**
+
+  **4.0.0 (Stein)**
+
+  * Sample check to be filled in with checks as they are added in Stein.
diff --git a/doc/source/admin/providers/a10.rst b/doc/source/admin/providers/a10.rst
new file mode 100644
index 0000000000..d6d1bb7417
--- /dev/null
+++ b/doc/source/admin/providers/a10.rst
@@ -0,0 +1,25 @@
+..
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+A10 Networks OpenStack Octavia Driver
+=====================================
+
+A10 Networks Octavia Driver for Thunder, vThunder and AX Series Appliances.
+
+Default provider name: **a10**
+
+The driver source: https://github.com/a10networks/a10-octavia/
+
+The documentation: https://github.com/a10networks/a10-octavia/
+
+Where to report issues with the driver: Contact A10 Networks
diff --git a/doc/source/admin/providers/amphora.rst b/doc/source/admin/providers/amphora.rst
new file mode 100644
index 0000000000..bad5b8a843
--- /dev/null
+++ b/doc/source/admin/providers/amphora.rst
@@ -0,0 +1,34 @@
+..
+   Copyright 2018 Rackspace, US Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+Amphora
+=======
+
+This is the reference driver for Octavia, meaning it is used for testing the
+Octavia code base. It is an open source, scalable, and highly available load
+balancing provider.
+It adopts the taskflow jobboard feature and saves task states into the
+persistence backend, which allows task execution to continue if a controller
+is interrupted.
+
+Default provider name: **amphora**
+
+The driver package: https://pypi.org/project/octavia/
+
+The driver source: https://opendev.org/openstack/octavia/
+
+The documentation: https://docs.openstack.org/octavia/latest/
+
+Where to report issues with the driver: https://launchpad.net/octavia
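+
+As a minimal sketch of how the jobboard feature mentioned above is turned on,
+the relevant options live in the [task_flow] section of the Octavia
+configuration file; the hosts and credentials here are illustrative
+placeholders only:
+
+.. code-block:: ini
+
+    [task_flow]
+    # Enable saving task states so interrupted flows can be resumed.
+    jobboard_enabled = True
+    jobboard_backend_driver = redis_taskflow_driver
+    jobboard_backend_hosts = 192.0.2.50
+    jobboard_backend_password = example-password
+    persistence_connection = mysql+pymysql://octavia:example@192.0.2.51/octavia_persistence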
diff --git a/doc/source/admin/providers/f5.rst b/doc/source/admin/providers/f5.rst
new file mode 100644
index 0000000000..2f83a4f8c2
--- /dev/null
+++ b/doc/source/admin/providers/f5.rst
@@ -0,0 +1,23 @@
+..
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+F5 Networks Provider Driver for OpenStack Octavia by SAP SE
+===========================================================
+
+F5 Networks Provider Driver for OpenStack Octavia provided by SAP SE.
+
+Default provider name: **f5**
+
+The driver source: https://github.com/sapcc/octavia-f5-provider-driver
+
+Where to report issues with the driver: Contact SAP SE
diff --git a/doc/source/admin/providers/index.rst b/doc/source/admin/providers/index.rst
new file mode 100644
index 0000000000..8c60c3c761
--- /dev/null
+++ b/doc/source/admin/providers/index.rst
@@ -0,0 +1,54 @@
+..
+   Copyright 2018 Rackspace, US Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+==========================
+Available Provider Drivers
+==========================
+
+Octavia supports enabling multiple provider drivers via the Octavia v2 API.
+Drivers, other than the reference Amphora driver, exist outside of the Octavia
+repository and are not maintained by the Octavia team. This list is intended
+to provide a place for operators to discover and find available load balancing
+provider drivers.
+
+This list is a **"best effort"** to keep updated, so please check with your
+favorite load balancer provider to see if they support OpenStack load
+balancing. If they don't, make a request for support!
+
+.. Note:: The provider drivers listed here may not be maintained by the
+          OpenStack LBaaS (Octavia) team. Please submit bugs for these
+          projects through their respective bug tracking systems.
+
+Drivers are installed on all of your Octavia API instances using pip and
+automatically integrated with Octavia using `setuptools entry points`_. Once
+installed, operators can enable a provider by adding it to the
+`enabled_provider_drivers`_ setting in the [api_settings] section of the
+Octavia configuration file. Be sure to install and enable the provider on all
+of your Octavia API instances.
+
+.. _setuptools entry points: http://setuptools.readthedocs.io/en/latest/pkg_resources.html?#entry-points
+.. _enabled_provider_drivers: https://docs.openstack.org/octavia/latest/configuration/configref.html#api_settings.enabled_provider_drivers
+
+.. include:: a10.rst
+
+.. include:: amphora.rst
+
+.. include:: f5.rst
+
+.. include:: ovn.rst
+
+.. include:: radware.rst
+
+.. include:: vmware-nsx.rst
diff --git a/doc/source/admin/providers/ovn.rst b/doc/source/admin/providers/ovn.rst
new file mode 100644
index 0000000000..02bbedce43
--- /dev/null
+++ b/doc/source/admin/providers/ovn.rst
@@ -0,0 +1,29 @@
+..
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+OVN Octavia Provider Driver
+===========================
+
+OVN provides virtual networking for Open vSwitch and is a component of the
+Open vSwitch project. This project provides integration between OpenStack
+Octavia and OVN.
+
+Default provider name: **ovn**
+
+The driver package: https://pypi.org/project/ovn-octavia-provider/
+
+The driver source: https://opendev.org/openstack/ovn-octavia-provider
+
+The documentation: https://docs.openstack.org/ovn-octavia-provider/latest/
+
+Where to report issues with the driver: https://bugs.launchpad.net/neutron/+bugs?field.tag=ovn-octavia-provider
diff --git a/doc/source/admin/providers/radware.rst b/doc/source/admin/providers/radware.rst
new file mode 100644
index 0000000000..734b19e3f7
--- /dev/null
+++ b/doc/source/admin/providers/radware.rst
@@ -0,0 +1,25 @@
+..
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+Radware Provider Driver for OpenStack Octavia
+=============================================
+
+Radware provider driver for OpenStack Octavia.
+
+Default provider name: **radware**
+
+The driver package: https://pypi.org/project/radware_octavia_rocky_driver/
+
+The documentation: https://pypi.org/project/radware_octavia_rocky_driver/
+
+Where to report issues with the driver: Contact Radware
diff --git a/doc/source/admin/providers/vmware-nsx.rst b/doc/source/admin/providers/vmware-nsx.rst
new file mode 100644
index 0000000000..eac87f99f5
--- /dev/null
+++ b/doc/source/admin/providers/vmware-nsx.rst
@@ -0,0 +1,25 @@
+..
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+VMware NSX
+==========
+
+VMware NSX Octavia Driver.
+
+Default provider name: **vmwareedge**
+
+The driver package: https://pypi.org/project/vmware-nsx/
+
+The driver source: https://opendev.org/x/vmware-nsx
+
+Where to report issues with the driver: https://bugs.launchpad.net/vmware-nsx
diff --git a/doc/source/admin/sr-iov.rst b/doc/source/admin/sr-iov.rst
new file mode 100644
index 0000000000..3b8478f779
--- /dev/null
+++ b/doc/source/admin/sr-iov.rst
@@ -0,0 +1,115 @@
+..
+   Copyright 2023 Red Hat, Inc. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License"); you may
+   not use this file except in compliance with the License. You may obtain
+   a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+   License for the specific language governing permissions and limitations
+   under the License.
+
+===============================
+Using SR-IOV Ports with Octavia
+===============================
+
+Single Root I/O Virtualization (SR-IOV) can significantly reduce the latency
+through an Octavia Amphora based load balancer while maximizing bandwidth and
+request rates. With Octavia Amphora load balancers, you can attach SR-IOV
+Virtual Functions (VF) as the VIP port and/or backend member ports.
+
+Enabling SR-IOV on Your Compute Hosts
+-------------------------------------
+
+To allow Octavia load balancers to use SR-IOV, you must configure nova and
+neutron to make SR-IOV available on at least one compute host. Please follow
+the `Networking Guide
+<https://docs.openstack.org/neutron/latest/admin/config-sriov.html>`_ to set
+up your compute hosts for SR-IOV.
+
+Configuring Host Aggregates, Compute and Octavia Flavors
+--------------------------------------------------------
+
+Octavia hot-plugs the network ports into the Amphora as the load balancer is
+being provisioned. This means we need to use host aggregates and compute
+flavor properties to make sure the Amphora are created on SR-IOV enabled
+compute hosts with the correct networks.
+
+Host Aggregates
+~~~~~~~~~~~~~~~
+
+This configuration can be as simple or complex as you need it to be. A simple
+approach would be to add one property for the SR-IOV host aggregate, such as:
+
+.. code-block:: bash
+
+    $ openstack aggregate create sriov_aggregate
+    $ openstack aggregate add host sriov_aggregate sriov-host.example.org
+    $ openstack aggregate set --property sriov-nic=true sriov_aggregate
+
+A more advanced configuration may list out the specific networks that are
+available via the SR-IOV VFs:
+
+.. code-block:: bash
+
+    $ openstack aggregate create sriov_aggregate
+    $ openstack aggregate add host sriov_aggregate sriov-host.example.org
+    $ openstack aggregate set --property public-sriov=true --property members-sriov=true sriov_aggregate
+
+Compute Flavors
+~~~~~~~~~~~~~~~
+
+Next we need to create a compute flavor that includes the required properties
+to match the host aggregate. Here is an example for a basic Octavia Amphora
+compute flavor using the advanced host aggregate discussed in the previous
+section:
+
+.. code-block:: bash
+
+    $ openstack flavor create --id amphora-sriov-flavor --ram 1024 --disk 3 --vcpus 1 --private sriov.amphora --property hw_rng:allowed=True --property public-sriov=true --property members-sriov=true
+
+.. note::
+    This flavor is marked "private", so it must be created inside the Octavia
+    service account project.
+
+Octavia Flavors
+~~~~~~~~~~~~~~~
+
+Now that we have the compute service set up to properly place our Amphora
+instances on hosts with SR-IOV NICs on the right networks, we can create an
+Octavia flavor that will use the compute flavor.
+
+.. code-block:: bash
+
+    $ openstack loadbalancer flavorprofile create --name amphora-sriov-profile --provider amphora --flavor-data '{"compute_flavor": "amphora-sriov-flavor", "sriov_vip": true, "allow_member_sriov": true}'
+    $ openstack loadbalancer flavor create --name SRIOV-public-members --flavorprofile amphora-sriov-profile --description "A load balancer that uses SR-IOV for the 'public' network and 'members' network." --enable
+
+When the `allow_member_sriov` Octavia flavor setting is true, users can
+request Octavia to attach the member ports using SR-IOV VFs. If Octavia is not
+able to successfully attach the member port as an SR-IOV VF, the member will
+be marked with a `provisioning_status` of `ERROR`, as Octavia could not
+acquire a networking port for the requested member network. If the member
+network is already attached using a non-SR-IOV port, the member will also be
+marked with a `provisioning_status` of `ERROR`.
+
+.. note::
+    By default, both `sriov_vip` and `allow_member_sriov` are false.
+
+Building the Amphora Image
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Neutron does not support security groups on SR-IOV ports, so Octavia will use
+nftables inside the Amphora to provide network security. The amphora image
+must be built with nftables enabled for SR-IOV enabled load balancers. Images
+with nftables enabled can be used for both SR-IOV enabled load balancers as
+well as load balancers that are not using SR-IOV ports. When the SR-IOV for
+load balancer VIP ports feature was added to Octavia, the default setting for
+using nftables was changed to `True`. Prior to this, it needed to be
+enabled by setting an environment variable before building the Amphora image:
+
+.. code-block:: bash
+
+    $ export DIB_OCTAVIA_AMP_USE_NFTABLES=True
+    $ ./diskimage-create.sh
diff --git a/doc/source/cli/index.rst b/doc/source/cli/index.rst
new file mode 100644
index 0000000000..55a9e7432e
--- /dev/null
+++ b/doc/source/cli/index.rst
@@ -0,0 +1,10 @@
+==============================
+Octavia Command Line Interface
+==============================
+
+Octavia has an OpenStack Client plugin available as the native Command Line
+Interface (CLI).
+
+Please see the `python-octaviaclient documentation
+<https://docs.openstack.org/python-octaviaclient/latest/>`_ for documentation
+on installing and using the CLI.
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 0000000000..5698c81be3
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,346 @@
+#
+# Octavia documentation build configuration file, created by
+# sphinx-quickstart on Tue May 21 17:43:32 2013.
+#
+# This file is execfile()d with the current directory set to its containing
+# dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+ +import datetime +import os +import sys + +from pydotplus import graphviz +import sadisplay + +import octavia.db.models as models + +sys.path.insert(0, os.path.abspath('../..')) +sys.path.insert(0, os.path.abspath('.')) + +from tools import create_flow_docs + +# Generate our flow diagrams +create_flow_docs.generate( + 'tools/flow-list-v2.txt', 'doc/source/contributor/devref/flow_diagrams_v2') + +# Generate entity relationship diagram +desc = sadisplay.describe( + [getattr(models, attr) for attr in dir(models)], + show_methods=True, + show_properties=True, + show_indexes=True, +) +graph = graphviz.graph_from_dot_data(sadisplay.dot(desc).encode('utf-8')) +graph.write('contributor/devref/erd.svg', format='svg') + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ---------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.viewcode', + 'sphinx.ext.graphviz', + 'sphinx_feature_classification.support_matrix', + 'openstackdocstheme', + 'oslo_config.sphinxext', + 'oslo_policy.sphinxpolicygen', + 'sphinxcontrib.apidoc', + 'sphinxcontrib.rsvgconverter', +] + +todo_include_todos = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +copyright = '2014-2019, OpenStack Octavia Team' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [ + '_build', + 'contributor/specs/skeleton.rst', + 'contributor/specs/template.rst' +] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = False + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'native' + +# A list of ignored prefixes for module index sorting. 
+modindex_common_prefix = ['octavia.'] + +# -- Options for man page output ---------------------------------------------- +man_pages = [] + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'openstackdocs' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} +html_theme_options = {'show_other_versions': True} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +html_static_path = ['_static'] + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +html_domain_indices = True + +# If false, no index is generated. +html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'Octavia-Specsdoc' + + +# -- Options for LaTeX output ------------------------------------------------- + +# Fix Unicode character for sphinx_feature_classification +# Sphinx default latex engine (pdflatex) doesn't know much unicode +latex_preamble = r""" +\usepackage{newunicodechar} +\newunicodechar{✖}{\sffamily X} +\setcounter{tocdepth}{2} +\authoraddress{\textcopyright %s OpenStack Foundation} +""" % datetime.datetime.now().year + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. 
+ # openany: Skip blank pages in generated PDFs + 'extraclassoptions': 'openany,oneside', + 'makeindex': '', + 'printindex': '', + 'preamble': latex_preamble +} + +# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 +# Some distros are missing xindy +latex_use_xindy = False + +# Fix missing apostrophe +smartquotes_excludes = {'builders': ['latex']} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). +latex_documents = [( + 'index', + 'doc-octavia.tex', + 'Octavia Documentation', + 'OpenStack Octavia Team', + 'manual' +)] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +latex_domain_indices = False + +# -- Options for Texinfo output ----------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [( + 'index', + 'Octavia-specs', + 'Octavia Design Specs', + 'OpenStack Octavia Team', + 'octavia-specs', + 'Design specifications for the Octavia project.', + 'Miscellaneous' +)] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + + +# -- Options for Epub output -------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = 'Octavia Specs' +epub_author = 'OpenStack Octavia Team' +epub_publisher = 'OpenStack Octavia Team' +epub_copyright = '2014, OpenStack Octavia Team' + +# The language of the text. It defaults to the language option +# or en if the language is not set. +# epub_language = '' + +# The scheme of the identifier. Typical schemes are ISBN or URL. +# epub_scheme = '' + +# The unique identifier of the text. This can be an ISBN number +# or the project homepage. +# epub_identifier = '' + +# A unique identification for the text. +# epub_uid = '' + +# A tuple containing the cover image and cover page html template filenames. +# epub_cover = () + +# HTML files that should be inserted before the pages created by sphinx. +# The format is a list of tuples containing the path and title. +# epub_pre_files = [] + +# HTML files shat should be inserted after the pages created by sphinx. +# The format is a list of tuples containing the path and title. +# epub_post_files = [] + +# A list of files that should not be packed into the epub file. +# epub_exclude_files = [] + +# The depth of the table of contents in toc.ncx. +# epub_tocdepth = 3 + +# Allow duplicate toc entries. 
+# epub_tocdup = True + +# RBAC sample policy file generation +policy_generator_config_file = '../../etc/policy/octavia-policy-generator.conf' +sample_policy_basename = 'configuration/_static/octavia' + +openstackdocs_repo_name = 'openstack/octavia' +openstackdocs_pdf_link = True +openstackdocs_bug_project = 'octavia' +openstackdocs_bug_tag = 'docs' + +apidoc_output_dir = 'contributor/modules' +apidoc_module_dir = '../../octavia' +apidoc_excluded_paths = [ + 'tests', + 'db/migration' +] diff --git a/doc/source/configuration/configref.rst b/doc/source/configuration/configref.rst new file mode 100644 index 0000000000..280be6e16d --- /dev/null +++ b/doc/source/configuration/configref.rst @@ -0,0 +1,30 @@ +.. + Copyright (c) 2016 Rackspace + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Octavia Configuration Options +============================= + +.. contents:: Table of Contents + :depth: 2 + +.. show-options:: + + keystonemiddleware.auth_token + octavia + oslo.db + oslo.log + oslo.messaging + oslo.middleware.sizelimit + cotyledon diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst new file mode 100644 index 0000000000..037a354eba --- /dev/null +++ b/doc/source/configuration/index.rst @@ -0,0 +1,9 @@ +===================== +Octavia Configuration +===================== + +.. toctree:: + :maxdepth: 1 + + configref + policy diff --git a/doc/source/configuration/policy.rst b/doc/source/configuration/policy.rst new file mode 100644 index 0000000000..1ba653d93d --- /dev/null +++ b/doc/source/configuration/policy.rst @@ -0,0 +1,336 @@ +================ +Octavia Policies +================ + +.. warning:: + + JSON formatted policy file is deprecated since Octavia 8.0.0 (Wallaby). + This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing + JSON-formatted policy file to YAML in a backward-compatible way. + +.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html + +.. _Keystone Default Roles: https://docs.openstack.org/keystone/latest/admin/service-api-protection.html + +Octavia Advanced Role Based Access Control (RBAC) +------------------------------------------------- + +Octavia adopted the "Advanced Role Based Access Control (RBAC)" default +policies in the Pike release of OpenStack. This provides a fine-grained default +access control policy for the Octavia service. + +The Octavia Advanced RBAC goes beyond the OpenStack legacy RBAC policies of +allowing "owners and admins" full access to all services. It also provides a +more fine-grained RBAC policy than the newer `Keystone Default Roles`_ . + +The default policy is to not allow access unless the auth_strategy is 'noauth'. + +Users must be a member of one of the following roles to have access to +the load-balancer API: + +.. glossary:: + + role:load-balancer_observer + User has access to load-balancer read-only APIs. + + role:load-balancer_global_observer + User has access to load-balancer read-only APIs including resources + owned by others. 
+
+   role:load-balancer_member
+      User has access to load-balancer read and write APIs.
+
+   role:load-balancer_quota_admin
+      User is considered an admin for quota APIs only.
+
+   role:load-balancer_admin
+      User is considered an admin for all load-balancer APIs including
+      resources owned by others.
+
+   role:admin and system_scope:all
+      User is considered an admin for all service APIs, including Octavia.
+
+.. note::
+
+    'is_admin:True' is a policy rule that takes into account the
+    auth_strategy == noauth configuration setting.
+    It is equivalent to 'rule:context_is_admin or {auth_strategy == noauth}'
+    if that would be valid syntax.
+
+These roles are in addition to the `Keystone Default Roles`_:
+
+* role:reader
+* role:member
+
+In addition, the Octavia API supports Keystone scoped tokens. When enabled
+in Oslo Policy, users will need to present a token scoped to either the
+"system" or a specific "project". See the section `Upgrade Considerations`_
+for more information.
+
+See the section `Managing Octavia User Roles`_ for examples and advice on how
+to apply these RBAC policies in production.
+
+Legacy Admin or Owner Policy Override File
+------------------------------------------
+
+An alternate policy file has been provided in octavia/etc/policy called
+admin_or_owner-policy.yaml that removes the load-balancer RBAC role
+requirement. Please see the README.rst in that directory for more information.
+
+This will drop the role requirements to allow access to all with the "admin"
+role or if the user is a member of the project that created the resource. All
+users have access to the Octavia API to create and manage load balancers
+under their project.
+
+OpenStack Default Roles Policy Override File
+--------------------------------------------
+
+An alternate policy file has been provided in octavia/etc/policy called
+keystone_default_roles-policy.yaml that removes the load-balancer RBAC role
+requirement. Please see the README.rst in that directory for more information.
+
+This policy will honor the following `Keystone Default Roles`_ in the Octavia
+API:
+
+* Admin
+* Project scoped - Reader
+* Project scoped - Member
+
+In addition, there is an alternate policy file that enables system scoped
+tokens checking called keystone_default_roles_scoped-policy.yaml.
+
+* System scoped - Admin
+* System scoped - Reader
+* Project scoped - Reader
+* Project scoped - Member
+
+
+Managing Octavia User Roles
+---------------------------
+
+User and group roles are managed through the Keystone (identity) project.
+
+A role can be added to a user with the following command::
+
+    openstack role add --project <project> --user <user> <role>
+
+An example where user "jane", in the "engineering" project, gets a new role
+"load-balancer_member"::
+
+    openstack role add --project engineering --user jane load-balancer_member
+
+Keystone Group Roles
+~~~~~~~~~~~~~~~~~~~~
+
+Roles can also be assigned to `Keystone groups
+<https://docs.openstack.org/keystone/latest/admin/>`_.
+This can simplify the management of user roles greatly.
+
+For example, your cloud may have a "users" group defined in Keystone. This
+group is set up to have all of the regular users of your cloud as a member.
+If you want all of your users to have access to the load balancing service
+Octavia, you could add the "load-balancer_member" role to the "users" group::
+
+    openstack role add --domain default --group users load-balancer_member
+
+Upgrade Considerations
+----------------------
+
+Starting with the Wallaby release of Octavia, Keystone token scopes and
+default roles can be enforced.
+By default, in the Wallaby release, `Oslo Policy
+<https://docs.openstack.org/oslo.policy/latest/>`_
+will not be enforcing these new roles and scopes. However, at some point in
+the future they may become the default. You may want to enable them now to be
+ready for the later transition. This section will describe those settings.
+
+The Oslo Policy project defines two configuration settings, among others, that
+can be set in the Octavia configuration file to influence how policies are
+handled in the Octavia API. Those two settings are `enforce_scope
+<https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope>`_
+and `enforce_new_defaults
+<https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_new_defaults>`_.
+
+[oslo_policy] enforce_scope
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Keystone has introduced the concept of `token scopes
+<https://docs.openstack.org/keystone/latest/admin/tokens-overview.html>`_.
+Currently, Oslo Policy defaults to not enforce the scope validation of a
+token for backward compatibility reasons.
+
+The Octavia API supports enforcing the Keystone token scopes as of the Wallaby
+release. If you are ready to start enforcing the Keystone token scope in the
+Octavia API you can add the following setting to your Octavia API
+configuration file::
+
+    [oslo_policy]
+    enforce_scope = True
+
+Currently the primary effect of this setting is to allow a system scoped
+admin token when performing administrative API calls to the Octavia API.
+It will also allow system scoped reader tokens to have the equivalent of the
+load-balancer_global_observer role.
+
+The Octavia API already enforces the project scoping in Keystone tokens.
+
+[oslo_policy] enforce_new_defaults
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Octavia Wallaby release added support for `Keystone Default Roles`_ in
+the default policies. The previous Octavia Advanced RBAC policies have now
+been deprecated in favor of the new policies requiring one of the new
+`Keystone Default Roles`_.
+Currently, Oslo Policy defaults to using the deprecated policies that do not
+require the new `Keystone Default Roles`_ for backward compatibility.
+
+The Octavia API supports requiring these new `Keystone Default Roles`_ as of
+the Wallaby release. If you are ready to start requiring these roles you can
+enable the new policies by adding the following setting to your Octavia API
+configuration file::
+
+    [oslo_policy]
+    enforce_new_defaults = True
+
+When the new default policies are enabled in the Octavia API, users with the
+load-balancer_observer role will also require the Keystone default role of
+"role:reader". Users with the load-balancer_member role will also require
+the Keystone default role of "role:member".
+
+Sample File Generation
+----------------------
+
+To generate a sample policy.yaml file from the Octavia defaults, run the
+oslo policy generation script::
+
+    oslopolicy-sample-generator
+    --config-file etc/policy/octavia-policy-generator.conf
+    --output-file policy.yaml.sample
+
+Merged File Generation
+----------------------
+
+This will output a policy file which includes all registered policy defaults
+and all policies configured with a policy file. This file shows the effective
+policy in use by the project::
+
+    oslopolicy-policy-generator
+    --config-file etc/policy/octavia-policy-generator.conf
+
+This tool uses the output_file path from the config-file.
+
+List Redundant Configurations
+-----------------------------
+
+This will output a list of matches for policy rules that are defined in a
+configuration file where the rule does not differ from a registered default
+rule.
These are rules that can be removed from the policy file with no change
+in effective policy::
+
+    oslopolicy-list-redundant
+    --config-file etc/policy/octavia-policy-generator.conf
+
+Default Octavia Policies - API Effective Rules
+----------------------------------------------
+
+This section lists the RBAC rules the Octavia API will use, followed by the
+roles that are allowed access under each rule.
+
+Without `enforce_scope
+`_ and `enforce_new_defaults
+`_:
+
+* load-balancer:read
+
+  * load-balancer_admin
+  * load-balancer_global_observer
+  * load-balancer_member
+  * load-balancer_observer
+  * role:admin
+
+* load-balancer:read-global
+
+  * load-balancer_admin
+  * load-balancer_global_observer
+  * role:admin
+
+* load-balancer:write
+
+  * load-balancer_admin
+  * load-balancer_member
+  * role:admin
+
+* load-balancer:read-quota
+
+  * load-balancer_admin
+  * load-balancer_global_observer
+  * load-balancer_member
+  * load-balancer_observer
+  * load-balancer_quota_admin
+  * role:admin
+
+* load-balancer:read-quota-global
+
+  * load-balancer_admin
+  * load-balancer_global_observer
+  * load-balancer_quota_admin
+  * role:admin
+
+* load-balancer:write-quota
+
+  * load-balancer_admin
+  * load-balancer_quota_admin
+  * role:admin
+
+With `enforce_scope
+`_ and `enforce_new_defaults
+`_:
+
+* load-balancer:read
+
+  * load-balancer_admin
+  * load-balancer_global_observer
+  * load-balancer_member and role:member
+  * load-balancer_observer and role:reader
+  * role:admin and system_scope:all
+  * role:reader and system_scope:all
+
+* load-balancer:read-global
+
+  * load-balancer_admin
+  * load-balancer_global_observer
+  * role:admin and system_scope:all
+  * role:reader and system_scope:all
+
+* load-balancer:write
+
+  * load-balancer_admin
+  * load-balancer_member and role:member
+  * role:admin and system_scope:all
+
+* load-balancer:read-quota
+
+  * load-balancer_admin
+  * load-balancer_global_observer
+  * load-balancer_member and role:member
+  * load-balancer_observer and role:reader
+  * load-balancer_quota_admin
+  * role:admin and system_scope:all
+  * role:reader and system_scope:all
+
+* load-balancer:read-quota-global
+
+  * load-balancer_admin
+  * load-balancer_global_observer
+  * load-balancer_quota_admin
+  * role:admin and system_scope:all
+  * role:reader and system_scope:all
+
+* load-balancer:write-quota
+
+  * load-balancer_admin
+  * load-balancer_quota_admin
+  * role:admin and system_scope:all
+
+Default Octavia Policies - Generated From The Octavia Code
+----------------------------------------------------------
+
+.. 
literalinclude:: _static/octavia.policy.yaml.sample diff --git a/doc/source/contributor/CONSTITUTION.rst b/doc/source/contributor/CONSTITUTION.rst new file mode 120000 index 0000000000..dfb0d92f30 --- /dev/null +++ b/doc/source/contributor/CONSTITUTION.rst @@ -0,0 +1 @@ +../../../CONSTITUTION.rst \ No newline at end of file diff --git a/doc/source/contributor/HACKING.rst b/doc/source/contributor/HACKING.rst new file mode 120000 index 0000000000..7d6e3b9b8a --- /dev/null +++ b/doc/source/contributor/HACKING.rst @@ -0,0 +1 @@ +../../../HACKING.rst \ No newline at end of file diff --git a/doc/source/contributor/api/haproxy-amphora-api.rst b/doc/source/contributor/api/haproxy-amphora-api.rst new file mode 100644 index 0000000000..46bbf13c2c --- /dev/null +++ b/doc/source/contributor/api/haproxy-amphora-api.rst @@ -0,0 +1,1364 @@ +=========================== +Octavia HAProxy Amphora API +=========================== + +Introduction +============ +This document describes the API interface between the reference haproxy driver +and its corresponding haproxy-based amphorae. + +Octavia reference haproxy amphorae use a web service API for configuration and +control. This API should be secured through the use of TLS encryption as well +as bi-directional verification of client- and server-side certificates. (The +exact process for generating and distributing these certificates should be +covered in another document.) + +In addition to the web service configuration and control interface, the +amphorae may use an HMAC-signed UDP protocol for communicating regular, less- +vital information to the controller (ex. statistics updates and health checks). +Information on this will also be covered in another document. + +If a given loadbalancer is being serviced by multiple haproxy amphorae at the +same time, configuration and control actions should be made on all these +amphorae at approximately the same time. (Amphorae do not communicate directly +with each other, except in an active-standby topology, and then this +communication is limited to fail-over protocols.) + +.. contents:: + +Versioning +---------- +All Octavia APIs (including internal APIs like this one) are versioned. For the +purposes of this document, the initial version of this API shall be 1.0. + +Response codes +-------------- +Typical response codes are: + +* 200 OK - Operation was completed as requested. +* 201 Created - Operation successfully resulted in the creation / processing + of a file. +* 202 Accepted - Command was accepted but is not completed. (Note that this is + used for asynchronous processing.) +* 400 Bad Request - API handler was unable to complete request. +* 401 Unauthorized - Authentication of the client certificate failed. +* 404 Not Found - The requested file was not found. +* 500 Internal Server Error - Usually indicates a permissions problem +* 503 Service Unavailable - Usually indicates a change to a listener was + attempted during a transition of amphora topology. + +A note about storing state +-------------------------- +In the below API, it will become apparent that at times the amphora will need +to be aware of the state of things (topology-wise, or simply in terms running +processes on the amphora). When it comes to storing or gathering this data, we +should generally prefer to try to resolve these concerns in the following +order. Note also that not every kind of state data will use all of the steps in +this list: + +1. Get state information by querying running processes (ex. 
parsing haproxy + status page or querying iptables counters, etc.) +2. Get state by consulting on-disk cache generated by querying running + processes. (In the case where state information is relatively expensive to + collect-- eg. package version listings.) +3. Get state by consulting stored configuration data as sent by the controller. + (ex. amphora topology, haproxy configuration or TLS certificate data) +4. Get state by querying a controller API (not described here). + +In no case should the amphora assume it ever has direct access to the Octavia +database. Also, sensitive data (like TLS certificates) should be stored in +a secure way (ex. memory filesystem). + +API +=== + +Get amphora info +---------------- +* **URL:** /info +* **Method:** GET +* **URL params:** none +* **Data params:** none +* **Success Response:** + + * Code: 200 + + * Content: JSON formatted listing of several basic amphora data. + +* **Error Response:** + + * none + +JSON Response attributes: + +* *hostname* - amphora hostname +* *uuid* - amphora UUID +* *haproxy_version* - Version of the haproxy installed +* *api_version* - Version of haproxy amphora API in use + +**Notes:** The data in this request is used by the controller for determining +the amphora and API version numbers. + +It's also worth noting that this is the only API command that doesn't have a +version string prepended to it. + +**Examples:** + +* Success code 200: + +:: + + { + 'hostname': 'octavia-haproxy-img-00328.local', + 'uuid': '6e2bc8a0-2548-4fb7-a5f0-fb1ef4a696ce', + 'haproxy_version': '1.5.11', + 'api_version': '0.1', + } + +Get amphora details +------------------- + +* **URL:** /1.0/details +* **Method:** GET +* **URL params:** none +* **Data params:** none +* **Success Response:** + + * Code: 200 + + * Content: JSON formatted listing of various amphora statistics. + +* **Error Response:** + + * none + +JSON Response attributes: + +* *hostname* - amphora hostname +* *uuid* - amphora UUID +* *haproxy_version* - Version of the haproxy installed +* *api_version* - Version of haproxy amphora API/agent in use +* *network_tx* - Current total outbound bandwidth in bytes/sec (30-second + snapshot) +* *network_rx* - Current total inbound bandwidth in bytes/sec (30-second + snapshot) +* *active* - Boolean (is amphora in an "active" role?) +* *haproxy_count* - Number of running haproxy processes +* *cpu* - list of percent CPU usage broken down into: + + * total + * user + * system + * soft_irq + +* *memory* - memory usage in kilobytes broken down into: + + * total + * free + * available + * buffers + * cached + * swap_used + * shared + * slab + * committed_as + +* *disk* - disk usage in kilobytes for root filesystem, listed as: + + * used + * available + +* *load* - System load (list) +* *topology* - One of SINGLE, ACTIVE-STANDBY, ACTIVE-ACTIVE +* *topology_status* - One of OK, TOPOLOGY-CHANGE +* *listeners* - list of listener UUIDs being serviced by this amphora +* *packages* - list of load-balancing related packages installed with versions + (eg. OpenSSL, haproxy, nginx, etc.) + +**Notes:** The data in this request is meant to provide intelligence for an +auto-scaling orchestration controller (heat) in order to determine whether +additional (or fewer) virtual amphorae are necessary to handle load. As such, +we may add additional parameters to the JSON listing above if they prove to be +useful for making these decisions. 
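+
+As an illustration, an external monitoring or orchestration client could poll
+this endpoint over the mutually-authenticated TLS channel described in the
+introduction (a sketch only; the certificate file names are hypothetical and
+the hostname is reused from the examples in this document)::
+
+  curl --cert client.pem --key client-key.pem --cacert ca.pem \
+      https://octavia-haproxy-img-00328.local/1.0/details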
+
+The data in this request is also used by the controller for determining overall
+health of the amphora, currently-configured topology and role, etc.
+
+**Examples**
+
+* Success code 200:
+
+::
+
+  {
+    'hostname': 'octavia-haproxy-img-00328.local',
+    'uuid': '6e2bc8a0-2548-4fb7-a5f0-fb1ef4a696ce',
+    'haproxy_version': '1.5.11',
+    'api_version': '0.1',
+    'networks': {
+        'eth0': {
+            'network_tx': 3300138,
+            'network_rx': 982001, }},
+    'active': 'TRUE',
+    'haproxy_count': 3,
+    'cpu': {
+        'total': 0.43,
+        'user': 0.30,
+        'system': 0.05,
+        'soft_irq': 0.08,
+    },
+    'memory': {
+        'total': 4087402,
+        'free': 760656,
+        'available': 2655901,
+        'buffers': 90980,
+        'cached': 1830143,
+        'swap_used': 943,
+        'shared': 105792,
+        'slab': 158819,
+        'committed_as': 2643480,
+    },
+    'disk': {
+        'used': 1234567,
+        'available': 5242880,
+    },
+    'load': [0.50, 0.45, 0.47],
+    'topology': 'SINGLE',
+    'topology_status': 'OK',
+    'listeners': [
+        '02d0da8d-fc65-4bc4-bc46-95cadb2315d2',
+        '98e706a7-d22c-422f-9632-499fd83e12c0',
+    ],
+    'packages': [
+        {'haproxy': '1.5.1'},
+        {'bash': '4.3.23'},
+        {'lighttpd': '1.4.33-1'},
+        {'openssl': '1.0.1f'},
+    ],
+  }
+
+Get interface
+-------------
+
+* **URL:** /1.0/interface/*:ip*
+* **Method:** GET
+* **URL params:**
+
+  * *:ip* = the ip address to find the interface name
+
+* **Data params:** none
+* **Success Response:**
+
+  * Code: 200
+
+    * Content: OK
+    * Content: JSON formatted interface
+
+* **Error Response:**
+
+  * Code: 400
+
+    * Content: Bad IP address version
+
+  * Code: 404
+
+    * Content: Error interface not found for IP address
+
+* **Response:**
+
+| OK
+| eth1
+
+**Examples:**
+
+* Success code 200:
+
+::
+
+  GET URL:
+      https://octavia-haproxy-img-00328.local/1.0/interface/10.0.0.1
+
+  JSON Response:
+  {
+      'message': 'OK',
+      'interface': 'eth1'
+  }
+
+
+* Error code 404:
+
+::
+
+  GET URL:
+      https://octavia-haproxy-img-00328.local/1.0/interface/10.5.0.1
+
+  JSON Response:
+  {
+      'message': 'Error interface not found for IP address',
+  }
+
+
+* Error code 400:
+
+::
+
+  GET URL:
+      https://octavia-haproxy-img-00328.local/1.0/interface/10.6.0.1.1
+
+  JSON Response:
+  {
+      'message': 'Bad IP address version',
+  }
+
+
+Get all listeners' statuses
+---------------------------
+
+* **URL:** /1.0/listeners
+* **Method:** GET
+* **URL params:** none
+* **Data params:** none
+* **Success Response:**
+
+  * Code: 200
+
+    * Content: JSON-formatted listing of each listener's status
+
+* **Error Response:**
+
+  * none
+
+JSON Response attributes:
+
+Note that the command will return an array of *all* listeners' statuses. Each
+listener status contains the following attributes:
+
+* *status* - One of the operational status: ACTIVE, STOPPED, ERROR -
+  future versions might support provisioning status:
+  PENDING_CREATE, PENDING_UPDATE, PENDING_DELETE, DELETED
+* *uuid* - Listener UUID
+* *type* - One of: TCP, HTTP, TERMINATED_HTTPS
+
+**Notes:** Note that this returns a status if: the pid file exists, the stats
+socket exists, or an haproxy configuration is present (not just if there is
+a valid haproxy configuration). 
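+
+For quick spot checks, the returned array can be filtered on the client side.
+For example, listing only the listeners that are not ACTIVE might look like
+this (a sketch; it assumes the hypothetical client certificates used in the
+earlier curl example and that ``jq`` is installed)::
+
+  curl --cert client.pem --key client-key.pem --cacert ca.pem \
+      https://octavia-haproxy-img-00328.local/1.0/listeners \
+      | jq '.[] | select(.status != "ACTIVE")'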
+
+**Examples**
+
+* Success code 200:
+
+::
+
+  [{
+    'status': 'ACTIVE',
+    'uuid': 'e2dfddc0-5b9e-11e4-8ed6-0800200c9a66',
+    'type': 'HTTP',
+  },
+  {
+    'status': 'STOPPED',
+    'uuid': '19d45130-5b9f-11e4-8ed6-0800200c9a66',
+    'type': 'TERMINATED_HTTPS',
+  }]
+
+Start or Stop a load balancer
+-----------------------------
+
+* **URL:** /1.0/loadbalancer/*:object_id*/*:action*
+* **Method:** PUT
+* **URL params:**
+
+  * *:object_id* = Object UUID
+  * *:action* = One of: start, stop, reload
+
+* **Data params:** none
+* **Success Response:**
+
+  * Code: 202
+
+    * Content: OK
+    * *(Also contains preliminary results of attempt to start / stop / soft \
+      restart (reload) the haproxy daemon)*
+
+* **Error Response:**
+
+  * Code: 400
+
+    * Content: Invalid request
+
+  * Code: 404
+
+    * Content: Listener Not Found
+
+  * Code: 500
+
+    * Content: Error starting / stopping / reload_config haproxy
+    * *(Also contains error output from attempt to start / stop / soft \
+      restart (reload) haproxy)*
+
+  * Code: 503
+
+    * Content: Topology transition in progress
+
+* **Response:**
+
+| OK
+| Configuration file is valid
+| haproxy daemon for 85e2111b-29c4-44be-94f3-e72045805801 started (pid 32428)
+
+**Examples:**
+
+* Success code 202:
+
+::
+
+  PUT URL:
+      https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/start
+
+  JSON Response:
+  {
+      'message': 'OK',
+      'details': 'Configuration file is valid\nhaproxy daemon for 85e2111b-29c4-44be-94f3-e72045805801 started',
+  }
+
+* Error code 400:
+
+::
+
+  PUT URL:
+      https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/BAD_TEST_DATA
+
+  JSON Response:
+  {
+      'message': 'Invalid Request',
+      'details': 'Unknown action: BAD_TEST_DATA',
+  }
+
+* Error code 404:
+
+::
+
+  PUT URL:
+      https://octavia-haproxy-img-00328.local/1.0/loadbalancer/04bff5c3-5862-4a13-b9e3-9b440d0ed50a/stop
+
+  JSON Response:
+  {
+      'message': 'Listener Not Found',
+      'details': 'No loadbalancer with UUID: 04bff5c3-5862-4a13-b9e3-9b440d0ed50a',
+  }
+
+* Error code 500:
+
+::
+
+  PUT URL:
+      https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/stop
+
+  Response:
+  {
+      'message': 'Error stopping haproxy',
+      'details': 'haproxy process with PID 3352 not found',
+  }
+
+* Error code 503:
+
+::
+
+  Response:
+  {
+      'message': 'Topology transition in progress',
+  }
+
+Delete a listener
+-----------------
+
+* **URL:** /1.0/listeners/*:listener*
+* **Method:** DELETE
+* **URL params:**
+
+  * *:listener* = Listener UUID
+
+* **Data params:** none
+* **Success Response:**
+
+  * Code: 200
+
+    * Content: OK
+
+* **Error Response:**
+
+  * Code: 404
+
+    * Content: Not Found
+
+  * Code: 503
+
+    * Content: Topology transition in progress
+
+* **Response:**
+
+| OK
+
+* **Implied actions:**
+
+  * Stop listener
+  * Delete IPs, iptables accounting rules, etc. from this amphora if they're no
+    longer in use.
+  * Clean up listener configuration directory.
+  * Delete listener's SSL certificates
+  * Clean up logs (ship final logs to logging destination if configured)
+  * Clean up stats socket. 
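+
+Issued by hand for debugging, such a delete might look like the following
+(a sketch; the certificate file names are hypothetical and the UUID is taken
+from the examples below)::
+
+  curl -X DELETE --cert client.pem --key client-key.pem --cacert ca.pem \
+      https://octavia-haproxy-img-00328.local/1.0/listeners/04bff5c3-5862-4a13-b9e3-9b440d0ed50a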
+ +**Examples** + +* Success code 200: + +:: + + DELETE URL: + https://octavia-haproxy-img-00328.local/1.0/listeners/04bff5c3-5862-4a13-b9e3-9b440d0ed50a + + JSON Response: + { + 'message': 'OK' + } + +* Error code 404: + +:: + + DELETE URL: + https://octavia-haproxy-img-00328.local/1.0/listeners/04bff5c3-5862-4a13-b9e3-9b440d0ed50a + + JSON Response: + { + 'message': 'Listener Not Found', + 'details': 'No listener with UUID: 04bff5c3-5862-4a13-b9e3-9b440d0ed50a', + } + +* Error code 503: + +:: + + Response: + { + 'message': 'Topology transition in progress', + } + +Upload SSL certificate PEM file +------------------------------- + +* **URL:** /1.0/loadbalancer/*:loadbalancer_id*/certificates/*:filename.pem* +* **Method:** PUT +* **URL params:** + + * *:loadbalancer_id* = Load balancer UUID + * *:filename* = PEM filename (see notes below for naming convention) + +* **Data params:** Certificate data. (PEM file should be a concatenation of + unencrypted RSA key, certificate and chain, in that order) +* **Success Response:** + + * Code: 201 + + * Content: OK + +* **Error Response:** + + * Code: 400 + + * Content: No certificate found + + * Code: 400 + + * Content: No RSA key found + + * Code: 400 + + * Content: Certificate and key do not match + + * Code: 404 + + * Content: Not Found + + * Code: 503 + + * Content: Topology transition in progress + +* **Response:** + +| OK + +**Notes:** +* filename.pem should match the primary CN for which the +certificate is valid. All-caps WILDCARD should be used to replace an asterisk +in a wildcard certificate (eg. a CN of '\*.example.com' should have a filename +of 'WILDCARD.example.com.pem'). Filenames must also have the .pem extension. +* In order for the new certificate to become effective the haproxy needs to be +explicitly restarted + +**Examples:** + +* Success code 201: + +:: + + PUT URI: + https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem + (Put data should contain the certificate information, concatenated as + described above) + + JSON Response: + { + 'message': 'OK' + } + +* Error code 400: + +:: + + PUT URI: + https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem + (If PUT data does not contain a certificate) + + JSON Response: + { + 'message': 'No certificate found' + } + +* Error code 400: + +:: + + PUT URI: + https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem + (If PUT data does not contain an RSA key) + + JSON Response: + { + 'message': 'No RSA key found' + } + +* Error code 400: + +:: + + PUT URI: + https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem + (If the first certificate and the RSA key do not have the same modulus.) 
+ + JSON Response: + { + 'message': 'Certificate and key do not match' + } + +* Error code 404: + +:: + + PUT URI: + https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem + + JSON Response: + { + 'message': 'Listener Not Found', + 'details': 'No loadbalancer with UUID: 04bff5c3-5862-4a13-b9e3-9b440d0ed50a', + } + +* Error code 503: + +:: + + Response: + { + 'message': 'Topology transition in progress', + } + +Get SSL certificate md5sum +-------------------------- + +* **URL:** /1.0/loadbalancer/*:loadbalancer_id*/certificates/*:filename.pem* +* **Method:** GET +* **URL params:** + + * *:loadbalancer_id* = Load balancer UUID + * *:filename* = PEM filename (see notes below for naming convention) + +* **Data params:** none +* **Success Response:** + + * Code: 200 + + * Content: PEM file md5sum + +* **Error Response:** + + * Code: 404 + + * Content: Not Found + +* **Response:** + +| + +* **Implied actions:** none + +**Notes:** The md5sum is the sum from the raw certificate data as stored on +the amphora (which will usually include the RSA key, certificate and chain +concatenated together). Note that we don't return any actual raw certificate +data as the controller should already know this information, and unnecessarily +disclosing it over the wire from the amphora is a security risk. + +**Examples:** + +* Success code 200: + +:: + + JSON response: + { + 'md5sum': 'd8f6629d5e3c6852fa764fb3f04f2ffd', + } + +* Error code 404: + +:: + + JSON Response: + { + 'message': 'Listener Not Found', + 'details': 'No loadbalancer with UUID: 04bff5c3-5862-4a13-b9e3-9b440d0ed50a', + } + +* Error code 404: + +:: + + JSON Response: + { + 'message': 'Certificate Not Found', + 'details': 'No certificate with file name: www.example.com.pem', + } + +Delete SSL certificate PEM file +------------------------------- + +* **URL:** /1.0/loadbalancer/*:loadbalancer_id*/certificates/*:filename.pem* +* **Method:** DELETE +* **URL params:** + + * *:loadbalancer_id* = Load balancer UUID + * *:filename* = PEM filename (see notes below for naming convention) + +* **Data params:** none +* **Success Response:** + + * Code: 200 + + * Content: OK + +* **Error Response:** + + * Code: 404 + + * Content: Not found + + * Code: 503 + + * Content: Topology transition in progress + +* **Implied actions:** + + * Clean up listener configuration directory if it's now empty. 
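+
+As an aside, when the upload call above returns "Certificate and key do not
+match", the comparison the amphora performs can be reproduced locally with
+standard OpenSSL commands (a sketch; the file name follows the naming
+convention described earlier). The two digests must be identical::
+
+  openssl x509 -noout -modulus -in www.example.com.pem | openssl md5
+  openssl rsa -noout -modulus -in www.example.com.pem | openssl md5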
+ +**Examples:** + +* Success code 200: + +:: + + DELETE URL: + https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem + + JSON Response: + { + 'message': 'OK' + } + +* Error code 404: + +:: + + DELETE URL: + https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/certificates/www.example.com.pem + + JSON Response: + { + 'message': 'Certificate Not Found', + 'details': 'No certificate with file name: www.example.com.pem', + } + +* Error code 503: + +:: + + Response: + { + 'message': 'Topology transition in progress', + } + +Upload load balancer haproxy configuration +------------------------------------------ + +* **URL:** /1.0/loadbalancer/*:amphora_id*/*:loadbalancer_id*/haproxy +* **Method:** PUT +* **URL params:** + + * *:loadbalancer_id* = Load Balancer UUID + * *:amphora_id* = Amphora UUID + +* **Data params:** haproxy configuration file for the listener +* **Success Response:** + + * Code: 201 + + * Content: OK + +* **Error Response:** + + * Code: 400 + + * Content: Invalid configuration + * *(Also includes error output from configuration check command)* + + * Code: 503 + + * Content: Topology transition in progress + +* **Response:** + +| OK +| Configuration file is valid + +* **Implied actions:** + + * Do a syntax check on haproxy configuration file prior to an attempt to + run it. + * Add resources needed for stats, logs, and connectivity + +**Notes:** The uploaded configuration file should be a complete and +syntactically-correct haproxy config. The amphora does not have intelligence +to generate these itself and has only rudimentary ability to parse certain +features out of the configuration file (like bind addresses and ports for +purposes of setting up stats, and specially +formatted comments meant to indicate pools and members that will be parsed +out of the haproxy daemon status interface for tracking health and stats). + +**Examples:** + +* Success code 201: + +:: + + PUT URL: + https://octavia-haproxy-img-00328.local/1.0/loadbalancer/d459b1c8-54b0-4030-9bec-4f449e73b1ef/85e2111b-29c4-44be-94f3-e72045805801/haproxy + (Upload PUT data should be a raw haproxy.conf file.) 
+ + JSON Response: + { + 'message': 'OK' + } + +* Error code 400: + +:: + + JSON Response: + { + 'message': 'Invalid request', + 'details': '[ALERT] 300/013045 (28236) : parsing [haproxy.cfg:4]: unknown keyword 'BAD_LINE' out of section.\n[ALERT] 300/013045 (28236) : Error(s) found in configuration file : haproxy.cfg\n[ALERT] 300/013045 (28236) : Fatal errors found in configuration.', + } + +* Error code 503: + +:: + + Response: + { + 'message': 'Topology transition in progress', + } + +Get loadbalancer haproxy configuration +-------------------------------------- + +* **URL:** /1.0/loadbalancer/*:loadbalancer_id*/haproxy +* **Method:** GET +* **URL params:** + + * *:loadbalancer_id* = Load balancer UUID + +* **Data params:** none +* **Success Response:** + + * Code: 200 + + * Content: haproxy configuration file for the listener + +* **Error Response:** + + * Code: 404 + + * Content: Not found + +* **Response:** + +| # Config file for 85e2111b-29c4-44be-94f3-e72045805801 +| (cut for brevity) + +* **Implied actions:** none + +**Examples:** + +* Success code 200: + +:: + + GET URL: + https://octavia-haproxy-img-00328.local/1.0/loadbalancer/85e2111b-29c4-44be-94f3-e72045805801/haproxy + + Response is the raw haproxy.cfg: + + # Config file for 85e2111b-29c4-44be-94f3-e72045805801 + (cut for brevity) + +* Error code 404: + +:: + + JSON Response: + { + 'message': 'Loadbalancer Not Found', + 'details': 'No loadbalancer with UUID: 04bff5c3-5862-4a13-b9e3-9b440d0ed50a', + } + +Plug VIP +-------- + +* **URL:** /1.0/plug/vip/*:ip* +* **Method:** Post +* **URL params:** + + * *:ip* = the vip's ip address + +* **Data params:** + + * *subnet_cidr*: The vip subnet in cidr notation + * *gateway*: The vip subnet gateway address + * *mac_address*: The mac address of the interface to plug + +* **Success Response:** + + * Code: 202 + + * Content: OK + +* **Error Response:** + * Code: 400 + + * Content: Invalid IP + * Content: Invalid subnet information + + * Code: 404 + + * Content: No suitable network interface found + + * Code: 500 + + * Content: Error plugging VIP + * (Also contains error output from the ip up command) + + * Code: 503 + + * Content: Topology transition in progress + +* **Response:** + +| OK +| VIP ip plugged on interface + +* **Implied actions:** + + * Look for an interface marked as down (recently added port) + * Assign VIP + * Bring that interface up + +**Examples:** + +* Success code 202: + +:: + + POST URL: + https://octavia-haproxy-img-00328.local/1.0/plug/vip/203.0.113.2 + + JSON POST parameters: + { + 'subnet_cidr': '203.0.113.0/24', + 'gateway': '203.0.113.1', + 'mac_address': '78:31:c1:ce:0b:3c' + } + + JSON Response: + { + 'message': 'OK', + 'details': 'VIP 203.0.113.2 plugged on interface eth1' + } + +* Error code 400: + +:: + + JSON Response: + { + 'message': 'Invalid VIP', + } + +* Error code 404: + +:: + + JSON Response: + { + 'message': 'No suitable network interface found', + } + + +Plug Network +------------ + +* **URL:** /1.0/plug/network/ +* **Method:** POST +* **URL params:** none + +* **Data params:** + + * *mac_address*: The mac address of the interface to plug + +* **Success Response:** + + * Code: 202 + + * Content: OK + +* **Error Response:** + + * Code: 404 + + * Content: No suitable network interface found + + * Code: 500 + + * Content: Error plugging Port + * (Also contains error output from the ip up command) + + * Code: 503 + + * Content: Topology transition in progress + +* **Response:** + +| OK +| Plugged interface + +**Examples:** + +* Success code 202: + 
+::
+
+  POST URL:
+      https://octavia-haproxy-img-00328.local/1.0/plug/network/
+
+  JSON POST parameters:
+  {
+      'mac_address': '78:31:c1:ce:0b:3c'
+  }
+
+  JSON Response:
+  {
+      'message': 'OK',
+      'details': 'Plugged interface eth1'
+  }
+
+
+* Error code 404:
+
+::
+
+  JSON Response:
+  {
+      'message': 'No suitable network interface found',
+  }
+
+
+Upload SSL server certificate PEM file for Controller Communication
+--------------------------------------------------------------------
+
+* **URL:** /1.0/certificate
+* **Method:** PUT
+
+* **Data params:** Certificate data. (PEM file should be a concatenation of
+  unencrypted RSA key, certificate and chain, in that order)
+* **Success Response:**
+
+  * Code: 202
+
+    * Content: OK
+
+* **Error Response:**
+
+  * Code: 400
+
+    * Content: No certificate found
+
+  * Code: 400
+
+    * Content: No RSA key found
+
+  * Code: 400
+
+    * Content: Certificate and key do not match
+
+
+* **Response:**
+
+| OK
+
+**Notes:**
+Since a certificate may expire before the amphora is retired, this provides a
+way to rotate it. Once the certificate is uploaded, the agent is restarted, so
+depending on the implementation the service may be unavailable for some time.
+
+**Examples:**
+
+* Success code 202:
+
+::
+
+  PUT URI:
+      https://octavia-haproxy-img-00328.local/1.0/certificate
+  (Put data should contain the certificate information, concatenated as
+  described above)
+
+  JSON Response:
+  {
+      'message': 'OK'
+  }
+
+* Error code 400:
+
+::
+
+  PUT URI:
+      https://octavia-haproxy-img-00328.local/1.0/certificate
+  (If PUT data does not contain a certificate)
+
+  JSON Response:
+  {
+      'message': 'No certificate found'
+  }
+
+* Error code 400:
+
+::
+
+  PUT URI:
+      https://octavia-haproxy-img-00328.local/1.0/certificate
+  (If PUT data does not contain an RSA key)
+
+  JSON Response:
+  {
+      'message': 'No RSA key found'
+  }
+
+* Error code 400:
+
+::
+
+  PUT URI:
+      https://octavia-haproxy-img-00328.local/1.0/certificate
+  (If the first certificate and the RSA key do not have the same modulus.)
+
+  JSON Response:
+  {
+      'message': 'Certificate and key do not match'
+  }
+
+
+Upload keepalived configuration
+-------------------------------
+
+* **URL:** /1.0/vrrp/upload
+* **Method:** PUT
+* **URL params:** none
+* **Data params:** none
+* **Success Response:**
+
+  * Code: 200
+
+    * Content: OK
+
+* **Error Response:**
+
+  * Code: 500
+
+    * Content: Failed to upload keepalived configuration. 
+
+* **Response:**
+
+OK
+
+**Examples:**
+
+* Success code 200:
+
+::
+
+  PUT URI:
+      https://octavia-haproxy-img-00328.local/1.0/vrrp/upload
+
+  JSON Response:
+  {
+      'message': 'OK'
+  }
+
+
+Start, Stop, or Reload keepalived
+---------------------------------
+
+* **URL:** /1.0/vrrp/*:action*
+* **Method:** PUT
+* **URL params:**
+
+  * *:action* = One of: start, stop, reload
+
+* **Data params:** none
+* **Success Response:**
+
+  * Code: 202
+
+    * Content: OK
+
+* **Error Response:**
+
+  * Code: 400
+
+    * Content: Invalid Request
+
+  * Code: 500
+
+    * Content: Failed to start / stop / reload keepalived service:
+    * *(Also contains error output from attempt to start / stop / \
+      reload keepalived)*
+
+* **Response:**
+
+| OK
+| keepalived started
+
+**Examples:**
+
+* Success code 202:
+
+::
+
+  PUT URL:
+      https://octavia-haproxy-img-00328.local/1.0/vrrp/start
+
+  JSON Response:
+  {
+      'message': 'OK',
+      'details': 'keepalived started',
+  }
+
+* Error code: 400
+
+::
+
+  PUT URL:
+      https://octavia-haproxy-img-00328.local/1.0/vrrp/BAD_TEST_DATA
+
+  JSON Response:
+  {
+      'message': 'Invalid Request',
+      'details': 'Unknown action: BAD_TEST_DATA',
+  }
+
+* Error code: 500
+
+::
+
+  PUT URL:
+      https://octavia-haproxy-img-00328.local/1.0/vrrp/stop
+
+  JSON Response:
+  {
+      'message': 'Failed to stop keepalived service',
+      'details': 'keepalived process with PID 3352 not found',
+  }
+
+Update the amphora agent configuration
+--------------------------------------
+
+* **URL:** /1.0/config
+* **Method:** PUT
+
+* **Data params:** An amphora-agent configuration file
+* **Success Response:**
+
+  * Code: 202
+
+    * Content: OK
+
+* **Error Response:**
+
+  * Code: 500
+
+    * message: Unable to update amphora-agent configuration.
+    * details: *(The exception details)*
+
+* **Response:**
+
+| OK
+
+* **Implied actions:**
+
+  * The running amphora-agent configuration file is mutated.
+
+**Notes:** Only options that are marked mutable in the oslo configuration
+will be updated.
+
+**Examples:**
+
+* Success code 202:
+
+::
+
+  PUT URL:
+      https://octavia-haproxy-img-00328.local/1.0/config
+  (Upload PUT data should be a raw amphora-agent.conf file.)
+
+  JSON Response:
+  {
+      'message': 'OK'
+  }
+
+* Error code 500:
+
+::
+
+  JSON Response:
+  {
+      'message': 'Unable to update amphora-agent configuration.',
+      'details': *(The exception output)*,
+  }
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
new file mode 100644
index 0000000000..88860ebc05
--- /dev/null
+++ b/doc/source/contributor/contributing.rst
@@ -0,0 +1,186 @@
+So You Want to Contribute...
+============================
+
+For general information on contributing to OpenStack, please check out the
+`contributor guide `_ to get started.
+It covers all the basics that are common to all OpenStack projects: the
+accounts you need, the basics of interacting with our Gerrit review system,
+how we communicate as a community, etc.
+
+Below, we cover the more project-specific information you need to get started
+with Octavia.
+
+Communication
+~~~~~~~~~~~~~
+
+IRC
+    People working on the Octavia project may be found in the
+    ``#openstack-lbaas`` channel on the IRC network described in
+    https://docs.openstack.org/contributors/common/irc.html
+    during working hours in their timezone. 
The channel is logged, so if
+    you ask a question when no one is around, you can check the log to see
+    if it's been answered:
+    http://eavesdrop.openstack.org/irclogs/%23openstack-lbaas/
+
+Weekly Meeting
+    The Octavia team meets weekly on IRC. Please see the OpenStack
+    meetings page for the current meeting details and ICS file:
+    http://eavesdrop.openstack.org/#Octavia_Meeting
+    Meetings are logged: http://eavesdrop.openstack.org/meetings/octavia/
+
+Mailing List
+    We use the openstack-discuss@lists.openstack.org mailing list for
+    asynchronous discussions or to communicate with other OpenStack teams.
+    Use the prefix ``[octavia]`` in your subject line (it's a high-volume
+    list, so most people use email filters).
+
+    More information about the mailing list, including how to subscribe
+    and read the archives, can be found at:
+    http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Virtual Meet-ups
+    From time to time, the Octavia project will have video meetings to
+    address topics not easily covered by the above methods. These are
+    announced well in advance at the weekly meeting and on the mailing
+    list.
+
+Physical Meet-ups
+    The Octavia project usually has a presence at the OpenDev/OpenStack
+    Project Team Gathering that takes place at the beginning of each
+    development cycle. Planning happens on an etherpad whose URL is
+    announced at the weekly meetings and on the mailing list.
+
+Contacting the Core Team
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The octavia-core team is an active group of contributors who are responsible
+for directing and maintaining the Octavia project. As a new contributor, your
+interaction with this group will be mostly through code reviews, because
+only members of octavia-core can approve a code change to be merged into the
+code repository.
+
+.. note::
+   Although your contribution will require reviews by members of
+   octavia-core, these aren't the only people whose reviews matter.
+   Anyone with a gerrit account can post reviews, so you can ask
+   other developers you know to review your code ... and you can
+   review theirs. (A good way to learn your way around the codebase
+   is to review other people's patches.)
+
+   If you're thinking, "I'm new at this, how can I possibly provide
+   a helpful review?", take a look at `How to Review Changes the
+   OpenStack Way
+   `_.
+
+   There are also some Octavia project specific reviewing guidelines
+   in the :ref:`octavia-style-commandments` section of the Octavia Contributor
+   Guide.
+
+You can learn more about the role of core reviewers in the OpenStack
+governance documentation:
+https://docs.openstack.org/contributors/common/governance.html#core-reviewer
+
+The membership list of octavia-core is maintained in gerrit:
+https://review.opendev.org/#/admin/groups/370,members
+
+You can also find the members of the octavia-core team at the Octavia weekly
+meetings.
+
+New Feature Planning
+~~~~~~~~~~~~~~~~~~~~
+
+The Octavia team uses both Request For Enhancement (RFE) and Specifications
+(specs) processes for new features.
+
+RFE
+    When a feature being proposed is easy to understand and will have limited
+    scope, the requester will create an RFE in Launchpad. This is a bug report
+    that includes the tag **[RFE]** in the subject prefix.
+
+    Once an RFE bug report is created, a core reviewer or the Project Team
+    Lead (PTL) will approve the RFE by setting the Importance field to
+    **Wishlist**. 
This signals that the core team understands the feature
+    being proposed and enough detail has been provided to make sure the core
+    team understands the goal of the change.
+
+specs
+    If the new feature is a major change or addition to Octavia that will need
+    a detailed design to be successful, the Octavia team requires a
+    specification (spec) proposal be submitted as a patch.
+
+    Octavia specification documents are stored in the /octavia/specs directory
+    in the main Octavia git repository:
+    https://opendev.org/openstack/octavia/src/branch/master/specs
+    This directory contains a `template.rst `_ file with instructions for
+    creating a new Octavia specification.
+
+    These specification documents are then rendered and included in the
+    `Project Specifications `_ section of the Octavia Contributor
+    Guide.
+
+Feel free to ask in ``#openstack-lbaas`` or at the weekly meeting if you
+have an idea you want to develop and you're not sure whether it requires
+an RFE or a specification.
+
+The Octavia project observes the OpenStack-wide deadlines,
+for example, final release of non-client libraries (octavia-lib), final
+release for client libraries (python-octaviaclient), feature freeze,
+etc. These are noted and explained on the release schedule for the current
+development cycle available at: https://releases.openstack.org/
+
+Task Tracking
+~~~~~~~~~~~~~
+
+We track our tasks in `Launchpad
+`_.
+
+If you're looking for a smaller, easier work item to pick up and get started
+on, search for the 'low-hanging-fruit' tag.
+
+When you start working on a bug, make sure you assign it to yourself.
+Otherwise someone else may also start working on it, and we don't want to
+duplicate efforts. Also, if you find a bug in the code and want to post a
+fix, make sure you file a bug (and assign it to yourself!) just in case someone
+else comes across the problem in the meantime.
+
+Reporting a Bug
+~~~~~~~~~~~~~~~
+
+You found an issue and want to make sure we are aware of it? You can do so on
+`Launchpad
+`_.
+
+Please remember to include the following information:
+
+* The version of Octavia and OpenStack you observed the issue in.
+* Steps to reproduce.
+* Expected behavior.
+* Observed behavior.
+* The log snippet that contains any error information. Please include the lines
+  directly before the error message(s) as they provide context for the error.
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Octavia project policy is that a patch must have two +2 reviews from the
+core reviewers before it can be merged.
+
+Patches for Octavia projects must include unit and functional tests that cover
+the new code. Octavia projects include the "openstack-tox-cover" testing job to
+help identify test coverage gaps in a patch. This can also be run locally by
+running "tox -e cover".
+
+In addition, some changes may require a release note. Any patch that
+changes functionality, adds functionality, or addresses a significant
+bug should have a release note. Release notes can be created using the "reno"
+tool by running "reno new <slug>".
+
+Keep in mind that the best way to make sure your patches are reviewed in
+a timely manner is to review other people's patches. We're engaged in a
+cooperative enterprise here.
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+All common PTL duties are enumerated in the `PTL guide
+`_. 
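+
+For convenience, the test coverage and release note commands mentioned above
+look like this when run from the root of the repository (the release note
+slug is only an example)::
+
+  tox -e cover
+  reno new my-new-feature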
diff --git a/doc/source/contributor/design/version0.5/component-design.rst b/doc/source/contributor/design/version0.5/component-design.rst new file mode 100644 index 0000000000..2bff8efcd1 --- /dev/null +++ b/doc/source/contributor/design/version0.5/component-design.rst @@ -0,0 +1,517 @@ +============================= +Octavia v0.5 Component Design +============================= +Please refer to the following diagram of the Octavia v0.5 components: + +.. graphviz:: v0.5-component-design.dot + +This milestone release of Octavia concentrates on making the service delivery +scalable (though individual listeners are not horizontally scalable at this +stage), getting API and other interfaces between major components correct, +without worrying about making the command and control layer scalable. + +Note that this design is not yet "operator grade" but is a good first step to +achieving operator grade (which will occur with version 1 of Octavia). + +================ +LBaaS Components +================ +The entities in this section describe components that are part of the Neutron +LBaaS project, with which Octavia interfaces to deliver load balancing +services. + +USER API HANDLER +---------------- +This is the front-end that users (and user GUIs or what have you) talk to +manipulate load balancing services. + +**Notes:** + +* All implementation details are hidden from the user in this interface + +* Performs a few simple sanity checks on user-supplied data, but otherwise + looks to a driver provide more detail around whether what the user is asking + for is possible on the driver's implementation. + +* Any functionality that the user asks for that their back-end flavor / driver + doesn't support will be met with an error when the user attempts to configure + services this way. (There may be multiple kinds of errors: "incomplete + configuration" would be non-fatal and allow DB objects to be created / + altered. "incompatible configuration" would be fatal and disallow DB objects + from being created / associations made.) Examples of this include: UDP + protocol for a listener on a driver/flavor that uses only haproxy as its + back-end. + +* Drivers should also be able to return 'out of resources' or 'some other + error occurred' errors (hopefully with helpful error messages). + +* This interface is stateless, though drivers may keep state information in a + database. In any case, this interface should be highly scalable. + +* Talks some "intermediate driver interface" with the driver. This takes the + form of python objects passed directly within the python code to the driver. + +========================= +LBaaS / Octavia Crossover +========================= +The entities in this section are "glue" components which allow Octavia to +interface with other services in the OpenStack environment. The idea here is +that we want Octavia to be as loosely-coupled as possible with those services +with which it must interact in order to keep these interfaces as clean as +possible. + +Initially, all the components in this section will be entirely under the +purview of the Octavia project. Over time some of these components might be +eliminated entirely, or reduced in scope as these third-party services +evolve and increase in cleanly-consumable functionality. + +DRIVER +------ +This is the part of the load balancing service that actually interfaces between +the (sanitized) user and operator configuration and the back-end load balancing +appliances or other "service providing entity." 
+ +**Notes:** + +* Configuration of the driver is handled via service profile definitions in + association with the Neutron flavor framework. Specifically, a given flavor + has service profiles associated with it, and service profiles which + specify the Octavia driver will include meta-data (in the form of JSON + configuration) which is used by the driver to define implementation + specifics (for example, HA configuration and other details). + +* Driver will be loaded by the daemon that does the user API and operator API. + It is not, in and of itself, its own daemon, though a given vendor's back-end + may contain its own daemons or other services that the driver interfaces + with. + +* It is thought that the driver front-end should be stateless in order to make + it horizontally scalable and to preserves the statelessness of the user and + operator API handlers. Note that the driver may interface with back-end + components which need not be stateless. + +* It is also possible for multiple instances of the driver will talk to the + same amphora at the same time. Emphasis on the idempotency of the update + algorithms used should help minimize the issues this can potentially cause. + +NETWORK DRIVER +-------------- +In order to keep Octavia's design more clean as a pure consumer of network +services, yet still be able to develop Octavia at a time when it is impossible +to provide the kind of load balancing services we need to provide without +"going around" the existing Neutron API, we have decided to write a "network +driver" component which does those dirty back-end configuration commands via +an API we write, until these can become a standard part of Neutron. This +component should be as loosely coupled with Octavia as Octavia will be with +Neutron and present a standard interface to Octavia for accomplishing network +configuration tasks (some of which will simply be a direct correlation with +existing Neutron API commands). + +**Notes:** + +* This is a daemon or "unofficial extension", presumably living on a Neutron + network node which should have "back door" access to all things Neutron and + exposes an API that should only be used by Octavia. + +* Exactly what API will be provided by this driver will be defined as we + continue to build out the reference implementation for Octavia. + +* Obviously, as we discover missing functionality in the Neutron API, we should + work with the Neutron core devs to get these added to the API in a timely + fashion: We want the Network driver to be as lightweight as possible. + + +================== +Octavia Components +================== +Everything from here down are entities that have to do with the Octavia driver +and load balancing system. Other vendor drivers are unlikely to have the same +components and internal structure. It is planned that Octavia will become the +new reference implementation for LBaaS, though it of course doesn't need to be +the only one. (In fact, a given operator should be able to use multiple vendors +with potentially multiple drivers and multiple driver configurations through +the Neutron Flavor framework.) + + +OPERATOR API HANDLER +-------------------- +This is exactly like the USER API HANDLER in function, except that +implementation details are exposed to the operator, and certain admin-level +features are exposed (ex. listing a given tenant's loadbalancers, & etc.) 
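+
+For instance, the kind of admin-level listing described above might surface
+through a client as something like the following (a sketch only; this uses
+the modern python-octaviaclient syntax, which post-dates this design
+document, and a hypothetical project name)::
+
+  openstack loadbalancer list --project engineering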
+
+It's also anticipated that the Operator API needs will vary enough from
+implementation to implementation that no single Operator API will be
+sufficient for the needs of all vendor implementations. (And operators will
+definitely have implementation-specific concerns.) Also, we anticipate that
+most vendors will already have an operator API or other interface which is
+controlled and configured outside the purview of OpenStack in general. As
+such it makes sense for Octavia to have its own operator API / interface.
+
+**Notes:**
+
+* This interface is stateless. State should be managed by the controller, and
+  stored in a highly available database.
+
+
+CONTROLLER
+----------
+This is the component providing all the command and control for the
+amphorae. On the front end, it takes its commands and controls from the LBaaS
+driver.
+
+It should be noted that in later releases of Octavia, the controller functions
+will be split across several components. At this stage we are less concerned
+with how this internal communication will happen, and are most concerned with
+ensuring communication with amphorae, the amphora LB driver, and the Network
+driver are all made as perfect as possible.
+
+Among the controller's responsibilities are:
+
+* Sending configuration and certificate information to an amphora LB
+  driver, which in the reference implementation will be generating
+  configuration files for haproxy and PEM-formatted user certificates and
+  sending these to individual amphorae. Configuration files will be
+  generated from jinja templates kept in a template directory specific to
+  the haproxy driver.
+
+* Processing the configuration updates that need to be applied to individual
+  amphorae, as sent by the amphora LB driver.
+
+* Interfacing with network driver to plumb additional interfaces on the
+  amphorae as necessary.
+
+* Monitoring the health of all amphorae (via a driver interface).
+
+* Receiving and routing certain kinds of notifications originating on the
+  amphorae (ex. "member down")
+
+* This is a stateful service, and should keep its state in a central, highly
+  available database of some sort.
+
+* Respecting colocation / apolocation requirements of loadbalancers as set
+  forth by users.
+
+* Receiving notifications, statistics data and other short, regular messages
+  from amphorae and routing them to the appropriate entity.
+
+* Responding to requests from amphorae for configuration data.
+
+* Responding to requests from the user API or operator API handler driver for
+  data about specific loadbalancers or sub-objects, their status, and
+  statistics.
+
+* Amphora lifecycle management, including interfacing with Nova and Neutron
+  to spin up new amphorae as necessary and handle initial configuration and
+  network plumbing for their LB network interface, and cleaning this up when an
+  amphora is destroyed.
+
+* Maintaining a pool of spare amphorae (ie. spawning new ones as necessary
+  and deleting ones from the pool when we have too much inventory here.)
+
+* Gracefully spinning down "dirty old amphorae"
+
+* Loading and calling configured amphora drivers.
+
+**Notes:**
+
+* Almost all the intelligence around putting together and validating
+  loadbalancer configurations will live here-- the Amphora API is meant to
+  be as simple as possible so that minor feature improvements do not
+  necessarily entail pushing out new amphorae across an entire installation.
+
+* The size of the spare amphora pool should be determined by the flavor
+  being offered. 
+ +* The controller also handles spinning up amphorae in the case of a true + active/standby topology (ie. where the spares pool is effectively zero.) It + should have enough intelligence to communicate to Nova that these amphorae + should not be on the same physical host in this topology. + +* It also handles spinning up new amphorae when one fails in the above + topology. + +* Since spinning up a new amphora is a task that can take a long time, the + controller should spawn a job or child process which handles this highly + asynchronous request. + + +AMPHORA LOAD BALANCER (LB) DRIVER +--------------------------------- +This is the abstraction layer that the controller talks to for communicating +with the amphorae. Since we want to keep Octavia flexible enough so that +certain components (like the amphora) can be replaced by third party +products if the operator so desires, it's important to keep many of the +implementation-specific details contained within driver layers. An amphora +LB driver also gives the operator the ability to have different open-source +amphorae with potentially different capabilities (accessed via different +flavors) which can be handy for, for example, field-testing a new amphora +image. + +The reference implementation for the amphora LB driver will be for the amphora +described below. + +Responsibilities of the amphora LB driver include: + +* Generating configuration files for haproxy and PEM-formatted user + certificates and sending these to individual amphorae. Configuration + files will be generated from jinja templates kept in an template directory + specific to the haproxy driver. + +* Handling all communication to and from amphorae. + + +LB NETWORK +---------- +This is the subnet that controllers will use to communicate with amphorae. +This means that controllers must have connectivity (either layer 2 or routed) +to this subnet in order to function, and vice versa. Since amphorae will be +communicating on it, this means the network is not part of the "undercloud." + +**Notes:** + +* As certain sensitive data (TLS private keys, for example) will be transmitted + over this communication infrastructure, all messages carrying a sensitive + payload should be done via encrypted and authenticated means. Further, we + recommend that messages to and from amphorae be signed regardless of the + sensitivity of their content. + + +AMPHORAE +---------- +This is a Nova VM which actually provides the load balancing services as +configured by the user. Responsibilities of these entities include: + +* Actually accomplishing the load balancing services for user-configured + loadbalancers using haproxy. + +* Sending regular heartbeats (which should include some status information). + +* Responding to specific requests from the controller for very basic + loadbalancer or sub-object status data, including statistics. + +* Doing common high workload, low intelligence tasks that we don't want to + burden the controller with. (ex. Shipping listener logs to a swift data + store, if configured.) + +* Sending "edge" notifications (ie. status changes) to the controller when + members go up and down, when listeners go up and down, etc. 
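+
+As a point of reference for the "simple syntax checks" mentioned in the notes
+that follow, validating an haproxy configuration amounts to no more than the
+following (a sketch; the configuration file path is illustrative)::
+
+  haproxy -c -f /var/lib/octavia/haproxy.cfg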
+ +**Notes:** + +* Each amphora will generally need its own dedicated LB network IP address, + both so that we don't accidentally bind to any IP:port the user wants to use + for loadbalancing services, and so that an amphora that is not yet in use + by any loadbalancer service can still communicate on the network and receive + commands from its controller. Whether this IP address exists on the same + subnet as the loadbalancer services it hosts is immaterial, so long as + front-end and back-end interfaces can be plumbed after an amphora is + launched. + +* Since amphorae speak to controllers in a "trusted" way, it's important to + ensure that users do not have command-line access to the amphorae. In + other words, the amphorae should be a black box from the users' + perspective. + +* Amphorae will be powered using haproxy 1.5 initially. We may decide to use + other software (especially for TLS termination) later on. + +* The "glue scripts" which communicate with the controller should be as + lightweight as possible: Intelligence about how to put together an haproxy + config, for example, should not live on the amphora. Rather, the amphora + should perform simple syntax checks, start / restart haproxy if the checks + pass, and report success/failure of the haproxy restart. + +* With few exceptions, most of the API commands the amphora will ever do + should be safely handled synchronously (ie. nothing should take longer than a + second or two to complete). + +* Connection logs, and other things anticipated to generate a potential large + amount of data should be communicated by the amphora directly to which + ever service is going to consume that data. (for example, if logs are being + shunted off to swift on a nightly basis, the amphora should handle this + directly and not go through the controller.) + + +INTERNAL HEALTH MONITORS +------------------------ +There are actually a few of these, all of which need to be driven by some +daemon(s) which periodically check that heartbeats from monitored entities are +both current and showing "good" status, if applicable. Specifically: + +* Controllers need to be able to monitor the availability and overall health + of amphorae they control. For active amphorae, this check should + happen pretty quickly: About once every 5 seconds. For spare amphorae, + the check can happen much more infrequently (say, once per minute). + +The idea here is that internal health monitors will monitor a periodic +heartbeat coming from the amphorae, and take appropriate action (assuming +these are down) if they fail to check in with a heartbeat frequently enough. +This means that internal health monitors need to take the form of a daemon +which is constantly checking for and processing heartbeat requests (and +updating controller or amphorae statuses, and triggering other events as +appropriate). + + +====================================================== +Some notes on Controller <-> Amphorae communications +====================================================== +In order to keep things as scalable as possible, the thought was that short, +periodic and arguably less vital messages being emitted by the amphora and +associated controller would be done via HMAC-signed UDP, and more vital, more +sensitive, and potentially longer transactional messages would be handled via a +RESTful API on the controller, accessed via bi-directionally authenticated +HTTPS. 
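+
+As a purely conceptual illustration of such HMAC signing (the actual wire
+format and key distribution are out of scope for this document), a digest
+over a payload with a shared key can be produced with standard tooling (the
+payload and key below are placeholders)::
+
+  echo -n 'heartbeat-payload' | openssl dgst -sha256 -hmac 'shared-key'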
+ +Specifically, we should expect the following to happen over UDP: +* heartbeats from the amphora VM to the controller + +* stats data from the amphora to the controller + +* "edge" alert notifications (change in status) from the amphora to the + controller + +* Notification of pending tasks in queue from controller to amphora + +And the following would happen over TCP: +* haproxy / tls certificate configuration changes + +================================================= +Supported Amphora Virtual Appliance Topologies +================================================= +Initially, I propose we support two topologies with version 0.5 of Octavia: + +Option 1: "Single active node + spares pool" +-------------------------------------------- +* This is similar to what HP is doing right now with Libra: Each amphora is + stand-alone with a frequent health-check monitor in place and upon failure, + an already-spun-up amphora is moved from the spares pool and configured to + take the old one's place. This allows for acceptable recovery times on + amphora failure while still remaining efficient, as far as VM resource + utilization is concerned. + +Option 2: "True Active / Standby" +--------------------------------- +* This is similar to what Blue Box is doing right now where amphorae are + deployed in pairs and use corosync / pacemaker to monitor each other's health + and automatically take over (usually in less than 5 seconds) if the "active" + node fails. This provides for the fastest possible recovery time on hardware + failure, but is much less efficient, as far as VM resource utilization is + concerned. + +* In this topology a floating IP address (different from a Neutron floating + IP!) is used to determine which amphora is the "active" one at any given + time. + +* In this topology, both amphorae need to be colocated on the same subnet. + As such a "spares pool" doesn't make sense for this type of layout, unless + all spares are on the same management network with the active nodes. + +We considered also supporting "Single node" topology, but this turns out to be +the same thing as option 1 above with a spares pool size of zero. + +============================ +Supported Network Topologies +============================ +This is actually where things get tricky, as far as amphora plumbing is +concerned. And it only grows trickier when we consider that front-end +connectivity (ie. to the 'loadbalancer' vip_address) and back-end connectivity +(ie. to members of a loadbalancing pool) can be handled in different ways. +Having said this, we can break things down into LB network, front-end and +back-end topology to discuss the various possible permutations here. + +LB Network +---------- +Each amphora needs to have a connection to a LB network. And each controller +needs to have access to this management network (this could be layer-2 or +routed connectivity). Command and control will happen via the amphorae's +LB network IP. + +Front-end topologies +-------------------- +There are generally two ways to handle the amphorae's connection to the +front-end IP address (this is the vip_address of the loadbalancer object): + +**Option 1: Layer-2 connectivity** + +The amphora can have layer-2 connectivity to the neutron network which is +host to the subnet on which the loadbalancer vip_address resides. In this +scenario, the amphora would need to send ARP responses to requests for the +vip_address, and therefore amphorae need to have interfaces plumbed on said +vip_address subnets which participate in ARP. 
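+
+In concrete terms, plumbing and announcing such an address on an amphora
+might amount to something like the following (a sketch assuming the iproute2
+and iputils tools; the interface name and addresses are illustrative)::
+
+  ip addr add 203.0.113.2/24 dev eth1
+  ip link set eth1 up
+  arping -c 3 -U -I eth1 203.0.113.2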
+
+Note that this is somewhat problematic for active / standby virtual appliance
+topologies because the vip_address for a given load balancer effectively
+becomes a highly-available IP address (a true floating VIP), which means that
+on service failover from active to standby, the active amphora needs to
+relinquish all the vip_addresses it has, and the standby needs to take them
+over *and* start up haproxy services. This is OK if a given amphora
+only has a few load balancers, but can lead to several minutes' downtime
+during a graceful failover if there are a dozen or more load balancers on the
+active/standby amphora pair. It's also more risky: The standby node might
+not be able to start up all the haproxy services during such a
+failover. What's more, most types of VRRP-like services which handle floating
+IPs require amphorae to have an additional IP address on the subnet housing
+the floating vip_address in order for the standby amphora to monitor the
+active amphora.
+
+Also note that in this topology, amphorae need an additional virtual network
+interface plumbed whenever they are assigned a new front-end loadbalancer
+vip_address on a subnet to which they don't already have access.
+
+**Option 2: Routed (layer-3) connectivity**
+
+In this layout, static routes are injected into the routing infrastructure
+(Neutron) which essentially allow traffic destined for any given loadbalancer
+vip_address to be routed to an IP address which lives on the amphora. (I
+would recommend this be something other than the LB network IP.) In this
+topology, it's actually important that the loadbalancer vip_address does *not*
+exist in any subnet with potential front-end clients because in order for
+traffic to reach the loadbalancer, it must pass through the routing
+infrastructure (if it did, front-end clients would attempt layer-2
+connectivity to the vip_address).
+
+This topology also works much better for active/standby configurations:
+both the active and standby amphorae can bind to the vip_addresses of all
+their assigned loadbalancer objects on a dummy, non-ARPing interface, both
+can run all haproxy services at the same time, and fencing scripts on the
+amphorae keep the standby server processes from interfering with active
+loadbalancer traffic. Static routes point at a highly available floating
+"routing IP" (managed by some VRRP-like service for just this IP), and
+failover of that IP becomes the trigger for the fencing scripts on the
+amphorae. In this scenario, failovers are both much more reliable and can
+usually be accomplished in under 5 seconds.
+
+Further, in this topology, amphorae do not need any additional virtual
+interfaces plumbed when new front-end loadbalancer vip_addresses are assigned
+to them.
+
+
+Back-end topologies
+-------------------
+There are also two ways that amphorae can potentially talk to back-end
+member IP addresses. Unlike the front-end topologies (where option 1 and
+option 2 are mutually exclusive for most practical purposes), both of these
+types of connectivity can be used on a single amphora, and indeed, within a
+single loadbalancer configuration.
+
+**Option 1: Layer-2 connectivity**
+
+This is layer-2 connectivity to back-end members, and is implied when a member
+object has a subnet_id assigned to it.
In this case, the existence of the +subnet_id implies amphorae need to have layer-2 connectivity to that subnet, +which means they need to have a virtual interface plumbed to it, as well as an +IP address on the subnet. This type of connectivity is useful for "secure" +back-end subnets that exist behind a NATing firewall where PAT is not in use on +the firewall. (In this way it effectively bypasses the firewall.) We anticipate +this will be the most common form of back-end connectivity in use by most +OpenStack users. + +**Option 2: Routed (layer-3) connectivity** + +This is routed connectivity to back-end members. This is implied when a member +object does not have a subnet_id specified. In this topology, it is assumed +that member ip_addresses are reachable through standard neutron routing, and +therefore connections to them can be initiated from the amphora's default +gateway. No new virtual interfaces need to be plumbed for this type of +connectivity to members. + diff --git a/doc/source/contributor/design/version0.5/v0.5-component-design.dot b/doc/source/contributor/design/version0.5/v0.5-component-design.dot new file mode 100644 index 0000000000..d5b08430c9 --- /dev/null +++ b/doc/source/contributor/design/version0.5/v0.5-component-design.dot @@ -0,0 +1,230 @@ +/* +* Copyright 2014 OpenStack Foundation +* +* Licensed under the Apache License, Version 2.0 (the "License"); you may +* not use this file except in compliance with the License. You may obtain +* a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +* License for the specific language governing permissions and limitations +* under the License. +*/ +/* + * Requires graphviz version 2.37 or later to render correctly + */ +digraph G { + labelloc="t"; + label="Octavia component design (v0.5)"; + rankdir=LR; + splines=false; + + subgraph cluster_Neutron { + label="Neutron"; + color="green"; + + NetworkingServices [shape=rectangle label="Networking Services"]; + + subgraph cluster_LBaaS { + label="LBaaS"; + color="yellow"; + + UserAPIHandler [shape=none margin=0 label= + < + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

User API Handler
Driver
>]; + }; + }; + + subgraph cluster_Octavia { + label="Octavia"; + color="blue"; + + NetworkingDriver [shape=rectangle label="Networking Driver"]; + + OperatorAPIHandler [shape=none margin=0 label= + < + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Operator API Handler
>]; + + subgraph cluster_Controller { + label=""; + color=white; + Controller [shape=none margin=0 label= + < + + + + + + + + + +
Controller
Health Monitor
Amphora LB Driver
>]; + + Database [shape=cylinder]; + }; + + subgraph cluster_Amphoras2 { + label=""; + color="white"; + id="AmphoraMVs2"; + Amphora3 [shape=none margin=0 label= + < + + + +
Amphora
>]; + Amphora4 [shape=none margin=0 label= + < + + + +
Amphora
>]; + Amphora5 [shape=none margin=0 label= + < + + + +
Amphora
>]; + }; + + subgraph cluster_Network { + label="LB Network"; + color="gray"; + bgcolor="gray"; + + Stuff [style=invis shape=none margin=0 label= + < + + + +
Stuff
>]; + LBNetwork [shape=none margin=0 label=""]; + Things [style=invis shape=none margin=0 label= + < + + + +
Things

>]; + }; + + OperatorAPIHandler -> Controller [dir=none]; + Controller -> LBNetwork [style=invis]; + LBNetwork -> {Amphora3 Amphora4 Amphora5} [style=invis]; + Controller -> {Amphora3 Amphora4 Amphora5} [constraint=false dir=none]; + Controller -> NetworkingDriver [constraint=false dir=none]; + Controller -> Database [constraint=false dir=none]; + + }; + + UserAPIHandler:driver -> Controller [dir=none]; + NetworkingServices -> NetworkingDriver [dir=none]; + +} diff --git a/doc/source/contributor/devref/erd.rst b/doc/source/contributor/devref/erd.rst new file mode 100644 index 0000000000..e4a3c530a7 --- /dev/null +++ b/doc/source/contributor/devref/erd.rst @@ -0,0 +1,24 @@ + +=================================== +Octavia Entity Relationship Diagram +=================================== + +Below is the current Octavia database data model. + +* Solid stars are primary key columns. +* Hollow stars are foreign key columns. +* Items labeled as "PROPERTY" are data model relationships and are not + present in the database. + +.. only:: html + + Click to enlarge the diagram. + + .. image:: erd.svg + :width: 660px + :target: ../../_images/erd.svg + +.. only:: latex + + .. image:: erd.svg + :width: 660px diff --git a/doc/source/contributor/devref/flows.rst b/doc/source/contributor/devref/flows.rst new file mode 100644 index 0000000000..47f18824da --- /dev/null +++ b/doc/source/contributor/devref/flows.rst @@ -0,0 +1,23 @@ +======================== +Octavia Controller Flows +======================== + +Octavia uses OpenStack TaskFlow to orchestrate the actions the Octavia +controller needs to take while managing load balancers. + +This document is meant as a reference for the key flows used in the +Octavia controller. + +The following are flow diagrams for the **amphora V2** driver. + +.. toctree:: + :maxdepth: 1 + + flow_diagrams_v2/AmphoraFlows.rst + flow_diagrams_v2/HealthMonitorFlows.rst + flow_diagrams_v2/L7PolicyFlows.rst + flow_diagrams_v2/L7RuleFlows.rst + flow_diagrams_v2/ListenerFlows.rst + flow_diagrams_v2/LoadBalancerFlows.rst + flow_diagrams_v2/MemberFlows.rst + flow_diagrams_v2/PoolFlows.rst diff --git a/doc/source/contributor/devref/gmr.rst b/doc/source/contributor/devref/gmr.rst new file mode 100644 index 0000000000..5a689fbeed --- /dev/null +++ b/doc/source/contributor/devref/gmr.rst @@ -0,0 +1,88 @@ +.. + Copyright (c) 2015 OpenStack Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Guru Meditation Reports +======================= + +Octavia contains a mechanism whereby developers and system administrators can +generate a report about the state of a running Octavia executable. This report +is called a *Guru Meditation Report* (*GMR* for short). + +Generating a GMR +---------------- + +A *GMR* can be generated by sending the *USR2* signal to any Octavia process +with support (see below). The *GMR* will then be outputted as standard error +for that particular process. 
+
+For example, suppose that ``octavia-api`` has process id ``8675``, and was run
+with ``2>/var/log/octavia/octavia-api-err.log``. Then, ``kill -USR2 8675``
+will trigger the Guru Meditation report to be printed to
+``/var/log/octavia/octavia-api-err.log``.
+
+Structure of a GMR
+------------------
+
+The *GMR* is designed to be extensible; any particular executable may add its
+own sections. However, the base *GMR* consists of several sections:
+
+Package
+  Shows information about the package to which this process belongs, including
+  version information.
+
+Threads
+  Shows stack traces and thread ids for each of the threads within this
+  process.
+
+Green Threads
+  Shows stack traces for each of the green threads within this process (green
+  threads don't have thread ids).
+
+Configuration
+  Lists all the configuration options currently accessible via the CONF object
+  for the current process.
+
+Adding Support for GMRs to New Executables
+------------------------------------------
+
+Adding support for a *GMR* to a given executable is fairly easy.
+
+First import the module:
+
+.. code-block:: python
+
+    from oslo_reports import guru_meditation_report as gmr
+    from octavia import version
+
+Then, register any additional sections (optional):
+
+.. code-block:: python
+
+    gmr.TextGuruMeditation.register_section('Some Special Section',
+                                            some_section_generator)
+
+Finally (under main), before running the "main loop" of the executable
+(usually ``service.server(server)`` or something similar), register the *GMR*
+hook:
+
+.. code-block:: python
+
+    gmr.TextGuruMeditation.setup_autorun(version)
+
+Extending the GMR
+-----------------
+
+As mentioned above, additional sections can be added to the GMR for a
+particular executable. For more information, see the inline documentation
+under :mod:`oslo.reports`.
diff --git a/doc/source/contributor/guides/dev-quick-start.rst b/doc/source/contributor/guides/dev-quick-start.rst
new file mode 100644
index 0000000000..c99e0bc121
--- /dev/null
+++ b/doc/source/contributor/guides/dev-quick-start.rst
@@ -0,0 +1,463 @@
+..
+      Copyright (c) 2016 IBM
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+======================================
+Developer / Operator Quick Start Guide
+======================================
+This document is intended for developers and operators. For an end-user guide,
+please see the end-user quick-start guide and cookbook in this documentation
+repository.
+
+
+Running Octavia in devstack
+===========================
+
+tl;dr
+-----
+* 8GB RAM minimum
+* "vmx" or "svm" in ``/proc/cpuinfo``
+* Ubuntu 18.04 or later
+* On that host, copy and run as root:
+  ``octavia/devstack/contrib/new-octavia-devstack.sh``
+
+
+System requirements
+-------------------
+Octavia in devstack with a default (non-HA) configuration will deploy one
+amphora VM per loadbalancer deployed. The current default amphora image also
+requires at least 1GB of RAM to run effectively.
As such, it is important that
+your devstack environment has enough resources dedicated to it to run all its
+necessary components. For most devstack environments, the limiting resource
+will be RAM. At the present time, we recommend at least 12GB of RAM for the
+standard devstack defaults, or 8GB of RAM if cinder and swift are disabled.
+More is recommended if you also want to run a couple of application server VMs
+(so that Octavia has something to load balance within your devstack
+environment).
+
+Also, because the current implementation of Octavia delivers load balancing
+services using amphorae that run as Nova virtual machines, it is effectively
+mandatory to enable nested virtualization. The software will work with
+software-emulated CPUs, but it will be unusably slow. This means you should
+make sure the BIOS of the systems you're running your devstack on has
+virtualization features enabled (Intel VT-x, AMD-V, etc.), and that the
+virtualization software you're using exposes these features to the guest VM
+(sometimes called nested virtualization). For more information, see:
+`Configure DevStack with KVM-based Nested Virtualization
+`__
+
+The devstack environment we recommend should be running Ubuntu Linux 18.04 or
+later. These instructions may work for other Linux operating systems or
+environments. However, most people doing development on Octavia are using
+Ubuntu for their test environment, so you will probably have the easiest time
+getting your devstack working with that OS.
+
+
+Deployment
+----------
+1. Deploy an Ubuntu 18.04 or later Linux host with at least 8GB of RAM. (This
+   can be a VM, but again, make sure you have nested virtualization features
+   enabled in your BIOS and virtualization software.)
+2. Copy ``devstack/contrib/new-octavia-devstack.sh`` from this source
+   repository onto that host.
+3. Run ``new-octavia-devstack.sh`` as root.
+4. Deploy loadbalancers, listeners, etc.
+
+
+Running Octavia in production
+=============================
+
+Notes
+-----
+
+Disclaimers
+___________
+This document is not a definitive guide for deploying Octavia in every
+production environment. There are many ways to deploy Octavia depending on the
+specifics and limitations of your situation. For example, in our experience,
+large production environments often have restrictions, hidden "features", or
+other elements in the network topology which mean the default Neutron
+networking stack (with which Octavia was designed to operate) must be modified
+or replaced with a custom networking solution. This may also mean that for
+your particular environment, you may need to write your own custom networking
+driver to plug into Octavia. Obviously, instructions for doing this are beyond
+the scope of this document.
+
+We hope this document provides the cloud operator or distribution creator with
+a basic understanding of how the Octavia components fit together practically.
+Through this, it should become more obvious how components of Octavia can be
+divided or duplicated across physical hardware in a production cloud
+environment to aid in achieving scalability and resiliency for the Octavia
+load balancing system.
+
+In the interest of keeping this guide somewhat high-level and avoiding
+obsolescence or operator/distribution-specific environment assumptions by
+specifying exact commands that should be run to accomplish the tasks below, we
+will instead just describe what needs to be done and leave it to the cloud
+operator or distribution creator to "do the right thing" to accomplish the
+task for their environment. If you need guidance on specific commands to run
+to accomplish the tasks described below, we recommend reading through the
+plugin.sh script in the devstack subdirectory of this project. The devstack
+plugin exercises all the essential components of Octavia in the right order,
+and this guide will mostly be an elaboration of this process.
+
+
+Environment Assumptions
+_______________________
+The scope of this guide is to provide a basic overview of setting up all
+the components of Octavia in a production environment, assuming that the
+default in-tree drivers and components (including a "standard" Neutron
+install) are going to be used.
+
+For the purposes of this guide, we will therefore assume the following core
+components have already been set up for your production OpenStack environment:
+
+* Nova
+* Neutron
+* Glance
+* Barbican (if TLS offloading functionality is enabled)
+* Keystone
+* Rabbit
+* MySQL
+
+
+Production Deployment Walkthrough
+---------------------------------
+
+Create Octavia User
+___________________
+By default, Octavia will use the 'octavia' user for keystone authentication,
+and the admin user for interactions with all other services.
+
+You must:
+
+* Create the 'octavia' user.
+* Add the 'admin' role to this user.
+
+
+Load Balancer Network Configuration
+___________________________________
+Octavia makes use of an "LB Network" exclusively as a management network that
+the controller uses to talk to amphorae and vice versa. All the amphorae that
+Octavia deploys will have interfaces and IP addresses on this network.
+Therefore, it's important that the subnet deployed on this network be
+sufficiently large to allow for the maximum number of amphorae and controllers
+likely to be deployed throughout the lifespan of the cloud installation.
+
+IPv4 subnets (for example, 172.16.0.0/12) are used by default for the LB
+Network, but IPv6 subnets can be used as well.
+
+The LB Network is isolated from tenant networks on the amphorae by means of
+network namespaces on the amphorae. Therefore, operators need not be concerned
+about overlapping subnet ranges with tenant networks.
+
+You must also create a Neutron security group which will be applied to
+amphorae created on the LB network. It needs to allow amphorae to send UDP
+heartbeat packets to the health manager (by default, UDP port 5555), and
+ingress on the amphora's API (by default, TCP port 9443). It can also be
+helpful to allow SSH access to the amphorae from the controller for
+troubleshooting purposes (i.e. TCP port 22), though this is not strictly
+necessary in production environments.
+
+Amphorae will send periodic health checks to the controller's health manager.
+Any firewall protecting the interface on which the health manager listens must
+allow these packets from amphorae on the LB Network (by default, UDP port
+5555).
+
+Finally, you need to add routing or interfaces to this network such that the
+Octavia controller (which will be described below) is able to communicate with
+hosts on this network.
This also implies you should have some idea where
+you're going to run the Octavia controller components.
+
+You must:
+
+* Create the 'lb-mgmt-net'.
+* Assign the 'lb-mgmt-net' to the admin tenant.
+* Create a subnet and assign it to the 'lb-mgmt-net'.
+* Create a neutron security group for amphorae created on the 'lb-mgmt-net'
+  which allows appropriate access to the amphorae (see the illustrative
+  openstacksdk sketch below).
+* Update firewall rules on the host running the octavia health manager to
+  allow health check messages from amphorae.
+* Add appropriate routing to / from the 'lb-mgmt-net' such that egress is
+  allowed, and the controller (to be created later) can talk to hosts on this
+  network.
+
+
+Create Amphora Image
+____________________
+Octavia deploys amphorae based on a virtual machine disk image. By default we
+use the OpenStack diskimage-builder project for this. Scripts to accomplish
+this are within the diskimage-create directory of this repository. In addition
+to creating the disk image, configure a Nova flavor to use for amphorae, and
+upload the disk image to glance.
+
+You must:
+
+* Create an amphora disk image using OpenStack diskimage-builder.
+* Create a Nova flavor for the amphorae.
+* Add the amphora disk image to glance.
+* Tag the above glance disk image with 'amphora'.
+
+
+Install Octavia Controller Software
+___________________________________
+This seems somewhat obvious, but the important things to note here are that
+you should put this somewhere on the network where it will have access to the
+database (to be initialized below), the oslo messaging system, and the LB
+network. Octavia uses the standard Python setuptools, so installation of the
+software itself should be straightforward.
+
+Running multiple instances of the individual Octavia controller components on
+separate physical hosts is recommended in order to provide scalability and
+availability of the controller software.
+
+The Octavia controller presently consists of several components which may be
+split across several physical machines. For the 4.0 release of Octavia, the
+important (and potentially separable) components are the controller worker,
+housekeeper, health manager, and API controller. Please see the component
+diagrams elsewhere in this repository's documentation for detailed
+descriptions of each. Please use the following table for hints on which
+controller components need access to outside resources:
+
++-------------------+----------------------------------------+
+| **Component**     | **Resource**                           |
++-------------------+------------+----------+----------------+
+|                   | LB Network | Database | OSLO messaging |
++===================+============+==========+================+
+| API               | No         | Yes      | Yes            |
++-------------------+------------+----------+----------------+
+| controller worker | Yes        | Yes      | Yes            |
++-------------------+------------+----------+----------------+
+| health manager    | Yes        | Yes      | No             |
++-------------------+------------+----------+----------------+
+| housekeeper       | Yes        | Yes      | No             |
++-------------------+------------+----------+----------------+
+
+In addition to talking to each other via Oslo messaging, various controller
+components must also communicate with other OpenStack components, like nova,
+neutron, barbican, etc., via their APIs.
+
+You must:
+
+* Pick appropriate host(s) to run the Octavia components.
+* Install the dependencies for Octavia.
+* Install the Octavia software.
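+
+To make the Load Balancer Network Configuration steps above more concrete,
+here is a minimal, illustrative sketch using the openstacksdk Python library.
+It is not part of Octavia itself; the cloud name, the network and security
+group names, and the CIDR are assumptions for this example, so substitute
+values appropriate to your environment:
+
+.. code-block:: python
+
+    # Illustrative sketch: create the LB management network, subnet, and
+    # amphora security group. The cloud name "mycloud", the resource names,
+    # and the CIDR below are assumptions, not Octavia requirements.
+    import openstack
+
+    conn = openstack.connect(cloud='mycloud')  # admin credentials
+
+    lb_net = conn.network.create_network(name='lb-mgmt-net')
+    conn.network.create_subnet(network_id=lb_net.id, name='lb-mgmt-subnet',
+                               ip_version=4, cidr='172.16.0.0/12')
+
+    secgroup = conn.network.create_security_group(
+        name='lb-mgmt-sec-grp',
+        description='Security group for Octavia amphorae')
+    # Allow the controller to reach the amphora REST API (TCP 9443).
+    conn.network.create_security_group_rule(
+        security_group_id=secgroup.id, direction='ingress',
+        protocol='tcp', port_range_min=9443, port_range_max=9443)
+    # Optional: allow SSH from the controller for troubleshooting.
+    conn.network.create_security_group_rule(
+        security_group_id=secgroup.id, direction='ingress',
+        protocol='tcp', port_range_min=22, port_range_max=22)
+
+Note that the UDP heartbeat traffic to the health manager (port 5555) is
+egress from the amphorae, which the default security group egress rules
+typically already permit; the firewall on the health manager host still needs
+to allow it in, as described above.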
+ + +Create Octavia Keys and Certificates +____________________________________ +Octavia presently allows for one method for the controller to communicate with +amphorae: The amphora REST API. Both amphora API and Octavia controller do +bi-directional certificate-based authentication in order to authenticate and +encrypt communication. You must therefore create appropriate TLS certificates +which will be used for key signing, authentication, and encryption. There is a +detailed :doc:`../../admin/guides/certificates` to guide you through this +process. + +Please note that certificates created with this guide may not meet your +organization's security policies, since they are self-signed certificates with +arbitrary bit lengths, expiration dates, etc. Operators should obviously +follow their own security guidelines in creating these certificates. + +In addition to the above, it can sometimes be useful for cloud operators to log +into running amphorae to troubleshoot problems. The standard method for doing +this is to use SSH from the host running the controller worker. In order to do +this, you must create an SSH public/private key pair specific to your cloud +(for obvious security reasons). You must add this keypair to nova. You must +then also update octavia.conf with the keypair name you used when adding it to +nova so that amphorae are initialized with it on boot. + +See the Troubleshooting Tips section below for an example of how an operator +can SSH into an amphora. + +You must: + +* Create TLS certificates for communicating with the amphorae. +* Create SSH keys for communicating with the amphorae. +* Add the SSH keypair to nova. + + +Configuring Octavia +___________________ +Going into all of the specifics of how Octavia can be configured is actually +beyond the scope of this document. For full documentation of this, please see +the configuration reference: :doc:`../../configuration/configref` + +A configuration template can be found in ``etc/octavia.conf`` in this +repository. + +It's also important to note that this configuration file will need to be +updated with UUIDs of the LB network, amphora security group, amphora image +tag, SSH key path, TLS certificate path, database credentials, etc. + +At a minimum, the configuration should specify the following, beyond the +defaults. 
Your specific environment may require more than this: + ++-----------------------+-------------------------------+ +| Section | Configuration parameter | ++=======================+===============================+ +| DEFAULT | transport_url | ++-----------------------+-------------------------------+ +| database | connection | ++-----------------------+-------------------------------+ +| certificates | ca_certificate | ++-----------------------+-------------------------------+ +| certificates | ca_private_key | ++-----------------------+-------------------------------+ +| certificates | ca_private_key_passphrase | ++-----------------------+-------------------------------+ +| controller_worker | amp_boot_network_list | ++-----------------------+-------------------------------+ +| controller_worker | amp_flavor_id | ++-----------------------+-------------------------------+ +| controller_worker | amp_image_owner_id | ++-----------------------+-------------------------------+ +| controller_worker | amp_image_tag | ++-----------------------+-------------------------------+ +| controller_worker | amp_secgroup_list | ++-----------------------+-------------------------------+ +| controller_worker | amp_ssh_key_name [#]_ | ++-----------------------+-------------------------------+ +| controller_worker | amphora_driver | ++-----------------------+-------------------------------+ +| controller_worker | compute_driver | ++-----------------------+-------------------------------+ +| controller_worker | loadbalancer_topology | ++-----------------------+-------------------------------+ +| controller_worker | network_driver | ++-----------------------+-------------------------------+ +| haproxy_amphora | client_cert | ++-----------------------+-------------------------------+ +| haproxy_amphora | server_ca | ++-----------------------+-------------------------------+ +| health_manager | bind_ip | ++-----------------------+-------------------------------+ +| health_manager | controller_ip_port_list | ++-----------------------+-------------------------------+ +| health_manager | heartbeat_key | ++-----------------------+-------------------------------+ +| keystone_authtoken | admin_password | ++-----------------------+-------------------------------+ +| keystone_authtoken | admin_tenant_name | ++-----------------------+-------------------------------+ +| keystone_authtoken | admin_user | ++-----------------------+-------------------------------+ +| keystone_authtoken | www_authenticate_uri | ++-----------------------+-------------------------------+ +| keystone_authtoken | auth_version | ++-----------------------+-------------------------------+ +| oslo_messaging | topic | ++-----------------------+-------------------------------+ +| oslo_messaging_rabbit | rabbit_host | ++-----------------------+-------------------------------+ +| oslo_messaging_rabbit | rabbit_userid | ++-----------------------+-------------------------------+ +| oslo_messaging_rabbit | rabbit_password | ++-----------------------+-------------------------------+ + +.. [#] This is technically optional, but extremely useful for troubleshooting. + +You must: + +* Create or update ``/etc/octavia/octavia.conf`` appropriately. + + +Initialize Octavia Database +___________________________ +This is controlled through alembic migrations under the octavia/db directory in +this repository. A tool has been created to aid in the initialization of the +octavia database. 
This should be available under
+``/usr/local/bin/octavia-db-manage`` on the host on which the octavia
+controller worker is installed. Note that this tool looks at the
+``/etc/octavia/octavia.conf`` file for its database credentials, so
+initializing the database must happen after Octavia is configured.
+
+It's also important to note here that all of the components of the Octavia
+controller will need direct access to the database (including the API
+handler), so you must ensure these components are able to communicate with
+whichever host is housing your database.
+
+You must:
+
+* Create database credentials for Octavia.
+* Add these to the ``/etc/octavia/octavia.conf`` file.
+* Run ``/usr/local/bin/octavia-db-manage upgrade head`` on the controller
+  worker host to initialize the octavia database.
+
+
+Launching the Octavia Controller
+________________________________
+We recommend using upstart / systemd scripts to ensure the components of the
+Octavia controller are all started and kept running. Of course, it doesn't
+hurt to first run these manually to ensure configuration and communication
+are working between all the components.
+
+You must:
+
+* Make sure each Octavia controller component is started appropriately.
+
+
+Install Octavia extension in Horizon
+____________________________________
+This isn't strictly necessary for all cloud installations; however, if yours
+makes use of the Horizon GUI interface for tenants, it is probably also a good
+idea to make sure that it is configured with the Octavia extension.
+
+You may:
+
+* Install the octavia GUI extension in Horizon.
+
+
+Test deployment
+_______________
+If all of the above instructions have been followed, it should now be possible
+to deploy load balancing services using the OpenStack CLI,
+communicating with the Octavia v2 API.
+
+Example:
+
+::
+
+    # openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet
+    # openstack loadbalancer show lb1
+    # openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 lb1
+
+Upon executing the above, log files should indicate that an amphora is
+deployed to house the load balancer, and that this load balancer is further
+modified to include a listener. The amphora should be visible to the octavia
+or admin tenant using the ``openstack server list`` command, and the listener
+should respond on the load balancer's IP on port 80 (with an error 503 in this
+case, since no pool or members have been defined yet, but this is usually
+enough to see that the Octavia load balancing system is working). For more
+information on configuring load balancing services as a tenant, please see the
+end-user quick-start guide and cookbook.
+
+
+Troubleshooting Tips
+====================
+The troubleshooting hints in this section are meant primarily for developers
+or operators troubleshooting underlying Octavia components, rather than
+end-users or tenants troubleshooting the load balancing service itself.
+
+
+SSH into Amphorae
+-----------------
+If you are using the reference amphora image, it may be helpful to log into
+running amphorae when troubleshooting service problems. To do this, first
+discover the ``lb_network_ip`` address of the amphora you would like to SSH
+into by looking in the ``amphora`` table in the octavia database.
Then from the +host housing the controller worker, run: + +:: + + ssh -i /etc/octavia/.ssh/octavia_ssh_key ubuntu@[lb_network_ip] diff --git a/doc/source/contributor/guides/providers.rst b/doc/source/contributor/guides/providers.rst new file mode 100644 index 0000000000..76951856a3 --- /dev/null +++ b/doc/source/contributor/guides/providers.rst @@ -0,0 +1,2244 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +================================= +Provider Driver Development Guide +================================= +This document is intended as a guide for developers creating provider drivers +for the Octavia API. This guide is intended to be an up to date version of the +`provider driver specification`_ previously approved. + +.. _provider driver specification: ../specs/version1.1/enable-provider-driver.html + +How Provider Drivers Integrate +============================== +Available drivers will be enabled by entries in the Octavia configuration file. +Drivers will be loaded via stevedore and Octavia will communicate with drivers +through a standard class interface defined below. Most driver functions will be +asynchronous to Octavia, and Octavia will provide a library of functions +that give drivers a way to update status and statistics. Functions that are +synchronous are noted below. + +Octavia API functions not listed here will continue to be handled by the +Octavia API and will not call into the driver. Examples would be show, list, +and quota requests. + +In addition, drivers may provide a provider agent that the Octavia driver-agent +will launch at start up. This is a long-running process that is intended to +support the provider driver. + +Driver Entry Points +------------------- + +Provider drivers will be loaded via +`stevedore `_. Drivers will +have an entry point defined in their setup tools configuration using the +Octavia driver namespace "octavia.api.drivers". This entry point name will +be used to enable the driver in the Octavia configuration file and as the +"provider" parameter users specify when creating a load balancer. An example +for the octavia reference driver would be: + +.. code-block:: python + + amphora = octavia.api.drivers.amphora_driver.driver:AmphoraProviderDriver + +In addition, provider drivers may provide a provider agent also defined by a +setup tools entry point. The provider agent namespace is +"octavia.driver_agent.provider_agents". This will be called once, at Octavia +driver-agent start up, to launch a long-running process. Provider agents must +be enabled in the Octavia configuration file. An example provider agent +entry point would be: + +.. code-block:: python + + amphora_agent = octavia.api.drivers.amphora_driver.agent:AmphoraProviderAgent + + +Stable Provider Driver Interface +================================ + +Provider drivers should only access the following Octavia APIs. All other +Octavia APIs are not considered stable or safe for provider driver use and +may change at any time. 
+ +* octavia_lib.api.drivers.data_models +* octavia_lib.api.drivers.driver_lib +* octavia_lib.api.drivers.exceptions +* octavia_lib.api.drivers.provider_base +* octavia_lib.common.constants + +Octavia Provider Driver API +=========================== + +Provider drivers will be expected to support the full interface described +by the Octavia API, currently v2.0. If a driver does not implement an API +function, drivers should fail a request by raising a ``NotImplementedError`` +exception. If a driver implements a function but does not support a particular +option passed in by the caller, the driver should raise an +``UnsupportedOptionError``. + +It is recommended that drivers use the +`jsonschema `_ package or +`voluptuous `_ to validate the +request against the current driver capabilities. + +See the `Exception Model`_ below for more details. + +.. note:: Driver developers should refer to the official + `Octavia API reference`_ document for details of the fields and + expected outcome of these calls. + +.. _Octavia API reference: https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +Load balancer +------------- + +Create +^^^^^^ + +Creates a load balancer. + +Octavia will pass in the load balancer object with all requested settings. + +The load balancer will be in the ``PENDING_CREATE`` provisioning_status and +``OFFLINE`` operating_status when it is passed to the driver. The driver +will be responsible for updating the provisioning status of the load +balancer to either ``ACTIVE`` if successfully created, or ``ERROR`` if not +created. + +The Octavia API will accept and do basic API validation of the create +request from the user. The load balancer python object representing the +request body will be passed to the driver create method as it was received +and validated with the following exceptions: + +1. The provider will be removed as this is used for driver selection. +2. The flavor will be expanded from the provided ID to be the full + dictionary representing the flavor metadata. + +**Load balancer object** + +As of the writing of this specification the create load balancer object may +contain the following: + ++-------------------+--------+-----------------------------------------------+ +| Name | Type | Description | ++===================+========+===============================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-------------------+--------+-----------------------------------------------+ +| description | string | A human-readable description for the resource.| ++-------------------+--------+-----------------------------------------------+ +| flavor | dict | The flavor keys and values. | ++-------------------+--------+-----------------------------------------------+ +| availability_zone | dict | The availability zone keys and values. | ++-------------------+--------+-----------------------------------------------+ +| listeners | list | A list of `Listener objects`_. | ++-------------------+--------+-----------------------------------------------+ +| loadbalancer_id | string | ID of load balancer to create. | ++-------------------+--------+-----------------------------------------------+ +| name | string | Human-readable name of the resource. | ++-------------------+--------+-----------------------------------------------+ +| pools | list | A list of `Pool object`_. | ++-------------------+--------+-----------------------------------------------+ +| project_id | string | ID of the project owning this resource. 
| ++-------------------+--------+-----------------------------------------------+ +| vip_address | string | The IP address of the Virtual IP (VIP). | ++-------------------+--------+-----------------------------------------------+ +| vip_network_id | string | The ID of the network for the VIP. | ++-------------------+--------+-----------------------------------------------+ +| vip_port_id | string | The ID of the VIP port. | ++-------------------+--------+-----------------------------------------------+ +| vip_qos_policy_id | string | The ID of the qos policy for the VIP. | ++-------------------+--------+-----------------------------------------------+ +| vip_subnet_id | string | The ID of the subnet for the VIP. | ++-------------------+--------+-----------------------------------------------+ +| vip_sg_ids | list | The list of Neutron Security Group IDs of the | +| | | VIP port (optional) | ++-------------------+--------+-----------------------------------------------+ + +The driver is expected to validate that the driver supports the request +and raise an exception if the request cannot be accepted. + +**VIP port creation** + +Some provider drivers will want to create the Neutron port for the VIP, and +others will want Octavia to create the port instead. In order to support both +use cases, the create_vip_port() method will ask provider drivers to create +a VIP port. If the driver expects Octavia to create the port, the driver +will raise a NotImplementedError exception. Octavia will call this function +before calling loadbalancer_create() in order to determine if it should +create the VIP port. Octavia will call create_vip_port() with a loadbalancer +ID and a partially defined VIP dictionary. Provider drivers that support +port creation will create the port and return a fully populated VIP +dictionary. + +**VIP dictionary** + ++-----------------+--------+-----------------------------------------------+ +| Name | Type | Description | ++=================+========+===============================================+ +| project_id | string | ID of the project owning this resource. | ++-----------------+--------+-----------------------------------------------+ +| vip_address | string | The IP address of the Virtual IP (VIP). | ++-----------------+--------+-----------------------------------------------+ +| vip_network_id | string | The ID of the network for the VIP. | ++-----------------+--------+-----------------------------------------------+ +| vip_port_id | string | The ID of the VIP port. | ++-----------------+--------+-----------------------------------------------+ +|vip_qos_policy_id| string | The ID of the qos policy for the VIP. | ++-----------------+--------+-----------------------------------------------+ +| vip_subnet_id | string | The ID of the subnet for the VIP. | ++-----------------+--------+-----------------------------------------------+ +| vip_sg_ids | list | The list of Neutron Security Group IDs of the | +| | | VIP port (optional) | ++-----------------+--------+-----------------------------------------------+ + +**Creating a Fully Populated Load Balancer** + +If the "listener" option is specified, the provider driver will iterate +through the list and create all of the child objects in addition to +creating the load balancer instance. + +Delete +^^^^^^ + +Removes an existing load balancer. + +Octavia will pass in the load balancer object and cascade boolean as +parameters. 
+ +The load balancer will be in the ``PENDING_DELETE`` provisioning_status when +it is passed to the driver. The driver will notify Octavia that the delete +was successful by setting the provisioning_status to ``DELETED``. If the +delete failed, the driver will update the provisioning_status to ``ERROR``. + +The API includes an option for cascade delete. When cascade is set to +True, the provider driver will delete all child objects of the load balancer. + +Failover +^^^^^^^^ + +Performs a failover of a load balancer. + +Octavia will pass in the load balancer ID as a parameter. + +The load balancer will be in the ``PENDING_UPDATE`` provisioning_status when +it is passed to the driver. The driver will update the provisioning_status +of the load balancer to either ``ACTIVE`` if successfully failed over, or +``ERROR`` if not failed over. + +Failover can mean different things in the context of a provider driver. For +example, the Octavia driver replaces the current amphora(s) with another +amphora. For another provider driver, failover may mean failing over from +an active system to a standby system. + +Update +^^^^^^ + +Modifies an existing load balancer using the values supplied in the load +balancer object. + +Octavia will pass in the original load balancer object which is the baseline +for the update, and a load balancer object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update load balancer object may +contain the following: + ++-----------------+--------+-----------------------------------------------+ +| Name | Type | Description | ++=================+========+===============================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------+--------+-----------------------------------------------+ +| description | string | A human-readable description for the resource.| ++-----------------+--------+-----------------------------------------------+ +| loadbalancer_id | string | ID of load balancer to update. | ++-----------------+--------+-----------------------------------------------+ +| name | string | Human-readable name of the resource. | ++-----------------+--------+-----------------------------------------------+ +|vip_qos_policy_id| string | The ID of the qos policy for the VIP. | ++-----------------+--------+-----------------------------------------------+ + +The load balancer will be in the ``PENDING_UPDATE`` provisioning_status when +it is passed to the driver. The driver will update the provisioning_status +of the load balancer to either ``ACTIVE`` if successfully updated, or +``ERROR`` if the update was not successful. + +The driver is expected to validate that the driver supports the request. +The method will then return or raise an exception if the request cannot be +accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + + def create_vip_port(self, loadbalancer_id, vip_dictionary): + """Creates a port for a load balancer VIP. + + If the driver supports creating VIP ports, the driver will create a + VIP port and return the vip_dictionary populated with the vip_port_id. + If the driver does not support port creation, the driver will raise + a NotImplementedError. + + :param: loadbalancer_id (string): ID of loadbalancer. + :param: vip_dictionary (dict): The VIP dictionary. + :returns: VIP dictionary with vip_port_id. 
+ :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support creating + VIP ports. + """ + raise NotImplementedError() + + def loadbalancer_create(self, loadbalancer): + """Creates a new load balancer. + + :param loadbalancer (object): The load balancer object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support create. + :raises UnsupportedOptionError: The driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def loadbalancer_delete(self, loadbalancer, cascade=False): + """Deletes a load balancer. + + :param loadbalancer (object): The load balancer object. + :param cascade (bool): If True, deletes all child objects (listeners, + pools, etc.) in addition to the load balancer. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def loadbalancer_failover(self, loadbalancer_id): + """Performs a fail over of a load balancer. + + :param loadbalancer_id (string): ID of the load balancer to failover. + :return: Nothing if the failover request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises: NotImplementedError if driver does not support request. + """ + raise NotImplementedError() + + def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): + """Updates a load balancer. + + :param old_loadbalancer (object): The baseline load balancer object. + :param new_loadbalancer (object): The updated load balancer object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support request. + :raises UnsupportedOptionError: The driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Listener +-------- + +Create +^^^^^^ + +Creates a listener for a load balancer. + +Octavia will pass in the listener object with all requested settings. + +The listener will be in the ``PENDING_CREATE`` provisioning_status and +``OFFLINE`` operating_status when it is passed to the driver. The driver +will be responsible for updating the provisioning status of the listener +to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. + +The Octavia API will accept and do basic API validation of the create +request from the user. The listener python object representing the +request body will be passed to the driver create method as it was received +and validated with the following exceptions: + +1. The project_id will be removed, if present, as this field is now +deprecated. The listener will inherit the project_id from the parent +load balancer. +2. The default_tls_container_ref will be expanded and provided to the driver +in PEM format. +3. The sni_container_refs will be expanded and provided to the driver in +PEM format. + +.. 
_Listener objects: + +**Listener object** + +As of the writing of this specification the create listener object may +contain the following: + ++------------------------------+--------+-------------------------------------+ +| Name | Type | Description | ++==============================+========+=====================================+ +| admin_state_up | bool | Admin state: True if up, False if | +| | | down. | ++------------------------------+--------+-------------------------------------+ +| client_authentication | string | The TLS client authentication mode. | +| | | One of the options ``NONE``, | +| | | ``OPTIONAL`` or ``MANDATORY``. | ++------------------------------+--------+-------------------------------------+ +| client_ca_tls_container_data | string | A PEM encoded certificate. | ++------------------------------+--------+-------------------------------------+ +| client_ca_tls_container_ref | string | The reference to the secrets | +| | | container. | ++------------------------------+--------+-------------------------------------+ +| client_crl_container_data | string | A PEM encoded CRL file. | ++------------------------------+--------+-------------------------------------+ +| client_crl_container_ref | string | The reference to the secrets | +| | | container. | ++------------------------------+--------+-------------------------------------+ +| connection_limit | int | The max number of connections | +| | | permitted for this listener. Default| +| | | is -1, which is infinite | +| | | connections. | ++------------------------------+--------+-------------------------------------+ +| default_pool | object | A `Pool object`_. | ++------------------------------+--------+-------------------------------------+ +| default_pool_id | string | The ID of the pool used by the | +| | | listener if no L7 policies match. | ++------------------------------+--------+-------------------------------------+ +| default_tls_container_data | dict | A `TLS container`_ dict. | ++------------------------------+--------+-------------------------------------+ +| default_tls_container_refs | string | The reference to the secrets | +| | | container. | ++------------------------------+--------+-------------------------------------+ +| description | string | A human-readable description for the| +| | | listener. | ++------------------------------+--------+-------------------------------------+ +| insert_headers | dict | A dictionary of optional headers to | +| | | insert into the request before it is| +| | | sent to the backend member. See | +| | | `Supported HTTP Header Insertions`_.| +| | | Keys and values are specified as | +| | | strings. | ++------------------------------+--------+-------------------------------------+ +| l7policies | list | A list of `L7policy objects`_. | ++------------------------------+--------+-------------------------------------+ +| listener_id | string | ID of listener to create. | ++------------------------------+--------+-------------------------------------+ +| loadbalancer_id | string | ID of load balancer. | ++------------------------------+--------+-------------------------------------+ +| name | string | Human-readable name of the listener.| ++------------------------------+--------+-------------------------------------+ +| project_id | string | ID of the project owning this | +| | | resource. | ++------------------------------+--------+-------------------------------------+ +| protocol | string | Protocol type: One of HTTP, HTTPS, | +| | | TCP, or TERMINATED_HTTPS. 
| ++------------------------------+--------+-------------------------------------+ +| protocol_port | int | Protocol port number. | ++------------------------------+--------+-------------------------------------+ +| sni_container_data | list | A list of `TLS container`_ dict. | ++------------------------------+--------+-------------------------------------+ +| sni_container_refs | list | A list of references to the SNI | +| | | secrets containers. | ++------------------------------+--------+-------------------------------------+ +| timeout_client_data | int | Frontend client inactivity timeout | +| | | in milliseconds. | ++------------------------------+--------+-------------------------------------+ +| timeout_member_connect | int | Backend member connection timeout in| +| | | milliseconds. | ++------------------------------+--------+-------------------------------------+ +| timeout_member_data | int | Backend member inactivity timeout in| +| | | milliseconds. | ++------------------------------+--------+-------------------------------------+ +| timeout_tcp_inspect | int | Time, in milliseconds, to wait for | +| | | additional TCP packets for content | +| | | inspection. | ++------------------------------+--------+-------------------------------------+ +| allowed_cidrs | list | List of IPv4 or IPv6 CIDRs. | ++------------------------------+--------+-------------------------------------+ + +.. _TLS container: + +As of the writing of this specification the TLS container dictionary +contains the following: + ++---------------+--------+------------------------------------------------+ +| Key | Type | Description | ++===============+========+================================================+ +| certificate | string | The PEM encoded certificate. | ++---------------+--------+------------------------------------------------+ +| intermediates | List | A list of intermediate PEM certificates. | ++---------------+--------+------------------------------------------------+ +| passphrase | string | The private_key passphrase. | ++---------------+--------+------------------------------------------------+ +| primary_cn | string | The primary common name of the certificate. | ++---------------+--------+------------------------------------------------+ +| private_key | string | The PEM encoded private key. | ++---------------+--------+------------------------------------------------+ + +.. _Supported HTTP Header Insertions: + +As of the writing of this specification the Supported HTTP Header Insertions +are: + ++-----------------------+--------+--------------------------------------------+ +| Key | Type | Description | ++=======================+========+============================================+ +| X-Forwarded-For | bool | When True a X-Forwarded-For header is | +| | | inserted into the request to the backend | +| | | member that specifies the client IP | +| | | address. | ++-----------------------+--------+--------------------------------------------+ +| X-Forwarded-Port | int | A X-Forwarded-Port header is inserted into | +| | | the request to the backend member that | +| | | specifies the integer provided. Typically | +| | | this is used to indicate the port the | +| | | client connected to on the load balancer. | ++-----------------------+--------+--------------------------------------------+ +| X-Forwarded-Proto | bool | A X-Forwarded-Proto header is inserted into| +| | | the end of request to the backend member. 
+| X-SSL-Client-Verify | string | When "``true``" an ``X-SSL-Client-Verify`` |
+| | | header is inserted into the request to the |
+| | | backend ``member`` that contains 0 if the |
+| | | client authentication was successful, or a |
+| | | result error number greater than 0 that |
+| | | aligns to the OpenSSL verify error codes. |
++-----------------------+--------+--------------------------------------------+
+| X-SSL-Client-Has-Cert | string | When "``true``" an |
+| | | ``X-SSL-Client-Has-Cert`` header is |
+| | | inserted into the request to the backend |
+| | | ``member`` that is ``true`` if a client |
+| | | authentication certificate was presented, |
+| | | and ``false`` if not. Does not indicate |
+| | | validity. |
++-----------------------+--------+--------------------------------------------+
+| X-SSL-Client-DN | string | When "``true``" an ``X-SSL-Client-DN`` |
+| | | header is inserted into the request to the |
+| | | backend ``member`` that contains the full |
+| | | Distinguished Name of the certificate |
+| | | presented by the client. |
++-----------------------+--------+--------------------------------------------+
+| X-SSL-Client-CN | string | When "``true``" an ``X-SSL-Client-CN`` |
+| | | header is inserted into the request to the |
+| | | backend ``member`` that contains the Common|
+| | | Name from the full Distinguished Name of |
+| | | the certificate presented by the client. |
++-----------------------+--------+--------------------------------------------+
+| X-SSL-Issuer | string | When "``true``" an ``X-SSL-Issuer`` header |
+| | | is inserted into the request to the backend|
+| | | ``member`` that contains the full |
+| | | Distinguished Name of the client |
+| | | certificate issuer. |
++-----------------------+--------+--------------------------------------------+
+| X-SSL-Client-SHA1 | string | When "``true``" an ``X-SSL-Client-SHA1`` |
+| | | header is inserted into the request to the |
+| | | backend ``member`` that contains the SHA-1 |
+| | | fingerprint of the certificate presented by|
+| | | the client in hex string format. |
++-----------------------+--------+--------------------------------------------+
+|X-SSL-Client-Not-Before| string | When "``true``" an |
+| | | ``X-SSL-Client-Not-Before`` |
+| | | header is inserted into the request to the |
+| | | backend ``member`` that contains the start |
+| | | date presented by the client as a formatted|
+| | | string YYMMDDhhmmss[Z]. |
++-----------------------+--------+--------------------------------------------+
+|X-SSL-Client-Not-After | string | When "``true``" an |
+| | | ``X-SSL-Client-Not-After`` header is |
+| | | inserted into the request to the |
+| | | backend ``member`` that contains the end |
+| | | date presented by the client as a formatted|
+| | | string YYMMDDhhmmss[Z]. |
++-----------------------+--------+--------------------------------------------+
+
+**Creating a Fully Populated Listener**
+
+If the "default_pool" or "l7policies" option is specified, the provider
+driver will create all of the child objects in addition to creating the
+listener instance.
+
+Delete
+^^^^^^
+
+Deletes an existing listener.
+
+Octavia will pass the listener object as a parameter.
+
+The listener will be in the ``PENDING_DELETE`` provisioning_status when
+it is passed to the driver. 
The driver will notify Octavia that the delete +was successful by setting the provisioning_status to ``DELETED``. If the +delete failed, the driver will update the provisioning_status to ``ERROR``. + +Update +^^^^^^ + +Modifies an existing listener using the values supplied in the listener +object. + +Octavia will pass in the original listener object which is the baseline for the +update, and a listener object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update listener object may +contain the following: + ++----------------------------+--------+-------------------------------------+ +| Name | Type | Description | ++============================+========+=====================================+ +| admin_state_up | bool | Admin state: True if up, False if | +| | | down. | ++----------------------------+--------+-------------------------------------+ +| client_authentication | string | The TLS client authentication mode. | +| | | One of the options ``NONE``, | +| | | ``OPTIONAL`` or ``MANDATORY``. | ++----------------------------+--------+-------------------------------------+ +|client_ca_tls_container_data| string | A PEM encoded certificate. | ++----------------------------+--------+-------------------------------------+ +| client_ca_tls_container_ref| string | The reference to the secrets | +| | | container. | ++----------------------------+--------+-------------------------------------+ +| client_crl_container_data | string | A PEM encoded CRL file. | ++----------------------------+--------+-------------------------------------+ +| client_crl_container_ref | string | The reference to the secrets | +| | | container. | ++----------------------------+--------+-------------------------------------+ +| connection_limit | int | The max number of connections | +| | | permitted for this listener. Default| +| | | is -1, which is infinite | +| | | connections. | ++----------------------------+--------+-------------------------------------+ +| default_pool_id | string | The ID of the pool used by the | +| | | listener if no L7 policies match. | ++----------------------------+--------+-------------------------------------+ +| default_tls_container_data | dict | A `TLS container`_ dict. | ++----------------------------+--------+-------------------------------------+ +| default_tls_container_refs | string | The reference to the secrets | +| | | container. | ++----------------------------+--------+-------------------------------------+ +| description | string | A human-readable description for | +| | | the listener. | ++----------------------------+--------+-------------------------------------+ +| insert_headers | dict | A dictionary of optional headers to | +| | | insert into the request before it is| +| | | sent to the backend member. See | +| | | `Supported HTTP Header Insertions`_.| +| | | Keys and values are specified as | +| | | strings. | ++----------------------------+--------+-------------------------------------+ +| listener_id | string | ID of listener to update. | ++----------------------------+--------+-------------------------------------+ +| name | string | Human-readable name of the listener.| ++----------------------------+--------+-------------------------------------+ +| sni_container_data | list | A list of `TLS container`_ dict. 
| ++----------------------------+--------+-------------------------------------+ +| sni_container_refs | list | A list of references to the SNI | +| | | secrets containers. | ++----------------------------+--------+-------------------------------------+ +| timeout_client_data | int | Frontend client inactivity timeout | +| | | in milliseconds. | ++----------------------------+--------+-------------------------------------+ +| timeout_member_connect | int | Backend member connection timeout in| +| | | milliseconds. | ++----------------------------+--------+-------------------------------------+ +| timeout_member_data | int | Backend member inactivity timeout in| +| | | milliseconds. | ++----------------------------+--------+-------------------------------------+ +| timeout_tcp_inspect | int | Time, in milliseconds, to wait for | +| | | additional TCP packets for content | +| | | inspection. | ++----------------------------+--------+-------------------------------------+ +| allowed_cidrs | list | List of IPv4 or IPv6 CIDRs. | ++----------------------------+--------+-------------------------------------+ + +The listener will be in the ``PENDING_UPDATE`` provisioning_status when +it is passed to the driver. The driver will update the provisioning_status +of the listener to either ``ACTIVE`` if successfully updated, or ``ERROR`` +if the update was not successful. + +The driver is expected to validate that the driver supports the request. +The method will then return or raise an exception if the request cannot be +accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + def listener_create(self, listener): + """Creates a new listener. + + :param listener (object): The listener object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def listener_delete(self, listener): + """Deletes a listener. + + :param listener (object): The listener object. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def listener_update(self, old_listener, new_listener): + """Updates a listener. + + :param old_listener (object): The baseline listener object. + :param new_listener (object): The updated listener object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Pool +---- + +Create +^^^^^^ + +Creates a pool for a load balancer. + +Octavia will pass in the pool object with all requested settings. + +The pool will be in the ``PENDING_CREATE`` provisioning_status and +``OFFLINE`` operating_status when it is passed to the driver. The driver +will be responsible for updating the provisioning status of the pool +to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. + +The Octavia API will accept and do basic API validation of the create +request from the user. 
The pool python object representing the request
+body will be passed to the driver create method as it was received and
+validated with the following exceptions:
+
+1. The project_id will be removed, if present, as this field is now
+ deprecated. The pool will inherit the project_id from the parent
+ load balancer.
+
+.. _Pool object:
+
+**Pool object**
+
+As of the writing of this specification the create pool object may
+contain the following:
+
++-----------------------+--------+------------------------------------------+
+| Name | Type | Description |
++=======================+========+==========================================+
+| admin_state_up | bool | Admin state: True if up, False if down. |
++-----------------------+--------+------------------------------------------+
+| ca_tls_container_data | string | A PEM encoded certificate. |
++-----------------------+--------+------------------------------------------+
+| ca_tls_container_ref | string | The reference to the secrets |
+| | | container. |
++-----------------------+--------+------------------------------------------+
+| crl_container_data | string | A PEM encoded CRL file. |
++-----------------------+--------+------------------------------------------+
+| crl_container_ref | string | The reference to the secrets |
+| | | container. |
++-----------------------+--------+------------------------------------------+
+| description | string | A human-readable description for the |
+| | | pool. |
++-----------------------+--------+------------------------------------------+
+| healthmonitor | object | A `Healthmonitor object`_. |
++-----------------------+--------+------------------------------------------+
+| lb_algorithm | string | Load balancing algorithm: One of |
+| | | ROUND_ROBIN, LEAST_CONNECTIONS, |
+| | | SOURCE_IP or SOURCE_IP_PORT. |
++-----------------------+--------+------------------------------------------+
+| loadbalancer_id | string | ID of load balancer. |
++-----------------------+--------+------------------------------------------+
+| listener_id | string | ID of listener. |
++-----------------------+--------+------------------------------------------+
+| members | list | A list of `Member objects`_. |
++-----------------------+--------+------------------------------------------+
+| name | string | Human-readable name of the pool. |
++-----------------------+--------+------------------------------------------+
+| pool_id | string | ID of pool to create. |
++-----------------------+--------+------------------------------------------+
+| project_id | string | ID of the project owning this resource. |
++-----------------------+--------+------------------------------------------+
+| protocol | string | Protocol type: One of HTTP, HTTPS, |
+| | | PROXY, or TCP. |
++-----------------------+--------+------------------------------------------+
+| session_persistence | dict | Defines session persistence as one of |
+| | | {'type': <'HTTP_COOKIE' | 'SOURCE_IP'>} |
+| | | OR |
+| | | {'type': 'APP_COOKIE', |
+| | | 'cookie_name': <cookie name>} |
++-----------------------+--------+------------------------------------------+
+| tls_container_data | dict | A `TLS container`_ dict. |
++-----------------------+--------+------------------------------------------+
+| tls_container_ref | string | The reference to the secrets |
+| | | container. |
++-----------------------+--------+------------------------------------------+
+| tls_enabled | bool | True when backend re-encryption is |
+| | | enabled. |
++-----------------------+--------+------------------------------------------+
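+
+For illustration only, the ``session_persistence`` formats above might look
+like this in a create pool object. This is a sketch; every value, including
+the IDs, is hypothetical:
+
+.. code-block:: python
+
+    # A minimal create pool request body using APP_COOKIE persistence.
+    # A SOURCE_IP or HTTP_COOKIE type would omit 'cookie_name'.
+    pool = {
+        "pool_id": "3fc1e830-0e7c-44a8-ab4a-de5e0f0404f3",
+        "loadbalancer_id": "d3b6f2b9-9c9a-4d0a-b0f3-5c8a0d2c3a11",
+        "protocol": "HTTP",
+        "lb_algorithm": "ROUND_ROBIN",
+        "session_persistence": {"type": "APP_COOKIE",
+                                "cookie_name": "my_app_cookie"},
+        "admin_state_up": True,
+    }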
+
+Delete
+^^^^^^
+
+Removes an existing pool and all of its members.
+
+Octavia will pass the pool object as a parameter.
+
+The pool will be in the ``PENDING_DELETE`` provisioning_status when
+it is passed to the driver. The driver will notify Octavia that the delete
+was successful by setting the provisioning_status to ``DELETED``. If the
+delete failed, the driver will update the provisioning_status to ``ERROR``.
+
+Update
+^^^^^^
+
+Modifies an existing pool using the values supplied in the pool object.
+
+Octavia will pass in the original pool object which is the baseline for the
+update, and a pool object with the fields to be updated.
+Fields not updated by the user will contain "Unset" as defined in the data
+model.
+
+As of the writing of this specification the update pool object may
+contain the following:
+
++-----------------------+--------+------------------------------------------+
+| Name | Type | Description |
++=======================+========+==========================================+
+| admin_state_up | bool | Admin state: True if up, False if down. |
++-----------------------+--------+------------------------------------------+
+| ca_tls_container_data | string | A PEM encoded certificate. |
++-----------------------+--------+------------------------------------------+
+| ca_tls_container_ref | string | The reference to the secrets |
+| | | container. |
++-----------------------+--------+------------------------------------------+
+| crl_container_data | string | A PEM encoded CRL file. |
++-----------------------+--------+------------------------------------------+
+| crl_container_ref | string | The reference to the secrets |
+| | | container. |
++-----------------------+--------+------------------------------------------+
+| description | string | A human-readable description for the |
+| | | pool. |
++-----------------------+--------+------------------------------------------+
+| lb_algorithm | string | Load balancing algorithm: One of |
+| | | ROUND_ROBIN, LEAST_CONNECTIONS, |
+| | | SOURCE_IP or SOURCE_IP_PORT. |
++-----------------------+--------+------------------------------------------+
+| name | string | Human-readable name of the pool. |
++-----------------------+--------+------------------------------------------+
+| pool_id | string | ID of pool to update. |
++-----------------------+--------+------------------------------------------+
+| session_persistence | dict | Defines session persistence as one of |
+| | | {'type': <'HTTP_COOKIE' | 'SOURCE_IP'>} |
+| | | OR |
+| | | {'type': 'APP_COOKIE', |
+| | | 'cookie_name': <cookie name>} |
++-----------------------+--------+------------------------------------------+
+| tls_container_data | dict | A `TLS container`_ dict. |
++-----------------------+--------+------------------------------------------+
+| tls_container_ref | string | The reference to the secrets |
+| | | container. |
++-----------------------+--------+------------------------------------------+
+| tls_enabled | bool | True when backend re-encryption is |
+| | | enabled. |
++-----------------------+--------+------------------------------------------+
+
+The pool will be in the ``PENDING_UPDATE`` provisioning_status when it is
+passed to the driver. The driver will update the provisioning_status of the
+pool to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the
+update was not successful.
+
+The driver is expected to validate that the driver supports the request.
+The method will then return or raise an exception if the request cannot be
+accepted.
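+
+One way a driver can apply this is to inspect only the fields the user
+actually set and reject any it cannot honor. The following is a sketch
+only: ``Unset`` stands in for the sentinel described above,
+``SUPPORTED_ALGORITHMS`` is a hypothetical driver capability set, and
+``UnsupportedOptionError`` is the exception defined in the
+`Exception Model`_ section:
+
+.. code-block:: python
+
+    Unset = object()  # stand-in for the data model's "Unset" sentinel
+
+    SUPPORTED_ALGORITHMS = {'ROUND_ROBIN', 'LEAST_CONNECTIONS'}
+
+    def validate_pool_update(new_pool):
+        # Fields the user did not change carry the Unset sentinel, so only
+        # values that were actually provided are checked.
+        algorithm = new_pool.lb_algorithm
+        if algorithm is not Unset and algorithm not in SUPPORTED_ALGORITHMS:
+            # UnsupportedOptionError is defined in the Exception Model below.
+            raise UnsupportedOptionError(
+                user_fault_string='%s is not supported.' % algorithm,
+                operator_fault_string='%s is not supported.' % algorithm)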
+
+**Abstract class definition**
+
+.. code-block:: python
+
+    class Driver(object):
+        def pool_create(self, pool):
+            """Creates a new pool.
+
+            :param pool (object): The pool object.
+            :return: Nothing if the create request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+        def pool_delete(self, pool):
+            """Deletes a pool and its members.
+
+            :param pool (object): The pool object.
+            :return: Nothing if the delete request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            """
+            raise NotImplementedError()
+
+        def pool_update(self, old_pool, new_pool):
+            """Updates a pool.
+
+            :param old_pool (object): The baseline pool object.
+            :param new_pool (object): The updated pool object.
+            :return: Nothing if the update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
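+
+As a usage sketch, a provider driver subclass typically validates the
+request, returns quickly, and completes the work asynchronously, reporting
+the result through the status API described in `Driver Support Library`_
+below. Everything here beyond the method signature (the task queue, the
+algorithm check) is a hypothetical illustration, and
+``UnsupportedOptionError`` is the exception defined in the
+`Exception Model`_ section:
+
+.. code-block:: python
+
+    import queue
+
+    class ExampleDriver(Driver):
+        def __init__(self):
+            # Work is handed to a backend worker so the call returns fast.
+            self._tasks = queue.Queue()
+
+        def pool_create(self, pool):
+            # Reject options this backend cannot honor before accepting.
+            if getattr(pool, 'lb_algorithm', None) == 'SOURCE_IP_PORT':
+                raise UnsupportedOptionError(
+                    user_fault_string='SOURCE_IP_PORT is not supported.',
+                    operator_fault_string='SOURCE_IP_PORT is not supported.')
+            # Accept the request; a worker later reports ACTIVE or ERROR.
+            self._tasks.put(('pool_create', pool))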
+
+Member
+------
+
+Create
+^^^^^^
+
+Creates a member for a pool.
+
+Octavia will pass in the member object with all requested settings.
+
+The member will be in the ``PENDING_CREATE`` provisioning_status and
+``OFFLINE`` operating_status when it is passed to the driver. The driver
+will be responsible for updating the provisioning status of the member
+to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.
+
+The Octavia API will accept and do basic API validation of the create
+request from the user. The member python object representing the
+request body will be passed to the driver create method as it was received
+and validated with the following exceptions:
+
+1. The project_id will be removed, if present, as this field is now
+ deprecated. The member will inherit the project_id from the parent
+ load balancer.
+
+.. _Member objects:
+
+**Member object**
+
+As of the writing of this specification the create member object may
+contain the following:
+
++-----------------------+--------+------------------------------------------+
+| Name | Type | Description |
++=======================+========+==========================================+
+| address | string | The IP address of the backend member to |
+| | | receive traffic from the load balancer. |
++-----------------------+--------+------------------------------------------+
+| admin_state_up | bool | Admin state: True if up, False if down. |
++-----------------------+--------+------------------------------------------+
+| backup | bool | Is the member a backup? Backup members |
+| | | only receive traffic when all non-backup |
+| | | members are down. |
++-----------------------+--------+------------------------------------------+
+| member_id | string | ID of member to create. |
++-----------------------+--------+------------------------------------------+
+| monitor_address | string | An alternate IP address used for health |
+| | | monitoring a backend member. |
++-----------------------+--------+------------------------------------------+
+| monitor_port | int | An alternate protocol port used for |
+| | | health monitoring a backend member. |
++-----------------------+--------+------------------------------------------+
+| name | string | Human-readable name of the member. |
++-----------------------+--------+------------------------------------------+
+| pool_id | string | ID of pool. |
++-----------------------+--------+------------------------------------------+
+| project_id | string | ID of the project owning this resource. |
++-----------------------+--------+------------------------------------------+
+| protocol_port | int | The port on which the backend member |
+| | | listens for traffic. |
++-----------------------+--------+------------------------------------------+
+| subnet_id | string | Subnet ID. |
++-----------------------+--------+------------------------------------------+
+| weight | int | The weight of a member determines the |
+| | | portion of requests or connections it |
+| | | services compared to the other members of|
+| | | the pool. For example, a member with a |
+| | | weight of 10 receives five times as many |
+| | | requests as a member with a weight of 2. |
+| | | A value of 0 means the member does not |
+| | | receive new connections but continues to |
+| | | service existing connections. A valid |
+| | | value is from 0 to 256. Default is 1. |
++-----------------------+--------+------------------------------------------+
+| vnic_type | string | The member vNIC type used for the member |
+| | | port. One of normal or direct. |
++-----------------------+--------+------------------------------------------+
+
+.. note:: The vnic_type values normal and direct are the same as those
+   defined for neutron ports.
+
+Delete
+^^^^^^
+
+Removes a pool member.
+
+Octavia will pass the member object as a parameter.
+
+The member will be in the ``PENDING_DELETE`` provisioning_status when
+it is passed to the driver. The driver will notify Octavia that the delete
+was successful by setting the provisioning_status to ``DELETED``. If the
+delete failed, the driver will update the provisioning_status to ``ERROR``.
+
+Update
+^^^^^^
+
+Modifies an existing member using the values supplied in the member object.
+
+Octavia will pass in the original member object which is the baseline for the
+update, and a member object with the fields to be updated.
+Fields not updated by the user will contain "Unset" as defined in the data
+model.
+
+As of the writing of this specification the update member object may contain
+the following:
+
++-----------------------+--------+------------------------------------------+
+| Name | Type | Description |
++=======================+========+==========================================+
+| admin_state_up | bool | Admin state: True if up, False if down. |
++-----------------------+--------+------------------------------------------+
+| backup | bool | Is the member a backup? Backup members |
+| | | only receive traffic when all non-backup |
+| | | members are down. |
++-----------------------+--------+------------------------------------------+
+| member_id | string | ID of member to update. |
++-----------------------+--------+------------------------------------------+
+| monitor_address | string | An alternate IP address used for health |
+| | | monitoring a backend member. |
++-----------------------+--------+------------------------------------------+
+| monitor_port | int | An alternate protocol port used for |
+| | | health monitoring a backend member. |
++-----------------------+--------+------------------------------------------+
+| name | string | Human-readable name of the member. |
++-----------------------+--------+------------------------------------------+
+| weight | int | The weight of a member determines the |
+| | | portion of requests or connections it |
+| | | services compared to the other members of|
+| | | the pool. For example, a member with a |
+| | | weight of 10 receives five times as many |
+| | | requests as a member with a weight of 2. |
+| | | A value of 0 means the member does not |
+| | | receive new connections but continues to |
+| | | service existing connections. A valid |
+| | | value is from 0 to 256. Default is 1. |
++-----------------------+--------+------------------------------------------+
+
+The member will be in the ``PENDING_UPDATE`` provisioning_status when
+it is passed to the driver. The driver will update the provisioning_status
+of the member to either ``ACTIVE`` if successfully updated, or ``ERROR``
+if the update was not successful.
+
+The driver is expected to validate that the driver supports the request.
+The method will then return or raise an exception if the request cannot be
+accepted.
+
+Batch Update
+^^^^^^^^^^^^
+
+Set the state of members for a pool in one API call. This may include
+creating new members, deleting old members, and updating existing members.
+Existing members are matched based on address/port combination.
+
+For example, assume a pool currently has two members. These members have the
+following address/port combinations: '192.0.2.15:80' and '192.0.2.16:80'.
+Now assume a PUT request is made that includes members with address/port
+combinations: '192.0.2.16:80' and '192.0.2.17:80'. The member '192.0.2.15:80'
+will be deleted because it was not in the request. The member '192.0.2.16:80'
+will be updated to match the request data for that member, because it was
+matched. The member '192.0.2.17:80' will be created, because no such member
+existed.
+
+The members will be in the ``PENDING_CREATE``, ``PENDING_UPDATE``, or
+``PENDING_DELETE`` provisioning_status when they are passed to the driver.
+The driver will update the provisioning_status of the members to either
+``ACTIVE`` or ``DELETED`` if successfully updated, or ``ERROR``
+if the update was not successful.
+
+The batch update method will supply a list of `Member objects`_.
+Existing members not in this list should be deleted,
+existing members in the list should be updated,
+and members in the list that do not already exist should be created.
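+
+A driver's reconciliation step can implement the matching rule above
+roughly as follows. This is a sketch only, assuming each member object
+exposes the ``address`` and ``protocol_port`` fields described in the
+tables above:
+
+.. code-block:: python
+
+    def plan_member_batch(existing_members, requested_members):
+        """Split a batch update into create, update, and delete sets."""
+        def key(member):
+            # Existing members are matched on address/port combination.
+            return (member.address, member.protocol_port)
+
+        existing = {key(m): m for m in existing_members}
+        requested = {key(m): m for m in requested_members}
+
+        to_create = [m for k, m in requested.items() if k not in existing]
+        to_update = [m for k, m in requested.items() if k in existing]
+        to_delete = [m for k, m in existing.items() if k not in requested]
+        return to_create, to_update, to_delete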
+
+**Abstract class definition**
+
+.. code-block:: python
+
+    class Driver(object):
+        def member_create(self, member):
+            """Creates a new member for a pool.
+
+            :param member (object): The member object.
+            :return: Nothing if the create request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+        def member_delete(self, member):
+            """Deletes a pool member.
+
+            :param member (object): The member object.
+            :return: Nothing if the delete request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            """
+            raise NotImplementedError()
+
+        def member_update(self, old_member, new_member):
+            """Updates a pool member.
+
+            :param old_member (object): The baseline member object.
+            :param new_member (object): The updated member object.
+            :return: Nothing if the update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+        def member_batch_update(self, pool_id, members):
+            """Creates, updates, or deletes a set of pool members.
+
+            :param pool_id (string): The id of the pool to update.
+            :param members (list): List of member objects.
+            :return: Nothing if the batch update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+Health Monitor
+--------------
+
+Create
+^^^^^^
+
+Creates a health monitor on a pool.
+
+Octavia will pass in the health monitor object with all requested settings.
+
+The health monitor will be in the ``PENDING_CREATE`` provisioning_status and
+``OFFLINE`` operating_status when it is passed to the driver. The driver
+will be responsible for updating the provisioning status of the health
+monitor to either ``ACTIVE`` if successfully created, or ``ERROR`` if not
+created.
+
+The Octavia API will accept and do basic API validation of the create
+request from the user. The healthmonitor python object representing the
+request body will be passed to the driver create method as it was received
+and validated with the following exceptions:
+
+1. The project_id will be removed, if present, as this field is now
+ deprecated. The health monitor will inherit the project_id from the parent
+ load balancer.
+
+.. _Healthmonitor object:
+
+**Healthmonitor object**
+
++-----------------------+--------+------------------------------------------+
+| Name | Type | Description |
++=======================+========+==========================================+
+| admin_state_up | bool | Admin state: True if up, False if down. |
++-----------------------+--------+------------------------------------------+
+| delay | int | The interval, in seconds, between health |
+| | | checks. |
++-----------------------+--------+------------------------------------------+
+| domain_name | string | The domain name to be passed in the host |
+| | | header for health monitor checks. |
++-----------------------+--------+------------------------------------------+
+| expected_codes | string | The expected HTTP status codes to get |
+| | | from a successful health check. This may |
+| | | be a single value, a list, or a range. |
++-----------------------+--------+------------------------------------------+
+| healthmonitor_id | string | ID of health monitor to create. |
++-----------------------+--------+------------------------------------------+
+| http_method | string | The HTTP method that the health monitor |
+| | | uses for requests. One of CONNECT, |
+| | | DELETE, GET, HEAD, OPTIONS, PATCH, POST, |
+| | | PUT, or TRACE. 
| ++-----------------------+--------+------------------------------------------+ +| http_version | float | The HTTP version to use for health | +| | | monitor connections. One of '1.0' or | +| | | '1.1'. Defaults to '1.0'. | ++-----------------------+--------+------------------------------------------+ +| max_retries | int | The number of successful checks before | +| | | changing the operating status of the | +| | | member to ONLINE. | ++-----------------------+--------+------------------------------------------+ +| max_retries_down | int | The number of allowed check failures | +| | | before changing the operating status of | +| | | the member to ERROR. A valid value is | +| | | from 1 to 10. | ++-----------------------+--------+------------------------------------------+ +| name | string | Human-readable name of the monitor. | ++-----------------------+--------+------------------------------------------+ +| pool_id | string | The pool to monitor. | ++-----------------------+--------+------------------------------------------+ +| project_id | string | ID of the project owning this resource. | ++-----------------------+--------+------------------------------------------+ +| timeout | int | The time, in seconds, after which a | +| | | health check times out. This value must | +| | | be less than the delay value. | ++-----------------------+--------+------------------------------------------+ +| type | string | The type of health monitor. One of HTTP, | +| | | HTTPS, PING, SCTP, TCP, TLS-HELLO or | +| | | UDP-CONNECT. | ++-----------------------+--------+------------------------------------------+ +| url_path | string | The HTTP URL path of the request sent by | +| | | the monitor to test the health of a | +| | | backend member. Must be a string that | +| | | begins with a forward slash (/). | ++-----------------------+--------+------------------------------------------+ + +Delete +^^^^^^ + +Deletes an existing health monitor. + +Octavia will pass in the health monitor object as a parameter. + +The health monitor will be in the ``PENDING_DELETE`` provisioning_status +when it is passed to the driver. The driver will notify Octavia that the +delete was successful by setting the provisioning_status to ``DELETED``. +If the delete failed, the driver will update the provisioning_status to +``ERROR``. + +Update +^^^^^^ + +Modifies an existing health monitor using the values supplied in the +health monitor object. + +Octavia will pass in the original health monitor object which is the baseline +for the update, and a health monitor object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update health monitor object may +contain the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| delay | int | The interval, in seconds, between health | +| | | checks. | ++-----------------------+--------+------------------------------------------+ +| domain_name | string | The domain name to be passed in the host | +| | | header for health monitor checks. 
|
++-----------------------+--------+------------------------------------------+
+| expected_codes | string | The expected HTTP status codes to get |
+| | | from a successful health check. This may |
+| | | be a single value, a list, or a range. |
++-----------------------+--------+------------------------------------------+
+| healthmonitor_id | string | ID of health monitor to update. |
++-----------------------+--------+------------------------------------------+
+| http_method | string | The HTTP method that the health monitor |
+| | | uses for requests. One of CONNECT, |
+| | | DELETE, GET, HEAD, OPTIONS, PATCH, POST, |
+| | | PUT, or TRACE. |
++-----------------------+--------+------------------------------------------+
+| http_version | float | The HTTP version to use for health |
+| | | monitor connections. One of '1.0' or |
+| | | '1.1'. Defaults to '1.0'. |
++-----------------------+--------+------------------------------------------+
+| max_retries | int | The number of successful checks before |
+| | | changing the operating status of the |
+| | | member to ONLINE. |
++-----------------------+--------+------------------------------------------+
+| max_retries_down | int | The number of allowed check failures |
+| | | before changing the operating status of |
+| | | the member to ERROR. A valid value is |
+| | | from 1 to 10. |
++-----------------------+--------+------------------------------------------+
+| name | string | Human-readable name of the monitor. |
++-----------------------+--------+------------------------------------------+
+| timeout | int | The time, in seconds, after which a |
+| | | health check times out. This value must |
+| | | be less than the delay value. |
++-----------------------+--------+------------------------------------------+
+| url_path | string | The HTTP URL path of the request sent by |
+| | | the monitor to test the health of a |
+| | | backend member. Must be a string that |
+| | | begins with a forward slash (/). |
++-----------------------+--------+------------------------------------------+
+
+The health monitor will be in the ``PENDING_UPDATE`` provisioning_status
+when it is passed to the driver. The driver will update the
+provisioning_status of the health monitor to either ``ACTIVE`` if
+successfully updated, or ``ERROR`` if the update was not successful.
+
+The driver is expected to validate that the driver supports the request.
+The method will then return or raise an exception if the request cannot be
+accepted.
+
+**Abstract class definition**
+
+.. code-block:: python
+
+    class Driver(object):
+        def health_monitor_create(self, healthmonitor):
+            """Creates a new health monitor.
+
+            :param healthmonitor (object): The health monitor object.
+            :return: Nothing if the create request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+        def health_monitor_delete(self, healthmonitor):
+            """Deletes a health monitor.
+
+            :param healthmonitor (object): The health monitor object.
+            :return: Nothing if the delete request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            """
+            raise NotImplementedError()
+
+        def health_monitor_update(self, old_healthmonitor, new_healthmonitor):
+            """Updates a health monitor.
+
+            :param old_healthmonitor (object): The baseline health monitor
+              object.
+            :param new_healthmonitor (object): The updated health monitor
+              object.
+            :return: Nothing if the update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+L7 Policy
+---------
+
+Create
+^^^^^^
+
+Creates an L7 policy.
+
+Octavia will pass in the L7 policy object with all requested settings.
+
+The L7 policy will be in the ``PENDING_CREATE`` provisioning_status and
+``OFFLINE`` operating_status when it is passed to the driver. The driver
+will be responsible for updating the provisioning status of the L7 policy
+to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.
+
+The Octavia API will accept and do basic API validation of the create
+request from the user. The l7policy python object representing the
+request body will be passed to the driver create method as it was received
+and validated with the following exceptions:
+
+1. The project_id will be removed, if present, as this field is now
+ deprecated. The l7policy will inherit the project_id from the parent
+ load balancer.
+
+.. _L7policy objects:
+
+**L7policy object**
+
+As of the writing of this specification the create l7policy object may
+contain the following:
+
++-----------------------+--------+------------------------------------------+
+| Name | Type | Description |
++=======================+========+==========================================+
+| action | string | The L7 policy action. One of |
+| | | REDIRECT_PREFIX, REDIRECT_TO_POOL, |
+| | | REDIRECT_TO_URL, or REJECT. |
++-----------------------+--------+------------------------------------------+
+| admin_state_up | bool | Admin state: True if up, False if down. |
++-----------------------+--------+------------------------------------------+
+| description | string | A human-readable description for the |
+| | | L7 policy. |
++-----------------------+--------+------------------------------------------+
+| l7policy_id | string | The ID of the L7 policy. |
++-----------------------+--------+------------------------------------------+
+| listener_id | string | The ID of the listener. |
++-----------------------+--------+------------------------------------------+
+| name | string | Human-readable name of the L7 policy. |
++-----------------------+--------+------------------------------------------+
+| position | int | The position of this policy on the |
+| | | listener. Positions start at 1. |
++-----------------------+--------+------------------------------------------+
+| project_id | string | ID of the project owning this resource. |
++-----------------------+--------+------------------------------------------+
+| redirect_http_code | int | The HTTP status code to be returned on |
+| | | a redirect policy. |
++-----------------------+--------+------------------------------------------+
+| redirect_pool_id | string | Requests matching this policy will be |
+| | | redirected to the pool with this ID. |
+| | | Only valid if action is REDIRECT_TO_POOL.|
++-----------------------+--------+------------------------------------------+
+| redirect_prefix | string | Requests matching this policy will be |
+| | | redirected to this Prefix URL. Only |
+| | | valid if ``action`` is |
+| | | ``REDIRECT_PREFIX``. |
++-----------------------+--------+------------------------------------------+
+| redirect_url | string | Requests matching this policy will be |
+| | | redirected to this URL. Only valid if |
+| | | action is REDIRECT_TO_URL. |
++-----------------------+--------+------------------------------------------+
+| rules | list | A list of `L7rule objects`_. |
++-----------------------+--------+------------------------------------------+
+
+**Creating a Fully Populated L7 policy**
+
+If the "rules" option is specified, the provider driver will create all of
+the child objects in addition to creating the L7 policy instance.
+
+Delete
+^^^^^^
+
+Deletes an existing L7 policy.
+
+Octavia will pass in the L7 policy object as a parameter.
+
+The l7policy will be in the ``PENDING_DELETE`` provisioning_status when
+it is passed to the driver. The driver will notify Octavia that the delete
+was successful by setting the provisioning_status to ``DELETED``. If the
+delete failed, the driver will update the provisioning_status to ``ERROR``.
+
+Update
+^^^^^^
+
+Modifies an existing L7 policy using the values supplied in the l7policy
+object.
+
+Octavia will pass in the original L7 policy object which is the baseline for
+the update, and an L7 policy object with the fields to be updated.
+Fields not updated by the user will contain "Unset" as defined in the data
+model.
+
+As of the writing of this specification the update L7 policy object may
+contain the following:
+
++-----------------------+--------+------------------------------------------+
+| Name | Type | Description |
++=======================+========+==========================================+
+| action | string | The L7 policy action. One of |
+| | | REDIRECT_PREFIX, REDIRECT_TO_POOL, |
+| | | REDIRECT_TO_URL, or REJECT. |
++-----------------------+--------+------------------------------------------+
+| admin_state_up | bool | Admin state: True if up, False if down. |
++-----------------------+--------+------------------------------------------+
+| description | string | A human-readable description for the |
+| | | L7 policy. |
++-----------------------+--------+------------------------------------------+
+| l7policy_id | string | The ID of the L7 policy. |
++-----------------------+--------+------------------------------------------+
+| name | string | Human-readable name of the L7 policy. |
++-----------------------+--------+------------------------------------------+
+| position | int | The position of this policy on the |
+| | | listener. Positions start at 1. |
++-----------------------+--------+------------------------------------------+
+| redirect_http_code | int | The HTTP status code to be returned on |
+| | | a redirect policy. |
++-----------------------+--------+------------------------------------------+
+| redirect_pool_id | string | Requests matching this policy will be |
+| | | redirected to the pool with this ID. |
+| | | Only valid if action is REDIRECT_TO_POOL.|
++-----------------------+--------+------------------------------------------+
+| redirect_prefix | string | Requests matching this policy will be |
+| | | redirected to this Prefix URL. Only |
+| | | valid if ``action`` is |
+| | | ``REDIRECT_PREFIX``. |
++-----------------------+--------+------------------------------------------+
+| redirect_url | string | Requests matching this policy will be |
+| | | redirected to this URL. Only valid if |
+| | | action is REDIRECT_TO_URL. |
++-----------------------+--------+------------------------------------------+
+
+The L7 policy will be in the ``PENDING_UPDATE`` provisioning_status when
+it is passed to the driver. The driver will update the provisioning_status
+of the L7 policy to either ``ACTIVE`` if successfully updated, or ``ERROR``
+if the update was not successful.
+
+The driver is expected to validate that the driver supports the request.
+The method will then return or raise an exception if the request cannot be
+accepted.
+
+**Abstract class definition**
+
+.. code-block:: python
+
+    class Driver(object):
+        def l7policy_create(self, l7policy):
+            """Creates a new L7 policy.
+
+            :param l7policy (object): The l7policy object.
+            :return: Nothing if the create request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+        def l7policy_delete(self, l7policy):
+            """Deletes an L7 policy.
+
+            :param l7policy (object): The l7policy object.
+            :return: Nothing if the delete request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            """
+            raise NotImplementedError()
+
+        def l7policy_update(self, old_l7policy, new_l7policy):
+            """Updates an L7 policy.
+
+            :param old_l7policy (object): The baseline l7policy object.
+            :param new_l7policy (object): The updated l7policy object.
+            :return: Nothing if the update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+L7 Rule
+-------
+
+Create
+^^^^^^
+
+Creates a new L7 rule for an existing L7 policy.
+
+Octavia will pass in the L7 rule object with all requested settings.
+
+The L7 rule will be in the ``PENDING_CREATE`` provisioning_status and
+``OFFLINE`` operating_status when it is passed to the driver. The driver
+will be responsible for updating the provisioning status of the L7 rule
+to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.
+
+The Octavia API will accept and do basic API validation of the create
+request from the user. The l7rule python object representing the
+request body will be passed to the driver create method as it was received
+and validated with the following exceptions:
+
+1. The project_id will be removed, if present, as this field is now
+ deprecated. The L7 rule will inherit the project_id from the parent
+ load balancer.
+
+.. _L7rule objects:
+
+**L7rule object**
+
+As of the writing of this specification the create l7rule object may
+contain the following:
+
++-----------------------+--------+------------------------------------------+
+| Name | Type | Description |
++=======================+========+==========================================+
+| admin_state_up | bool | Admin state: True if up, False if down. |
++-----------------------+--------+------------------------------------------+
+| compare_type | string | The comparison type for the L7 rule. One |
+| | | of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, |
+| | | or STARTS_WITH. 
| ++-----------------------+--------+------------------------------------------+ +| invert | bool | When True the logic of the rule is | +| | | inverted. For example, with invert True, | +| | | equal to would become not equal to. | ++-----------------------+--------+------------------------------------------+ +| key | string | The key to use for the comparison. For | +| | | example, the name of the cookie to | +| | | evaluate. | ++-----------------------+--------+------------------------------------------+ +| l7policy_id | string | The ID of the L7 policy. | ++-----------------------+--------+------------------------------------------+ +| l7rule_id | string | The ID of the L7 rule. | ++-----------------------+--------+------------------------------------------+ +| project_id | string | ID of the project owning this resource. | ++-----------------------+--------+------------------------------------------+ +| type | string | The L7 rule type. One of COOKIE, | +| | | FILE_TYPE, HEADER, HOST_NAME, or PATH. | ++-----------------------+--------+------------------------------------------+ +| value | string | The value to use for the comparison. For | +| | | example, the file type to compare. | ++-----------------------+--------+------------------------------------------+ + +Delete +^^^^^^ + +Deletes an existing L7 rule. + +Octavia will pass in the L7 rule object as a parameter. + +The L7 rule will be in the ``PENDING_DELETE`` provisioning_status when +it is passed to the driver. The driver will notify Octavia that the delete +was successful by setting the provisioning_status to ``DELETED``. If the +delete failed, the driver will update the provisioning_status to ``ERROR``. + +Update +^^^^^^ + +Modifies an existing L7 rule using the values supplied in the l7rule object. + +Octavia will pass in the original L7 rule object which is the baseline for the +update, and an L7 rule object with the fields to be updated. +Fields not updated by the user will contain "Unset" as defined in the data +model. + +As of the writing of this specification the update L7 rule object may +contain the following: + ++-----------------------+--------+------------------------------------------+ +| Name | Type | Description | ++=======================+========+==========================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-----------------------+--------+------------------------------------------+ +| compare_type | string | The comparison type for the L7 rule. One | +| | | of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, | +| | | or STARTS_WITH. | ++-----------------------+--------+------------------------------------------+ +| invert | bool | When True the logic of the rule is | +| | | inverted. For example, with invert True, | +| | | equal to would become not equal to. | ++-----------------------+--------+------------------------------------------+ +| key | string | The key to use for the comparison. For | +| | | example, the name of the cookie to | +| | | evaluate. | ++-----------------------+--------+------------------------------------------+ +| l7rule_id | string | The ID of the L7 rule. | ++-----------------------+--------+------------------------------------------+ +| type | string | The L7 rule type. One of COOKIE, | +| | | FILE_TYPE, HEADER, HOST_NAME, or PATH. | ++-----------------------+--------+------------------------------------------+ +| value | string | The value to use for the comparison. For | +| | | example, the file type to compare. 
| ++-----------------------+--------+------------------------------------------+ + +The L7 rule will be in the ``PENDING_UPDATE`` provisioning_status when +it is passed to the driver. The driver will update the provisioning_status +of the L7 rule to either ``ACTIVE`` if successfully updated, or ``ERROR`` +if the update was not successful. + +The driver is expected to validate that the driver supports the request. +The method will then return or raise an exception if the request cannot be +accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + def l7rule_create(self, l7rule): + + """Creates a new L7 rule. + + :param l7rule (object): The L7 rule object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def l7rule_delete(self, l7rule): + + """Deletes an L7 rule. + + :param l7rule (object): The L7 rule object. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def l7rule_update(self, old_l7rule, new_l7rule): + + """Updates an L7 rule. + + :param old_l7rule (object): The baseline L7 rule object. + :param new_l7rule (object): The updated L7 rule object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Flavor +------ + +Octavia flavors are defined in a separate `flavor specification`_. +Support for flavors will be provided through two provider driver interfaces, +one to query supported flavor metadata keys and another to validate that a +flavor is supported. Both functions are synchronous. + +.. _flavor specification: ../specs/version1.0/flavors.html + +get_supported_flavor_metadata +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Retrieves a dictionary of supported flavor keys and their description. +For example: + +.. code-block:: python + + {"topology": "The load balancer topology for the flavor. One of: SINGLE, ACTIVE_STANDBY", + "compute_flavor": "The compute driver flavor to use for the load balancer instances"} + +validate_flavor +^^^^^^^^^^^^^^^ + +Validates that the driver supports the flavor metadata dictionary. + +The validate_flavor method will be passed a flavor metadata dictionary that +the driver will validate. This is used when an operator uploads a new flavor +that applies to the driver. + +The validate_flavor method will either return or raise a +``UnsupportedOptionError`` exception. + +Following are interface definitions for flavor support: + +.. code-block:: python + + def get_supported_flavor_metadata(): + """Returns a dictionary of flavor metadata keys supported by this driver. + + The returned dictionary will include key/value pairs, 'name' and + 'description.' + + :returns: The flavor metadata dictionary + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support flavors. + """ + raise NotImplementedError() + +.. 
code-block:: python + + def validate_flavor(flavor_metadata): + """Validates if driver can support flavor as defined in flavor_metadata. + + :param flavor_metadata (dict): Dictionary with flavor metadata. + :return: Nothing if the flavor is valid and supported. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support flavors. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Availability Zone +----------------- + +Octavia availability zones have no explicit spec, but are modeled closely +after the existing `flavor specification`_. +Support for availability_zones will be provided through two provider driver +interfaces, one to query supported availability zone metadata keys and another +to validate that an availability zone is supported. Both functions are +synchronous. + +get_supported_availability_zone_metadata +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Retrieves a dictionary of supported availability zone keys and their +description. For example: + +.. code-block:: python + + {"compute_zone": "The compute availability zone to use for this loadbalancer.", + "volume_zone": "The volume availability zone to use for this loadbalancer.", + "management_network": "The management network ID for the loadbalancer.", + "valid_vip_networks": "List of network IDs that are allowed for VIP use. This overrides/replaces the list of allowed networks configured in `octavia.conf`."} + +validate_availability_zone +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Validates that the driver supports the availability zone metadata dictionary. + +The validate_availability_zone method will be passed an availability zone +metadata dictionary that the driver will validate. This is used when an +operator uploads a new availability zone that applies to the driver. + +The validate_availability_zone method will either return or raise a +``UnsupportedOptionError`` exception. + +Following are interface definitions for availability zone support: + +.. code-block:: python + + def get_supported_availability_zone_metadata(): + """Returns a dict of supported availability zone metadata keys. + + The returned dictionary will include key/value pairs, 'name' and + 'description.' + + :returns: The availability zone metadata dictionary + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support AZs. + """ + raise NotImplementedError() + +.. code-block:: python + + def validate_availability_zone(availability_zone_metadata): + """Validates if driver can support the availability zone. + + :param availability_zone_metadata: Dictionary with az metadata. + :type availability_zone_metadata: dict + :return: Nothing if the availability zone is valid and supported. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support availability + zones. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Exception Model +--------------- + +DriverError +^^^^^^^^^^^ + +This is a catch all exception that drivers can return if there is an +unexpected error. An example might be a delete call for a load balancer the +driver does not recognize. This exception includes two strings: The user fault +string and the optional operator fault string. 
+
+.. code-block:: python
+
+    class DriverError(Exception):
+        user_fault_string = _("An unknown driver error occurred.")
+        operator_fault_string = _("An unknown driver error occurred.")
+
+        def __init__(self, *args, **kwargs):
+            self.user_fault_string = kwargs.pop('user_fault_string',
+                                                self.user_fault_string)
+            self.operator_fault_string = kwargs.pop('operator_fault_string',
+                                                    self.operator_fault_string)
+
+            super(DriverError, self).__init__(*args, **kwargs)
+
+NotImplementedError
+^^^^^^^^^^^^^^^^^^^
+
+Driver implementations may not support all operations, and are free to reject
+a request. If the driver does not implement an API function, the driver will
+raise a NotImplementedError exception.
+
+.. code-block:: python
+
+    class NotImplementedError(Exception):
+        user_fault_string = _("A feature is not implemented by this driver.")
+        operator_fault_string = _("A feature is not implemented by this driver.")
+
+        def __init__(self, *args, **kwargs):
+            self.user_fault_string = kwargs.pop('user_fault_string',
+                                                self.user_fault_string)
+            self.operator_fault_string = kwargs.pop('operator_fault_string',
+                                                    self.operator_fault_string)
+
+            super(NotImplementedError, self).__init__(*args, **kwargs)
+
+UnsupportedOptionError
+^^^^^^^^^^^^^^^^^^^^^^
+
+Provider drivers will validate that they can complete the request -- that all
+options are supported by the driver. If the request fails validation, drivers
+will raise an UnsupportedOptionError exception. For example, if a driver does
+not support a flavor passed as an option to load balancer create(), the driver
+will raise an UnsupportedOptionError and include a message parameter providing
+an explanation of the failure.
+
+.. code-block:: python
+
+    class UnsupportedOptionError(Exception):
+        user_fault_string = _("A specified option is not supported by this driver.")
+        operator_fault_string = _("A specified option is not supported by this driver.")
+
+        def __init__(self, *args, **kwargs):
+            self.user_fault_string = kwargs.pop('user_fault_string',
+                                                self.user_fault_string)
+            self.operator_fault_string = kwargs.pop('operator_fault_string',
+                                                    self.operator_fault_string)
+
+            super(UnsupportedOptionError, self).__init__(*args, **kwargs)
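+
+As a usage sketch, a driver rejecting an option it cannot implement might do
+the following during a create call. The helper methods here are hypothetical;
+``loadbalancer_create`` is the driver interface method defined earlier in this
+specification.
+
+.. code-block:: python
+
+    def loadbalancer_create(self, loadbalancer):
+        # Validate before accepting the request.
+        if loadbalancer.flavor and not self._flavor_is_supported(
+                loadbalancer.flavor):
+            raise UnsupportedOptionError(
+                user_fault_string='This provider does not support the '
+                                  'requested flavor.',
+                operator_fault_string='Load balancer %s requested an '
+                                      'unsupported flavor.' %
+                                      loadbalancer.loadbalancer_id)
+        # Accept the request and provision asynchronously.
+        self._start_provisioning(loadbalancer)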
+
+Driver Support Library
+======================
+
+Provider drivers need support for updating provisioning status, operating
+status, and statistics. Drivers will not use database operations directly;
+instead, they will call back to octavia-lib using a new API.
+
+.. warning::
+
+   The methods listed here are the only callable methods for drivers.
+   All other interfaces are not considered stable or safe for drivers to
+   access. See `Stable Provider Driver Interface`_ for a list of acceptable
+   APIs for provider driver use.
+
+.. warning::
+
+   This library is interim and will be removed when the driver support
+   endpoint is made available, at which point drivers will not import any
+   code from octavia-lib.
+
+Update Provisioning and Operating Status API
+--------------------------------------------
+
+The update status API defined below can be used by provider drivers
+to update the provisioning and/or operating status of Octavia resources
+(load balancer, listener, pool, member, health monitor, L7 policy, or L7
+rule).
+
+For the following status API, valid values for the provisioning status
+and operating status parameters are as defined by the Octavia status codes.
+If an existing object is not included in the input parameter, its status
+remains unchanged.
+
+.. note::
+
+   If the driver-agent exceeds its configured `status_max_processes` this
+   call may block while it waits for a status process slot to become
+   available. The operator will be notified if the driver-agent approaches
+   or reaches the configured limit.
+
+provisioning_status: the status associated with the lifecycle of the
+resource. See `Octavia Provisioning Status Codes `_.
+
+operating_status: the observed status of the resource. See `Octavia
+Operating Status Codes `_.
+
+The dictionary takes this form:
+
+.. code-block:: python
+
+    { "loadbalancers": [{"id": "123",
+                         "provisioning_status": "ACTIVE",
+                         "operating_status": "ONLINE"},...],
+      "healthmonitors": [],
+      "l7policies": [],
+      "l7rules": [],
+      "listeners": [],
+      "members": [],
+      "pools": []
+    }
+
+.. code-block:: python
+
+    def update_loadbalancer_status(status):
+        """Update load balancer status.
+
+        :param status (dict): dictionary defining the provisioning status and
+            operating status for load balancer objects, including pools,
+            members, listeners, L7 policies, and L7 rules.
+        :raises: UpdateStatusError
+        :returns: None
+        """
+
+Update Statistics API
+---------------------
+
+Provider drivers can update statistics for listeners using the following API.
+Similar to the status function above, a single dictionary
+with multiple listener statistics is used to update statistics in a single
+call. If an existing listener is not included, the statistics for that object
+remain unchanged.
+
+.. note::
+
+   If the driver-agent exceeds its configured `stats_max_processes` this call
+   may block while it waits for a stats process slot to become available.
+   The operator will be notified if the driver-agent approaches or reaches
+   the configured limit.
+
+The general form of the input dictionary is a list of listener statistics:
+
+.. code-block:: python
+
+    { "listeners": [{"id": "123",
+                     "active_connections": 12,
+                     "bytes_in": 238908,
+                     "bytes_out": 290234,
+                     "request_errors": 0,
+                     "total_connections": 3530},...]
+    }
+
+.. code-block:: python
+
+    def update_listener_statistics(statistics):
+        """Update listener statistics.
+
+        :param statistics (dict): Statistics for listeners:
+            id (string): ID of the listener.
+            active_connections (int): Number of currently active connections.
+            bytes_in (int): Total bytes received.
+            bytes_out (int): Total bytes sent.
+            request_errors (int): Total requests not fulfilled.
+            total_connections (int): The total connections handled.
+        :raises: UpdateStatisticsError
+        :returns: None
+        """
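+
+As a usage sketch, a driver that has collected listener counters could report
+them as follows. This assumes the ``DriverLibrary`` helper from octavia-lib,
+which implements the callbacks described in this section; the counter values
+are illustrative.
+
+.. code-block:: python
+
+    from octavia_lib.api.drivers import driver_lib
+
+    stats = {"listeners": [{"id": "123",
+                            "active_connections": 12,
+                            "bytes_in": 238908,
+                            "bytes_out": 290234,
+                            "request_errors": 0,
+                            "total_connections": 3530}]}
+
+    # Report the statistics back to Octavia in a single call.
+    driver_lib.DriverLibrary().update_listener_statistics(stats)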
+
+Get Resource Support
+--------------------
+
+Provider drivers may need to get information about an Octavia resource.
+As an example of its use, a provider driver may need to sync with Octavia,
+and therefore needs to fetch all of the Octavia resources it is responsible
+for managing. Provider drivers can use the existing Octavia API to get these
+resources. See the `Octavia API Reference `_.
+
+API Exception Model
+-------------------
+
+The driver support API will include the following exceptions:
+
+* UpdateStatusError
+* UpdateStatisticsError
+* DriverAgentNotFound
+* DriverAgentTimeout
+
+Each exception class will include a message field that describes the error and
+references to the failed record if available.
+
+.. code-block:: python
+
+    class UpdateStatusError(Exception):
+        fault_string = _("The status update had an unknown error.")
+        status_object = None
+        status_object_id = None
+        status_record = None
+
+        def __init__(self, *args, **kwargs):
+            self.fault_string = kwargs.pop('fault_string',
+                                           self.fault_string)
+            self.status_object = kwargs.pop('status_object', None)
+            self.status_object_id = kwargs.pop('status_object_id', None)
+            self.status_record = kwargs.pop('status_record', None)
+
+            super(UpdateStatusError, self).__init__(self.fault_string,
+                                                    *args, **kwargs)
+
+    class UpdateStatisticsError(Exception):
+        fault_string = _("The statistics update had an unknown error.")
+        stats_object = None
+        stats_object_id = None
+        stats_record = None
+
+        def __init__(self, *args, **kwargs):
+            self.fault_string = kwargs.pop('fault_string',
+                                           self.fault_string)
+            self.stats_object = kwargs.pop('stats_object', None)
+            self.stats_object_id = kwargs.pop('stats_object_id', None)
+            self.stats_record = kwargs.pop('stats_record', None)
+
+            super(UpdateStatisticsError, self).__init__(self.fault_string,
+                                                        *args, **kwargs)
+
+    class DriverAgentNotFound(Exception):
+        fault_string = _("The driver-agent process was not found or not ready.")
+
+        def __init__(self, *args, **kwargs):
+            self.fault_string = kwargs.pop('fault_string', self.fault_string)
+            super(DriverAgentNotFound, self).__init__(self.fault_string,
+                                                      *args, **kwargs)
+
+    class DriverAgentTimeout(Exception):
+        fault_string = _("The driver-agent timeout.")
+
+        def __init__(self, *args, **kwargs):
+            self.fault_string = kwargs.pop('fault_string', self.fault_string)
+            super(DriverAgentTimeout, self).__init__(self.fault_string,
+                                                     *args, **kwargs)
+
+Provider Agents
+===============
+
+Provider agents are long-running processes started by the Octavia driver-agent
+process at startup. They give a provider driver a long-running process that
+can handle periodic jobs for the provider driver or receive events from
+another provider agent. Provider agents are optional and not required for
+a successful Octavia provider driver.
+
+Provider agents have access to the same `Stable Provider Driver Interface`_
+as the provider driver. A provider agent must not access any other Octavia
+code.
+
+.. warning::
+
+   The methods listed in the `Driver Support Library`_ section are the only
+   Octavia callable methods for provider agents.
+   All other interfaces are not considered stable or safe for provider agents
+   to access. See `Stable Provider Driver Interface`_ for a list of
+   acceptable APIs for provider agent use.
+
+Declaring Your Provider Agent
+-----------------------------
+
+The Octavia driver-agent will use
+`stevedore `_ to load enabled
+provider agents at startup. Provider agents are enabled in the Octavia
+configuration file. Provider agents that are installed, but not enabled, will
+not be loaded. An example configuration file entry for a provider agent is:
+
+.. code-block:: INI
+
+    [driver_agent]
+    enabled_provider_agents = amphora_agent, noop_agent
+
+The provider agent name must match the provider agent name declared in your
+python setup tools entry point. For example:
+
+.. code-block:: python
+
+    octavia.driver_agent.provider_agents =
+        amphora_agent = octavia.api.drivers.amphora_driver.agent:AmphoraProviderAgent
+        noop_agent = octavia.api.drivers.noop_driver.agent:noop_provider_agent
+
+Provider Agent Method Invocation
+--------------------------------
+
+At startup of the Octavia driver-agent, the method defined in the entry point
+will be launched in its own `multiprocessing Process `_.
+
+Your provider agent method will be passed a `multiprocessing Event `_ that will
+be used to signal that the provider agent should shut down. When this event
+is "set", the provider agent should gracefully shut down. If the provider
+agent fails to exit within the Octavia configuration file setting
+"provider_agent_shutdown_timeout" period, the driver-agent will forcefully
+shut down the provider agent with a SIGKILL signal.
+
+Example Provider Agent Method
+-----------------------------
+
+If, for example, you declared a provider agent as "my_agent":
+
+.. code-block:: python
+
+    octavia.driver_agent.provider_agents =
+        my_agent = example_inc.drivers.my_driver.agent:my_provider_agent
+
+The signature of your "my_provider_agent" method would be:
+
+.. code-block:: python
+
+    def my_provider_agent(exit_event):
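+
+A minimal implementation that performs periodic work and honors the shutdown
+event could look like this; ``run_periodic_jobs`` is a placeholder for
+driver-specific work, and the ten second interval is arbitrary:
+
+.. code-block:: python
+
+    def my_provider_agent(exit_event):
+        while not exit_event.is_set():
+            run_periodic_jobs()
+            # Sleep between iterations, but wake immediately on shutdown.
+            exit_event.wait(timeout=10)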
+
+Documenting the Driver
+======================
+
+Octavia provides two documents to let operators and users know about available
+drivers and their features.
+
+Available Provider Drivers
+--------------------------
+
+The :doc:`../../admin/providers/index` document provides administrators with a
+guide to the available Octavia provider drivers. Since provider drivers are
+not included in the Octavia source repositories, this guide is an important
+tool for administrators to find your provider driver.
+
+You can submit information for your provider driver by submitting a patch to
+the Octavia documentation following the normal OpenStack process.
+
+See the
+`OpenStack Contributor Guide `_
+for more information on submitting a patch to OpenStack.
+
+Octavia Provider Feature Matrix
+-------------------------------
+
+The Octavia documentation includes a
+:doc:`../../user/feature-classification/index` that informs users of which
+Octavia features are supported by each provider driver.
+
+The feature matrices are built using the `Oslo sphinx-feature-classification
+`_ library.
+This allows a simple INI file format for describing the capabilities of an
+Octavia provider driver.
+
+Each driver should define a ``[driver.<driver name>]`` section and then add a
+line to each feature specifying the level of support the provider driver
+provides for the feature.
+
+For example, the Amphora driver support for "admin_state_up" would add the
+following to the feature-matrix-lb.ini file.
+
+.. code-block:: INI
+
+    [driver.amphora]
+    title=Amphora Provider
+    link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html
+
+    [operation.admin_state_up]
+    ...
+    driver.amphora=complete
+
+Valid driver feature support statuses are:
+
+``complete``
+  Fully implemented, expected to work at all times.
+
+``partial``
+  Implemented, but with caveats about when it will work.
+
+``missing``
+  Not implemented at all.
+
+You can also optionally provide additional, provider driver specific, notes
+for users by defining a "driver-notes.<driver name>" line.
+
+.. code-block:: INI
+
+    [operation.admin_state_up]
+    ...
+    driver.amphora=complete
+    driver-notes.amphora=The Amphora driver fully supports admin_state_up.
+
+Driver notes are highly recommended when a provider driver declares a
+``partial`` status.
diff --git a/doc/source/contributor/guides/remote-debugger.rst b/doc/source/contributor/guides/remote-debugger.rst
new file mode 100644
index 0000000000..d742f5bb16
--- /dev/null
+++ b/doc/source/contributor/guides/remote-debugger.rst
@@ -0,0 +1,181 @@
+..
+    Licensed under the Apache License, Version 2.0 (the "License"); you may
+    not use this file except in compliance with the License. You may obtain
+    a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+    License for the specific language governing permissions and limitations
+    under the License.
+
+======================
+Debugging Octavia code
+======================
+
+This document describes how to set up and debug Octavia code using your
+favorite IDE (e.g. PyCharm, Visual Studio Code).
+
+Prerequisites
+=============
+
+* Octavia installed.
+* IDE installed and Octavia added as a project.
+
+Setup
+=====
+
+Both PyCharm Professional edition and Visual Studio Code offer remote
+debugging features that can be used for debugging Octavia components.
+
+.. note:: Before running a new Octavia process you should
+   make sure that processes of that component are no longer running.
+   You can use ``ps aux`` to verify that.
+
+PyCharm
+-------
+
+.. note:: Remote debugging is a *PyCharm Professional* feature.
+
+PyCharm offers two ways of debugging remotely [1]_. In general, the
+"through a remote interpreter" approach is more convenient and should
+be preferred.
+On the other hand, the "Python debug server" approach is the only
+one that works for debugging the API component (because of uWSGI).
+Therefore, this guide will explain both approaches.
+
+Using a remote interpreter
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+First, configure a remote interpreter for the VM as documented in [2]_.
+Adding a deployment configuration with correct path mappings allows
+PyCharm to upload local changes to the remote host automatically.
+
+Then, create a new *Run/Debug Configuration* by selecting
+*Run -> Edit Configurations...* in the menu bar.
+Add a new configuration and make sure
+*Module name* is selected instead of *Script path*. Enter the module name of
+the Octavia component you want to debug, for instance
+``octavia.cmd.octavia_worker``. Additionally, add
+``--config-file /etc/octavia/octavia.conf`` to *Parameters*.
+Then check whether the right remote Python interpreter
+is selected. After you confirm the settings by clicking *OK* you should be
+able to run/debug the Octavia component with that new run configuration.
+
+Using a Python debug server
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As mentioned above, the "remote interpreter" approach does not work with
+*Octavia-API* because that process is managed by uWSGI. Here the
+Python debug server approach [3]_ needs to be used. You will need to
+install the ``pydevd-pycharm`` package via ``pip`` as shown when creating the
+run/debug configuration. However, it is not necessary to modify the Python
+code in any way because Octavia code is already set up for it to work.
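+
+Conceptually, when ``DEBUGGER_TYPE`` is set to ``pydev``, the startup code
+performs the equivalent of the following call. This is a sketch of the
+mechanism, not the exact Octavia implementation; ``pydevd_pycharm`` is the
+package installed above.
+
+.. code-block:: python
+
+    import os
+
+    import pydevd_pycharm
+
+    # Connect back to the debug server running inside the IDE.
+    pydevd_pycharm.settrace(os.environ['DEBUGGER_HOST'],
+                            port=int(os.environ['DEBUGGER_PORT']),
+                            stdoutToServer=True, stderrToServer=True)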
+Export *DEBUGGER_TYPE*, *DEBUGGER_HOST* and *DEBUGGER_PORT* (host and port of
+the system running the IDE, respectively), and start the Octavia service you
+want to debug. For example, to debug the Octavia API service::
+
+    $ export DEBUGGER_TYPE=pydev
+    $ export DEBUGGER_HOST=192.168.121.1
+    $ export DEBUGGER_PORT=5678
+    $ uwsgi --ini /etc/octavia/octavia-uwsgi.ini
+
+.. note:: You must run the Octavia/uWSGI command directly. Starting it
+   via ``systemctl`` will not work with the debug server.
+
+Visual Studio Code
+------------------
+
+While PyCharm synchronizes local changes with
+the remote host, Visual Studio Code works on the remote environment directly
+through an SSH tunnel. That means that you don't even need to have the
+source code on your local machine in order to debug code on the remote host.
+
+Detailed information about remote debugging over SSH can be found
+in the official Visual Studio Code documentation [4]_.
+This guide will focus on the essential steps only.
+
+Using the remote development extension pack
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. note:: This approach will not work with the Octavia API component
+   because that component is managed by uWSGI.
+
+After installing the *Visual Studio Code Remote Development Extension Pack*
+[5]_ you need to open the *Remote Explorer* view and connect to the
+SSH target. This will open a new window, and on the bottom left of that
+window you should see *SSH:* followed by the SSH host name. In the *Explorer*
+view you can then choose to either clone a repository or open an
+existing folder on the remote. For instance, when working with
+devstack you might want to open */opt/stack* or */opt/stack/octavia*.
+
+Next, you should configure the *launch.json*, which contains the run
+configurations. Use the following template and adjust it to your needs::
+
+    {
+        // Use IntelliSense to learn about possible attributes.
+        // Hover to view descriptions of existing attributes.
+        // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+        "version": "0.2.0",
+        "configurations": [
+            {
+                "name": "Octavia Worker",
+                "type": "python",
+                "request": "launch",
+                "module": "octavia.cmd.octavia_worker",
+                "args": ["--config-file", "/etc/octavia/octavia.conf"],
+                "justMyCode": true
+            }
+        ]
+    }
+
+Make sure that the correct Python interpreter is selected in the status bar.
+In a devstack environment the global Python interpreter */usr/bin/python3*
+should be the correct one. Now you can start debugging by pressing *F5*.
+
+.. note:: When running this the first time Visual Studio Code might ask you
+   to install the Python debugger extension on the remote, which you must
+   do. Simply follow the steps shown in the IDE.
+
+Using ptvsd
+~~~~~~~~~~~
+
+.. warning:: ptvsd has been deprecated and replaced by debugpy. However,
+   debugpy doesn't seem to work with uWSGI processes. The information in
+   this section might be outdated.
+
+Another example is debugging the Octavia API service with the ptvsd debugger:
+
+::
+
+    $ export DEBUGGER_TYPE=ptvsd
+    $ export DEBUGGER_HOST=192.168.121.1
+    $ export DEBUGGER_PORT=5678
+    $ /usr/bin/uwsgi --ini /etc/octavia/octavia-uwsgi.ini -p 1
+
+The service will connect to your IDE, at which point remote debugging is
+active. Resume the program on the debugger to continue with the initialization
+of the service. At this point, the service should be operational and you can
+start debugging.
+
+Troubleshooting
+===============
+
+Remote process does not connect with local PyCharm debug server
+---------------------------------------------------------------
+
+#. Check if the debug server is still running.
+#. Check if the values of the exported *DEBUGGER_* variables above are
+   correct.
+#. Check if the remote machine can reach the port of the debug server::
+
+       $ nc -zvw10 $DEBUGGER_HOST $DEBUGGER_PORT
+
+   If it cannot connect, the connection may be blocked by a firewall.
+
+.. [1] https://www.jetbrains.com/help/pycharm/remote-debugging-with-product.html
+.. [2] https://www.jetbrains.com/help/pycharm/remote-debugging-with-product.html#remote-interpreter
+.. [3] https://www.jetbrains.com/help/pycharm/remote-debugging-with-product.html#remote-debug-config
+.. [4] https://code.visualstudio.com/docs/remote/ssh
+.. [5] https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.vscode-remote-extensionpack
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
new file mode 100644
index 0000000000..96bc7660a3
--- /dev/null
+++ b/doc/source/contributor/index.rst
@@ -0,0 +1,126 @@
+===================
+Octavia Contributor
+===================
+
+Contributor Guidelines
+----------------------
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   contributing.rst
+   CONSTITUTION.rst
+   HACKING.rst
+
+Contributor Reference
+---------------------
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   guides/*
+   devref/*
+
+
+Internal APIs
+-------------
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   api/*
+
+.. Note:: The documents listed below are design documents and specifications
+          created and approved at a previous point in time. The code base and
+          current functionality may deviate from these original documents.
+          Please see the Octavia documentation for the current feature
+          details.
+
+Design Documentation
+--------------------
+
+Version 0.5 (liberty)
+`````````````````````
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   design/version0.5/*
+
+Project Specifications
+----------------------
+
+Version 0.5 (liberty)
+`````````````````````
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   specs/version0.5/*
+
+Version 0.8 (mitaka)
+````````````````````
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   specs/version0.8/*
+
+Version 0.9 (newton)
+````````````````````
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   specs/version0.9/*
+
+Version 1.0 (pike)
+``````````````````
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   specs/version1.0/*
+
+Version 1.1 (queens)
+````````````````````
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   specs/version1.1/*
+
+Version 14.0 (caracal)
+``````````````````````
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   specs/version14.0/*
+
+Version 15.0 (Dalmatian)
+````````````````````````
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   specs/version15.0/*
+
+.. only:: latex
+
+   Module Reference
+   ----------------
+
+   .. toctree::
+      :hidden:
+
+      modules/modules
+
+.. only:: html
+
+   Indices and Search
+   ------------------
+
+   * :ref:`genindex`
+   * :ref:`modindex`
+   * :ref:`search`
diff --git a/doc/source/contributor/specs b/doc/source/contributor/specs
new file mode 120000
index 0000000000..0a70be60cd
--- /dev/null
+++ b/doc/source/contributor/specs
@@ -0,0 +1 @@
+../../../specs
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000000..028067fbda
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,33 @@
+.. octavia documentation master file
+
+=====================
+Octavia Documentation
+=====================
+
+Welcome to the OpenStack Octavia project documentation. Octavia brings
+network load balancing to OpenStack.
+
+See :doc:`reference/introduction` for an overview of Octavia.
+
+For information on what is new see the `Octavia Release Notes `_.
+
+.. only:: html
+
+   To align with the overall OpenStack documentation, the Octavia
+   documentation is grouped into the following topic areas.
+
+.. toctree::
+   :maxdepth: 1
+
+   admin/index.rst
+   Octavia API Reference
+   cli/index.rst
+   configuration/index.rst
+   contributor/index.rst
+   install/index.rst
+   reference/index.rst
+   user/index.rst
+
+.. only:: html
+
+   :ref:`search`
diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst
new file mode 100644
index 0000000000..75eaaab7f9
--- /dev/null
+++ b/doc/source/install/index.rst
@@ -0,0 +1,9 @@
+====================
+Octavia Installation
+====================
+
+.. toctree::
+   :maxdepth: 1
+
+   Installation overview guide <../contributor/guides/dev-quick-start>
+   install.rst
diff --git a/doc/source/install/install-amphorav2.rst b/doc/source/install/install-amphorav2.rst
new file mode 100644
index 0000000000..1c60be2c7c
--- /dev/null
+++ b/doc/source/install/install-amphorav2.rst
@@ -0,0 +1,98 @@
+.. _install-amphorav2:
+
+Additional configuration steps to configure amphorav2 provider
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The amphorav2 provider driver improves control plane resiliency. Should a
+control plane host go down during a load balancer provisioning operation, an
+alternate controller can resume the in-process provisioning and complete the
+request. This solves the issue of resources stuck in PENDING_* states by
+writing information about task states to a persistent backend and monitoring
+job claims via a jobboard.
+
+If you would like to use the amphorav2 provider with a jobboard-based
+controller for the load-balancer service, the following additional steps are
+required.
+
+This provider driver can also run without jobboard and its dependencies (the
+extra database, Redis/ZooKeeper). This is the default setting while jobboard
+remains an experimental feature.
+
+
+Prerequisites
+-------------
+
+The amphorav2 provider requires the creation of an additional database,
+``octavia_persistence``, to store information about the state of tasks and
+the progress of their execution.
+To monitor progress on taskflow jobs, the amphorav2 provider also uses a
+jobboard. Either a Redis or a ZooKeeper key-value store can serve as the
+jobboard backend; operators should choose whichever is preferable for their
+specific cloud. The default is Redis.
+The key-value store client libraries should be installed with the
+``[zookeeper]`` or ``[redis]`` extras during installation of the Octavia
+packages.
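+
+For example, assuming a pip-based installation and the Redis backend, the
+client library can be pulled in via the corresponding extra:
+
+.. code-block:: console
+
+   $ pip install 'octavia[redis]'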
+
+1. To create the database, complete these steps:
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        # mysql
+
+   * Create the ``octavia_persistence`` database:
+
+     .. code-block:: console
+
+        CREATE DATABASE octavia_persistence;
+
+   * Grant proper access to the ``octavia_persistence`` database:
+
+     .. code-block:: console
+
+        GRANT ALL PRIVILEGES ON octavia_persistence.* TO 'octavia'@'localhost' \
+        IDENTIFIED BY 'OCTAVIA_DBPASS';
+        GRANT ALL PRIVILEGES ON octavia_persistence.* TO 'octavia'@'%' \
+        IDENTIFIED BY 'OCTAVIA_DBPASS';
+
+     Replace OCTAVIA_DBPASS with a suitable password.
+
+
+2. Install the desired key-value backend (Redis or ZooKeeper).
+
+Additional configuration to octavia components
+----------------------------------------------
+
+1. Edit the ``[task_flow]`` section of the ``/etc/octavia/octavia.conf``
+   file:
+
+   * Configure database access for the persistence backend:
+
+     .. code-block:: ini
+
+        [task_flow]
+        persistence_connection = mysql+pymysql://octavia:OCTAVIA_DBPASS@controller/octavia_persistence
+
+     Replace OCTAVIA_DBPASS with the password you chose for the Octavia
+     databases.
+
+   * Set the desired jobboard backend and its configuration:
+
+     .. code-block:: ini
+
+        [task_flow]
+        jobboard_enabled = True
+        jobboard_backend_driver = 'redis_taskflow_driver'
+        jobboard_backend_hosts = KEYVALUE_HOST_IPS
+        jobboard_backend_port = KEYVALUE_PORT
+        jobboard_backend_password = OCTAVIA_JOBBOARDPASS
+        jobboard_backend_namespace = 'octavia_jobboard'
+
+     Replace OCTAVIA_JOBBOARDPASS with the password you chose for the Octavia
+     key-value store.
+     Replace KEYVALUE_HOST_IPS and KEYVALUE_PORT with the IP addresses and
+     port used by the chosen key-value store.
+
+2. Populate the octavia database:
+
+   .. code-block:: console
+
+      # octavia-db-manage --config-file /etc/octavia/octavia.conf upgrade_persistence
diff --git a/doc/source/install/install-osa.rst b/doc/source/install/install-osa.rst
new file mode 100644
index 0000000000..469e72ff0c
--- /dev/null
+++ b/doc/source/install/install-osa.rst
@@ -0,0 +1,7 @@
+.. _install-osa:
+
+Deploying with OpenStack-Ansible
+--------------------------------
+You can also deploy and set up Octavia using `OpenStack-Ansible `_ by following
+the `Octavia role for OpenStack-Ansible `_,
+which installs and configures Octavia as part of your OpenStack deployment.
diff --git a/doc/source/install/install-ubuntu.rst b/doc/source/install/install-ubuntu.rst
new file mode 100644
index 0000000000..fd860d5944
--- /dev/null
+++ b/doc/source/install/install-ubuntu.rst
@@ -0,0 +1,497 @@
+.. _install-ubuntu:
+
+Install and configure for Ubuntu
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Load-balancer
+service for Ubuntu 18.04 (LTS).
+
+Prerequisites
+-------------
+
+Before you install and configure the service, you must create a database,
+service credentials, and API endpoints.
+
+1. To create the database, complete these steps:
+
+   * Use the database access client to connect to the database
+     server as the ``root`` user:
+
+     .. code-block:: console
+
+        # mysql
+
+   * Create the ``octavia`` database:
+
+     .. code-block:: console
+
+        CREATE DATABASE octavia;
+
+   * Grant proper access to the ``octavia`` database:
+
+     .. code-block:: console
+
+        GRANT ALL PRIVILEGES ON octavia.* TO 'octavia'@'localhost' \
+        IDENTIFIED BY 'OCTAVIA_DBPASS';
+        GRANT ALL PRIVILEGES ON octavia.* TO 'octavia'@'%' \
+        IDENTIFIED BY 'OCTAVIA_DBPASS';
+
+     Replace OCTAVIA_DBPASS with a suitable password.
+
+   * Exit the database access client.
+
+     .. code-block:: console
+
+        exit;
+
+2. Source the ``admin`` credentials to gain access to admin-only CLI
+   commands:
+
+   .. code-block:: console
+
+      $ . admin-openrc
+
+3. To create the Octavia service credentials, complete these steps:
+
+   * Create the ``octavia`` user:
+
+     .. code-block:: console
+
+        $ openstack user create --domain default --password-prompt octavia
+        User Password:
+        Repeat User Password:
+        +---------------------+----------------------------------+
+        | Field               | Value                            |
+        +---------------------+----------------------------------+
+        | domain_id           | default                          |
+        | enabled             | True                             |
+        | id                  | b18ee38e06034b748141beda8fc8bfad |
+        | name                | octavia                          |
+        | options             | {}                               |
+        | password_expires_at | None                             |
+        +---------------------+----------------------------------+
+
+   * Add the ``admin`` role to the ``octavia`` user:
+
+     .. code-block:: console
+
+        $ openstack role add --project service --user octavia admin
+
+     .. note::
+
+        This command produces no output.
+
+     .. note::
+        The Octavia service does not require the full admin role. Details of
+        how to run Octavia without the admin role will come in a future
+        version of this document.
+
+   * Create the octavia service entities:
+
+     .. code-block:: console
+
+        $ openstack service create --name octavia --description "OpenStack Octavia" load-balancer
+        +-------------+----------------------------------+
+        | Field       | Value                            |
+        +-------------+----------------------------------+
+        | description | OpenStack Octavia                |
+        | enabled     | True                             |
+        | id          | d854f6fff0a64f77bda8003c8dedfada |
+        | name        | octavia                          |
+        | type        | load-balancer                    |
+        +-------------+----------------------------------+
+
+4. Create the Load-balancer service API endpoints:
+
+   .. code-block:: console
+
+      $ openstack endpoint create --region RegionOne \
+        load-balancer public http://controller:9876
+      +--------------+----------------------------------+
+      | Field        | Value                            |
+      +--------------+----------------------------------+
+      | enabled      | True                             |
+      | id           | 47cf883de46242c39f147c52f2958ebf |
+      | interface    | public                           |
+      | region       | RegionOne                        |
+      | region_id    | RegionOne                        |
+      | service_id   | d854f6fff0a64f77bda8003c8dedfada |
+      | service_name | octavia                          |
+      | service_type | load-balancer                    |
+      | url          | http://controller:9876           |
+      +--------------+----------------------------------+
+
+      $ openstack endpoint create --region RegionOne \
+        load-balancer internal http://controller:9876
+      +--------------+----------------------------------+
+      | Field        | Value                            |
+      +--------------+----------------------------------+
+      | enabled      | True                             |
+      | id           | 225aef8465ef4df48a341aaaf2b0a390 |
+      | interface    | internal                         |
+      | region       | RegionOne                        |
+      | region_id    | RegionOne                        |
+      | service_id   | d854f6fff0a64f77bda8003c8dedfada |
+      | service_name | octavia                          |
+      | service_type | load-balancer                    |
+      | url          | http://controller:9876           |
+      +--------------+----------------------------------+
+
+      $ openstack endpoint create --region RegionOne \
+        load-balancer admin http://controller:9876
+      +--------------+----------------------------------+
+      | Field        | Value                            |
+      +--------------+----------------------------------+
+      | enabled      | True                             |
+      | id           | 375eb5057fb546edbdf3ee4866179672 |
+      | interface    | admin                            |
+      | region       | RegionOne                        |
+      | region_id    | RegionOne                        |
+      | service_id   | d854f6fff0a64f77bda8003c8dedfada |
+      | service_name | octavia                          |
+      | service_type | load-balancer                    |
+      | url          | http://controller:9876           |
+      +--------------+----------------------------------+
+
+5. Create the octavia-openrc file:
+
+   .. code-block:: console
+
+      cat << EOF >> $HOME/octavia-openrc
+      export OS_PROJECT_DOMAIN_NAME=Default
+      export OS_USER_DOMAIN_NAME=Default
+      export OS_PROJECT_NAME=service
+      export OS_USERNAME=octavia
+      export OS_PASSWORD=OCTAVIA_PASS
+      export OS_AUTH_URL=http://controller:5000
+      export OS_IDENTITY_API_VERSION=3
+      export OS_IMAGE_API_VERSION=2
+      export OS_VOLUME_API_VERSION=3
+      EOF
+
+   Replace OCTAVIA_PASS with the password you chose for the octavia user in
+   the Identity service.
+
+6. Source the ``octavia`` credentials to gain access to octavia CLI
+   commands:
+
+   .. code-block:: console
+
+      $ . $HOME/octavia-openrc
+
+7. Create the amphora image.
+
+   To create the amphora image, please refer to the
+   `Building Octavia Amphora Images `_.
+
+8. Upload the amphora image:
+
+   .. code-block:: console
+
+      $ openstack image create --disk-format qcow2 --container-format bare \
+        --private --tag amphora \
+        --file amphora-x64-haproxy
+
+9. Create a flavor for the amphora image:
+
+   .. code-block:: console
+
+      $ openstack flavor create --id 200 --vcpus 1 --ram 1024 \
+        --disk 2 "amphora" --private
+
+Install and configure components
+--------------------------------
+
+1. Install the packages:
+
+   .. code-block:: console
+
+      # apt install octavia-api octavia-health-manager octavia-housekeeping \
+        octavia-worker python3-octavia python3-octaviaclient
+
+   If the octavia-common and octavia-api packages ask you to configure,
+   choose No.
+
+2. Create the certificates:
+
+   .. code-block:: console
+
+      $ git clone https://opendev.org/openstack/octavia.git
+      $ cd octavia/bin/
+      $ source create_dual_intermediate_CA.sh
+      $ sudo mkdir -p /etc/octavia/certs/private
+      $ sudo chmod 755 /etc/octavia -R
+      $ sudo cp -p etc/octavia/certs/server_ca.cert.pem /etc/octavia/certs
+      $ sudo cp -p etc/octavia/certs/server_ca-chain.cert.pem /etc/octavia/certs
+      $ sudo cp -p etc/octavia/certs/server_ca.key.pem /etc/octavia/certs/private
+      $ sudo cp -p etc/octavia/certs/client_ca.cert.pem /etc/octavia/certs
+      $ sudo cp -p etc/octavia/certs/client.cert-and-key.pem /etc/octavia/certs/private
+
+   For a production environment, please refer to the
+   `Octavia Certificate Configuration Guide `_.
+
+3. Source the ``octavia`` credentials to gain access to octavia CLI
+   commands:
+
+   .. code-block:: console
+
+      $ . octavia-openrc
+
+4. Create security groups and their rules:
+
+   .. code-block:: console
+
+      $ openstack security group create lb-mgmt-sec-grp
+      $ openstack security group rule create --protocol icmp lb-mgmt-sec-grp
+      $ openstack security group rule create --protocol tcp --dst-port 22 lb-mgmt-sec-grp
+      $ openstack security group rule create --protocol tcp --dst-port 9443 lb-mgmt-sec-grp
+      $ openstack security group create lb-health-mgr-sec-grp
+      $ openstack security group rule create --protocol udp --dst-port 5555 lb-health-mgr-sec-grp
+
+5. Create a key pair for logging in to the amphora instance:
+
+   .. code-block:: console
+
+      $ openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
+
+   .. note::
+
+      Check in advance whether the ``~/.ssh/id_rsa.pub`` file exists.
+      If the file does not exist, run the ssh-keygen command to create it.
+
+6. Create the dhclient.conf file for dhclient:
+
+   .. code-block:: console
+
+      $ cd $HOME
+      $ sudo mkdir -m755 -p /etc/dhcp/octavia
+      $ sudo cp octavia/etc/dhcp/dhclient.conf /etc/dhcp/octavia
+
+7. Create a network:
+
+   .. note::
+      During the execution of the commands below, please save the values of
+      BRNAME and MGMT_PORT_MAC in a notepad for further reference.
+
+   .. code-block:: console
+
+      $ OCTAVIA_MGMT_SUBNET=172.16.0.0/12
+      $ OCTAVIA_MGMT_SUBNET_START=172.16.0.100
+      $ OCTAVIA_MGMT_SUBNET_END=172.16.31.254
+      $ OCTAVIA_MGMT_PORT_IP=172.16.0.2
+
+      $ openstack network create lb-mgmt-net
+      $ openstack subnet create --subnet-range $OCTAVIA_MGMT_SUBNET --allocation-pool \
+        start=$OCTAVIA_MGMT_SUBNET_START,end=$OCTAVIA_MGMT_SUBNET_END \
+        --network lb-mgmt-net lb-mgmt-subnet
+
+      $ SUBNET_ID=$(openstack subnet show lb-mgmt-subnet -f value -c id)
+      $ PORT_FIXED_IP="--fixed-ip subnet=$SUBNET_ID,ip-address=$OCTAVIA_MGMT_PORT_IP"
+
+      $ MGMT_PORT_ID=$(openstack port create --security-group \
+        lb-health-mgr-sec-grp --device-owner Octavia:health-mgr \
+        --host=$(hostname) -c id -f value --network lb-mgmt-net \
+        $PORT_FIXED_IP octavia-health-manager-listen-port)
+
+      $ MGMT_PORT_MAC=$(openstack port show -c mac_address -f value \
+        $MGMT_PORT_ID)
+
+      $ sudo ip link add o-hm0 type veth peer name o-bhm0
+      $ NETID=$(openstack network show lb-mgmt-net -c id -f value)
+      $ BRNAME=brq$(echo $NETID|cut -c 1-11)
+      $ sudo brctl addif $BRNAME o-bhm0
+      $ sudo ip link set o-bhm0 up
+
+      $ sudo ip link set dev o-hm0 address $MGMT_PORT_MAC
+      $ sudo iptables -I INPUT -i o-hm0 -p udp --dport 5555 -j ACCEPT
+      $ sudo dhclient -v o-hm0 -cf /etc/dhcp/octavia
+
+8. The settings below are required to recreate the veth pair after a host
+   reboot.
+
+   Edit the ``/etc/systemd/network/o-hm0.network`` file:
+
+   .. code-block:: ini
+
+      [Match]
+      Name=o-hm0
+
+      [Network]
+      DHCP=yes
+
+   Edit the ``/etc/systemd/system/octavia-interface.service`` file:
+
+   .. code-block:: ini
+
+      [Unit]
+      Description=Octavia Interface Creator
+      Requires=neutron-linuxbridge-agent.service
+      After=neutron-linuxbridge-agent.service
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=true
+      ExecStart=/opt/octavia-interface.sh start
+      ExecStop=/opt/octavia-interface.sh stop
+
+      [Install]
+      WantedBy=multi-user.target
+
+   Edit the ``/opt/octavia-interface.sh`` file:
+
+   .. code-block:: console
+
+      #!/bin/bash
+
+      set -ex
+
+      MAC=$MGMT_PORT_MAC
+      BRNAME=$BRNAME
+
+      if [ "$1" == "start" ]; then
+        ip link add o-hm0 type veth peer name o-bhm0
+        brctl addif $BRNAME o-bhm0
+        ip link set o-bhm0 up
+        ip link set dev o-hm0 address $MAC
+        ip link set o-hm0 up
+        iptables -I INPUT -i o-hm0 -p udp --dport 5555 -j ACCEPT
+      elif [ "$1" == "stop" ]; then
+        ip link del o-hm0
+      else
+        brctl show $BRNAME
+        ip a s dev o-hm0
+      fi
+
+   You need to substitute $MGMT_PORT_MAC and $BRNAME with the values in your
+   environment.
+
+9. Edit the ``/etc/octavia/octavia.conf`` file:
+
+   * In the ``[database]`` section, configure database access:
+
+     .. code-block:: ini
+
+        [database]
+        connection = mysql+pymysql://octavia:OCTAVIA_DBPASS@controller/octavia
+
+     Replace OCTAVIA_DBPASS with the password you chose for the Octavia
+     databases.
+
+   * In the ``[DEFAULT]`` section, configure the transport URL for the
+     RabbitMQ message broker:
+
+     .. code-block:: ini
+
+        [DEFAULT]
+        transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+     Replace RABBIT_PASS with the password you chose for the openstack
+     account in RabbitMQ.
+
+   * In the ``[oslo_messaging]`` section, configure the topic name:
+
+     .. code-block:: ini
+
+        [oslo_messaging]
+        ...
+        topic = octavia_prov
+
+   * In the ``[api_settings]`` section, configure the host IP and port to
+     bind to:
+
+     .. code-block:: ini
+
+        [api_settings]
+        bind_host = 0.0.0.0
+        bind_port = 9876
+
+   * In the ``[keystone_authtoken]`` section, configure Identity service
+     access:
+
+     .. code-block:: ini
+
+        [keystone_authtoken]
+        www_authenticate_uri = http://controller:5000
+        auth_url = http://controller:5000
+        memcached_servers = controller:11211
+        auth_type = password
+        project_domain_name = Default
+        user_domain_name = Default
+        project_name = service
+        username = octavia
+        password = OCTAVIA_PASS
+
+     Replace OCTAVIA_PASS with the password you chose for the octavia user in
+     the Identity service.
+
+   * In the ``[service_auth]`` section, configure credentials for using other
+     OpenStack services:
+
+     .. code-block:: ini
+
+        [service_auth]
+        auth_url = http://controller:5000
+        memcached_servers = controller:11211
+        auth_type = password
+        project_domain_name = Default
+        user_domain_name = Default
+        project_name = service
+        username = octavia
+        password = OCTAVIA_PASS
+
+     Replace OCTAVIA_PASS with the password you chose for the octavia user in
+     the Identity service.
+
+   * In the ``[certificates]`` section, configure the absolute paths to the
+     CA certificate and the private key for signing, and the passphrases:
+
+     .. code-block:: ini
+
+        [certificates]
+        ...
+        server_certs_key_passphrase = insecure-key-do-not-use-this-key
+        ca_private_key_passphrase = not-secure-passphrase
+        ca_private_key = /etc/octavia/certs/private/server_ca.key.pem
+        ca_certificate = /etc/octavia/certs/server_ca.cert.pem
+
+     .. note::
+
+        The values of ca_private_key_passphrase and
+        server_certs_key_passphrase above are defaults and should not be used
+        in production. The server_certs_key_passphrase must be a
+        base64-compatible string that is 32 characters long.
+
+   * In the ``[haproxy_amphora]`` section, configure the client certificate
+     and the CA:
+
+     .. code-block:: ini
+
+        [haproxy_amphora]
+        ...
+        server_ca = /etc/octavia/certs/server_ca-chain.cert.pem
+        client_cert = /etc/octavia/certs/private/client.cert-and-key.pem
+
+   * In the ``[health_manager]`` section, configure the IP and port number
+     for the heartbeat:
+
+     .. code-block:: ini
+
+        [health_manager]
+        ...
+        bind_port = 5555
+        bind_ip = 172.16.0.2
+        controller_ip_port_list = 172.16.0.2:5555
+
+   * In the ``[controller_worker]`` section, configure worker settings:
+
+     .. code-block:: ini
+
+        [controller_worker]
+        ...
+        amp_image_owner_id = <id of the octavia service project>
+        amp_image_tag = amphora
+        amp_ssh_key_name = mykey
+        amp_secgroup_list = <lb-mgmt-sec-grp id>
+        amp_boot_network_list = <lb-mgmt-net id>
+        amp_flavor_id = 200
+        network_driver = allowed_address_pairs_driver
+        compute_driver = compute_nova_driver
+        amphora_driver = amphora_haproxy_rest_driver
+        client_ca = /etc/octavia/certs/client_ca.cert.pem
+
+10. Populate the octavia database:
+
+    .. code-block:: console
+
+       # octavia-db-manage --config-file /etc/octavia/octavia.conf upgrade head
+
+Finalize installation
+---------------------
+
+Restart the services:
+
+   .. code-block:: console
+
+      # systemctl restart octavia-api octavia-health-manager octavia-housekeeping octavia-worker
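+
+As a quick check that the service is responding (assuming the endpoints and
+credentials created above), list the load balancers; a successful, empty
+result confirms that the API is up:
+
+   .. code-block:: console
+
+      $ openstack loadbalancer list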
diff --git a/doc/source/install/install.rst b/doc/source/install/install.rst
new file mode 100644
index 0000000000..e1fcedf09b
--- /dev/null
+++ b/doc/source/install/install.rst
@@ -0,0 +1,21 @@
+.. _install:
+
+Install and configure
+~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure the Load-balancer
+service, code-named Octavia, on the controller node.
+
+This section assumes that you already have a working OpenStack
+environment with at least the following components installed:
+Identity Service, Image Service, Placement Service, Compute Service,
+and Networking Service.
+
+Note that installation and configuration vary by distribution.
+
+.. toctree::
+   :maxdepth: 2
+
+   install-ubuntu.rst
+   install-amphorav2.rst
+   install-osa.rst
diff --git a/doc/source/reference/glossary.rst b/doc/source/reference/glossary.rst
new file mode 100644
index 0000000000..8bf9c32a3b
--- /dev/null
+++ b/doc/source/reference/glossary.rst
@@ -0,0 +1,163 @@
+================
+Octavia Glossary
+================
+As the Octavia project evolves, it's important that people working on Octavia,
+users using Octavia, and operators deploying Octavia use a common set of
+terminology in order to avoid misunderstandings and confusion. To that end, we
+are providing the following glossary of terms.
+
+Note also that many of these terms are expanded upon in design documents in
+this same repository. What follows is a brief but necessarily incomplete
+description of these terms.
+
+.. glossary::
+   :sorted:
+
+   Amphora
+       Virtual machine, container, dedicated hardware, appliance or device
+       that actually performs the task of load balancing in the Octavia
+       system. More specifically, an amphora takes requests from clients on
+       the front-end and distributes these to back-end systems. Amphorae
+       communicate with their controllers over the LB Network through a
+       driver interface on the controller.
+
+   Amphora Load Balancer Driver
+       Component of the controller that does all the communication with
+       amphorae. Drivers communicate with the controller through a generic
+       base class and associated methods, and translate these into control
+       commands appropriate for whatever type of software is running on the
+       back-end amphora corresponding with the driver. This communication
+       happens over the LB network.
+
+   Apolocation
+       Term used to describe when two or more amphorae are not colocated on
+       the same physical hardware (which is often essential in HA
+       topologies). May also be used to describe two or more loadbalancers
+       which are not colocated on the same amphora.
+
+   Controller
+       Daemon with access to both the LB Network and OpenStack components
+       which coordinates and manages the overall activity of the Octavia load
+       balancing system. Controllers will usually use an abstracted driver
+       interface (usually a base class) for communicating with various other
+       components in the OpenStack environment in order to facilitate loose
+       coupling with these other components. These are the "brains" of the
+       Octavia system.
+
+   HAProxy
+       Load balancing software used in the reference implementation for
+       Octavia. (See http://www.haproxy.org/ ). HAProxy processes run on
+       amphorae and actually accomplish the task of delivering the load
+       balancing service.
+
+   Health Monitor
+       An object that defines a check method for each member of the pool.
+       The health monitor itself is a pure-db object which describes the
+       method the load balancing software on the amphora should use to
+       monitor the health of back-end members of the pool with which the
+       health monitor is associated.
+
+   L7 Policy
+   Layer 7 Policy
+       Collection of L7 rules that get logically ANDed together as well as a
+       routing policy for any given HTTP or terminated HTTPS client requests
+       which match said rules. An L7 Policy is associated with exactly one
+       HTTP or terminated HTTPS listener.
+
+       For example, a user could specify an L7 policy that any client request
+       that matches the L7 rule "request URI starts with '/api'" should
+       get routed to the "api" pool.
+
+   L7 Rule
+   Layer 7 Rule
+       Single logical expression used to match a condition present in a given
+       HTTP or terminated HTTPS request. L7 rules typically match against
+       a specific header or part of the URI and are used in conjunction with
+       L7 policies to accomplish L7 switching. An L7 rule is associated with
+       exactly one L7 policy.
+
+       For example, a user could specify an L7 rule that matches any request
+       URI path that begins with "/api".
+
+   L7 Switching
+   Layer 7 Switching
+       This is a load balancing feature specific to HTTP or terminated HTTPS
+       sessions, in which different client requests are routed to different
+       back-end pools depending on one or more layer 7 policies the user
+       might configure.
+
+       For example, using L7 switching, a user could specify that any
+       requests with a URI path that starts with "/api" get routed to the
+       "api" back-end pool, and that all other requests get routed to the
+       default pool.
+
+   LB Network
+       Load Balancer Network. The network over which the controller(s) and
+       amphorae communicate. The LB network itself will usually be a nova or
+       neutron network to which both the controller and amphorae have access,
+       but is not associated with any one tenant. The LB Network is generally
+       also *not* part of the undercloud and should not be directly exposed
+       to any OpenStack core components other than the Octavia Controller.
+
+   Listener
+       Object representing the listening endpoint of a load balanced service.
+       TCP / UDP port, as well as protocol information and other protocol-
+       specific details are attributes of the listener. Notably, though, the
+       IP address is not.
+
+   Load Balancer
+       Object describing a logical grouping of listeners on one or more VIPs
+       and associated with one or more amphorae. (Our "Loadbalancer" most
+       closely resembles a Virtual IP address in other load balancing
+       implementations.) Whether the load balancer exists on more than one
+       amphora depends on the topology used. The load balancer is also often
+       the root object used in various Octavia APIs.
+
+   Load Balancing
+       The process of taking client requests on a front-end interface and
+       distributing these to a number of back-end servers according to
+       various rules. Load balancing allows for many servers to participate
+       in delivering some kind of TCP or UDP service to clients in an
+       effectively transparent and often highly-available and scalable way
+       (from the client's perspective).
+
+   Member
+       Object representing a single back-end server or system that is a
+       part of a pool. A member is associated with only one pool.
+
+   Octavia
+       Octavia is an operator-grade open source load balancing solution. Also
+       known as the Octavia system or Octavia project. The term by itself
+       should be used to refer to the system as a whole and not any
+       individual component within the Octavia load balancing system.
+
+   Pool
+       Object representing the grouping of members to which the listener
+       forwards client requests. Note that a pool is associated with only
+       one listener, but a listener might refer to several pools (and switch
+       between them using layer 7 policies).
+
+   TLS Termination
+   Transport Layer Security Termination
+       Type of load balancing protocol where HTTPS sessions are terminated
+       (decrypted) on the amphora as opposed to encrypted packets being
+       forwarded on to back-end servers without being decrypted on the
+       amphora. Also known as SSL termination. The main advantages of this
+       type of load balancing are that the payload can be read and / or
+       manipulated by the amphora, and that the expensive tasks of handling
+       the encryption are off-loaded from the back-end servers. This is
+       particularly useful if layer 7 switching is employed in the same
+       listener configuration.
+
+   VIP
+   Virtual IP Address
+       Single service IP address which is associated with a load balancer.
+       This is similar to what is described here:
+       http://en.wikipedia.org/wiki/Virtual_IP_address
+       In a highly available load balancing topology in Octavia, the VIP
+       might be assigned to several amphorae, and a layer-2 protocol like
+       CARP, VRRP, or HSRP (or something unique to the networking
+       infrastructure) might be used to maintain its availability. In layer-3
+       (routed) topologies, the VIP address might be assigned to an upstream
+       networking device which routes packets to amphorae, which then load
+       balance requests to back-end members.
+
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
new file mode 100644
index 0000000000..2b0df0f291
--- /dev/null
+++ b/doc/source/reference/index.rst
@@ -0,0 +1,19 @@
+=================
+Octavia Reference
+=================
+
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   *
+   Command Line Interface Reference
+
+.. only:: html
+
+   Indices and Search
+   ------------------
+
+   * :ref:`genindex`
+   * :ref:`modindex`
+   * :ref:`search`
diff --git a/doc/source/reference/introduction.rst b/doc/source/reference/introduction.rst
new file mode 100644
index 0000000000..636d8727ea
--- /dev/null
+++ b/doc/source/reference/introduction.rst
@@ -0,0 +1,159 @@
+..
+    Copyright (c) 2016 IBM
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may
+    not use this file except in compliance with the License. You may obtain
+    a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+    License for the specific language governing permissions and limitations
+    under the License.
+
+===================
+Introducing Octavia
+===================
+Welcome to Octavia!
+
+Octavia is an open source, operator-scale load balancing solution designed to
+work with OpenStack.
+
+Octavia was born out of the Neutron LBaaS project. Its conception influenced
+the transformation of the Neutron LBaaS project, as Neutron LBaaS moved from
+version 1 to version 2. Starting with the Liberty release of OpenStack,
+Octavia has become the reference implementation for Neutron LBaaS version 2.
+
+Octavia accomplishes its delivery of load balancing services by managing a
+fleet of virtual machines, containers, or bare metal servers, collectively
+known as *amphorae*, which it spins up on demand. This on-demand, horizontal
+scaling feature differentiates Octavia from other load balancing solutions,
+thereby making Octavia truly suited "for the cloud".
+
+Where Octavia fits into the OpenStack ecosystem
+-----------------------------------------------
+Load balancing is essential for enabling simple or automatic delivery scaling
+and availability. In turn, application delivery scaling and availability must
+be considered vital features of any cloud. Together, these facts imply that
+load balancing is a vital feature of any cloud.
+
+Therefore, we consider Octavia to be as essential as Nova, Neutron, Glance or
+any other "core" project that enables the essential features of a modern
+OpenStack cloud.
+
+In accomplishing its role, Octavia makes use of other OpenStack projects:
+
+* **Nova** - For managing amphora lifecycle and spinning up compute resources
+  on demand.
+* **Neutron** - For network connectivity between amphorae, tenant
+  environments, and external networks.
+* **Barbican** - For managing TLS certificates and credentials, when TLS
+  session termination is configured on the amphorae.
+* **Keystone** - For authentication against the Octavia API, and for Octavia
+  to authenticate with other OpenStack projects.
+* **Glance** - For storing the amphora virtual machine image.
+* **Oslo** - For communication between Octavia controller components, making
+  Octavia work within the standard OpenStack framework and review system, and
+  project code structure.
+* **Taskflow** - Is technically part of Oslo; however, Octavia makes
+  extensive use of this job flow system when orchestrating back-end service
+  configuration and management.
+
+Octavia is designed to interact with the components listed previously. In
+each case, we've taken care to define this interaction through a driver
+interface. That way, external components can be swapped out with
+functionally-equivalent replacements, without having to restructure major
+components of Octavia. For example, if you use an SDN solution other than
+Neutron in your environment, it should be possible for you to write an
+Octavia networking driver for your SDN environment, which can be a drop-in
+replacement for the standard Neutron networking driver in Octavia.
+
+As of Pike, it is recommended to run Octavia as a standalone load balancing
+solution. Neutron LBaaS is deprecated in the Queens release, and Octavia is
+its replacement. Whenever possible, operators are **strongly** advised to
+migrate to Octavia. For end-users, this transition should be relatively
+seamless, because Octavia supports the Neutron LBaaS v2 API and it has a
+similar CLI interface. Alternatively, if end-users cannot migrate on their
+side in the foreseeable future, operators could enable the experimental
+Octavia proxy plugin in Neutron LBaaS.
+
+It is also possible to use Octavia as a Neutron LBaaS plugin, in the same way
+as any other vendor. You can think of Octavia as an "open source vendor" for
+Neutron LBaaS.
+
+Octavia supports third-party vendor drivers just like Neutron LBaaS,
+and fully replaces Neutron LBaaS as the load balancing solution for
+OpenStack.
+
+For further information on OpenStack Neutron LBaaS deprecation, please refer
+to https://wiki.openstack.org/wiki/Neutron/LBaaS/Deprecation.
+
+
+Octavia terminology
+-------------------
+Before you proceed further in this introduction, please note:
+
+Experience shows that, within the subsegment of the IT industry that creates,
+deploys, and uses load balancing devices or services, terminology is often
+used inconsistently.
+To reduce confusion, the Octavia team has created a glossary of terms as they
+are defined and used within the context of the Octavia project and Neutron
+LBaaS version 2. This glossary is available here: :doc:`glossary`
+
+If you are familiar with Neutron LBaaS version 1 terms and usage, it is
+especially important for you to understand how the meanings of the terms
+"VIP," "load balancer," and "load balancing" have changed in Neutron LBaaS
+version 2.
+
+Our use of these terms should remain consistent with the :doc:`glossary`
+throughout Octavia's documentation, in discussions held by Octavia team
+members on public mailing lists, in IRC channels, and at conferences. To
+avoid misunderstandings, it's a good idea to familiarize yourself with these
+glossary definitions.
+
+
+A 10,000-foot overview of Octavia components
+--------------------------------------------
+.. image:: octavia-component-overview.svg
+   :width: 660px
+   :alt: Octavia Component Overview
+
+Octavia version 4.0 consists of the following major components:
+
+* **amphorae** - Amphorae are the individual virtual machines, containers, or
+  bare metal servers that accomplish the delivery of load balancing services
+  to tenant application environments. In Octavia version 0.8, the reference
+  implementation of the amphora image is an Ubuntu virtual machine running
+  HAProxy.
+
+* **controller** - The Controller is the "brains" of Octavia. It consists of
+  five sub-components, which are individual daemons. They can be run on
+  separate back-end infrastructure if desired:
+
+  * **API Controller** - As the name implies, this subcomponent runs
+    Octavia's API. It takes API requests, performs simple sanitizing on them,
+    and ships them off to the controller worker over the Oslo messaging bus.
+
+  * **Controller Worker** - This subcomponent takes sanitized API commands
+    from the API controller and performs the actions necessary to fulfill the
+    API request.
+
+  * **Health Manager** - This subcomponent monitors individual amphorae to
+    ensure they are up and running, and otherwise healthy. It also handles
+    failover events if amphorae fail unexpectedly.
+
+  * **Housekeeping Manager** - This subcomponent cleans up stale (deleted)
+    database records and manages amphora certificate rotation.
+
+  * **Driver Agent** - The driver agent receives status and statistics
+    updates from provider drivers.
+
+* **network** - Octavia cannot accomplish what it does without manipulating
+  the network environment. Amphorae are spun up with a network interface on
+  the "load balancer network," and they may also plug directly into tenant
+  networks to reach back-end pool members, depending on how any given load
+  balancing service is deployed by the tenant.
+
+For a more complete description of Octavia's components, please see the
+:doc:`../contributor/design/version0.5/component-design` document within this
+documentation repository.
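+
+As a quick illustration of how these components cooperate, consider the
+simplest user-facing request. The sketch below is illustrative only; the
+subnet name *public-subnet* is an assumption, and complete, explained
+examples live in the :doc:`../user/guides/basic-cookbook`.
+
+::
+
+    # The API controller validates this request, then hands it to the
+    # controller worker over the Oslo messaging bus; the worker drives the
+    # compute and network drivers to boot and plug an amphora.
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet
+
+    # Once provisioning finishes, the load balancer goes ACTIVE.
+    openstack loadbalancer show lb1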
diff --git a/doc/source/reference/octavia-component-overview.svg b/doc/source/reference/octavia-component-overview.svg
new file mode 100644
index 0000000000..696ae67863
--- /dev/null
+++ b/doc/source/reference/octavia-component-overview.svg
@@ -0,0 +1,291 @@
+[SVG markup not reproduced in this patch view; only its text labels survived
+ extraction. The diagram shows the Octavia API, Octavia Worker, Health
+ Manager, Housekeeping Manager, and Driver Agent connected through Oslo
+ Messaging and a shared Database, with Controller Worker, Amphora, Compute,
+ Network, and Certificate drivers linking out to Nova, Neutron,
+ Barbican / Castellan, and the amphorae.]
diff --git a/doc/source/user/feature-classification/feature-matrix-healthmonitor.ini b/doc/source/user/feature-classification/feature-matrix-healthmonitor.ini
new file mode 100644
index 0000000000..2b2abc3a1a
--- /dev/null
+++ b/doc/source/user/feature-classification/feature-matrix-healthmonitor.ini
@@ -0,0 +1,176 @@
+# Copyright (c) 2019 Red Hat, Inc.

+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at

+# http://www.apache.org/licenses/LICENSE-2.0

+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+[driver.amphora]
+title=Amphora Provider
+link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html
+
+[driver.ovn]
+title=OVN Provider
+link=https://docs.openstack.org/ovn-octavia-provider/latest/admin/driver.html
+
+# Note: These should be in the order they appear in a create call.
+
+[operation.admin_state_up]
+title=admin_state_up
+status=mandatory
+notes=Enables and disables the health monitor.
+cli=openstack loadbalancer healthmonitor create [--enable | --disable]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.delay]
+title=delay
+status=mandatory
+notes=The time, in seconds, between sending probes to members.
+cli=openstack loadbalancer healthmonitor create --delay
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.domain_name]
+title=domain_name
+status=optional
+notes=The domain name, which will be injected into the HTTP Host header sent to the backend server for HTTP health checks.
+cli=openstack loadbalancer healthmonitor create [--domain-name ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.expected_codes]
+title=expected_codes
+status=optional
+notes=The list of HTTP status codes expected in response from the member to declare it healthy.
+cli=openstack loadbalancer healthmonitor create [--expected-codes ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.http_method]
+title=http_method
+status=optional
+notes=The HTTP method that the health monitor uses for requests.
+cli=openstack loadbalancer healthmonitor create [--http-method ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.http_version]
+title=http_version
+status=optional
+notes=The HTTP version to use for health checks.
+cli=openstack loadbalancer healthmonitor create [--http-version ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.name]
+title=name
+status=optional
+notes=The name of the health monitor. Provided by the Octavia API service.
+cli=openstack loadbalancer healthmonitor create [--name ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.max_retries]
+title=max_retries
+status=mandatory
+notes=The number of successful checks before changing the operating status of the member to ONLINE.
+cli=openstack loadbalancer healthmonitor create --max-retries
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.max_retries_down]
+title=max_retries_down
+status=optional
+notes=The number of allowed check failures before changing the operating status of the member to ERROR.
+cli=openstack loadbalancer healthmonitor create [--max-retries-down ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.tags]
+title=tags
+status=optional
+notes=The tags for the health monitor. Provided by the Octavia API service.
+cli=openstack loadbalancer healthmonitor create [--tag ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.timeout]
+title=timeout
+status=mandatory
+notes=The maximum time, in seconds, that a monitor waits to connect before it times out.
+cli=openstack loadbalancer healthmonitor create --timeout
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.type.HTTP]
+title=type - HTTP
+status=optional
+notes=Use HTTP for the health monitor.
+cli=openstack loadbalancer healthmonitor create --type HTTP
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.type.HTTPS]
+title=type - HTTPS
+status=optional
+notes=Use HTTPS for the health monitor.
+cli=openstack loadbalancer healthmonitor create --type HTTPS
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.type.PING]
+title=type - PING
+status=optional
+notes=Use PING for the health monitor.
+cli=openstack loadbalancer healthmonitor create --type PING
+driver.amphora=partial
+driver-notes.amphora=CentOS 7-based amphorae do not support PING health monitors.
+driver.ovn=missing
+
+[operation.type.TCP]
+title=type - TCP
+status=optional
+notes=Use TCP for the health monitor.
+cli=openstack loadbalancer healthmonitor create --type TCP
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.type.TLS-HELLO]
+title=type - TLS-HELLO
+status=optional
+notes=Use a TLS-HELLO handshake for the health monitor.
+cli=openstack loadbalancer healthmonitor create --type TLS-HELLO +driver.amphora=complete +driver.ovn=missing + +[operation.type.UDP-CONNECT] +title=type - UDP-CONNECT +status=optional +notes=Use UDP-CONNECT for the health monitor. +cli=openstack loadbalancer healthmonitor create --type UDP-CONNECT +driver.amphora=complete +driver.ovn=complete + +[operation.type.SCTP] +title=type - SCTP +status=optional +notes=Use SCTP for the health monitor. +cli=openstack loadbalancer healthmonitor create --type SCTP +driver.amphora=complete +driver.ovn=missing + +[operation.url_path] +title=url_path +status=optional +notes=The HTTP URL path of the request sent by the monitor to test the health of a backend member. +cli=openstack loadbalancer healthmonitor create [--url-path ] +driver.amphora=complete +driver.ovn=missing diff --git a/doc/source/user/feature-classification/feature-matrix-l7policy.ini b/doc/source/user/feature-classification/feature-matrix-l7policy.ini new file mode 100644 index 0000000000..9b6685d598 --- /dev/null +++ b/doc/source/user/feature-classification/feature-matrix-l7policy.ini @@ -0,0 +1,127 @@ +# Copyright (c) 2019 Red Hat, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +[driver.amphora] +title=Amphora Provider +link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +[driver.ovn] +title=OVN Provider +link=https://docs.openstack.org/ovn-octavia-provider/latest/admin/driver.html + +# Note: These should be in the order they appear in a create call. + +[operation.action.REDIRECT_TO_POOL] +title=action - REDIRECT_TO_POOL +status=optional +notes=The L7 policy action REDIRECT_TO_POOL. +cli=openstack loadbalancer l7policy create --action REDIRECT_TO_POOL +driver.amphora=complete +driver.ovn=missing + +[operation.action.REDIRECT_TO_PREFIX] +title=action - REDIRECT_TO_PREFIX +status=optional +notes=The L7 policy action REDIRECT_TO_PREFIX. +cli=openstack loadbalancer l7policy create --action REDIRECT_TO_PREFIX +driver.amphora=complete +driver.ovn=missing + +[operation.action.REDIRECT_TO_URL] +title=action - REDIRECT_TO_URL +status=optional +notes=The L7 policy action REDIRECT_TO_URL. +cli=openstack loadbalancer l7policy create --action REDIRECT_TO_URL +driver.amphora=complete +driver.ovn=missing + +[operation.action.REJECT] +title=action - REJECT +status=optional +notes=The L7 policy action REJECT. +cli=openstack loadbalancer l7policy create --action REJECT +driver.amphora=complete +driver.ovn=missing + +[operation.admin_state_up] +title=admin_state_up +status=mandatory +notes=Enables and disables the L7 policy. +cli=openstack loadbalancer l7policy create [--enable | --disable] +driver.amphora=complete +driver.ovn=missing + +[operation.description] +title=description +status=optional +notes=The description of the L7 policy. Provided by the Octavia API service. +cli=openstack loadbalancer l7policy create [--description ] +driver.amphora=complete +driver.ovn=missing + +[operation.name] +title=name +status=optional +notes=The name of the L7 policy. 
Provided by the Octavia API service. +cli=openstack loadbalancer l7policy create [--name ] +driver.amphora=complete +driver.ovn=missing + +[operation.position] +title=position +status=optional +notes=The position of this policy on the listener. +cli=openstack loadbalancer l7policy create [--position ] +driver.amphora=complete +driver.ovn=missing + +[operation.redirect_http_code] +title=redirect_http_code +status=optional +notes=Requests matching this policy will be redirected to the specified URL or Prefix URL with the HTTP response code. +cli=openstack loadbalancer l7policy create [--redirect-http-code ] +driver.amphora=complete +driver.ovn=missing + +[operation.redirect_pool_id] +title=redirect_pool_id +status=optional +notes=Requests matching this policy will be redirected to the pool with this ID. +cli=openstack loadbalancer l7policy create [--redirect-pool ] +driver.amphora=complete +driver.ovn=missing + +[operation.redirect_prefix] +title=redirect_prefix +status=optional +notes=Requests matching this policy will be redirected to this Prefix URL. +cli=openstack loadbalancer l7policy create [--redirect-prefix ] +driver.amphora=complete +driver.ovn=missing + +[operation.redirect_url] +title=redirect_url +status=optional +notes=Requests matching this policy will be redirected to this URL. +cli=openstack loadbalancer l7policy create [--redirect-url ] +driver.amphora=complete +driver.ovn=missing + +[operation.tags] +title=tags +status=optional +notes=The tags for the L7 policy. Provided by the Octavia API service. +cli=openstack loadbalancer l7policy create [--tag ] +driver.amphora=complete +driver.ovn=missing diff --git a/doc/source/user/feature-classification/feature-matrix-l7rule.ini b/doc/source/user/feature-classification/feature-matrix-l7rule.ini new file mode 100644 index 0000000000..df782d7cc0 --- /dev/null +++ b/doc/source/user/feature-classification/feature-matrix-l7rule.ini @@ -0,0 +1,167 @@ +# Copyright (c) 2019 Red Hat, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +[driver.amphora] +title=Amphora Provider +link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +[driver.ovn] +title=OVN Provider +link=https://docs.openstack.org/ovn-octavia-provider/latest/admin/driver.html + +# Note: These should be in the order they appear in a create call. + +[operation.admin_state_up] +title=admin_state_up +status=mandatory +notes=Enables and disables the L7 rule. +cli=openstack loadbalancer l7rule create [--enable | --disable] +driver.amphora=complete +driver.ovn=missing + +[operation.compare_type.CONTAINS] +title=compare_type - CONTAINS +status=mandatory +notes=The CONTAINS comparison type for the L7 rule. +cli=openstack loadbalancer l7rule create --compare-type CONTAINS +driver.amphora=complete +driver.ovn=missing + +[operation.compare_type.ENDS_WITH] +title=compare_type - ENDS_WITH +status=mandatory +notes=The ENDS_WITH comparison type for the L7 rule. 
+cli=openstack loadbalancer l7rule create --compare-type ENDS_WITH +driver.amphora=complete +driver.ovn=missing + +[operation.compare_type.EQUAL_TO] +title=compare_type - EQUAL_TO +status=mandatory +notes=The EQUAL_TO comparison type for the L7 rule. +cli=openstack loadbalancer l7rule create --compare-type EQUAL_TO +driver.amphora=complete +driver.ovn=missing + +[operation.compare_type.REGEX] +title=compare_type - REGEX +status=mandatory +notes=The REGEX comparison type for the L7 rule. +cli=openstack loadbalancer l7rule create --compare-type REGEX +driver.amphora=complete +driver.ovn=missing + +[operation.compare_type.STARTS_WITH] +title=compare_type - STARTS_WITH +status=mandatory +notes=The STARTS_WITH comparison type for the L7 rule. +cli=openstack loadbalancer l7rule create --compare-type STARTS_WITH +driver.amphora=complete +driver.ovn=missing + +[operation.invert] +title=invert +status=optional +notes=When true the logic of the rule is inverted. +cli=openstack loadbalancer l7rule create [--invert] +driver.amphora=complete +driver.ovn=missing + +[operation.key] +title=key +status=optional +notes=The key to use for the comparison. +cli=openstack loadbalancer l7rule create [--key ] +driver.amphora=complete +driver.ovn=missing + +[operation.tags] +title=tags +status=optional +notes=The tags for the L7 rule. Provided by the Octavia API service. +cli=openstack loadbalancer l7rule create [--tag ] +driver.amphora=complete +driver.ovn=missing + +[operation.type.COOKIE] +title=type - COOKIE +status=optional +notes=The COOKIE L7 rule type. +cli=openstack loadbalancer l7rule create --type COOKIE +driver.amphora=complete +driver.ovn=missing + +[operation.type.FILE_TYPE] +title=type - FILE_TYPE +status=optional +notes=The FILE_TYPE L7 rule type. +cli=openstack loadbalancer l7rule create --type FILE_TYPE +driver.amphora=complete +driver.ovn=missing + +[operation.type.HEADER] +title=type - HEADER +status=optional +notes=The HEADER L7 rule type. +cli=openstack loadbalancer l7rule create --type HEADER +driver.amphora=complete +driver.ovn=missing + +[operation.type.HOST_NAME] +title=type - HOST_NAME +status=optional +notes=The HOST_NAME L7 rule type. +cli=openstack loadbalancer l7rule create --type HOST_NAME +driver.amphora=complete +driver.ovn=missing + +[operation.type.PATH] +title=type - PATH +status=optional +notes=The PATH L7 rule type. +cli=openstack loadbalancer l7rule create --type PATH +driver.amphora=complete +driver.ovn=missing + +[operation.type.SSL_CONN_HAS_CERT] +title=type - SSL_CONN_HAS_CERT +status=optional +notes=The SSL_CONN_HAS_CERT L7 rule type. +cli=openstack loadbalancer l7rule create --type SSL_CONN_HAS_CERT +driver.amphora=complete +driver.ovn=missing + +[operation.type.SSL_VERIFY_RESULT] +title=type - SSL_VERIFY_RESULT +status=optional +notes=The SSL_VERIFY_RESULT L7 rule type. +cli=openstack loadbalancer l7rule create --type SSL_VERIFY_RESULT +driver.amphora=complete +driver.ovn=missing + +[operation.type.SSL_DN_FIELD] +title=type - SSL_DN_FIELD +status=optional +notes=The SSL_DN_FIELD L7 rule type. +cli=openstack loadbalancer l7rule create --type SSL_DN_FIELD +driver.amphora=complete +driver.ovn=missing + +[operation.value] +title=value +status=mandatory +notes=The value to use for the comparison. 
+cli=openstack loadbalancer l7rule create --value +driver.amphora=complete +driver.ovn=missing diff --git a/doc/source/user/feature-classification/feature-matrix-lb.ini b/doc/source/user/feature-classification/feature-matrix-lb.ini new file mode 100644 index 0000000000..289a0393d1 --- /dev/null +++ b/doc/source/user/feature-classification/feature-matrix-lb.ini @@ -0,0 +1,138 @@ +# Copyright (c) 2019 Red Hat, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +[driver.amphora] +title=Amphora Provider +link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +[driver.ovn] +title=OVN Provider +link=https://docs.openstack.org/ovn-octavia-provider/latest/admin/driver.html + +# Note: These should be in the order they appear in a create call. + +[operation.additional_vips] +title=additional_vips +status=optional +notes=Additional VIPs to the primary one. Every additional VIP shall include the ``subnet_id`` as mandatory and the ``ip_address`` as optional. Additional VIP subnets must all belong to the same network as the primary VIP. +cli=openstack loadbalancer create [--additional-vip ] +driver.amphora=complete +driver.ovn=complete + +[operation.admin_state_up] +title=admin_state_up +status=mandatory +notes=Enables and disables the load balancer. +cli=openstack loadbalancer create [--enable | --disable] +driver.amphora=complete +driver.ovn=complete + +[operation.availability_zone] +title=availability_zone +status=optional +notes=The availability zone to deploy the load balancer into. +cli=openstack loadbalancer create [--availability-zone ] +driver.amphora=complete +driver.ovn=missing + +[operation.description] +title=description +status=optional +notes=The description of the load balancer. Provided by the Octavia API service. +cli=openstack loadbalancer create [--description ] +driver.amphora=complete +driver.ovn=complete + +[operation.flavor] +title=flavor +status=optional +notes=The flavor of the load balancer. +cli=openstack loadbalancer create [--flavor ] +driver.amphora=complete +driver.ovn=missing + +[operation.name] +title=name +status=optional +notes=The name of the load balancer. Provided by the Octavia API service. +cli=openstack loadbalancer create [--name ] +driver.amphora=complete +driver.ovn=complete + +[operation.stats] +title=Load Balancer statistics +status=mandatory +notes=The ability to show statistics for a load balancer. +cli=openstack loadbalancer stats show +driver.amphora=complete +driver.ovn=missing + +[operation.status] +title=Load Balancer status tree +status=mandatory +notes=The ability to show a status tree for the load balancer. +cli=openstack loadbalancer status show +driver.amphora=complete +driver.ovn=complete + +[operation.tags] +title=tags +status=optional +notes=The tags for the load balancer. Provided by the Octavia API service. 
+cli=openstack loadbalancer create [--tag ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.vip_address]
+title=vip_address
+status=optional
+cli=openstack loadbalancer create [--vip-address ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.vip_network_id]
+title=vip_network_id
+status=optional
+cli=openstack loadbalancer create [--vip-network-id ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.vip_port_id]
+title=vip_port_id
+status=optional
+cli=openstack loadbalancer create [--vip-port-id ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.vip_qos_policy_id]
+title=vip_qos_policy_id
+status=optional
+cli=openstack loadbalancer create [--vip-qos-policy-id ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.vip_subnet_id]
+title=vip_subnet_id
+status=optional
+cli=openstack loadbalancer create [--vip-subnet-id ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.vip_sg_id]
+title=vip_sg_id
+status=optional
+notes=Optional Security Group of the VIP port (can be set multiple times).
+cli=openstack loadbalancer create [--vip-sg-id ]
+driver.amphora=complete
+driver.ovn=missing
diff --git a/doc/source/user/feature-classification/feature-matrix-listener.ini b/doc/source/user/feature-classification/feature-matrix-listener.ini
new file mode 100644
index 0000000000..1d152e384d
--- /dev/null
+++ b/doc/source/user/feature-classification/feature-matrix-listener.ini
@@ -0,0 +1,327 @@
+# Copyright (c) 2019 Red Hat, Inc.

+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at

+# http://www.apache.org/licenses/LICENSE-2.0

+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+[driver.amphora]
+title=Amphora Provider
+link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html
+
+[driver.ovn]
+title=OVN Provider
+link=https://docs.openstack.org/ovn-octavia-provider/latest/admin/driver.html
+
+# Note: These should be in the order they appear in a create call.
+
+[operation.admin_state_up]
+title=admin_state_up
+status=mandatory
+notes=Enables and disables the listener.
+cli=openstack loadbalancer listener create [--enable | --disable]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.allowed_cidr]
+title=allowed_cidr
+status=optional
+notes=CIDR to allow access to the listener (can be set multiple times).
+cli=openstack loadbalancer listener create [--allowed-cidr ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.alpn_protocol]
+title=alpn_protocol
+status=optional
+notes=List of accepted ALPN protocols (can be set multiple times).
+cli=openstack loadbalancer listener create [--alpn-protocol ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.client_authentication]
+title=client_authentication
+status=optional
+notes=The TLS client authentication mode.
+cli=openstack loadbalancer listener create [--client-authentication {NONE,OPTIONAL,MANDATORY}]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.client_ca_tls_container_ref]
+title=client_ca_tls_container_ref
+status=optional
+notes=The reference to the key manager service secret containing a PEM format client CA certificate bundle for TERMINATED_TLS listeners.
+cli=openstack loadbalancer listener create [--client-ca-tls-container-ref ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.client_crl_container_ref]
+title=client_crl_container_ref
+status=optional
+notes=The URI of the key manager service secret containing a PEM format CA revocation list file for TERMINATED_TLS listeners.
+cli=openstack loadbalancer listener create [--client-crl-container-ref ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.connection_limit]
+title=connection_limit
+status=optional
+notes=The maximum number of connections permitted for this listener.
+cli=openstack loadbalancer listener create [--connection-limit ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.default_tls_container_ref]
+title=default_tls_container_ref
+status=optional
+notes=The URI of the key manager service secret containing a PKCS12 format certificate/key bundle for TERMINATED_TLS listeners.
+cli=openstack loadbalancer listener create [--default-tls-container-ref ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.description]
+title=description
+status=optional
+notes=The description of the listener. Provided by the Octavia API service.
+cli=openstack loadbalancer listener create [--description ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.insert_headers.X-Forwarded-For]
+title=insert_headers - X-Forwarded-For
+status=optional
+notes=When “true” an X-Forwarded-For header is inserted into the request to the backend member that specifies the client IP address.
+cli=openstack loadbalancer listener create [--insert-headers X-Forwarded-For=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.insert_headers.X-Forwarded-Port]
+title=insert_headers - X-Forwarded-Port
+status=optional
+notes=When “true” an X-Forwarded-Port header is inserted into the request to the backend member that specifies the listener port.
+cli=openstack loadbalancer listener create [--insert-headers X-Forwarded-Port=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.insert_headers.X-Forwarded-Proto]
+title=insert_headers - X-Forwarded-Proto
+status=optional
+notes=When “true” an X-Forwarded-Proto header is inserted into the request to the backend member.
+cli=openstack loadbalancer listener create [--insert-headers X-Forwarded-Proto=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.insert_headers.X-SSL-Client-Verify]
+title=insert_headers - X-SSL-Client-Verify
+status=optional
+notes=When “true” an X-SSL-Client-Verify header is inserted into the request to the backend member that contains 0 if the client authentication was successful, or a result error number greater than 0 that aligns with the OpenSSL verify error codes.
+cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-Verify=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.insert_headers.X-SSL-Client-Has-Cert]
+title=insert_headers - X-SSL-Client-Has-Cert
+status=optional
+notes=When “true” an X-SSL-Client-Has-Cert header is inserted into the request to the backend member that is 'true' if a client authentication certificate was presented, and 'false' if not. Does not indicate validity.
+cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-Has-Cert=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.insert_headers.X-SSL-Client-DN]
+title=insert_headers - X-SSL-Client-DN
+status=optional
+notes=When “true” an X-SSL-Client-DN header is inserted into the request to the backend member that contains the full Distinguished Name of the certificate presented by the client.
+cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-DN=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.insert_headers.X-SSL-Client-CN]
+title=insert_headers - X-SSL-Client-CN
+status=optional
+notes=When “true” an X-SSL-Client-CN header is inserted into the request to the backend member that contains the Common Name from the full Distinguished Name of the certificate presented by the client.
+cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-CN=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.insert_headers.X-SSL-Issuer]
+title=insert_headers - X-SSL-Issuer
+status=optional
+notes=When “true” an X-SSL-Issuer header is inserted into the request to the backend member that contains the full Distinguished Name of the client certificate issuer.
+cli=openstack loadbalancer listener create [--insert-headers X-SSL-Issuer=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.insert_headers.X-SSL-Client-SHA1]
+title=insert_headers - X-SSL-Client-SHA1
+status=optional
+notes=When “true” an X-SSL-Client-SHA1 header is inserted into the request to the backend member that contains the SHA-1 fingerprint of the certificate presented by the client in hex string format.
+cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-SHA1=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.insert_headers.X-SSL-Client-Not-Before]
+title=insert_headers - X-SSL-Client-Not-Before
+status=optional
+notes=When “true” an X-SSL-Client-Not-Before header is inserted into the request to the backend member that contains the start date presented by the client as a formatted string YYMMDDhhmmss[Z].
+cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-Not-Before=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.insert_headers.X-SSL-Client-Not-After]
+title=insert_headers - X-SSL-Client-Not-After
+status=optional
+notes=When “true” an X-SSL-Client-Not-After header is inserted into the request to the backend member that contains the end date presented by the client as a formatted string YYMMDDhhmmss[Z].
+cli=openstack loadbalancer listener create [--insert-headers X-SSL-Client-Not-After=true]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.name]
+title=name
+status=optional
+notes=The name of the load balancer listener. Provided by the Octavia API service.
+cli=openstack loadbalancer listener create [--name ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.protocol.HTTP]
+title=protocol - HTTP
+status=optional
+notes=HTTP protocol support for the listener.
+cli=openstack loadbalancer listener create --protocol HTTP
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.protocol.HTTPS]
+title=protocol - HTTPS
+status=optional
+notes=HTTPS protocol support for the listener.
+cli=openstack loadbalancer listener create --protocol HTTPS
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.protocol.TCP]
+title=protocol - TCP
+status=optional
+notes=TCP protocol support for the listener.
+cli=openstack loadbalancer listener create --protocol TCP +driver.amphora=complete +driver.ovn=complete + +[operation.protocol.TERMINATED_HTTPS] +title=protocol - TERMINATED_HTTPS +status=optional +notes=Terminated HTTPS protocol support for the listener. +cli=openstack loadbalancer listener create --protocol TERMINATED_HTTPS +driver.amphora=complete +driver.ovn=missing + +[operation.protocol.UDP] +title=protocol - UDP +status=optional +notes=UDP protocol support for the listener. +cli=openstack loadbalancer listener create --protocol UDP +driver.amphora=complete +driver.ovn=complete + +[operation.protocol.SCTP] +title=protocol - SCTP +status=optional +notes=SCTP protocol support for the listener. +cli=openstack loadbalancer listener create --protocol SCTP +driver.amphora=complete +driver.ovn=missing + +[operation.protocol.PROMETHEUS] +title=protocol - PROMETHEUS +status=optional +notes=Prometheus exporter support for the listener. +cli=openstack loadbalancer listener create --protocol PROMETHEUS +driver.amphora=complete +driver.ovn=missing + +[operation.protocol_port] +title=protocol_port +status=mandatory +notes=The protocol port number for the listener. +cli=openstack loadbalancer listener create --protocol-port +driver.amphora=complete +driver.ovn=complete + +[operation.sni_container_refs] +title=sni_container_refs +status=optional +notes=A list of URIs to the key manager service secrets containing PKCS12 format certificate/key bundles for TERMINATED_TLS listeners. +cli=openstack loadbalancer listener create [--sni-container-refs [ [ ...]]] +driver.amphora=complete +driver.ovn=missing + +[operation.stats] +title=Listener statistics +status=mandatory +notes=The ability to show statistics for a listener. +cli=openstack loadbalancer listener stats show +driver.amphora=complete +driver.ovn=missing + +[operation.tags] +title=tags +status=optional +notes=The tags for the load balancer listener. Provided by the Octavia API service. +cli=openstack loadbalancer listener create [--tags ] +driver.amphora=complete +driver.ovn=complete + +[operation.timeout_client_data] +title=timeout_client_data +status=optional +notes=Frontend client inactivity timeout in milliseconds. +cli=openstack loadbalancer listener create [--timeout-client-data ] +driver.amphora=complete +driver.ovn=missing + +[operation.timeout_member_connect] +title=timeout_member_connect +status=optional +notes=Backend member connection timeout in milliseconds. +cli=openstack loadbalancer listener create [--timeout-member-connect ] +driver.amphora=complete +driver.ovn=missing + +[operation.timeout-member-data] +title=timeout-member-data +status=optional +notes=Backend member inactivity timeout in milliseconds. +cli=openstack loadbalancer listener create [--timeout-member-data ] +driver.amphora=complete +driver.ovn=missing + +[operation.timeout-tcp-inspect] +title=timeout-tcp-inspect +status=optional +notes=Time, in milliseconds, to wait for additional TCP packets for content inspection. +cli=openstack loadbalancer listener create [--timeout-tcp-inspect ] +driver.amphora=complete +driver.ovn=missing + +[operation.tls_ciphers] +title=tls_ciphers +status=optional +notes=List of accepted TLS ciphers. +cli=openstack loadbalancer listener create [--tls-ciphers ] +driver.amphora=complete +driver.ovn=missing + +[operation.tls_versions] +title=tls_versions +status=optional +notes=List of accepted TLS protocol versions. 
+cli=openstack loadbalancer listener create [--tls-versions ]
+driver.amphora=complete
+driver.ovn=missing
diff --git a/doc/source/user/feature-classification/feature-matrix-member.ini b/doc/source/user/feature-classification/feature-matrix-member.ini
new file mode 100644
index 0000000000..9213d1289d
--- /dev/null
+++ b/doc/source/user/feature-classification/feature-matrix-member.ini
@@ -0,0 +1,111 @@
+# Copyright (c) 2019 Red Hat, Inc.

+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at

+# http://www.apache.org/licenses/LICENSE-2.0

+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+[driver.amphora]
+title=Amphora Provider
+link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html
+
+[driver.ovn]
+title=OVN Provider
+link=https://docs.openstack.org/ovn-octavia-provider/latest/admin/driver.html
+
+# Note: These should be in the order they appear in a create call.
+
+[operation.admin_state_up]
+title=admin_state_up
+status=mandatory
+notes=Enables and disables the member.
+cli=openstack loadbalancer member create [--enable | --disable]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.address]
+title=address
+status=mandatory
+notes=The IP address for the member.
+cli=openstack loadbalancer member create --address
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.backup]
+title=backup
+status=optional
+notes=True if the member is a backup member server.
+cli=openstack loadbalancer member create [--enable-backup]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.batch]
+title=Batch update members
+status=mandatory
+notes=Ability to update the members of a pool in one API call.
+driver.amphora=complete
+driver.ovn=partial
+driver-notes.ovn=The OVN provider does not support all of the member features.
+
+[operation.monitor_address]
+title=monitor_address
+status=optional
+notes=An alternate IP address used for health monitoring a backend member.
+cli=openstack loadbalancer member create [--monitor-address ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.monitor_port]
+title=monitor_port
+status=optional
+notes=An alternate protocol port used for health monitoring a backend member.
+cli=openstack loadbalancer member create [--monitor-port ]
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.name]
+title=name
+status=optional
+notes=The name for the member. Provided by the Octavia API service.
+cli=openstack loadbalancer member create [--name ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.protocol_port]
+title=protocol_port
+status=mandatory
+notes=The protocol port number to connect with on the member server.
+cli=openstack loadbalancer member create --protocol-port
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.subnet_id]
+title=subnet_id
+status=optional
+notes=The subnet ID the member service is accessible from.
+cli=openstack loadbalancer member create [--subnet-id ]
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.tags]
+title=tags
+status=optional
+notes=The tags for the member. Provided by the Octavia API service.
+cli=openstack loadbalancer member create [--tag ] +driver.amphora=complete +driver.ovn=complete + +[operation.weight] +title=weight +status=optional +notes=The weight of a member determines the portion of requests or connections it services compared to the other members of the pool. +cli=openstack loadbalancer member create [--weight ] +driver.amphora=complete +driver.ovn=missing diff --git a/doc/source/user/feature-classification/feature-matrix-pool.ini b/doc/source/user/feature-classification/feature-matrix-pool.ini new file mode 100644 index 0000000000..708fedc743 --- /dev/null +++ b/doc/source/user/feature-classification/feature-matrix-pool.ini @@ -0,0 +1,243 @@ +# Copyright (c) 2019 Red Hat, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +[driver.amphora] +title=Amphora Provider +link=https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +[driver.ovn] +title=OVN Provider +link=https://docs.openstack.org/ovn-octavia-provider/latest/admin/driver.html + +# Note: These should be in the order they appear in a create call. + +[operation.admin_state_up] +title=admin_state_up +status=mandatory +notes=Enables and disables the pool. +cli=openstack loadbalancer pool create [--enable | --disable] --listener +driver.amphora=complete +driver.ovn=complete + +[operation.alpn_protocol] +title=alpn_protocol +status=optional +notes=List of accepted ALPN protocols (can be set multiple times). +cli=openstack loadbalancer pool create [--alpn-protocol ] --listener +driver.amphora=complete +driver.ovn=missing + +[operation.ca_tls_container_ref] +title=ca_tls_container_ref +status=optional +notes=The reference of the key manager service secret containing a PEM format CA certificate bundle for tls_enabled pools. +cli=openstack loadbalancer pool create [--ca-tls-container-ref ] --listener +driver.amphora=complete +driver.ovn=missing + +[operation.crl_container_ref] +title=crl_container_ref +status=optional +notes=The reference of the key manager service secret containing a PEM format CA revocation list file for tls_enabled pools. +cli=openstack loadbalancer pool create [--crl-container-ref ] --listener +driver.amphora=complete +driver.ovn=missing + +[operation.lb_algorithm.LEAST_CONNECTIONS] +title=lb_algorithm - LEAST_CONNECTIONS +notes=The pool will direct connections to the member server with the least connections in use. +cli=openstack loadbalancer pool create --lb-algorithm LEAST_CONNECTIONS --listener +driver.amphora=complete +driver.ovn=missing + +[operation.lb_algorithm.ROUND_ROBIN] +title=lb_algorithm - ROUND_ROBIN +notes=The pool will direct connections to the next member server, one after the other, rotating through the available member servers. +cli=openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener +driver.amphora=complete +driver.ovn=missing + +[operation.lb_algorithm.SOURCE_IP] +title=lb_algorithm - SOURCE_IP +notes=The pool will direct connections to the member server based on a hash of the source IP. 
+cli=openstack loadbalancer pool create --lb-algorithm SOURCE_IP --listener
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.lb_algorithm.SOURCE_IP_PORT]
+title=lb_algorithm - SOURCE_IP_PORT
+notes=The pool will direct connections to the member server based on a hash of the source IP and port.
+cli=openstack loadbalancer pool create --lb-algorithm SOURCE_IP_PORT --listener
+driver.amphora=missing
+driver.ovn=complete
+
+[operation.description]
+title=description
+status=optional
+notes=The description of the pool. Provided by the Octavia API service.
+cli=openstack loadbalancer pool create [--description ] --listener
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.name]
+title=name
+status=optional
+notes=The name of the pool. Provided by the Octavia API service.
+cli=openstack loadbalancer pool create [--name ] --listener
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.protocol.HTTP]
+title=protocol - HTTP
+status=optional
+notes=HTTP protocol support for the pool.
+cli=openstack loadbalancer pool create --protocol HTTP --listener
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.protocol.HTTPS]
+title=protocol - HTTPS
+status=optional
+notes=HTTPS protocol support for the pool.
+cli=openstack loadbalancer pool create --protocol HTTPS --listener
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.protocol.PROXY]
+title=protocol - PROXY
+status=optional
+notes=PROXY protocol support for the pool.
+cli=openstack loadbalancer pool create --protocol PROXY --listener
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.protocol.PROXYV2]
+title=protocol - PROXYV2
+status=optional
+notes=PROXY protocol version 2 support for the pool.
+cli=openstack loadbalancer pool create --protocol PROXYV2 --listener
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.protocol.TCP]
+title=protocol - TCP
+status=optional
+notes=TCP protocol support for the pool.
+cli=openstack loadbalancer pool create --protocol TCP --listener
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.protocol.UDP]
+title=protocol - UDP
+status=optional
+notes=UDP protocol support for the pool.
+cli=openstack loadbalancer pool create --protocol UDP --listener
+driver.amphora=complete
+driver.ovn=complete
+
+[operation.protocol.SCTP]
+title=protocol - SCTP
+status=optional
+notes=SCTP protocol support for the pool.
+cli=openstack loadbalancer pool create --protocol SCTP --listener
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.session_persistence.APP_COOKIE]
+title=session_persistence - APP_COOKIE
+status=optional
+notes=Session persistence using an application-supplied cookie.
+cli=openstack loadbalancer pool create --session-persistence type=APP_COOKIE --listener
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.session_persistence.cookie_name]
+title=session_persistence - cookie_name
+status=optional
+notes=The name of the application cookie to use for session persistence.
+cli=openstack loadbalancer pool create --session-persistence cookie_name=chocolate --listener
+driver.amphora=complete
+driver.ovn=missing
+
+[operation.session_persistence.HTTP_COOKIE]
+title=session_persistence - HTTP_COOKIE
+status=optional
+notes=Session persistence using a cookie created by the load balancer.
+cli=openstack loadbalancer pool create --session-persistence type=HTTP_COOKIE --listener +driver.amphora=complete +driver.ovn=missing + +[operation.session_persistence.persistence_timeout] +title=session_persistence - persistence_timeout +status=optional +notes=The timeout, in seconds, after which a SCTP or UDP flow may be rescheduled to a different member. +cli=openstack loadbalancer pool create --session-persistence persistence_timeout=360 --listener +driver.amphora=complete +driver.ovn=missing + +[operation.session_persistence.persistence_granularity] +title=session_persistence - persistence_granularity +status=optional +notes=The netmask used to determine SCTP or UDP SOURCE_IP session persistence. +cli=openstack loadbalancer pool create --session-persistence persistence_granularity=255.255.255.255 --listener +driver.amphora=complete +driver.ovn=missing + +[operation.session_persistence.SOURCE_IP] +title=session_persistence - SOURCE_IP +status=optional +notes=Session persistence using the source IP address. +cli=openstack loadbalancer pool create --session-persistence type=SOURCE_IP --listener +driver.amphora=complete +driver.ovn=missing + +[operation.tags] +title=tags +status=optional +notes=The tags for the pool. Provided by the Octavia API service. +cli=openstack loadbalancer pool create [--tag ] --listener +driver.amphora=complete +driver.ovn=complete + +[operation.tls_ciphers] +title=tls_ciphers +status=optional +notes=List of TLS ciphers available for member connections. +cli=openstack loadbalancer pool create [--tls-ciphers ] --listener +driver.amphora=complete +driver.ovn=missing + +[operation.tls_container_ref] +title=tls_container_ref +status=optional +notes=The reference to the key manager service secret containing a PKCS12 format certificate/key bundle for tls_enabled pools for TLS client authentication to the member servers. +cli=openstack loadbalancer pool create [--tls-container-ref ] --listener +driver.amphora=complete +driver.ovn=missing + +[operation.tls_enabled] +title=tls_enabled +status=optional +notes=When true connections to backend member servers will use TLS encryption. +cli=openstack loadbalancer pool create [--enable-tls] --listener +driver.amphora=complete +driver.ovn=missing + +[operation.tls_versions] +title=tls_versions +status=optional +notes=List of TLS protocol versions available for member connections. +cli=openstack loadbalancer pool create [--tls-versions ] --listener +driver.amphora=complete +driver.ovn=missing diff --git a/doc/source/user/feature-classification/index.rst b/doc/source/user/feature-classification/index.rst new file mode 100644 index 0000000000..aed3eede35 --- /dev/null +++ b/doc/source/user/feature-classification/index.rst @@ -0,0 +1,109 @@ +.. + Copyright (c) 2019 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +=============================== +Octavia Provider Feature Matrix +=============================== + +Load Balancer Features +====================== + +Provider feature support matrix for an Octavia load balancer. 
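+
+Before consulting the matrices, it can be useful to check which provider
+drivers your cloud has enabled and to select one explicitly when creating a
+load balancer. The sketch below is illustrative only; the ``ovn`` provider
+and the *public-subnet* subnet are assumptions, and the providers actually
+available depend on your operator's configuration.
+
+::
+
+    # List the provider drivers enabled by the cloud operator.
+    openstack loadbalancer provider list
+
+    # Create a load balancer with an explicit provider driver.
+    openstack loadbalancer create --name lb1 --provider ovn \
+        --vip-subnet-id public-subnet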
+
+Load Balancer API Features
+--------------------------
+
+These features are documented in the Octavia API reference
+`Create a Load Balancer `_ section.
+
+.. support_matrix:: feature-matrix-lb.ini
+
+Listener Features
+=================
+
+Provider feature support matrix for an Octavia load balancer listener.
+
+Listener API Features
+---------------------
+
+These features are documented in the Octavia API reference
+`Create a Listener `_ section.
+
+.. support_matrix:: feature-matrix-listener.ini
+
+Pool Features
+=============
+
+Provider feature support matrix for an Octavia load balancer pool.
+
+Pool API Features
+-----------------
+
+These features are documented in the Octavia API reference
+`Create a Pool `_ section.
+
+.. support_matrix:: feature-matrix-pool.ini
+
+Member Features
+===============
+
+Provider feature support matrix for an Octavia load balancer member.
+
+Member API Features
+-------------------
+
+These features are documented in the Octavia API reference
+`Create a Member `_ section.
+
+.. support_matrix:: feature-matrix-member.ini
+
+Health Monitor Features
+=======================
+
+Provider feature support matrix for an Octavia load balancer health monitor.
+
+Health Monitor API Features
+---------------------------
+
+These features are documented in the Octavia API reference
+`Create a Health Monitor `_ section.
+
+.. support_matrix:: feature-matrix-healthmonitor.ini
+
+L7 Policy Features
+==================
+
+Provider feature support matrix for Octavia load balancer L7 policies.
+
+L7 Policy API Features
+----------------------
+
+These features are documented in the Octavia API reference
+`Create an L7 Policy `_ section.
+
+.. support_matrix:: feature-matrix-l7policy.ini
+
+L7 Rule Features
+================
+
+Provider feature support matrix for Octavia load balancer L7 rules.
+
+L7 Rule API Features
+--------------------
+
+These features are documented in the Octavia API reference
+`Create an L7 Rule `_ section.
+
+.. support_matrix:: feature-matrix-l7rule.ini
diff --git a/doc/source/user/guides/basic-cookbook.rst b/doc/source/user/guides/basic-cookbook.rst
new file mode 100644
index 0000000000..264efd38eb
--- /dev/null
+++ b/doc/source/user/guides/basic-cookbook.rst
@@ -0,0 +1,1033 @@
+..
+    Copyright (c) 2016 IBM
+    Copyright 2019 Red Hat, Inc. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may
+    not use this file except in compliance with the License. You may obtain
+    a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+    License for the specific language governing permissions and limitations
+    under the License.
+
+=============================
+Basic Load Balancing Cookbook
+=============================
+
+Introduction
+============
+This document contains several examples of using basic load balancing
+services as a tenant or "regular" cloud user.
+
+For the purposes of this guide, we assume that the neutron and barbican
+command-line interfaces, via the OpenStack client, will be used to configure
+all features of Octavia. In order to keep these examples short, we also
+assume that tasks not directly associated with deploying load balancing
+services have already been accomplished.
This might include such things as +deploying and configuring web servers, setting up Neutron networks, obtaining +TLS certificates from a trusted provider, and so on. A description of the +starting conditions is given in each example below. + +Please also note that this guide assumes you are familiar with the specific +load balancer terminology defined in the :doc:`../../reference/glossary`. For a +description of load balancing itself and the Octavia project, please see: +:doc:`../../reference/introduction`. + + +Examples +======== + +Deploy a basic HTTP load balancer +--------------------------------- +While this is technically the simplest complete load balancing solution that +can be deployed, we recommend deploying HTTP load balancers with a health +monitor to ensure back-end member availability. See :ref:`basic-lb-with-hm` +below. + +**Scenario description**: + +* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have + been configured with an HTTP application on TCP port 80. +* Subnet *public-subnet* is a shared external subnet created by the cloud + operator which is reachable from the internet. +* We want to configure a basic load balancer that is accessible from the + internet, which distributes web requests to the back-end servers. + +**Solution**: + +1. Create load balancer *lb1* on subnet *public-subnet*. +2. Create listener *listener1*. +3. Create pool *pool1* as *listener1*'s default pool. +4. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. + +**CLI commands**: + +:: + + openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait + openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 --wait lb1 + openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --wait + openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 --wait pool1 + openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 --wait pool1 + + +.. _basic-lb-with-hm: + +Deploy a basic HTTP load balancer with a health monitor +------------------------------------------------------- +This is the simplest recommended load balancing solution for HTTP applications. +This solution is appropriate for operators with provider networks that are not +compatible with Neutron floating-ip functionality (such as IPv6 networks). +However, if you need to retain control of the external IP through which a load +balancer is accessible, even if the load balancer needs to be destroyed or +recreated, it may be more appropriate to deploy your basic load balancer using +a floating IP. See :ref:`basic-lb-with-hm-and-fip` below. + +**Scenario description**: + +* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have + been configured with an HTTP application on TCP port 80. +* These back-end servers have been configured with a health check at the URL + path "/healthcheck". See :ref:`http-health-monitors` below. +* Subnet *public-subnet* is a shared external subnet created by the cloud + operator which is reachable from the internet. +* We want to configure a basic load balancer that is accessible from the + internet, which distributes web requests to the back-end servers, and which + checks the "/healthcheck" path to ensure back-end member health. + +**Solution**: + +1. Create load balancer *lb1* on subnet *public-subnet*. +2. Create listener *listener1*. +3. 
Create pool *pool1* as *listener1*'s default pool.
+4. Create a health monitor on *pool1* which tests the "/healthcheck" path.
+5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+
+**CLI commands**:
+
+::
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --wait
+    openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type HTTP --url-path /healthcheck --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 --wait pool1
+
+
+.. _basic-lb-with-hm-and-fip:
+
+Deploy a basic HTTP load balancer using a floating IP
+-----------------------------------------------------
+It can be beneficial to use a floating IP when setting up a load balancer's
+VIP in order to ensure you retain control of the IP that gets assigned as the
+floating IP in case the load balancer needs to be destroyed, moved, or
+recreated.
+
+Note that this is not possible to do with IPv6 load balancers as floating IPs
+do not work with IPv6.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with an HTTP application on TCP port 80.
+* These back-end servers have been configured with a health check at the URL
+  path "/healthcheck". See :ref:`http-health-monitors` below.
+* Neutron network *public* is a shared external network created by the cloud
+  operator which is reachable from the internet.
+* We want to configure a basic load balancer that is accessible from the
+  internet, which distributes web requests to the back-end servers, and which
+  checks the "/healthcheck" path to ensure back-end member health. Further, we
+  want to do this using a floating IP.
+
+**Solution**:
+
+1. Create load balancer *lb1* on subnet *private-subnet*.
+2. Create listener *listener1*.
+3. Create pool *pool1* as *listener1*'s default pool.
+4. Create a health monitor on *pool1* which tests the "/healthcheck" path.
+5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+6. Create a floating IP address on the external network *public*.
+7. Associate this floating IP with *lb1*'s VIP port (one way to look up the
+   IDs this requires is sketched below).
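+
+The floating IP association in step 7 needs *lb1*'s VIP port ID and the new
+floating IP's ID. As a minimal sketch, assuming the resource names used in
+this example, both IDs can be captured into shell variables instead of being
+copied by hand from the command output:
+
+::
+
+    # Capture the VIP port ID and the floating IP ID, then associate them
+    LB_VIP_PORT_ID=$(openstack loadbalancer show lb1 -f value -c vip_port_id)
+    FIP_ID=$(openstack floating ip create public -f value -c id)
+    openstack floating ip set --port "$LB_VIP_PORT_ID" "$FIP_ID"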
+
+**CLI commands**:
+
+::
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet --wait
+    openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --wait
+    openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type HTTP --url-path /healthcheck --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 --wait pool1
+    openstack floating ip create public
+    # The following IDs should be visible in the output of previous commands
+    openstack floating ip set --port <load_balancer_vip_port_id> <floating_ip_id>
+
+
+Deploy a basic HTTP load balancer with session persistence
+----------------------------------------------------------
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with an HTTP application on TCP port 80.
+* The application is written such that web clients should always be directed
+  to the same back-end server throughout their web session, based on an
+  application cookie named 'PHPSESSIONID' that is inserted by the web
+  application.
+* These back-end servers have been configured with a health check at the URL
+  path "/healthcheck". See :ref:`http-health-monitors` below.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* We want to configure a basic load balancer that is accessible from the
+  internet, which distributes web requests to the back-end servers, persists
+  sessions using the PHPSESSIONID as a key, and which checks the
+  "/healthcheck" path to ensure back-end member health.
+
+**Solution**:
+
+1. Create load balancer *lb1* on subnet *public-subnet*.
+2. Create listener *listener1*.
+3. Create pool *pool1* as *listener1*'s default pool which defines session
+   persistence on the 'PHPSESSIONID' cookie.
+4. Create a health monitor on *pool1* which tests the "/healthcheck" path.
+5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+
+**CLI commands**:
+
+::
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --session-persistence type=APP_COOKIE,cookie_name=PHPSESSIONID --wait
+    openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type HTTP --url-path /healthcheck --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 --wait pool1
+
+
+Deploy a TCP load balancer
+--------------------------
+This is generally suitable when load balancing a non-HTTP TCP-based service.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with a custom application on TCP port 23456.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* We want to configure a basic load balancer that is accessible from the
+  internet, which distributes requests to the back-end servers.
+* We want to employ a TCP health check to ensure that the back-end servers are
+  available.
+
+**Solution**:
+
+1. Create load balancer *lb1* on subnet *public-subnet*.
+2. Create listener *listener1*.
+3. Create pool *pool1* as *listener1*'s default pool.
+4. Create a health monitor on *pool1* which probes *pool1*'s members' TCP
+   service port.
+5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+
+**CLI commands**:
+
+::
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --name listener1 --protocol TCP --protocol-port 23456 --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol TCP --wait
+    openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type TCP --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 23456 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 23456 --wait pool1
+
+
+Deploy a QoS ruled load balancer
+--------------------------------
+This solution limits the bandwidth available through the load balancer's VIP
+by applying a Neutron Quality of Service (QoS) policy to the VIP. The load
+balancer accepts the QoS policy from Neutron and then limits incoming or
+outgoing traffic on the VIP accordingly.
+
+.. note::
+    Before using this feature, please make sure the Neutron QoS extension
+    (``qos``) is enabled in the running OpenStack environment, for example by
+    checking the output of the command
+
+    .. code-block:: console
+
+        openstack extension list
+
+**Scenario description**:
+
+* A QoS policy with a bandwidth-limit rule has been created in Neutron.
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with an HTTP application on TCP port 80.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* We want to configure a basic load balancer and limit the bandwidth of web
+  traffic reaching the VIP.
+
+**Solution**:
+
+1. Create QoS policy *qos-policy-bandwidth* with *bandwidth_limit* in Neutron.
+2. Create load balancer *lb1* on subnet *public-subnet* with the id of
+   *qos-policy-bandwidth* (a way to confirm the policy is attached is
+   sketched below).
+3. Create listener *listener1*.
+4. Create pool *pool1* as *listener1*'s default pool.
+5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
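+
+After completing the steps below, one simple check that the policy was
+actually applied to the VIP is to read back the load balancer's
+``vip_qos_policy_id`` field. A minimal sketch, assuming the resource names
+used in this example:
+
+::
+
+    # Prints the ID of qos-policy-bandwidth once the policy is attached
+    openstack loadbalancer show lb1 -f value -c vip_qos_policy_id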
+
+**CLI commands**:
+
+::
+
+    openstack network qos policy create qos-policy-bandwidth
+    openstack network qos rule create --type bandwidth_limit --max-kbps 1024 --max-burst-kbits 1024 qos-policy-bandwidth
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --vip-qos-policy-id qos-policy-bandwidth --wait
+    openstack loadbalancer listener create --name listener1 lb1 --protocol HTTP --protocol-port 80 --wait
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --wait
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 --wait pool1
+
+
+Deploy a load balancer with access control list
+-----------------------------------------------
+This solution limits incoming traffic to a listener to a set of allowed
+source IP addresses. Any other incoming traffic will be rejected.
+
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with a custom application on TCP port 23456.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* We want to configure a basic load balancer that is accessible from the
+  internet, which distributes requests to the back-end servers.
+* The application on TCP port 23456 is accessible only from a limited set of
+  source IP addresses (192.0.2.0/24 and 198.51.100.0/24).
+
+**Solution**:
+
+1. Create load balancer *lb1* on subnet *public-subnet*.
+2. Create listener *listener1* with allowed CIDRs.
+3. Create pool *pool1* as *listener1*'s default pool.
+4. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+
+**CLI commands**:
+
+::
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --name listener1 --protocol TCP --protocol-port 23456 --allowed-cidr 192.0.2.0/24 --allowed-cidr 198.51.100.0/24 --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol TCP --wait
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 23456 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 23456 --wait pool1
+
+
+Deploy a non-terminated HTTPS load balancer
+-------------------------------------------
+A non-terminated HTTPS load balancer acts effectively like a generic TCP load
+balancer: the load balancer forwards the raw TCP traffic from the web client
+to the back-end servers without decrypting it. This means that the back-end
+servers themselves must be configured to terminate the HTTPS connection with
+the web clients, and in turn, the load balancer cannot insert headers into
+the HTTP session indicating the client IP address. (That is, to the back-end
+server, all web requests will appear to originate from the load balancer.)
+Also, advanced load balancer features (like Layer 7 functionality) cannot be
+used with non-terminated HTTPS.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with a TLS-encrypted web application on TCP port 443.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* We want to configure a basic load balancer that is accessible from the
+  internet, which distributes requests to the back-end servers.
+* We want to employ a TCP health check to ensure that the back-end servers are
+  available.
+
+**Solution**:
+
+1. Create load balancer *lb1* on subnet *public-subnet*.
+2. Create listener *listener1*.
+3. Create pool *pool1* as *listener1*'s default pool.
+4. Create a health monitor on *pool1* which probes *pool1*'s members' TCP
+   service port.
+5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+
+**CLI commands**:
+
+::
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --name listener1 --protocol HTTPS --protocol-port 443 --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTPS --wait
+    openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type TCP --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 443 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 443 --wait pool1
+
+
+.. _basic-tls-terminated-listener:
+
+Deploy a TLS-terminated HTTPS load balancer
+-------------------------------------------
+With a TLS-terminated HTTPS load balancer, web clients communicate with the
+load balancer over TLS protocols. The load balancer terminates the TLS session
+and forwards the decrypted requests to the back-end servers. By terminating
+the TLS session on the load balancer, we offload the CPU-intensive encryption
+work to the load balancer, and enable the possibility of using advanced load
+balancer features, like Layer 7 features and header manipulation.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with a regular HTTP application on TCP port 80.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* A TLS certificate, key, and intermediate certificate chain for
+  www.example.com have been obtained from an external certificate authority.
+  These now exist in the files server.crt, server.key, and ca-chain.crt in the
+  current directory. The key and certificate are PEM-encoded, and the
+  intermediate certificate chain is multiple PEM-encoded certs concatenated
+  together. The key is not encrypted with a passphrase.
+* We want to configure a TLS-terminated HTTPS load balancer that is accessible
+  from the internet using the key and certificate mentioned above, which
+  distributes requests to the back-end servers over the non-encrypted HTTP
+  protocol.
+* Octavia is configured to use barbican for key management.
+
+**Solution**:
+
+1. Combine the individual cert/key/intermediates to a single PKCS12 file (a
+   way to sanity-check the result is sketched below).
+2. Create a barbican *secret* resource for the PKCS12 file. We will call
+   this *tls_secret1*.
+3. Create load balancer *lb1* on subnet *public-subnet*.
+4. Create listener *listener1* as a TERMINATED_HTTPS listener referencing
+   *tls_secret1* as its default TLS container.
+5. Create pool *pool1* as *listener1*'s default pool.
+6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
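+
+Before uploading the PKCS12 bundle built in step 1, it can be worth checking
+it locally. A minimal sketch, assuming the file name used in this example and
+the empty export passphrase used by the commands below:
+
+::
+
+    # Print a summary of the PKCS12 structure without dumping keys or certs
+    openssl pkcs12 -in server.p12 -info -noout -passin pass: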
+
+**CLI commands**:
+
+::
+
+    openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.crt -passout pass: -out server.p12
+    openstack secret store --name='tls_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server.p12)"
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --protocol-port 443 --protocol TERMINATED_HTTPS --name listener1 --default-tls-container=$(openstack secret list | awk '/ tls_secret1 / {print $2}') --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --wait
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 --wait pool1
+
+.. note::
+    A good security practice for production servers is to enable HTTP Strict
+    Transport Security (HSTS), which can be configured during listener
+    creation using the ``--hsts-max-age`` option and optionally the
+    ``--hsts-include-subdomains`` and ``--hsts-prefetch`` options.
+
+
+Deploy a TLS-terminated HTTPS load balancer with SNI
+----------------------------------------------------
+This example is exactly like :ref:`basic-tls-terminated-listener`, except that
+we have multiple TLS certificates that we would like to use on the same
+listener using Server Name Indication (SNI) technology.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with a regular HTTP application on TCP port 80.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* TLS certificates, keys, and intermediate certificate chains for
+  www.example.com and www2.example.com have been obtained from an external
+  certificate authority. These now exist in the files server.crt, server.key,
+  ca-chain.crt, server2.crt, server2.key, and ca-chain2.crt in the
+  current directory. The keys and certificates are PEM-encoded, and the
+  intermediate certificate chains are multiple PEM-encoded certs concatenated
+  together. Neither key is encrypted with a passphrase.
+* We want to configure a TLS-terminated HTTPS load balancer that is accessible
+  from the internet using the keys and certificates mentioned above, which
+  distributes requests to the back-end servers over the non-encrypted HTTP
+  protocol.
+* If a web client connects that is not SNI capable, we want the load balancer
+  to respond with the certificate for www.example.com.
+
+**Solution**:
+
+1. Combine the individual cert/key/intermediates to single PKCS12 files.
+2. Create barbican *secret* resources for the PKCS12 files. We will call them
+   *tls_secret1* and *tls_secret2* (a less fragile way to look up the secret
+   references afterwards is sketched below).
+3. Create load balancer *lb1* on subnet *public-subnet*.
+4. Create listener *listener1* as a TERMINATED_HTTPS listener referencing
+   *tls_secret1* as its default TLS container, and referencing both
+   *tls_secret1* and *tls_secret2* using SNI.
+5. Create pool *pool1* as *listener1*'s default pool.
+6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
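+
+The ``awk``-based secret lookups in the commands below can be fragile when
+secret names overlap. As an alternative sketch, assuming the secret names
+used in this example, the barbican client can filter on an exact name:
+
+::
+
+    # Look up each secret's href by exact name instead of pattern matching
+    TLS1_REF=$(openstack secret list --name tls_secret1 -f value -c "Secret href")
+    TLS2_REF=$(openstack secret list --name tls_secret2 -f value -c "Secret href")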
+ +**CLI commands**: + +:: + + openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.crt -passout pass: -out server.p12 + openssl pkcs12 -export -inkey server2.key -in server2.crt -certfile ca-chain2.crt -passout pass: -out server2.p12 + openstack secret store --name='tls_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server.p12)" + openstack secret store --name='tls_secret2' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server2.p12)" + openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait + openstack loadbalancer listener create --protocol-port 443 --protocol TERMINATED_HTTPS --name listener1 --default-tls-container=$(openstack secret list | awk '/ tls_secret1 / {print $2}') --sni-container-refs $(openstack secret list | awk '/ tls_secret1 / {print $2}') $(openstack secret list | awk '/ tls_secret2 / {print $2}') --wait -- lb1 + openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --wait + openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 --wait pool1 + openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 --wait pool1 + +Deploy a TLS-terminated HTTPS load balancer with client authentication +---------------------------------------------------------------------- +With a TLS-terminated HTTPS load balancer, web clients communicate with the +load balancer over TLS protocols. The load balancer terminates the TLS session +and forwards the decrypted requests to the back-end servers. By terminating the +TLS session on the load balancer, we offload the CPU-intensive encryption work +to the load balancer, and enable the possibility of using advanced load +balancer features, like Layer 7 features and header manipulation. +Adding client authentication allows users to authenticate connections +to the VIP using certificates. This is also known as two-way TLS +authentication. + +**Scenario description**: + +* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have + been configured with a regular HTTP application on TCP port 80. +* Subnet *public-subnet* is a shared external subnet created by the cloud + operator which is reachable from the internet. +* A TLS certificate, key, and intermediate certificate chain for + www.example.com have been obtained from an external certificate authority. + These now exist in the files server.crt, server.key, and ca-chain.crt in the + current directory. The key and certificate are PEM-encoded, and the + intermediate certificate chain is multiple PEM-encoded certificates + concatenated together. The key is not encrypted with a passphrase. +* A Certificate Authority (CA) certificate chain and optional Certificate + Revocation List (CRL) have been obtained from an external certificate + authority to authenticate client certificates against. +* We want to configure a TLS-terminated HTTPS load balancer that is accessible + from the internet using the key and certificate mentioned above, which + distributes requests to the back-end servers over the non-encrypted HTTP + protocol. +* Octavia is configured to use barbican for key management. + +**Solution**: + +1. Combine the individual cert/key/intermediates to a single PKCS12 file. +2. Create a barbican *secret* resource for the PKCS12 file. We will call + this *tls_secret1*. +3. Create a barbican *secret* resource for the client CA certificate. 
We will
+   call this *client_ca_cert*.
+4. Optionally create a barbican *secret* for the CRL file. We will call this
+   *client_ca_crl*.
+5. Create load balancer *lb1* on subnet *public-subnet*.
+6. Create listener *listener1* as a TERMINATED_HTTPS listener referencing
+   *tls_secret1* as its default TLS container, client authentication enabled,
+   *client_ca_cert* as the client CA TLS container reference, and
+   *client_ca_crl* as the client CRL container reference.
+7. Create pool *pool1* as *listener1*'s default pool.
+8. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+
+**CLI commands**:
+
+::
+
+    openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.crt -passout pass: -out server.p12
+    openstack secret store --name='tls_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server.p12)"
+    openstack secret store --name='client_ca_cert' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < client_ca.pem)"
+    openstack secret store --name='client_ca_crl' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < client_ca.crl)"
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --protocol-port 443 --protocol TERMINATED_HTTPS --name listener1 --default-tls-container=$(openstack secret list | awk '/ tls_secret1 / {print $2}') --client-authentication=MANDATORY --client-ca-tls-container-ref=$(openstack secret list | awk '/ client_ca_cert / {print $2}') --client-crl-container-ref=$(openstack secret list | awk '/ client_ca_crl / {print $2}') --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --wait
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 --wait pool1
+
+.. _h2-alpn-tls-terminated-listener:
+
+Deploy a secure HTTP/2 load balancer with ALPN TLS extension
+-------------------------------------------------------------
+This example is exactly like :ref:`basic-tls-terminated-listener`, except that
+we would like to enable HTTP/2 load balancing. The load balancer negotiates
+HTTP/2 with clients as part of the TLS handshake via the Application-Layer
+Protocol Negotiation (ALPN) TLS extension.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with a regular HTTP application on TCP port 80.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* A TLS certificate, key, and intermediate certificate chain for
+  www.example.com have been obtained from an external certificate authority.
+  These now exist in the files server.crt, server.key, and ca-chain.crt in the
+  current directory. The key and certificate are PEM-encoded, and the
+  intermediate certificate chain is multiple PEM-encoded certs concatenated
+  together. The key is not encrypted with a passphrase.
+* We want to configure a TLS-terminated HTTP/2 load balancer that is
+  accessible from the internet using the key and certificate mentioned above,
+  which distributes requests to the back-end servers over the non-encrypted
+  HTTP protocol.
+* Octavia is configured to use barbican for key management.
+
+**Solution**:
+
+1. Combine the individual cert/key/intermediates to a single PKCS12 file.
+2. Create a barbican *secret* resource for the PKCS12 file. We will call
+   this *tls_secret1*.
+3. Create load balancer *lb1* on subnet *public-subnet*.
+4. Create listener *listener1* as a TERMINATED_HTTPS listener referencing
+   *tls_secret1* as its default TLS container, with the *h2* ALPN protocol ID
+   and *http/1.1* as the fall-back protocol should the client not support
+   HTTP/2.
+5. Create pool *pool1* as *listener1*'s default pool.
+6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+
+**CLI commands**:
+
+::
+
+    openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.crt -passout pass: -out server.p12
+    openstack secret store --name='tls_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server.p12)"
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --protocol-port 443 --protocol TERMINATED_HTTPS --alpn-protocol h2 --alpn-protocol http/1.1 --name listener1 --default-tls-container=$(openstack secret list | awk '/ tls_secret1 / {print $2}') --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --wait
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 --wait pool1
+
+Deploy HTTP and TLS-terminated HTTPS load balancing on the same IP and backend
+------------------------------------------------------------------------------
+This example is exactly like :ref:`basic-tls-terminated-listener`, except that
+we would like to have both an HTTP and TERMINATED_HTTPS listener that use the
+same back-end pool (and therefore, probably respond with the exact same
+content regardless of whether the web client uses the HTTP or HTTPS protocol
+to connect).
+
+Please note that if you wish all HTTP requests to be redirected to HTTPS (so
+that requests are only served via HTTPS, and attempts to access content over
+HTTP just get redirected to the HTTPS listener), then please see `the example
+`__ in the :doc:`l7-cookbook`.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with a regular HTTP application on TCP port 80.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* A TLS certificate, key, and intermediate certificate chain for
+  www.example.com have been obtained from an external certificate authority.
+  These now exist in the files server.crt, server.key, and ca-chain.crt in the
+  current directory. The key and certificate are PEM-encoded, and the
+  intermediate certificate chain is multiple PEM-encoded certs concatenated
+  together. The key is not encrypted with a passphrase.
+* We want to configure a TLS-terminated HTTPS load balancer that is accessible
+  from the internet using the key and certificate mentioned above, which
+  distributes requests to the back-end servers over the non-encrypted HTTP
+  protocol.
+* We also want to configure an HTTP load balancer on the same IP address as
+  the above which serves the exact same content (i.e. forwards to the same
+  back-end pool) as the TERMINATED_HTTPS listener.
+
+**Solution**:
+
+1. Combine the individual cert/key/intermediates to a single PKCS12 file.
+2. Create a barbican *secret* resource for the PKCS12 file. We will call
+   this *tls_secret1*.
+3. Create load balancer *lb1* on subnet *public-subnet*.
+4. Create listener *listener1* as a TERMINATED_HTTPS listener referencing
+   *tls_secret1* as its default TLS container.
+5. Create pool *pool1* as *listener1*'s default pool.
+6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+7. Create listener *listener2* as an HTTP listener with *pool1* as its
+   default pool.
+
+**CLI commands**:
+
+::
+
+    openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.crt -passout pass: -out server.p12
+    openstack secret store --name='tls_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server.p12)"
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --protocol-port 443 --protocol TERMINATED_HTTPS --name listener1 --default-tls-container=$(openstack secret list | awk '/ tls_secret1 / {print $2}') --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --wait
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 --wait pool1
+    openstack loadbalancer listener create --protocol-port 80 --protocol HTTP --name listener2 --default-pool pool1 --wait lb1
+
+.. _pool-with-backend-reencryption:
+
+Deploy a load balancer with backend re-encryption
+-------------------------------------------------
+This example will demonstrate how to enable TLS encryption from the load
+balancer to the backend member servers. Typically this is used with TLS
+termination enabled on the listener, but, to simplify the example, we are
+going to use an unencrypted HTTP listener. For information on setting up a TLS
+terminated listener, see the above section
+:ref:`basic-tls-terminated-listener`.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with an HTTPS application on TCP port 443.
+* A Certificate Authority (CA) certificate chain and optional Certificate
+  Revocation List (CRL) have been obtained from an external certificate
+  authority to authenticate member server certificates against.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* We want to configure a basic load balancer that is accessible from the
+  internet, which distributes web requests to the back-end servers.
+
+**Solution**:
+
+1. Create a barbican *secret* resource for the member CA certificate. We will
+   call this *member_ca_cert*.
+2. Optionally create a barbican *secret* for the CRL file. We will call this
+   *member_ca_crl*.
+3. Create load balancer *lb1* on subnet *public-subnet*.
+4. Create listener *listener1*.
+5. Create TLS-enabled pool *pool1* as *listener1*'s default pool, with a
+   Certificate Authority (CA) certificate chain *member_ca_cert* to validate
+   the member server certificates, and a Certificate Revocation List (CRL)
+   *member_ca_crl* to check them against (a way to verify the resulting pool
+   settings is sketched below).
+6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
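+
+Once the CLI commands below have completed, the pool's TLS settings can be
+verified with a quick read-back. A minimal sketch, assuming the resource
+names used in this example:
+
+::
+
+    # tls_enabled should be True, and both container references should be set
+    openstack loadbalancer pool show pool1 -c tls_enabled -c ca_tls_container_ref -c crl_container_ref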
+
+**CLI commands**:
+
+::
+
+    openstack secret store --name='member_ca_cert' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < member_ca.pem)"
+    openstack secret store --name='member_ca_crl' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < member_ca.crl)"
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --enable-tls --ca-tls-container-ref $(openstack secret list | awk '/ member_ca_cert / {print $2}') --crl-container-ref $(openstack secret list | awk '/ member_ca_crl / {print $2}') --wait
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 443 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 443 --wait pool1
+
+Deploy a load balancer with backend re-encryption and client authentication
+----------------------------------------------------------------------------
+This example will demonstrate how to enable TLS encryption from the load
+balancer to the backend member servers with the load balancer being
+authenticated using TLS client authentication. Typically this is used with TLS
+termination enabled on the listener, but, to simplify the example, we are
+going to use an unencrypted HTTP listener. For information on setting up a TLS
+terminated listener, see the above section
+:ref:`basic-tls-terminated-listener`.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with an HTTPS application on TCP port 443.
+* A Certificate Authority (CA) certificate chain and optional Certificate
+  Revocation List (CRL) have been obtained from an external certificate
+  authority to authenticate member server certificates against.
+* A TLS certificate and key have been obtained from an external Certificate
+  Authority (CA). These now exist in the files member.crt and member.key. The
+  key and certificate are PEM-encoded and the key is not encrypted with a
+  passphrase (for this example).
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* We want to configure a basic load balancer that is accessible from the
+  internet, which distributes web requests to the back-end servers.
+
+**Solution**:
+
+1. Combine the member client authentication certificate and key to a single
+   PKCS12 file (a quick way to confirm the certificate and key match is
+   sketched below).
+2. Create a barbican *secret* resource for the PKCS12 file. We will call
+   this *member_secret1*.
+3. Create a barbican *secret* resource for the member CA certificate. We will
+   call this *member_ca_cert*.
+4. Optionally create a barbican *secret* for the CRL file. We will call this
+   *member_ca_crl*.
+5. Create load balancer *lb1* on subnet *public-subnet*.
+6. Create listener *listener1*.
+7. Create TLS-enabled pool *pool1* as *listener1*'s default pool, with a TLS
+   container reference for the member client authentication key and
+   certificate PKCS12 bundle, a Certificate Authority (CA) certificate chain
+   *member_ca_cert* to validate the member server certificates, and a
+   Certificate Revocation List (CRL) *member_ca_crl* to check them against.
+8. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
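+
+Before bundling the client key and certificate in step 1, it can help to
+confirm that they actually match. A minimal sketch, assuming RSA keys and the
+file names used in this example; the two digests should be identical:
+
+::
+
+    # Compare modulus digests of the certificate and the private key
+    openssl x509 -noout -modulus -in member.crt | openssl md5
+    openssl rsa -noout -modulus -in member.key | openssl md5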
+
+**CLI commands**:
+
+::
+
+    openssl pkcs12 -export -inkey member.key -in member.crt -passout pass: -out member.p12
+    openstack secret store --name='member_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < member.p12)"
+    openstack secret store --name='member_ca_cert' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < member_ca.pem)"
+    openstack secret store --name='member_ca_crl' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < member_ca.crl)"
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --name listener1 --protocol HTTP --protocol-port 80 --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --enable-tls --ca-tls-container-ref $(openstack secret list | awk '/ member_ca_cert / {print $2}') --crl-container-ref $(openstack secret list | awk '/ member_ca_crl / {print $2}') --tls-container-ref $(openstack secret list | awk '/ member_secret1 / {print $2}') --wait
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 443 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 443 --wait pool1
+
+Deploy an HTTP/2 load balancer with ALPN TLS extension and backend re-encryption
+---------------------------------------------------------------------------------
+This example will demonstrate how to enable HTTP/2 load balancing. We deploy
+the same h2 ALPN protocol and TLS-terminated listener that we use in
+:ref:`h2-alpn-tls-terminated-listener`, and we deploy the same pool and
+members with backend re-encryption and h2 ALPN protocols that we use in
+:ref:`pool-with-backend-reencryption`.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with an HTTPS application on TCP port 443.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* A TLS certificate, key, and intermediate certificate chain for
+  www.example.com have been obtained from an external certificate authority.
+  These now exist in the files server.crt, server.key, and ca-chain.crt in the
+  current directory. The key and certificate are PEM-encoded, and the
+  intermediate certificate chain is multiple PEM-encoded certs concatenated
+  together. The key is not encrypted with a passphrase.
+* We want to configure a TLS-terminated HTTP/2 load balancer that is
+  accessible from the internet using the key and certificate mentioned above,
+  which distributes requests to the back-end servers.
+* Octavia is configured to use barbican for key management.
+
+**Solution**:
+
+1. Combine the individual cert/key/intermediates to a single PKCS12 file.
+2. Create a barbican *secret* resource for the PKCS12 file. We will call
+   this *tls_secret1*.
+3. Create load balancer *lb1* on subnet *public-subnet*.
+4. Create listener *listener1* as a TERMINATED_HTTPS listener referencing
+   *tls_secret1* as its default TLS container, with the *h2* ALPN protocol ID
+   and *http/1.1* as the fall-back protocol should the client not support
+   HTTP/2 (a quick way to spot-check the HTTP/2 negotiation afterwards is
+   sketched below).
+5. Create TLS-enabled pool *pool1* as *listener1*'s default pool, with the
+   *h2* ALPN protocol ID and *http/1.1* as the fall-back protocol should the
+   back-end server not support HTTP/2.
+6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
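+
+After the deployment below, HTTP/2 negotiation on the front end can be
+spot-checked from any host that can reach the VIP. A sketch assuming a
+hypothetical VIP address of 203.0.113.50; the output should include an
+``ALPN protocol: h2`` line:
+
+::
+
+    # 203.0.113.50 is a placeholder; substitute the load balancer's VIP
+    openssl s_client -connect 203.0.113.50:443 -alpn h2 </dev/null 2>/dev/null | grep -i alpn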
+
+**CLI commands**:
+
+::
+
+    openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.crt -passout pass: -out server.p12
+    openstack secret store --name='tls_secret1' -t 'application/octet-stream' -e 'base64' --payload="$(base64 < server.p12)"
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --protocol-port 443 --protocol TERMINATED_HTTPS --alpn-protocol h2 --alpn-protocol http/1.1 --name listener1 --default-tls-container=$(openstack secret list | awk '/ tls_secret1 / {print $2}') --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --enable-tls --alpn-protocol h2 --alpn-protocol http/1.1 --wait
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 443 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 443 --wait pool1
+
+Deploy a UDP load balancer with a health monitor
+------------------------------------------------
+
+This is a load balancer solution suitable for UDP-based services.
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with an application on UDP port 1234.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* We want to configure a basic load balancer that is accessible from the
+  internet, which distributes requests to the back-end servers.
+* We want to employ a UDP health check to ensure that the back-end servers are
+  available. UDP health checks may not work correctly if ICMP Destination
+  Unreachable (ICMP type 3) messages are blocked by a security rule (see
+  :ref:`other-health-monitors`).
+
+**Solution**:
+
+1. Create load balancer *lb1* on subnet *public-subnet*.
+2. Create listener *listener1*.
+3. Create pool *pool1* as *listener1*'s default pool.
+4. Create a health monitor on *pool1* which connects to the back-end servers.
+5. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+
+**CLI commands**:
+
+::
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet --wait
+    openstack loadbalancer listener create --name listener1 --protocol UDP --protocol-port 1234 --wait lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol UDP --wait
+    openstack loadbalancer healthmonitor create --delay 3 --max-retries 2 --timeout 2 --type UDP-CONNECT --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 1234 --wait pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 1234 --wait pool1
+
+
+.. _health-monitor-best-practices:
+
+Health Monitor Best Practices
+=============================
+An Octavia health monitor is a process that does periodic health checks on
+each back-end member to pre-emptively detect failed members and temporarily
+pull them out of the pool.
+
+If the health monitor detects a failed member, it removes it from the pool and
+marks the member in ERROR. After you have corrected the member and it is
+functional again, the health monitor automatically changes the status of the
+member from ERROR to ONLINE, and resumes passing traffic to it.
+
+Always use health monitors in production load balancers.
If you do not have a
+health monitor, failed members are not removed from the pool. This can lead to
+service disruption for web clients.
+
+See also the command, `loadbalancer healthmonitor create `_.
+
+.. _all-health-monitors:
+
+Configuration arguments for all health monitors
+-----------------------------------------------
+
+All health monitor types for Octavia require the following configurable
+arguments:
+
+* ``delay``: Number of seconds to wait between health checks.
+* ``timeout``: Number of seconds to wait for any given health check to
+  complete. ``timeout`` should always be smaller than ``delay``.
+* ``max-retries``: Number of subsequent health checks a given back-end
+  server must fail before it is considered *down*, or that a failed back-end
+  server must pass to be considered *up* again.
+
+
+.. _http-health-monitors:
+
+Configuration arguments for HTTP health monitors
+------------------------------------------------
+
+In addition to the arguments listed earlier in :ref:`all-health-monitors`,
+HTTP health monitor types *also* use the following arguments, which have the
+defaults shown if not specified:
+
+* ``url-path``: Path part of the URL that should be retrieved from the
+  back-end server. By default this is "/".
+* ``http-method``: HTTP method that should be used to retrieve the
+  ``url-path``. By default this is "GET".
+* ``expected-codes``: List of HTTP status codes that indicate an OK health
+  check. By default this is just "200".
+
+For a complete list of configuration arguments for Octavia health monitors,
+see the command, `loadbalancer healthmonitor create `_.
+
+Please keep the following best practices in mind when writing the code that
+generates the health check in your web application:
+
+* The health monitor ``url-path`` should not require authentication to load.
+* By default the health monitor ``url-path`` should return an HTTP 200 OK
+  status code to indicate a healthy server unless you specify alternate
+  ``expected-codes``.
+* The health check should do enough internal checks to ensure the application
+  is healthy and no more. This may mean ensuring database or other external
+  storage connections are up and running, server load is acceptable, the site
+  is not in maintenance mode, and other tests specific to your application.
+* The page generated by the health check should be very lightweight:
+
+  * It should return in a sub-second interval.
+  * It should not induce significant load on the application server.
+
+* The page generated by the health check should never be cached, though the
+  code running the health check may reference cached data. For example, you
+  may find it useful to run a more extensive health check via cron and store
+  the results of this to disk. The code generating the page at the health
+  monitor ``url-path`` would incorporate the results of this cron job in the
+  tests it performs.
+* Since Octavia only cares about the HTTP status code returned, and since
+  health checks are run so frequently, it may make sense to use the "HEAD" or
+  "OPTIONS" HTTP methods to cut down on unnecessary processing of a whole
+  page.
+
+
+.. _other-health-monitors:
+
+Other health monitors
+---------------------
+Other health monitor types include ``PING``, ``TCP``, ``HTTPS``, ``SCTP``,
+``TLS-HELLO``, and ``UDP-CONNECT``.
+
+``PING`` health monitors send periodic ICMP PING requests to the back-end
+servers. Obviously, your back-end servers must be configured to allow PINGs in
+order for these health checks to pass.
+
+.. warning::
+
+    Health monitors of type ``PING`` only check if the member is reachable and
+    responds to ICMP echo requests. They will not detect if your application
+    running on that instance is healthy or not. Most pools should use one of
+    the other health monitor options. ``PING`` should only be used in specific
+    cases where an ICMP echo request is a valid health check.
+
+``TCP`` health monitors open a TCP connection to the back-end server's
+protocol port. Your custom TCP application should be written to respond OK to
+the load balancer connecting, opening a TCP connection, and closing it again
+after the TCP handshake without sending any data.
+
+``HTTPS`` health monitors operate exactly like HTTP health monitors, but
+against TLS-enabled back-end servers. Unfortunately, this causes problems if
+the servers are performing client certificate validation, as HAProxy won't
+have a valid cert. In this case, using ``TLS-HELLO`` type monitoring is an
+alternative.
+
+``SCTP`` health monitors send an INIT packet to the back-end server's port.
+If an application is listening on this port, the operating system should reply
+with an INIT ACK packet, but if the port is closed, it replies with an ABORT
+packet.
+If the health monitor receives an INIT ACK packet, it immediately closes the
+connection with an ABORT packet, and considers that the server is ONLINE.
+
+``TLS-HELLO`` health monitors simply ensure the back-end server responds to
+SSLv3 client hello messages. They will not check any other health metrics,
+like status code or body contents.
+
+``UDP-CONNECT`` health monitors do a basic UDP port connect. Health monitors
+of this type may not work correctly if Destination Unreachable (ICMP type 3)
+is not enabled on the member server or is blocked by a security rule. A member
+server may be marked as operating status ONLINE when it is actually down.
+
+
+Intermediate certificate chains
+===============================
+Some TLS certificates require you to install an intermediate certificate chain
+in order for web client browsers to trust the certificate. This chain can take
+several forms, and is a file provided by the organization from whom you
+obtained your TLS certificate.
+
+PEM-encoded chains
+------------------
+The simplest form of the intermediate chain is a PEM-encoded text file that
+either contains a sequence of individually-encoded PEM certificates, or one or
+more PEM-encoded PKCS7 blocks. If this is the type of intermediate chain you
+have been provided, the file will contain either ``-----BEGIN PKCS7-----`` or
+``-----BEGIN CERTIFICATE-----`` near the top of the file, and one or more
+blocks of 64-character lines of ASCII text (that will look like gobbledygook
+to a human). These files are also typically named with a ``.crt`` or ``.pem``
+extension.
+
+DER-encoded chains
+------------------
+If the intermediate chain provided to you is a file that contains what appears
+to be random binary data, it is likely that it is a PKCS7 chain in DER format.
+These files may also be named with a ``.p7b`` extension.
+
+You may use the binary DER file as-is when building your PKCS12 bundle:
+
+::
+
+    openssl pkcs12 -export -inkey server.key -in server.crt -certfile ca-chain.p7b -passout pass: -out server.p12
+
+... or you can convert it to a series of PEM-encoded certificates:
+
+::
+
+    openssl pkcs7 -in intermediates-chain.p7b -inform DER -print_certs -out intermediates-chain.crt
+
+... or you can convert it to a PEM-encoded PKCS7 bundle:
+
+::
+
+    openssl pkcs7 -in intermediates-chain.p7b -inform DER -outform PEM -out intermediates-chain.crt
+
+
+If the file is not a PKCS7 DER bundle, either of the two ``openssl pkcs7``
+commands will fail.
+
+Further reading
+===============
+For examples of using Layer 7 features for more advanced load balancing,
+please see: :doc:`l7-cookbook`
diff --git a/doc/source/user/guides/l7-cookbook.rst b/doc/source/user/guides/l7-cookbook.rst
new file mode 100644
index 0000000000..f5c8fb34fd
--- /dev/null
+++ b/doc/source/user/guides/l7-cookbook.rst
@@ -0,0 +1,420 @@
+..
+  Copyright (c) 2016 IBM
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may
+  not use this file except in compliance with the License. You may obtain
+  a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations
+  under the License.
+
+================
+Layer 7 Cookbook
+================
+
+Introduction
+============
+This document gives several examples of common L7 load balancer usage. For a
+description of L7 load balancing see: :doc:`l7`
+
+For the purposes of this guide we assume that the OpenStack Client
+command-line interface is going to be used to configure all features of
+Octavia with the Octavia driver back-end. Also, in order to keep these
+examples short, we assume that many non-L7 configuration tasks (such as
+deploying loadbalancers, listeners, pools, members, healthmonitors, etc.)
+have already been accomplished. A description of the starting conditions is
+given in each example below.
+
+
+Examples
+========
+
+.. _redirect-http-to-https:
+
+Redirect *http://www.example.com/* to *https://www.example.com/*
+----------------------------------------------------------------
+**Scenario description**:
+
+* Load balancer *lb1* has been set up with ``TERMINATED_HTTPS`` listener
+  *tls_listener* on TCP port 443.
+* *tls_listener* has been populated with a default pool, members, etc.
+* *tls_listener* is available at *https://www.example.com/*
+* We want any regular HTTP requests to TCP port 80 on *lb1* to be redirected
+  to *tls_listener* on TCP port 443.
+
+**Solution**:
+
+1. Create listener *http_listener* as an HTTP listener on *lb1* port 80.
+2. Set up an L7 Policy *policy1* on *http_listener* with action
+   ``REDIRECT_PREFIX`` pointed at the prefix *https://www.example.com/*
+3. Add an L7 Rule to *policy1* which matches all requests.
+
+
+**CLI commands**:
+
+.. code-block:: bash
+
+   openstack loadbalancer listener create --name http_listener --protocol HTTP --protocol-port 80 lb1
+   openstack loadbalancer l7policy create --action REDIRECT_PREFIX --redirect-prefix https://www.example.com/ --name policy1 http_listener
+   openstack loadbalancer l7rule create --compare-type STARTS_WITH --type PATH --value / policy1
+
+
+.. _send-requests-to-static-pool:
+
+Send requests starting with /js or /images to *static_pool*
+-----------------------------------------------------------
+**Scenario description**:
+
+* Listener *listener1* on load balancer *lb1* is set up to send all requests
+  to its default_pool *pool1*.
+* We are introducing static content servers 10.0.0.10 and 10.0.0.11 on subnet + *private-subnet*, and want any HTTP requests with a URL that starts with + either "/js" or "/images" to be sent to those two servers instead of *pool1*. + +**Solution**: + +1. Create pool *static_pool* on *lb1*. +2. Populate *static_pool* with the new back-end members. +3. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at + *static_pool*. +4. Create an L7 Rule on *policy1* which looks for "/js" at the start of + the request path. +5. Create L7 Policy *policy2* with action ``REDIRECT_TO_POOL`` pointed at + *static_pool*. +6. Create an L7 Rule on *policy2* which looks for "/images" at the start + of the request path. + +**CLI commands**: + +.. code-block:: bash + + openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name static_pool --protocol HTTP + openstack loadbalancer member create --address 10.0.0.10 --protocol-port 80 --subnet-id private-subnet static_pool + openstack loadbalancer member create --address 10.0.0.11 --protocol-port 80 --subnet-id private-subnet static_pool + openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool static_pool --name policy1 listener1 + openstack loadbalancer l7rule create --compare-type STARTS_WITH --type PATH --value /js policy1 + openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool static_pool --name policy2 listener1 + openstack loadbalancer l7rule create --compare-type STARTS_WITH --type PATH --value /images policy2 + +**Alternate solution** (using regular expressions): + +1. Create pool *static_pool* on *lb1*. +2. Populate *static_pool* with the new back-end members. +3. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at + *static_pool*. +4. Create an L7 Rule on *policy1* which uses a regular expression to match + either "/js" or "/images" at the start of the request path. + +**CLI commands**: + +.. code-block:: bash + + openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name static_pool --protocol HTTP + openstack loadbalancer member create --address 10.0.0.10 --protocol-port 80 --subnet-id private-subnet static_pool + openstack loadbalancer member create --address 10.0.0.11 --protocol-port 80 --subnet-id private-subnet static_pool + openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool static_pool --name policy1 listener1 + openstack loadbalancer l7rule create --compare-type REGEX --type PATH --value '^/(js|images)' policy1 + + +Send requests for *http://www2.example.com/* to *pool2* +------------------------------------------------------- +**Scenario description**: + +* Listener *listener1* on load balancer *lb1* is set up to send all requests to + its default_pool *pool1*. +* We have set up a new pool *pool2* on *lb1* and want any requests using the + HTTP/1.1 hostname *www2.example.com* to be sent to *pool2* instead. + +**Solution**: + +1. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at + *pool2*. +2. Create an L7 Rule on *policy1* which matches the hostname + *www2.example.com*. + +**CLI commands**: + +.. 
code-block:: bash + + openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool pool2 --name policy1 listener1 + openstack loadbalancer l7rule create --compare-type EQUAL_TO --type HOST_NAME --value www2.example.com policy1 + + +Send requests for *\*.example.com* to *pool2* +--------------------------------------------- +**Scenario description**: + +* Listener *listener1* on load balancer *lb1* is set up to send all requests to + its default_pool *pool1*. +* We have set up a new pool *pool2* on *lb1* and want any requests using any + HTTP/1.1 hostname like *\*.example.com* to be sent to *pool2* instead. + +**Solution**: + +1. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at + *pool2*. +2. Create an L7 Rule on *policy1* which matches any hostname that ends with + *example.com*. + +**CLI commands**: + +.. code-block:: bash + + openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool pool2 --name policy1 listener1 + openstack loadbalancer l7rule create --compare-type ENDS_WITH --type HOST_NAME --value example.com policy1 + + +Send unauthenticated users to *login_pool* (scenario 1) +------------------------------------------------------- +**Scenario description**: + +* ``TERMINATED_HTTPS`` listener *listener1* on load balancer *lb1* is set up + to send all requests to its default_pool *pool1*. +* The site behind *listener1* requires all web users to authenticate, after + which a browser cookie *auth_token* will be set. +* When web users log out, or if the *auth_token* is invalid, the application + servers in *pool1* clear the *auth_token*. +* We want to introduce new secure authentication server 10.0.1.10 on Neutron + subnet *secure_subnet* (a different Neutron subnet from the default + application servers) which handles authenticating web users and sets the + *auth_token*. + +*Note:* Obviously, to have a more secure authentication system that is less +vulnerable to attacks like XSS, the new secure authentication server will need +to set session variables to which the default_pool servers will have access +outside the data path with the web client. There may be other security concerns +as well. This example is not meant to address how these are to be +accomplished--it's mainly meant to show how L7 application routing can be done +based on a browser cookie. + +**Solution**: + +1. Create pool *login_pool* on *lb1*. +2. Add member 10.0.1.10 on *secure_subnet* to *login_pool*. +3. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at + *login_pool*. +4. Create an L7 Rule on *policy1* which looks for browser cookie *auth_token* + (with any value) and matches if it is *NOT* present. + +**CLI commands**: + +.. code-block:: bash + + openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name login_pool --protocol HTTP + openstack loadbalancer member create --address 10.0.1.10 --protocol-port 80 --subnet-id secure_subnet login_pool + openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool login_pool --name policy1 listener1 + openstack loadbalancer l7rule create --compare-type REGEX --key auth_token --type COOKIE --value '.*' --invert policy1 + + +Send unauthenticated users to *login_pool* (scenario 2) +-------------------------------------------------------- +**Scenario description**: + +* ``TERMINATED_HTTPS`` listener *listener1* on load balancer *lb1* is set up + to send all requests to its default_pool *pool1*. 
+* The site behind *listener1* requires all web users to authenticate, after
+  which a browser cookie *auth_token* will be set.
+* When web users log out, or if the *auth_token* is invalid, the application
+  servers in *pool1* set *auth_token* to the literal string "INVALID".
+* We want to introduce new secure authentication server 10.0.1.10 on Neutron
+  subnet *secure_subnet* (a different Neutron subnet from the default
+  application servers) which handles authenticating web users and sets the
+  *auth_token*.
+
+*Note:* Obviously, to have a more secure authentication system that is less
+vulnerable to attacks like XSS, the new secure authentication server will need
+to set session variables to which the default_pool servers will have access
+outside the data path with the web client. There may be other security concerns
+as well. This example is not meant to address how these are to be
+accomplished--it's mainly meant to show how L7 application routing can be done
+based on a browser cookie.
+
+**Solution**:
+
+1. Create pool *login_pool* on *lb1*.
+2. Add member 10.0.1.10 on *secure_subnet* to *login_pool*.
+3. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at
+   *login_pool*.
+4. Create an L7 Rule on *policy1* which looks for browser cookie *auth_token*
+   (with any value) and matches if it is *NOT* present.
+5. Create L7 Policy *policy2* with action ``REDIRECT_TO_POOL`` pointed at
+   *login_pool*.
+6. Create an L7 Rule on *policy2* which looks for browser cookie *auth_token*
+   and matches if it is equal to the literal string "INVALID".
+
+**CLI commands**:
+
+.. code-block:: bash
+
+    openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name login_pool --protocol HTTP
+    openstack loadbalancer member create --address 10.0.1.10 --protocol-port 80 --subnet-id secure_subnet login_pool
+    openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool login_pool --name policy1 listener1
+    openstack loadbalancer l7rule create --compare-type REGEX --key auth_token --type COOKIE --value '.*' --invert policy1
+    openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool login_pool --name policy2 listener1
+    openstack loadbalancer l7rule create --compare-type EQUAL_TO --key auth_token --type COOKIE --value INVALID policy2
+
+
+Send requests for *http://api.example.com/api* to *api_pool*
+------------------------------------------------------------
+**Scenario description**:
+
+* Listener *listener1* on load balancer *lb1* is set up to send all requests
+  to its default_pool *pool1*.
+* We have created pool *api_pool* on *lb1*, however, for legacy business logic
+  reasons, we only want requests sent to this pool if they match the hostname
+  *api.example.com* AND the request path starts with */api*.
+
+**Solution**:
+
+1. Create L7 Policy *policy1* with action ``REDIRECT_TO_POOL`` pointed at
+   *api_pool*.
+2. Create an L7 Rule on *policy1* which matches the hostname *api.example.com*.
+3. Create an L7 Rule on *policy1* which matches */api* at the start of the
+   request path. (This rule will be logically ANDed with the previous rule.)
+
+**CLI commands**:
+
+.. 
code-block:: bash + + openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool api_pool --name policy1 listener1 + openstack loadbalancer l7rule create --compare-type EQUAL_TO --type HOST_NAME --value api.example.com policy1 + openstack loadbalancer l7rule create --compare-type STARTS_WITH --type PATH --value /api policy1 + + +Set up A/B testing on an existing production site using a cookie +---------------------------------------------------------------- +**Scenario description**: + +* Listener *listener1* on load balancer *lb1* is a production site set up as + described under :ref:`send-requests-to-static-pool` (alternate solution) + above. Specifically: + + * HTTP requests with a URL that starts with either "/js" or "/images" are + sent to pool *static_pool*. + * All other requests are sent to *listener1's* default_pool *pool1*. + +* We are introducing a "B" version of the production site, complete with its + own default_pool and static_pool. We will call these *pool_B* and + *static_pool_B* respectively. +* The *pool_B* members should be 10.0.0.50 and 10.0.0.51, and the + *static_pool_B* members should be 10.0.0.100 and 10.0.0.101 on subnet + *private-subnet*. +* Web clients which should be routed to the "B" version of the site get a + cookie set by the member servers in *pool1*. This cookie is called + "site_version" and should have the value "B". + +**Solution**: + +1. Create pool *pool_B* on *lb1*. +2. Populate *pool_B* with its new back-end members. +3. Create pool *static_pool_B* on *lb1*. +4. Populate *static_pool_B* with its new back-end members. +5. Create L7 Policy *policy2* with action ``REDIRECT_TO_POOL`` pointed at + *static_pool_B*. This should be inserted at position 1. +6. Create an L7 Rule on *policy2* which uses a regular expression to match + either "/js" or "/images" at the start of the request path. +7. Create an L7 Rule on *policy2* which matches the cookie "site_version" to + the exact string "B". +8. Create L7 Policy *policy3* with action ``REDIRECT_TO_POOL`` pointed at + *pool_B*. This should be inserted at position 2. +9. Create an L7 Rule on *policy3* which matches the cookie "site_version" to + the exact string "B". + +*A word about L7 Policy position*: Since L7 Policies are evaluated in order +according to their position parameter, and since the first L7 Policy whose L7 +Rules all evaluate to True is the one whose action is followed, it is important +that L7 Policies with the most specific rules get evaluated first. + +For example, in this solution, if *policy3* were to appear in the listener's L7 +Policy list before *policy2* (that is, if *policy3* were to have a lower +position number than *policy2*), then if a web client were to request the URL +http://www.example.com/images/a.jpg with the cookie "site_version:B", then +*policy3* would match, and the load balancer would send the request to +*pool_B*. From the scenario description, this request clearly was meant to be +sent to *static_pool_B*, which is why *policy2* needs to be evaluated before +*policy3*. + +**CLI commands**: + +.. 
code-block:: bash
+
+    openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name pool_B --protocol HTTP
+    openstack loadbalancer member create --address 10.0.0.50 --protocol-port 80 --subnet-id private-subnet pool_B
+    openstack loadbalancer member create --address 10.0.0.51 --protocol-port 80 --subnet-id private-subnet pool_B
+    openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name static_pool_B --protocol HTTP
+    openstack loadbalancer member create --address 10.0.0.100 --protocol-port 80 --subnet-id private-subnet static_pool_B
+    openstack loadbalancer member create --address 10.0.0.101 --protocol-port 80 --subnet-id private-subnet static_pool_B
+    openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool static_pool_B --name policy2 --position 1 listener1
+    openstack loadbalancer l7rule create --compare-type REGEX --type PATH --value '^/(js|images)' policy2
+    openstack loadbalancer l7rule create --compare-type EQUAL_TO --key site_version --type COOKIE --value B policy2
+    openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool pool_B --name policy3 --position 2 listener1
+    openstack loadbalancer l7rule create --compare-type EQUAL_TO --key site_version --type COOKIE --value B policy3
+
+
+Redirect requests with an invalid TLS client authentication certificate
+-----------------------------------------------------------------------
+**Scenario description**:
+
+* Listener *listener1* on load balancer *lb1* is configured for ``OPTIONAL``
+  client_authentication.
+* Web clients that do not present a TLS client authentication certificate
+  should be redirected to a signup page at *http://www.example.com/signup*.
+
+**Solution**:
+
+1. Create the load balancer *lb1*.
+2. Create a listener *listener1* of type ``TERMINATED_HTTPS`` with a
+   client_ca_tls_container_ref and client_authentication ``OPTIONAL``.
+3. Create an L7 Policy *policy1* on *listener1* with action ``REDIRECT_TO_URL``
+   pointed at the URL *http://www.example.com/signup*.
+4. Add an L7 Rule to *policy1* that does not match ``SSL_CONN_HAS_CERT``.
+
+**CLI commands**:
+
+.. code-block:: bash
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet
+    openstack loadbalancer listener create --name listener1 --protocol TERMINATED_HTTPS --client-authentication OPTIONAL --protocol-port 443 --default-tls-container-ref http://192.0.2.15:9311/v1/secrets/697c2a6d-ffbe-40b8-be5e-7629fd636bca --client-ca-tls-container-ref http://192.0.2.15:9311/v1/secrets/dba60b77-8dad-4171-8a96-f21e1ca5fb46 lb1
+    openstack loadbalancer l7policy create --action REDIRECT_TO_URL --redirect-url http://www.example.com/signup --name policy1 listener1
+    openstack loadbalancer l7rule create --type SSL_CONN_HAS_CERT --invert --compare-type EQUAL_TO --value True policy1
+
+
+Send users from the finance department to pool2
+-----------------------------------------------
+**Scenario description**:
+
+* Users from the finance department have client certificates with the OU field
+  of the distinguished name set to ``finance``.
+* Only users with valid finance department client certificates should be able
+  to access ``pool2``. Others will be rejected.
+
+**Solution**:
+
+1. Create the load balancer *lb1*.
+2. Create a listener *listener1* of type ``TERMINATED_HTTPS`` with a
+   client_ca_tls_container_ref and client_authentication ``MANDATORY``.
+3. Create a pool *pool2* on load balancer *lb1*.
+4. Create an L7 Policy *policy1* on *listener1* with action
+   ``REDIRECT_TO_POOL`` pointed at *pool2*.
+5. Add an L7 Rule to *policy1* that matches ``SSL_CONN_HAS_CERT``.
+6. Add an L7 Rule to *policy1* that matches ``SSL_VERIFY_RESULT`` with a value
+   of 0.
+7. Add an L7 Rule to *policy1* of type ``SSL_DN_FIELD`` that looks for
+   "finance" in the "OU" field of the client authentication distinguished name.
+
+**CLI commands**:
+
+.. code-block:: bash
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet
+    openstack loadbalancer listener create --name listener1 --protocol TERMINATED_HTTPS --client-authentication MANDATORY --protocol-port 443 --default-tls-container-ref http://192.0.2.15:9311/v1/secrets/697c2a6d-ffbe-40b8-be5e-7629fd636bca --client-ca-tls-container-ref http://192.0.2.15:9311/v1/secrets/dba60b77-8dad-4171-8a96-f21e1ca5fb46 lb1
+    openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --loadbalancer lb1 --name pool2 --protocol HTTP
+    openstack loadbalancer l7policy create --action REDIRECT_TO_POOL --redirect-pool pool2 --name policy1 listener1
+    openstack loadbalancer l7rule create --type SSL_CONN_HAS_CERT --compare-type EQUAL_TO --value True policy1
+    openstack loadbalancer l7rule create --type SSL_VERIFY_RESULT --compare-type EQUAL_TO --value 0 policy1
+    openstack loadbalancer l7rule create --type SSL_DN_FIELD --compare-type EQUAL_TO --key OU --value finance policy1
diff --git a/doc/source/user/guides/l7.rst b/doc/source/user/guides/l7.rst
new file mode 100644
index 0000000000..05e64c1089
--- /dev/null
+++ b/doc/source/user/guides/l7.rst
@@ -0,0 +1,195 @@
+..
+      Copyright (c) 2016 IBM
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+======================
+Layer 7 Load Balancing
+======================
+
+What is L7 load balancing?
+==========================
+Layer 7 load balancing takes its name from the OSI model, indicating that the
+load balancer distributes requests to back-end pools based on layer 7
+(application) data. Layer 7 load balancing is also known as "request
+switching," "application load balancing," "content based routing," "content
+based switching," and "content based balancing."
+
+A layer 7 load balancer consists of a listener that accepts requests on behalf
+of a number of back-end pools and distributes those requests based on policies
+that use application data to determine which pools should service any given
+request. This allows the application infrastructure to be specifically tuned
+and optimized to serve specific types of content. For example, one group of
+back-end servers (pool) can be tuned to serve only images, another for
+execution of server-side scripting languages like PHP and ASP, and another for
+static content such as HTML, CSS, and JavaScript.
+
+Unlike lower-level load balancing, layer 7 load balancing does not require that
+all pools behind the load balancing service have the same content.
+In fact, it is generally expected that the back-end servers from different
+pools will have different content. Layer 7 load balancers are capable of
+directing requests based on URI, host, HTTP headers, and other data in the
+application message.
+
+
+L7 load balancing in Octavia
+----------------------------
+The layer 7 load balancing capabilities described in this document were added
+to Neutron LBaaS and Octavia in the Mitaka release cycle (Octavia 0.8).
+
+While layer 7 load balancing in general can theoretically be done for any
+well-defined layer 7 application interface, for the purposes of Octavia, L7
+functionality refers only to the HTTP protocol and its semantics.
+
+
+How does it work?
+=================
+Neutron LBaaS and Octavia accomplish the logic of layer 7 load balancing
+through the use of L7 Rules and L7 Policies. An L7 Rule is a single, simple
+logical test which evaluates to true or false. An L7 Policy is a collection of
+L7 rules, as well as a defined action that should be taken if all the rules
+associated with the policy match.
+
+These concepts and their specific details are expanded upon below.
+
+
+L7 Rules
+--------
+An L7 Rule is a single, simple logical test which returns either true or false.
+It consists of a rule type, a comparison type, a value, and an optional key
+that gets used depending on the rule type. An L7 rule must always be associated
+with an L7 policy.
+
+See also: `Octavia API Reference `_
+
+Rule types
+__________
+L7 rules have the following types:
+
+* ``HOST_NAME``: The rule compares the HTTP/1.1 hostname in the request
+  against the value parameter in the rule.
+* ``PATH``: The rule compares the path portion of the HTTP URI against the
+  value parameter in the rule.
+* ``FILE_TYPE``: The rule compares the last portion of the URI against the
+  value parameter in the rule. (e.g. "txt", "jpg", etc.)
+* ``HEADER``: The rule looks for a header defined in the key parameter and
+  compares it against the value parameter in the rule.
+* ``COOKIE``: The rule looks for a cookie named by the key parameter and
+  compares it against the value parameter in the rule.
+* ``SSL_CONN_HAS_CERT``: The rule will match if the client has presented a
+  certificate for TLS client authentication. This does not imply the
+  certificate is valid.
+* ``SSL_VERIFY_RESULT``: This rule will match the TLS client authentication
+  certificate validation result. A value of '0' means the certificate was
+  successfully validated. A value greater than '0' means the certificate
+  failed validation. This value follows the `openssl-verify result codes `_.
+* ``SSL_DN_FIELD``: The rule looks for a Distinguished Name field defined in
+  the key parameter and compares it against the value parameter in the rule.
+
+Comparison types
+________________
+L7 rules of a given type always do comparisons. The types of comparisons we
+support are listed below. Note that not all rule types support all comparison
+types:
+
+* ``REGEX``: Perl-type regular expression matching
+* ``STARTS_WITH``: String starts with
+* ``ENDS_WITH``: String ends with
+* ``CONTAINS``: String contains
+* ``EQUAL_TO``: String is equal to
+
+Invert
+______
+In order to more fully express the logic required by some policies, rules may
+have their result inverted. That is to say, if the invert parameter of a given
+rule is true, the result of its comparison will be inverted.
+(For example, an inverted "equal to" rule effectively becomes a "not equal
+to", and an inverted "regex" rule returns true only if the given regex does
+not match.)
+
+
+L7 Policies
+-----------
+An L7 Policy is a collection of L7 rules associated with a Listener; it may
+also have an association with a back-end pool. Policies describe actions that
+should be taken by the load balancing software if all of the rules in the
+policy return true.
+
+See also: `Octavia API Reference `_
+
+Policy Logic
+____________
+Policy logic is very simple: all the rules associated with a given policy are
+logically ANDed together. A request must match all the policy's rules to match
+the policy.
+
+If you need to express a logical OR operation between rules, then do this by
+creating multiple policies with the same action (or, possibly, by making a more
+elaborate regular expression).
+
+Policy Actions
+______________
+If an L7 policy matches a given request, then that policy's action is executed.
+The following are the actions an L7 Policy may take:
+
+* ``REJECT``: The request is denied with an appropriate response code, and not
+  forwarded on to any back-end pool.
+* ``REDIRECT_TO_URL``: The client is sent an HTTP redirect to the URL defined
+  in the ``redirect_url`` parameter.
+* ``REDIRECT_TO_POOL``: The request is forwarded to the back-end pool
+  associated with the L7 policy.
+
+Policy Position
+_______________
+When multiple L7 Policies are associated with a listener, the policies'
+``position`` parameter becomes important. The ``position`` parameter is used
+when determining the order in which L7 policies are evaluated. Here are a few
+notes about how policy position affects listener behavior:
+
+* In the reference implementation (haproxy amphorae) of Octavia, haproxy
+  enforces the following ordering regarding policy actions:
+
+  * ``REJECT`` policies take precedence over all other policies.
+  * ``REDIRECT_TO_URL`` policies take precedence over ``REDIRECT_TO_POOL``
+    policies.
+  * ``REDIRECT_TO_POOL`` policies are only evaluated after all of the above,
+    and in the order specified by the ``position`` of the policy.
+
+* L7 Policies are evaluated in a specific order (as defined by the ``position``
+  attribute), and the first policy that matches a given request will be the one
+  whose action is followed.
+* If no policy matches a given request, then the request is routed to the
+  listener's default pool, if it exists. If the listener has no default pool,
+  then a 503 error is returned.
+* Policy position numbering starts with 1.
+* If a new policy is created with a position that matches that of an existing
+  policy, then the new policy is inserted at the given position.
+* If a new policy is created without specifying a position, or specifying a
+  position that is greater than the number of policies already in the list, the
+  new policy will just be appended to the list.
+* When policies are inserted, deleted, or appended to the list, the policy
+  position values are re-ordered from 1 without skipping numbers. For example,
+  if policies A, B, and C have position values of 1, 2, and 3 respectively, and
+  you delete policy B from the list, policy C's position becomes 2.
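+
+As a minimal sketch of this re-ordering behavior (the policy name *policy3*
+is hypothetical), an existing policy can be moved to the head of the list and
+the resulting order then inspected:
+
+.. code-block:: bash
+
+    openstack loadbalancer l7policy set --position 1 policy3
+    openstack loadbalancer l7policy list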
+
+
+L7 usage examples
+=================
+For a cookbook of common L7 usage examples, please see the :doc:`l7-cookbook`.
+
+
+Useful links
+============
+* `Octavia API Reference `_
+* `LBaaS Layer 7 rules `_
+* `Using ACLs and fetching samples `_
+* `OpenSSL openssl-verify command `_
diff --git a/doc/source/user/guides/monitoring.rst b/doc/source/user/guides/monitoring.rst
new file mode 100644
index 0000000000..1163c76aa3
--- /dev/null
+++ b/doc/source/user/guides/monitoring.rst
@@ -0,0 +1,153 @@
+..
+      Copyright 2021 Red Hat, Inc. All rights reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=========================
+Monitoring Load Balancers
+=========================
+
+Introduction
+============
+
+Octavia provides multiple ways to monitor your load balancers. You can query
+statistics via the Octavia API or directly from your load balancer.
+
+This guide will discuss the various options available to monitor your Octavia
+load balancer.
+
+Monitoring Using the Octavia API
+================================
+
+Octavia collects key metrics from all load balancers, including load balancers
+built with third party provider drivers that support collecting statistics.
+Octavia aggregates these statistics and makes them available via the Octavia
+API. Load balancer statistics are available at the load balancer or listener
+level.
+
+.. _OpenStack Client: https://docs.openstack.org/python-openstackclient/latest/
+
+Load balancer statistics can be queried using the `OpenStack Client`_.
+
+.. code-block:: bash
+
+    $ openstack loadbalancer stats show
+
+    +--------------------+-----------+
+    | Field              | Value     |
+    +--------------------+-----------+
+    | active_connections | 0         |
+    | bytes_in           | 2236722   |
+    | bytes_out          | 100973832 |
+    | request_errors     | 0         |
+    | total_connections  | 3606      |
+    +--------------------+-----------+
+
+Individual listener statistics can also be queried using the
+`OpenStack Client`_.
+
+.. code-block:: bash
+
+    $ openstack loadbalancer listener stats show
+
+    +--------------------+-------+
+    | Field              | Value |
+    +--------------------+-------+
+    | active_connections | 0     |
+    | bytes_in           | 89    |
+    | bytes_out          | 237   |
+    | request_errors     | 0     |
+    | total_connections  | 1     |
+    +--------------------+-------+
+
+Load balancer statistics queried via the Octavia API include metrics for all
+listener protocols.
+
+Monitoring with Prometheus
+==========================
+
+Some provider drivers, such as the Octavia amphora driver, provide a
+Prometheus endpoint. This allows you to configure your Prometheus
+infrastructure to collect metrics from Octavia load balancers.
+
+To add a Prometheus endpoint on an Octavia load balancer, create a listener
+with a special protocol ``PROMETHEUS``. This will enable the endpoint as
+``/metrics`` on the listener. The listener supports all of the features of an
+Octavia load balancer, such as allowed_cidrs, but does not support attaching
+pools or L7 policies. All metrics will be identified by the Octavia object
+ID (UUID) of the resources.
+
+.. note:: Currently UDP and SCTP metrics are not reported via Prometheus
+   endpoints when using the amphora provider.
+
+To create a Prometheus endpoint on port 8088 for load balancer lb1, you would
+run the following command.
+
+.. code-block:: bash
+
+    $ openstack loadbalancer listener create --name stats-listener --protocol PROMETHEUS --protocol-port 8088 lb1
+    +-----------------------------+--------------------------------------+
+    | Field                       | Value                                |
+    +-----------------------------+--------------------------------------+
+    | admin_state_up              | True                                 |
+    | connection_limit            | -1                                   |
+    | created_at                  | 2021-10-03T01:44:25                  |
+    | default_pool_id             | None                                 |
+    | default_tls_container_ref   | None                                 |
+    | description                 |                                      |
+    | id                          | fb57d764-470a-4b6b-8820-627452f55b96 |
+    | insert_headers              | None                                 |
+    | l7policies                  |                                      |
+    | loadbalancers               | b081ed89-f6f8-48cb-a498-5e12705e2cf9 |
+    | name                        | stats-listener                       |
+    | operating_status            | OFFLINE                              |
+    | project_id                  | 4c1caeee063747f8878f007d1a323b2f     |
+    | protocol                    | PROMETHEUS                           |
+    | protocol_port               | 8088                                 |
+    | provisioning_status         | PENDING_CREATE                       |
+    | sni_container_refs          | []                                   |
+    | timeout_client_data         | 50000                                |
+    | timeout_member_connect      | 5000                                 |
+    | timeout_member_data         | 50000                                |
+    | timeout_tcp_inspect         | 0                                    |
+    | updated_at                  | None                                 |
+    | client_ca_tls_container_ref | None                                 |
+    | client_authentication       | NONE                                 |
+    | client_crl_container_ref    | None                                 |
+    | allowed_cidrs               | None                                 |
+    | tls_ciphers                 | None                                 |
+    | tls_versions                | None                                 |
+    | alpn_protocols              | None                                 |
+    | tags                        |                                      |
+    +-----------------------------+--------------------------------------+
+
+Once the ``PROMETHEUS`` listener is ``ACTIVE``, you can configure Prometheus to
+collect metrics from the load balancer by updating the prometheus.yml file.
+
+.. code-block:: yaml
+
+    scrape_configs:
+    - job_name: 'Octavia LB1'
+      static_configs:
+      - targets: ['192.0.2.10:8088']
+
+For more information on setting up Prometheus, see the
+`Prometheus project web site `_.
+
+.. note:: The metrics exposed via the ``/metrics`` endpoint will use a
+   custom Octavia namespace.
+
+You can connect `Grafana `_ to the
+`Prometheus `_ instance to provide additional graphing
+and dashboard capabilities. A Grafana dashboard for Octavia load balancers is
+included in the etc/grafana directory of the Octavia code.
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
new file mode 100644
index 0000000000..ae00ce8cfb
--- /dev/null
+++ b/doc/source/user/index.rst
@@ -0,0 +1,48 @@
+============
+Octavia User
+============
+
+Cookbooks
+=========
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   guides/basic-cookbook
+   guides/l7-cookbook
+
+Guides
+======
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   guides/l7
+   feature-classification/index
+   guides/monitoring
+
+References
+==========
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   Octavia API Reference 
+   Command Line Interface Reference 
+   sdks
+
+Videos
+======
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   Introduction to OpenStack Load Balancing (2017 Boston Summit) 
+
+.. only:: html
+
+   Indices and Search
+   ------------------
+
+   * :ref:`genindex`
+   * :ref:`search`
diff --git a/doc/source/user/sdks.rst b/doc/source/user/sdks.rst
new file mode 100644
index 0000000000..9325f47aae
--- /dev/null
+++ b/doc/source/user/sdks.rst
@@ -0,0 +1,45 @@
+..
+      Copyright (c) 2018 Rackspace, US Inc.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. 
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +======================================= +Octavia Software Development Kits (SDK) +======================================= + +Introduction +============ + +This is a list of known SDKs and language bindings that support OpenStack +load balancing via the Octavia API. This list is a "best effort" to keep +updated, so please check with your favorite SDK project to see if they +support OpenStack load balancing. If not, open a bug for them! + +.. Note:: The projects listed here may not be maintained by the OpenStack + LBaaS team. Please submit bugs for these projects through their + respective bug tracking systems. + +Go +== + +`Gophercloud `_ + +Java +==== + +`OpenStack4j `_ + +Python +====== + +`OpenStack SDK `_ diff --git a/elements/amphora-agent/README.rst b/elements/amphora-agent/README.rst new file mode 100644 index 0000000000..3c20dfe99e --- /dev/null +++ b/elements/amphora-agent/README.rst @@ -0,0 +1,8 @@ +Element to install an Octavia Amphora agent. + +By default, it installs the agent from source. To enable installation from +distribution repositories, define the following: + export DIB_INSTALLTYPE_amphora_agent=package + +Note: this requires a system base image modified to include OpenStack +repositories diff --git a/elements/amphora-agent/element-deps b/elements/amphora-agent/element-deps new file mode 100644 index 0000000000..ea84e496ad --- /dev/null +++ b/elements/amphora-agent/element-deps @@ -0,0 +1,6 @@ +dib-init-system +install-static +package-installs +pkg-map +source-repositories +svc-map diff --git a/elements/amphora-agent/install.d/amphora-agent-source-install/75-amphora-agent-install b/elements/amphora-agent/install.d/amphora-agent-source-install/75-amphora-agent-install new file mode 100755 index 0000000000..c154b882dc --- /dev/null +++ b/elements/amphora-agent/install.d/amphora-agent-source-install/75-amphora-agent-install @@ -0,0 +1,52 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi +set -eu +set -o pipefail + +SCRIPTDIR=$(dirname $0) +AMP_VENV=/opt/amphora-agent-venv + +/usr/bin/python3 -m venv $AMP_VENV + +$AMP_VENV/bin/pip install pip --upgrade + +$AMP_VENV/bin/pip install -U -c /opt/upper-constraints.txt /opt/amphora-agent + +# Let's capture the git reference we installed in the venv +git --git-dir=/opt/amphora-agent/.git rev-parse HEAD >> /opt/amphora-agent.gitref + +# Link the amphora-agent out to /usr/local/bin where the startup scripts look +ln -s $AMP_VENV/bin/amphora-agent /usr/local/bin/amphora-agent || true + +# Also link out the vrrp check script(s) so they're in PATH for keepalived +ln -s $AMP_VENV/bin/haproxy-vrrp-* /usr/local/bin/ || true + +# Link health checker script +ln -s $AMP_VENV/bin/amphora-health-checker /usr/local/bin/amphora-health-checker || true + +# Link amphora interface script +ln -s $AMP_VENV/bin/amphora-interface /usr/local/bin/amphora-interface || true + +# Link the prometheus proxy +ln -s $AMP_VENV/bin/prometheus-proxy /usr/local/bin/prometheus-proxy || true + +mkdir /etc/octavia +# we assume certs, etc will come in through the config drive +mkdir /etc/octavia/certs +mkdir -p /var/lib/octavia + +install -D 
-g root -o root -m 0644 ${SCRIPTDIR}/amphora-agent.logrotate /etc/logrotate.d/amphora-agent + +case "$DIB_INIT_SYSTEM" in + systemd) + install -D -g root -o root -m 0644 ${SCRIPTDIR}/amphora-agent.service /usr/lib/systemd/system/amphora-agent.service + install -D -g root -o root -m 0644 ${SCRIPTDIR}/prometheus-proxy.service /usr/lib/systemd/system/prometheus-proxy.service + ;; + *) + echo "Unsupported init system" + exit 1 + ;; +esac diff --git a/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.conf b/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.conf new file mode 100644 index 0000000000..addd3af681 --- /dev/null +++ b/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.conf @@ -0,0 +1,18 @@ +description "Start up the Octavia Amphora Agent" + +start on started certs-ramfs +stop on runlevel [!2345] + +respawn +respawn limit 2 2 + +exec amphora-agent --config-file /etc/octavia/amphora-agent.conf + +post-start script + PID=`status amphora-agent | egrep -oi '([0-9]+)$' | head -n1` + echo $PID > /var/run/amphora-agent.pid +end script + +post-stop script + rm -f /var/run/amphora-agent.pid +end script diff --git a/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.init b/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.init new file mode 100644 index 0000000000..793d59963d --- /dev/null +++ b/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.init @@ -0,0 +1,67 @@ +### BEGIN INIT INFO +# Provides: amphora-agent +# Required-Start: $remote_fs $syslog $network certs-ramfs +# Required-Stop: $remote_fs $syslog $network +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Runs the Amphora Agent processes +# Description: This script runs Octavia Amphora Agent processes. +# This script will start the Amphora Agent services +# and kill them. +### END INIT INFO + +# Using the lsb functions to perform the operations. +. /lib/lsb/init-functions +# Process name ( For display ) +NAME=amphora-agent +# Daemon name, where is the actual executable +DAEMON=/usr/local/bin/amphora-agent +# pid file for the daemon +PIDFILE=/var/run/amphora-agent.pid + +# If the daemon is not there, then exit. +test -x $DAEMON || exit 5 + +case $1 in + start) + # Checked the PID file exists and check the actual status of process + if [ -e $PIDFILE ]; then + status_of_proc -p $PIDFILE $DAEMON "$NAME process" && status="0" || status="$?" + # If the status is SUCCESS then don't need to start again. + if [ $status = "0" ]; then + exit # Exit + fi + fi + # Start the daemon. + log_daemon_msg "Starting the process" "$NAME" + # Start the daemon with the help of start-stop-daemon + # Log the message appropriately + if start-stop-daemon --start -m --quiet --oknodo --pidfile $PIDFILE --startas $DAEMON -- --config-file /etc/octavia/amphora-agent.conf ; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + stop) + # Stop the daemon. + if [ -e $PIDFILE ]; then + status_of_proc -p $PIDFILE $DAEMON "Stopping the $NAME process" && status="0" || status="$?" + if [ "$status" = 0 ]; then + start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE + /bin/rm -rf $PIDFILE + fi + else + log_daemon_msg "$NAME process is not running" + log_end_msg 0 + fi + ;; + restart) + # Restart the daemon. + $0 stop && sleep 2 && $0 start + ;; + *) + # For invalid arguments, print the usage message. 
+ echo "Usage: $0 {start|stop|restart|reload|status}" + exit 2 + ;; +esac diff --git a/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.logrotate b/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.logrotate new file mode 100644 index 0000000000..c2b87642dd --- /dev/null +++ b/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.logrotate @@ -0,0 +1,14 @@ +/var/log/amphora-agent.log { + daily + rotate 10 + missingok + notifempty + compress + delaycompress + sharedscripts + postrotate + # Signal name shall not have the SIG prefix in kill command + # http://pubs.opengroup.org/onlinepubs/9699919799/utilities/kill.html + kill -s USR1 $(cat /var/run/amphora-agent.pid) + endscript +} diff --git a/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.service b/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.service new file mode 100644 index 0000000000..f57a816d13 --- /dev/null +++ b/elements/amphora-agent/install.d/amphora-agent-source-install/amphora-agent.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenStack Octavia Amphora Agent +After=network.target syslog.service certs-ramfs.service +Requires=certs-ramfs.service +Wants=syslog.service + +[Service] +ExecStart=/usr/local/bin/amphora-agent --config-file /etc/octavia/amphora-agent.conf +KillMode=mixed +Restart=always +ExecStartPost=/bin/sh -c "echo $MAINPID > /var/run/amphora-agent.pid" +PIDFile=/var/run/amphora-agent.pid + +[Install] +WantedBy=multi-user.target diff --git a/elements/amphora-agent/install.d/amphora-agent-source-install/prometheus-proxy.conf b/elements/amphora-agent/install.d/amphora-agent-source-install/prometheus-proxy.conf new file mode 100644 index 0000000000..d78f537ec3 --- /dev/null +++ b/elements/amphora-agent/install.d/amphora-agent-source-install/prometheus-proxy.conf @@ -0,0 +1,19 @@ +description "Start up the Octavia Prometheus Proxy" + +start on started amphora-agent +stop on runlevel [!2345] + +respawn +# Handle the race condition with the netns being created +respawn limit unlimited + +exec /usr/local/bin/prometheus-proxy + +post-start script + PID=`status prometheus-proxy | egrep -oi '([0-9]+)$' | head -n1` + echo $PID > /var/run/prometheus-proxy.pid +end script + +post-stop script + rm -f /var/run/prometheus-proxy.pid +end script diff --git a/elements/amphora-agent/install.d/amphora-agent-source-install/prometheus-proxy.init b/elements/amphora-agent/install.d/amphora-agent-source-install/prometheus-proxy.init new file mode 100644 index 0000000000..9278328623 --- /dev/null +++ b/elements/amphora-agent/install.d/amphora-agent-source-install/prometheus-proxy.init @@ -0,0 +1,65 @@ +### BEGIN INIT INFO +# Provides: prometheus-proxy +# Required-Start: $remote_fs $syslog $network certs-ramfs +# Required-Stop: $remote_fs $syslog $network +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Runs the Prometheus Proxy processes +# Description: This script runs Octavia Prometheus Proxy processes. +### END INIT INFO + +# Using the lsb functions to perform the operations. +. /lib/lsb/init-functions +# Process name ( For display ) +NAME=prometheus-proxy +# Daemon name, where is the actual executable +DAEMON=/usr/local/bin/prometheus-proxy +# pid file for the daemon +PIDFILE=/var/run/prometheus-proxy.pid + +# If the daemon is not there, then exit. 
+test -x $DAEMON || exit 5 + +case $1 in + start) + # Checked the PID file exists and check the actual status of process + if [ -e $PIDFILE ]; then + status_of_proc -p $PIDFILE $DAEMON "$NAME process" && status="0" || status="$?" + # If the status is SUCCESS then don't need to start again. + if [ $status = "0" ]; then + exit # Exit + fi + fi + # Start the daemon. + log_daemon_msg "Starting the process" "$NAME" + # Start the daemon with the help of start-stop-daemon + # Log the message appropriately + if start-stop-daemon --start -m --quiet --oknodo --pidfile $PIDFILE --startas $DAEMON; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + stop) + # Stop the daemon. + if [ -e $PIDFILE ]; then + status_of_proc -p $PIDFILE $DAEMON "Stopping the $NAME process" && status="0" || status="$?" + if [ "$status" = 0 ]; then + start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE + /bin/rm -rf $PIDFILE + fi + else + log_daemon_msg "$NAME process is not running" + log_end_msg 0 + fi + ;; + restart) + # Restart the daemon. + $0 stop && sleep 2 && $0 start + ;; + *) + # For invalid arguments, print the usage message. + echo "Usage: $0 {start|stop|restart|reload|status}" + exit 2 + ;; +esac diff --git a/elements/amphora-agent/install.d/amphora-agent-source-install/prometheus-proxy.service b/elements/amphora-agent/install.d/amphora-agent-source-install/prometheus-proxy.service new file mode 100644 index 0000000000..5509d76f6a --- /dev/null +++ b/elements/amphora-agent/install.d/amphora-agent-source-install/prometheus-proxy.service @@ -0,0 +1,14 @@ +[Unit] +Description=OpenStack Octavia Prometheus Proxy +After=network.target syslog.service amphora-agent.service +Wants=amphora-agent.service + +[Service] +ExecStart=/usr/local/bin/prometheus-proxy +KillMode=mixed +Restart=always +ExecStartPost=/bin/sh -c "echo $MAINPID > /var/run/prometheus-proxy.pid" +PIDFile=/var/run/prometheus-proxy.pid + +[Install] +WantedBy=multi-user.target diff --git a/elements/amphora-agent/package-installs.yaml b/elements/amphora-agent/package-installs.yaml new file mode 100644 index 0000000000..621ddfbcf0 --- /dev/null +++ b/elements/amphora-agent/package-installs.yaml @@ -0,0 +1,92 @@ +amphora-agent: + installtype: package + +# Note: Uninstall of this does not remove all of the development packages. +# So some are called out here explicitly to be removed. +build-essential: + build-only: True +gcc: + uninstall: True +gcc-11: + uninstall: True + when: + - DISTRO_NAME = ubuntu +gcc-12: + uninstall: True + when: + - DISTRO_NAME = ubuntu +git-man: + uninstall: True +perl: + uninstall: True +# diskimage-builder installs firewalld in rockylinux, it's not needed as it +# blocks management and tenant traffic by default and we use security groups +firewalld: + uninstall: True + +libffi-dev: + build-only: True +libssl-dev: + build-only: True +python3-dev: + build-only: True + installtype: source +python3-pip: + installtype: source +python3: +python3-venv: + installtype: source + +acl: +# Note: Red Hat family does not currently ship acpid for ppc64le. 
+# This sets up a pkg-map to exclude it for Red Hat family ppc64le arch +acpid-ppc64le: + arch: ppc64le, ppc64el +acpid: + arch: amd64, aarch64, arm64, s390x +apparmor: +apt-transport-https: +at: +bash-completion: +cloud-guest-utils: +cloud-init: +cron: +curl: +dbus: +dkms: +dmeventd: +ethtool: +gawk: +ifenslave: +ifupdown: +iptables: +iputils-tracepath: +irqbalance: +isc-dhcp-client: +less: +logrotate: +lsof: +net-tools: +netbase: +netcat-openbsd: +network-scripts: +open-vm-tools: + arch: amd64 +openssh-client: +openssh-server: + when: DIB_OCTAVIA_AMP_USE_SSH = True +pollinate: +ppc64-diag: + arch: ppc64le, ppc64el +psmisc: +rsyslog: +screen: +socat: +tcpdump: +ubuntu-cloudimage-keyring: +uuid-runtime: +vim-tiny: +vlan: + +nftables: + when: DIB_OCTAVIA_AMP_USE_NFTABLES = True diff --git a/elements/amphora-agent/pkg-map b/elements/amphora-agent/pkg-map new file mode 100644 index 0000000000..36f98950a2 --- /dev/null +++ b/elements/amphora-agent/pkg-map @@ -0,0 +1,67 @@ +{ + "release": { + "rhel": { + "9": { + "isc-dhcp-client": "dhcp-client", + "python3-dev": "platform-python-devel", + "python3-venv": "", + "python3": "python39", + "vlan": "", + "screen": "", + "dkms": "", + "network-scripts": "" + } + }, + "centos": { + "9-stream": { + "isc-dhcp-client": "dhcp-client", + "python3-dev": "platform-python-devel", + "python3-venv": "", + "python3": "python39", + "vlan": "", + "screen": "", + "dkms": "", + "network-scripts": "" + } + }, + "rocky": { + "9": { + "isc-dhcp-client": "dhcp-client", + "python3-dev": "platform-python-devel", + "python3-venv": "", + "python3": "python39", + "vlan": "", + "screen": "", + "dkms": "", + "network-scripts": "" + } + } + }, + "family": { + "redhat": { + "amphora-agent": "openstack-octavia-amphora-agent", + "acpid-ppc64le": "", + "netcat-openbsd": "nmap-ncat", + "netbase": "", + "cron": "cronie", + "ifenslave": "", + "iputils-tracepath": "", + "cloud-guest-utils": "cloud-utils-growpart", + "apparmor": "", + "dmeventd": "", + "isc-dhcp-client": "dhclient", + "uuid-runtime": "", + "ubuntu-cloudimage-keyring": "", + "vim-tiny": "vim-minimal", + "apt-transport-https": "", + "pollinate": "", + "ifupdown": "", + "network-scripts": "network-scripts" + } + }, + "default": { + "amphora-agent": "amphora-agent", + "acpid-ppc64le": "acpid", + "network-scripts": "" + } +} diff --git a/elements/amphora-agent/post-install.d/11-enable-amphora-agent-systemd b/elements/amphora-agent/post-install.d/11-enable-amphora-agent-systemd new file mode 100755 index 0000000000..65fb85f3d2 --- /dev/null +++ b/elements/amphora-agent/post-install.d/11-enable-amphora-agent-systemd @@ -0,0 +1,12 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi + +set -eu +set -o pipefail + +if [ "$DIB_INIT_SYSTEM" == "systemd" ]; then + systemctl enable $(svc-map amphora-agent) +fi diff --git a/elements/amphora-agent/post-install.d/12-enable-prometheus-proxy-systemd b/elements/amphora-agent/post-install.d/12-enable-prometheus-proxy-systemd new file mode 100755 index 0000000000..74f134bb7e --- /dev/null +++ b/elements/amphora-agent/post-install.d/12-enable-prometheus-proxy-systemd @@ -0,0 +1,12 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi + +set -eu +set -o pipefail + +if [ "$DIB_INIT_SYSTEM" == "systemd" ]; then + systemctl enable $(svc-map prometheus-proxy) +fi diff --git a/elements/amphora-agent/post-install.d/90-remove-build-deps b/elements/amphora-agent/post-install.d/90-remove-build-deps new file mode 100755 index 0000000000..0858385ff3 --- /dev/null 
+++ b/elements/amphora-agent/post-install.d/90-remove-build-deps
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
+    set -x
+fi
+
+set -eu
+set -o pipefail
+
+[ "${DIB_INSTALLTYPE_amphora_agent:-}" = "package" ] && exit 0
+
+rm -rf /opt/amphora-agent
+
+case $DISTRO_NAME in
+    ubuntu | debian )
+        apt-get --assume-yes purge --auto-remove
+        ;;
+    fedora | centos* | rhel* | rocky )
+        YUM=${YUM:-yum}
+        ${YUM} -v -y autoremove
+        ;;
+    *)
+        echo "ERROR: Unsupported distribution $DISTRO_NAME"
+        exit 1
+        ;;
+esac
diff --git a/elements/amphora-agent/source-repository-amphora-agent b/elements/amphora-agent/source-repository-amphora-agent
new file mode 100644
index 0000000000..53684a4761
--- /dev/null
+++ b/elements/amphora-agent/source-repository-amphora-agent
@@ -0,0 +1,3 @@
+# This is used for source-based builds
+amphora-agent git /opt/amphora-agent https://opendev.org/openstack/octavia
+upper-constraints file /opt/upper-constraints.txt https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt
diff --git a/elements/amphora-agent/static/etc/NetworkManager/conf.d/dns-none.conf b/elements/amphora-agent/static/etc/NetworkManager/conf.d/dns-none.conf
new file mode 100644
index 0000000000..9032e19419
--- /dev/null
+++ b/elements/amphora-agent/static/etc/NetworkManager/conf.d/dns-none.conf
@@ -0,0 +1,4 @@
+[main]
+# Set dns to none; this prevents NetworkManager from updating the
+# /etc/resolv.conf file.
+dns=none
diff --git a/elements/amphora-agent/static/etc/NetworkManager/conf.d/no-auto-default.conf b/elements/amphora-agent/static/etc/NetworkManager/conf.d/no-auto-default.conf
new file mode 100644
index 0000000000..8fce22d00c
--- /dev/null
+++ b/elements/amphora-agent/static/etc/NetworkManager/conf.d/no-auto-default.conf
@@ -0,0 +1,7 @@
+[main]
+# Disable auto configuration for newly detected devices.
+# This prevents having temporary addresses and routes in the default namespace
+# between the detection of a new device and its move to the amphora-haproxy
+# namespace.
+# The management interface configuration is triggered by cloud-init.
+no-auto-default=*
diff --git a/elements/amphora-agent/static/etc/rsyslog.d/55-octavia-socket.conf b/elements/amphora-agent/static/etc/rsyslog.d/55-octavia-socket.conf
new file mode 100644
index 0000000000..8983a13850
--- /dev/null
+++ b/elements/amphora-agent/static/etc/rsyslog.d/55-octavia-socket.conf
@@ -0,0 +1,4 @@
+module(load="imuxsock")
+input(type="imuxsock"
+      Socket="/run/rsyslog/octavia/log"
+      CreatePath="on")
diff --git a/elements/amphora-agent/static/usr/local/bin/lvs-masquerade.sh b/elements/amphora-agent/static/usr/local/bin/lvs-masquerade.sh
new file mode 100755
index 0000000000..b5621b0b12
--- /dev/null
+++ b/elements/amphora-agent/static/usr/local/bin/lvs-masquerade.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+#
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# + +set -e + +usage() { + echo + echo "Usage: $(basename "$0") [add|delete] [ipv4|ipv6] [sriov]" + echo + exit 1 +} + +if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then + usage +fi + +if [ "$1" == "add" ]; then + + if [ -x "$(command -v nft)" ]; then + # Note: inet for nat requires a 5.2 or newer kernel. + if [ "$2" == "ipv4" ]; then + nft add table ip octavia-ipv4 + nft add chain ip octavia-ipv4 ip-udp-masq { type nat hook postrouting priority 100\;} + nft add rule ip octavia-ipv4 ip-udp-masq oifname "$3" meta l4proto udp masquerade + nft add chain ip octavia-ipv4 ip-sctp-masq { type nat hook postrouting priority 100\;} + nft add rule ip octavia-ipv4 ip-sctp-masq oifname "$3" meta l4proto sctp masquerade + if ! [ "$4" == "sriov" ]; then + nft -- add chain ip octavia-ipv4 prerouting { type filter hook prerouting priority -300 \; } + nft add rule ip octavia-ipv4 prerouting iifname "$3" meta l4proto tcp notrack + nft -- add chain ip octavia-ipv4 output { type filter hook output priority -300 \; } + nft add rule ip octavia-ipv4 output oifname "$3" meta l4proto tcp notrack + fi + + elif [ "$2" == "ipv6" ]; then + nft add table ip6 octavia-ipv6 + nft add chain ip6 octavia-ipv6 ip6-udp-masq { type nat hook postrouting priority 100\;} + nft add rule ip6 octavia-ipv6 ip6-udp-masq oifname "$3" meta l4proto udp masquerade + nft add chain ip6 octavia-ipv6 ip6-sctp-masq { type nat hook postrouting priority 100\;} + nft add rule ip6 octavia-ipv6 ip6-sctp-masq oifname "$3" meta l4proto sctp masquerade + if ! [ "$4" == "sriov" ]; then + nft -- add chain ip6 octavia-ipv6 prerouting { type filter hook prerouting priority -300 \; } + nft add rule ip6 octavia-ipv6 prerouting iifname "$3" meta l4proto tcp notrack + nft -- add chain ip6 octavia-ipv6 output { type filter hook output priority -300 \; } + nft add rule ip6 octavia-ipv6 output oifname "$3" meta l4proto tcp notrack + fi + else + usage + fi + + else # nft not found, fall back to iptables + if [ "$2" == "ipv4" ]; then + /sbin/iptables -t nat -A POSTROUTING -p udp -o $3 -j MASQUERADE + /sbin/iptables -t nat -A POSTROUTING -p sctp -o $3 -j MASQUERADE + + if ! [ "$4" == "sriov" ]; then + /sbin/iptables -t raw -A PREROUTING -p tcp -i $3 -j NOTRACK + /sbin/iptables -t raw -A OUTPUT -p tcp -o $3 -j NOTRACK + fi + elif [ "$2" == "ipv6" ]; then + /sbin/ip6tables -t nat -A POSTROUTING -p udp -o $3 -j MASQUERADE + /sbin/ip6tables -t nat -A POSTROUTING -p sctp -o $3 -j MASQUERADE + + if ! 
[ "$4" == "sriov" ]; then + /sbin/ip6tables -t raw -A PREROUTING -p tcp -i $3 -j NOTRACK + /sbin/ip6tables -t raw -A OUTPUT -p tcp -o $3 -j NOTRACK + fi + else + usage + fi + fi + +elif [ "$1" == "delete" ]; then + + if [ -x "$(command -v nft)" ]; then + if [ "$2" == "ipv4" ]; then + nft flush chain ip octavia-ipv4 ip-udp-masq + nft delete chain ip octavia-ipv4 ip-udp-masq + nft flush chain ip octavia-ipv4 ip-sctp-masq + nft delete chain ip octavia-ipv4 ip-sctp-masq + # Don't abort the script if these chains don't exist + nft flush chain ip octavia-ipv4 prerouting || true + nft delete chain ip octavia-ipv4 prerouting || true + nft flush chain ip octavia-ipv4 output || true + nft delete chain ip octavia-ipv4 output || true + elif [ "$2" == "ipv6" ]; then + nft flush chain ip6 octavia-ipv6 ip6-udp-masq + nft delete chain ip6 octavia-ipv6 ip6-udp-masq + nft flush chain ip6 octavia-ipv6 ip6-sctp-masq + nft delete chain ip6 octavia-ipv6 ip6-sctp-masq + # Don't abort the script if these chains don't exist + nft flush chain ip6 octavia-ipv6 prerouting || true + nft delete chain ip6 octavia-ipv6 prerouting || true + nft flush chain ip6 octavia-ipv6 output || true + nft delete chain ip6 octavia-ipv6 output || true + else + usage + fi + + else # nft not found, fall back to iptables + if [ "$2" == "ipv4" ]; then + /sbin/iptables -t nat -D POSTROUTING -p udp -o $3 -j MASQUERADE + /sbin/iptables -t nat -D POSTROUTING -p sctp -o $3 -j MASQUERADE + # Don't abort the script if these chains don't exist + /sbin/iptables -t raw -D PREROUTING -p tcp -i $3 -j NOTRACK || true + /sbin/iptables -t raw -D OUTPUT -p tcp -o $3 -j NOTRACK || true + elif [ "$2" == "ipv6" ]; then + /sbin/ip6tables -t nat -D POSTROUTING -p udp -o $3 -j MASQUERADE + /sbin/ip6tables -t nat -D POSTROUTING -p sctp -o $3 -j MASQUERADE + # Don't abort the script if these chains don't exist + /sbin/ip6tables -t raw -D PREROUTING -p tcp -i $3 -j NOTRACK || true + /sbin/ip6tables -t raw -D OUTPUT -p tcp -o $3 -j NOTRACK || true + else + usage + fi + fi +else + usage +fi diff --git a/elements/amphora-agent/static/usr/local/bin/udp-masquerade.sh b/elements/amphora-agent/static/usr/local/bin/udp-masquerade.sh new file mode 120000 index 0000000000..f4de332a7e --- /dev/null +++ b/elements/amphora-agent/static/usr/local/bin/udp-masquerade.sh @@ -0,0 +1 @@ +lvs-masquerade.sh \ No newline at end of file diff --git a/elements/amphora-agent/svc-map b/elements/amphora-agent/svc-map new file mode 100644 index 0000000000..2f11e0e046 --- /dev/null +++ b/elements/amphora-agent/svc-map @@ -0,0 +1,6 @@ +amphora-agent: + default: amphora-agent + redhat: octavia-amphora-agent +prometheus-proxy: + default: prometheus-proxy + redhat: octavia-prometheus-proxy diff --git a/elements/amphora-apparmor/README.rst b/elements/amphora-apparmor/README.rst new file mode 100644 index 0000000000..42d915f372 --- /dev/null +++ b/elements/amphora-apparmor/README.rst @@ -0,0 +1,4 @@ +Element to configure apparmor for Octavia + +This element will configure apparmor to allow rsyslog to create a log socket +for Octavia Amphora logging diff --git a/elements/amphora-apparmor/element-deps b/elements/amphora-apparmor/element-deps new file mode 100644 index 0000000000..0a65984a16 --- /dev/null +++ b/elements/amphora-apparmor/element-deps @@ -0,0 +1 @@ +install-static diff --git a/elements/amphora-apparmor/post-install.d/10-fix-rsyslog b/elements/amphora-apparmor/post-install.d/10-fix-rsyslog new file mode 100755 index 0000000000..9ed402950e --- /dev/null +++ 
b/elements/amphora-apparmor/post-install.d/10-fix-rsyslog
@@ -0,0 +1,11 @@
+#!/bin/bash
+# This is a workaround for a known kernel bug with apparmor:
+# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1373070
+#
+# Apparmor cannot handle namespaces correctly and will drop the '/' prefix
+# from a file path, thus causing the process to not have access.
+#
+# The reported workaround is to add flags=(attach_disconnected) to the rsyslog
+# profile.
+
+sed -i 's#profile rsyslogd /usr/sbin/rsyslogd {#profile rsyslogd /usr/sbin/rsyslogd flags=(attach_disconnected) {#g' /etc/apparmor.d/usr.sbin.rsyslogd
diff --git a/elements/amphora-apparmor/static/etc/apparmor.d/rsyslog.d/octavia b/elements/amphora-apparmor/static/etc/apparmor.d/rsyslog.d/octavia
new file mode 100644
index 0000000000..8643e2c2ed
--- /dev/null
+++ b/elements/amphora-apparmor/static/etc/apparmor.d/rsyslog.d/octavia
@@ -0,0 +1,4 @@
+# Allow rsyslog to create the octavia logging socket
+/run/rsyslog/ w,
+/run/rsyslog/octavia/ w,
+/run/rsyslog/octavia/log rwk,
diff --git a/elements/amphora-fips/README.rst b/elements/amphora-fips/README.rst
new file mode 100644
index 0000000000..5c2656a3b1
--- /dev/null
+++ b/elements/amphora-fips/README.rst
@@ -0,0 +1,7 @@
+Element to enable FIPS mode inside the Amphora.
+
+This element configures the Amphora operating system to enable FIPS 140-2
+mode.
+
+Note: Currently this element only supports the Red Hat family of operating
+systems.
diff --git a/elements/amphora-fips/element-deps b/elements/amphora-fips/element-deps
new file mode 100644
index 0000000000..b0c47881bc
--- /dev/null
+++ b/elements/amphora-fips/element-deps
@@ -0,0 +1,4 @@
+bootloader
+dracut-regenerate
+package-installs
+pkg-map
diff --git a/elements/amphora-fips/environment.d/95-enable-fips b/elements/amphora-fips/environment.d/95-enable-fips
new file mode 100755
index 0000000000..83f7858117
--- /dev/null
+++ b/elements/amphora-fips/environment.d/95-enable-fips
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then
+    set -x
+fi
+
+set -eu
+set -o pipefail
+
+case $DISTRO_NAME in
+    ubuntu | debian )
+        echo "ERROR: $DISTRO_NAME is not supported for FIPS mode."
+ exit 1 + ;; + fedora | centos* | rhel* | rocky ) + DIB_DRACUT_ENABLED_MODULES+=" +- name: fips + " + export DIB_DRACUT_ENABLED_MODULES + + DIB_BOOTLOADER_DEFAULT_CMDLINE+=" fips=1" + export DIB_BOOTLOADER_DEFAULT_CMDLINE + ;; + *) + echo "ERROR: Unsupported distribution $DISTRO_NAME" + exit 1 + ;; +esac diff --git a/elements/amphora-fips/package-installs.yaml b/elements/amphora-fips/package-installs.yaml new file mode 100644 index 0000000000..7a9998cc4e --- /dev/null +++ b/elements/amphora-fips/package-installs.yaml @@ -0,0 +1,2 @@ +# Required for fips-mode-setup to enable fips mode +crypto-policies-scripts: diff --git a/elements/amphora-fips/pkg-map b/elements/amphora-fips/pkg-map new file mode 100644 index 0000000000..32c7761863 --- /dev/null +++ b/elements/amphora-fips/pkg-map @@ -0,0 +1,10 @@ +{ + "family": { + "redhat": { + "crypto-policies-scripts": "crypto-policies-scripts" + } + }, + "default": { + "crypto-policies-scripts": "" + } +} diff --git a/elements/amphora-fips/post-install.d/10-enable-fips b/elements/amphora-fips/post-install.d/10-enable-fips new file mode 100755 index 0000000000..33fa1456ec --- /dev/null +++ b/elements/amphora-fips/post-install.d/10-enable-fips @@ -0,0 +1,22 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi + +set -eu +set -o pipefail + +case $DISTRO_NAME in + ubuntu | debian ) + echo "ERROR: $DISTRO_NAME is not supported for FIPS mode." + exit 1 + ;; + fedora | centos* | rhel* | rocky ) + update-crypto-policies --no-reload --set FIPS + ;; + *) + echo "ERROR: Unsupported distribution $DISTRO_NAME" + exit 1 + ;; +esac diff --git a/elements/amphora-selinux/README.rst b/elements/amphora-selinux/README.rst new file mode 100644 index 0000000000..521fcaa39d --- /dev/null +++ b/elements/amphora-selinux/README.rst @@ -0,0 +1,3 @@ +Element to install the required selinux policies for the amphora. + +Note: This element is only valid for rhel/centos 8 or newer. 
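+
+As an illustrative check (not part of the element itself), the booleans this
+element enables can be inspected inside a built image with standard SELinux
+tooling:
+    getsebool keepalived_connect_any
+    semanage boolean -l | grep os_haproxy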
diff --git a/elements/amphora-selinux/element-deps b/elements/amphora-selinux/element-deps new file mode 100644 index 0000000000..73015c249e --- /dev/null +++ b/elements/amphora-selinux/element-deps @@ -0,0 +1,2 @@ +package-installs +pkg-map diff --git a/elements/amphora-selinux/package-installs.json b/elements/amphora-selinux/package-installs.json new file mode 100644 index 0000000000..eb032ef86d --- /dev/null +++ b/elements/amphora-selinux/package-installs.json @@ -0,0 +1,4 @@ +{ + "openstack-selinux": null, + "policycoreutils-python-utils": null +} diff --git a/elements/amphora-selinux/pkg-map b/elements/amphora-selinux/pkg-map new file mode 100644 index 0000000000..4be8dd2621 --- /dev/null +++ b/elements/amphora-selinux/pkg-map @@ -0,0 +1,12 @@ +{ + "family": { + "redhat": { + "openstack-selinux": "openstack-selinux", + "policycoreutils-python-utils": "policycoreutils-python-utils" + } + }, + "default": { + "openstack-selinux": "", + "policycoreutils-python-utils": "" + } +} diff --git a/elements/amphora-selinux/post-install.d/50-selinux-policies b/elements/amphora-selinux/post-install.d/50-selinux-policies new file mode 100755 index 0000000000..001ce628fe --- /dev/null +++ b/elements/amphora-selinux/post-install.d/50-selinux-policies @@ -0,0 +1,22 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi +set -eu +set -o pipefail + +enable_selinux_bool () { + policy=$1 + if semanage boolean -l | grep "$policy"; then + echo "Enabling $policy SELinux policy" + semanage boolean -N -m --on "$policy" + fi +} + +enable_selinux_bool os_haproxy_enable_nsfs +enable_selinux_bool os_haproxy_ping +enable_selinux_bool cluster_use_execmem +# Allows keepalived to connect to any ports (required by TCP-based HMs on UDP +# pools) +enable_selinux_bool keepalived_connect_any diff --git a/elements/certs-ramfs/README.rst b/elements/certs-ramfs/README.rst new file mode 100644 index 0000000000..e8e87f05de --- /dev/null +++ b/elements/certs-ramfs/README.rst @@ -0,0 +1,4 @@ +Element to set up an encrypted ramfs to store the TLS certificates and keys. + +Enabling this element will mean that the amphora can no longer recover from a +reboot.
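The effect of the element can be spot-checked on a running amphora. A sketch, assuming the agent config path and LUKS mapping name used by the certfs-ramfs script below:

.. sourcecode:: sh

    # The ram-backed LUKS mapping should be open...
    cryptsetup status certfs-ramfs
    # ...and mounted over the agent's certificate directory.
    certs_path=$(awk "/base_cert_dir / {printf \$3}" /etc/octavia/amphora-agent.conf)
    findmnt "${certs_path}"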
diff --git a/elements/certs-ramfs/element-deps b/elements/certs-ramfs/element-deps new file mode 100644 index 0000000000..41b5ab2462 --- /dev/null +++ b/elements/certs-ramfs/element-deps @@ -0,0 +1,3 @@ +dib-init-system +package-installs +install-static diff --git a/elements/certs-ramfs/init-scripts/systemd/certs-ramfs.service b/elements/certs-ramfs/init-scripts/systemd/certs-ramfs.service new file mode 100644 index 0000000000..b11eee7df6 --- /dev/null +++ b/elements/certs-ramfs/init-scripts/systemd/certs-ramfs.service @@ -0,0 +1,14 @@ +[Unit] +Description=Creates an encrypted ramfs for Octavia certs +Before=amphora-agent.service +After=cloud-config.target + +[Service] +Type=oneshot +ExecStart=/usr/local/bin/certfs-ramfs +ExecStop=/bin/sh -c 'certs_path=$$(awk "/base_cert_dir / {printf \\$$3}" /etc/octavia/amphora-agent.conf); umount "$${certs_path}"; cryptsetup luksClose /dev/mapper/certfs-ramfs;' +RemainAfterExit=yes +TimeoutSec=0 + +[Install] +WantedBy=amphora-agent.service diff --git a/elements/certs-ramfs/package-installs.yaml b/elements/certs-ramfs/package-installs.yaml new file mode 100644 index 0000000000..2edcf41eb6 --- /dev/null +++ b/elements/certs-ramfs/package-installs.yaml @@ -0,0 +1 @@ +cryptsetup: diff --git a/elements/certs-ramfs/post-install.d/30-enable-certs-ramfs-service b/elements/certs-ramfs/post-install.d/30-enable-certs-ramfs-service new file mode 100755 index 0000000000..a79fe24a83 --- /dev/null +++ b/elements/certs-ramfs/post-install.d/30-enable-certs-ramfs-service @@ -0,0 +1,17 @@ +#!/bin/bash + +if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then + set -x +fi +set -eu +set -o pipefail + +case "$DIB_INIT_SYSTEM" in + systemd) + systemctl enable certs-ramfs.service + ;; + *) + echo "Unsupported init system $DIB_INIT_SYSTEM" + exit 1 + ;; +esac diff --git a/elements/certs-ramfs/static/usr/local/bin/certfs-ramfs b/elements/certs-ramfs/static/usr/local/bin/certfs-ramfs new file mode 100755 index 0000000000..5d93a6a814 --- /dev/null +++ b/elements/certs-ramfs/static/usr/local/bin/certfs-ramfs @@ -0,0 +1,16 @@ +#!/bin/bash + +cryptsetup_args="--type=luks1" + +# Only 1 block ram device is needed +modprobe brd rd_nr=1 + +passphrase=$(head /dev/urandom | tr -dc "a-zA-Z0-9" | fold -w 32 | head -n 1) +certs_path=$(awk "/base_cert_dir / {printf \$3}" /etc/octavia/amphora-agent.conf) +mkdir -p "${certs_path}" + +echo -n "${passphrase}" | cryptsetup $cryptsetup_args luksFormat /dev/ram0 - +echo -n "${passphrase}" | cryptsetup $cryptsetup_args luksOpen /dev/ram0 certfs-ramfs - + +mkfs.ext2 /dev/mapper/certfs-ramfs +mount /dev/mapper/certfs-ramfs "${certs_path}" diff --git a/elements/certs-ramfs/svc-map b/elements/certs-ramfs/svc-map new file mode 100644 index 0000000000..5837681f11 --- /dev/null +++ b/elements/certs-ramfs/svc-map @@ -0,0 +1,2 @@ +certs-ramfs: + default: certs-ramfs diff --git a/elements/cpu-pinning/README.rst b/elements/cpu-pinning/README.rst new file mode 100644 index 0000000000..a59432c5f3 --- /dev/null +++ b/elements/cpu-pinning/README.rst @@ -0,0 +1,6 @@ +Element to enable optimizations for vertical scaling + +This element configures the Linux kernel to isolate all but the first +vCPU of the system, so that they are used by HAProxy threads exclusively. +It also installs and activates a customized TuneD profile that should further +tweak vertical scaling performance. 
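On a booted multi-vCPU amphora, the result can be verified through standard kernel and TuneD interfaces; a minimal sketch:

.. sourcecode:: sh

    # Boot flags added by this element's environment.d hook below.
    grep -oE '(irqaffinity|isolcpus|nohz_full)=[^ ]*' /proc/cmdline
    # CPUs isolated from the scheduler (empty on single-vCPU flavors).
    cat /sys/devices/system/cpu/isolated
    # The customized profile chain should be active.
    tuned-adm active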
diff --git a/elements/cpu-pinning/element-deps b/elements/cpu-pinning/element-deps new file mode 100644 index 0000000000..483dfd9a67 --- /dev/null +++ b/elements/cpu-pinning/element-deps @@ -0,0 +1,2 @@ +install-static +package-installs diff --git a/elements/cpu-pinning/environment.d/80-kernel-cpu-affinity b/elements/cpu-pinning/environment.d/80-kernel-cpu-affinity new file mode 100644 index 0000000000..24e1c23ef7 --- /dev/null +++ b/elements/cpu-pinning/environment.d/80-kernel-cpu-affinity @@ -0,0 +1,37 @@ +#!/bin/bash + +# +# Copyright Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi + +set -euo pipefail + +case $DISTRO_NAME in + ubuntu | debian | fedora | centos* | rhel* | rocky ) + DIB_BOOTLOADER_DEFAULT_CMDLINE+=" irqaffinity=0" + # This will be ignored on single vCPU systems + DIB_BOOTLOADER_DEFAULT_CMDLINE+=" isolcpus=1-N" + DIB_BOOTLOADER_DEFAULT_CMDLINE+=" nohz=on nohz_full=1-N" + export DIB_BOOTLOADER_DEFAULT_CMDLINE + ;; + *) + echo "ERROR: Unsupported distribution $DISTRO_NAME" + exit 1 + ;; +esac diff --git a/elements/cpu-pinning/package-installs.yaml b/elements/cpu-pinning/package-installs.yaml new file mode 100644 index 0000000000..a537247400 --- /dev/null +++ b/elements/cpu-pinning/package-installs.yaml @@ -0,0 +1,3 @@ +irqbalance: + uninstall: True +tuned: diff --git a/elements/cpu-pinning/post-install.d/20-enable-tuned b/elements/cpu-pinning/post-install.d/20-enable-tuned new file mode 100644 index 0000000000..b369b2ca81 --- /dev/null +++ b/elements/cpu-pinning/post-install.d/20-enable-tuned @@ -0,0 +1,11 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi + +set -euo pipefail + +if [ "$DIB_INIT_SYSTEM" == "systemd" ]; then + systemctl enable $(svc-map tuned) +fi diff --git a/elements/cpu-pinning/post-install.d/30-set-tuned-profile b/elements/cpu-pinning/post-install.d/30-set-tuned-profile new file mode 100644 index 0000000000..347dcbe3b7 --- /dev/null +++ b/elements/cpu-pinning/post-install.d/30-set-tuned-profile @@ -0,0 +1,3 @@ +#!/bin/sh + +chmod +x /usr/lib/tuned/amphora/script.sh diff --git a/elements/cpu-pinning/static/etc/tuned/active_profile b/elements/cpu-pinning/static/etc/tuned/active_profile new file mode 100644 index 0000000000..b6b31fe78a --- /dev/null +++ b/elements/cpu-pinning/static/etc/tuned/active_profile @@ -0,0 +1 @@ +virtual-guest optimize-serial-console amphora diff --git a/elements/cpu-pinning/static/etc/tuned/amphora/script.sh b/elements/cpu-pinning/static/etc/tuned/amphora/script.sh new file mode 100644 index 0000000000..8088595ce0 --- /dev/null +++ b/elements/cpu-pinning/static/etc/tuned/amphora/script.sh @@ -0,0 +1,23 @@ +#!/bin/sh + +# Comment the line in ...tuned/functions that fails on the amp: +# DISKS_SYS="$(command ls -d1 /sys/block/{sd,cciss,dm-,vd,dasd,xvd}* 2>/dev/null)" +sed -i 's/^DISKS_SYS=/#&/' /usr/lib/tuned/functions +. /usr/lib/tuned/functions + +start() { + setup_kvm_mod_low_latency + disable_ksm + + return "$?" 
+} + +stop() { + if [ "$1" = "full_rollback" ]; then + teardown_kvm_mod_low_latency + enable_ksm + fi + return "$?" +} + +process $@ diff --git a/elements/cpu-pinning/static/etc/tuned/amphora/tuned.conf b/elements/cpu-pinning/static/etc/tuned/amphora/tuned.conf new file mode 100644 index 0000000000..b53536a612 --- /dev/null +++ b/elements/cpu-pinning/static/etc/tuned/amphora/tuned.conf @@ -0,0 +1,67 @@ +# +# tuned configuration +# +# Copyright Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +[main] +summary=Customized profile for use on Octavia amphorae +include=network-latency + +[variables] +isolated_cores=${f:cpulist_invert:0} +no_balance_cores=${isolated_cores} + +# Fail if isolated_cores are not set +assert1=${f:assertion_non_equal:isolated_cores are set:${isolated_cores}:${isolated_cores_assert_check}} + +# tmpdir +tmpdir=${f:strip:${f:exec:mktemp:-d}} + +isolated_cores_expanded=${f:cpulist_unpack:${isolated_cores}} +isolated_cpumask=${f:cpulist2hex:${isolated_cores_expanded}} +not_isolated_cores_expanded=${f:cpulist_invert:${isolated_cores_expanded}} +isolated_cores_online_expanded=${f:cpulist_online:${isolated_cores}} +not_isolated_cores_online_expanded=${f:cpulist_online:${not_isolated_cores_expanded}} +not_isolated_cpumask=${f:cpulist2hex:${not_isolated_cores_expanded}} +# Make sure no_balance_cores is defined before +# no_balance_cores_expanded is defined, so that child profiles can set +# no_balance_cores directly in the profile (tuned.conf) +no_balance_cores_expanded=${f:cpulist_unpack:${no_balance_cores}} + +# Fail if isolated_cores contains CPUs which are not online +assert2=${f:assertion:isolated_cores contains online CPU(s):${isolated_cores_expanded}:${isolated_cores_online_expanded}} + +[sysctl] +kernel.numa_balancing=0 +kernel.hung_task_timeout_secs = 600 +vm.stat_interval = 10 +# See https://bugzilla.redhat.com/show_bug.cgi?id=1797629 +kernel.timer_migration = 0 + +[sysfs] +/sys/bus/workqueue/devices/writeback/cpumask = ${not_isolated_cpumask} +/sys/devices/virtual/workqueue/cpumask = ${not_isolated_cpumask} +/sys/devices/virtual/workqueue/*/cpumask = ${not_isolated_cpumask} +/sys/devices/system/machinecheck/machinecheck*/ignore_ce = 1 + +[systemd] +cpu_affinity=${not_isolated_cores_expanded} + +[script] +script=${i:PROFILE_DIR}/script.sh + +[scheduler] +isolated_cores=${isolated_cores} +ps_blacklist=.*pmd.*;.*PMD.*;^DPDK;.*qemu-kvm.*;^contrail-vroute$;^lcore-slave-.*;^rte_mp_handle$;^rte_mp_async$;^eal-intr-thread$ diff --git a/elements/cpu-pinning/svc-map b/elements/cpu-pinning/svc-map new file mode 100644 index 0000000000..937a5ff9d0 --- /dev/null +++ b/elements/cpu-pinning/svc-map @@ -0,0 +1,4 @@ +tuned: + default: tuned +irqbalance: + default: irqbalance diff --git a/elements/disable-makecache/README.rst b/elements/disable-makecache/README.rst new file mode 100644 index 0000000000..d82e6e2638 --- /dev/null +++ b/elements/disable-makecache/README.rst @@ -0,0 +1,6 @@ +This element disables the dnf makecache hourly timer. 
+ +The amphorae typically have neither internet access nor access to DNS servers. +We want to disable this makecache timer to stop the amphora from attempting +to update/download the dnf cache every hour. Without this element the timer +will run and log a failure every hour. diff --git a/elements/disable-makecache/post-install.d/80-disable-makecache b/elements/disable-makecache/post-install.d/80-disable-makecache new file mode 100755 index 0000000000..f8445f7f86 --- /dev/null +++ b/elements/disable-makecache/post-install.d/80-disable-makecache @@ -0,0 +1,18 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi + +set -eu +set -o pipefail + +case $DISTRO_NAME in + fedora | centos* | rhel* | rocky ) + systemctl disable dnf-makecache.timer || true + ;; + *) + echo "ERROR: Unsupported distribution $DISTRO_NAME" + exit 1 + ;; +esac diff --git a/elements/disable-makecache/svc-map b/elements/disable-makecache/svc-map new file mode 100644 index 0000000000..34c2f13082 --- /dev/null +++ b/elements/disable-makecache/svc-map @@ -0,0 +1,2 @@ +disable-makecache: + default: disable-makecache diff --git a/elements/haproxy-octavia/README.rst b/elements/haproxy-octavia/README.rst new file mode 100644 index 0000000000..c986ab7727 --- /dev/null +++ b/elements/haproxy-octavia/README.rst @@ -0,0 +1,3 @@ +Element to install an Octavia Amphora with a haproxy backend. + + diff --git a/elements/haproxy-octavia/element-deps b/elements/haproxy-octavia/element-deps new file mode 100644 index 0000000000..72d42f8db5 --- /dev/null +++ b/elements/haproxy-octavia/element-deps @@ -0,0 +1,3 @@ +package-installs +sysctl +pkg-map diff --git a/elements/haproxy-octavia/install.d/76-haproxy b/elements/haproxy-octavia/install.d/76-haproxy new file mode 100755 index 0000000000..36be2ced09 --- /dev/null +++ b/elements/haproxy-octavia/install.d/76-haproxy @@ -0,0 +1,6 @@ +#!/bin/bash + +set -eux +set -o pipefail + +[ -d /var/lib/haproxy ] || install -d -D -m 0755 -o root -g root /var/lib/haproxy diff --git a/elements/haproxy-octavia/package-installs.json b/elements/haproxy-octavia/package-installs.json new file mode 100644 index 0000000000..2c8927dfc7 --- /dev/null +++ b/elements/haproxy-octavia/package-installs.json @@ -0,0 +1,4 @@ +{ + "haproxy": null, + "iputils-ping": null +} diff --git a/elements/haproxy-octavia/pkg-map b/elements/haproxy-octavia/pkg-map new file mode 100644 index 0000000000..e7aa52f8f3 --- /dev/null +++ b/elements/haproxy-octavia/pkg-map @@ -0,0 +1,18 @@ +{ + "distro": { + "ubuntu": { + "haproxy": "haproxy" + } + }, + "family": { + "debian": { + "haproxy": "haproxy" + }, + "redhat": { + "iputils-ping": "iputils" + } + }, + "default": { + "haproxy": "haproxy" + } +} diff --git a/elements/haproxy-octavia/post-install.d/20-disable-default-haproxy b/elements/haproxy-octavia/post-install.d/20-disable-default-haproxy new file mode 100755 index 0000000000..fa9a78b947 --- /dev/null +++ b/elements/haproxy-octavia/post-install.d/20-disable-default-haproxy @@ -0,0 +1,6 @@ +#!/bin/bash + +set -eu +set -o pipefail + +systemctl disable haproxy diff --git a/elements/haproxy-octavia/post-install.d/20-haproxy-tune-kernel b/elements/haproxy-octavia/post-install.d/20-haproxy-tune-kernel new file mode 100755 index 0000000000..35cd16bcf8 --- /dev/null +++ b/elements/haproxy-octavia/post-install.d/20-haproxy-tune-kernel @@ -0,0 +1,40 @@ +#!/bin/bash + +set -eu +set -o pipefail + +sysctl-write-value net.ipv4.tcp_max_tw_buckets 5800000 +sysctl-write-value net.ipv4.tcp_max_orphans 5800000 +sysctl-write-value
net.ipv4.tcp_max_syn_backlog 100000 +sysctl-write-value net.ipv4.tcp_keepalive_time 300 +sysctl-write-value net.ipv4.tcp_tw_reuse 1 +sysctl-write-value net.core.somaxconn 65534 # netns aware +sysctl-write-value net.ipv4.tcp_synack_retries 3 +sysctl-write-value net.core.netdev_max_backlog 100000 +# This should allow HAProxy maxconn to be 1,000,000 +sysctl-write-value fs.file-max 2600000 # netns aware +sysctl-write-value fs.nr_open 2600000 # netns aware + +# It's OK for these to fail if conntrack module isn't loaded +sysctl-write-value net.netfilter.nf_conntrack_tcp_timeout_time_wait 5 || true +sysctl-write-value net.netfilter.nf_conntrack_tcp_timeout_fin_wait 5 || true + +# Enable MTU icmp black hole detection (RFC4821) +sysctl-write-value net.ipv4.tcp_mtu_probing 1 + +sysctl-write-value net.ipv4.tcp_fin_timeout 5 +sysctl-write-value net.ipv4.ip_nonlocal_bind 1 +sysctl-write-value net.ipv6.ip_nonlocal_bind 1 +sysctl-write-value net.core.rmem_max 67108864 +sysctl-write-value net.core.wmem_max 67108864 +sysctl-write-value net.ipv4.tcp_rmem "4096 87380 33554432" +sysctl-write-value net.ipv4.tcp_wmem "4096 87380 33554432" +sysctl-write-value net.ipv4.ip_local_port_range "1025 65534" + +# Allow unprivileged users to send ICMP echo requests +# https://bugzilla.redhat.com/show_bug.cgi?id=2037807 +# This sysctl is already included in /usr/lib/sysctl.d/50-default.conf on CentOS +# 8 Stream (with a '-' prefix that prevents sysctl from throwing an error if +# the setting doesn't exist in the kernel), but sysctl --system doesn't apply it +# correctly when creating the amphora-haproxy namespace. +sysctl-write-value net.ipv4.ping_group_range "0 2147483647" diff --git a/elements/haproxy-octavia/post-install.d/20-haproxy-user-group-config b/elements/haproxy-octavia/post-install.d/20-haproxy-user-group-config new file mode 100755 index 0000000000..290158ea9a --- /dev/null +++ b/elements/haproxy-octavia/post-install.d/20-haproxy-user-group-config @@ -0,0 +1,21 @@ +#!/bin/bash + +set -eu +set -o pipefail + +case $DISTRO_NAME in + ubuntu | debian ) + HAPROXY_USER_GROUP=nogroup + ;; + fedora | centos* | rhel* | rocky ) + HAPROXY_USER_GROUP=haproxy + ;; + *) + HAPROXY_USER_GROUP=nogroup + ;; +esac + +cat >> /var/lib/octavia/haproxy-default-user-group.conf <> /etc/rsyslog.d/49-haproxy.conf < /var/lib/octavia/ping-wrapper.sh < /dev/null 2>&1 +else + $ping_cmd -q -n -w 1 -c 1 \$HAPROXY_SERVER_ADDR > /dev/null 2>&1 +fi +EOF + +chmod 755 /var/lib/octavia/ping-wrapper.sh diff --git a/elements/haproxy-octavia/svc-map b/elements/haproxy-octavia/svc-map new file mode 100644 index 0000000000..bbca347e25 --- /dev/null +++ b/elements/haproxy-octavia/svc-map @@ -0,0 +1,2 @@ +haproxy: + default: haproxy diff --git a/elements/ipvsadmin/README.rst b/elements/ipvsadmin/README.rst new file mode 100644 index 0000000000..6cf3103253 --- /dev/null +++ b/elements/ipvsadmin/README.rst @@ -0,0 +1,3 @@ +Element to install ipvsadmin.
+ + diff --git a/elements/ipvsadmin/element-deps b/elements/ipvsadmin/element-deps new file mode 100644 index 0000000000..73015c249e --- /dev/null +++ b/elements/ipvsadmin/element-deps @@ -0,0 +1,2 @@ +package-installs +pkg-map diff --git a/elements/ipvsadmin/package-installs.json b/elements/ipvsadmin/package-installs.json new file mode 100644 index 0000000000..5c0982e44f --- /dev/null +++ b/elements/ipvsadmin/package-installs.json @@ -0,0 +1,3 @@ +{ + "ipvsadm": null +} diff --git a/elements/ipvsadmin/svc-map b/elements/ipvsadmin/svc-map new file mode 100644 index 0000000000..a75033b4bb --- /dev/null +++ b/elements/ipvsadmin/svc-map @@ -0,0 +1,2 @@ +ipvsadmin: + default: ipvsadmin diff --git a/elements/keepalived-octavia/README.rst b/elements/keepalived-octavia/README.rst new file mode 100644 index 0000000000..c196323346 --- /dev/null +++ b/elements/keepalived-octavia/README.rst @@ -0,0 +1,3 @@ +Element to install an Octavia Amphora with a keepalived backend. + + diff --git a/elements/keepalived-octavia/element-deps b/elements/keepalived-octavia/element-deps new file mode 100644 index 0000000000..73015c249e --- /dev/null +++ b/elements/keepalived-octavia/element-deps @@ -0,0 +1,2 @@ +package-installs +pkg-map diff --git a/elements/keepalived-octavia/package-installs.json b/elements/keepalived-octavia/package-installs.json new file mode 100644 index 0000000000..1c54782b32 --- /dev/null +++ b/elements/keepalived-octavia/package-installs.json @@ -0,0 +1,3 @@ +{ + "keepalived": null +} diff --git a/elements/keepalived-octavia/pkg-map b/elements/keepalived-octavia/pkg-map new file mode 100644 index 0000000000..860cb7df9a --- /dev/null +++ b/elements/keepalived-octavia/pkg-map @@ -0,0 +1,16 @@ +{ + "distro": { + "ubuntu": { + "keepalived": "keepalived" + } + }, + "family": { + "debian": { + "keepalived": "keepalived" + } + }, + "default": { + "keepalived": "keepalived" + } + +} diff --git a/elements/keepalived-octavia/svc-map b/elements/keepalived-octavia/svc-map new file mode 100644 index 0000000000..614c8a4f20 --- /dev/null +++ b/elements/keepalived-octavia/svc-map @@ -0,0 +1,2 @@ +vrrp-octavia: + default: vrrp-octavia diff --git a/elements/no-resolvconf/README.rst b/elements/no-resolvconf/README.rst new file mode 100644 index 0000000000..0d834ec696 --- /dev/null +++ b/elements/no-resolvconf/README.rst @@ -0,0 +1,11 @@ +This element clears out /etc/resolv.conf and prevents dhclient from populating +it with data from DHCP. This means that DNS resolution will not work from the +amphora. This is OK because all outbound connections from the amphora will +be made using raw IP addresses. + +In addition, we remove dns from the nsswitch.conf hosts setting. + +This has the real benefit of speeding up host boot and configuration times. +This is especially helpful when running tempest tests in a devstack environment +where DNS resolution from the amphora usually doesn't work anyway: this means +that the amphora never waits for DNS timeouts to occur. diff --git a/elements/no-resolvconf/finalise.d/99-disable-resolv-conf b/elements/no-resolvconf/finalise.d/99-disable-resolv-conf new file mode 100755 index 0000000000..1bdddad379 --- /dev/null +++ b/elements/no-resolvconf/finalise.d/99-disable-resolv-conf @@ -0,0 +1,23 @@ +#!/bin/bash + +# Override resolv.conf file from DIB with a custom one.
+# Having at least one nameserver is now required by dnspython (>=) +echo "nameserver 127.0.0.1" > /etc/resolv.conf +echo "nameserver 127.0.0.1" > /etc/resolv.conf.ORIG +if [ -d /etc/dhcp/dhclient-enter-hooks.d ] +then + # Debian/Ubuntu + echo "#!/bin/sh +make_resolv_conf() { : ; }" > /etc/dhcp/dhclient-enter-hooks.d/noresolvconf + chmod +x /etc/dhcp/dhclient-enter-hooks.d/noresolvconf + rm -f /etc/dhcp/dhclient-enter-hooks.d/resolvconf +else + # RHEL/CentOS/Fedora + echo "#!/bin/sh +make_resolv_conf() { : ; }" > /etc/dhcp/dhclient-enter-hooks + chmod +x /etc/dhcp/dhclient-enter-hooks +fi + +if [ -e /etc/nsswitch.conf ]; then + sed -i -e "/hosts:/ s/dns//g" /etc/nsswitch.conf +fi diff --git a/elements/octavia-lib/README.rst b/elements/octavia-lib/README.rst new file mode 100644 index 0000000000..be75612456 --- /dev/null +++ b/elements/octavia-lib/README.rst @@ -0,0 +1,24 @@ +Element to install octavia-lib from a Git source. + +This element allows octavia-lib installs from an arbitrary Git repository. +This is especially useful for development environments. + +By default, octavia-lib is installed from the upstream master branch or from an +upstream stable branch for OpenStack release series. + +To install from an alternative Git location, define the following: + +.. sourcecode:: sh + + DIB_REPOLOCATION_octavia_lib= + DIB_REPOREF_octavia_lib= + +If you wish to build an image using code from a Gerrit review, you can set +``DIB_REPOLOCATION_octavia_lib`` and ``DIB_REPOREF_octavia_lib`` to the values +given by Gerrit in the fetch/pull section of a review. For example, installing +octavia-lib with change 744519 at patchset 2: + +.. sourcecode:: sh + + DIB_REPOLOCATION_octavia_lib=https://review.opendev.org/openstack/octavia-lib + DIB_REPOREF_octavia_lib=refs/changes/19/744519/2 diff --git a/elements/octavia-lib/element-deps b/elements/octavia-lib/element-deps new file mode 100644 index 0000000000..7bfe55bdc1 --- /dev/null +++ b/elements/octavia-lib/element-deps @@ -0,0 +1,2 @@ +source-repositories +amphora-agent diff --git a/elements/octavia-lib/install.d/octavia-lib-source-install/76-octavia-lib-install b/elements/octavia-lib/install.d/octavia-lib-source-install/76-octavia-lib-install new file mode 100755 index 0000000000..e5fed70207 --- /dev/null +++ b/elements/octavia-lib/install.d/octavia-lib-source-install/76-octavia-lib-install @@ -0,0 +1,17 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi +set -eu +set -o pipefail + +[ "${DIB_INSTALLTYPE_amphora_agent:-}" = "package" ] && exit 0 + +AMP_VENV=/opt/amphora-agent-venv + +sed -i 's|octavia-lib|#octavia-lib|' /opt/upper-constraints.txt +$AMP_VENV/bin/pip install -U -c /opt/upper-constraints.txt /opt/octavia-lib + +# Let's capture the git reference we installed in the venv +git --git-dir=/opt/octavia-lib/.git rev-parse HEAD >> /opt/octavia-lib.gitref diff --git a/elements/octavia-lib/post-install.d/89-remove-build-deps b/elements/octavia-lib/post-install.d/89-remove-build-deps new file mode 100755 index 0000000000..e397795a16 --- /dev/null +++ b/elements/octavia-lib/post-install.d/89-remove-build-deps @@ -0,0 +1,12 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi + +set -eu +set -o pipefail + +[ "${DIB_INSTALLTYPE_octavia_lib:-}" = "package" ] && exit 0 + +rm -rf /opt/octavia-lib diff --git a/elements/octavia-lib/source-repository-octavia-lib b/elements/octavia-lib/source-repository-octavia-lib new file mode 100644 index 0000000000..33694cbbb3 --- /dev/null +++
b/elements/octavia-lib/source-repository-octavia-lib @@ -0,0 +1,2 @@ +# This is used for source-based builds +octavia-lib git /opt/octavia-lib https://opendev.org/openstack/octavia-lib diff --git a/elements/rebind-sshd/README.rst b/elements/rebind-sshd/README.rst new file mode 100644 index 0000000000..b1319e5bd2 --- /dev/null +++ b/elements/rebind-sshd/README.rst @@ -0,0 +1,8 @@ +This element adds a post-BOUND script to the dhclient configuration to rebind +the ssh daemon to listen only on the management network interface. The reason +for doing this is that some use cases require load balancing services on TCP +port 22 to work, and if SSH binds to the wildcard address on port 22, then +haproxy can't. + +This also has the secondary benefit of making the amphora slightly more secure +as its SSH daemon will only respond to requests on the management network. diff --git a/elements/rebind-sshd/finalise.d/98-rebind-sshd-after-dhcp b/elements/rebind-sshd/finalise.d/98-rebind-sshd-after-dhcp new file mode 100755 index 0000000000..2942731717 --- /dev/null +++ b/elements/rebind-sshd/finalise.d/98-rebind-sshd-after-dhcp @@ -0,0 +1,17 @@ +#!/bin/bash + +# isc dhcpd specific section +if [[ $DISTRO_NAME = "ubuntu" || $DISTRO_NAME = "debian" ]]; then + + mkdir -p /etc/dhcp/dhclient-enter-hooks.d + echo '#!/bin/sh +if [ "$reason" = "BOUND" ]; then + if `grep -q "#ListenAddress 0.0.0.0" /etc/ssh/sshd_config`; then + /bin/sed -i "s/^#ListenAddress 0.0.0.0.*$/ListenAddress $new_ip_address/g" /etc/ssh/sshd_config + if `/bin/ps -ef|/bin/grep -v grep|/bin/grep -q sshd`; then + /usr/sbin/service ssh restart + fi + fi +fi' > /etc/dhcp/dhclient-enter-hooks.d/rebind-sshd + chmod +x /etc/dhcp/dhclient-enter-hooks.d/rebind-sshd +fi diff --git a/elements/remove-default-ints/README.rst b/elements/remove-default-ints/README.rst new file mode 100644 index 0000000000..9769f1a848 --- /dev/null +++ b/elements/remove-default-ints/README.rst @@ -0,0 +1,6 @@ +This element removes any default network interfaces from the interface +configuration in the image. These are not needed in the amphora as cloud-init +will create the required default interface configuration files. + +For Ubuntu this element will remove the network +configuration files from /etc/network/interfaces.d. diff --git a/elements/remove-default-ints/post-install.d/91-remove-default-ints b/elements/remove-default-ints/post-install.d/91-remove-default-ints new file mode 100755 index 0000000000..dfc355ac47 --- /dev/null +++ b/elements/remove-default-ints/post-install.d/91-remove-default-ints @@ -0,0 +1,8 @@ +#!/bin/bash + +set -eu +set -o xtrace + +if [[ "$DISTRO_NAME" == "ubuntu" ]]; then + sudo rm -f /etc/network/interfaces.d/* +fi diff --git a/elements/remove-sshd/README.rst b/elements/remove-sshd/README.rst new file mode 100644 index 0000000000..a0be79497c --- /dev/null +++ b/elements/remove-sshd/README.rst @@ -0,0 +1,11 @@ +=========== +remove-sshd +=========== +This element ensures that the openssh server is uninstalled and will not start. + +Note +---- +Most cloud images come with the openssh server service installed and enabled +during boot. However, sometimes this is not appropriate. In these cases, +using this element may be helpful to ensure your image will not be accessible +via SSH.
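Whether an image built with this element is really unreachable over SSH can be spot-checked from inside the amphora; a sketch (service and package names vary by distro family):

.. sourcecode:: sh

    # Nothing should be listening on TCP port 22.
    ss -tln | grep -w ':22' || echo "no SSH listener"
    # The sshd binary should be gone entirely, not merely disabled.
    test ! -e /usr/sbin/sshd && echo "sshd not installed"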
diff --git a/elements/remove-sshd/package-installs.yaml b/elements/remove-sshd/package-installs.yaml new file mode 100644 index 0000000000..3dc83e10d3 --- /dev/null +++ b/elements/remove-sshd/package-installs.yaml @@ -0,0 +1,2 @@ +openssh-server: + uninstall: True diff --git a/elements/root-passwd/README.rst b/elements/root-passwd/README.rst new file mode 100644 index 0000000000..0610a89571 --- /dev/null +++ b/elements/root-passwd/README.rst @@ -0,0 +1,5 @@ +This element assigns a password to the root account in the image and enables +password login via ssh. + +This is useful when booting outside of a cloud environment (e.g. manually via +kvm) and for testing. diff --git a/elements/root-passwd/post-install.d/99-setup b/elements/root-passwd/post-install.d/99-setup new file mode 100755 index 0000000000..1c93f459aa --- /dev/null +++ b/elements/root-passwd/post-install.d/99-setup @@ -0,0 +1,11 @@ +#!/bin/bash +if [ -z "$DIB_PASSWORD" ]; then + echo "Error: DIB_PASSWORD is not set; cannot set the root password" + exit 1 +fi +sed -i "s/disable_root: true/disable_root: false/" /etc/cloud/cloud.cfg +install-packages augeas-tools openssh-server openssh-client +augtool -s set /files/etc/ssh/sshd_config/PasswordAuthentication yes +augtool -s set /files/etc/ssh/sshd_config/PermitRootLogin yes +augtool -s set /files/etc/ssh/ssh_config/PasswordAuthentication yes +echo -e "$DIB_PASSWORD\n$DIB_PASSWORD\n" | passwd diff --git a/elements/sos/README.rst b/elements/sos/README.rst new file mode 100644 index 0000000000..46e3ef3152 --- /dev/null +++ b/elements/sos/README.rst @@ -0,0 +1,11 @@ +Element to install sosreport. + +sosreport is a tool that collects information about a system. + +The sos plugin for Octavia can gather information about installed packages, log +and configuration files for Octavia controller components and the amphora agent. +The result is a generated report that can be used for troubleshooting. The +plugin redacts confidential data such as passwords, certificates and secrets. + +At present sos only installs in Red Hat family images as the plugin does not +support other distributions. diff --git a/elements/sos/element-deps b/elements/sos/element-deps new file mode 100644 index 0000000000..73015c249e --- /dev/null +++ b/elements/sos/element-deps @@ -0,0 +1,2 @@ +package-installs +pkg-map diff --git a/elements/sos/package-installs.yaml b/elements/sos/package-installs.yaml new file mode 100644 index 0000000000..d3caa1ee36 --- /dev/null +++ b/elements/sos/package-installs.yaml @@ -0,0 +1 @@ +sos: diff --git a/elements/sos/pkg-map b/elements/sos/pkg-map new file mode 100644 index 0000000000..126ce07727 --- /dev/null +++ b/elements/sos/pkg-map @@ -0,0 +1,10 @@ +{ + "family": { + "redhat": { + "sos": "sos" + } + }, + "default": { + "sos": "" + } +} diff --git a/etc/audit/octavia_api_audit_map.conf.sample b/etc/audit/octavia_api_audit_map.conf.sample new file mode 100644 index 0000000000..a99b80f16c --- /dev/null +++ b/etc/audit/octavia_api_audit_map.conf.sample @@ -0,0 +1,37 @@ +[DEFAULT] +# default target endpoint type +# should match the endpoint type defined in service catalog +target_endpoint_type = load-balancer + +[custom_actions] +failover = update/failover + +# possible end path of API requests +# path of api requests for CADF target typeURI +# Just need to include top resource path to identify class +# of resources. Ex: Log audit event for API requests +# path containing "nodes" keyword and node uuid.
+[path_keywords] +amphorae = amphora +availabilityzones = availabilityzone +availabilityzoneprofiles = availabilityzoneprofile +config = None +defaults = None +failover = None +flavors = flavor +flavorprofiles = flavorprofile +healthmonitors = healthmonitor +l7policies = l7policy +listeners = listener +loadbalancers = loadbalancer +members = member +pools = pool +providers = None +quotas = quota +rules = rule +stats = None +status = None + +# map endpoint type defined in service catalog to CADF typeURI +[service_endpoints] +load-balancer = service/load-balancer diff --git a/etc/certificates/openssl.cnf b/etc/certificates/openssl.cnf new file mode 100644 index 0000000000..0e2c328f4a --- /dev/null +++ b/etc/certificates/openssl.cnf @@ -0,0 +1,350 @@ +# +# OpenSSL example configuration file. +# This is mostly being used for generation of certificate requests. +# + +# This definition stops the following lines choking if HOME isn't +# defined. +HOME = . +RANDFILE = $ENV::HOME/.rnd + +# Extra OBJECT IDENTIFIER info: +#oid_file = $ENV::HOME/.oid +oid_section = new_oids + +# To use this configuration file with the "-extfile" option of the +# "openssl x509" utility, name here the section containing the +# X.509v3 extensions to use: +# extensions = +# (Alternatively, use a configuration file that has only +# X.509v3 extensions in its main [= default] section.) + +[ new_oids ] + +# We can add new OIDs in here for use by 'ca', 'req' and 'ts'. +# Add a simple OID like this: +# testoid1=1.2.3.4 +# Or use config file substitution like this: +# testoid2=${testoid1}.5.6 + +# Policies used by the TSA examples. +tsa_policy1 = 1.2.3.4.1 +tsa_policy2 = 1.2.3.4.5.6 +tsa_policy3 = 1.2.3.4.5.7 + +#################################################################### +[ ca ] +default_ca = CA_default # The default ca section + +#################################################################### +[ CA_default ] + +dir = ./ # Where everything is kept +certs = $dir/certs # Where the issued certs are kept +crl_dir = $dir/crl # Where the issued crl are kept +database = $dir/index.txt # database index file. +#unique_subject = no # Set to 'no' to allow creation of + # several certificates with same subject. +new_certs_dir = $dir/newcerts # default place for new certs. + +certificate = $dir/ca_01.pem # The CA certificate +serial = $dir/serial # The current serial number +crlnumber = $dir/crlnumber # the current crl number + # must be commented out to leave a V1 CRL +crl = $dir/crl.pem # The current CRL +private_key = $dir/private/cakey.pem # The private key +RANDFILE = $dir/private/.rand # private random number file + +x509_extensions = usr_cert # The extensions to add to the cert + +# Comment out the following two lines for the "traditional" +# (and highly broken) format. +name_opt = ca_default # Subject Name options +cert_opt = ca_default # Certificate field options + +# Extension copying option: use with caution. +# copy_extensions = copy + +# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs +# so this is commented out by default to leave a V1 CRL. +# crlnumber must also be commented out to leave a V1 CRL.
+# crl_extensions = crl_ext + +default_days = 365 # how long to certify for +default_crl_days= 30 # how long before next CRL +default_md = default # use public key default MD +preserve = no # keep passed DN ordering + +# A few different ways of specifying how similar the request should look +# For type CA, the listed attributes must be the same, and the optional +# and supplied fields are just that :-) +policy = policy_match + +# For the CA policy +[ policy_match ] +countryName = match +stateOrProvinceName = match +organizationName = match +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +# For the 'anything' policy +# At this point in time, you must list all acceptable 'object' +# types. +[ policy_anything ] +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +#################################################################### +[ req ] +default_bits = 2048 +default_keyfile = privkey.pem +distinguished_name = req_distinguished_name +attributes = req_attributes +x509_extensions = v3_ca # The extensions to add to the self signed cert + +# Passwords for private keys; if not present, they will be prompted for +# input_password = secret +# output_password = secret + +# This sets a mask for permitted string types. There are several options. +# default: PrintableString, T61String, BMPString. +# pkix : PrintableString, BMPString (PKIX recommendation before 2004) +# utf8only: only UTF8Strings (PKIX recommendation after 2004). +# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings). +# MASK:XXXX a literal mask value. +# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings. +string_mask = utf8only + +# req_extensions = v3_req # The extensions to add to a certificate request + +[ req_distinguished_name ] +countryName = Country Name (2 letter code) +countryName_default = AU +countryName_min = 2 +countryName_max = 2 + +stateOrProvinceName = State or Province Name (full name) +stateOrProvinceName_default = Some-State + +localityName = Locality Name (eg, city) + +0.organizationName = Organization Name (eg, company) +0.organizationName_default = Internet Widgits Pty Ltd + +# we can do this but it is not needed normally :-) +#1.organizationName = Second Organization Name (eg, company) +#1.organizationName_default = World Wide Web Pty Ltd + +organizationalUnitName = Organizational Unit Name (eg, section) +#organizationalUnitName_default = + +commonName = Common Name (e.g. server FQDN or YOUR name) +commonName_max = 64 + +emailAddress = Email Address +emailAddress_max = 64 + +# SET-ex3 = SET extension number 3 + +[ req_attributes ] +challengePassword = A challenge password +challengePassword_min = 4 +challengePassword_max = 20 + +unstructuredName = An optional company name + +[ usr_cert ] + +# These extensions are added when 'ca' signs a request. + +# This goes against PKIX guidelines but some CAs do it and some software +# requires this to avoid interpreting an end user certificate as a CA. + +basicConstraints=CA:FALSE + +# Here are some examples of the usage of nsCertType. If it is omitted +# the certificate can be used for anything *except* object signing. + +# This is OK for an SSL server. +# nsCertType = server + +# For an object signing certificate this would be used.
+# nsCertType = objsign + +# For normal client use this is typical +# nsCertType = client, email + +# and for everything including object signing: +# nsCertType = client, email, objsign + +# This is typical in keyUsage for a client certificate. +# keyUsage = nonRepudiation, digitalSignature, keyEncipherment + +# This will be displayed in Netscape's comment listbox. +nsComment = "OpenSSL Generated Certificate" + +# PKIX recommendations harmless if included in all certificates. +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer + +# This stuff is for subjectAltName and issuerAltName. +# Import the email address. +# subjectAltName=email:copy +# An alternative to produce certificates that aren't +# deprecated according to PKIX. +# subjectAltName=email:move + +# Copy subject details +# issuerAltName=issuer:copy + +#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem +#nsBaseUrl +#nsRevocationUrl +#nsRenewalUrl +#nsCaPolicyUrl +#nsSslServerName + +# This is required for TSA certificates. +# extendedKeyUsage = critical,timeStamping + +[ v3_req ] + +# Extensions to add to a certificate request + +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment + +[ v3_ca ] + + +# Extensions for a typical CA + + +# PKIX recommendation. + +subjectKeyIdentifier=hash + +authorityKeyIdentifier=keyid:always,issuer + +# This is what PKIX recommends but some broken software chokes on critical +# extensions. +#basicConstraints = critical,CA:true +# So we do this instead. +basicConstraints = CA:true + +# Key usage: this is typical for a CA certificate. However, since it will +# prevent it being used as a test self-signed certificate, it is best +# left out by default. +# keyUsage = cRLSign, keyCertSign + +# Some might want this also +# nsCertType = sslCA, emailCA + +# Include email address in subject alt name: another PKIX recommendation +# subjectAltName=email:copy +# Copy issuer details +# issuerAltName=issuer:copy + +# DER hex encoding of an extension: beware experts only! +# obj=DER:02:03 +# Where 'obj' is a standard or added object +# You can even override a supported extension: +# basicConstraints= critical, DER:30:03:01:01:FF + +[ crl_ext ] + +# CRL extensions. +# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL. + +# issuerAltName=issuer:copy +authorityKeyIdentifier=keyid:always + +[ proxy_cert_ext ] +# These extensions should be added when creating a proxy certificate + +# This goes against PKIX guidelines but some CAs do it and some software +# requires this to avoid interpreting an end user certificate as a CA. + +basicConstraints=CA:FALSE + +# Here are some examples of the usage of nsCertType. If it is omitted +# the certificate can be used for anything *except* object signing. + +# This is OK for an SSL server. +# nsCertType = server + +# For an object signing certificate this would be used. +# nsCertType = objsign + +# For normal client use this is typical +# nsCertType = client, email + +# and for everything including object signing: +# nsCertType = client, email, objsign + +# This is typical in keyUsage for a client certificate. +# keyUsage = nonRepudiation, digitalSignature, keyEncipherment + +# This will be displayed in Netscape's comment listbox. +nsComment = "OpenSSL Generated Certificate" + +# PKIX recommendations harmless if included in all certificates. +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer + +# This stuff is for subjectAltName and issuerAltName. +# Import the email address.
+# subjectAltName=email:copy +# An alternative to produce certificates that aren't +# deprecated according to PKIX. +# subjectAltName=email:move + +# Copy subject details +# issuerAltName=issuer:copy + +#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem +#nsBaseUrl +#nsRevocationUrl +#nsRenewalUrl +#nsCaPolicyUrl +#nsSslServerName + +# This really needs to be in place for it to be a proxy certificate. +proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo + +#################################################################### +[ tsa ] + +default_tsa = tsa_config1 # the default TSA section + +[ tsa_config1 ] + +# These are used by the TSA reply generation only. +dir = ./demoCA # TSA root directory +serial = $dir/tsaserial # The current serial number (mandatory) +crypto_device = builtin # OpenSSL engine to use for signing +signer_cert = $dir/tsacert.pem # The TSA signing certificate + # (optional) +certs = $dir/cacert.pem # Certificate chain to include in reply + # (optional) +signer_key = $dir/private/tsakey.pem # The TSA private key (optional) + +default_policy = tsa_policy1 # Policy if request did not specify it + # (optional) +other_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional) +digests = md5, sha1 # Acceptable message digests (mandatory) +accuracy = secs:1, millisecs:500, microsecs:100 # (optional) +clock_precision_digits = 0 # number of digits after dot. (optional) +ordering = yes # Is ordering defined for timestamps? + # (optional, default: no) +tsa_name = yes # Must the TSA name be included in the reply? + # (optional, default: no) +ess_cert_id_chain = no # Must the ESS cert id chain be included? + # (optional, default: no) diff --git a/etc/config/octavia-config-generator.conf b/etc/config/octavia-config-generator.conf new file mode 100644 index 0000000000..c1809b8aa7 --- /dev/null +++ b/etc/config/octavia-config-generator.conf @@ -0,0 +1,18 @@ +[DEFAULT] +output_file = etc/octavia/octavia.conf.sample +wrap_width = 79 +summarize = true +namespace = octavia +namespace = oslo.db +namespace = oslo.log +namespace = oslo.messaging +namespace = oslo.middleware.cors +namespace = oslo.middleware.http_proxy_to_wsgi +namespace = oslo.middleware.healthcheck +namespace = oslo.middleware.sizelimit +namespace = oslo.policy +namespace = oslo.reports +namespace = keystonemiddleware.audit +namespace = keystonemiddleware.auth_token +namespace = cotyledon +namespace = castellan.config diff --git a/etc/grafana/OctaviaAmphoraDashboard.json b/etc/grafana/OctaviaAmphoraDashboard.json new file mode 100644 index 0000000000..a5221ef7be --- /dev/null +++ b/etc/grafana/OctaviaAmphoraDashboard.json @@ -0,0 +1,11589 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "panel", + "id": "bargauge", + "name": "Bar gauge", + "version": "" + }, + { + "type": "panel", + "id": "gauge", + "name": "Gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.4.2" + }, + { + "type": "panel", + "id": "piechart", + "name": "Pie chart", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + 
"list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "A dashboard for OpenStack Octavia load balancer metrics", + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": 15828, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 49, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "refId": "A" + } + ], + "title": "Summary Metrics", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Percent CPU utilization. Displays the maximum value if multiple load balancers are selected.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 50 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 2, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto", + "text": {} + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_loadbalancer_cpu{instance=~'${lb}'})", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "CPU Load", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Percent RAM utilization. Displays the maximum value if multiple load balancers are selected. 
", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 50 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 3, + "y": 1 + }, + "id": 4, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto", + "text": {} + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_loadbalancer_memory{instance=~'${lb}'})", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Memory Usage", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Instant read of the current number of connections per second across the selected load balancers.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 50 + }, + { + "color": "dark-red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 6, + "y": 1 + }, + "id": 6, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/^octavia_loadbalancer_current_connection_rate$/", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto", + "text": {} + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_loadbalancer_current_connection_rate{instance=~'${lb}'})", + "format": "time_series", + "instant": true, + "interval": "", + "legendFormat": "octavia_loadbalancer_current_connection_rate", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_loadbalancer_max_connections{instance=~'${lb}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "octavia_loadbalancer_max_connections", + "refId": "B" + } + ], + "title": "Load Balancer(s) CPS", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Instant read of the current throughput across the selected load balancers.", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": "" + } + ] + }, + "unit": "bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 5, + "x": 10, + "y": 1 + }, + "id": 15, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": {}, + "textMode": 
"auto", + "wideLayout": true + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_listener_bytes_in_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])*8)", + "interval": "", + "legendFormat": "In", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_listener_bytes_out_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])*8)", + "hide": false, + "interval": "", + "legendFormat": "Out", + "refId": "B" + } + ], + "title": "Throughput", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Shows the percentage of members in each state across the selected load balancers.\nOptions are: Online, Offline (admin down/disabled), Error, or Draining.", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "#808080", + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Error" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Online" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Offline" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Draining" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-yellow", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 15, + "y": 1 + }, + "id": 13, + "options": { + "displayLabels": [], + "legend": { + "displayMode": "list", + "placement": "right", + "showLegend": true, + "values": [ + "percent" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_member_status{instance=~'${lb}',pool=~'${pool}',member=~'${member}', state=\"DOWN\"} == 1)", + "instant": true, + "interval": "", + "legendFormat": "Error", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_member_status{instance=~'${lb}',pool=~'${pool}',member=~'${member}', state=\"UP\"} == 1)", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Online", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_member_status{instance=~'${lb}',pool=~'${pool}',member=~'${member}', state=\"MAINT\"} == 1)", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Offline", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": 
"count(octavia_member_status{instance=~'${lb}',pool=~'${pool}',member=~'${member}', state=\"DRAIN\"} == 1)", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Draining", + "refId": "D" + } + ], + "title": "Member Status", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "dark-green", + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 14, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 95, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "value_and_name", + "wideLayout": true + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "count(octavia_loadbalancer_cpu{instance=~'${lb}'})", + "instant": true, + "interval": "", + "legendFormat": "Load Balancers", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "count(octavia_listener_status{instance=~'${lb}',listener=~'${listener}',state=\"UP\"})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Listeners", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "count(octavia_pool_status{instance=~'${lb}',listener=~'${listener}',pool=~'${pool}',state=\"UP\"})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Pools", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "count(group by (member) (octavia_member_status{instance=~'${lb}',listener=~'${listener}',pool=~'${pool}',member=~'${member}',state=\"UP\"}))", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Members", + "refId": "D" + } + ], + "title": "Total in Selection", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 13, + "gradientMode": "opacity", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "Pool.*" + }, + "properties": [ + { + "id": "custom.transform", + "value": "negative-Y" + } + ] + }, + { + "matcher": { + "id": "byRegexp", + "options": ".*1xx.*" 
+ }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byRegexp", + "options": ".*2xx.*" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byRegexp", + "options": ".*3xx.*" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byRegexp", + "options": ".*4xx.*" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byRegexp", + "options": ".*5xx.*" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byRegexp", + "options": ".*other.*" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 15, + "x": 0, + "y": 5 + }, + "id": 17, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "lastNotNull" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_listener_http_responses_total{instance=~\"$lb\",listener=~\"$listener\"}[$__rate_interval])) by (code)", + "interval": "", + "legendFormat": "Front {{ code }}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_pool_http_responses_total{instance=~\"$lb\",pool=~\"$pool\"}[$__rate_interval])) by (code)", + "hide": false, + "interval": "", + "legendFormat": "Pool {{ code }}", + "refId": "B" + } + ], + "title": "HTTP Response Codes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The maximum observed connections per second and requests per second across the selected load balancers. 
Values may reset on load balancer configuration change or failover.", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 4, + "x": 15, + "y": 5 + }, + "id": 19, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_loadbalancer_max_connection_rate{instance=~'${lb}'})", + "instant": true, + "interval": "", + "legendFormat": "Connections Per Second", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_loadbalancer_max_ssl_rate{instance=~'${lb}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "TLS Connections Per Second", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(sum by (instance) (octavia_listener_http_requests_rate_max{instance=~'${lb}', listener=~'${listener}'}))", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "HTTP Requests Per Second", + "refId": "C" + } + ], + "title": "Peak Rates", + "type": "stat" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 15 + }, + "id": 23, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 25, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_loadbalancer_cpu{instance=~'${lb}'}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "CPU Usage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 27, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_loadbalancer_memory{instance=~'${lb}'}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Memory Usage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 33, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_loadbalancer_connections_total{instance=~'${lb}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_loadbalancer_ssl_connections_total{instance=~'${lb}'}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "TLS {{ instance }}", + "refId": "B" + } + ], + "title": "Concurrent Connections", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, 
+ "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 35, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_loadbalancer_requests_total{instance=~'${lb}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Concurrent Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "Pool.*" + }, + "properties": [ + { + "id": "custom.transform", + "value": "negative-Y" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 32 + }, + "id": 37, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_loadbalancer_current_frontend_ssl_key_rate{instance=~'${lb}'}", + "instant": false, + "interval": "", + "legendFormat": "Listener {{ instance }}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_loadbalancer_current_backend_ssl_key_rate{instance=~'${lb}'}", + "hide": false, + "interval": "", + "legendFormat": "Pool {{ instance }}", + "refId": "B" + } + ], + "title": "TLS Key Rates", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + 
"axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 39, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_loadbalancer_frontend_ssl_reuse{instance=~'${lb}'}", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "TLS Session Reuse", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 40 + }, + "id": 41, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_loadbalancer_ssl_cache_lookups_total{instance=~'${lb}'}[$__rate_interval])", + "interval": "", + "legendFormat": "Lookups {{ instance }}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_loadbalancer_ssl_cache_misses_total{instance=~'${lb}'}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "Miss {{ instance }}", + "refId": "B" + } + ], + "title": "TLS Cache", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + 
"gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "Out.*" + }, + "properties": [ + { + "id": "custom.transform", + "value": "negative-Y" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 40 + }, + "id": 43, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_loadbalancer_http_comp_bytes_in_total{instance=~'${lb}'}[$__rate_interval])", + "interval": "", + "legendFormat": "In {{ instance }}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_loadbalancer_http_comp_bytes_out_total{instance=~'${lb}'}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "Out {{ instance }}", + "refId": "B" + } + ], + "title": "Compression Throughput", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 48 + }, + "id": 47, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "count by (instance) (octavia_listener_status{instance=~'${lb}',state=\"UP\"})", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Active Listeners", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Traffic flow logs dropped due to system load.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": 
"text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 48 + }, + "id": 45, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_loadbalancer_dropped_logs_total{instance=~'${lb}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ instance }}", + "refId": "A" + } + ], + "title": "Logs Dropped", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "Failures.*" + }, + "properties": [ + { + "id": "unit", + "value": "none" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 56 + }, + "id": 29, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_memory_pool_allocated_bytes{instance=~'${lb}'}", + "interval": "", + "legendFormat": "Allocated {{ instance }}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_memory_pool_used_bytes{instance=~'${lb}'}", + "hide": false, + "interval": "", + "legendFormat": "Used {{ instance }}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_memory_pool_failures_total{instance=~'${lb}'}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "Failures {{ instance }}", + "refId": "C" + } + ], + "title": "Memory Pool Usage", + "type": "timeseries" + } + ], + 
"targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "refId": "A" + } + ], + "title": "Load Balancer Metrics", + "type": "row" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 51, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "refId": "A" + } + ], + "title": "Listener Metrics", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The current listener status. 0=Offline, 1=Online, 2=Degraded (at the configured capacity).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Online" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Degraded" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Offline" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-orange", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 17 + }, + "id": 53, + "options": { + "displayLabels": [ + "value" + ], + "legend": { + "displayMode": "list", + "placement": "right", + "showLegend": true, + "values": [ + "percent" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_listener_status{instance=~'${lb}',listener=~'${listener}', state=\"DOWN\"} == 1)", + "instant": true, + "interval": "", + "legendFormat": "Offline", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_listener_status{instance=~'${lb}',listener=~'${listener}', state=\"UP\"} == 1)", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Online", + "refId": "B" + } + ], + "title": "Listener Status", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The maximum observed concurrent sessions, sessions per second, and requests per second across the selected listeners. 
Values may reset on load balancer configuration change or failover.", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "dark-green", + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 17 + }, + "id": 62, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": {}, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_listener_max_sessions{instance=~'${lb}',listener=~'${listener}'})", + "instant": true, + "interval": "", + "legendFormat": "Concurrent Sessions", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_listener_max_session_rate{instance=~'${lb}',listener=~'${listener}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Sessions Per Second", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_listener_http_requests_rate_max{instance=~'${lb}',listener=~'${listener}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Requests Per Second", + "refId": "C" + } + ], + "title": "Peak Values", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "fixed" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "1xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "2xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "3xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "4xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Other" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 17 + }, + "id": 75, + "options": { + "displayLabels": [ + "percent", + "name" + ], + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", 
+ "sort": "none" + } + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='1xx'})", + "instant": true, + "interval": "", + "legendFormat": "1xx", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='2xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "2xx", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='3xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "3xx", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='4xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "4xx", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='5xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "5xx", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='other'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Other", + "refId": "F" + } + ], + "title": "HTTP Response Status Codes", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "1xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "2xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "3xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "4xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Other" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 73, + "options": { + 
"displayMode": "basic", + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='1xx'})", + "instant": true, + "interval": "", + "legendFormat": "1xx", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='2xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "2xx", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='3xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "3xx", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='4xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "4xx", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='5xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "5xx", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='other'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Other", + "refId": "F" + } + ], + "title": "HTTP Response Status Codes", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "fixed" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "1xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "2xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": 
"semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "3xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "4xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Other" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 74, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='1xx'}[$__rate_interval]))", + "instant": false, + "interval": "", + "legendFormat": "1xx", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='2xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "2xx", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='3xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "3xx", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='4xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "4xx", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='5xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5xx", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_listener_http_responses_total{instance=~'${lb}',listener=~'${listener}',code='other'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Other", + "refId": "F" + } + ], + "title": "HTTP Response Status Code Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": 
"none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 33 + }, + "id": 55, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_connections_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "Concurrent Connections", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 33 + }, + "id": 57, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_sessions_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "Concurrent Sessions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + 
"mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 41 + }, + "id": 59, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_bytes_in_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "Listener Throughput In", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 41 + }, + "id": 60, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_bytes_out_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "Listener Throughput Out", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 49 + }, + "id": 67, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": 
"bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_denied_connections_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "Denied Connections", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 49 + }, + "id": 68, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_denied_sessions_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "Denied Sessions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 57 + }, + "id": 64, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_requests_denied_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ 
listener }}", + "refId": "A" + } + ], + "title": "Denied Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 57 + }, + "id": 66, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_responses_denied_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "Denied Responses", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 65 + }, + "id": 69, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_http_requests_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + 
"barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 65 + }, + "id": 70, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_request_errors_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "Request Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 73 + }, + "id": 71, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "(irate(octavia_listener_http_cache_hits_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])/irate(octavia_listener_http_cache_lookups_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval]))*100", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "HTTP Cache Hit Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + 
"pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 73 + }, + "id": 72, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_listener_http_comp_responses_total{instance=~'${lb}',listener=~'${listener}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ listener }}", + "refId": "A" + } + ], + "title": "HTTP Compressed Responses", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 81 + }, + "id": 77, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "refId": "A" + } + ], + "title": "Pool Metrics", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The current pool status. 0=Error, 1=Online", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Online" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Error" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 82 + }, + "id": 78, + "options": { + "displayLabels": [ + "value" + ], + "legend": { + "displayMode": "list", + "placement": "right", + "showLegend": true, + "values": [ + "percent" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_pool_status{instance=~'${lb}',pool=~'${pool}', state=\"DOWN\"} == 1)", + "instant": true, + "interval": "", + "legendFormat": "Error", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_pool_status{instance=~'${lb}',pool=~'${pool}', state=\"UP\"} == 1)", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Online", + "refId": "B" + } + ], + "title": "Pool Status", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The maximum observed concurrent sessions, sessions per second, and queued requests across the selected pools. 
Values may reset on load balancer configuration change or failover.", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "dark-green", + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 82 + }, + "id": 80, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_max_sessions{instance=~'${lb}',pool=~'${pool}'})", + "instant": true, + "interval": "", + "legendFormat": "Concurrent Sessions", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_max_session_rate{instance=~'${lb}',pool=~'${pool}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Sessions Per Second", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_max_queue{instance=~'${lb}',pool=~'${pool}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Queued Requests", + "refId": "C" + } + ], + "title": "Peak Values", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "fixed" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "1xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "2xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "3xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "4xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Other" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 82 + }, + "id": 81, + "options": { + "displayLabels": [ + "percent", + "name" + ], + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.4.2", + "targets": 
[ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='1xx'})", + "instant": true, + "interval": "", + "legendFormat": "1xx", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='2xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "2xx", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='3xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "3xx", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='4xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "4xx", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='5xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "5xx", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='other'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Other", + "refId": "F" + } + ], + "title": "HTTP Response Status Codes", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "1xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "2xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "3xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "4xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Other" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 89 + }, + "id": 82, + "options": { + "displayMode": "basic", + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "auto", + "orientation": 
"horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='1xx'})", + "instant": true, + "interval": "", + "legendFormat": "1xx", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='2xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "2xx", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='3xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "3xx", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='4xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "4xx", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='5xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "5xx", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='other'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Other", + "refId": "F" + } + ], + "title": "HTTP Response Status Codes", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "fixed" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "1xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "2xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "3xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + 
"mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "4xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Other" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 89 + }, + "id": 83, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='1xx'}[$__rate_interval]))", + "instant": false, + "interval": "", + "legendFormat": "1xx", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='2xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "2xx", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='3xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "3xx", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='4xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "4xx", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='5xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5xx", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_pool_http_responses_total{instance=~'${lb}',pool=~'${pool}',code='other'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Other", + "refId": "F" + } + ], + "title": "HTTP Response Status Code Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The rate at which the client aborted the connection. I.e. 
the user closed the browser window.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 98 + }, + "id": 84, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_client_aborts_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Client Abort Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The rate at which the backend member servers abort connections.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 98 + }, + "id": 91, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_member_aborts_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Member Server Abort Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": 
"none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 106 + }, + "id": 92, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_sessions_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Concurrent Sessions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 106 + }, + "id": 87, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_pool_current_queue{instance=~'${lb}',pool=~'${pool}'}", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Queued Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + 
"thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 114 + }, + "id": 90, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_connection_attempts_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Connection Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 114 + }, + "id": 86, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_connection_reuses_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Connection Reuse Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 122 + }, + "id": 85, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + 
"placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_bytes_in_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Throughput In", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 122 + }, + "id": 88, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_bytes_out_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Throughput Out", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "dark-green", + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 130 + }, + "id": 93, + "options": { + "displayMode": "gradient", + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "auto", + "orientation": "vertical", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": true + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum by (member) (octavia_member_loadbalanced_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Pool Member Dispatches", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": true, + "field": "member" + } + ] + } + } + ], + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + 
"description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 130 + }, + "id": 89, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_http_requests_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool HTTP Request Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 138 + }, + "id": 96, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_pool_active_members{instance=~'${lb}',pool=~'${pool}'}", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Members", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, 
+ "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 138 + }, + "id": 97, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_pool_backup_members{instance=~'${lb}',pool=~'${pool}'}", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Backup Members", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 146 + }, + "id": 107, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_requests_denied_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Requests Denied", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": 
[ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 146 + }, + "id": 108, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_responses_denied_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Responses Denied", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 154 + }, + "id": 109, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_connection_errors_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Connection Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 154 + }, + "id": 110, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + 
"tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_response_errors_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Response Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 162 + }, + "id": 111, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_retry_warnings_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Retry Warnings", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 162 + }, + "id": 112, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_redispatch_warnings_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": 
"A" + } + ], + "title": "Pool Redispatch Warnings", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 170 + }, + "id": 113, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_check_up_down_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool Up/Down Transition Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Maximum Time Since Last Transition" + }, + "properties": [ + { + "id": "unit", + "value": "s" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Maximum Downtime" + }, + "properties": [ + { + "id": "unit", + "value": "s" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 170 + }, + "id": 114, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_pool_check_up_down_total{instance=~'${lb}',pool=~'${pool}'})", + "instant": true, + "interval": "", + "legendFormat": "Total Transitions", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_check_last_change_seconds{instance=~'${lb}',pool=~'${pool}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Maximum Time Since Last Transition", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": 
"max(octavia_pool_downtime_seconds_total{instance=~'${lb}',pool=~'${pool}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Maximum Downtime", + "refId": "C" + } + ], + "title": "Pool Up/Down Summary", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 178 + }, + "id": 115, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "(irate(octavia_pool_http_cache_hits_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])/irate(octavia_pool_http_cache_lookups_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval]))*100", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool HTTP Cache Hit Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 178 + }, + "id": 116, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_pool_http_comp_responses_total{instance=~'${lb}',pool=~'${pool}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Pool HTTP Compressed Responses", + "type": 
"timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 186 + }, + "id": 99, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "refId": "A" + } + ], + "title": "Pool Timings", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The current maximum time across the selected pools. Values may reset on load balancer configuration change or failover.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 12, + "x": 0, + "y": 187 + }, + "id": 105, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "vertical", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "value_and_name", + "wideLayout": true + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_queue_time_average_seconds{instance=~'${lb}',pool=~'${pool}'})", + "instant": true, + "interval": "", + "legendFormat": "Queue Time", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_connect_time_average_seconds{instance=~'${lb}',pool=~'${pool}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Connect Time", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_response_time_average_seconds{instance=~'${lb}',pool=~'${pool}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Response Time", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_total_time_average_seconds{instance=~'${lb}',pool=~'${pool}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Total Time", + "refId": "D" + } + ], + "title": "Current Maximum Times", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The maximum observed time across the selected pools. 
Values may reset on load balancer configuration change or failover.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 12, + "x": 12, + "y": 187 + }, + "id": 106, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "vertical", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "value_and_name", + "wideLayout": true + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_max_queue_time_seconds{instance=~'${lb}',pool=~'${pool}'})", + "instant": true, + "interval": "", + "legendFormat": "Queue Time", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_max_connect_time_seconds{instance=~'${lb}',pool=~'${pool}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Connect Time", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_max_response_time_seconds{instance=~'${lb}',pool=~'${pool}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Response Time", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_pool_max_total_time_seconds{instance=~'${lb}',pool=~'${pool}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Total Time", + "refId": "D" + } + ], + "title": "Peak Times", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Average queue time for the last 1024 successful connections.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 191 + }, + "id": 101, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_pool_queue_time_average_seconds{instance=~'${lb}',pool=~'${pool}'}", + "interval": "", + "legendFormat": "{{ pool }}", + 
"refId": "A" + } + ], + "title": "Average Queue Time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Average connect time for last 1024 successful connections.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 191 + }, + "id": 102, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_pool_connect_time_average_seconds{instance=~'${lb}',pool=~'${pool}'}", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Average Connect Time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Average response time for last 1024 successful connections. 
Inclusive of member server time.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 199 + }, + "id": 103, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_pool_response_time_average_seconds{instance=~'${lb}',pool=~'${pool}'}", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Average Response Time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Average total time for last 1024 successful connections. Inclusive of member server time.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 199 + }, + "id": 104, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_pool_total_time_average_seconds{instance=~'${lb}',pool=~'${pool}'}", + "interval": "", + "legendFormat": "{{ pool }}", + "refId": "A" + } + ], + "title": "Average Total Time", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 207 + }, + "id": 118, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "refId": "A" + } + ], + "title": "Member Metrics", + "type": "row" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Shows the percentage of members in each state across the selected load balancers.\nOptions are: Online, Offline (admin down/disabled), Error, or Draining.", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "#808080", + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Error" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Online" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Offline" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Draining" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-yellow", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 208 + }, + "id": 122, + "options": { + "displayLabels": [], + "legend": { + "displayMode": "list", + "placement": "right", + "showLegend": true, + "values": [ + "percent" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_member_status{instance=~'${lb}',pool=~'${pool}',member=~'${member}', state=\"DOWN\"} == 1)", + "instant": true, + "interval": "", + "legendFormat": "Error", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_member_status{instance=~'${lb}',pool=~'${pool}',member=~'${member}', state=\"UP\"} == 1)", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Online", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_member_status{instance=~'${lb}',pool=~'${pool}',member=~'${member}', state=\"MAINT\"} == 1)", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Offline", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "count(octavia_member_status{instance=~'${lb}',pool=~'${pool}',member=~'${member}', state=\"DRAIN\"} == 1)", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Draining", + "refId": "D" + } + ], + "title": "Member Status", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The maximum observed concurrent sessions, sessions per second, and queued requests across the selected members. 
Values may reset on load balancer configuration change or failover.", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "dark-green", + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 208 + }, + "id": 123, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_max_sessions{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "instant": true, + "interval": "", + "legendFormat": "Concurrent Sessions", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_max_session_rate{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Sessions Per Second", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_max_queue{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Queued Requests", + "refId": "C" + } + ], + "title": "Peak Values", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "fixed" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "1xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "2xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "3xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "4xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Other" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 208 + }, + "id": 124, + "options": { + "displayLabels": [ + "percent", + "name" + ], + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": 
"single", + "sort": "none" + } + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='1xx'})", + "instant": true, + "interval": "", + "legendFormat": "1xx", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='2xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "2xx", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='3xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "3xx", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='4xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "4xx", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='5xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "5xx", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='other'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Other", + "refId": "F" + } + ], + "title": "HTTP Response Status Codes", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "1xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "2xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "3xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "4xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Other" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + 
"h": 9, + "w": 12, + "x": 0, + "y": 215 + }, + "id": 125, + "options": { + "displayMode": "basic", + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='1xx'})", + "instant": true, + "interval": "", + "legendFormat": "1xx", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='2xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "2xx", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='3xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "3xx", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='4xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "4xx", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='5xx'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "5xx", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='other'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Other", + "refId": "F" + } + ], + "title": "HTTP Response Status Codes", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "fixed" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "1xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-blue", + "mode": "fixed" + } + } + ] 
+ }, + { + "matcher": { + "id": "byName", + "options": "2xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "3xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "4xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5xx" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Other" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 215 + }, + "id": 126, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='1xx'}[$__rate_interval]))", + "instant": false, + "interval": "", + "legendFormat": "1xx", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='2xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "2xx", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='3xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "3xx", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='4xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "4xx", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='5xx'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5xx", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(irate(octavia_member_http_responses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}',code='other'}[$__rate_interval]))", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Other", + "refId": "F" + } + ], + "title": "HTTP Response Status Code Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The rate at which the client aborted the connection (e.g. 
the user closed the browser window).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 224 + }, + "id": 127, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_client_aborts_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Client Abort Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The rate at which the backend member servers abort connections.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 224 + }, + "id": 128, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_server_aborts_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Server Abort Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": {
"legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 232 + }, + "id": 129, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_sessions_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Concurrent Sessions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 232 + }, + "id": 130, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_member_current_queue{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Queued Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 240 + }, + "id": 131, + 
"options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_bytes_in_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Throughput In", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 240 + }, + "id": 132, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_bytes_out_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Throughput Out", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 248 + }, + "id": 133, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_connection_attempts_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Connection Rate", + "type": 
"timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 248 + }, + "id": 134, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_connection_reuses_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Connection Reuse Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "dark-green", + "mode": "fixed" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 256 + }, + "id": 135, + "options": { + "displayMode": "gradient", + "orientation": "vertical", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": true + }, + "showUnfilled": true + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum by (member) (octavia_member_loadbalanced_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Dispatches", + "transformations": [ + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": true, + "field": "member" + } + ] + } + } + ], + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + 
{ + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 256 + }, + "id": 136, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_member_idle_connections_current{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Idle Connections", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 264 + }, + "id": 137, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_responses_denied_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Responses Denied", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 264 + }, + "id": 138, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": 
"irate(octavia_member_connection_errors_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Connection Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 272 + }, + "id": 139, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_response_errors_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Response Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 272 + }, + "id": 140, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_retry_warnings_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Retry Warnings", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": 
"line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 280 + }, + "id": 141, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_redispatch_warnings_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Redispatch Warnings", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Failed check rate. (Only counts checks failed when the member is up).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 280 + }, + "id": 142, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_check_failures_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Health Check Failure Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + 
"steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 288 + }, + "id": 143, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "irate(octavia_member_check_up_down_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}[$__rate_interval])", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Up/Down Transition Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Maximum Time Since Last Transition" + }, + "properties": [ + { + "id": "unit", + "value": "s" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Maximum Downtime" + }, + "properties": [ + { + "id": "unit", + "value": "s" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 288 + }, + "id": 144, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "sum(octavia_member_check_up_down_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "instant": true, + "interval": "", + "legendFormat": "Total Transitions", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_check_last_change_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Maximum Time Since Last Transition", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_downtime_seconds_total{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Maximum Downtime", + "refId": "C" + } + ], + "title": "Member Up/Down Summary", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 296 + }, + "id": 145, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_member_weight{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Weight", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Current throttle percentage for the member, when slowstart is active, or no value if not in slowstart.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 296 + }, + "id": 146, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_member_current_throttle{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}", + "instant": false, + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Member Throttle", + "type": "timeseries" + }, + { + "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 304 + }, + "id": 120, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The current maximum time across the selected members. 
Values may reset on load balancer configuration change or failover.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 12, + "x": 0, + "y": 7 + }, + "id": 147, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "vertical", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value_and_name" + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_queue_time_average_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "instant": true, + "interval": "", + "legendFormat": "Queue Time", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_connect_time_average_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Connect Time", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_response_time_average_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Response Time", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_total_time_average_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Total Time", + "refId": "D" + } + ], + "title": "Current Maximum Times", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The maximum observed time across the selected members. 
Values may reset on load balancer configuration change or failover.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 12, + "x": 12, + "y": 7 + }, + "id": 148, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "vertical", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value_and_name" + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_max_queue_time_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "instant": true, + "interval": "", + "legendFormat": "Queue Time", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_max_connect_time_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Connect Time", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_max_response_time_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Response Time", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": false, + "expr": "max(octavia_member_max_total_time_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'})", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "Total Time", + "refId": "D" + } + ], + "title": "Peak Times", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Average queue time for last 1024 successful connections.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 11 + }, + "id": 149, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_member_queue_time_average_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Average Queue Time", + "type": "timeseries" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Average connect time for last 1024 successful connections.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 11 + }, + "id": 150, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_member_connect_time_average_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Average Connect Time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Average response time for last 1024 successful connections. Inclusive of member server time.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 19 + }, + "id": 151, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_member_response_time_average_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Average Response Time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Average total time for last 1024 successful connections. 
Inclusive of member server time.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 19 + }, + "id": 152, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "octavia_member_total_time_average_seconds{instance=~'${lb}',pool=~'${pool}',member=~'${member}'}", + "interval": "", + "legendFormat": "{{ member }}", + "refId": "A" + } + ], + "title": "Average Total Time", + "type": "timeseries" + } + ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "adltc5klfaccgf" + }, + "refId": "A" + } + ], + "title": "Member Timings", + "type": "row" + } + ], + "refresh": "5s", + "schemaVersion": 39, + "tags": [ + "OpenStack", + "Octavia", + "LoadBalancer" + ], + "templating": { + "list": [ + { + "allValue": ".*", + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(octavia_loadbalancer_cpu, instance)", + "hide": 0, + "includeAll": true, + "label": "Load Balancer", + "multi": true, + "name": "lb", + "options": [], + "query": { + "query": "label_values(octavia_loadbalancer_cpu, instance)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 5, + "type": "query" + }, + { + "allValue": ".*", + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(octavia_listener_status{instance=~'${lb:raw}', listener!=\"native-prometheus\"}, listener)", + "description": "Filter to a specific listener.", + "hide": 0, + "includeAll": true, + "label": "Listener", + "multi": true, + "name": "listener", + "options": [], + "query": { + "query": "label_values(octavia_listener_status{instance=~'${lb:raw}', listener!=\"native-prometheus\"}, listener)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": ".*", + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(octavia_pool_status{instance=~'${lb:raw}'}, pool)", + "hide": 0, + "includeAll": true, + "label": "Pool", + "multi": true, + "name": "pool", + "options": [], + "query": { + "query": "label_values(octavia_pool_status{instance=~'${lb:raw}'}, pool)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": ".*", + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": 
"label_values(octavia_member_status{instance=~'${lb:raw}', pool=~'${pool:raw}'}, member)", + "hide": 0, + "includeAll": true, + "label": "Member", + "multi": true, + "name": "member", + "options": [], + "query": { + "query": "label_values(octavia_member_status{instance=~'${lb:raw}', pool=~'${pool:raw}'}, member)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "OpenStack Octavia Amphora Load Balancer", + "uid": "4yHpTrB7z", + "version": 2, + "weekStart": "" +} diff --git a/etc/policy/README.rst b/etc/policy/README.rst new file mode 100644 index 0000000000..ed00f07eef --- /dev/null +++ b/etc/policy/README.rst @@ -0,0 +1,40 @@ +=========================== +Octavia Sample Policy Files +=========================== + +The sample policy.yaml files described here can be copied into +/etc/octavia/policy.yaml to override the default RBAC policy for Octavia. + +See the `Octavia Policy Guide `_ for more information about these policy override files. + +admin_or_owner-policy.yaml +-------------------------- +This policy file disables the requirement for load-balancer service users to +have one of the load-balancer:* roles. It provides a similar policy to +legacy OpenStack policies where any user or admin has access to load-balancer +resources that they own. Users with the admin role has access to all +load-balancer resources, whether they own them or not. + +keystone_default_roles-policy.yaml +---------------------------------- +This policy file disables the requirement for load-balancer service users to +have one of the load-balancer:* roles. + +This policy will honor the following Keystone default roles in the Octavia API: + +* Admin +* Project scoped - Reader +* Project scoped - Member + +keystone_default_roles_scoped-policy.yaml +---------------------------------- +This policy file disables the requirement for load-balancer service users to +have one of the load-balancer:* roles. + +This policy will honor the following Keystone default roles and scopes in the +Octavia API: + +* System scoped - Admin +* System scoped - Reader +* Project scoped - Reader +* Project scoped - Member diff --git a/etc/policy/admin_or_owner-policy.yaml b/etc/policy/admin_or_owner-policy.yaml new file mode 100644 index 0000000000..d81d7a203f --- /dev/null +++ b/etc/policy/admin_or_owner-policy.yaml @@ -0,0 +1,18 @@ +# This policy.yaml will revert the Octavia API to follow the legacy +# admin-or-owner RBAC policies. +# It provides a similar policy to legacy OpenStack policies where any +# user or admin has access to load-balancer resources that they own. +# Users with the admin role has access to all load-balancer resources, +# whether they own them or not. 
+ +# Role Rules +"context_is_admin": "role:admin or role:load-balancer_admin" +"admin_or_owner": "is_admin:True or project_id:%(project_id)s" + +# Rules +"load-balancer:read": "rule:admin_or_owner" +"load-balancer:read-global": "is_admin:True" +"load-balancer:write": "rule:admin_or_owner" +"load-balancer:read-quota": "rule:admin_or_owner" +"load-balancer:read-quota-global": "is_admin:True" +"load-balancer:write-quota": "is_admin:True" diff --git a/etc/policy/keystone_default_roles-policy.yaml b/etc/policy/keystone_default_roles-policy.yaml new file mode 100644 index 0000000000..6b8ec283b8 --- /dev/null +++ b/etc/policy/keystone_default_roles-policy.yaml @@ -0,0 +1,37 @@ +# This policy YAML file will revert the Octavia API to follow the keystone +# "default role" RBAC policies. +# +# The [oslo_policy] enforce_scope and enforce_new_defaults must be True. +# +# Users will not be required to be a member of the load-balancer_* roles +# to take action on Octavia resources. +# Keystone token scoping and "default roles"/personas will still be enforced. + +# Role Rules +"system_admin": "role:admin" +"system_reader": "role:admin" +"project_reader": "role:reader and project_id:%(project_id)s" +"project_member": "role:member and project_id:%(project_id)s" + +"context_is_admin": "role:admin" + +# API Rules +"load-balancer:admin": "is_admin:True or + rule:system_admin or + role:load-balancer_admin" + +"load-balancer:read": "is_admin:True or + rule:system_reader or + rule:project_reader" + +"load-balancer:read-global": "is_admin:True or rule:system_reader" + +"load-balancer:write": "is_admin:True or rule:project_member" + +"load-balancer:read-quota": "is_admin:True or + rule:system_reader or + rule:project_reader" + +"load-balancer:read-quota-global": "is_admin:True or rule:system_reader" + +"load-balancer:write-quota": "is_admin:True" diff --git a/etc/policy/keystone_default_roles_scoped-policy.yaml b/etc/policy/keystone_default_roles_scoped-policy.yaml new file mode 100644 index 0000000000..61d7bb857d --- /dev/null +++ b/etc/policy/keystone_default_roles_scoped-policy.yaml @@ -0,0 +1,37 @@ +# This policy YAML file will revert the Octavia API to follow the keystone +# "default role" RBAC policies. +# +# The [oslo_policy] enforce_scope and enforce_new_defaults must be True. +# +# Users will not be required to be a member of the load-balancer_* roles +# to take action on Octavia resources. +# Keystone token scoping and "default roles"/personas will still be enforced. 
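+# +# Illustrative example (not part of the sample rules below): with scope +# enforcement enabled, a system-scoped token with the "reader" role can read +# load balancers in every project, while a project-scoped "member" token can +# only act on resources in its own project.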
+ +# Role Rules +"system_admin": "role:admin and system_scope:all" +"system_reader": "role:reader and system_scope:all" +"project_reader": "role:reader and project_id:%(project_id)s" +"project_member": "role:member and project_id:%(project_id)s" + +"context_is_admin": "role:admin and system_scope:all" + +# API Rules +"load-balancer:admin": "is_admin:True or + rule:system_admin or + role:load-balancer_admin" + +"load-balancer:read": "is_admin:True or + rule:system_reader or + rule:project_reader" + +"load-balancer:read-global": "is_admin:True or rule:system_reader" + +"load-balancer:write": "is_admin:True or rule:project_member" + +"load-balancer:read-quota": "is_admin:True or + rule:system_reader or + rule:project_reader" + +"load-balancer:read-quota-global": "is_admin:True or rule:system_reader" + +"load-balancer:write-quota": "is_admin:True" diff --git a/etc/policy/octavia-advanced-rbac-policy.yaml b/etc/policy/octavia-advanced-rbac-policy.yaml new file mode 100644 index 0000000000..2c0041f245 --- /dev/null +++ b/etc/policy/octavia-advanced-rbac-policy.yaml @@ -0,0 +1,74 @@ +# This policy YAML file implements the "Advanced RBAC" rules for Octavia that +# were introduced in the Pike release of the Octavia API. +# +# These rules require users to have a load-balancer_* role to be able to access +# the Octavia v2 API. +# +# This is stricter than the "Keystone Default Roles" implemented in the code +# as part of the "Consistent and Secure Default RBAC" OpenStack community goal. + +# The default is to not allow access unless the auth_strategy is 'noauth'. +# Users must be a member of one of the following roles to have access to +# the load-balancer API: +# +# role:load-balancer_observer +#     User has access to load-balancer read-only APIs +# role:load-balancer_global_observer +#     User has access to load-balancer read-only APIs including resources +#     owned by others. +# role:load-balancer_member +#     User has access to load-balancer read and write APIs +# role:load-balancer_admin +#     User is considered an admin for all load-balancer APIs including +#     resources owned by others. +# role:admin +#     User is admin to all APIs + +"context_is_admin": "role:admin or + role:load-balancer_admin" + +# API access roles + +"load-balancer:owner": "project_id:%(project_id)s" + +# Note: 'is_admin:True' is a policy rule that takes into account the +# auth_strategy == noauth configuration setting.
+# It is equivalent to 'rule:context_is_admin or {auth_strategy == noauth}' + +"load-balancer:admin": "is_admin:True or + role:admin or + role:load-balancer_admin" + +"load-balancer:observer_and_owner": "role:load-balancer_observer and + rule:load-balancer:owner" + +"load-balancer:global_observer": "role:load-balancer_global_observer" + +"load-balancer:member_and_owner": "role:load-balancer_member and + rule:load-balancer:owner" + +# API access methods + +"load-balancer:read": "rule:load-balancer:observer_and_owner or + rule:load-balancer:global_observer or + rule:load-balancer:member_and_owner or + rule:load-balancer:admin" + +"load-balancer:read-global": "rule:load-balancer:global_observer or + rule:load-balancer:admin" + +"load-balancer:write": "rule:load-balancer:member_and_owner or + rule:load-balancer:admin" + +"load-balancer:read-quota": "rule:load-balancer:observer_and_owner or + rule:load-balancer:global_observer or + rule:load-balancer:member_and_owner or + role:load-balancer_quota_admin or + rule:load-balancer:admin" + +"load-balancer:read-quota-global": "rule:load-balancer:global_observer or + role:load-balancer_quota_admin or + rule:load-balancer:admin" + +"load-balancer:write-quota": "role:load-balancer_quota_admin or + rule:load-balancer:admin" diff --git a/etc/policy/octavia-policy-generator.conf b/etc/policy/octavia-policy-generator.conf new file mode 100644 index 0000000000..0364582bb0 --- /dev/null +++ b/etc/policy/octavia-policy-generator.conf @@ -0,0 +1,4 @@ +[DEFAULT] +format = yaml +output_file = etc/octavia/policy.yaml.sample +namespace = octavia diff --git a/httpd/octavia-api.conf b/httpd/octavia-api.conf new file mode 100644 index 0000000000..aa708f446d --- /dev/null +++ b/httpd/octavia-api.conf @@ -0,0 +1,23 @@ +Listen 9876 + +<VirtualHost *:9876> + + WSGIDaemonProcess octavia-wsgi processes=5 threads=1 user=octavia group=octavia display-name=%{GROUP} + WSGIProcessGroup octavia-wsgi + WSGIScriptAlias / /usr/local/bin/octavia-wsgi + WSGIApplicationGroup %{GLOBAL} + + ErrorLog /var/log/apache2/octavia-wsgi.log + + <Directory /usr/local/bin> + WSGIProcessGroup octavia-wsgi + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> + +</VirtualHost> diff --git a/octavia/__init__.py b/octavia/__init__.py new file mode 100644 index 0000000000..3e9bdbabc8 --- /dev/null +++ b/octavia/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2011-2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import gettext + +gettext.install('octavia') diff --git a/octavia/amphorae/__init__.py b/octavia/amphorae/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/amphorae/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/amphorae/backends/__init__.py b/octavia/amphorae/backends/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/amphorae/backends/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/amphorae/backends/agent/__init__.py b/octavia/amphorae/backends/agent/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/amphorae/backends/agent/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/amphorae/backends/agent/agent_jinja_cfg.py b/octavia/amphorae/backends/agent/agent_jinja_cfg.py new file mode 100644 index 0000000000..3709d84770 --- /dev/null +++ b/octavia/amphorae/backends/agent/agent_jinja_cfg.py @@ -0,0 +1,60 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
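+ +# This module renders the amphora agent's own configuration file from a +# Jinja2 template, filling in controller endpoints, certificate paths, +# heartbeat settings and log facilities from the Octavia configuration.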
+ +import os + +import jinja2 + +from octavia.common.config import cfg +from octavia.common import constants + +CONF = cfg.CONF + +TEMPLATES_DIR = (os.path.dirname(os.path.realpath(__file__)) + + constants.AGENT_API_TEMPLATES + '/') + + +class AgentJinjaTemplater: + + def __init__(self): + template_loader = jinja2.FileSystemLoader(searchpath=os.path.dirname( + TEMPLATES_DIR)) + jinja_env = jinja2.Environment(loader=template_loader, autoescape=True) + self.agent_template = jinja_env.get_template( + constants.AGENT_CONF_TEMPLATE) + + def build_agent_config(self, amphora_id, topology): + return self.agent_template.render( + {'agent_server_ca': CONF.amphora_agent.agent_server_ca, + 'agent_server_cert': CONF.amphora_agent.agent_server_cert, + 'agent_server_network_dir': + CONF.amphora_agent.agent_server_network_dir, + 'agent_request_read_timeout': + CONF.amphora_agent.agent_request_read_timeout, + 'amphora_id': amphora_id, + 'base_cert_dir': CONF.haproxy_amphora.base_cert_dir, + 'base_path': CONF.haproxy_amphora.base_path, + 'bind_host': CONF.haproxy_amphora.bind_host, + 'bind_port': CONF.haproxy_amphora.bind_port, + 'controller_list': CONF.health_manager.controller_ip_port_list, + 'debug': CONF.debug, + 'haproxy_cmd': CONF.haproxy_amphora.haproxy_cmd, + 'heartbeat_interval': CONF.health_manager.heartbeat_interval, + 'heartbeat_key': CONF.health_manager.heartbeat_key, + 'amphora_udp_driver': CONF.amphora_agent.amphora_udp_driver, + 'agent_tls_protocol': CONF.amphora_agent.agent_tls_protocol, + 'topology': topology, + 'administrative_log_facility': + CONF.amphora_agent.administrative_log_facility, + 'user_log_facility': CONF.amphora_agent.user_log_facility}) diff --git a/octavia/amphorae/backends/agent/api_server/__init__.py b/octavia/amphorae/backends/agent/api_server/__init__.py new file mode 100644 index 0000000000..5de10d17a2 --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +VERSION = '1.0' diff --git a/octavia/amphorae/backends/agent/api_server/amphora_info.py b/octavia/amphorae/backends/agent/api_server/amphora_info.py new file mode 100644 index 0000000000..54951ec86d --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/amphora_info.py @@ -0,0 +1,206 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
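+ +# Builds the amphora "info" and "details" payloads for the agent API by +# reading haproxy/LVS state and system statistics (CPU, memory, disk, load +# and per-interface byte counters), mostly parsed from /proc.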
+ +import os +import re +import socket +import subprocess + +from oslo_log import log as logging +import pyroute2 +import webob + +from octavia.amphorae.backends.agent import api_server +from octavia.amphorae.backends.agent.api_server import util +from octavia.amphorae.backends.utils import network_utils +from octavia.common import constants as consts +from octavia.common import exceptions + +LOG = logging.getLogger(__name__) + + +class AmphoraInfo: + def __init__(self, osutils): + self._osutils = osutils + + def compile_amphora_info(self, extend_lvs_driver=None): + extend_body = {} + if extend_lvs_driver: + extend_body = self._get_extend_body_from_lvs_driver( + extend_lvs_driver) + body = {'hostname': socket.gethostname(), + 'haproxy_version': + self._get_version_of_installed_package('haproxy'), + 'api_version': api_server.VERSION} + if extend_body: + body.update(extend_body) + return webob.Response(json=body) + + def compile_amphora_details(self, extend_lvs_driver=None): + haproxy_loadbalancer_list = sorted(util.get_loadbalancers()) + haproxy_listener_list = sorted(util.get_listeners()) + extend_body = {} + lvs_listener_list = [] + if extend_lvs_driver: + lvs_listener_list = util.get_lvs_listeners() + extend_data = self._get_extend_body_from_lvs_driver( + extend_lvs_driver) + lvs_count = self._count_lvs_listener_processes( + extend_lvs_driver, + lvs_listener_list) + extend_body['lvs_listener_process_count'] = lvs_count + extend_body.update(extend_data) + meminfo = self._get_meminfo() + cpu = self._cpu() + st = os.statvfs('/') + listeners = ( + sorted(set(haproxy_listener_list + lvs_listener_list)) + if lvs_listener_list else haproxy_listener_list) + body = {'hostname': socket.gethostname(), + 'haproxy_version': + self._get_version_of_installed_package('haproxy'), + 'api_version': api_server.VERSION, + 'networks': self._get_networks(), + 'active': True, + 'haproxy_count': + self._count_haproxy_processes(haproxy_loadbalancer_list), + 'cpu_count': os.cpu_count(), + 'cpu': { + 'total': cpu['total'], + 'user': cpu['user'], + 'system': cpu['system'], + 'soft_irq': cpu['softirq'], }, + 'memory': { + 'total': meminfo['MemTotal'], + 'free': meminfo['MemFree'], + 'buffers': meminfo['Buffers'], + 'cached': meminfo['Cached'], + 'swap_used': meminfo['SwapCached'], + 'shared': meminfo['Shmem'], + 'slab': meminfo['Slab'], }, + 'disk': { + 'used': (st.f_blocks - st.f_bfree) * st.f_frsize, + 'available': st.f_bavail * st.f_frsize}, + 'load': self._load(), + 'active_tuned_profiles': self._get_active_tuned_profiles(), + 'topology': consts.TOPOLOGY_SINGLE, + 'topology_status': consts.TOPOLOGY_STATUS_OK, + 'listeners': listeners, + 'packages': {}} + if extend_body: + body.update(extend_body) + return webob.Response(json=body) + + def _get_version_of_installed_package(self, name): + + cmd = self._osutils.cmd_get_version_of_installed_package(name) + version = subprocess.check_output(cmd.split(), encoding='utf-8') + return version + + def _count_haproxy_processes(self, lb_list): + num = 0 + for lb_id in lb_list: + if util.is_lb_running(lb_id): + # optional check if it's still running + num += 1 + return num + + def _count_lvs_listener_processes(self, lvs_driver, listener_list): + num = 0 + for listener_id in listener_list: + if util.is_lvs_listener_running(listener_id): + # optional check if it's still running + num += 1 + return num + + def _get_extend_body_from_lvs_driver(self, extend_lvs_driver): + extend_info = extend_lvs_driver.get_subscribed_amp_compile_info() + extend_data = {} + for extend in 
extend_info: + package_version = self._get_version_of_installed_package(extend) + extend_data[f'{extend}_version'] = package_version + return extend_data + + def _get_meminfo(self): + re_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB') + result = {} + with open('/proc/meminfo', encoding='utf-8') as meminfo: + for line in meminfo: + match = re_parser.match(line) + if not match: + continue # skip lines that don't parse + key, value = match.groups(['key', 'value']) + result[key] = int(value) + return result + + def _cpu(self): + with open('/proc/stat', encoding='utf-8') as f: + cpu = f.readline() + vals = cpu.split(' ') + return { + 'user': vals[2], + 'nice': vals[3], + 'system': vals[4], + 'idle': vals[5], + 'iowait': vals[6], + 'irq': vals[7], + 'softirq': vals[8], + 'total': sum(int(i) for i in vals[2:]) + } + + def _load(self): + with open('/proc/loadavg', encoding='utf-8') as f: + load = f.readline() + vals = load.split(' ') + return vals[:3] + + def _get_networks(self): + networks = {} + with pyroute2.NetNS(consts.AMPHORA_NAMESPACE) as netns: + for interface in netns.get_links(): + interface_name = None + for item in interface['attrs']: + if (item[0] == consts.IFLA_IFNAME and + not item[1].startswith('eth')): + break + if item[0] == consts.IFLA_IFNAME: + interface_name = item[1] + if item[0] == 'IFLA_STATS64': + networks[interface_name] = { + 'network_tx': item[1]['tx_bytes'], + 'network_rx': item[1]['rx_bytes']} + return networks + + def get_interface(self, ip_addr): + try: + interface = network_utils.get_interface_name( + ip_addr, net_ns=consts.AMPHORA_NAMESPACE) + except exceptions.InvalidIPAddress: + return webob.Response(json={'message': "Invalid IP address"}, + status=400) + except exceptions.NotFound: + return webob.Response( + json={'message': "Error interface not found for IP address"}, + status=404) + return webob.Response(json={'message': 'OK', 'interface': interface}, + status=200) + + def _get_active_tuned_profiles(self) -> str: + """Returns the active TuneD profile(s)""" + try: + with open("/etc/tuned/active_profile", encoding="utf-8") as f: + return f.read(1024).strip() + except OSError as ex: + LOG.debug("Reading active TuneD profiles failed: %r", ex) + return "" diff --git a/octavia/amphorae/backends/agent/api_server/certificate_update.py b/octavia/amphorae/backends/agent/api_server/certificate_update.py new file mode 100644 index 0000000000..79510a13d2 --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/certificate_update.py @@ -0,0 +1,39 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
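+ +# Receives the agent's server certificate from the controller and streams it +# to disk in fixed-size chunks, creating the file with owner-only (0600) +# permissions so the certificate and any bundled key are not world-readable.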
+ +import os +import stat + +import flask +from oslo_config import cfg +import webob + +BUFFER = 1024 + +CONF = cfg.CONF + + +def upload_server_cert(): + stream = flask.request.stream + file_path = CONF.amphora_agent.agent_server_cert + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + # mode 00600 + mode = stat.S_IRUSR | stat.S_IWUSR + with os.fdopen(os.open(file_path, flags, mode), 'wb') as crt_file: + b = stream.read(BUFFER) + while b: + crt_file.write(b) + b = stream.read(BUFFER) + + return webob.Response(json={'message': 'OK'}, status=202) diff --git a/octavia/amphorae/backends/agent/api_server/haproxy_compatibility.py b/octavia/amphorae/backends/agent/api_server/haproxy_compatibility.py new file mode 100644 index 0000000000..4a918864be --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/haproxy_compatibility.py @@ -0,0 +1,55 @@ +# Copyright 2017 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re +import subprocess + +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + + +def get_haproxy_versions(): + """Get major and minor version number from haproxy + + :returns major_version: The major version digit + :returns minor_version: The minor version digit + """ + cmd = "haproxy -v" + + version = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + + version_re = re.search(r'.*version (.+?)\.(.+?)(\.|-dev).*', + version.decode('utf-8')) + + major_version = int(version_re.group(1)) + minor_version = int(version_re.group(2)) + + return major_version, minor_version + + +def process_cfg_for_version_compat(haproxy_cfg): + + major, minor = get_haproxy_versions() + + # Versions less than 1.6 do not support external health checks. + # Remove those configuration lines. + if major < 2 and minor < 6: + LOG.warning("Found %(major)s.%(minor)s version of haproxy. " + "Disabling external checks. Health monitor of type " + "PING will revert to TCP.", + {'major': major, 'minor': minor}) + haproxy_cfg = re.sub(r" * ?.*external-check ?.*\s", "", haproxy_cfg) + + return haproxy_cfg diff --git a/octavia/amphorae/backends/agent/api_server/keepalived.py b/octavia/amphorae/backends/agent/api_server/keepalived.py new file mode 100644 index 0000000000..781219d56a --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/keepalived.py @@ -0,0 +1,149 @@ +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
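+ +# Manages the keepalived service used for active/standby (VRRP) topologies: +# writes the uploaded keepalived configuration, renders the systemd unit and +# check script from Jinja2 templates, and starts, stops or reloads the +# service on request.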
+ +import os +import stat +import subprocess + +import flask +import jinja2 +from oslo_config import cfg +from oslo_log import log as logging +import webob + +from octavia.amphorae.backends.agent.api_server import loadbalancer +from octavia.amphorae.backends.agent.api_server import util +from octavia.common import constants as consts + + +BUFFER = 100 +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + +j2_env = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader( + os.path.dirname(os.path.realpath(__file__)) + consts.AGENT_API_TEMPLATES)) +SYSTEMD_TEMPLATE = j2_env.get_template(consts.KEEPALIVED_JINJA2_SYSTEMD) +check_script_template = j2_env.get_template(consts.CHECK_SCRIPT_CONF) + + +class Keepalived: + + def upload_keepalived_config(self): + stream = loadbalancer.Wrapped(flask.request.stream) + + if not os.path.exists(util.keepalived_dir()): + os.makedirs(util.keepalived_dir()) + if not os.path.exists(util.keepalived_check_scripts_dir()): + os.makedirs(util.keepalived_check_scripts_dir()) + + conf_file = util.keepalived_cfg_path() + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + # mode 00644 + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + with os.fdopen(os.open(conf_file, flags, mode), 'wb') as f: + b = stream.read(BUFFER) + while b: + f.write(b) + b = stream.read(BUFFER) + + file_path = util.keepalived_init_path() + + template = SYSTEMD_TEMPLATE + + # Render and install the network namespace systemd service + util.install_netns_systemd_service() + util.run_systemctl_command( + consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX, False) + + # mode 00644 + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + if not os.path.exists(file_path): + with os.fdopen(os.open(file_path, flags, mode), 'w') as text_file: + text = template.render( + keepalived_pid=util.keepalived_pid_path(), + keepalived_cmd=consts.KEEPALIVED_CMD, + keepalived_cfg=util.keepalived_cfg_path(), + keepalived_log=util.keepalived_log_path(), + amphora_nsname=consts.AMPHORA_NAMESPACE, + amphora_netns=consts.AMP_NETNS_SVC_PREFIX, + administrative_log_facility=( + CONF.amphora_agent.administrative_log_facility), + ) + text_file.write(text) + + # Renders the Keepalived check script + keepalived_path = util.keepalived_check_script_path() + # mode 00755 + mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | + stat.S_IROTH | stat.S_IXOTH) + open_obj = os.open(keepalived_path, flags, mode) + with os.fdopen(open_obj, 'w') as text_file: + text = check_script_template.render( + check_scripts_dir=util.keepalived_check_scripts_dir() + ) + text_file.write(text) + + # Configure the monitoring of haproxy + util.vrrp_check_script_update(None, consts.AMP_ACTION_START) + + # Make sure the new service is enabled on boot + try: + util.run_systemctl_command(consts.ENABLE, + consts.KEEPALIVED_SYSTEMD) + except subprocess.CalledProcessError as e: + return webob.Response(json={ + 'message': "Error enabling octavia-keepalived service", + 'details': e.output}, status=500) + + res = webob.Response(json={'message': 'OK'}, status=200) + res.headers['ETag'] = stream.get_md5() + + return res + + def manager_keepalived_service(self, action): + action = action.lower() + if action not in [consts.AMP_ACTION_START, + consts.AMP_ACTION_STOP, + consts.AMP_ACTION_RELOAD]: + return webob.Response(json={ + 'message': 'Invalid Request', + 'details': f"Unknown action: {action}"}, status=400) + + if action == consts.AMP_ACTION_START: + keepalived_pid_path = util.keepalived_pid_path() + try: + # Is there a pid file for 
keepalived? + with open(keepalived_pid_path, encoding='utf-8') as pid_file: + pid = int(pid_file.readline()) + os.kill(pid, 0) + + # If we got here, it means the keepalived process is running. + # We should reload it instead of trying to start it again. + action = consts.AMP_ACTION_RELOAD + except OSError: + pass + + try: + util.run_systemctl_command(action, + consts.KEEPALIVED_SYSTEMD) + except subprocess.CalledProcessError as e: + return webob.Response(json={ + 'message': f"Failed to {action} octavia-keepalived service", + 'details': e.output}, status=500) + + return webob.Response( + json={'message': 'OK', + 'details': f'keepalived {action}ed'}, + status=202) diff --git a/octavia/amphorae/backends/agent/api_server/keepalivedlvs.py b/octavia/amphorae/backends/agent/api_server/keepalivedlvs.py new file mode 100644 index 0000000000..35c9d0d2ae --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/keepalivedlvs.py @@ -0,0 +1,304 @@ +# Copyright 2011-2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import re +import shutil +import stat +import subprocess + +import flask +import jinja2 +from oslo_config import cfg +from oslo_log import log as logging +import webob +from werkzeug import exceptions + +from octavia.amphorae.backends.agent.api_server import loadbalancer +from octavia.amphorae.backends.agent.api_server import lvs_listener_base +from octavia.amphorae.backends.agent.api_server import util +from octavia.common import constants as consts + +BUFFER = 100 +CHECK_SCRIPT_NAME = 'udp_check.sh' +CONF = cfg.CONF +KEEPALIVED_CHECK_SCRIPT_NAME = 'lvs_udp_check.sh' +LOG = logging.getLogger(__name__) + +j2_env = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader( + os.path.dirname(os.path.realpath(__file__)) + consts.AGENT_API_TEMPLATES)) +SYSTEMD_TEMPLATE = j2_env.get_template(consts.KEEPALIVED_JINJA2_SYSTEMD) +check_script_file_template = j2_env.get_template( + consts.KEEPALIVED_CHECK_SCRIPT) + + +class KeepalivedLvs(lvs_listener_base.LvsListenerApiServerBase): + + _SUBSCRIBED_AMP_COMPILE = ['keepalived', 'ipvsadm'] + + def upload_lvs_listener_config(self, listener_id): + stream = loadbalancer.Wrapped(flask.request.stream) + NEED_CHECK = True + + if not os.path.exists(util.keepalived_lvs_dir()): + os.makedirs(util.keepalived_lvs_dir()) + if not os.path.exists(util.keepalived_backend_check_script_dir()): + current_file_dir, _ = os.path.split(os.path.abspath(__file__)) + + try: + script_dir = os.path.join(os.path.abspath( + os.path.join(current_file_dir, '../..')), 'utils') + assert True is os.path.exists(script_dir) + assert True is os.path.exists(os.path.join( + script_dir, CHECK_SCRIPT_NAME)) + except Exception as e: + raise exceptions.Conflict( + description='%(file_name)s not Found for ' + 'UDP Listener %(listener_id)s' % + {'file_name': CHECK_SCRIPT_NAME, + 'listener_id': listener_id}) from e + os.makedirs(util.keepalived_backend_check_script_dir()) + shutil.copy2(os.path.join(script_dir, CHECK_SCRIPT_NAME), + 
util.keepalived_backend_check_script_path()) + os.chmod(util.keepalived_backend_check_script_path(), stat.S_IEXEC) + # Based on current topology setting, only the amphora instances in + # Active-Standby topology will create the directory below. So for + # Single topology, it should not create the directory and the check + # scripts for status change. + if (CONF.controller_worker.loadbalancer_topology != + consts.TOPOLOGY_ACTIVE_STANDBY): + NEED_CHECK = False + + conf_file = util.keepalived_lvs_cfg_path(listener_id) + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + # mode 00644 + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + with os.fdopen(os.open(conf_file, flags, mode), 'wb') as f: + b = stream.read(BUFFER) + while b: + f.write(b) + b = stream.read(BUFFER) + + file_path = util.keepalived_lvs_init_path(listener_id) + + template = SYSTEMD_TEMPLATE + + # Render and install the network namespace systemd service + util.install_netns_systemd_service() + util.run_systemctl_command( + consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX, False) + + # Render and install the keepalivedlvs init script + # mode 00644 + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + keepalived_pid, vrrp_pid, check_pid = util.keepalived_lvs_pids_path( + listener_id) + if not os.path.exists(file_path): + with os.fdopen(os.open(file_path, flags, mode), 'w') as text_file: + text = template.render( + keepalived_pid=keepalived_pid, + vrrp_pid=vrrp_pid, + check_pid=check_pid, + keepalived_cmd=consts.KEEPALIVED_CMD, + keepalived_cfg=util.keepalived_lvs_cfg_path(listener_id), + amphora_nsname=consts.AMPHORA_NAMESPACE, + amphora_netns=consts.AMP_NETNS_SVC_PREFIX, + administrative_log_facility=( + CONF.amphora_agent.administrative_log_facility), + ) + text_file.write(text) + + # Make sure the keepalivedlvs service is enabled on boot + try: + util.run_systemctl_command( + consts.ENABLE, + consts.KEEPALIVEDLVS_SYSTEMD % listener_id) + except subprocess.CalledProcessError as e: + return webob.Response(json={ + 'message': ("Error enabling " + "octavia-keepalivedlvs service"), + 'details': e.output}, status=500) + + if NEED_CHECK: + # inject the check script for keepalived process + script_path = os.path.join(util.keepalived_check_scripts_dir(), + KEEPALIVED_CHECK_SCRIPT_NAME) + if not os.path.exists(script_path): + if not os.path.exists(util.keepalived_check_scripts_dir()): + os.makedirs(util.keepalived_check_scripts_dir()) + + with os.fdopen(os.open(script_path, flags, stat.S_IEXEC), + 'w') as script_file: + text = check_script_file_template.render( + consts=consts, + keepalived_lvs_pid_dir=util.keepalived_lvs_dir() + ) + script_file.write(text) + util.vrrp_check_script_update(None, consts.AMP_ACTION_START) + + res = webob.Response(json={'message': 'OK'}, status=200) + res.headers['ETag'] = stream.get_md5() + return res + + def _check_lvs_listener_exists(self, listener_id): + if not os.path.exists(util.keepalived_lvs_cfg_path(listener_id)): + raise exceptions.HTTPException( + response=webob.Response(json={ + 'message': 'UDP Listener Not Found', + 'details': f"No UDP listener with UUID: {listener_id}"}, + status=404)) + + def get_lvs_listener_config(self, listener_id): + """Gets the keepalivedlvs config + + :param listener_id: the id of the listener + """ + self._check_lvs_listener_exists(listener_id) + with open(util.keepalived_lvs_cfg_path(listener_id), + encoding='utf-8') as file: + cfg = file.read() + resp = webob.Response(cfg, content_type='text/plain') + return resp + + def 
manage_lvs_listener(self, listener_id, action): + action = action.lower() + if action not in [consts.AMP_ACTION_START, + consts.AMP_ACTION_STOP, + consts.AMP_ACTION_RELOAD]: + return webob.Response(json={ + 'message': 'Invalid Request', + 'details': f"Unknown action: {action}"}, status=400) + + # When octavia requests a reload of keepalived, force a restart since + # a keepalived reload doesn't restore members in their initial state. + # + # TODO(gthiemonge) remove this when keepalived>=2.0.14 is widely used + if action == consts.AMP_ACTION_RELOAD: + action = consts.AMP_ACTION_RESTART + + self._check_lvs_listener_exists(listener_id) + if action == consts.AMP_ACTION_RELOAD: + if consts.OFFLINE == self._check_lvs_listener_status(listener_id): + action = consts.AMP_ACTION_START + + try: + util.run_systemctl_command( + action, consts.KEEPALIVEDLVS_SYSTEMD % listener_id) + except subprocess.CalledProcessError as e: + return webob.Response(json={ + 'message': (f"Failed to {action} keepalivedlvs listener " + f"{listener_id}"), + 'details': e.output}, status=500) + + is_vrrp = (CONF.controller_worker.loadbalancer_topology == + consts.TOPOLOGY_ACTIVE_STANDBY) + # TODO(gthiemonge) remove RESTART from the list (same as previous todo + # in this function) + if not is_vrrp and action in [consts.AMP_ACTION_START, + consts.AMP_ACTION_RESTART, + consts.AMP_ACTION_RELOAD]: + util.send_vip_advertisements(listener_id=listener_id) + + return webob.Response( + json={'message': 'OK', + 'details': (f'keepalivedlvs listener {listener_id} ' + f'{action}ed')}, + status=202) + + def _check_lvs_listener_status(self, listener_id): + if os.path.exists(util.keepalived_lvs_pids_path(listener_id)[0]): + if os.path.exists(os.path.join( + '/proc', util.get_keepalivedlvs_pid(listener_id))): + # Check if the listener is disabled + with open(util.keepalived_lvs_cfg_path(listener_id), + encoding='utf-8') as file: + cfg = file.read() + m = re.search('virtual_server', cfg) + if m: + return consts.ACTIVE + return consts.OFFLINE + return consts.ERROR + return consts.OFFLINE + + def get_all_lvs_listeners_status(self): + """Gets the status of all UDP listeners + + Gets the status of all UDP listeners on the amphora. + """ + listeners = [] + + for lvs_listener in util.get_lvs_listeners(): + status = self._check_lvs_listener_status(lvs_listener) + listeners.append({ + 'status': status, + 'uuid': lvs_listener, + 'type': 'UDP', + }) + return listeners + + def delete_lvs_listener(self, listener_id): + try: + self._check_lvs_listener_exists(listener_id) + except exceptions.HTTPException: + return webob.Response(json={'message': 'OK'}) + + # check if keepalived is still running and stop it if so + keepalived_pid, vrrp_pid, check_pid = util.keepalived_lvs_pids_path( + listener_id) + if os.path.exists(keepalived_pid) and os.path.exists( + os.path.join('/proc', + util.get_keepalivedlvs_pid(listener_id))): + try: + util.run_systemctl_command( + consts.STOP, + consts.KEEPALIVEDLVS_SYSTEMD % listener_id) + except subprocess.CalledProcessError as e: + LOG.error("Failed to stop keepalivedlvs service: %s", e) + return webob.Response(json={ + 'message': "Error stopping keepalivedlvs", + 'details': e.output}, status=500) + + # The LVS check script relies on the keepalived pid file to determine + # whether keepalived is alive. Since the previous step stopped the + # keepalived process, we must make sure the pid files no longer + # exist.
+ if (os.path.exists(keepalived_pid) or + os.path.exists(vrrp_pid) or os.path.exists(check_pid)): + for pid in [keepalived_pid, vrrp_pid, check_pid]: + os.remove(pid) + + # disable the service + init_path = util.keepalived_lvs_init_path(listener_id) + + try: + util.run_systemctl_command( + consts.DISABLE, + consts.KEEPALIVEDLVS_SYSTEMD % listener_id) + except subprocess.CalledProcessError as e: + LOG.error("Failed to disable " + "octavia-keepalivedlvs-%(list)s service: " + "%(err)s", {'list': listener_id, 'err': str(e)}) + return webob.Response(json={ + 'message': ( + f"Error disabling octavia-keepalivedlvs-{listener_id} " + "service"), + 'details': e.output}, status=500) + + # delete the init script, config file and log file for that listener + if os.path.exists(init_path): + os.remove(init_path) + if os.path.exists(util.keepalived_lvs_cfg_path(listener_id)): + os.remove(util.keepalived_lvs_cfg_path(listener_id)) + + return webob.Response(json={'message': 'OK'}) diff --git a/octavia/amphorae/backends/agent/api_server/loadbalancer.py b/octavia/amphorae/backends/agent/api_server/loadbalancer.py new file mode 100644 index 0000000000..961dc324b5 --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/loadbalancer.py @@ -0,0 +1,464 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
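+ +# Agent-side haproxy management: validates and installs uploaded haproxy +# configurations, renders the per-load-balancer systemd unit, handles +# start/stop/reload with retries, and manages TLS certificate files.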
+ +import hashlib +import io +import os +import re +import shutil +import stat +import subprocess +import time + +import flask +import jinja2 +from oslo_config import cfg +from oslo_log import log as logging +import webob +from werkzeug import exceptions + +from octavia.amphorae.backends.agent.api_server import haproxy_compatibility +from octavia.amphorae.backends.agent.api_server import util +from octavia.amphorae.backends.utils import haproxy_query +from octavia.common import constants as consts +from octavia.common import utils as octavia_utils + +LOG = logging.getLogger(__name__) +BUFFER = 100 +HAPROXY_RELOAD_RETRIES = 3 +HAPROXY_QUERY_RETRIES = 5 + +CONF = cfg.CONF + +SYSTEMD_CONF = 'systemd.conf.j2' + +JINJA_ENV = jinja2.Environment( + autoescape=True, + loader=jinja2.FileSystemLoader(os.path.dirname( + os.path.realpath(__file__) + ) + consts.AGENT_API_TEMPLATES)) +SYSTEMD_TEMPLATE = JINJA_ENV.get_template(SYSTEMD_CONF) + + +# Wrap a stream so we can compute the md5 while reading +class Wrapped: + def __init__(self, stream_): + self.stream = stream_ + self.hash = hashlib.md5(usedforsecurity=False) # nosec + + def read(self, line): + block = self.stream.read(line) + if block: + self.hash.update(block) + return block + + def get_md5(self): + return self.hash.hexdigest() + + def __getattr__(self, attr): + return getattr(self.stream, attr) + + +class Loadbalancer: + + def get_haproxy_config(self, lb_id): + """Gets the haproxy config + + :param lb_id: the id of the loadbalancer + """ + self._check_lb_exists(lb_id) + with open(util.config_path(lb_id), encoding='utf-8') as file: + cfg = file.read() + resp = webob.Response(cfg, content_type='text/plain') + resp.headers['ETag'] = ( + hashlib.md5(octavia_utils.b(cfg), + usedforsecurity=False).hexdigest()) # nosec + return resp + + def upload_haproxy_config(self, amphora_id, lb_id): + """Upload the haproxy config + + :param amphora_id: The id of the amphora to update + :param lb_id: The id of the loadbalancer + """ + stream = Wrapped(flask.request.stream) + # We have to hash here because HAProxy has a string length limitation + # in the configuration file "peer <peer_name>" lines + peer_name = octavia_utils.base64_sha1_string(amphora_id).rstrip('=') + if not os.path.exists(util.haproxy_dir(lb_id)): + os.makedirs(util.haproxy_dir(lb_id)) + + name = os.path.join(util.haproxy_dir(lb_id), 'haproxy.cfg.new') + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + # mode 00600 + mode = stat.S_IRUSR | stat.S_IWUSR + b = stream.read(BUFFER) + s_io = io.StringIO() + while b: + # Write haproxy configuration to StringIO + s_io.write(b.decode('utf8')) + b = stream.read(BUFFER) + + # Since haproxy user_group is now auto-detected by the amphora agent, + # remove it from haproxy configuration in case it was provided + # by an older Octavia controller. This is needed in order to prevent + # a duplicate entry for 'group' in haproxy configuration, which will + # result in an error when haproxy starts.
+ new_config = re.sub(r"\s+group\s.+", "", s_io.getvalue()) + + # Handle any haproxy version compatibility issues + new_config = haproxy_compatibility.process_cfg_for_version_compat( + new_config) + + with os.fdopen(os.open(name, flags, mode), 'w') as file: + file.write(new_config) + + # use haproxy to check the config + cmd = (f"haproxy -c -L {peer_name} -f {name} -f " + f"{consts.HAPROXY_USER_GROUP_CFG}") + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT, + encoding='utf-8') + except subprocess.CalledProcessError as e: + LOG.error("Failed to verify haproxy file: %s %s", e, e.output) + # Save the last config that failed validation for debugging + os.rename(name, ''.join([name, '-failed'])) + return webob.Response( + json={'message': "Invalid request", 'details': e.output}, + status=400) + + # file ok - move it + os.rename(name, util.config_path(lb_id)) + + init_path = util.init_path(lb_id) + + template = SYSTEMD_TEMPLATE + # Render and install the network namespace systemd service + util.install_netns_systemd_service() + util.run_systemctl_command( + consts.ENABLE, consts.AMP_NETNS_SVC_PREFIX + '.service', False) + + # mode 00644 + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + hap_major, hap_minor = haproxy_compatibility.get_haproxy_versions() + if not os.path.exists(init_path): + with os.fdopen(os.open(init_path, flags, mode), 'w') as text_file: + + text = template.render( + peer_name=peer_name, + haproxy_pid=util.pid_path(lb_id), + haproxy_cmd=util.CONF.haproxy_amphora.haproxy_cmd, + haproxy_cfg=util.config_path(lb_id), + haproxy_state_file=util.state_file_path(lb_id), + haproxy_socket=util.haproxy_sock_path(lb_id), + haproxy_user_group_cfg=consts.HAPROXY_USER_GROUP_CFG, + amphora_netns=consts.AMP_NETNS_SVC_PREFIX, + amphora_nsname=consts.AMPHORA_NAMESPACE, + haproxy_major_version=hap_major, + haproxy_minor_version=hap_minor + ) + text_file.write(text) + + # Make sure the new service is enabled on boot + try: + util.run_systemctl_command( + consts.ENABLE, consts.LOADBALANCER_SYSTEMD % lb_id) + except subprocess.CalledProcessError as e: + return webob.Response(json={ + 'message': "Error enabling haproxy service", + 'details': e.output}, status=500) + + res = webob.Response(json={'message': 'OK'}, status=202) + res.headers['ETag'] = stream.get_md5() + + return res + + def _check_haproxy_uptime(self, lb_id): + stat_sock_file = util.haproxy_sock_path(lb_id) + lb_query = haproxy_query.HAProxyQuery(stat_sock_file) + retries = HAPROXY_QUERY_RETRIES + for idx in range(retries): + try: + info = lb_query.show_info() + uptime_sec = info['Uptime_sec'] + except Exception as e: + LOG.warning('Failed to get haproxy info: %s, retrying.', e) + time.sleep(1) + continue + uptime = int(uptime_sec) + return uptime + LOG.error('Failed to get haproxy uptime after %d tries.', retries) + return None + + def start_stop_lb(self, lb_id, action): + action = action.lower() + if action not in [consts.AMP_ACTION_START, + consts.AMP_ACTION_STOP, + consts.AMP_ACTION_RELOAD]: + return webob.Response(json={ + 'message': 'Invalid Request', + 'details': f"Unknown action: {action}"}, status=400) + + self._check_lb_exists(lb_id) + is_vrrp = (CONF.controller_worker.loadbalancer_topology == + consts.TOPOLOGY_ACTIVE_STANDBY) + + if is_vrrp: + util.vrrp_check_script_update(lb_id, action) + + # HAProxy does not start the process when given a reload + # so start it if haproxy is not already running + if action == consts.AMP_ACTION_RELOAD: + if consts.OFFLINE ==
self._check_haproxy_status(lb_id): + action = consts.AMP_ACTION_START + else: + # We first have to save the state when we reload + haproxy_state_file = util.state_file_path(lb_id) + stat_sock_file = util.haproxy_sock_path(lb_id) + + lb_query = haproxy_query.HAProxyQuery(stat_sock_file) + if not lb_query.save_state(haproxy_state_file): + # We still reload haproxy even if the state_file is + # not generated, but we probably want to know about that + # failure! + LOG.warning('Failed to save haproxy-%s state!', lb_id) + + retries = (HAPROXY_RELOAD_RETRIES + if action == consts.AMP_ACTION_RELOAD + else 1) + saved_exc = None + for idx in range(retries): + try: + util.run_systemctl_command( + action, consts.LOADBALANCER_SYSTEMD % lb_id) + except subprocess.CalledProcessError as e: + # Mitigation for + # https://bugs.launchpad.net/octavia/+bug/2054666 + if ('is not active, cannot reload.' in e.output and + action == consts.AMP_ACTION_RELOAD): + + saved_exc = e + + # Wait a few seconds and check that haproxy was restarted + uptime = self._check_haproxy_uptime(lb_id) + # If haproxy is not reachable or was restarted more than 15 + # sec ago, let's retry (or maybe restart?) + if not uptime or uptime > 15: + continue + # haproxy probably crashed and was restarted, log it and + # continue + LOG.warning("An error occurred with haproxy while it " + "was reloaded, check the haproxy logs for " + "more details.") + break + if 'Job is already running' not in e.output: + return webob.Response(json={ + 'message': f"Error {action}ing haproxy", + 'details': e.output + }, status=500) + break + else: + # no break, we reached the retry limit for reloads + return webob.Response(json={ + 'message': f"Error {action}ing haproxy", + 'details': saved_exc.output}, status=500) + + # If we are not in active/standby we need to send an IP + # advertisement (GARP or NA). Keepalived handles this for + # active/standby load balancers.
+ if not is_vrrp and action in [consts.AMP_ACTION_START, + consts.AMP_ACTION_RELOAD]: + util.send_vip_advertisements(lb_id) + + if action in [consts.AMP_ACTION_STOP, + consts.AMP_ACTION_RELOAD]: + return webob.Response(json={ + 'message': 'OK', + 'details': f'Listener {lb_id} {action}ed'}, status=202) + + details = ( + f'Configuration file is valid\nhaproxy daemon for {lb_id} started' + ) + + return webob.Response(json={'message': 'OK', 'details': details}, + status=202) + + def delete_lb(self, lb_id): + try: + self._check_lb_exists(lb_id) + except exceptions.HTTPException: + return webob.Response(json={'message': 'OK'}) + + # check if haproxy is still running and stop it if so + if os.path.exists(util.pid_path(lb_id)) and os.path.exists( + os.path.join('/proc', util.get_haproxy_pid(lb_id))): + try: + util.run_systemctl_command( + consts.STOP, consts.LOADBALANCER_SYSTEMD % lb_id) + except subprocess.CalledProcessError as e: + LOG.error("Failed to stop haproxy-%s service: %s %s", + lb_id, e, e.output) + return webob.Response(json={ + 'message': "Error stopping haproxy", + 'details': e.output}, status=500) + + # parse config and delete stats socket + try: + stats_socket = util.parse_haproxy_file(lb_id)[0] + os.remove(stats_socket) + except Exception: + pass + + # Since this script should be deleted at LB delete time + # we can check for this path to see if VRRP is enabled + # on this amphora and not write the file if VRRP is not in use + if os.path.exists(util.keepalived_check_script_path()): + util.vrrp_check_script_update( + lb_id, action=consts.AMP_ACTION_STOP) + + # delete the ssl files + try: + shutil.rmtree(self._cert_dir(lb_id)) + except Exception: + pass + + # disable the service + init_path = util.init_path(lb_id) + + util.run_systemctl_command( + consts.DISABLE, consts.LOADBALANCER_SYSTEMD % lb_id, False) + + # delete the directory + init script for that listener + shutil.rmtree(util.haproxy_dir(lb_id)) + if os.path.exists(init_path): + os.remove(init_path) + + return webob.Response(json={'message': 'OK'}) + + def get_all_listeners_status(self, other_listeners=None): + """Gets the status of all listeners + + This method will not consult the stats socket, + so a listener might show as ACTIVE but still be + in ERROR. + + Currently type==SSL is also not detected. + """ + listeners = [] + + for lb in util.get_loadbalancers(): + stats_socket, listeners_on_lb = util.parse_haproxy_file(lb) + + for listener_id, listener in listeners_on_lb.items(): + listeners.append({ + 'status': consts.ACTIVE, + 'uuid': listener_id, + 'type': listener['mode'], + }) + + if other_listeners: + listeners = listeners + other_listeners + return webob.Response(json=listeners, content_type='application/json') + + def upload_certificate(self, lb_id, filename): + self._check_ssl_filename_format(filename) + + # create directory if not already there + if not os.path.exists(self._cert_dir(lb_id)): + os.makedirs(self._cert_dir(lb_id)) + + stream = Wrapped(flask.request.stream) + file = self._cert_file_path(lb_id, filename) + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + # mode 00600 + mode = stat.S_IRUSR | stat.S_IWUSR + with os.fdopen(os.open(file, flags, mode), 'wb') as crt_file: + b = stream.read(BUFFER) + while b: + crt_file.write(b) + b = stream.read(BUFFER) + + resp = webob.Response(json={'message': 'OK'}) + resp.headers['ETag'] = stream.get_md5() + return resp + + def get_certificate_md5(self, lb_id, filename): + self._check_ssl_filename_format(filename) + + cert_path = self._cert_file_path(lb_id, filename) +
path_exists = os.path.exists(cert_path) + if not path_exists: + return webob.Response(json={ + 'message': 'Certificate Not Found', + 'details': f"No certificate with filename: {filename}"}, + status=404) + + with open(cert_path, encoding='utf-8') as crt_file: + cert = crt_file.read() + md5sum = hashlib.md5(octavia_utils.b(cert), + usedforsecurity=False).hexdigest() # nosec + resp = webob.Response(json={'md5sum': md5sum}) + resp.headers['ETag'] = md5sum + return resp + + def delete_certificate(self, lb_id, filename): + self._check_ssl_filename_format(filename) + if os.path.exists(self._cert_file_path(lb_id, filename)): + os.remove(self._cert_file_path(lb_id, filename)) + return webob.Response(json={'message': 'OK'}) + + def _get_listeners_on_lb(self, lb_id): + if os.path.exists(util.pid_path(lb_id)): + if os.path.exists( + os.path.join('/proc', util.get_haproxy_pid(lb_id))): + # Check if the listener is disabled + with open(util.config_path(lb_id), encoding='utf-8') as file: + cfg = file.read() + m = re.findall('^frontend (.*)$', cfg, re.MULTILINE) + return m or [] + else: # pid file but no process... + return [] + else: + return [] + + def _check_lb_exists(self, lb_id): + # check if we know about that lb + if lb_id not in util.get_loadbalancers(): + raise exceptions.HTTPException( + response=webob.Response(json={ + 'message': 'Loadbalancer Not Found', + 'details': f"No loadbalancer with UUID: {lb_id}"}, + status=404)) + + def _check_ssl_filename_format(self, filename): + # check if the format is (xxx.)*xxx.pem + if not re.search(r'(\w.)+pem', filename): + raise exceptions.HTTPException( + response=webob.Response(json={ + 'message': 'Filename has wrong format'}, status=400)) + + def _cert_dir(self, lb_id): + return os.path.join(util.CONF.haproxy_amphora.base_cert_dir, lb_id) + + def _cert_file_path(self, lb_id, filename): + return os.path.join(self._cert_dir(lb_id), filename) + + def _check_haproxy_status(self, lb_id): + if os.path.exists(util.pid_path(lb_id)): + if os.path.exists( + os.path.join('/proc', util.get_haproxy_pid(lb_id))): + return consts.ACTIVE + return consts.OFFLINE diff --git a/octavia/amphorae/backends/agent/api_server/lvs_listener_base.py b/octavia/amphorae/backends/agent/api_server/lvs_listener_base.py new file mode 100644 index 0000000000..ec068248a3 --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/lvs_listener_base.py @@ -0,0 +1,90 @@ +# Copyright 2018 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import abc + +from oslo_config import cfg + +CONF = cfg.CONF + + +class LvsListenerApiServerBase(metaclass=abc.ABCMeta): + """Base LVS Listener Server API + + """ + + _SUBSCRIBED_AMP_COMPILE = [] + + def get_subscribed_amp_compile_info(self): + return self._SUBSCRIBED_AMP_COMPILE + + @abc.abstractmethod + def upload_lvs_listener_config(self, listener_id): + """Upload the configuration for LVS. + + :param listener_id: The id of a LVS Listener + + :returns: HTTP response with status code. 
+ :raises Exception: If any file or directory is not found or + fails to be created. + + """ + + @abc.abstractmethod + def get_lvs_listener_config(self, listener_id): + """Gets the LVS Listener configuration details + + :param listener_id: the id of the LVS Listener + + :returns: HTTP response with status code. + :raises Exception: If the listener cannot be found. + + """ + + @abc.abstractmethod + def manage_lvs_listener(self, listener_id, action): + """Applies the given action to a LVS Listener + + :param listener_id: the id of the LVS Listener + :param action: the operation type. + + :returns: HTTP response with status code. + :raises Exception: If the listener cannot be found. + + """ + + @abc.abstractmethod + def get_all_lvs_listeners_status(self): + """Gets the status of all LVS Listeners + + This method will not consult the stats socket, + so a listener might show as ACTIVE but still be + in ERROR. + + :returns: a list of LVS Listener statuses + :raises Exception: If the directory containing the listener PID + files does not exist + + """ + + @abc.abstractmethod + def delete_lvs_listener(self, listener_id): + """Deletes a LVS Listener from an amphora + + :param listener_id: The id of the listener + + :returns: HTTP response with status code. + :raises Exception: If the amphora's init system is unsupported. + + """ diff --git a/octavia/amphorae/backends/agent/api_server/osutils.py b/octavia/amphorae/backends/agent/api_server/osutils.py new file mode 100644 index 0000000000..ad4577046a --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/osutils.py @@ -0,0 +1,138 @@ +# Copyright 2017 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
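The osutils module that follows resolves an OS-specific helper class by matching distro.id() against every transitive subclass of BaseOS. A hedged sketch of how a new distribution would slot in; the Alma class and the 'almalinux' id are purely hypothetical and not part of this change:

    # Hypothetical: a subclass is all that is needed, because
    # BaseOS.get_os_util() discovers it through _get_subclasses().
    class Alma(RH):
        @classmethod
        def is_os_name(cls, os_name):
            return os_name == 'almalinux'

    # With distro.id() == 'almalinux', osutils.BaseOS.get_os_util()
    # would then return Alma('almalinux').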
+ +import subprocess + +import distro +from oslo_config import cfg +from oslo_log import log as logging +import webob +from werkzeug import exceptions + +from octavia.amphorae.backends.utils import interface_file +from octavia.common import constants as consts +from octavia.common import exceptions as octavia_exceptions + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + + +class BaseOS: + + def __init__(self, os_name): + self.os_name = os_name + self.package_name_map = {} + + @classmethod + def _get_subclasses(cls): + for subclass in cls.__subclasses__(): + yield from subclass._get_subclasses() + yield subclass + + @classmethod + def get_os_util(cls): + os_name = distro.id() + for subclass in cls._get_subclasses(): + if subclass.is_os_name(os_name): + return subclass(os_name) + raise octavia_exceptions.InvalidAmphoraOperatingSystem(os_name=os_name) + + def _map_package_name(self, package_name): + return self.package_name_map.get(package_name, package_name) + + def write_interface_file(self, interface, ip_address, prefixlen): + interface = interface_file.InterfaceFile( + name=interface, + if_type=consts.LO, + addresses=[{ + "address": ip_address, + "prefixlen": prefixlen + }] + ) + interface.write() + + def write_vip_interface_file(self, interface, vips, mtu, vrrp_info, + fixed_ips=None, is_sriov=False): + vip_interface = interface_file.VIPInterfaceFile( + name=interface, + mtu=mtu, + vips=vips, + vrrp_info=vrrp_info, + fixed_ips=fixed_ips, + topology=CONF.controller_worker.loadbalancer_topology, + is_sriov=is_sriov) + vip_interface.write() + + def write_port_interface_file(self, interface, fixed_ips, mtu, + is_sriov=False): + port_interface = interface_file.PortInterfaceFile( + name=interface, + mtu=mtu, + fixed_ips=fixed_ips, + is_sriov=is_sriov) + port_interface.write() + + @classmethod + def bring_interface_up(cls, interface, name): + cmd = ("ip netns exec {ns} amphora-interface up {params}".format( + ns=consts.AMPHORA_NAMESPACE, params=interface)) + LOG.debug("Executing: %s", cmd) + try: + out = subprocess.check_output(cmd.split(), + stderr=subprocess.STDOUT, + encoding='utf-8') + for line in out.split('\n'): + LOG.debug(line) + except subprocess.CalledProcessError as e: + LOG.error('Failed to set up %s due to error: %s %s', interface, + e, e.output) + raise exceptions.HTTPException( + response=webob.Response(json={ + 'message': f'Error plugging {name}', + 'details': e.output}, status=500)) + + +class Ubuntu(BaseOS): + + @classmethod + def is_os_name(cls, os_name): + return os_name in ['ubuntu', 'debian'] + + def cmd_get_version_of_installed_package(self, package_name): + name = self._map_package_name(package_name) + return f"dpkg-query -W -f=${{Version}} {name}" + + +class RH(BaseOS): + + @classmethod + def is_os_name(cls, os_name): + return os_name in ['fedora', 'rhel', 'rocky'] + + def cmd_get_version_of_installed_package(self, package_name): + name = self._map_package_name(package_name) + return f"rpm -q --queryformat %{{VERSION}} {name}" + + +class CentOS(RH): + + def __init__(self, os_name): + super().__init__(os_name) + if distro.version() == '7': + self.package_name_map.update({'haproxy': 'haproxy18'}) + + @classmethod + def is_os_name(cls, os_name): + return os_name in ['centos'] diff --git a/octavia/amphorae/backends/agent/api_server/plug.py b/octavia/amphorae/backends/agent/api_server/plug.py new file mode 100644 index 0000000000..7cad0482d1 --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/plug.py @@ -0,0 +1,305 @@ +# Copyright 2015 Hewlett-Packard 
Development Company, L.P. +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ipaddress +import itertools +import os +import socket +import stat + +from oslo_config import cfg +from oslo_log import log as logging +import pyroute2 +import webob +from werkzeug import exceptions + +from octavia.amphorae.backends.agent.api_server import util +from octavia.common import constants as consts + + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + + +class Plug: + def __init__(self, osutils): + self._osutils = osutils + + def plug_lo(self): + self._osutils.write_interface_file( + interface="lo", + ip_address="127.0.0.1", + prefixlen=8) + + def render_vips(self, vips): + rendered_vips = [] + for vip in vips: + ip_address = ipaddress.ip_address(vip['ip_address']) + subnet_cidr = ipaddress.ip_network(vip['subnet_cidr']) + prefixlen = subnet_cidr.prefixlen + host_routes = vip['host_routes'] + gateway = vip['gateway'] + rendered_vips.append({ + 'ip_address': ip_address.exploded, + 'ip_version': ip_address.version, + 'gateway': gateway, + 'host_routes': host_routes, + 'prefixlen': prefixlen + }) + return rendered_vips + + def build_vrrp_info(self, vrrp_ip, subnet_cidr, gateway, host_routes): + vrrp_info = {} + if vrrp_ip: + ip_address = ipaddress.ip_address(vrrp_ip) + subnet_cidr = ipaddress.ip_network(subnet_cidr) + prefixlen = subnet_cidr.prefixlen + vrrp_info.update({ + 'ip': ip_address.exploded, + 'ip_version': ip_address.version, + 'gateway': gateway, + 'host_routes': host_routes, + 'prefixlen': prefixlen + }) + return vrrp_info + + def plug_vip(self, vip, subnet_cidr, gateway, + mac_address, mtu=None, vrrp_ip=None, host_routes=(), + additional_vips=(), is_sriov=False): + vips = [{ + 'ip_address': vip, + 'subnet_cidr': subnet_cidr, + 'gateway': gateway, + 'host_routes': host_routes + }] + list(additional_vips) + + try: + rendered_vips = self.render_vips(vips) + except ValueError as e: + vip_error_message = f"Invalid VIP: {e}" + return webob.Response(json={'message': vip_error_message}, + status=400) + + try: + vrrp_info = self.build_vrrp_info(vrrp_ip, subnet_cidr, + gateway, host_routes) + except ValueError as e: + return webob.Response( + json={'message': f"Invalid VRRP Address: {e}"}, + status=400) + + # Check if the interface is already in the network namespace + # Do not attempt to re-plug the VIP if it is already in the + # network namespace + if self._netns_interface_exists(mac_address): + return webob.Response( + json={'message': "Interface already exists"}, status=409) + + # Check that the interface has been fully plugged + self._interface_by_mac(mac_address) + + # Always put the VIP interface as eth1 + primary_interface = consts.NETNS_PRIMARY_INTERFACE + + self._osutils.write_vip_interface_file( + interface=primary_interface, + vips=rendered_vips, + mtu=mtu, + vrrp_info=vrrp_info, + is_sriov=is_sriov) + + # Update the list of interfaces to add to the namespace + # This is used in the amphora reboot case to re-establish the namespace + 
self._update_plugged_interfaces_file(primary_interface, mac_address) + + with pyroute2.IPRoute() as ipr: + # Move the interfaces into the namespace + idx = ipr.link_lookup(address=mac_address)[0] + ipr.link('set', index=idx, net_ns_fd=consts.AMPHORA_NAMESPACE, + IFLA_IFNAME=primary_interface) + + # bring interfaces up + self._osutils.bring_interface_up(primary_interface, 'VIP') + + vip_message = "VIPs plugged on interface {interface}: {vips}".format( + interface=primary_interface, + vips=", ".join(v['ip_address'] for v in rendered_vips) + ) + + return webob.Response(json={ + 'message': "OK", + 'details': vip_message}, status=202) + + def _check_ip_addresses(self, fixed_ips): + if fixed_ips: + for ip in fixed_ips: + try: + socket.inet_pton(socket.AF_INET, ip.get('ip_address')) + except OSError: + socket.inet_pton(socket.AF_INET6, ip.get('ip_address')) + + def plug_network(self, mac_address, fixed_ips, mtu=None, + vip_net_info=None, is_sriov=False): + try: + self._check_ip_addresses(fixed_ips=fixed_ips) + except OSError: + return webob.Response(json={ + 'message': "Invalid network port"}, status=400) + + # Check if the interface is already in the network namespace + # Do not attempt to re-plug the network if it is already in the + # network namespace, just ensure all fixed_ips are up + if self._netns_interface_exists(mac_address): + # Get the existing interface name and path + existing_interface = self._netns_interface_by_mac(mac_address) + + # If we have net_info, this is the special case of plugging a new + # subnet on the vrrp port, which is essentially a re-vip-plug + if vip_net_info: + vrrp_ip = vip_net_info.get('vrrp_ip') + subnet_cidr = vip_net_info['subnet_cidr'] + gateway = vip_net_info['gateway'] + host_routes = vip_net_info.get('host_routes', []) + + vips = [{ + 'ip_address': vip_net_info['vip'], + 'subnet_cidr': subnet_cidr, + 'gateway': gateway, + 'host_routes': host_routes + }] + vip_net_info.get('additional_vips', []) + rendered_vips = self.render_vips(vips) + vrrp_info = self.build_vrrp_info(vrrp_ip, subnet_cidr, + gateway, host_routes) + + self._osutils.write_vip_interface_file( + interface=existing_interface, + vips=rendered_vips, + mtu=mtu, + vrrp_info=vrrp_info, + fixed_ips=fixed_ips, + is_sriov=is_sriov) + self._osutils.bring_interface_up(existing_interface, 'vip') + # Otherwise, we are just plugging a run-of-the-mill network + else: + # Write an updated config + self._osutils.write_port_interface_file( + interface=existing_interface, + fixed_ips=fixed_ips, + mtu=mtu, is_sriov=is_sriov) + self._osutils.bring_interface_up(existing_interface, 'network') + + util.send_member_advertisements(fixed_ips) + return webob.Response(json={ + 'message': "OK", + 'details': "Updated existing interface {interface}".format( + # TODO(rm_work): Everything in this should probably use + # HTTP code 200, but continuing to use 202 for consistency. 
+ interface=existing_interface)}, status=202) + + # This is the interface as it was initially plugged into the + # default network namespace; this will likely always be eth1 + default_netns_interface = self._interface_by_mac(mac_address) + + # We need to determine the interface name when inside the namespace + # to avoid name conflicts + netns_interface = self._netns_get_next_interface() + + LOG.info('Plugged interface %s will become %s in the namespace %s', + default_netns_interface, netns_interface, + consts.AMPHORA_NAMESPACE) + self._osutils.write_port_interface_file( + interface=netns_interface, + fixed_ips=fixed_ips, + mtu=mtu, is_sriov=is_sriov) + + # Update the list of interfaces to add to the namespace + self._update_plugged_interfaces_file(netns_interface, mac_address) + + with pyroute2.IPRoute() as ipr: + # Move the interfaces into the namespace + idx = ipr.link_lookup(address=mac_address)[0] + ipr.link('set', index=idx, + net_ns_fd=consts.AMPHORA_NAMESPACE, + IFLA_IFNAME=netns_interface) + + self._osutils.bring_interface_up(netns_interface, 'network') + util.send_member_advertisements(fixed_ips) + + return webob.Response(json={ + 'message': "OK", + 'details': f"Plugged on interface {netns_interface}"}, status=202) + + def _interface_by_mac(self, mac): + try: + with pyroute2.IPRoute() as ipr: + idx = ipr.link_lookup(address=mac)[0] + # Workaround for https://github.com/PyCQA/pylint/issues/8497 + # pylint: disable=E1136, E1121, E1133 + addr = ipr.get_links(idx)[0] + for attr in addr['attrs']: + if attr[0] == consts.IFLA_IFNAME: + return attr[1] + except Exception as e: + LOG.info('Unable to find interface with MAC: %s, rescanning ' + 'and returning 404. Reported error: %s', mac, str(e)) + + # Poke the kernel to re-enumerate the PCI bus. + # We have had cases where nova hot plugs the interface but + # the kernel doesn't get the memo.
+ filename = '/sys/bus/pci/rescan' + flags = os.O_WRONLY + if os.path.isfile(filename): + with os.fdopen(os.open(filename, flags), 'w') as rescan_file: + rescan_file.write('1') + raise exceptions.HTTPException( + response=webob.Response(json={ + 'details': "No suitable network interface found"}, status=404)) + + def _update_plugged_interfaces_file(self, interface, mac_address): + # write interfaces to plugged_interfaces file and prevent duplicates + plug_inf_file = consts.PLUGGED_INTERFACES + flags = os.O_RDWR | os.O_CREAT + # mode 0644 + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + with os.fdopen(os.open(plug_inf_file, flags, mode), 'r+') as text_file: + inf_list = [inf.split()[0].rstrip() for inf in text_file] + if mac_address not in inf_list: + text_file.write(f"{mac_address} {interface}\n") + + def _netns_interface_by_mac(self, mac_address): + with pyroute2.NetNS(consts.AMPHORA_NAMESPACE, + flags=os.O_CREAT) as netns: + for link in netns.get_links(): + attr_dict = dict(link['attrs']) + if attr_dict.get(consts.IFLA_ADDRESS) == mac_address: + return attr_dict.get(consts.IFLA_IFNAME) + return None + + def _netns_interface_exists(self, mac_address): + return self._netns_interface_by_mac(mac_address) is not None + + def _netns_get_next_interface(self): + with pyroute2.NetNS(consts.AMPHORA_NAMESPACE, + flags=os.O_CREAT) as netns: + existing_ifaces = [ + dict(link['attrs']).get(consts.IFLA_IFNAME) + for link in netns.get_links()] + # find the first unused ethXXX + for idx in itertools.count(start=2): + iface_name = f"eth{idx}" + if iface_name not in existing_ifaces: + break + return iface_name diff --git a/octavia/amphorae/backends/agent/api_server/rules_schema.py b/octavia/amphorae/backends/agent/api_server/rules_schema.py new file mode 100644 index 0000000000..57ad8fe02d --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/rules_schema.py @@ -0,0 +1,52 @@ +# Copyright 2024 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from octavia_lib.common import constants as lib_consts + +from octavia.common import constants as consts + +# This is a JSON schema validation dictionary +# https://json-schema.org/latest/json-schema-validation.html + +SUPPORTED_RULES_SCHEMA = { + '$schema': 'http://json-schema.org/draft-07/schema#', + 'title': 'Octavia Amphora NFTables Rules Schema', + 'description': 'This schema is used to validate an nftables rules JSON ' + 'document sent from a controller.', + 'type': 'array', + 'items': { + 'additionalProperties': False, + 'properties': { + consts.PROTOCOL: { + 'type': 'string', + 'description': 'The protocol for the rule. One of: ' + 'TCP, UDP, VRRP, SCTP', + 'enum': list((lib_consts.PROTOCOL_SCTP, + lib_consts.PROTOCOL_TCP, + lib_consts.PROTOCOL_UDP, + consts.VRRP)) + }, + consts.CIDR: { + 'type': ['string', 'null'], + 'description': 'The allowed source CIDR.' + }, + consts.PORT: { + 'type': 'number', + 'description': 'The protocol port number.', + 'minimum': 1, + 'maximum': 65535 + } + }, + 'required': [consts.PROTOCOL, consts.CIDR, consts.PORT] + } +}
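A hedged usage sketch for this schema with the jsonschema library (server.py's set_interface_rules() performs the same validation). It assumes consts.PROTOCOL, consts.CIDR and consts.PORT resolve to the keys 'protocol', 'cidr' and 'port'; the payload values are examples only:

    from jsonschema import ValidationError, validate

    rules = [
        {'protocol': 'TCP', 'cidr': '192.0.2.0/24', 'port': 443},
        {'protocol': 'UDP', 'cidr': None, 'port': 53},  # null CIDR is allowed
    ]
    try:
        validate(rules, SUPPORTED_RULES_SCHEMA)
    except ValidationError as err:
        print(f'Rejected rules document: {err.message}')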
diff --git a/octavia/amphorae/backends/agent/api_server/server.py b/octavia/amphorae/backends/agent/api_server/server.py new file mode 100644 index 0000000000..b46eabe3ee --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/server.py @@ -0,0 +1,289 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import stat + +import flask +from jsonschema import validate +from oslo_config import cfg +from oslo_log import log as logging +import webob +from werkzeug import exceptions + +from octavia.amphorae.backends.agent import api_server +from octavia.amphorae.backends.agent.api_server import amphora_info +from octavia.amphorae.backends.agent.api_server import certificate_update +from octavia.amphorae.backends.agent.api_server import keepalived +from octavia.amphorae.backends.agent.api_server import keepalivedlvs +from octavia.amphorae.backends.agent.api_server import loadbalancer +from octavia.amphorae.backends.agent.api_server import osutils +from octavia.amphorae.backends.agent.api_server import plug +from octavia.amphorae.backends.agent.api_server import rules_schema +from octavia.amphorae.backends.agent.api_server import util +from octavia.amphorae.backends.utils import nftable_utils +from octavia.common import constants as consts + + +BUFFER = 1024 +CONF = cfg.CONF +PATH_PREFIX = '/' + api_server.VERSION +LOG = logging.getLogger(__name__) + + +# make the error pages all json +def make_json_error(ex): + code = ex.code if isinstance(ex, exceptions.HTTPException) else 500 + response = webob.Response(json={'error': str(ex), 'http_code': code}) + response.status_code = code + return response + + +def register_app_error_handler(app): + for code in exceptions.default_exceptions: + app.register_error_handler(code, make_json_error) + + +class Server: + def __init__(self, hm_queue): + self.app = flask.Flask(__name__) + self._osutils = osutils.BaseOS.get_os_util() + self._keepalived = keepalived.Keepalived() + self._loadbalancer = loadbalancer.Loadbalancer() + self._lvs_listener = keepalivedlvs.KeepalivedLvs() + self._plug = plug.Plug(self._osutils) + self._amphora_info = amphora_info.AmphoraInfo(self._osutils) + self._hm_queue = hm_queue + + register_app_error_handler(self.app) + + self._plug.plug_lo() + + self.app.add_url_rule(rule='/', view_func=self.version_discovery, + methods=['GET']) + self.app.add_url_rule(rule=PATH_PREFIX + + '/loadbalancer/<amphora_id>/<lb_id>/haproxy', + view_func=self.upload_haproxy_config, + methods=['PUT']) + # TODO(gthiemonge) rename 'udp_listener' endpoint to 'lvs_listener' + # when api_version is bumped + self.app.add_url_rule(rule=PATH_PREFIX + + '/listeners/<amphora_id>/<listener_id>' + '/udp_listener', + view_func=self.upload_lvs_listener_config, + methods=['PUT']) + self.app.add_url_rule(rule=PATH_PREFIX + + '/loadbalancer/<lb_id>/haproxy', + view_func=self.get_haproxy_config, + methods=['GET']) + # TODO(gthiemonge) rename 'udp_listener' endpoint to 'lvs_listener' + # when api_version is bumped + self.app.add_url_rule(rule=PATH_PREFIX + + '/listeners/<listener_id>/udp_listener', + view_func=self.get_lvs_listener_config, + methods=['GET']) + self.app.add_url_rule(rule=PATH_PREFIX + + '/loadbalancer/<object_id>/<action>', + view_func=self.start_stop_lb_object, + methods=['PUT']) + self.app.add_url_rule(rule=PATH_PREFIX + '/listeners/<object_id>', + view_func=self.delete_lb_object, + methods=['DELETE']) + self.app.add_url_rule(rule=PATH_PREFIX + '/config', + view_func=self.upload_config, + methods=['PUT']) + self.app.add_url_rule(rule=PATH_PREFIX + '/details', + view_func=self.get_details, + methods=['GET']) + self.app.add_url_rule(rule=PATH_PREFIX + '/info', + view_func=self.get_info, + methods=['GET']) + self.app.add_url_rule(rule=PATH_PREFIX + '/listeners', + view_func=self.get_all_listeners_status, + methods=['GET']) + self.app.add_url_rule(rule=PATH_PREFIX + '/loadbalancer/' + '<lb_id>/certificates/<filename>', + view_func=self.upload_certificate, + methods=['PUT']) + self.app.add_url_rule(rule=PATH_PREFIX + '/loadbalancer/' + '<lb_id>/certificates/<filename>', + view_func=self.get_certificate_md5, + methods=['GET']) + self.app.add_url_rule(rule=PATH_PREFIX + '/loadbalancer/' + '<lb_id>/certificates/<filename>', + view_func=self.delete_certificate, + methods=['DELETE']) + self.app.add_url_rule(rule=PATH_PREFIX + '/plug/vip/<vip>', + view_func=self.plug_vip, + methods=['POST']) + self.app.add_url_rule(rule=PATH_PREFIX + '/plug/network', + view_func=self.plug_network, + methods=['POST']) + self.app.add_url_rule(rule=PATH_PREFIX + '/certificate', + view_func=self.upload_cert, methods=['PUT']) + self.app.add_url_rule(rule=PATH_PREFIX + '/vrrp/upload', + view_func=self.upload_vrrp_config, + methods=['PUT']) + self.app.add_url_rule(rule=PATH_PREFIX + '/vrrp/<action>', + view_func=self.manage_service_vrrp, + methods=['PUT']) + self.app.add_url_rule(rule=PATH_PREFIX + '/interface/<ip_addr>', + view_func=self.get_interface, + methods=['GET']) + self.app.add_url_rule(rule=PATH_PREFIX + '/interface/<ip_addr>/rules', + view_func=self.set_interface_rules, + methods=['PUT']) + + def upload_haproxy_config(self, amphora_id, lb_id): + return self._loadbalancer.upload_haproxy_config(amphora_id, lb_id) + + def upload_lvs_listener_config(self, amphora_id, listener_id): + return self._lvs_listener.upload_lvs_listener_config(listener_id) + + def get_haproxy_config(self, lb_id): + return self._loadbalancer.get_haproxy_config(lb_id) + + def get_lvs_listener_config(self, listener_id): + return self._lvs_listener.get_lvs_listener_config(listener_id) + + def start_stop_lb_object(self, object_id, action): + backend = util.get_backend_for_lb_object(object_id) + if backend == consts.LVS_BACKEND: + return self._lvs_listener.manage_lvs_listener( + listener_id=object_id, action=action) + return self._loadbalancer.start_stop_lb(lb_id=object_id, action=action) + + def delete_lb_object(self, object_id): + backend = util.get_backend_for_lb_object(object_id) + if backend == consts.LVS_BACKEND: + return self._lvs_listener.delete_lvs_listener(object_id) + return self._loadbalancer.delete_lb(object_id) + + def get_details(self): + return self._amphora_info.compile_amphora_details( + extend_lvs_driver=self._lvs_listener) + + def get_info(self): + return self._amphora_info.compile_amphora_info( + extend_lvs_driver=self._lvs_listener) + + def get_all_listeners_status(self): + lvs_listeners = self._lvs_listener.get_all_lvs_listeners_status() + return self._loadbalancer.get_all_listeners_status( + other_listeners=lvs_listeners)
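For orientation, these endpoints are consumed controller-side over mutually-authenticated TLS by octavia's amphora REST API driver. A rough hand-rolled equivalent with requests, where the address, port, version prefix and certificate paths are all illustrative assumptions:

    import requests

    base = 'https://198.51.100.10:9443/1.0'  # amphora address + assumed prefix
    resp = requests.get(
        f'{base}/listeners',
        verify='/etc/octavia/certs/server_ca.pem',  # CA for the agent's cert
        cert=('/etc/octavia/certs/client.pem',      # controller client cert
              '/etc/octavia/certs/client.key'))     # and key
    print(resp.json())  # e.g. [{'status': 'ACTIVE', 'uuid': '...', 'type': 'HTTP'}]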
+ + def upload_certificate(self, lb_id, filename): + return self._loadbalancer.upload_certificate(lb_id, filename) + + def get_certificate_md5(self, lb_id, filename): + return self._loadbalancer.get_certificate_md5(lb_id, filename) + + def delete_certificate(self, lb_id, filename): + return self._loadbalancer.delete_certificate(lb_id, filename) + + def plug_vip(self, vip): + # Catch any issues with the subnet info json + try: + net_info = flask.request.get_json() + assert type(net_info) is dict + assert 'subnet_cidr' in net_info + assert 'gateway' in net_info + assert 'mac_address' in net_info + except Exception as e: + raise exceptions.BadRequest( + description='Invalid subnet information') from e + return self._plug.plug_vip(vip, + net_info['subnet_cidr'], + net_info['gateway'], + net_info['mac_address'], + net_info.get('mtu'), + net_info.get('vrrp_ip'), + net_info.get('host_routes', ()), + net_info.get('additional_vips', ()), + net_info.get('is_sriov', False)) + + def plug_network(self): + try: + port_info = flask.request.get_json() + assert type(port_info) is dict + assert 'mac_address' in port_info + except Exception as e: + raise exceptions.BadRequest( + description='Invalid port information') from e + return self._plug.plug_network(port_info['mac_address'], + port_info.get('fixed_ips'), + port_info.get('mtu'), + port_info.get('vip_net_info'), + port_info.get('is_sriov')) + + def upload_cert(self): + return certificate_update.upload_server_cert() + + def upload_vrrp_config(self): + return self._keepalived.upload_keepalived_config() + + def manage_service_vrrp(self, action): + return self._keepalived.manager_keepalived_service(action) + + def get_interface(self, ip_addr): + return self._amphora_info.get_interface(ip_addr) + + def upload_config(self): + try: + stream = flask.request.stream + file_path = cfg.find_config_files(project=CONF.project, + prog=CONF.prog)[0] + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + # mode 00600 + mode = stat.S_IRUSR | stat.S_IWUSR + with os.fdopen(os.open(file_path, flags, mode), 'wb') as cfg_file: + b = stream.read(BUFFER) + while b: + cfg_file.write(b) + b = stream.read(BUFFER) + + CONF.mutate_config_files() + # Signal to the health manager process to reload its configuration + self._hm_queue.put('reload') + except Exception as e: + LOG.error("Unable to update amphora-agent configuration: %s", + str(e)) + return webob.Response(json={ + 'message': "Unable to update amphora-agent configuration.", + 'details': str(e)}, status=500) + + return webob.Response(json={'message': 'OK'}, status=202) + + def version_discovery(self): + return webob.Response(json={'api_version': api_server.VERSION}) + + def set_interface_rules(self, ip_addr): + interface_webob = self._amphora_info.get_interface(ip_addr) + + if interface_webob.status_code != 200: + return interface_webob + interface = interface_webob.json['interface'] + + try: + rules_info = flask.request.get_json() + validate(rules_info, rules_schema.SUPPORTED_RULES_SCHEMA) + except Exception as e: + raise exceptions.BadRequest( + description='Invalid rules information') from e + + nftable_utils.write_nftable_rules_file(interface, rules_info) + + nftable_utils.load_nftables_file() + + return webob.Response(json={'message': 'OK'}, status=200) diff --git a/octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 b/octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 new file mode 100644 index
0000000000..ce5f8c7096 --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 @@ -0,0 +1,34 @@ +[Unit] +Description=Configure {{ amphora_nsname }} network namespace +StopWhenUnneeded=true + +[Service] +Type=oneshot +RemainAfterExit=yes + +# Re-add the namespace +ExecStart=-/sbin/ip netns add {{ amphora_nsname }} +# Load the system sysctl into the new namespace +ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl --system +# Enable kernel module ip_vs for lvs function in amphora network namespace +ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} modprobe ip_vs +# Set nf_conntrack_buckets sysctl in the main namespace (nf_conntrack_buckets +# cannot be set in another net namespace, but its value is inherited from the +# main namespace) +ExecStart=-/sbin/sysctl -w net.netfilter.nf_conntrack_buckets=125000 +# Update conntrack table sizes using the formula for the default values +ExecStart=-/sbin/sysctl -w net.netfilter.nf_conntrack_max=125000 +ExecStart=-/sbin/sysctl -w net.netfilter.nf_conntrack_expect_max=488 +# Enable ip_forward and conntrack kernel configuration +ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl -w net.ipv4.ip_forward=1 +ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl -w net.ipv4.vs.conntrack=1 +ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl -w net.ipv6.conf.all.forwarding=1 +# Ensure the connection cache is flushed immediately on real server removal +ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl -w net.ipv4.vs.expire_nodest_conn=1 +ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl -w net.ipv4.vs.expire_quiescent_template=1 +# We need the plugged_interfaces file sorted to join the host interfaces +ExecStart=-/bin/sh -c '/usr/bin/sort -k 1 /var/lib/octavia/plugged_interfaces > /var/lib/octavia/plugged_interfaces.sorted' +# Assign the interfaces into the namespace with the appropriate name +ExecStart=-/bin/sh -c '/sbin/ip link | awk \'{getline n; print $0,n}\' | awk \'{sub(":","",$2)} { for(i=1;i<=NF;i++) if ($i == "link/ether") {print $(i+1) " " $2} }\' | sort -k 1 | join -j 1 - /var/lib/octavia/plugged_interfaces.sorted | awk \'{system("ip link set "$2" netns {{ amphora_nsname }} name "$3"")}\'' +# Bring up all of the namespace interfaces +ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} amphora-interface up all diff --git a/octavia/amphorae/backends/agent/api_server/templates/keepalived.systemd.j2 b/octavia/amphorae/backends/agent/api_server/templates/keepalived.systemd.j2 new file mode 100644 index 0000000000..de9b1ea06c --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/templates/keepalived.systemd.j2 @@ -0,0 +1,21 @@ +[Unit] +Description=Keepalive Daemon (LVS and VRRP) +After=network-online.target {{ amphora_netns }}.service +Wants=network-online.target +Requires={{ amphora_netns }}.service + +[Service] +# Force context as we start keepalived under "ip netns exec" +SELinuxContext=system_u:system_r:keepalived_t:s0 +Type=forking +KillMode=process +{% if vrrp_pid and check_pid %} +ExecStart=/sbin/ip netns exec {{ amphora_nsname }} {{ keepalived_cmd }} --log-facility={{ administrative_log_facility }} -f {{ keepalived_cfg }} -p {{ keepalived_pid }} -r {{ vrrp_pid }} -c {{ check_pid }} +{% else %} +ExecStart=/sbin/ip netns exec {{ amphora_nsname }} {{ keepalived_cmd }} --log-facility={{ administrative_log_facility }} -f {{ keepalived_cfg }} -p {{ keepalived_pid }} +{% endif %} +ExecReload=/bin/kill -HUP $MAINPID +PIDFile={{ keepalived_pid }} + +[Install] 
+WantedBy=multi-user.target diff --git a/octavia/amphorae/backends/agent/api_server/templates/keepalived_check_script.conf.j2 b/octavia/amphorae/backends/agent/api_server/templates/keepalived_check_script.conf.j2 new file mode 100644 index 0000000000..50728cd56a --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/templates/keepalived_check_script.conf.j2 @@ -0,0 +1,29 @@ +{# +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +-#} +#!/bin/bash + +# Don't try to run the directory when it is empty +shopt -s nullglob + +status=0 +for file in {{ check_scripts_dir }}/* +do + echo "Running check script: " $file + bash $file + status=$(( $status + $? )) +done +exit $status diff --git a/octavia/amphorae/backends/agent/api_server/templates/keepalived_lvs_check_script.sh.j2 b/octavia/amphorae/backends/agent/api_server/templates/keepalived_lvs_check_script.sh.j2 new file mode 100644 index 0000000000..0c42dd1fc3 --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/templates/keepalived_lvs_check_script.sh.j2 @@ -0,0 +1,17 @@ +#!/bin/bash + +# Don't try to run the directory when it is empty +shopt -s nullglob + +status=0 +for file in {{ keepalived_lvs_pid_dir }}/* +do + file_ext=${file#*.} + case $file_ext in + pid) echo "Check keepalived pid file: " $file;; + *) continue;; + esac + systemctl status $(basename $file .pid) > /dev/null + status=$(( $status + $? 
)) +done +exit $status diff --git a/octavia/amphorae/backends/agent/api_server/templates/systemd.conf.j2 b/octavia/amphorae/backends/agent/api_server/templates/systemd.conf.j2 new file mode 100644 index 0000000000..545486e3a7 --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/templates/systemd.conf.j2 @@ -0,0 +1,36 @@ +[Unit] +Description=HAProxy Load Balancer +After=network.target syslog.service {{ amphora_netns }}.service +Before=octavia-keepalived.service +Wants=syslog.service +Requires={{ amphora_netns }}.service + +[Service] +# Force context as we start haproxy under "ip netns exec" +SELinuxContext=system_u:system_r:haproxy_t:s0 + +Environment="CONFIG={{ haproxy_cfg }}" "USERCONFIG={{ haproxy_user_group_cfg }}" "PIDFILE={{ haproxy_pid }}" + +ExecStartPre={{ haproxy_cmd }} -f $CONFIG -f $USERCONFIG -c -q -L {{ peer_name }} + +ExecReload={{ haproxy_cmd }} -c -f $CONFIG -f $USERCONFIG -L {{ peer_name }} +ExecReload=/bin/kill -USR2 $MAINPID + +{%- if haproxy_major_version < 2 and haproxy_minor_version < 8 %} + +ExecStart=/sbin/ip netns exec {{ amphora_nsname }} {{ haproxy_cmd }}-systemd-wrapper -f $CONFIG -f $USERCONFIG -p $PIDFILE -L {{ peer_name }} + +{%- else %} + +ExecStart=/sbin/ip netns exec {{ amphora_nsname }} {{ haproxy_cmd }} -Ws -f $CONFIG -f $USERCONFIG -p $PIDFILE -L {{ peer_name }} + +Type=notify + +{%- endif %} + +KillMode=mixed +Restart=always +LimitNOFILE=2600000 + +[Install] +WantedBy=multi-user.target diff --git a/octavia/amphorae/backends/agent/api_server/util.py b/octavia/amphorae/backends/agent/api_server/util.py new file mode 100644 index 0000000000..77d2d8b4af --- /dev/null +++ b/octavia/amphorae/backends/agent/api_server/util.py @@ -0,0 +1,422 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
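The path helpers defined in this module compose everything under CONF.haproxy_amphora.base_path. Assuming the default base_path of /var/lib/octavia, they resolve roughly as follows (illustrative, not exhaustive):

    # haproxy_dir(lb_id)        -> /var/lib/octavia/<lb_id>/
    # config_path(lb_id)        -> /var/lib/octavia/<lb_id>/haproxy.cfg
    # pid_path(lb_id)           -> /var/lib/octavia/<lb_id>/<lb_id>.pid
    # state_file_path(lb_id)    -> /var/lib/octavia/<lb_id>/servers-state
    # haproxy_sock_path(lb_id)  -> /var/lib/octavia/<lb_id>.sock
    # keepalived_lvs_cfg_path(listener_id)
    #     -> /var/lib/octavia/lvs/octavia-keepalivedlvs-<listener_id>.conf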
+ + +import os +import re +import stat +import subprocess +import typing as tp + +import jinja2 +from oslo_config import cfg +from oslo_log import log as logging + +from octavia.amphorae.backends.utils import ip_advertisement +from octavia.amphorae.backends.utils import network_utils +from octavia.common import constants as consts + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + +FRONTEND_BACKEND_PATTERN = re.compile(r'\n(frontend|backend)\s+(\S+)\n') +LISTENER_MODE_PATTERN = re.compile(r'^\s+mode\s+(.*)$', re.MULTILINE) +TLS_CERT_PATTERN = re.compile(r'^\s+bind\s+\S+\s+ssl crt-list\s+(\S*)', + re.MULTILINE) +STATS_SOCKET_PATTERN = re.compile(r'stats socket\s+(\S+)') + + +class ParsingError(Exception): + pass + + +def init_path(lb_id): + return os.path.join(consts.SYSTEMD_DIR, f'haproxy-{lb_id}.service') + + +def keepalived_lvs_dir(): + return os.path.join(CONF.haproxy_amphora.base_path, 'lvs') + + +def keepalived_lvs_init_path(listener_id): + return os.path.join(consts.SYSTEMD_DIR, + consts.KEEPALIVEDLVS_SYSTEMD % + str(listener_id)) + + +def keepalived_backend_check_script_dir(): + return os.path.join(CONF.haproxy_amphora.base_path, 'lvs/check/') + + +def keepalived_backend_check_script_path(): + return os.path.join(keepalived_backend_check_script_dir(), + 'udp_check.sh') + + +def keepalived_lvs_pids_path(listener_id): + pids_path = {} + for file_ext in ['pid', 'vrrp.pid', 'check.pid']: + pids_path[file_ext] = ( + os.path.join(CONF.haproxy_amphora.base_path, + f"lvs/octavia-keepalivedlvs-{str(listener_id)}." + f"{file_ext}")) + return pids_path['pid'], pids_path['vrrp.pid'], pids_path['check.pid'] + + +def keepalived_lvs_cfg_path(listener_id): + return os.path.join(CONF.haproxy_amphora.base_path, + f"lvs/octavia-keepalivedlvs-{str(listener_id)}.conf") + + +def haproxy_dir(lb_id): + return os.path.join(CONF.haproxy_amphora.base_path, lb_id) + + +def pid_path(lb_id): + return os.path.join(haproxy_dir(lb_id), lb_id + '.pid') + + +def config_path(lb_id): + return os.path.join(haproxy_dir(lb_id), 'haproxy.cfg') + + +def state_file_path(lb_id): + return os.path.join(haproxy_dir(lb_id), 'servers-state') + + +def get_haproxy_pid(lb_id): + with open(pid_path(lb_id), encoding='utf-8') as f: + return f.readline().rstrip() + + +def get_keepalivedlvs_pid(listener_id): + pid_file = keepalived_lvs_pids_path(listener_id)[0] + with open(pid_file, encoding='utf-8') as f: + return f.readline().rstrip() + + +def haproxy_sock_path(lb_id): + return os.path.join(CONF.haproxy_amphora.base_path, lb_id + '.sock') + + +def haproxy_check_script_path(): + return os.path.join(keepalived_check_scripts_dir(), + 'haproxy_check_script.sh') + + +def keepalived_dir(): + return os.path.join(CONF.haproxy_amphora.base_path, 'vrrp') + + +def keepalived_init_path(): + return os.path.join(consts.SYSTEMD_DIR, consts.KEEPALIVED_SYSTEMD) + + +def keepalived_pid_path(): + return os.path.join(CONF.haproxy_amphora.base_path, + 'vrrp/octavia-keepalived.pid') + + +def keepalived_cfg_path(): + return os.path.join(CONF.haproxy_amphora.base_path, + 'vrrp/octavia-keepalived.conf') + + +def keepalived_log_path(): + return os.path.join(CONF.haproxy_amphora.base_path, + 'vrrp/octavia-keepalived.log') + + +def keepalived_check_scripts_dir(): + return os.path.join(CONF.haproxy_amphora.base_path, + 'vrrp/check_scripts') + + +def keepalived_check_script_path(): + return os.path.join(CONF.haproxy_amphora.base_path, + 'vrrp/check_script.sh') + + +def get_listeners(): + """Get Listeners + + :returns: An array with the ids of all listeners, 
e.g. ['123', '456', ...] + or [] if no listeners exist + """ + listeners = [] + for lb_id in get_loadbalancers(): + listeners_on_lb = parse_haproxy_file(lb_id)[1] + listeners.extend(list(listeners_on_lb.keys())) + return listeners + + +def get_loadbalancers(): + """Get Load balancers + + :returns: An array with the uuids of all load balancers, + e.g. ['123', '456', ...] or [] if no loadbalancers exist + """ + if os.path.exists(CONF.haproxy_amphora.base_path): + return [f for f in os.listdir(CONF.haproxy_amphora.base_path) + if os.path.exists(config_path(f))] + return [] + + +def is_lb_running(lb_id): + return os.path.exists(pid_path(lb_id)) and os.path.exists( + os.path.join('/proc', get_haproxy_pid(lb_id))) + + +def get_lvs_listeners(): + result = [] + if os.path.exists(keepalived_lvs_dir()): + for f in os.listdir(keepalived_lvs_dir()): + if f.endswith('.conf'): + prefix = f.split('.')[0] + if re.search("octavia-keepalivedlvs-", prefix): + result.append(f.split( + 'octavia-keepalivedlvs-')[1].split('.')[0]) + return result + + +def is_lvs_listener_running(listener_id): + pid_file = keepalived_lvs_pids_path(listener_id)[0] + return os.path.exists(pid_file) and os.path.exists( + os.path.join('/proc', get_keepalivedlvs_pid(listener_id))) + + +def install_netns_systemd_service(): + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + # mode 00644 + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + # TODO(bcafarel): implement this for other init systems + # netns handling depends on a separate unit file + netns_path = os.path.join(consts.SYSTEMD_DIR, + consts.AMP_NETNS_SVC_PREFIX + '.service') + + jinja_env = jinja2.Environment( + autoescape=True, loader=jinja2.FileSystemLoader(os.path.dirname( + os.path.realpath(__file__) + ) + consts.AGENT_API_TEMPLATES)) + + if not os.path.exists(netns_path): + with os.fdopen(os.open(netns_path, flags, mode), 'w') as text_file: + text = jinja_env.get_template( + consts.AMP_NETNS_SVC_PREFIX + '.systemd.j2').render( + amphora_nsname=consts.AMPHORA_NAMESPACE) + text_file.write(text) + + +def run_systemctl_command(command, service, raise_error=True): + cmd = f"systemctl {command} {service}" + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT, + encoding='utf-8') + except subprocess.CalledProcessError as e: + LOG.debug("Failed to %(cmd)s %(srvc)s service: " + "%(err)s %(out)s", {'cmd': command, 'srvc': service, + 'err': e, 'out': e.output}) + if raise_error: + raise + + +def get_backend_for_lb_object(object_id): + """Returns the backend for a load balancer object. + + If the object is a TCP based listener return 'HAPROXY'. + If the object is a UDP or SCTP based listener return 'LVS'. + If the object is not identifiable, return None. + + :param object_id: The ID of the load balancer object to identify.
+ :returns: HAPROXY_BACKEND, LVS_BACKEND or None + """ + if os.path.exists(config_path(object_id)): + return consts.HAPROXY_BACKEND + if os.path.exists(keepalived_lvs_cfg_path(object_id)): + return consts.LVS_BACKEND + return None + + +def parse_haproxy_file(lb_id): + with open(config_path(lb_id), encoding='utf-8') as file: + cfg = file.read() + + listeners = {} + + m = FRONTEND_BACKEND_PATTERN.split(cfg) + last_token = None + last_id = None + for section in m: + if last_token is None: + # We aren't in a section yet, see if this line starts one + if section == 'frontend': + last_token = section + elif last_token == 'frontend': + # We're in a frontend section, save the id for later + last_token = last_token + "_id" + last_id = section + elif last_token == 'frontend_id': + # We're in a frontend section and already have the id + # Look for the mode + mode_matcher = LISTENER_MODE_PATTERN.search(section) + if not mode_matcher: + raise ParsingError() + listeners[last_id] = { + 'mode': mode_matcher.group(1).upper(), + } + # Now see if this is a TLS frontend + tls_matcher = TLS_CERT_PATTERN.search(section) + if tls_matcher: + # TODO(rm_work): Can't we have terminated tcp? + listeners[last_id]['mode'] = 'TERMINATED_HTTPS' + listeners[last_id]['ssl_crt'] = tls_matcher.group(1) + # Clear out the token and id and start over + last_token = last_id = None + + m = STATS_SOCKET_PATTERN.search(cfg) + if not m: + raise ParsingError() + stats_socket = m.group(1) + + return stats_socket, listeners + + +def vrrp_check_script_update(lb_id, action): + os.makedirs(keepalived_dir(), exist_ok=True) + os.makedirs(keepalived_check_scripts_dir(), exist_ok=True) + + lb_ids = get_loadbalancers() + lvs_ids = get_lvs_listeners() + # If no LBs are found, make sure keepalived thinks haproxy is down. + if not lb_ids: + if not lvs_ids: + with open(haproxy_check_script_path(), + 'w', encoding='utf-8') as text_file: + text_file.write('exit 1') + else: + try: + LOG.debug("Attempting to remove old haproxy check script...") + os.remove(haproxy_check_script_path()) + LOG.debug("Finished removing old haproxy check script.") + except FileNotFoundError: + LOG.debug("No haproxy check script to remove.") + return + if action == consts.AMP_ACTION_STOP: + lb_ids.remove(lb_id) + args = [] + for lbid in lb_ids: + args.append(haproxy_sock_path(lbid)) + + cmd = f"haproxy-vrrp-check {' '.join(args)}; exit $?" + with open(haproxy_check_script_path(), 'w', encoding='utf-8') as text_file: + text_file.write(cmd) + + +def get_haproxy_vip_addresses(lb_id): + """Get the VIP addresses for a load balancer. + + :param lb_id: The load balancer ID to get VIP addresses from. + :returns: List of VIP addresses (IPv4 and IPv6) + """ + vips = [] + with open(config_path(lb_id), encoding='utf-8') as file: + for line in file: + current_line = line.strip() + if current_line.startswith('bind'): + for section in current_line.split(' '): + # We will always have a port assigned per the template. + if ':' in section: + if ',' in section: + addr_port = section.rstrip(',') + vips.append(addr_port.rpartition(':')[0]) + else: + vips.append(section.rpartition(':')[0]) + break + return vips + +
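To make the parse_haproxy_file() regex flow concrete, here is a hedged, self-contained demo against an inline sample config (the module-level patterns are repeated for self-containment; the sample text is illustrative, real configs are rendered by the controller's templates):

    import re

    FRONTEND_BACKEND_PATTERN = re.compile(r'\n(frontend|backend)\s+(\S+)\n')
    LISTENER_MODE_PATTERN = re.compile(r'^\s+mode\s+(.*)$', re.MULTILINE)
    STATS_SOCKET_PATTERN = re.compile(r'stats socket\s+(\S+)')

    SAMPLE_CFG = (
        'global\n'
        '    stats socket /var/lib/octavia/LB.sock mode 0666 level user\n'
        '\n'
        'frontend LISTENER-UUID\n'
        '    mode http\n'
        '    bind 203.0.113.5:80\n'
    )

    parts = FRONTEND_BACKEND_PATTERN.split(SAMPLE_CFG)
    # split() keeps both capture groups, so parts is:
    # [<global section>, 'frontend', 'LISTENER-UUID', <frontend body>]
    print(STATS_SOCKET_PATTERN.search(SAMPLE_CFG).group(1))
    # -> /var/lib/octavia/LB.sock
    print(LISTENER_MODE_PATTERN.search(parts[3]).group(1).upper())
    # -> HTTP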
+def get_lvs_vip_addresses(listener_id: str) -> list[str]: + """Get the VIP addresses for a LVS load balancer. + + :param listener_id: The listener ID to get VIP addresses from. + :returns: List of VIP addresses (IPv4 and IPv6) + """ + vips = [] + # Extract the VIP addresses from keepalived configuration + # Format is + # virtual_server_group ipv-group { + # vip_address1 port1 + # vip_address2 port2 + # } + # it can be repeated in case of dual-stack LBs + with open(keepalived_lvs_cfg_path(listener_id), encoding='utf-8') as file: + vsg_section = False + for line in file: + current_line = line.strip() + if vsg_section: + if current_line.startswith('}'): + vsg_section = False + else: + vip_address = current_line.split(' ')[0] + vips.append(vip_address) + elif line.startswith('virtual_server_group '): + vsg_section = True + return vips + + +def send_vip_advertisements(lb_id: tp.Optional[str] = None, + listener_id: tp.Optional[str] = None): + """Sends address advertisements for each load balancer VIP. + + This method will send either GARP (IPv4) or neighbor advertisements (IPv6) + for the VIP addresses on a load balancer. + + :param lb_id: The load balancer ID to send advertisements for. + :param listener_id: The LVS listener ID to send advertisements for, used + when no load balancer ID is given. + :returns: None + """ + try: + if lb_id: + vips = get_haproxy_vip_addresses(lb_id) + else: + vips = get_lvs_vip_addresses(listener_id) + + for vip in vips: + interface = network_utils.get_interface_name( + vip, net_ns=consts.AMPHORA_NAMESPACE) + ip_advertisement.send_ip_advertisement( + interface, vip, net_ns=consts.AMPHORA_NAMESPACE) + except Exception as e: + LOG.debug('Send VIP advertisement failed due to: %s. ' + 'This amphora may not be the MASTER. Ignoring.', str(e)) + + +def send_member_advertisements(fixed_ips: tp.Iterable[tp.Dict[str, str]]): + """Sends advertisements for each fixed IP in a list + + This method will send either GARP (IPv4) or neighbor advertisements (IPv6) + for the addresses of the subnets of the members. + + :param fixed_ips: a list of dicts that contain 'ip_address' elements + :returns: None + """ + try: + for fixed_ip in fixed_ips: + ip_address = fixed_ip[consts.IP_ADDRESS] + interface = network_utils.get_interface_name( + ip_address, net_ns=consts.AMPHORA_NAMESPACE) + ip_advertisement.send_ip_advertisement( + interface, ip_address, net_ns=consts.AMPHORA_NAMESPACE) + except Exception as e: + LOG.debug('Send member advertisement failed due to: %s', str(e)) diff --git a/octavia/amphorae/backends/agent/templates/amphora_agent_conf.template b/octavia/amphorae/backends/agent/templates/amphora_agent_conf.template new file mode 100644 index 0000000000..f92ede8d17 --- /dev/null +++ b/octavia/amphorae/backends/agent/templates/amphora_agent_conf.template @@ -0,0 +1,46 @@ +{# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+#} +[DEFAULT] +debug = {{ debug }} +use_syslog = True +syslog_log_facility = LOG_LOCAL{{ administrative_log_facility }} + +[haproxy_amphora] +base_cert_dir = {{ base_cert_dir }} +base_path = {{ base_path }} +bind_host = {{ bind_host }} +bind_port = {{ bind_port }} +haproxy_cmd = {{ haproxy_cmd }} +user_log_facility = {{ user_log_facility }} +administrative_log_facility = {{ administrative_log_facility }} + +[health_manager] +controller_ip_port_list = {{ controller_list|join(', ') }} +heartbeat_interval = {{ heartbeat_interval }} +heartbeat_key = {{ heartbeat_key }} + +[amphora_agent] +agent_server_ca = {{ agent_server_ca }} +agent_server_cert = {{ agent_server_cert }} +{% if agent_server_network_dir -%} +agent_server_network_dir = {{ agent_server_network_dir }} +{% endif -%} +agent_request_read_timeout = {{ agent_request_read_timeout }} +amphora_id = {{ amphora_id }} +amphora_udp_driver = {{ amphora_udp_driver }} +agent_tls_protocol = {{ agent_tls_protocol }} + +[controller_worker] +loadbalancer_topology = {{ topology }} diff --git a/octavia/amphorae/backends/health_daemon/__init__.py b/octavia/amphorae/backends/health_daemon/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/amphorae/backends/health_daemon/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/amphorae/backends/health_daemon/health_daemon.py b/octavia/amphorae/backends/health_daemon/health_daemon.py new file mode 100644 index 0000000000..9d64e94355 --- /dev/null +++ b/octavia/amphorae/backends/health_daemon/health_daemon.py @@ -0,0 +1,277 @@ +#! /usr/bin/env python + +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import errno +import json +import os +import queue +import stat +import time + +from oslo_config import cfg +from oslo_log import log as logging + +from octavia.amphorae.backends.agent.api_server import util +from octavia.amphorae.backends.health_daemon import health_sender +from octavia.amphorae.backends.utils import haproxy_query +from octavia.amphorae.backends.utils import keepalivedlvs_query + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) +SEQ = 0 +# MSG_VER is an incrementing integer heartbeat message format version +# this allows for backward compatibility when the amphora-agent is older +# than the controller version and the message format has backwards +# incompatible changes. 
+# +# ver 1 - Adds UDP listener status when no pool or members are present +# ver 2 - Switch to all listeners in a single combined haproxy config +# ver 3 - Switch stats reporting to deltas + +MSG_VER = 3 + +DELTA_METRICS = ('bin', 'bout', 'ereq', 'stot') + +# Filesystem persistent counters for statistics deltas +COUNTERS = None +COUNTERS_FILE = None + + +def get_counters_file(): + global COUNTERS_FILE + if COUNTERS_FILE is None: + stats_file_path = os.path.join( + CONF.haproxy_amphora.base_path, "stats_counters.json") + # Open for read+write and create if necessary + flags = os.O_RDWR | os.O_CREAT + # mode 00644 + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP + try: + COUNTERS_FILE = os.fdopen( + os.open(stats_file_path, flags, mode), 'r+') + except OSError: + LOG.info("Failed to open `%s`, ignoring...", stats_file_path) + COUNTERS_FILE.seek(0) + return COUNTERS_FILE + + +def get_counters(): + global COUNTERS + if COUNTERS is None: + try: + COUNTERS = json.load(get_counters_file()) or {} + except (json.JSONDecodeError, AttributeError): + COUNTERS = {} + return COUNTERS + + +def persist_counters(): + """Attempt to persist the latest statistics values""" + if COUNTERS is None: + return + try: + stats = json.dumps(COUNTERS) + counters_file = get_counters_file() + counters_file.truncate(0) + counters_file.write(stats) + counters_file.flush() + except (OSError, AttributeError): + LOG.warning("Couldn't persist statistics counter file!") + + +def list_sock_stat_files(hadir=None): + stat_sock_files = {} + if hadir is None: + hadir = CONF.haproxy_amphora.base_path + lb_ids = util.get_loadbalancers() + for lb_id in lb_ids: + sock_file = lb_id + ".sock" + stat_sock_files[lb_id] = os.path.join(hadir, sock_file) + return stat_sock_files + + +def run_sender(cmd_queue): + LOG.info('Health Manager Sender starting.') + sender = health_sender.UDPStatusSender() + + keepalived_cfg_path = util.keepalived_cfg_path() + keepalived_pid_path = util.keepalived_pid_path() + + while True: + try: + # If the keepalived config file is present check + # that it is running, otherwise don't send the health + # heartbeat + if os.path.isfile(keepalived_cfg_path): + # Is there a pid file for keepalived? + with open(keepalived_pid_path, encoding='utf-8') as pid_file: + pid = int(pid_file.readline()) + os.kill(pid, 0) + + message = build_stats_message() + sender.dosend(message) + except OSError as e: + if e.errno == errno.ENOENT: + # Missing PID file, skip health heartbeat. + LOG.error('Missing keepalived PID file %s, skipping health ' + 'heartbeat.', keepalived_pid_path) + elif e.errno == errno.ESRCH: + # Keepalived is not running, skip health heartbeat. 
+ LOG.error('Keepalived is configured but not running, ' + 'skipping health heartbeat.') + else: + LOG.exception('Failed to check keepalived and haproxy status ' + 'due to exception %s, skipping health ' + 'heartbeat.', str(e)) + except Exception as e: + LOG.exception('Failed to check keepalived and haproxy status due ' + 'to exception %s, skipping health heartbeat.', + str(e)) + + try: + cmd = cmd_queue.get_nowait() + if cmd == 'reload': + LOG.info('Reloading configuration') + CONF.reload_config_files() + elif cmd == 'shutdown': + LOG.info('Health Manager Sender shutting down.') + break + except queue.Empty: + pass + time.sleep(CONF.health_manager.heartbeat_interval) + + +def get_stats(stat_sock_file): + try: + stats_query = haproxy_query.HAProxyQuery(stat_sock_file) + stats = stats_query.show_stat() + pool_status = stats_query.get_pool_status() + except Exception as e: + LOG.warning('Unable to query the HAProxy stats (%s) due to: %s', + stat_sock_file, str(e)) + # Return empty lists so that the heartbeat will still be sent + return [], {} + return stats, pool_status + + +def calculate_stats_deltas(listener_id, row): + counters = get_counters() + listener_counters = counters.get(listener_id, {}) + counters[listener_id] = listener_counters + + delta_values = {} + for metric_key in DELTA_METRICS: + current_value = int(row[metric_key]) + # Get existing counter for our metrics + last_value = listener_counters.get(metric_key, 0) + # Store the new absolute value + listener_counters[metric_key] = current_value + # Calculate a delta for each metric + delta = current_value - last_value + # Did HAProxy restart or reset counters? + if delta < 0: + delta = current_value # If so, reset ours. + delta_values[metric_key] = delta + + return delta_values + + +def build_stats_message(): + """Build a stats message based on retrieved listener statistics. 
+
+    Example version 3 message without UDP (note that values are deltas,
+    not absolutes)::
+
+        {"id": "<amphora_id>",
+         "seq": 67,
+         "listeners": {
+           "<listener_id>": {
+             "status": "OPEN",
+             "stats": {
+               "tx": 0,
+               "rx": 0,
+               "conns": 0,
+               "totconns": 0,
+               "ereq": 0
+             }
+           }
+         },
+         "pools": {
+           "<pool_id>:<listener_id>": {
+             "status": "UP",
+             "members": {
+               "<member_id>": "no check"
+             }
+           }
+         },
+         "ver": 3
+        }
+    """
+    global SEQ
+    msg = {'id': CONF.amphora_agent.amphora_id,
+           'seq': SEQ, 'listeners': {}, 'pools': {},
+           'ver': MSG_VER}
+    SEQ += 1
+    stat_sock_files = list_sock_stat_files()
+    # TODO(rm_work) There should only be one of these in the new config system
+    for lb_id, stat_sock_file in stat_sock_files.items():
+        if util.is_lb_running(lb_id):
+            (stats, pool_status) = get_stats(stat_sock_file)
+            for row in stats:
+                if row['svname'] == 'FRONTEND':
+                    listener_id = row['pxname']
+                    delta_values = calculate_stats_deltas(listener_id, row)
+                    msg['listeners'][listener_id] = {
+                        'status': row['status'],
+                        'stats': {'tx': delta_values['bout'],
+                                  'rx': delta_values['bin'],
+                                  'conns': int(row['scur']),
+                                  'totconns': delta_values['stot'],
+                                  'ereq': delta_values['ereq']}}
+            for pool_id, pool in pool_status.items():
+                msg['pools'][pool_id] = {"status": pool['status'],
+                                         "members": pool['members']}
+
+    # UDP listener part
+    lvs_listener_ids = util.get_lvs_listeners()
+    if lvs_listener_ids:
+        listeners_stats = keepalivedlvs_query.get_lvs_listeners_stats()
+        if listeners_stats:
+            for listener_id, listener_stats in listeners_stats.items():
+                delta_values = calculate_stats_deltas(
+                    listener_id, listener_stats['stats'])
+                pool_status = (
+                    keepalivedlvs_query.get_lvs_listener_pool_status(
+                        listener_id))
+                lvs_listener_dict = {}
+                lvs_listener_dict['status'] = listener_stats['status']
+                lvs_listener_dict['stats'] = {
+                    'tx': delta_values['bout'],
+                    'rx': delta_values['bin'],
+                    'conns': listener_stats['stats']['scur'],
+                    'totconns': delta_values['stot'],
+                    'ereq': delta_values['ereq']
+                }
+                if pool_status:
+                    pool_id = pool_status['lvs']['uuid']
+                    msg['pools'][pool_id] = {
+                        "status": pool_status['lvs']['status'],
+                        "members": pool_status['lvs']['members']
+                    }
+                msg['listeners'][listener_id] = lvs_listener_dict
+    persist_counters()
+    return msg
diff --git a/octavia/amphorae/backends/health_daemon/health_sender.py b/octavia/amphorae/backends/health_daemon/health_sender.py
new file mode 100644
index 0000000000..8bc61eb59d
--- /dev/null
+++ b/octavia/amphorae/backends/health_daemon/health_sender.py
@@ -0,0 +1,92 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
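The counter handling in calculate_stats_deltas() above reduces to one small
rule; a minimal sketch (illustrative function name, not part of the module)::

    def stat_delta(last, current):
        # A negative delta means HAProxy restarted and reset its counters;
        # report the current absolute value so no traffic goes missing.
        delta = current - last
        return current if delta < 0 else delta

    assert stat_delta(100, 130) == 30  # steady state: report only the delta
    assert stat_delta(100, 20) == 20   # counter reset: report the absolute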
+ +import socket + +from oslo_config import cfg +from oslo_log import log as logging + +from octavia.amphorae.backends.health_daemon import status_message + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def round_robin_addr(addrinfo_list): + if not addrinfo_list: + return None + addrinfo = addrinfo_list.pop(0) + addrinfo_list.append(addrinfo) + return addrinfo + + +class UDPStatusSender: + def __init__(self): + self._update_dests() + self.v4sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.v6sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) + + def update(self, dest, port): + addrlist = socket.getaddrinfo(dest, port, 0, socket.SOCK_DGRAM) + # addrlist = [(family, socktype, proto, canonname, sockaddr) ...] + # e.g. 4 = sockaddr - what we actually need + for addr in addrlist: + self.dests.append(addr) # Just grab the first match + break + + def _send_msg(self, dest, msg): + # Note: heartbeat_key is mutable and must be looked up for each call + envelope_str = status_message.wrap_envelope( + msg, str(CONF.health_manager.heartbeat_key)) + # dest = (family, socktype, proto, canonname, sockaddr) + # e.g. 0 = sock family, 4 = sockaddr - what we actually need + try: + if dest[0] == socket.AF_INET: + self.v4sock.sendto(envelope_str, dest[4]) + elif dest[0] == socket.AF_INET6: + self.v6sock.sendto(envelope_str, dest[4]) + except OSError: + # Pass here as on amp boot it will get one or more + # error: [Errno 101] Network is unreachable + # while the networks are coming up + # No harm in trying to send as it will still failover + # if the message isn't received + pass + + # The controller_ip_port_list configuration has mutated, reload it. + def _update_dests(self): + self.dests = [] + for ipport in CONF.health_manager.controller_ip_port_list: + try: + ip, port = ipport.rsplit(':', 1) + if ip and ip[0] == '[' and ip[-1] == ']': + ip = ip[1:-1] + except ValueError: + LOG.error("Invalid ip and port '%s' in health_manager " + "controller_ip_port_list", ipport) + break + self.update(ip, port) + self.current_controller_ip_port_list = ( + CONF.health_manager.controller_ip_port_list) + + def dosend(self, obj): + # Check for controller_ip_port_list mutation + if not (self.current_controller_ip_port_list == + CONF.health_manager.controller_ip_port_list): + self._update_dests() + dest = round_robin_addr(self.dests) + if dest is None: + LOG.error('No controller address found. Unable to send heartbeat.') + return + self._send_msg(dest, obj) diff --git a/octavia/amphorae/backends/health_daemon/status_message.py b/octavia/amphorae/backends/health_daemon/status_message.py new file mode 100644 index 0000000000..bb4000c31c --- /dev/null +++ b/octavia/amphorae/backends/health_daemon/status_message.py @@ -0,0 +1,96 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
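The helpers below define the heartbeat wire format: zlib-compressed JSON
followed by an HMAC-SHA256 digest. A usage sketch (the key here is an example
value; the real one comes from CONF.health_manager.heartbeat_key)::

    msg = {'id': 'amphora-example', 'seq': 1, 'ver': 3}
    envelope = wrap_envelope(msg, 'example-key')
    assert unwrap_envelope(envelope, 'example-key') == msg
    # With a wrong key, unwrap_envelope() raises
    # exceptions.InvalidHMACException and the receiver drops the packet.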
+ +import binascii +import hashlib +import hmac +import zlib + +from oslo_log import log as logging +from oslo_serialization import jsonutils + +from octavia.common import exceptions + +LOG = logging.getLogger(__name__) + +hash_algo = hashlib.sha256 +hash_len = 32 +hex_hash_len = 64 + + +def to_hex(byte_array): + return binascii.hexlify(byte_array).decode() + + +def encode_obj(obj): + json_bytes = jsonutils.dumps(obj).encode('utf-8') + binary_array = zlib.compress(json_bytes, 9) + return binary_array + + +def decode_obj(binary_array): + json_str = zlib.decompress(binary_array).decode('utf-8') + obj = jsonutils.loads(json_str) + return obj + + +def wrap_envelope(obj, key, hex=True): + payload = encode_obj(obj) + hmc = get_hmac(payload, key, hex=hex) + envelope = payload + hmc + return envelope + + +def unwrap_envelope(envelope, key): + """A backward-compatible way to get data. + + We may still receive package from amphorae that are using digest() instead + of hexdigest() + """ + try: + return get_payload(envelope, key, hex=True) + except Exception: + return get_payload(envelope, key, hex=False) + + +def get_payload(envelope, key, hex=True): + len = hex_hash_len if hex else hash_len + payload = envelope[:-len] + expected_hmc = envelope[-len:] + calculated_hmc = get_hmac(payload, key, hex=hex) + if not hmac.compare_digest(expected_hmc, calculated_hmc): + LOG.warning( + 'calculated hmac(hex=%(hex)s): %(s1)s not equal to msg hmac: ' + '%(s2)s dropping packet', + { + 'hex': hex, + 's1': to_hex(calculated_hmc), + 's2': to_hex(expected_hmc) + } + ) + fmt = 'calculated hmac: {0} not equal to msg hmac: {1} dropping packet' + raise exceptions.InvalidHMACException(fmt.format( + to_hex(calculated_hmc), to_hex(expected_hmc))) + obj = decode_obj(payload) + return obj + + +def get_hmac(payload, key, hex=True): + """Get digest for the payload. + + The hex param is for backward compatibility, so the package data sent from + the existing amphorae can still be checked in the previous approach. + """ + hmc = hmac.new(key.encode("utf-8"), payload, hashlib.sha256) + return hmc.hexdigest().encode("utf-8") if hex else hmc.digest() diff --git a/octavia/amphorae/backends/utils/__init__.py b/octavia/amphorae/backends/utils/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/amphorae/backends/utils/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/amphorae/backends/utils/haproxy_query.py b/octavia/amphorae/backends/utils/haproxy_query.py new file mode 100644 index 0000000000..e2433add51 --- /dev/null +++ b/octavia/amphorae/backends/utils/haproxy_query.py @@ -0,0 +1,166 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import csv +import socket + +from oslo_log import log as logging + +from octavia.common import constants as consts +from octavia.common import utils as octavia_utils +from octavia.i18n import _ + +LOG = logging.getLogger(__name__) + + +class HAProxyQuery: + """Class used for querying the HAProxy statistics socket. + + The CSV output is defined in the HAProxy documentation: + + http://cbonte.github.io/haproxy-dconv/configuration-1.4.html#9 + """ + + def __init__(self, stats_socket): + """Initialize the class + + :param stats_socket: Path to the HAProxy statistics socket file. + """ + + self.socket = stats_socket + + def _query(self, query): + """Send the given query to the haproxy statistics socket. + + :returns: the output of a successful query as a string with trailing + newlines removed, or raise an Exception if the query fails. + """ + + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + + try: + sock.connect(self.socket) + except OSError as e: + raise Exception( + _("HAProxy '{0}' query failed.").format(query)) from e + + try: + sock.send(octavia_utils.b(query + '\n')) + data = '' + while True: + x = sock.recv(1024) + if not x: + break + data += x.decode('ascii') if ( + isinstance(x, bytes)) else x + return data.rstrip() + finally: + sock.close() + + def show_info(self): + """Get and parse output from 'show info' command.""" + results = self._query('show info') + + dict_results = {} + for r in results.split('\n'): + vals = r.split(":", 1) + dict_results[vals[0].strip()] = vals[1].strip() + return dict_results + + def show_stat(self, proxy_iid=-1, object_type=-1, server_id=-1): + """Get and parse output from 'show stat' command. + + :param proxy_iid: Proxy ID (column 27 in CSV output). -1 for all. + :param object_type: Select the type of dumpable object. Values can + be ORed. + -1 - everything + 1 - frontends + 2 - backends + 4 - servers + :param server_id: Server ID (column 28 in CSV output?), or -1 + for everything. + :returns: stats (split into an array by newline) + + """ + + results = self._query( + f'show stat {proxy_iid} {object_type} {server_id}') + list_results = results[2:].split('\n') + csv_reader = csv.DictReader(list_results) + stats_list = list(csv_reader) + # We don't want to report the internal prometheus proxy stats + # up to the control plane as it shouldn't be billed traffic + return [stat for stat in stats_list + if "prometheus" not in stat['pxname']] + + def get_pool_status(self): + """Get status for each server and the pool as a whole. 
+
+        :returns: pool data structure
+                  {<pool_name>: {
+                      'uuid': <pool_id>,
+                      'status': 'UP'|'DOWN',
+                      'members': {<member_name>:
+                                  'UP'|'DOWN'|'DRAIN'|'no check'}}}
+        """
+
+        results = self.show_stat(object_type=6)  # servers + pool
+
+        final_results = {}
+        for line in results:
+            # pxname: pool, svname: server_name, status: status
+
+            # We don't want to report the internal prometheus proxy stats
+            # up to health manager as it shouldn't be billed traffic
+            if 'prometheus' in line['pxname']:
+                continue
+
+            if line['pxname'] not in final_results:
+                final_results[line['pxname']] = {'members': {}}
+
+            if line['svname'] == 'BACKEND':
+                # BACKEND describes a pool of servers in HAProxy
+                pool_id, listener_id = line['pxname'].split(':')
+                final_results[line['pxname']]['pool_uuid'] = pool_id
+                final_results[line['pxname']]['listener_uuid'] = listener_id
+                final_results[line['pxname']]['status'] = line['status']
+            else:
+                # Due to a bug in some versions of HAProxy, DRAIN mode isn't
+                # calculated correctly, but we can spoof the correct
+                # value here.
+                if line['status'] == consts.UP and line['weight'] == '0':
+                    line['status'] = consts.DRAIN
+
+                final_results[line['pxname']]['members'][line['svname']] = (
+                    line['status'])
+        return final_results
+
+    def save_state(self, state_file_path):
+        """Save haproxy connection state to a file.
+
+        :param state_file_path: Absolute path to the state file
+
+        :returns: bool (True if success, False otherwise)
+        """
+
+        try:
+            result = self._query('show servers state')
+            # No need for binary mode, the _query converts bytes to ascii.
+            with open(state_file_path, 'w', encoding='utf-8') as fh:
+                fh.write(result + "\n")
+            return True
+        except Exception as e:
+            # Catch any exception - may be socket issue, or write permission
+            # issue as well.
+            LOG.warning("Unable to save state: %r", e)
+            return False
diff --git a/octavia/amphorae/backends/utils/interface.py b/octavia/amphorae/backends/utils/interface.py
new file mode 100644
index 0000000000..fc029f8aa7
--- /dev/null
+++ b/octavia/amphorae/backends/utils/interface.py
@@ -0,0 +1,422 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
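For reference, a usage sketch of HAProxyQuery above (the socket path is
hypothetical; real paths are the per-load-balancer .sock files under
base_path)::

    query = HAProxyQuery('/var/lib/octavia/example-lb-id.sock')
    info = query.show_info()                    # e.g. info['Version']
    frontends = query.show_stat(object_type=1)  # frontend CSV rows as dicts
    pools = query.get_pool_status()   # keyed by '<pool_id>:<listener_id>'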
+ +import errno +import ipaddress +import os +import socket +import subprocess +import time + +from oslo_config import cfg +from oslo_log import log as logging +import pyroute2 +# pylint: disable=no-name-in-module +from pyroute2.netlink.rtnl import ifaddrmsg +# pylint: disable=no-name-in-module +from pyroute2.netlink.rtnl import rt_proto + +from octavia.amphorae.backends.utils import interface_file +from octavia.amphorae.backends.utils import nftable_utils +from octavia.common import constants as consts +from octavia.common import exceptions + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + + +class InterfaceController: + ADD = 'add' + DELETE = 'delete' + SET = 'set' + FLUSH = 'flush' + + TENTATIVE_WAIT_INTERVAL = .2 + TENTATIVE_WAIT_TIMEOUT = 30 + + def interface_file_list(self): + net_dir = interface_file.InterfaceFile.get_directory() + + for f in os.listdir(net_dir): + for ext in interface_file.InterfaceFile.get_extensions(): + if f.endswith(ext): + yield os.path.join(net_dir, f) + + def list(self): + interfaces = {} + for f in self.interface_file_list(): + iface = interface_file.InterfaceFile.from_file(f) + interfaces[iface.name] = iface + return interfaces + + def _family(self, address): + return (socket.AF_INET6 + if ipaddress.ip_network(address, strict=False).version == 6 + else socket.AF_INET) + + def _ipr_command(self, method, *args, + retry_on_invalid_argument=False, + retry_interval=.2, + raise_on_error=True, + max_retries=20, + **kwargs): + + for dummy in range(max_retries + 1): + try: + method(*args, **kwargs) + break + except pyroute2.NetlinkError as e: + if e.code == errno.EINVAL and retry_on_invalid_argument: + LOG.debug("Retrying after %f sec.", retry_interval) + time.sleep(retry_interval) + continue + + if args: + command = args[0] + if command == self.ADD and e.code != errno.EEXIST: + args_str = ', '.join(str(a) for a in args) + kwargs_str = ', '.join( + f'{k}={v}' for k, v in kwargs.items() + ) + msg = (f"Cannot call {method.__name__} {command} " + f"with ({args_str}, {kwargs_str}): {e}") + if raise_on_error: + raise exceptions.AmphoraNetworkConfigException(msg) + LOG.error(msg) + return + else: + msg = "Cannot call {} {} (with {}) after {} retries.".format( + method.__name__, args, kwargs, max_retries) + if raise_on_error: + raise exceptions.AmphoraNetworkConfigException(msg) + LOG.error(msg) + + def _dhclient_up(self, interface_name): + cmd = ["/sbin/dhclient", + "-lf", + f"/var/lib/dhclient/dhclient-{interface_name}.leases", + "-pf", + f"/run/dhclient-{interface_name}.pid", + interface_name] + LOG.debug("Running '%s'", cmd) + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + + def _dhclient_down(self, interface_name): + cmd = ["/sbin/dhclient", + "-r", + "-lf", + f"/var/lib/dhclient/dhclient-{interface_name}.leases", + "-pf", + f"/run/dhclient-{interface_name}.pid", + interface_name] + LOG.debug("Running '%s'", cmd) + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + + def _ipv6auto_up(self, interface_name): + # Set values to enable SLAAC on interface_name + # accept_ra is set to 2 to accept router advertisements if forwarding + # is enabled on the interface + for key, value in (('accept_ra', 2), + ('autoconf', 1)): + cmd = ["/sbin/sysctl", + "-w", + f"net.ipv6.conf.{interface_name}.{key}={value}"] + LOG.debug("Running '%s'", cmd) + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + + def _ipv6auto_down(self, interface_name): + for key, value in (('accept_ra', 0), + ('autoconf', 0)): + cmd = ["/sbin/sysctl", + "-w", + 
f"net.ipv6.conf.{interface_name}.{key}={value}"] + LOG.debug("Running '%s'", cmd) + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + + def _wait_tentative(self, ipr, idx): + start = time.time() + while time.time() - start < self.TENTATIVE_WAIT_TIMEOUT: + addrs = ipr.get_addr(idx) + has_tentative = [ + True + for addr in addrs + if (addr[consts.FAMILY] == socket.AF_INET6 and + addr['flags'] & ifaddrmsg.IFA_F_TENTATIVE)] + if not has_tentative: + return + time.sleep(self.TENTATIVE_WAIT_INTERVAL) + LOG.warning("Some IPV6 addresses remain still in 'tentative' state " + "after %d seconds.", self.TENTATIVE_WAIT_TIMEOUT) + + def _normalize_ip_address(self, address): + if not address: + return None + ip_address = ipaddress.ip_address(address) + return ip_address.compressed + + def _normalize_ip_network(self, address): + if not address: + return None + ip_network = ipaddress.ip_network(address, strict=False) + return ip_network.compressed + + def up(self, interface): + LOG.info("Setting interface %s up", interface.name) + + if interface.is_sriov: + nftable_utils.write_nftable_rules_file(interface.name, []) + nftable_utils.load_nftables_file() + + with pyroute2.IPRoute() as ipr: + idx = ipr.link_lookup(ifname=interface.name)[0] + + # Workaround for https://github.com/PyCQA/pylint/issues/8497 + # pylint: disable=E1136, E1121 + link = ipr.get_links(idx)[0] + current_state = link.get(consts.STATE) + + if current_state == consts.IFACE_DOWN: + self._ipr_command(ipr.link, self.SET, index=idx, + state=consts.IFACE_UP, mtu=interface.mtu) + for address in interface.addresses: + if address.get(consts.DHCP): + self._dhclient_up(interface.name) + if address.get(consts.IPV6AUTO): + self._ipv6auto_up(interface.name) + + self._addresses_up(interface, ipr, idx) + self._routes_up(interface, ipr, idx) + # only the vip port updates the rules + if interface.if_type == consts.VIP: + self._rules_up(interface, ipr, idx) + + self._scripts_up(interface, current_state) + + def _addresses_up(self, interface, ipr, idx): + # Get existing addresses, this list is used to delete removed addresses + current_addresses = [] + for addr in ipr.get_addr(index=idx): + attrs = dict(addr['attrs']) + # Skip non-static (ex: dynamic) addresses + if not attrs['IFA_FLAGS'] & ifaddrmsg.IFA_F_PERMANENT: + continue + + key = (self._normalize_ip_address(attrs['IFA_ADDRESS']), + addr[consts.PREFIXLEN]) + current_addresses.append(key) + + # Add new addresses + for address in interface.addresses: + if (consts.ADDRESS not in address or + address.get(consts.DHCP) or + address.get(consts.IPV6AUTO)): + continue + key = (self._normalize_ip_address(address.get(consts.ADDRESS)), + address.get(consts.PREFIXLEN)) + if key in current_addresses: + current_addresses.remove(key) + elif address.get(consts.OCTAVIA_OWNED, True): + # By default all addresses are managed/owned by Octavia + address[consts.FAMILY] = self._family( + address[consts.ADDRESS]) + LOG.debug("%s: Adding address %s", interface.name, + address) + self._ipr_command(ipr.addr, self.ADD, index=idx, **address) + + self._wait_tentative(ipr, idx) + + # Remove unused addresses + for addr, prefixlen in current_addresses: + address = { + consts.ADDRESS: addr, + consts.PREFIXLEN: prefixlen, + consts.FAMILY: self._family(addr) + } + LOG.debug("%s: Deleting address %s", interface.name, + address) + self._ipr_command(ipr.addr, self.DELETE, index=idx, + **address) + + def _routes_up(self, interface, ipr, idx): + # Get existing routes, this list will be used to remove old/unused + # routes + 
current_routes = [] + for route in ipr.get_routes(oif=idx): + # We only consider 'static' routes (routes that are added by + # octavia-interface), we don't update kernel or ra routes. + if route['proto'] != rt_proto['static']: + continue + + attrs = dict(route['attrs']) + family = route[consts.FAMILY] + # Disabling B104: hardcoded_bind_all_interfaces + dst = attrs.get( + 'RTA_DST', + '0.0.0.0' if family == socket.AF_INET else '::') # nosec + + key = (f"{self._normalize_ip_address(dst)}/" + f"{route.get('dst_len', 0)}", + self._normalize_ip_address(attrs.get('RTA_GATEWAY')), + self._normalize_ip_address(attrs.get('RTA_PREFSRC')), + attrs.get('RTA_TABLE')) + current_routes.append(key) + + # Add new routes + for route in interface.routes: + key = (self._normalize_ip_network(route.get(consts.DST)), + self._normalize_ip_address(route.get(consts.GATEWAY)), + self._normalize_ip_address(route.get(consts.PREFSRC)), + route.get(consts.TABLE, 254)) + if key in current_routes: + # Route is already there, we want to keep it, remove it from + # the list of routes to delete + current_routes.remove(key) + else: + route[consts.FAMILY] = self._family(route[consts.DST]) + LOG.debug("%s: Adding route %s", interface.name, route) + # Set retry_on_invalid_argument=True because the interface + # might not be ready after setting its addresses + # Set raise_on_error to False, possible invalid + # (user-defined) routes from the subnet's host_routes will + # not break the script. + self._ipr_command(ipr.route, self.ADD, + retry_on_invalid_argument=True, + raise_on_error=False, + oif=idx, **route) + + # Delete unused routes (only 'static' routes are considered, we only + # delete routes we have previously added) + for r in current_routes: + route = {consts.DST: r[0], + consts.GATEWAY: r[1], + consts.PREFSRC: r[2], + consts.TABLE: r[3], + consts.FAMILY: self._family(r[0])} + + LOG.debug("%s: Deleting route %s", interface.name, route) + self._ipr_command(ipr.route, self.DELETE, + retry_on_invalid_argument=True, + raise_on_error=False, + oif=idx, **route) + + def _rules_up(self, interface, ipr, idx): + # Get existing rules + current_rules = [] + for rule in ipr.get_rules(): + attrs = dict(rule['attrs']) + if not attrs.get('FRA_SRC'): + continue + + # skip the rules defined by the kernel (FRA_PROTOCOL == 2) or by + # keepalived (FRA_PROTOCOL == 18) + # we only consider removing the rules that we have previously added + if attrs.get('FRA_PROTOCOL') in (2, 18): + continue + + key = (attrs.get('FRA_TABLE'), + self._normalize_ip_address(attrs.get('FRA_SRC')), + rule[consts.SRC_LEN]) + current_rules.append(key) + + # Add new rules + for rule in interface.rules: + key = (rule.get(consts.TABLE, 254), + self._normalize_ip_address(rule.get(consts.SRC)), + rule.get(consts.SRC_LEN)) + if key in current_rules: + current_rules.remove(key) + else: + rule[consts.FAMILY] = self._family(rule[consts.SRC]) + LOG.debug("%s: Adding rule %s", interface.name, rule) + self._ipr_command(ipr.rule, self.ADD, + retry_on_invalid_argument=True, + **rule) + + # Remove old rules + for r in current_rules: + rule = {consts.TABLE: r[0], + consts.SRC: r[1], + consts.SRC_LEN: r[2]} + if rule[consts.SRC]: + rule[consts.FAMILY] = self._family(rule[consts.SRC]) + LOG.debug("%s: Deleting rule %s", interface.name, rule) + self._ipr_command(ipr.rule, self.DELETE, + retry_on_invalid_argument=True, + **rule) + + def _scripts_up(self, interface, current_state): + for script in interface.scripts[consts.IFACE_UP]: + LOG.debug("%s: Running command '%s'", + 
interface.name, script[consts.COMMAND]) + subprocess.check_output(script[consts.COMMAND].split()) + + def down(self, interface): + LOG.info("Setting interface %s down", interface.name) + + for address in interface.addresses: + if address.get(consts.DHCP): + self._dhclient_down(interface.name) + if address.get(consts.IPV6AUTO): + self._ipv6auto_down(interface.name) + + with pyroute2.IPRoute() as ipr: + idx = ipr.link_lookup(ifname=interface.name)[0] + + # Workaround for https://github.com/PyCQA/pylint/issues/8497 + # pylint: disable=E1136, E1121 + link = ipr.get_links(idx)[0] + current_state = link.get(consts.STATE) + + if current_state == consts.IFACE_UP: + # only the vip port updates the rules + if interface.if_type == consts.VIP: + for rule in interface.rules: + rule[consts.FAMILY] = self._family(rule[consts.SRC]) + LOG.debug("%s: Deleting rule %s", interface.name, rule) + self._ipr_command(ipr.rule, self.DELETE, + raise_on_error=False, **rule) + + for route in interface.routes: + route[consts.FAMILY] = self._family(route[consts.DST]) + LOG.debug("%s: Deleting route %s", interface.name, route) + self._ipr_command(ipr.route, self.DELETE, + raise_on_error=False, oif=idx, **route) + + for address in interface.addresses: + if consts.ADDRESS not in address: + continue + address[consts.FAMILY] = self._family( + address[consts.ADDRESS]) + LOG.debug("%s: Deleting address %s", + interface.name, address) + self._ipr_command(ipr.addr, self.DELETE, + raise_on_error=False, + index=idx, **address) + + self._ipr_command(ipr.flush_addr, raise_on_error=False, + index=idx) + + self._ipr_command(ipr.link, self.SET, raise_on_error=False, + index=idx, state=consts.IFACE_DOWN) + + if current_state == consts.IFACE_UP: + for script in interface.scripts[consts.IFACE_DOWN]: + LOG.debug("%s: Running command '%s'", + interface.name, script[consts.COMMAND]) + try: + subprocess.check_output(script[consts.COMMAND].split()) + except Exception as e: + LOG.error("Error while running command '%s' on %s: %s", + script[consts.COMMAND], interface.name, e) diff --git a/octavia/amphorae/backends/utils/interface_file.py b/octavia/amphorae/backends/utils/interface_file.py new file mode 100644 index 0000000000..674281e9ff --- /dev/null +++ b/octavia/amphorae/backends/utils/interface_file.py @@ -0,0 +1,297 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
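The controller above keeps interfaces converged by diffing desired state
(from the JSON files defined below) against what netlink reports: entries
present but unwanted are deleted, entries wanted but missing are added. A
minimal model of that pattern, with illustrative (address, prefixlen) tuples
standing in for netlink objects::

    def reconcile(current, desired):
        """Return (to_add, to_delete) given current and desired sets."""
        current, desired = set(current), set(desired)
        return desired - current, current - desired

    to_add, to_delete = reconcile(
        {('198.51.100.5', 32)},
        {('198.51.100.5', 32), ('2001:db8::5', 128)})
    assert to_add == {('2001:db8::5', 128)}
    assert to_delete == set()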
+ +import ipaddress +import json +import os +import stat + +from oslo_config import cfg + +from octavia.common import constants as consts + +CONF = cfg.CONF + + +class InterfaceFile: + def __init__(self, name, if_type, mtu=None, addresses=None, + routes=None, rules=None, scripts=None, is_sriov=False): + self.name = name + self.if_type = if_type + self.mtu = mtu + self.addresses = addresses or [] + self.routes = routes or [] + self.rules = rules or [] + self.scripts = scripts or { + consts.IFACE_UP: [], + consts.IFACE_DOWN: [] + } + self.is_sriov = is_sriov + + @classmethod + def get_extensions(cls): + return [".json"] + + @classmethod + def load(cls, fp): + return json.load(fp) + + @classmethod + def dump(cls, obj): + return json.dumps(obj) + + @classmethod + def from_file(cls, filename): + with open(filename, encoding='utf-8') as fp: + config = cls.load(fp) + + return InterfaceFile(**config) + + @classmethod + def get_directory(cls): + return (CONF.amphora_agent.agent_server_network_dir or + consts.AMP_NET_DIR_TEMPLATE) + + @classmethod + def get_host_routes(cls, routes, **kwargs): + host_routes = [] + if routes: + for hr in routes: + route = { + consts.DST: hr['destination'], + consts.GATEWAY: hr['nexthop'], + consts.FLAGS: [consts.ONLINK] + } + route.update(kwargs) + host_routes.append(route) + return host_routes + + def write(self): + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + + net_dir = self.get_directory() + + try: + os.makedirs(net_dir) + except OSError: + pass + + interface_file = f"{self.name}.json" + + with os.fdopen(os.open(os.path.join(net_dir, interface_file), + flags, mode), 'w') as fp: + interface = { + consts.NAME: self.name, + consts.IF_TYPE: self.if_type, + consts.ADDRESSES: self.addresses, + consts.ROUTES: self.routes, + consts.RULES: self.rules, + consts.SCRIPTS: self.scripts, + consts.IS_SRIOV: self.is_sriov + } + if self.mtu: + interface[consts.MTU] = self.mtu + fp.write(self.dump(interface)) + + +class VIPInterfaceFile(InterfaceFile): + def __init__(self, name, mtu, vips, vrrp_info, fixed_ips, topology, + is_sriov=False): + + super().__init__(name, if_type=consts.VIP, mtu=mtu, is_sriov=is_sriov) + + has_ipv4 = any(vip['ip_version'] == 4 for vip in vips) + has_ipv6 = any(vip['ip_version'] == 6 for vip in vips) + + if vrrp_info: + self.addresses.append({ + consts.ADDRESS: vrrp_info['ip'], + consts.PREFIXLEN: vrrp_info['prefixlen'] + }) + else: + if has_ipv4: + self.addresses.append({ + consts.DHCP: True + }) + if has_ipv6: + self.addresses.append({ + consts.IPV6AUTO: True + }) + + ip_versions = set() + + for vip in vips: + gateway = vip.get('gateway') + ip_version = vip['ip_version'] + ip_versions.add(ip_version) + + if gateway: + # Add default routes if there's a gateway + self.routes.append({ + consts.DST: ( + "::/0" if ip_version == 6 else "0.0.0.0/0"), + consts.GATEWAY: gateway, + consts.FLAGS: [consts.ONLINK] + }) + if topology != consts.TOPOLOGY_ACTIVE_STANDBY: + self.routes.append({ + consts.DST: ( + "::/0" if ip_version == 6 else "0.0.0.0/0"), + consts.GATEWAY: gateway, + consts.FLAGS: [consts.ONLINK], + consts.TABLE: 1, + }) + + # In ACTIVE_STANDBY topology, keepalived configures the VIP + # address. Keep track of it in the interface file but mark it with + # a special flag so the amphora-interface would not add/delete + # keepalived-maintained things. 
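+            # For illustration (example values): with a single VIP
+            # 203.0.113.10/24 in SINGLE topology, this loop yields
+            #   address  203.0.113.10/32 (OCTAVIA_OWNED=True)
+            #   routes   203.0.113.0/24 scope link in the main table and,
+            #            with prefsrc 203.0.113.10, in table 1
+            #   rule     from 203.0.113.10/32 lookup table 1
+            # In ACTIVE_STANDBY only the main-table link route is added and
+            # the address is recorded with OCTAVIA_OWNED=False, leaving it
+            # to keepalived.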
+ self.addresses.append({ + consts.ADDRESS: vip['ip_address'], + consts.PREFIXLEN: 128 if ip_version == 6 else 32, + # OCTAVIA_OWNED = False when this address is managed by another + # tool (keepalived) + consts.OCTAVIA_OWNED: ( + topology != consts.TOPOLOGY_ACTIVE_STANDBY) + }) + + vip_cidr = ipaddress.ip_network( + f"{vip['ip_address']}/{vip['prefixlen']}", + strict=False) + self.routes.append({ + consts.DST: vip_cidr.exploded, + consts.SCOPE: 'link' + }) + + if topology != consts.TOPOLOGY_ACTIVE_STANDBY: + self.routes.append({ + consts.DST: vip_cidr.exploded, + consts.PREFSRC: vip['ip_address'], + consts.SCOPE: 'link', + consts.TABLE: 1 + }) + self.rules.append({ + consts.SRC: vip['ip_address'], + consts.SRC_LEN: 128 if ip_version == 6 else 32, + consts.TABLE: 1 + }) + + self.routes.extend(self.get_host_routes(vip['host_routes'])) + self.routes.extend(self.get_host_routes(vip['host_routes'], + table=1)) + + for fixed_ip in fixed_ips or (): + ip_addr = fixed_ip['ip_address'] + cidr = fixed_ip['subnet_cidr'] + ip = ipaddress.ip_address(ip_addr) + network = ipaddress.ip_network(cidr) + prefixlen = network.prefixlen + self.addresses.append({ + consts.ADDRESS: fixed_ip['ip_address'], + consts.PREFIXLEN: prefixlen, + }) + + ip_versions.add(ip.version) + + gateway = fixed_ip.get('gateway') + if gateway: + # Add default routes if there's a gateway + self.routes.append({ + consts.DST: ( + "::/0" if ip.version == 6 else "0.0.0.0/0"), + consts.GATEWAY: gateway, + consts.FLAGS: [consts.ONLINK] + }) + if topology != consts.TOPOLOGY_ACTIVE_STANDBY: + self.routes.append({ + consts.DST: ( + "::/0" if ip.version == 6 else "0.0.0.0/0"), + consts.GATEWAY: gateway, + consts.FLAGS: [consts.ONLINK], + consts.TABLE: 1, + }) + + host_routes = self.get_host_routes( + fixed_ip.get('host_routes', [])) + self.routes.extend(host_routes) + + if is_sriov: + sriov_param = ' sriov' + else: + sriov_param = '' + + for ip_v in ip_versions: + self.scripts[consts.IFACE_UP].append({ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add {} {}{}".format( + 'ipv6' if ip_v == 6 else 'ipv4', name, sriov_param)) + }) + self.scripts[consts.IFACE_DOWN].append({ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete {} {}{}".format( + 'ipv6' if ip_v == 6 else 'ipv4', name, sriov_param)) + }) + + +class PortInterfaceFile(InterfaceFile): + def __init__(self, name, mtu, fixed_ips, is_sriov=False): + super().__init__(name, if_type=consts.BACKEND, mtu=mtu, + is_sriov=is_sriov) + + if fixed_ips: + ip_versions = set() + + for fixed_ip in fixed_ips: + ip_addr = fixed_ip['ip_address'] + cidr = fixed_ip['subnet_cidr'] + ip = ipaddress.ip_address(ip_addr) + network = ipaddress.ip_network(cidr) + prefixlen = network.prefixlen + self.addresses.append({ + consts.ADDRESS: fixed_ip['ip_address'], + consts.PREFIXLEN: prefixlen, + }) + + ip_versions.add(ip.version) + + host_routes = self.get_host_routes( + fixed_ip.get('host_routes', [])) + self.routes.extend(host_routes) + else: + ip_versions = {4, 6} + + self.addresses.append({ + consts.DHCP: True, + consts.IPV6AUTO: True + }) + + if is_sriov: + sriov_param = ' sriov' + else: + sriov_param = '' + + for ip_version in ip_versions: + self.scripts[consts.IFACE_UP].append({ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add {} {}{}".format( + 'ipv6' if ip_version == 6 else 'ipv4', name, + sriov_param)) + }) + self.scripts[consts.IFACE_DOWN].append({ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete {} {}{}".format( + 'ipv6' if ip_version == 6 else 'ipv4', name, + 
sriov_param))
+            })
diff --git a/octavia/amphorae/backends/utils/ip_advertisement.py b/octavia/amphorae/backends/utils/ip_advertisement.py
new file mode 100644
index 0000000000..723dae1377
--- /dev/null
+++ b/octavia/amphorae/backends/utils/ip_advertisement.py
@@ -0,0 +1,183 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import fcntl
+import socket
+import struct
+
+from oslo_log import log as logging
+
+from octavia.amphorae.backends.utils import network_namespace
+from octavia.common import constants
+from octavia.common import utils as common_utils
+
+LOG = logging.getLogger(__name__)
+
+
+def garp(interface, ip_address, net_ns=None):
+    """Sends a gratuitous ARP for ip_address on the interface.
+
+    :param interface: The interface name to send the GARP on.
+    :param ip_address: The IP address to advertise in the GARP.
+    :param net_ns: The network namespace to send the GARP from.
+    :returns: None
+    """
+    ARP_ETHERTYPE = 0x0806
+    BROADCAST_MAC = b'\xff\xff\xff\xff\xff\xff'
+
+    # Get a socket, optionally inside a network namespace
+    garp_socket = None
+    if net_ns:
+        with network_namespace.NetworkNamespace(net_ns):
+            garp_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
+    else:
+        garp_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
+
+    # Bind the socket with the ARP ethertype protocol
+    garp_socket.bind((interface, ARP_ETHERTYPE))
+
+    # Get the MAC address of the interface
+    source_mac = garp_socket.getsockname()[4]
+
+    garp_msg = [
+        struct.pack('!h', 1),          # Hardware type ethernet
+        struct.pack('!h', 0x0800),     # Protocol type IPv4
+        struct.pack('!B', 6),          # Hardware size
+        struct.pack('!B', 4),          # Protocol size
+        struct.pack('!h', 1),          # Opcode request
+        source_mac,                    # Sender MAC address
+        socket.inet_aton(ip_address),  # Sender IP address
+        BROADCAST_MAC,                 # Target MAC address
+        socket.inet_aton(ip_address)]  # Target IP address
+
+    garp_ethernet = [
+        BROADCAST_MAC,                     # Ethernet destination
+        source_mac,                        # Ethernet source
+        struct.pack('!h', ARP_ETHERTYPE),  # Ethernet type
+        b''.join(garp_msg)]                # The GARP message
+
+    garp_socket.send(b''.join(garp_ethernet))
+    garp_socket.close()
+
+
+def calculate_icmpv6_checksum(packet):
+    """Calculate the ICMPv6 checksum for a packet.
+
+    :param packet: The packet bytes to checksum.
+    :returns: The checksum integer.
+    """
+    total = 0
+
+    # Add up 16-bit words
+    num_words = len(packet) // 2
+    for chunk in struct.unpack(f"!{num_words}H", packet[0:num_words * 2]):
+        total += chunk
+
+    # Add any left over byte
+    if len(packet) % 2:
+        total += packet[-1] << 8
+
+    # Fold 32-bits into 16-bits
+    total = (total >> 16) + (total & 0xffff)
+    total += total >> 16
+    return ~total + 0x10000 & 0xffff
+
+
+def neighbor_advertisement(interface, ip_address, net_ns=None):
+    """Sends an unsolicited neighbor advertisement for an IP on the interface.
+
+    :param interface: The interface name to send the advertisement on.
+    :param ip_address: The IP address to advertise.
+    :param net_ns: The network namespace to send the advertisement from.
+    :returns: None
+    """
+    ALL_NODES_ADDR = 'ff02::1'
+    SIOCGIFHWADDR = 0x8927
+
+    # Get a socket, optionally inside a network namespace
+    na_socket = None
+    if net_ns:
+        with network_namespace.NetworkNamespace(net_ns):
+            na_socket = socket.socket(
+                socket.AF_INET6, socket.SOCK_RAW,
+                socket.getprotobyname(constants.IPV6_ICMP))
+    else:
+        na_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
+                                  socket.getprotobyname(constants.IPV6_ICMP))
+
+    # Per RFC 4861 section 4.4, the hop limit should be 255
+    na_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255)
+
+    # Bind the socket with the source address
+    na_socket.bind((ip_address, 0))
+
+    # Get the byte representation of the MAC address of the interface
+    # Note: You can't use getsockname() to get the MAC on this type of socket
+    source_mac = fcntl.ioctl(
+        na_socket.fileno(), SIOCGIFHWADDR, struct.pack(
+            '256s', bytes(interface, 'utf-8')))[18:24]
+
+    # Get the byte representation of the source IP address
+    source_ip_bytes = socket.inet_pton(socket.AF_INET6, ip_address)
+
+    icmpv6_na_msg_prefix = [
+        struct.pack('!B', 136),  # ICMP Type Neighbor Advertisement
+        struct.pack('!B', 0)]    # ICMP Code
+    icmpv6_na_msg_postfix = [
+        struct.pack('!I', 0xa0000000),  # Flags (Router, Override)
+        source_ip_bytes,                # Target address
+        struct.pack('!B', 2),  # ICMPv6 option type target link-layer address
+        struct.pack('!B', 1),           # ICMPv6 option length
+        source_mac]                     # ICMPv6 option link-layer address
+
+    # Calculate the ICMPv6 checksum
+    icmpv6_pseudo_header = [
+        source_ip_bytes,                # Source IP address
+        socket.inet_pton(socket.AF_INET6, ALL_NODES_ADDR),  # Destination IP
+        struct.pack('!I', 58),          # IPv6 next header (ICMPv6)
+        struct.pack('!h', 32)]          # IPv6 payload length
+    icmpv6_tmp_chksum = struct.pack('!H', 0)  # Checksum->zeros for calculation
+    tmp_chksum_msg = b''.join(icmpv6_pseudo_header + icmpv6_na_msg_prefix +
+                              [icmpv6_tmp_chksum] + icmpv6_na_msg_postfix)
+    checksum = struct.pack('!H', calculate_icmpv6_checksum(tmp_chksum_msg))
+
+    # Build the ICMPv6 unsolicited neighbor advertisement
+    icmpv6_msg = b''.join(icmpv6_na_msg_prefix + [checksum] +
+                          icmpv6_na_msg_postfix)
+
+    na_socket.sendto(icmpv6_msg, (ALL_NODES_ADDR, 0, 0, 0))
+    na_socket.close()
+
+
+def send_ip_advertisement(interface, ip_address, net_ns=None):
+    """Send an address advertisement.
+
+    This method will send either GARP (IPv4) or neighbor advertisements
+    (IPv6) for the ip address specified.
+
+    :param interface: The interface name to send the advertisement on.
+    :param ip_address: The IP address to advertise.
+    :param net_ns: The network namespace to send the advertisement from.
+    :returns: None
+    """
+    try:
+        if common_utils.is_ipv4(ip_address):
+            garp(interface, ip_address, net_ns)
+        elif common_utils.is_ipv6(ip_address):
+            neighbor_advertisement(interface, ip_address, net_ns)
+        else:
+            LOG.error('Unknown IP version for address: "%s". Skipping',
+                      ip_address)
+    except Exception as e:
+        LOG.warning('Unable to send address advertisement for address: "%s", '
+                    'error: %s. Skipping', ip_address, str(e))
diff --git a/octavia/amphorae/backends/utils/keepalivedlvs_query.py b/octavia/amphorae/backends/utils/keepalivedlvs_query.py
new file mode 100644
index 0000000000..70efc12fbc
--- /dev/null
+++ b/octavia/amphorae/backends/utils/keepalivedlvs_query.py
@@ -0,0 +1,485 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ipaddress +import os +import re +import subprocess + +from octavia_lib.common import constants as lib_consts +from oslo_log import log as logging + +from octavia.amphorae.backends.agent.api_server import util +from octavia.common import constants + +LOG = logging.getLogger(__name__) +KERNEL_LVS_PATH = '/proc/net/ip_vs' +KERNEL_LVS_STATS_PATH = '/proc/net/ip_vs_stats' +LVS_KEY_REGEX = re.compile(r"RemoteAddress:Port\s+(.*$)") +V4_RS_VALUE_REGEX = re.compile(r"(\w{8}:\w{4})\s+(.*$)") +V4_HEX_IP_REGEX = re.compile(r"(\w{2})(\w{2})(\w{2})(\w{2})") +V6_RS_VALUE_REGEX = re.compile(r"(\[[\[\w{4}:]+\b\]:\w{4})\s+(.*$)") + +NS_REGEX = re.compile(r"net_namespace\s(\w+-\w+)") +VS_ADDRESS_REGEX = re.compile(r"virtual_server_group .* \{\n" + r"\s+([a-f\d\.:]+)\s(\d{1,5})\n") +RS_ADDRESS_REGEX = re.compile(r"real_server\s([a-f\d\.:]+)\s(\d{1,5})") +CONFIG_COMMENT_REGEX = re.compile( + r"#\sConfiguration\sfor\s(\w+)\s(\w{8}-\w{4}-\w{4}-\w{4}-\w{12})") +DISABLED_CONFIG_COMMENT_REGEX = re.compile( + r"#\s(\w+)\s(\w{8}-\w{4}-\w{4}-\w{4}-\w{12}) is disabled") + +CHECKER_REGEX = re.compile(r"(MISC_CHECK|HTTP_GET|TCP_CHECK)") + + +def read_kernel_file(ns_name, file_path): + cmd = f"ip netns exec {ns_name} cat {file_path}" + try: + output = subprocess.check_output(cmd.split(), + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.error("Failed to get kernel lvs status in ns %(ns_name)s " + "%(kernel_lvs_path)s: %(err)s %(out)s", + {'ns_name': ns_name, 'kernel_lvs_path': file_path, + 'err': e, 'out': e.output}) + raise e + # py3 treat the output as bytes type. 
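+    # For reference, an illustrative /proc/net/ip_vs table of the shape the
+    # LVS_KEY_REGEX/V4_RS_VALUE_REGEX patterns above are written against
+    # (example values; the kernel prints IPv4 addresses and ports in hex):
+    #
+    #   IP Virtual Server version 1.2.1 (size=4096)
+    #   Prot LocalAddress:Port Scheduler Flags
+    #     -> RemoteAddress:Port Forward Weight ActiveConn InActConn
+    #   UDP  0A000A0A:1BBC rr
+    #     -> 0A000A14:1BBC      Masq    13     1          0
+    #
+    # Here 0A000A0A:1BBC is 10.0.10.10:7100 and 0A000A14 is 10.0.10.20.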
+ if isinstance(output, bytes): + output = output.decode('utf-8') + return output + + +def get_listener_realserver_mapping(ns_name, listener_ip_ports, + health_monitor_enabled): + # returned result: + # actual_member_result = {'rs_ip:listened_port': { + # 'status': 'UP', + # 'Forward': forward_type, + # 'Weight': 5, + # 'ActiveConn': 0, + # 'InActConn': 0 + # }} + idex_list = [] + for listener_ip_port in listener_ip_ports: + listener_ip, listener_port = listener_ip_port.rsplit(':', 1) + ip_obj = ipaddress.ip_address(listener_ip.strip('[]')) + output = read_kernel_file(ns_name, KERNEL_LVS_PATH).split('\n') + if ip_obj.version == 4: + ip_to_hex_format = f"{ip_obj._ip:08X}" + else: + ip_to_hex_format = r'\[' + ip_obj.exploded + r'\]' + port_hex_format = f"{int(listener_port):04X}" + idex_list.append(ip_to_hex_format + ':' + port_hex_format) + idex = f"({'|'.join(idex_list)})" + + if health_monitor_enabled: + member_status = constants.UP + else: + member_status = constants.NO_CHECK + + actual_member_result = {} + find_target_block = False + result_keys = [] + for line in output: + if 'RemoteAddress:Port' in line: + result_keys = re.split(r'\s+', + LVS_KEY_REGEX.findall(line)[0].strip()) + elif (line.startswith(constants.PROTOCOL_UDP) or + line.startswith(lib_consts.PROTOCOL_SCTP)): + find_target_block = re.match(r'^(UDP|SCTP)\s+%s\s+\w+' % idex, + line) is not None + elif find_target_block and line: + rs_is_ipv4 = True + all_values = V4_RS_VALUE_REGEX.findall(line) + # If can not get all_values with ipv4 regex, then this line must be + # a ipv6 real server record. + if not all_values: + all_values = V6_RS_VALUE_REGEX.findall(line) + rs_is_ipv4 = False + + all_values = all_values[0] + ip_port = all_values[0] + result_values = re.split(r"\s+", all_values[1].strip()) + member_ip, member_port = ip_port.rsplit(':', 1) + port_string = str(int(member_port, 16)) + if rs_is_ipv4: + ip_string = ipaddress.ip_address(int(member_ip, 16)).compressed + member_ip_port_string = ip_string + ':' + port_string + else: + ip_string = ipaddress.ip_address( + member_ip.strip('[]')).compressed + member_ip_port_string = '[' + ip_string + ']:' + port_string + result_key_count = len(result_keys) + for index in range(result_key_count): + if member_ip_port_string not in actual_member_result: + actual_member_result[ + member_ip_port_string] = {'status': member_status, + result_keys[index]: + result_values[index]} + else: + # The other values include the weight + actual_member_result[ + member_ip_port_string][ + result_keys[index]] = result_values[index] + continue + + return actual_member_result + + +def get_lvs_listener_resource_ipports_nsname(listener_id): + # resource_ipport_mapping = {'Listener': {'id': listener-id, + # 'ipports': [ipport1, ipport2]}, + # 'Pool': {'id': pool-id}, + # 'Members': [{'id': member-id-1, + # 'ipport': ipport}, + # {'id': member-id-2, + # 'ipport': ipport}], + # 'HealthMonitor': {'id': healthmonitor-id}} + resource_ipport_mapping = {} + with open(util.keepalived_lvs_cfg_path(listener_id), + encoding='utf-8') as f: + cfg = f.read() + + ret = VS_ADDRESS_REGEX.findall(cfg) + + def _escape_ip(ip): + ret = ipaddress.ip_address(ip) + if ret.version == 6: + return "[" + ret.compressed + "]" + return ret.compressed + + listener_ip_ports = [ + _escape_ip(ip_port[0]) + ":" + ip_port[1] + for ip_port in ret + ] + + ns_name = NS_REGEX.findall(cfg)[0] + + disabled_resource_ids = DISABLED_CONFIG_COMMENT_REGEX.findall(cfg) + + listener_disabled = any(True + for resource in disabled_resource_ids + if 
resource[0] == 'Listener') + if listener_disabled: + return None, ns_name + + if not listener_ip_ports: + # If not get listener_ip_port from the lvs config file, + # that means the listener's default pool have no enabled member + # yet. But at this moment, we can get listener_id and ns_name, so + # for this function, we will just return ns_name + return resource_ipport_mapping, ns_name + + cfg_line = cfg.split('\n') + rs_ip_port_list = [] + for line in cfg_line: + if 'real_server' in line: + res = RS_ADDRESS_REGEX.findall(line) + rs_ip_port_list.append(res[0]) + + resource_type_ids = CONFIG_COMMENT_REGEX.findall(cfg) + + for resource_type, resource_id in resource_type_ids: + value = {'id': resource_id} + if resource_type == 'Member': + resource_type = f'{resource_type}s' + if resource_type not in resource_ipport_mapping: + value = [value] + if resource_type not in resource_ipport_mapping: + resource_ipport_mapping[resource_type] = value + elif resource_type == 'Members': + resource_ipport_mapping[resource_type].append(value) + + disabled_member_ids = [ + resource[1] + for resource in disabled_resource_ids + if resource[0] == 'Member' + ] + + resource_type = 'Members' + for member_id in disabled_member_ids: + value = {'id': member_id, + 'ipport': None} + if resource_type not in resource_ipport_mapping: + resource_ipport_mapping[resource_type] = [] + resource_ipport_mapping[resource_type].append(value) + + if rs_ip_port_list: + rs_ip_port_count = len(rs_ip_port_list) + for index in range(rs_ip_port_count): + member_ip = ipaddress.ip_address( + rs_ip_port_list[index][0]) + if member_ip.version == 6: + rs_ip_port_list[index] = ( + '[' + member_ip.compressed + ']', + rs_ip_port_list[index][1]) + resource_ipport_mapping['Members'][index]['ipport'] = ( + rs_ip_port_list[index][0] + ':' + + rs_ip_port_list[index][1]) + + resource_ipport_mapping['Listener']['ipports'] = ( + listener_ip_ports) + + return resource_ipport_mapping, ns_name + + +def get_lvs_listener_pool_status(listener_id): + (resource_ipport_mapping, + ns_name) = get_lvs_listener_resource_ipports_nsname(listener_id) + if 'Pool' not in resource_ipport_mapping: + return {} + if 'Members' not in resource_ipport_mapping: + return {'lvs': { + 'uuid': resource_ipport_mapping['Pool']['id'], + 'status': constants.UP, + 'members': {} + }} + + config_path = util.keepalived_lvs_cfg_path(listener_id) + pids_pathes = util.keepalived_lvs_pids_path(listener_id) + + config_stat = os.stat(config_path) + check_pid_stat = os.stat(pids_pathes[2]) + + # Indicates that keepalived configuration has been updated but the service + # has yet to be restarted. + # NOTE: It only works if we are doing a RESTART on configuration change, + # Iaa34db6cb1dfed98e96a585c5d105e263c7efa65 forces a RESTART instead of a + # RELOAD, we need to be careful if we want to switch back to RELOAD after + # updating to a recent keepalived release. 
+ restarting = config_stat.st_mtime > check_pid_stat.st_mtime + + with open(util.keepalived_lvs_cfg_path(listener_id), + encoding='utf-8') as f: + cfg = f.read() + hm_enabled = len(CHECKER_REGEX.findall(cfg)) > 0 + + realserver_result = get_listener_realserver_mapping( + ns_name, resource_ipport_mapping['Listener']['ipports'], + hm_enabled) + pool_status = constants.UP + member_results = {} + if realserver_result: + member_ip_port_list = [ + member['ipport'] for member in resource_ipport_mapping['Members']] + down_member_ip_port_set = set( + member_ip_port_list) - set(list(realserver_result.keys())) + + for member_ip_port in member_ip_port_list: + member_id = None + for member in resource_ipport_mapping['Members']: + if member['ipport'] == member_ip_port: + member_id = member['id'] + if member_ip_port is None: + status = constants.MAINT + elif member_ip_port in down_member_ip_port_set: + status = ( + constants.RESTARTING if restarting else constants.DOWN) + elif int(realserver_result[member_ip_port]['Weight']) == 0: + status = constants.DRAIN + else: + status = realserver_result[member_ip_port]['status'] + + if member_id: + member_results[member_id] = status + else: + if hm_enabled: + pool_status = constants.DOWN + + for member in resource_ipport_mapping['Members']: + if member['ipport'] is None: + member_results[member['id']] = constants.MAINT + elif hm_enabled: + member_results[member['id']] = ( + constants.RESTARTING if restarting else constants.DOWN) + else: + member_results[member['id']] = constants.NO_CHECK + + return { + 'lvs': + { + 'uuid': resource_ipport_mapping['Pool']['id'], + 'status': pool_status, + 'members': member_results + } + } + + +def get_ipvsadm_info(ns_name, is_stats_cmd=False): + cmd_list = ['ip', 'netns', 'exec', ns_name, 'ipvsadm', '-Ln'] + # use --exact to ensure output is integer only + if is_stats_cmd: + cmd_list += ['--stats', '--exact'] + output = subprocess.check_output(cmd_list, stderr=subprocess.STDOUT) + if isinstance(output, bytes): + output = output.decode('utf-8') + output = output.split('\n') + fields = [] + # mapping = {'listeneripport': {'Linstener': vs_values, + # 'members': [rs_values1, rs_values2]}} + last_key = None + value_mapping = {} + output_line_num = len(output) + + def split_line(line): + return re.sub(r'\s+', ' ', line.strip()).split(' ') + for line_num in range(output_line_num): + # ipvsadm -Ln + if 'Flags' in output[line_num]: + fields = split_line(output[line_num]) + elif fields and 'Flags' in fields and fields.index('Flags') == len( + fields) - 1: + fields.extend(split_line(output[line_num])) + # ipvsadm -Ln --stats + elif 'Prot' in output[line_num]: + fields = split_line(output[line_num]) + elif 'RemoteAddress' in output[line_num]: + start = fields.index('LocalAddress:Port') + 1 + temp_fields = fields[start:] + fields.extend(split_line(output[line_num])) + fields.extend(temp_fields) + # here we get the all fields + elif (constants.PROTOCOL_UDP in output[line_num] or + lib_consts.PROTOCOL_SCTP in output[line_num]): + # if UDP/TCP in this line, we can know this line is + # VS configuration. 
+            vs_values = split_line(output[line_num])
+            for value in vs_values:
+                if ':' in value:
+                    value_mapping[value] = {'Listener': vs_values,
+                                            'Members': []}
+                    last_key = value
+                    break
+        # otherwise the line must be an RS that belongs to the last seen VS
+        elif '->' in output[line_num] and last_key:
+            rs_values = split_line(output[line_num])
+            rs_values.remove('->')
+            value_mapping[last_key]['Members'].append(rs_values)
+
+    index = fields.index('->')
+    vs_fields = fields[:index]
+    if 'Flags' in vs_fields:
+        vs_fields.remove('Flags')
+    rs_fields = fields[index + 1:]
+    for key in list(value_mapping.keys()):
+        value_mapping[key]['Listener'] = list(
+            zip(vs_fields, value_mapping[key]['Listener']))
+        member_res = []
+        for member_value in value_mapping[key]['Members']:
+            member_res.append(list(zip(rs_fields, member_value)))
+        value_mapping[key]['Members'] = member_res
+
+    return value_mapping
+
+
+def get_lvs_listeners_stats():
+    lvs_listener_ids = util.get_lvs_listeners()
+    need_check_listener_ids = [
+        listener_id for listener_id in lvs_listener_ids
+        if util.is_lvs_listener_running(listener_id)]
+    ipport_mapping = {}
+    listener_stats_res = {}
+    for check_listener_id in need_check_listener_ids:
+        # resource_ipport_mapping = {'Listener': {'id': listener-id,
+        #                                         'ipport': ipport},
+        #                            'Pool': {'id': pool-id},
+        #                            'Members': [{'id': member-id-1,
+        #                                         'ipport': ipport},
+        #                                        {'id': member-id-2,
+        #                                         'ipport': ipport}],
+        #                            'HealthMonitor': {'id': healthmonitor-id}}
+        resource_ipport_mapping, ns_name = (
+            get_lvs_listener_resource_ipports_nsname(check_listener_id))
+
+        # The listener is disabled, we don't need to send an update
+        if resource_ipport_mapping is None:
+            continue
+
+        # Since we found keepalived running, acknowledge the listener
+        # in the heartbeat. If this listener has a pool and members,
+        # the stats will be updated later in the code flow.
+        listener_stats_res.update({
+            check_listener_id: {
+                'stats': {
+                    'bout': 0,
+                    'bin': 0,
+                    'scur': 0,
+                    'stot': 0,
+                    'ereq': 0},
+                'status': constants.OPEN}})
+
+        # If we cannot read the lvs configuration from the file, the pool
+        # of this listener may have zero enabled members while the
+        # keepalived process is still running, so we need to skip it.
+        if not resource_ipport_mapping:
+            continue
+        ipport_mapping.update({check_listener_id: resource_ipport_mapping})
+
+    # If we could not build any ipport_mapping,
+    # there is nothing to do, just return
+    if not ipport_mapping:
+        return listener_stats_res
+
+    # contains bout, bin, scur, stot, ereq, status
+    # bout(OutBytes), bin(InBytes), stot(Conns) from cmd ipvsadm -Ln --stats
+    # scur(ActiveConn) from cmd ipvsadm -Ln
+    # status: the configuration is visible in either command's output, so
+    # treat it as OPEN
+    # ereq stays 0, as the UDP case does not support it.
+    scur_res = get_ipvsadm_info(constants.AMPHORA_NAMESPACE)
+    stats_res = get_ipvsadm_info(constants.AMPHORA_NAMESPACE,
+                                 is_stats_cmd=True)
+    for listener_id, ipport in ipport_mapping.items():
+        listener_ipports = ipport['Listener']['ipports']
+        # If the listener is missing from the results it may be in ERROR;
+        # wait for the next loop to sync it. This also skips the case of a
+        # UDP listener with no enabled members, so we don't check it for
+        # failover.
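+        # Illustrative (hypothetical) shape of a get_ipvsadm_info() entry
+        # for a listener on 10.0.0.37:80 with a single member:
+        # scur_res['10.0.0.37:80'] = {
+        #     'Listener': [('Prot', 'UDP'),
+        #                  ('LocalAddress:Port', '10.0.0.37:80'), ...],
+        #     'Members': [[('RemoteAddress:Port', '192.168.2.25:80'),
+        #                  ('Forward', 'Masq'), ('Weight', '1'),
+        #                  ('ActiveConn', '3'), ('InActConn', '0')]]}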
+ scur_found = stats_found = False + for listener_ipport in listener_ipports: + if listener_ipport in scur_res: + scur_found = True + if listener_ipport in stats_res: + stats_found = True + if not scur_found or not stats_found: + continue + + scur, bout, bin, stot, ereq = 0, 0, 0, 0, 0 + # As all results contain this listener, so its status should be OPEN + status = constants.OPEN + # Get scur + for listener_ipport in listener_ipports: + if listener_ipport not in scur_res: + continue + for m in scur_res[listener_ipport]['Members']: + for item in m: + if item[0] == 'ActiveConn': + scur += int(item[1]) + + # Get bout, bin, stot + for item in stats_res[listener_ipport]['Listener']: + if item[0] == 'Conns': + stot += int(item[1]) + elif item[0] == 'OutBytes': + bout += int(item[1]) + elif item[0] == 'InBytes': + bin += int(item[1]) + + listener_stats_res.update({ + listener_id: { + 'stats': { + 'bout': bout, + 'bin': bin, + 'scur': scur, + 'stot': stot, + 'ereq': ereq}, + 'status': status}}) + + return listener_stats_res diff --git a/octavia/amphorae/backends/utils/network_namespace.py b/octavia/amphorae/backends/utils/network_namespace.py new file mode 100644 index 0000000000..8b5e6b1e58 --- /dev/null +++ b/octavia/amphorae/backends/utils/network_namespace.py @@ -0,0 +1,50 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import ctypes +import os + + +class NetworkNamespace: + """A network namespace context manager. + + Runs wrapped code inside the specified network namespace. + + :param netns: The network namespace name to enter. + """ + # from linux/sched.h - We want to enter a network namespace + CLONE_NEWNET = 0x40000000 + + @staticmethod + def _error_handler(result, func, arguments): + if result == -1: + errno = ctypes.get_errno() + raise OSError(errno, os.strerror(errno)) + + def __init__(self, netns): + self.current_netns = f'/proc/{os.getpid()}/ns/net' + self.target_netns = f'/var/run/netns/{netns}' + # reference: man setns(2) + self.set_netns = ctypes.CDLL('libc.so.6', use_errno=True).setns + self.set_netns.errcheck = self._error_handler + + def __enter__(self): + # Save the current network namespace + self.current_netns_fd = open(self.current_netns, encoding='utf-8') + with open(self.target_netns, encoding='utf-8') as fd: + self.set_netns(fd.fileno(), self.CLONE_NEWNET) + + def __exit__(self, *args): + # Return to the previous network namespace + self.set_netns(self.current_netns_fd.fileno(), self.CLONE_NEWNET) + self.current_netns_fd.close() diff --git a/octavia/amphorae/backends/utils/network_utils.py b/octavia/amphorae/backends/utils/network_utils.py new file mode 100644 index 0000000000..6311b695e6 --- /dev/null +++ b/octavia/amphorae/backends/utils/network_utils.py @@ -0,0 +1,84 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import ipaddress + +import pyroute2 + +from octavia.common import constants as consts +from octavia.common import exceptions + + +def _find_interface(ip_address, rtnl_api, normalized_addr): + """Find the interface using a routing netlink API. + + :param ip_address: The IP address to search with. + :param rtnl_api: A pyroute2 rtnl_api instance. (IPRoute, NetNS, etc.) + :returns: The interface name if found, None if not found. + :raises exceptions.InvalidIPAddress: Invalid IP address provided. + """ + for addr in rtnl_api.get_addr(address=ip_address): + # Save the interface index as IPv6 records don't list a textual + # interface + interface_idx = addr['index'] + # Search through the attributes of each address record + for attr in addr['attrs']: + # Look for the attribute name/value pair for the address + if attr[0] == 'IFA_ADDRESS': + # Compare the normalized address with the address we are + # looking for. Since we have matched the name above, attr[1] + # is the address value + if normalized_addr == ipaddress.ip_address(attr[1]).compressed: + # Lookup the matching interface name by getting the + # interface with the index we found in the above address + # search + lookup_int = rtnl_api.get_links(interface_idx) + # Search through the attributes of the matching interface + # record + for int_attr in lookup_int[0]['attrs']: + # Look for the attribute name/value pair that includes + # the interface name + if int_attr[0] == consts.IFLA_IFNAME: + # Return the matching interface name that is in + # int_attr[1] for the matching interface attribute + # name + return int_attr[1] + # We didn't find an interface with that IP address. + return None + + +def get_interface_name(ip_address, net_ns=None): + """Gets the interface name from an IP address. + + :param ip_address: The IP address to lookup. + :param net_ns: The network namespace to find the interface in. + :returns: The interface name. + :raises exceptions.InvalidIPAddress: Invalid IP address provided. + :raises octavia.common.exceptions.NotFound: No interface was found. + """ + # We need to normalize the address as IPv6 has multiple representations + # fe80:0000:0000:0000:f816:3eff:fef2:2058 == fe80::f816:3eff:fef2:2058 + try: + normalized_addr = ipaddress.ip_address(ip_address).compressed + except ValueError as e: + raise exceptions.InvalidIPAddress(ip_addr=ip_address) from e + + if net_ns: + with pyroute2.NetNS(net_ns) as rtnl_api: + interface = _find_interface(ip_address, rtnl_api, normalized_addr) + else: + with pyroute2.IPRoute() as rtnl_api: + interface = _find_interface(ip_address, rtnl_api, normalized_addr) + if interface is not None: + return interface + raise exceptions.NotFound(resource='IP address', id=ip_address) diff --git a/octavia/amphorae/backends/utils/nftable_utils.py b/octavia/amphorae/backends/utils/nftable_utils.py new file mode 100644 index 0000000000..b03f7813f1 --- /dev/null +++ b/octavia/amphorae/backends/utils/nftable_utils.py @@ -0,0 +1,147 @@ +# Copyright 2024 Red Hat, Inc. All rights reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+import stat
+import subprocess
+
+from octavia_lib.common import constants as lib_consts
+from oslo_log import log as logging
+from webob import exc
+
+from octavia.amphorae.backends.utils import network_namespace
+from octavia.common import constants as consts
+from octavia.common import utils
+
+LOG = logging.getLogger(__name__)
+
+
+def write_nftable_rules_file(interface_name, rules):
+    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
+    # mode 00600
+    mode = stat.S_IRUSR | stat.S_IWUSR
+
+    # Create some strings shared on both code paths
+    table_string = f'table {consts.NFT_FAMILY} {consts.NFT_TABLE} {{\n'
+    chain_string = f'  chain {consts.NFT_CHAIN} {{\n'
+    vip_chain_string = f'  chain {consts.NFT_VIP_CHAIN} {{\n'
+    hook_string = ('    type filter hook input priority filter; '
+                   'policy drop;\n')
+
+    # Conntrack is used to allow flow return traffic
+    conntrack_string = ('    ct state vmap { established : accept, '
+                        'related : accept, invalid : drop }\n')
+
+    # Allow loopback traffic on the loopback interface, nowhere else
+    loopback_string = '    iif lo accept\n'
+    loopback_addr_string = '    ip saddr 127.0.0.0/8 drop\n'
+    loopback_ipv6_addr_string = '    ip6 saddr ::1 drop\n'
+
+    # Allow ICMP destination unreachable for PMTUD
+    icmp_string = '    icmp type destination-unreachable accept\n'
+    # Allow the required neighbor solicitation/discovery PMTUD ICMPV6
+    icmpv6_string = ('    icmpv6 type { nd-neighbor-solicit, '
+                     'nd-router-advert, nd-neighbor-advert, packet-too-big, '
+                     'destination-unreachable } accept\n')
+    # Allow DHCP responses
+    dhcp_string = '    udp sport 67 udp dport 68 accept\n'
+    dhcpv6_string = '    udp sport 547 udp dport 546 accept\n'
+
+    # If the packet came in on the VIP interface, goto the VIP rules chain
+    vip_interface_goto_string = (
+        f'    iifname {consts.NETNS_PRIMARY_INTERFACE} '
+        f'goto {consts.NFT_VIP_CHAIN}\n')
+
+    # Check whether a rules file already exists or we need to create a
+    # "drop all" file with no rules except for VRRP. If it exists, we should
+    # not overwrite it here, as this could be a reboot, unless we were
+    # passed new rules.
+    if os.path.isfile(consts.NFT_RULES_FILE):
+        if not rules:
+            return
+        with os.fdopen(
+                os.open(consts.NFT_RULES_FILE, flags, mode), 'w') as file:
+            # Clear the existing rules in the kernel
+            # Note: The "nft -f" method is atomic, so clearing the rules will
+            # not leave the amphora exposed.
+            # Create and delete the table to not get errors if the table does
+            # not exist yet.
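+            # The resulting file looks roughly like this (illustrative
+            # sketch, assuming the usual inet family/table constants):
+            #   table inet amphora_table {}
+            #   delete table inet amphora_table
+            #   table inet amphora_table {
+            #     chain amphora_chain {
+            #       type filter hook input priority filter; policy drop;
+            #       ...
+            #       iifname <vip interface> goto amphora_vip_chain
+            #     }
+            #     chain amphora_vip_chain {
+            #       <one accept rule per allowed protocol/port/CIDR>
+            #     }
+            #   }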
+ file.write(f'table {consts.NFT_FAMILY} {consts.NFT_TABLE} ' + '{}\n') + file.write(f'delete table {consts.NFT_FAMILY} ' + f'{consts.NFT_TABLE}\n') + file.write(table_string) + file.write(chain_string) + file.write(hook_string) + file.write(conntrack_string) + file.write(loopback_string) + file.write(loopback_addr_string) + file.write(loopback_ipv6_addr_string) + file.write(icmp_string) + file.write(icmpv6_string) + file.write(dhcp_string) + file.write(dhcpv6_string) + file.write(vip_interface_goto_string) + file.write(' }\n') # close the chain + file.write(vip_chain_string) + for rule in rules: + file.write(f' {_build_rule_cmd(rule)}\n') + file.write(' }\n') # close the chain + file.write('}\n') # close the table + else: # No existing rules, create the "drop all" base rules + with os.fdopen( + os.open(consts.NFT_RULES_FILE, flags, mode), 'w') as file: + file.write(table_string) + file.write(chain_string) + file.write(hook_string) + file.write(icmp_string) + file.write(icmpv6_string) + file.write(dhcp_string) + file.write(dhcpv6_string) + file.write(' }\n') # close the chain + file.write('}\n') # close the table + + +def _build_rule_cmd(rule): + prefix_saddr = '' + if rule[consts.CIDR] and rule[consts.CIDR] != '0.0.0.0/0': + cidr_ip_version = utils.ip_version(rule[consts.CIDR].split('/')[0]) + if cidr_ip_version == 4: + prefix_saddr = f'ip saddr {rule[consts.CIDR]} ' + elif cidr_ip_version == 6: + prefix_saddr = f'ip6 saddr {rule[consts.CIDR]} ' + else: + raise exc.HTTPBadRequest(explanation='Unknown ip version') + + if rule[consts.PROTOCOL] == lib_consts.PROTOCOL_SCTP: + return f'{prefix_saddr}sctp dport {rule[consts.PORT]} accept' + if rule[consts.PROTOCOL] == lib_consts.PROTOCOL_TCP: + return f'{prefix_saddr}tcp dport {rule[consts.PORT]} accept' + if rule[consts.PROTOCOL] == lib_consts.PROTOCOL_UDP: + return f'{prefix_saddr}udp dport {rule[consts.PORT]} accept' + if rule[consts.PROTOCOL] == consts.VRRP: + return f'{prefix_saddr}ip protocol 112 accept' + raise exc.HTTPBadRequest(explanation='Unknown protocol used in rules') + + +def load_nftables_file(): + cmd = [consts.NFT_CMD, '-o', '-f', consts.NFT_RULES_FILE] + try: + with network_namespace.NetworkNamespace(consts.AMPHORA_NAMESPACE): + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except Exception as e: + if hasattr(e, 'output'): + LOG.error(e.output) + else: + LOG.error(e) + raise diff --git a/octavia/amphorae/backends/utils/udp_check.sh b/octavia/amphorae/backends/utils/udp_check.sh new file mode 100644 index 0000000000..37443b053c --- /dev/null +++ b/octavia/amphorae/backends/utils/udp_check.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +nc_cmd=`which nc` + +nc_flavor=$($nc_cmd --version 2>&1 | grep -o nmap) +case "$nc_flavor" in +nmap) + nc_flavor_opts="-i1" + ;; +*) # default, probably openbsd + nc_flavor_opts="-w1" + ;; +esac + +$nc_cmd -uzv $nc_flavor_opts $1 $2 > /dev/null +exit $? diff --git a/octavia/amphorae/driver_exceptions/__init__.py b/octavia/amphorae/driver_exceptions/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/amphorae/driver_exceptions/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/amphorae/driver_exceptions/exceptions.py b/octavia/amphorae/driver_exceptions/exceptions.py new file mode 100644 index 0000000000..789d12f23d --- /dev/null +++ b/octavia/amphorae/driver_exceptions/exceptions.py @@ -0,0 +1,132 @@ +# Copyright 2011-2014 OpenStack Foundation,author: Min Wang,German Eichberger +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import excutils + +from octavia.i18n import _ + + +class AmphoraDriverError(Exception): + + message = _("A super class for all other exceptions and the catch.") + + def __init__(self, **kwargs): + try: + super().__init__(self.message % kwargs) + self.msg = self.message % kwargs + except Exception: + with excutils.save_and_reraise_exception() as ctxt: + if not self.use_fatal_exceptions(): + ctxt.reraise = False + # at least get the core message out if something happened + super().__init__(self.message) + + def __unicode__(self): + return self.msg + + @staticmethod + def use_fatal_exceptions(): + """Return True if use fatal exceptions by raising them.""" + return False + + +class NotFoundError(AmphoraDriverError): + + message = _('this amphora couldn\'t be found') + + +class InfoException(AmphoraDriverError): + + message = _('gathering information about this amphora failed') + + +class MetricsException(AmphoraDriverError): + + message = _('gathering metrics failed') + + +class UnauthorizedException(AmphoraDriverError): + + message = _('the driver can\'t access the amphora') + + +class StatisticsException(AmphoraDriverError): + + message = _('gathering statistics failed') + + +class TimeOutException(AmphoraDriverError): + + message = _('contacting the amphora timed out') + + +class DeleteFailed(AmphoraDriverError): + + message = _('this load balancer couldn\'t be deleted') + + +class SuspendFailed(AmphoraDriverError): + + message = _('this load balancer couldn\'t be suspended') + + +class EnableFailed(AmphoraDriverError): + + message = _('this load balancer couldn\'t be enabled') + + +class ArchiveException(AmphoraDriverError): + + message = _('couldn\'t archive the logs') + + +class ProvisioningErrors(AmphoraDriverError): + + message = _('Super class for provisioning amphora errors') + + +class ListenerProvisioningError(ProvisioningErrors): + + message = _('couldn\'t provision Listener') + + +class LoadBalancerProvisoningError(ProvisioningErrors): + + message = _('couldn\'t provision LoadBalancer') + + +class HealthMonitorProvisioningError(ProvisioningErrors): + + message = _('couldn\'t provision HealthMonitor') + + +class NodeProvisioningError(ProvisioningErrors): + + message = _('couldn\'t 
provision Node') + + +class AmpDriverNotImplementedError(AmphoraDriverError): + + message = _('Amphora does not implement this feature.') + + +class AmpConnectionRetry(AmphoraDriverError): + + message = _('Could not connect to amphora, exception caught: ' + '%(exception)s') + + +class AmpVersionUnsupported(AmphoraDriverError): + + message = _('Amphora version %(version)s is no longer supported.') diff --git a/octavia/amphorae/drivers/__init__.py b/octavia/amphorae/drivers/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/amphorae/drivers/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/amphorae/drivers/driver_base.py b/octavia/amphorae/drivers/driver_base.py new file mode 100644 index 0000000000..e06cb01aaf --- /dev/null +++ b/octavia/amphorae/drivers/driver_base.py @@ -0,0 +1,311 @@ +# Copyright 2011-2014 OpenStack Foundation,author: Min Wang,German Eichberger +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +from typing import Optional + +from octavia.db import models as db_models + + +class AmphoraLoadBalancerDriver(metaclass=abc.ABCMeta): + @abc.abstractmethod + def update_amphora_listeners(self, loadbalancer, amphora, + timeout_dict): + """Update the amphora with a new configuration. + + :param loadbalancer: List of listeners to update. + :type loadbalancer: list(octavia.db.models.Listener) + :param amphora: The index of the specific amphora to update + :type amphora: octavia.db.models.Amphora + :param timeout_dict: Dictionary of timeout values for calls to the + amphora. May contain: req_conn_timeout, + req_read_timeout, conn_max_retries, + conn_retry_interval + :type timeout_dict: dict + :returns: None + + Builds a new configuration, pushes it to the amphora, and reloads + the listener on one amphora. + """ + + @abc.abstractmethod + def update(self, loadbalancer): + """Update the amphora with a new configuration. + + :param loadbalancer: loadbalancer object, need to use its + vip.ip_address property + :type loadbalancer: octavia.db.models.LoadBalancer + :returns: None + + At this moment, we just build the basic structure for testing, will + add more function along with the development. + """ + + @abc.abstractmethod + def start(self, loadbalancer, amphora, timeout_dict=None): + """Start the listeners on the amphora. 
+ + :param loadbalancer: loadbalancer object to start listeners + :type loadbalancer: octavia.db.models.LoadBalancer + :param amphora: Amphora to start. If None, start on all amphora + :type amphora: octavia.db.models.Amphora + :param timeout_dict: Dictionary of timeout values for calls to the + amphora. May contain: req_conn_timeout, + req_read_timeout, conn_max_retries, + conn_retry_interval + :type timeout_dict: dict + :returns: return a value list (listener, vip, status flag--enable) + + At this moment, we just build the basic structure for testing, will + add more function along with the development. + """ + + @abc.abstractmethod + def reload(self, loadbalancer, amphora, timeout_dict=None): + """Reload the listeners on the amphora. + + :param loadbalancer: loadbalancer object to reload listeners + :type loadbalancer: octavia.db.models.LoadBalancer + :param amphora: Amphora to start. If None, reload on all amphora + :type amphora: octavia.db.models.Amphora + :param timeout_dict: Dictionary of timeout values for calls to the + amphora. May contain: req_conn_timeout, + req_read_timeout, conn_max_retries, + conn_retry_interval + :type timeout_dict: dict + :returns: return a value list (listener, vip, status flag--enable) + + At this moment, we just build the basic structure for testing, will + add more function along with the development. + """ + + @abc.abstractmethod + def delete(self, listener): + """Delete the listener on the vip. + + :param listener: listener object, + need to use its protocol_port property + :type listener: octavia.db.models.Listener + :returns: return a value list (listener, vip, status flag--delete) + + At this moment, we just build the basic structure for testing, will + add more function along with the development. + """ + + @abc.abstractmethod + def get_info(self, amphora, raise_retry_exception=False): + """Returns information about the amphora. + + :param amphora: amphora object, need to use its id property + :type amphora: octavia.db.models.Amphora + :param raise_retry_exception: Flag if outside task should be retried + :type boolean: False by default + :returns: return a value list (amphora.id, status flag--'info') + + At this moment, we just build the basic structure for testing, will + add more function along with the development, eventually, we want it + to return information as: + {"Rest Interface": "1.0", "Amphorae": "1.0", + "packages":{"ha proxy":"1.5"}} + some information might come from querying the amphora + """ + + @abc.abstractmethod + def get_diagnostics(self, amphora): + """Return ceilometer ready diagnostic data. + + :param amphora: amphora object, need to use its id property + :type amphora: octavia.db.models.Amphora + :returns: return a value list (amphora.id, status flag--'ge + t_diagnostics') + + At this moment, we just build the basic structure for testing, will + add more function along with the development, eventually, we want it + run some expensive self tests to determine if the amphora and the lbs + are healthy the idea is that those tests are triggered more infrequent + than the health gathering. + """ + + @abc.abstractmethod + def finalize_amphora(self, amphora): + """Finalize the amphora before any listeners are configured. + + :param amphora: amphora object, need to use its id property + :type amphora: octavia.db.models.Amphora + :returns: None + + At this moment, we just build the basic structure for testing, will + add more function along with the development. 
This is a hook for + drivers who need to do additional work before an amphora becomes ready + to accept listeners. Please keep in mind that amphora might be kept in + an offline pool after this call. + """ + + def post_vip_plug(self, amphora, load_balancer, amphorae_network_config, + vrrp_port, vip_subnet, additional_vip_data=None): + """Called after network driver has allocated and plugged the VIP + + :param amphora: + :type amphora: octavia.db.models.Amphora + :param load_balancer: A load balancer that just had its vip allocated + and plugged in the network driver. + :type load_balancer: octavia.common.data_models.LoadBalancer + :param amphorae_network_config: A data model containing information + about the subnets and ports that an + amphorae owns. + :type amphorae_network_config: octavia.network.data_models. + AmphoraNetworkConfig + :param vrrp_port: VRRP port associated with the load balancer + :type vrrp_port: octavia.network.data_models.Port + + :param vip_subnet: VIP subnet associated with the load balancer + :type vip_subnet: octavia.network.data_models.Subnet + + :type vip_network: octavia.network.data_models.AmphoraNetworkConfig + + :type additional_vip_data: list of + octavia.network.data_models.AdditionalVipData + + :returns: None + + + This is to do any additional work needed on the amphorae to plug + the vip, such as bring up interfaces. + """ + + def post_network_plug(self, amphora, port, amphora_network_config): + """Called after amphora added to network + + :param amphora: amphora object, needs id and network ip(s) + :type amphora: octavia.db.models.Amphora + :param port: contains information of the plugged port + :type port: octavia.network.data_models.Port + :param amphora_network_config: A data model containing information + about the subnets and ports that an + amphorae owns. + :type amphora_network_config: octavia.network.data_models. + AmphoraNetworkConfig + + This method is optional to implement. After adding an amphora to a + network, there may be steps necessary on the amphora to allow it to + access said network. Ex: creating an interface on an amphora for a + neutron network to utilize. + """ + + def upload_cert_amp(self, amphora, pem_file): + """Upload cert info to the amphora. + + :param amphora: amphora object, needs id and network ip(s) + :type amphora: octavia.db.models.Amphora + :param pem_file: a certificate file + :type pem_file: file object + + Upload cert file to amphora for Controller Communication. + """ + + def update_amphora_agent_config(self, amphora, agent_config): + """Upload and update the amphora agent configuration. + + :param amphora: amphora object, needs id and network ip(s) + :type amphora: octavia.db.models.Amphora + :param agent_config: The new amphora agent configuration file. + :type agent_config: string + """ + + @abc.abstractmethod + def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): + """Get the interface name from an IP address. + + :param amphora: The amphora to query. + :type amphora: octavia.db.models.Amphora + :param ip_address: The IP address to lookup. (IPv4 or IPv6) + :type ip_address: string + :param timeout_dict: Dictionary of timeout values for calls to the + amphora. May contain: req_conn_timeout, + req_read_timeout, conn_max_retries, + conn_retry_interval + :type timeout_dict: dict + """ + + @abc.abstractmethod + def check(self, amphora: db_models.Amphora, + timeout_dict: Optional[dict] = None): + """Check connectivity to the amphora. + + :param amphora: The amphora to query. 
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
+        :raises TimeOutException: The amphora didn't reply
+        """
+
+    @abc.abstractmethod
+    def set_interface_rules(self, amphora: db_models.Amphora, ip_address,
+                            rules):
+        """Sets interface firewall rules in the amphora
+
+        :param amphora: The amphora to query.
+        :param ip_address: The IP address assigned to the interface the rules
+                           will be applied on.
+        :param rules: The list of allow rules to apply.
+        """
+
+
+class VRRPDriverMixin(metaclass=abc.ABCMeta):
+    """Abstract mixin class for VRRP support in loadbalancer amphorae
+
+    Usage: To plug VRRP support in another service driver XYZ, use:
+    @plug_mixin(XYZ)
+    class XYZ: ...
+    """
+    @abc.abstractmethod
+    def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora,
+                         timeout_dict=None):
+        """Update amphorae of the loadbalancer with a new VRRP configuration
+
+        :param loadbalancer: loadbalancer object
+        :param amphorae_network_config: amphorae network configurations
+        :param amphora: The amphora object to update.
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
+        """
+
+    @abc.abstractmethod
+    def stop_vrrp_service(self, loadbalancer):
+        """Stop the vrrp services running on the loadbalancer's amphorae
+
+        :param loadbalancer: loadbalancer object
+        """
+
+    @abc.abstractmethod
+    def start_vrrp_service(self, amphora, timeout_dict=None):
+        """Start the VRRP services on the amphora
+
+        :param amphora: The amphora object to start the service on.
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
+        """
+
+    @abc.abstractmethod
+    def reload_vrrp_service(self, loadbalancer):
+        """Reload the VRRP services of all amphorae of the loadbalancer
+
+        :param loadbalancer: loadbalancer object
+        """
diff --git a/octavia/amphorae/drivers/haproxy/__init__.py b/octavia/amphorae/drivers/haproxy/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/amphorae/drivers/haproxy/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/octavia/amphorae/drivers/haproxy/data_models.py b/octavia/amphorae/drivers/haproxy/data_models.py
new file mode 100644
index 0000000000..c1875dd331
--- /dev/null
+++ b/octavia/amphorae/drivers/haproxy/data_models.py
@@ -0,0 +1,110 @@
+# Copyright 2014 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import octavia.common.data_models as models + + +class Topology(models.BaseDataModel): + + def __init__(self, hostname=None, uuid=None, topology=None, role=None, + ip=None, ha_ip=None): + self.hostname = hostname + self.uuid = uuid + self.topology = topology + self.role = role + self.ip = ip + self.ha_ip = ha_ip + + +class Info(models.BaseDataModel): + + def __init__(self, hostname=None, uuid=None, version=None, + api_version=None): + self.hostname = hostname + self.uuid = uuid + self.version = version + self.api_version = api_version + + +class Details(models.BaseDataModel): + + def __init__(self, hostname=None, uuid=None, version=None, + api_version=None, network_tx=None, network_rx=None, + active=None, haproxy_count=None, cpu=None, memory=None, + disk=None, load=None, listeners=None, packages=None): + self.hostname = hostname + self.uuid = uuid + self.version = version + self.api_version = api_version + self.network_tx = network_tx + self.network_rx = network_rx + self.active = active + self.haproxy_count = haproxy_count + self.cpu = cpu + self.memory = memory + self.disk = disk + self.load = load or [] + self.listeners = listeners or [] + self.packages = packages or [] + + +class CPU(models.BaseDataModel): + + def __init__(self, total=None, user=None, system=None, soft_irq=None): + self.total = total + self.user = user + self.system = system + self.soft_irq = soft_irq + + +class Memory(models.BaseDataModel): + + def __init__(self, total=None, free=None, available=None, buffers=None, + cached=None, swap_used=None, shared=None, slab=None, + committed_as=None): + self.total = total + self.free = free + self.available = available + self.buffers = buffers + self.cached = cached + self.swap_used = swap_used + self.shared = shared + self.slab = slab + self.committed_as = committed_as + + +class Disk(models.BaseDataModel): + + def __init__(self, used=None, available=None): + self.used = used + self.available = available + + +class ListenerStatus(models.BaseDataModel): + + def __init__(self, status=None, uuid=None, provisioning_status=None, + type=None, pools=None): + self.status = status + self.uuid = uuid + self.provisioning_status = provisioning_status + self.type = type + self.pools = pools or [] + + +class Pool(models.BaseDataModel): + + def __init__(self, uuid=None, status=None, members=None): + self.uuid = uuid + self.status = status + self.members = members or [] diff --git a/octavia/amphorae/drivers/haproxy/exceptions.py b/octavia/amphorae/drivers/haproxy/exceptions.py new file mode 100644 index 0000000000..b5a30175d1 --- /dev/null +++ b/octavia/amphorae/drivers/haproxy/exceptions.py @@ -0,0 +1,90 @@ +# Copyright 2014 Rackspace +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from webob import exc + +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + + +def check_exception(response, ignore=tuple(), log_error=True): + status_code = response.status_code + responses = { + 400: InvalidRequest, + 401: Unauthorized, + 403: Forbidden, + 404: NotFound, + 405: InvalidRequest, + 409: Conflict, + 500: InternalServerError, + 503: ServiceUnavailable + } + if (status_code not in ignore) and (status_code in responses): + try: + if log_error: + LOG.error('Amphora agent returned unexpected result code %s ' + 'with response %s', status_code, response.json()) + except Exception: + # Handle the odd case where there is no response body + # like when using requests_mock which doesn't support has_body + pass + raise responses[status_code]() + + return response + + +class APIException(exc.HTTPClientError): + msg = "Something unknown went wrong" + code = 500 + + def __init__(self, **kwargs): + self.msg = self.msg % kwargs + super().__init__(detail=self.msg) + + +class InvalidRequest(APIException): + msg = "Invalid request" + code = 400 + + +class Unauthorized(APIException): + msg = "Unauthorized" + code = 401 + + +class Forbidden(APIException): + msg = "Forbidden" + code = 403 + + +class NotFound(APIException): + msg = "Not Found" + code = 404 + + +class Conflict(APIException): + msg = "Conflict" + code = 409 + + +class InternalServerError(APIException): + msg = "Internal Server Error" + code = 500 + + +class ServiceUnavailable(APIException): + msg = "Service Unavailable" + code = 503 diff --git a/octavia/amphorae/drivers/haproxy/rest_api_driver.py b/octavia/amphorae/drivers/haproxy/rest_api_driver.py new file mode 100644 index 0000000000..663a600a34 --- /dev/null +++ b/octavia/amphorae/drivers/haproxy/rest_api_driver.py @@ -0,0 +1,885 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
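+# Note on the ``timeout_dict`` arguments used throughout this driver: per the
+# request() implementation below, the dict may contain the keys
+# req_conn_timeout, req_read_timeout, conn_max_retries and
+# conn_retry_interval; any missing key falls back to the matching
+# [haproxy_amphora] configuration option. An illustrative example:
+#     {'req_conn_timeout': 5, 'req_read_timeout': 60,
+#      'conn_max_retries': 120, 'conn_retry_interval': 5}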
+import functools +import hashlib +import json +import os +import ssl +import time +from typing import Optional +import warnings + +from oslo_context import context as oslo_context +from oslo_log import log as logging +import requests +from stevedore import driver as stevedore_driver + +from octavia.amphorae.driver_exceptions import exceptions as driver_except +from octavia.amphorae.drivers import driver_base +from octavia.amphorae.drivers.haproxy import exceptions as exc +from octavia.amphorae.drivers.keepalived import vrrp_rest_driver +from octavia.common.config import cfg +from octavia.common import constants as consts +import octavia.common.jinja.haproxy.combined_listeners.jinja_cfg as jinja_combo +from octavia.common.jinja.lvs import jinja_cfg as jinja_udp_cfg +from octavia.common.tls_utils import cert_parser +from octavia.common import utils +from octavia.db import api as db_api +from octavia.db import models as db_models +from octavia.db import repositories as repo + + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class HaproxyAmphoraLoadBalancerDriver( + driver_base.AmphoraLoadBalancerDriver, + vrrp_rest_driver.KeepalivedAmphoraDriverMixin): + + def __init__(self): + super().__init__() + self.clients = { + 'base': AmphoraAPIClientBase(), + '1.0': AmphoraAPIClient1_0(), + } + self.cert_manager = stevedore_driver.DriverManager( + namespace='octavia.cert_manager', + name=CONF.certificates.cert_manager, + invoke_on_load=True, + ).driver + + self.jinja_combo = jinja_combo.JinjaTemplater( + base_amp_path=CONF.haproxy_amphora.base_path, + base_crt_dir=CONF.haproxy_amphora.base_cert_dir, + haproxy_template=CONF.haproxy_amphora.haproxy_template, + connection_logging=CONF.haproxy_amphora.connection_logging) + self.lvs_jinja = jinja_udp_cfg.LvsJinjaTemplater() + + def _get_haproxy_versions(self, amphora, timeout_dict=None): + """Get major and minor version number from haproxy + + Example: ['1', '6'] + + :returns version_list: A list with the major and minor numbers + """ + self._populate_amphora_api_version( + amphora, timeout_dict=timeout_dict) + amp_info = self.clients[amphora.api_version].get_info( + amphora, timeout_dict=timeout_dict) + haproxy_version_string = amp_info['haproxy_version'] + + return haproxy_version_string.split('.')[:2] + + def _populate_amphora_api_version(self, amphora, timeout_dict=None, + raise_retry_exception=False): + """Populate the amphora object with the api_version + + This will query the amphora for version discovery and populate + the api_version string attribute on the amphora object. 
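+
+        For example (hypothetical values): an amphora whose API reports
+        {'api_version': '1.0'} gets amphora.api_version = '1.0', and the
+        matching '1.0' client is then used for all subsequent calls.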
+ + :returns: None + """ + if not getattr(amphora, 'api_version', None): + try: + amphora.api_version = self.clients['base'].get_api_version( + amphora, timeout_dict=timeout_dict, + raise_retry_exception=raise_retry_exception)['api_version'] + except exc.NotFound: + # Amphora is too old for version discovery, default to 0.5 + amphora.api_version = '0.5' + LOG.debug('Amphora %s has API version %s', + amphora.id, amphora.api_version) + api_version = list(map(int, amphora.api_version.split('.'))) + + if api_version[0] == 0 and api_version[1] <= 5: # 0.5 or earlier + raise driver_except.AmpVersionUnsupported( + version=amphora.api_version) + + return api_version + + def check(self, amphora: db_models.Amphora, + timeout_dict: Optional[dict] = None): + """Check connectivity to the amphora.""" + self._populate_amphora_api_version(amphora, timeout_dict) + + def update_amphora_listeners(self, loadbalancer, amphora, + timeout_dict=None): + """Update the amphora with a new configuration. + + :param loadbalancer: The load balancer to update + :type loadbalancer: object + :param amphora: The amphora to update + :type amphora: object + :param timeout_dict: Dictionary of timeout values for calls to the + amphora. May contain: req_conn_timeout, + req_read_timeout, conn_max_retries, + conn_retry_interval + :returns: None + + Updates the configuration of the listeners on a single amphora. + """ + # if the amphora does not yet have listeners, no need to update them. + if not loadbalancer.listeners: + LOG.debug('No listeners found to update.') + return + if amphora is None or amphora.status == consts.DELETED: + return + + # Check which HAProxy version is on the amp + haproxy_versions = self._get_haproxy_versions( + amphora, timeout_dict=timeout_dict) + # Check if version is supported + self._populate_amphora_api_version(amphora) + + has_tcp = False + certs = {} + listeners_to_update = [] + for listener in loadbalancer.listeners: + LOG.debug("%s updating listener %s on amphora %s", + self.__class__.__name__, listener.id, amphora.id) + if listener.protocol in consts.LVS_PROTOCOLS: + # Generate Keepalived LVS configuration from listener object + config = self.lvs_jinja.build_config(listener=listener) + self.clients[amphora.api_version].upload_udp_config( + amphora, listener.id, config, timeout_dict=timeout_dict) + self.clients[amphora.api_version].reload_listener( + amphora, listener.id, timeout_dict=timeout_dict) + else: + has_tcp = True + obj_id = loadbalancer.id + + try: + certs.update({ + listener.tls_certificate_id: + self._process_tls_certificates( + listener, amphora, obj_id)['tls_cert']}) + certs.update({listener.client_ca_tls_certificate_id: + self._process_secret( + listener, + listener.client_ca_tls_certificate_id, + amphora, obj_id)}) + certs.update({listener.client_crl_container_id: + self._process_secret( + listener, + listener.client_crl_container_id, + amphora, obj_id)}) + + certs.update(self._process_listener_pool_certs( + listener, amphora, obj_id)) + + listeners_to_update.append(listener) + except Exception as e: + LOG.exception('Unable to update listener %s due to ' + '"%s". 
Skipping this listener.', + listener.id, str(e)) + listener_repo = repo.ListenerRepository() + with db_api.session().begin() as session: + listener_repo.update(session, listener.id, + provisioning_status=consts.ERROR, + operating_status=consts.ERROR) + + if has_tcp: + if listeners_to_update: + # Generate HaProxy configuration from listener object + amp_details = self.clients[amphora.api_version].get_details( + amphora) + config = self.jinja_combo.build_config( + host_amphora=amphora, listeners=listeners_to_update, + tls_certs=certs, + haproxy_versions=haproxy_versions, + amp_details=amp_details) + self.clients[amphora.api_version].upload_config( + amphora, loadbalancer.id, config, + timeout_dict=timeout_dict) + self.clients[amphora.api_version].reload_listener( + amphora, loadbalancer.id, timeout_dict=timeout_dict) + else: + # If we aren't updating any listeners, make sure there are + # no listeners hanging around. For example if this update + # was called from a listener delete. + self.clients[amphora.api_version].delete_listener( + amphora, loadbalancer.id) + + def _udp_update(self, listener, vip): + LOG.debug("Amphora %s keepalivedlvs, updating " + "listener %s, vip %s", + self.__class__.__name__, listener.protocol_port, + vip.ip_address) + + for amp in listener.load_balancer.amphorae: + if amp.status != consts.DELETED: + # Generate Keepalived LVS configuration from listener object + self._populate_amphora_api_version(amp) + config = self.lvs_jinja.build_config(listener=listener) + self.clients[amp.api_version].upload_udp_config( + amp, listener.id, config) + self.clients[amp.api_version].reload_listener( + amp, listener.id) + + def update(self, loadbalancer): + for amphora in loadbalancer.amphorae: + if amphora.status != consts.DELETED: + self.update_amphora_listeners(loadbalancer, amphora) + + def upload_cert_amp(self, amp, pem): + LOG.debug("Amphora %s updating cert in REST driver " + "with amphora id %s,", + self.__class__.__name__, amp.id) + self._populate_amphora_api_version(amp) + self.clients[amp.api_version].update_cert_for_rotation(amp, pem) + + def _apply(self, func_name, loadbalancer, amphora=None, *args, **kwargs): + if amphora is None: + amphorae = loadbalancer.amphorae + else: + amphorae = [amphora] + + for amp in amphorae: + if amp.status != consts.DELETED: + self._populate_amphora_api_version( + amp, timeout_dict=args[0]) + has_tcp = False + for listener in loadbalancer.listeners: + if listener.protocol in consts.LVS_PROTOCOLS: + getattr(self.clients[amp.api_version], func_name)( + amp, listener.id, *args) + else: + has_tcp = True + if has_tcp: + getattr(self.clients[amp.api_version], func_name)( + amp, loadbalancer.id, *args) + + def reload(self, loadbalancer, amphora=None, timeout_dict=None): + self._apply('reload_listener', loadbalancer, amphora, timeout_dict) + + def start(self, loadbalancer, amphora=None, timeout_dict=None): + self._apply('start_listener', loadbalancer, amphora, timeout_dict) + + def delete(self, listener): + # Delete any UDP/SCTP listeners the old way (we didn't update the way + # they are configured) + loadbalancer = listener.load_balancer + if listener.protocol in consts.LVS_PROTOCOLS: + for amp in loadbalancer.amphorae: + if amp.status != consts.DELETED: + self._populate_amphora_api_version(amp) + self.clients[amp.api_version].delete_listener( + amp, listener.id) + return + + # In case the listener is not UDP or SCTP, things get more complicated. 
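+        # Non-LVS listeners share a single combined haproxy configuration
+        # per load balancer, so removing one of them means regenerating that
+        # combined config, or deleting it entirely if it was the last one
+        # (see _combined_config_delete below).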
+ for amp in loadbalancer.amphorae: + if amp.status != consts.DELETED: + self._combined_config_delete(amp, listener) + + def _combined_config_delete(self, amphora, listener): + # Remove the listener from the listener list on the LB before + # passing the whole thing over to update (so it'll actually delete) + # In case of amphorae in ACTIVE_STANDBY topology, ensure that we don't + # remove an already removed listener. + if listener in listener.load_balancer.listeners: + listener.load_balancer.listeners.remove(listener) + + # Check if there's any certs that we need to delete + certs = self._process_tls_certificates(listener) + certs_to_delete = set() + if certs['tls_cert']: + certs_to_delete.add(certs['tls_cert'].id) + for sni_cert in certs['sni_certs']: + certs_to_delete.add(sni_cert.id) + + # Delete them (they'll be recreated before the reload if they are + # needed for other listeners anyway) + self._populate_amphora_api_version(amphora) + for cert_id in certs_to_delete: + self.clients[amphora.api_version].delete_cert_pem( + amphora, listener.load_balancer.id, + f'{cert_id}.pem') + + # See how many non-UDP/SCTP listeners we have left + non_lvs_listener_count = len([ + 1 for li in listener.load_balancer.listeners + if li.protocol not in consts.LVS_PROTOCOLS]) + if non_lvs_listener_count > 0: + # We have other listeners, so just update is fine. + # TODO(rm_work): This is a little inefficient since this duplicates + # a lot of the detection logic that has already been done, but it + # is probably safer to reuse the existing code-path. + self.update_amphora_listeners(listener.load_balancer, amphora) + else: + # Deleting the last listener, so really do the delete + self.clients[amphora.api_version].delete_listener( + amphora, listener.load_balancer.id) + + def get_info(self, amphora, raise_retry_exception=False, + timeout_dict=None): + self._populate_amphora_api_version( + amphora, raise_retry_exception=raise_retry_exception, + timeout_dict=timeout_dict) + return self.clients[amphora.api_version].get_info( + amphora, raise_retry_exception=raise_retry_exception, + timeout_dict=timeout_dict) + + def get_diagnostics(self, amphora): + pass + + def finalize_amphora(self, amphora): + pass + + def _build_net_info(self, port, amphora, subnet, mtu=None, sriov=False): + # NOTE(blogan): using the vrrp port here because that + # is what the allowed address pairs network driver sets + # this particular port to. This does expose a bit of + # tight coupling between the network driver and amphora + # driver. We will need to revisit this to try and remove + # this tight coupling. + # NOTE (johnsom): I am loading the vrrp_ip into the + # net_info structure here so that I don't break + # compatibility with old amphora agent versions. 
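+        # Illustrative net_info result (hypothetical values):
+        # {'subnet_cidr': '10.0.0.0/24', 'gateway': '10.0.0.1',
+        #  'mac_address': 'fa:16:3e:12:34:56', 'vrrp_ip': '10.0.0.5',
+        #  'mtu': 1500, 'host_routes': [], 'additional_vips': [],
+        #  'is_sriov': False}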
+ host_routes = [{'nexthop': hr[consts.NEXTHOP], + 'destination': hr[consts.DESTINATION]} + for hr in subnet[consts.HOST_ROUTES]] + net_info = {'subnet_cidr': subnet[consts.CIDR], + 'gateway': subnet[consts.GATEWAY_IP], + 'mac_address': port[consts.MAC_ADDRESS], + 'vrrp_ip': amphora[consts.VRRP_IP], + 'mtu': mtu or port[consts.NETWORK][consts.MTU], + 'host_routes': host_routes, + 'additional_vips': [], + 'is_sriov': sriov} + return net_info + + def post_vip_plug(self, amphora, load_balancer, amphorae_network_config, + vrrp_port, vip_subnet, additional_vip_data=None): + if amphora.status != consts.DELETED: + self._populate_amphora_api_version(amphora) + port = vrrp_port.to_dict(recurse=True) + mtu = port[consts.NETWORK][consts.MTU] + LOG.debug("Post-VIP-Plugging with vrrp_ip %s vrrp_port %s", + amphora.vrrp_ip, port[consts.ID]) + sriov = False + if load_balancer.vip.vnic_type == consts.VNIC_TYPE_DIRECT: + sriov = True + net_info = self._build_net_info( + port, amphora.to_dict(), + vip_subnet.to_dict(recurse=True), mtu, sriov) + for add_vip in additional_vip_data: + add_host_routes = [{'nexthop': hr.nexthop, + 'destination': hr.destination} + for hr in add_vip.subnet.host_routes] + add_net_info = {'subnet_cidr': add_vip.subnet.cidr, + 'ip_address': add_vip.ip_address, + 'gateway': add_vip.subnet.gateway_ip, + 'host_routes': add_host_routes} + net_info['additional_vips'].append(add_net_info) + try: + self.clients[amphora.api_version].plug_vip( + amphora, load_balancer.vip.ip_address, net_info) + except exc.Conflict: + LOG.warning('VIP with MAC %(mac)s already exists on amphora, ' + 'skipping post_vip_plug', + {'mac': port[consts.MAC_ADDRESS]}) + + def post_network_plug(self, amphora, port, amphora_network_config): + fixed_ips = [] + for fixed_ip in port.fixed_ips: + host_routes = [{'nexthop': hr.nexthop, + 'destination': hr.destination} + for hr in fixed_ip.subnet.host_routes] + ip = {'ip_address': fixed_ip.ip_address, + 'subnet_cidr': fixed_ip.subnet.cidr, + 'host_routes': host_routes, + 'gateway': fixed_ip.subnet.gateway_ip} + fixed_ips.append(ip) + port_info = {'mac_address': port.mac_address, + 'fixed_ips': fixed_ips, + 'mtu': port.network.mtu, + 'is_sriov': False} + if port.vnic_type == consts.VNIC_TYPE_DIRECT: + port_info['is_sriov'] = True + if port.id == amphora.vrrp_port_id: + # We have to special-case sharing the vrrp port and pass through + # enough extra information to populate the whole VIP port + net_info = self._build_net_info( + port.to_dict(recurse=True), amphora.to_dict(), + amphora_network_config[consts.VIP_SUBNET], + port.network.mtu) + net_info['vip'] = amphora.ha_ip + port_info['vip_net_info'] = net_info + try: + self._populate_amphora_api_version(amphora) + self.clients[amphora.api_version].plug_network(amphora, port_info) + except exc.Conflict: + LOG.warning('Network with MAC %(mac)s already exists on amphora, ' + 'skipping post_network_plug', + {'mac': port.mac_address}) + + def _process_tls_certificates(self, listener, amphora=None, obj_id=None): + """Processes TLS data from the listener. 
+ + Converts and uploads PEM data to the Amphora API + + return TLS_CERT and SNI_CERTS + """ + tls_cert = None + sni_certs = [] + certs = [] + cert_filename_list = [] + + data = cert_parser.load_certificates_data( + self.cert_manager, listener) + if data['tls_cert'] is not None: + tls_cert = data['tls_cert'] + # Note, the first cert is the TLS default cert + certs.append(tls_cert) + if data['sni_certs']: + sni_certs = data['sni_certs'] + certs.extend(sni_certs) + + if amphora and obj_id: + for cert in certs: + pem = cert_parser.build_pem(cert) + md5sum = hashlib.md5( + pem, usedforsecurity=False).hexdigest() # nosec + name = f'{cert.id}.pem' + cert_filename_list.append( + os.path.join( + CONF.haproxy_amphora.base_cert_dir, obj_id, name)) + self._upload_cert(amphora, obj_id, pem, md5sum, name) + + if certs: + # Build and upload the crt-list file for haproxy + crt_list = "\n".join(cert_filename_list) + crt_list = f'{crt_list}\n'.encode() + md5sum = hashlib.md5( + crt_list, usedforsecurity=False).hexdigest() # nosec + name = f'{listener.id}.pem' + self._upload_cert(amphora, obj_id, crt_list, md5sum, name) + return {'tls_cert': tls_cert, 'sni_certs': sni_certs} + + def _process_secret(self, listener, secret_ref, amphora=None, obj_id=None): + """Get the secret from the cert manager and upload it to the amp. + + :returns: The filename of the secret in the amp. + """ + if not secret_ref: + return None + context = oslo_context.RequestContext(project_id=listener.project_id) + secret = self.cert_manager.get_secret(context, secret_ref) + try: + secret = secret.encode('utf-8') + except AttributeError: + pass + md5sum = hashlib.md5( + secret, usedforsecurity=False).hexdigest() # nosec + id = hashlib.sha1(secret).hexdigest() # nosec + name = f'{id}.pem' + + if amphora and obj_id: + self._upload_cert( + amphora, obj_id, pem=secret, md5sum=md5sum, name=name) + return name + + def _process_listener_pool_certs(self, listener, amphora, obj_id): + # {'POOL-ID': { + # 'client_cert': client_full_filename, + # 'ca_cert': ca_cert_full_filename, + # 'crl': crl_full_filename}} + pool_certs_dict = {} + for pool in listener.pools: + if pool.id not in pool_certs_dict: + pool_certs_dict[pool.id] = self._process_pool_certs( + listener, pool, amphora, obj_id) + for l7policy in listener.l7policies: + if (l7policy.redirect_pool and + l7policy.redirect_pool.id not in pool_certs_dict): + pool_certs_dict[l7policy.redirect_pool.id] = ( + self._process_pool_certs(listener, l7policy.redirect_pool, + amphora, obj_id)) + return pool_certs_dict + + def _process_pool_certs(self, listener, pool, amphora, obj_id): + pool_cert_dict = {} + + # Handle the client cert(s) and key + if pool.tls_certificate_id: + data = cert_parser.load_certificates_data(self.cert_manager, pool) + tls_cert = data['tls_cert'] + pem = cert_parser.build_pem(tls_cert) + try: + pem = pem.encode('utf-8') + except AttributeError: + pass + md5sum = hashlib.md5( + pem, usedforsecurity=False).hexdigest() # nosec + name = f'{tls_cert.id}.pem' + if amphora and obj_id: + self._upload_cert(amphora, obj_id, pem=pem, + md5sum=md5sum, name=name) + pool_cert_dict['client_cert'] = os.path.join( + CONF.haproxy_amphora.base_cert_dir, obj_id, name) + if pool.ca_tls_certificate_id: + name = self._process_secret(listener, pool.ca_tls_certificate_id, + amphora, obj_id) + pool_cert_dict['ca_cert'] = os.path.join( + CONF.haproxy_amphora.base_cert_dir, obj_id, name) + if pool.crl_container_id: + name = self._process_secret(listener, pool.crl_container_id, + amphora, obj_id) + 
pool_cert_dict['crl'] = os.path.join(
+                CONF.haproxy_amphora.base_cert_dir, obj_id, name)
+
+        return pool_cert_dict
+
+    def _upload_cert(self, amp, listener_id, pem, md5sum, name):
+        try:
+            if self.clients[amp.api_version].get_cert_md5sum(
+                    amp, listener_id, name, ignore=(404,)) == md5sum:
+                return
+        except exc.NotFound:
+            pass
+
+        self.clients[amp.api_version].upload_cert_pem(
+            amp, listener_id, name, pem)
+
+    def update_amphora_agent_config(self, amphora, agent_config,
+                                    timeout_dict=None):
+        """Update the amphora agent configuration file.
+
+        :param amphora: The amphora to update.
+        :type amphora: object
+        :param agent_config: The new amphora agent configuration.
+        :type agent_config: string
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
+        :returns: None
+
+        Note: This will mutate the amphora agent config and adopt the
+              new values.
+        """
+        try:
+            self._populate_amphora_api_version(amphora)
+            self.clients[amphora.api_version].update_agent_config(
+                amphora, agent_config, timeout_dict=timeout_dict)
+        except exc.NotFound as e:
+            LOG.debug('Amphora %s does not support the update_agent_config '
+                      'API.', amphora.id)
+            raise driver_except.AmpDriverNotImplementedError() from e
+
+    def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None):
+        """Get the interface name for an IP address.
+
+        :param amphora: The amphora to query.
+        :type amphora: octavia.db.models.Amphora
+        :param ip_address: The IP address to lookup. (IPv4 or IPv6)
+        :type ip_address: string
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
+        :type timeout_dict: dict
+        :returns: the interface name string if found.
+        :raises octavia.amphorae.drivers.haproxy.exceptions.NotFound:
+            No interface found on the amphora
+        :raises TimeOutException: The amphora didn't reply
+        """
+        self._populate_amphora_api_version(amphora, timeout_dict)
+        response_json = self.clients[amphora.api_version].get_interface(
+            amphora, ip_address, timeout_dict, log_error=False)
+        return response_json.get('interface', None)
+
+    def set_interface_rules(self, amphora: db_models.Amphora,
+                            ip_address, rules, timeout_dict=None):
+        """Sets interface firewall rules in the amphora.
+
+        :param amphora: The amphora to query.
+        :param ip_address: The IP address assigned to the interface the rules
+                           will be applied on.
+        :param rules: The list of allow rules to apply.
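+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval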
+ """ + try: + self._populate_amphora_api_version(amphora, timeout_dict) + self.clients[amphora.api_version].set_interface_rules( + amphora, ip_address, rules, timeout_dict=timeout_dict) + except exc.NotFound as e: + LOG.debug('Amphora %s does not support the set_interface_rules ' + 'API.', amphora.id) + raise driver_except.AmpDriverNotImplementedError() from e + + +# Check a custom hostname +class CustomHostNameCheckingAdapter(requests.adapters.HTTPAdapter): + def cert_verify(self, conn, url, verify, cert): + conn.assert_hostname = self.uuid + return super().cert_verify(conn, url, verify, cert) + + def init_poolmanager(self, *pool_args, **pool_kwargs): + proto = CONF.amphora_agent.agent_tls_protocol.replace('.', '_') + pool_kwargs['ssl_version'] = getattr(ssl, f"PROTOCOL_{proto}") + return super().init_poolmanager(*pool_args, **pool_kwargs) + + +class AmphoraAPIClientBase: + def __init__(self): + super().__init__() + + self.get = functools.partial(self.request, 'get') + self.post = functools.partial(self.request, 'post') + self.put = functools.partial(self.request, 'put') + self.delete = functools.partial(self.request, 'delete') + self.head = functools.partial(self.request, 'head') + + self.session = requests.Session() + self.session.cert = CONF.haproxy_amphora.client_cert + self.ssl_adapter = CustomHostNameCheckingAdapter() + self.session.mount('https://', self.ssl_adapter) + + def _base_url(http://github.com/self,%20ip,%20api_version=None): + if utils.is_ipv6_lla(ip): + ip = f'[{ip}%{CONF.haproxy_amphora.lb_network_interface}]' + elif utils.is_ipv6(ip): + ip = f'[{ip}]' + if api_version: + return (f"https://{ip}:{CONF.haproxy_amphora.bind_port}" + f"/{api_version}/") + return f"https://{ip}:{CONF.haproxy_amphora.bind_port}/" + + def request(self, method: str, amp: db_models.Amphora, path: str = '/', + timeout_dict: Optional[dict] = None, + retry_404: bool = True, raise_retry_exception: bool = False, + **kwargs): + cfg_ha_amp = CONF.haproxy_amphora + if timeout_dict is None: + timeout_dict = {} + req_conn_timeout = timeout_dict.get( + consts.REQ_CONN_TIMEOUT, cfg_ha_amp.rest_request_conn_timeout) + req_read_timeout = timeout_dict.get( + consts.REQ_READ_TIMEOUT, cfg_ha_amp.rest_request_read_timeout) + conn_max_retries = timeout_dict.get( + consts.CONN_MAX_RETRIES, cfg_ha_amp.connection_max_retries) + conn_retry_interval = timeout_dict.get( + consts.CONN_RETRY_INTERVAL, cfg_ha_amp.connection_retry_interval) + + LOG.debug("request url %s", path) + _request = getattr(self.session, method.lower()) + _url = self._base_url(http://github.com/amp.lb_network_ip,%20amp.api_version) + path + LOG.debug("request url %s", _url) + reqargs = { + 'verify': CONF.haproxy_amphora.server_ca, + 'url': _url, + 'timeout': (req_conn_timeout, req_read_timeout), } + reqargs.update(kwargs) + headers = reqargs.setdefault('headers', {}) + + headers['User-Agent'] = ( + f"Octavia HaProxy Rest Client/{amp.api_version} " + f"(https://wiki.openstack.org/wiki/Octavia)") + self.ssl_adapter.uuid = amp.id + exception = None + # Keep retrying + for dummy in range(conn_max_retries): + try: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message="A true SSLContext object is not available" + ) + r = _request(**reqargs) + LOG.debug('Connected to amphora. Response: %(resp)s', + {'resp': r}) + + content_type = r.headers.get('content-type', '') + # Check the 404 to see if it is just that the network in the + # amphora is not yet up, in which case retry. 
+                # Otherwise return the response quickly.
+                if r.status_code == 404:
+                    if not retry_404:
+                        raise exc.NotFound()
+                    LOG.debug('Got a 404 (content-type: %(content_type)s) -- '
+                              'connection data: %(content)s',
+                              {'content_type': content_type,
+                               'content': r.content})
+                    if content_type.find("application/json") == -1:
+                        LOG.debug("Amphora agent not ready.")
+                        raise requests.ConnectionError
+                    try:
+                        json_data = r.json().get('details', '')
+                        if 'No suitable network interface found' in json_data:
+                            LOG.debug("Amphora network interface not found.")
+                            raise requests.ConnectionError
+                    except json.JSONDecodeError:  # if r.json() fails
+                        pass  # TODO(rm_work) Should we do something?
+                return r
+            except (requests.ConnectionError, requests.Timeout) as e:
+                exception = e
+                LOG.warning("Could not connect to instance. Retrying.")
+                time.sleep(conn_retry_interval)
+                if raise_retry_exception:
+                    # For taskflow persistence, the cause attribute should
+                    # be serializable to JSON. Pass None, as the cause
+                    # exception is described in the exception message.
+                    raise driver_except.AmpConnectionRetry(
+                        exception=str(e)) from None
+        LOG.error("Connection retries (currently set to %(max_retries)s) "
+                  "exhausted. The amphora is unavailable. Reason: "
+                  "%(exception)s",
+                  {'max_retries': conn_max_retries,
+                   'exception': exception})
+        raise driver_except.TimeOutException()
+
+    def get_api_version(self, amp, timeout_dict=None,
+                        raise_retry_exception=False):
+        amp.api_version = None
+        r = self.get(amp, retry_404=False, timeout_dict=timeout_dict,
+                     raise_retry_exception=raise_retry_exception)
+        # Handle a 404 specially, as we don't want to log an ERROR on 404
+        exc.check_exception(r, (404,))
+        if r.status_code == 404:
+            raise exc.NotFound()
+        return r.json()
+
+
+class AmphoraAPIClient1_0(AmphoraAPIClientBase):
+    def __init__(self):
+        super().__init__()
+
+        self.start_listener = functools.partial(self._action,
+                                                consts.AMP_ACTION_START)
+        self.reload_listener = functools.partial(self._action,
+                                                 consts.AMP_ACTION_RELOAD)
+
+        self.start_vrrp = functools.partial(self._vrrp_action,
+                                            consts.AMP_ACTION_START)
+        self.stop_vrrp = functools.partial(self._vrrp_action,
+                                           consts.AMP_ACTION_STOP)
+        self.reload_vrrp = functools.partial(self._vrrp_action,
+                                             consts.AMP_ACTION_RELOAD)
+
+    def upload_config(self, amp, loadbalancer_id, config, timeout_dict=None):
+        r = self.put(
+            amp,
+            f'loadbalancer/{amp.id}/{loadbalancer_id}/haproxy',
+            timeout_dict, data=config)
+        return exc.check_exception(r)
+
+    def get_listener_status(self, amp, listener_id):
+        r = self.get(
+            amp,
+            f'listeners/{listener_id}')
+        if exc.check_exception(r):
+            return r.json()
+        return None
+
+    def _action(self, action, amp, object_id, timeout_dict=None):
+        r = self.put(
+            amp, f'loadbalancer/{object_id}/{action}',
+            timeout_dict=timeout_dict)
+        return exc.check_exception(r)
+
+    def upload_cert_pem(self, amp, loadbalancer_id, pem_filename, pem_file):
+        r = self.put(
+            amp,
+            f'loadbalancer/{loadbalancer_id}/certificates/{pem_filename}',
+            data=pem_file)
+        return exc.check_exception(r)
+
+    def get_cert_md5sum(self, amp, loadbalancer_id, pem_filename,
+                        ignore=tuple()):
+        r = self.get(
+            amp,
+            f'loadbalancer/{loadbalancer_id}/certificates/{pem_filename}')
+        if exc.check_exception(r, ignore):
+            return r.json().get("md5sum")
+        return None
+
+    def delete_cert_pem(self, amp, loadbalancer_id, pem_filename):
+        r = self.delete(
+            amp,
+            f'loadbalancer/{loadbalancer_id}/certificates/{pem_filename}')
+        return exc.check_exception(r, (404,))
+
+    def update_cert_for_rotation(self, amp,
pem_file): + r = self.put(amp, 'certificate', data=pem_file) + return exc.check_exception(r) + + def delete_listener(self, amp, object_id): + r = self.delete( + amp, f'listeners/{object_id}') + return exc.check_exception(r, (404,)) + + def get_info(self, amp, raise_retry_exception=False, + timeout_dict=None): + r = self.get(amp, "info", raise_retry_exception=raise_retry_exception, + timeout_dict=timeout_dict) + if exc.check_exception(r): + return r.json() + return None + + def get_details(self, amp): + r = self.get(amp, "details") + if exc.check_exception(r): + return r.json() + return None + + def get_all_listeners(self, amp): + r = self.get(amp, "listeners") + if exc.check_exception(r): + return r.json() + return None + + def plug_network(self, amp, port): + r = self.post(amp, 'plug/network', + json=port) + return exc.check_exception(r) + + def plug_vip(self, amp, vip, net_info): + r = self.post(amp, + f'plug/vip/{vip}', + json=net_info) + return exc.check_exception(r) + + def upload_vrrp_config(self, amp, config): + r = self.put(amp, 'vrrp/upload', data=config) + return exc.check_exception(r) + + def _vrrp_action(self, action, amp, timeout_dict=None): + r = self.put(amp, f'vrrp/{action}', + timeout_dict=timeout_dict) + return exc.check_exception(r) + + def get_interface(self, amp, ip_addr, timeout_dict=None, log_error=True): + r = self.get(amp, f'interface/{ip_addr}', + timeout_dict=timeout_dict) + return exc.check_exception(r, log_error=log_error).json() + + # The function is used for all LVS-supported protocol listener (UDP, SCTP) + def upload_udp_config(self, amp, listener_id, config, timeout_dict=None): + r = self.put( + amp, + f'listeners/{amp.id}/{listener_id}/udp_listener', timeout_dict, + data=config) + return exc.check_exception(r) + + def update_agent_config(self, amp, agent_config, timeout_dict=None): + r = self.put(amp, 'config', timeout_dict, data=agent_config) + return exc.check_exception(r) + + def set_interface_rules(self, amp, ip_address, rules, timeout_dict=None): + r = self.put(amp, f'interface/{ip_address}/rules', timeout_dict, + json=rules) + return exc.check_exception(r) diff --git a/octavia/amphorae/drivers/health/__init__.py b/octavia/amphorae/drivers/health/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/amphorae/drivers/health/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/amphorae/drivers/health/heartbeat_udp.py b/octavia/amphorae/drivers/health/heartbeat_udp.py new file mode 100644 index 0000000000..24ff60c1d1 --- /dev/null +++ b/octavia/amphorae/drivers/health/heartbeat_udp.py @@ -0,0 +1,649 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from concurrent import futures +import socket +import time +import timeit + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import timeutils +import sqlalchemy +from stevedore import driver as stevedore_driver + +from octavia.amphorae.backends.health_daemon import status_message +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.db import api as db_api +from octavia.db import repositories as repo +from octavia.statistics import stats_base + +UDP_MAX_SIZE = 64 * 1024 +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class UDPStatusGetter: + """This class defines methods that will gather heartbeats + + The heartbeats are transmitted via UDP and this class will bind to a port + and absorb them + """ + def __init__(self): + self.key = cfg.CONF.health_manager.heartbeat_key + self.ip = cfg.CONF.health_manager.bind_ip + self.port = cfg.CONF.health_manager.bind_port + self.sockaddr = None + LOG.info('attempting to listen on %(ip)s port %(port)s', + {'ip': self.ip, 'port': self.port}) + self.sock = None + self.update(self.key, self.ip, self.port) + + self.health_executor = futures.ProcessPoolExecutor( + max_workers=CONF.health_manager.health_update_threads) + self.stats_executor = futures.ProcessPoolExecutor( + max_workers=CONF.health_manager.stats_update_threads) + self.health_updater = UpdateHealthDb() + + def update(self, key, ip, port): + """Update the running config for the udp socket server + + :param key: The hmac key used to verify the UDP packets. String + :param ip: The ip address the UDP server will read from + :param port: The port the UDP server will read from + :return: None + """ + self.key = key + for addrinfo in socket.getaddrinfo(ip, port, 0, socket.SOCK_DGRAM): + ai_family = addrinfo[0] + self.sockaddr = addrinfo[4] + if self.sock is not None: + self.sock.close() + self.sock = socket.socket(ai_family, socket.SOCK_DGRAM) + self.sock.settimeout(1) + self.sock.bind(self.sockaddr) + if cfg.CONF.health_manager.sock_rlimit > 0: + rlimit = cfg.CONF.health_manager.sock_rlimit + LOG.info("setting sock rlimit to %s", rlimit) + self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, + rlimit) + break # just used the first addr getaddrinfo finds + if self.sock is None: + raise exceptions.NetworkConfig("unable to find suitable socket") + + def dorecv(self, *args, **kw): + """Waits for a UDP heart beat to be sent. + + :return: Returns the unwrapped payload and addr that sent the + heartbeat. + """ + (data, srcaddr) = self.sock.recvfrom(UDP_MAX_SIZE) + LOG.debug('Received packet from %s', srcaddr) + try: + obj = status_message.unwrap_envelope(data, self.key) + except Exception as e: + LOG.warning('Health Manager experienced an exception processing a ' + 'heartbeat message from %s. Ignoring this packet. 
' + 'Exception: %s', srcaddr, str(e)) + raise exceptions.InvalidHMACException() + obj['recv_time'] = time.time() + return obj, srcaddr[0] + + def check(self): + try: + obj, srcaddr = self.dorecv() + except socket.timeout: + # Pass here as this is an expected cycling of the listen socket + pass + except exceptions.InvalidHMACException: + # Pass here as the packet was dropped and logged already + pass + except Exception as e: + LOG.warning('Health Manager experienced an exception processing a ' + 'heartbeat packet. Ignoring this packet. ' + 'Exception: %s', str(e)) + else: + self.health_executor.submit(self.health_updater.update_health, + obj, srcaddr) + self.stats_executor.submit(update_stats, obj) + + +def update_stats(health_message): + """Parses the health message then passes it to the stats driver(s) + + :param health_message: The health message containing the listener stats + :type health_message: dict + + Example V1 message:: + + health = { + "id": "", + "listeners": { + "": { + "status": "OPEN", + "stats": { + "ereq": 0, + "conns": 0, + "totconns": 0, + "rx": 0, + "tx": 0, + }, + "pools": { + "": { + "status": "UP", + "members": {"": "ONLINE"} + } + } + } + } + } + + Example V2 message:: + + {"id": "", + "seq": 67, + "listeners": { + "": { + "status": "OPEN", + "stats": { + "tx": 0, + "rx": 0, + "conns": 0, + "totconns": 0, + "ereq": 0 + } + } + }, + "pools": { + ":": { + "status": "UP", + "members": { + "": "no check" + } + } + }, + "ver": 2 + "recv_time": time.time() + } + + Example V3 message:: + + Same as V2 message, except values are deltas rather than absolutes. + """ + version = health_message.get("ver", 2) + + deltas = False + if version >= 3: + deltas = True + + amphora_id = health_message.get('id') + listeners = health_message.get('listeners', {}) + listener_stats = [] + for listener_id, listener in listeners.items(): + listener_dict = listener.get('stats') + stats_model = data_models.ListenerStatistics( + listener_id=listener_id, + amphora_id=amphora_id, + bytes_in=listener_dict.get('rx'), + bytes_out=listener_dict.get('tx'), + active_connections=listener_dict.get('conns'), + total_connections=listener_dict.get('totconns'), + request_errors=listener_dict.get('ereq'), + received_time=health_message.get('recv_time') + ) + LOG.debug("Listener %s / Amphora %s stats: %s", + listener_id, amphora_id, stats_model.get_stats()) + listener_stats.append(stats_model) + stats_base.update_stats_via_driver(listener_stats, deltas=deltas) + + +class UpdateHealthDb: + def __init__(self): + super().__init__() + # first setup repo for amphora, listener,member(nodes),pool repo + self.amphora_repo = repo.AmphoraRepository() + self.amphora_health_repo = repo.AmphoraHealthRepository() + self.listener_repo = repo.ListenerRepository() + self.loadbalancer_repo = repo.LoadBalancerRepository() + self.member_repo = repo.MemberRepository() + self.pool_repo = repo.PoolRepository() + + @staticmethod + def _update_status(session, repo, entity_type, + entity_id, new_op_status, old_op_status): + if old_op_status.lower() != new_op_status.lower(): + LOG.debug("%s %s status has changed from %s to " + "%s, updating db.", + entity_type, entity_id, old_op_status, + new_op_status) + repo.update(session, entity_id, operating_status=new_op_status) + + def update_health(self, health, srcaddr): + # The executor will eat any exceptions from the update_health code + # so we need to wrap it and log the unhandled exception + start_time = timeit.default_timer() + try: + self._update_health(health, srcaddr) + except Exception 
as e: + LOG.exception('Health update for amphora %(amp)s encountered ' + 'error %(err)s. Skipping health update.', + {'amp': health['id'], 'err': str(e)}) + # TODO(johnsom) We need to set a warning threshold here + LOG.debug('Health Update finished in: %s seconds', + timeit.default_timer() - start_time) + + # Health heartbeat message pre-versioning with UDP listeners + # need to adjust the expected listener count + # This is for backward compatibility with Rocky pre-versioning + # heartbeat amphora. + def _update_listener_count_for_UDP(self, session, db_lb, + expected_listener_count): + # For udp listener, the udp health won't send out by amp agent. + # Once the default_pool of udp listener have the first enabled + # member, then the health will be sent out. So during this + # period, need to figure out the udp listener and ignore them + # by changing expected_listener_count. + for list_id, list_db in db_lb.get('listeners', {}).items(): + need_remove = False + if list_db['protocol'] == constants.PROTOCOL_UDP: + listener = self.listener_repo.get(session, id=list_id) + enabled_members = ([member + for member in + listener.default_pool.members + if member.enabled] + if listener.default_pool else []) + if listener.default_pool: + if not listener.default_pool.members: + need_remove = True + elif not enabled_members: + need_remove = True + else: + need_remove = True + + if need_remove: + expected_listener_count = expected_listener_count - 1 + return expected_listener_count + + def _update_health(self, health, srcaddr): + """This function is to update db info based on amphora status + + :param health: map object that contains amphora, listener, member info + :type map: string + :returns: null + + The input v1 health data structure is shown as below:: + + health = { + "id": self.FAKE_UUID_1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.UP, + "members": { + "member-id-1": constants.ONLINE} + } + } + } + } + } + + Example V2 message:: + + {"id": "", + "seq": 67, + "listeners": { + "": { + "status": "OPEN", + "stats": { + "tx": 0, + "rx": 0, + "conns": 0, + "totconns": 0, + "ereq": 0 + } + } + }, + "pools": { + ":": { + "status": "UP", + "members": { + "": "no check" + } + } + }, + "ver": 2 + } + + """ + session = db_api.get_session() + + # We need to see if all of the listeners are reporting in + with session.begin(): + db_lb = self.amphora_repo.get_lb_for_health_update(session, + health['id']) + ignore_listener_count = False + + if db_lb: + expected_listener_count = 0 + if ('PENDING' in db_lb['provisioning_status'] or + not db_lb['enabled']): + ignore_listener_count = True + else: + for key, listener in db_lb.get('listeners', {}).items(): + # disabled listeners don't report from the amphora + if listener['enabled']: + expected_listener_count += 1 + + # If this is a heartbeat older than versioning, handle + # UDP special for backward compatibility. + if 'ver' not in health: + udp_listeners = [ + l for k, l in db_lb.get('listeners', {}).items() + if l['protocol'] == constants.PROTOCOL_UDP] + if udp_listeners: + with session.begin(): + expected_listener_count = ( + self._update_listener_count_for_UDP( + session, db_lb, expected_listener_count)) + else: + with session.begin(): + amp = self.amphora_repo.get(session, id=health['id']) + # This is debug and not warning because this can happen under + # normal deleting operations. + LOG.debug('Received a health heartbeat from amphora %s with ' + 'IP %s that should not exist. 
This amphora may be ' + 'in the process of being deleted, in which case you ' + 'will only see this message a few ' + 'times', health['id'], srcaddr) + if not amp: + LOG.warning('The amphora %s with IP %s is missing from ' + 'the DB, so it cannot be automatically ' + 'deleted (the compute_id is unknown). An ' + 'operator must manually delete it from the ' + 'compute service.', health['id'], srcaddr) + return + # delete the amp right there + try: + compute = stevedore_driver.DriverManager( + namespace='octavia.compute.drivers', + name=CONF.controller_worker.compute_driver, + invoke_on_load=True + ).driver + compute.delete(amp.compute_id) + return + except Exception as e: + LOG.info("Error deleting amp %s with IP %s Error: %s", + health['id'], srcaddr, str(e)) + expected_listener_count = 0 + + listeners = health['listeners'] + + # Do not update amphora health if the reporting listener count + # does not match the expected listener count + if len(listeners) == expected_listener_count or ignore_listener_count: + + # if we're running too far behind, warn and bail + proc_delay = time.time() - health['recv_time'] + hb_interval = CONF.health_manager.heartbeat_interval + # TODO(johnsom) We need to set a warning threshold here, and + # escalate to critical when it reaches the + # heartbeat_interval + if proc_delay >= hb_interval: + LOG.warning('Amphora %(id)s health message was processed too ' + 'slowly: %(delay)ss! The system may be overloaded ' + 'or otherwise malfunctioning. This heartbeat has ' + 'been ignored and no update was made to the ' + 'amphora health entry. THIS IS NOT GOOD.', + {'id': health['id'], 'delay': proc_delay}) + return + + lock_session = db_api.get_session() + lock_session.begin() + + # if the input amphora is healthy, we update its db info + try: + self.amphora_health_repo.replace( + lock_session, health['id'], + last_update=timeutils.utcnow()) + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + lock_session.rollback() + else: + LOG.warning('Amphora %(id)s health message reports %(found)i ' + 'listeners when %(expected)i expected', + {'id': health['id'], 'found': len(listeners), + 'expected': expected_listener_count}) + + # Don't try to update status for bogus or old spares pool amphora + if not db_lb: + return + + processed_pools = [] + potential_offline_pools = {} + + # We got a heartbeat so lb is healthy until proven otherwise + if db_lb[constants.ENABLED] is False: + lb_status = constants.OFFLINE + else: + lb_status = constants.ONLINE + + health_msg_version = health.get('ver', 0) + + for listener_id in db_lb.get(constants.LISTENERS, {}): + db_listener = db_lb[constants.LISTENERS][listener_id] + db_op_status = db_listener[constants.OPERATING_STATUS] + listener_status = None + listener = None + + if listener_id not in listeners: + if (db_listener[constants.ENABLED] and + db_lb[constants.PROVISIONING_STATUS] == + constants.ACTIVE): + listener_status = constants.ERROR + else: + listener_status = constants.OFFLINE + else: + listener = listeners[listener_id] + + # OPEN = HAProxy listener status nbconn < maxconn + if listener.get('status') == constants.OPEN: + listener_status = constants.ONLINE + # FULL = HAProxy listener status not nbconn < maxconn + elif listener.get('status') == constants.FULL: + listener_status = constants.DEGRADED + if lb_status == constants.ONLINE: + lb_status = constants.DEGRADED + else: + LOG.warning(('Listener %(list)s reported status of ' + '%(status)s'), + {'list': listener_id, + 'status': 
listener.get('status')}) + + try: + if (listener_status is not None and + listener_status != db_op_status): + with session.begin(): + self._update_status( + session, self.listener_repo, constants.LISTENER, + listener_id, listener_status, db_op_status) + except sqlalchemy.orm.exc.NoResultFound: + LOG.error("Listener %s is not in DB", listener_id) + + if not listener: + continue + + if health_msg_version < 2: + raw_pools = listener['pools'] + + # normalize the pool IDs. Single process listener pools + # have the listener id appended with an ':' separator. + # Old multi-process listener pools only have a pool ID. + # This makes sure the keys are only pool IDs. + pools = {(k + ' ')[:k.rfind(':')]: v for k, v in + raw_pools.items()} + + for db_pool_id in db_lb.get('pools', {}): + # If we saw this pool already on another listener, skip it. + if db_pool_id in processed_pools: + continue + db_pool_dict = db_lb['pools'][db_pool_id] + with session.begin(): + lb_status = self._process_pool_status( + session, db_pool_id, db_pool_dict, pools, + lb_status, processed_pools, + potential_offline_pools) + + if health_msg_version >= 2: + raw_pools = health['pools'] + + # normalize the pool IDs. Single process listener pools + # have the listener id appended with an ':' separator. + # Old multi-process listener pools only have a pool ID. + # This makes sure the keys are only pool IDs. + pools = {(k + ' ')[:k.rfind(':')]: v for k, v in raw_pools.items()} + + for db_pool_id in db_lb.get('pools', {}): + # If we saw this pool already, skip it. + if db_pool_id in processed_pools: + continue + db_pool_dict = db_lb['pools'][db_pool_id] + with session.begin(): + lb_status = self._process_pool_status( + session, db_pool_id, db_pool_dict, pools, + lb_status, processed_pools, potential_offline_pools) + + for pool_id, pool in potential_offline_pools.items(): + # Skip if we eventually found a status for this pool + if pool_id in processed_pools: + continue + try: + # If the database doesn't already show the pool offline, update + if pool != constants.OFFLINE: + with session.begin(): + self._update_status( + session, self.pool_repo, constants.POOL, + pool_id, constants.OFFLINE, pool) + except sqlalchemy.orm.exc.NoResultFound: + LOG.error("Pool %s is not in DB", pool_id) + + # Update the load balancer status last + try: + if lb_status != db_lb['operating_status']: + with session.begin(): + self._update_status( + session, self.loadbalancer_repo, + constants.LOADBALANCER, db_lb['id'], lb_status, + db_lb[constants.OPERATING_STATUS]) + except sqlalchemy.orm.exc.NoResultFound: + LOG.error("Load balancer %s is not in DB", db_lb.id) + + def _process_pool_status( + self, session, pool_id, db_pool_dict, pools, lb_status, + processed_pools, potential_offline_pools): + pool_status = None + + if pool_id not in pools: + # If we don't have a status update for this pool_id + # add it to the list of potential offline pools and continue. + # We will check the potential offline pool list after we + # finish processing the status updates from all of the listeners. 
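+            # Stash the current operating status so the final pass over
+            # potential_offline_pools can tell whether an OFFLINE update
+            # is actually needed.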
+ potential_offline_pools[pool_id] = db_pool_dict['operating_status'] + return lb_status + + pool = pools[pool_id] + + processed_pools.append(pool_id) + + # UP = HAProxy backend has working or no servers + if pool.get('status') == constants.UP: + pool_status = constants.ONLINE + # DOWN = HAProxy backend has no working servers + elif pool.get('status') == constants.DOWN: + pool_status = constants.ERROR + lb_status = constants.ERROR + else: + LOG.warning(('Pool %(pool)s reported status of ' + '%(status)s'), + {'pool': pool_id, + 'status': pool.get('status')}) + + # Deal with the members that are reporting from + # the Amphora + members = pool['members'] + for member_id in db_pool_dict.get('members', {}): + member_status = None + member_db_status = ( + db_pool_dict['members'][member_id]['operating_status']) + + if member_id not in members: + if member_db_status != constants.NO_MONITOR: + member_status = constants.OFFLINE + else: + status = members[member_id] + + # Member status can be "UP" or "UP #/#" + # (transitional) + if status.startswith(constants.UP): + member_status = constants.ONLINE + # Member status can be "DOWN" or "DOWN #/#" + # (transitional) + elif status.startswith(constants.DOWN): + member_status = constants.ERROR + if pool_status == constants.ONLINE: + pool_status = constants.DEGRADED + if lb_status == constants.ONLINE: + lb_status = constants.DEGRADED + elif status == constants.DRAIN: + member_status = constants.DRAINING + elif status == constants.MAINT: + member_status = constants.OFFLINE + elif status == constants.NO_CHECK: + member_status = constants.NO_MONITOR + elif status == constants.RESTARTING: + # RESTARTING means that keepalived is restarting and a down + # member has been detected, the real status of the member + # is not clear, it might mean that the checker hasn't run + # yet. + # In this case, keep previous member_status, and wait for a + # non-transitional status. + pass + else: + LOG.warning('Member %(mem)s reported ' + 'status of %(status)s', + {'mem': member_id, + 'status': status}) + + try: + if (member_status is not None and + member_status != member_db_status): + self._update_status( + session, self.member_repo, constants.MEMBER, + member_id, member_status, member_db_status) + except sqlalchemy.orm.exc.NoResultFound: + LOG.error("Member %s is not able to update " + "in DB", member_id) + + try: + if (pool_status is not None and + pool_status != db_pool_dict['operating_status']): + self._update_status( + session, self.pool_repo, constants.POOL, + pool_id, pool_status, db_pool_dict['operating_status']) + except sqlalchemy.orm.exc.NoResultFound: + LOG.error("Pool %s is not in DB", pool_id) + + return lb_status diff --git a/octavia/amphorae/drivers/keepalived/__init__.py b/octavia/amphorae/drivers/keepalived/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/amphorae/drivers/keepalived/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/octavia/amphorae/drivers/keepalived/jinja/__init__.py b/octavia/amphorae/drivers/keepalived/jinja/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/amphorae/drivers/keepalived/jinja/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py b/octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py
new file mode 100644
index 0000000000..fe72a269aa
--- /dev/null
+++ b/octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py
@@ -0,0 +1,145 @@
+# Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ipaddress
+import os
+
+import jinja2
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from octavia.amphorae.backends.agent.api_server import util
+from octavia.common import constants
+
+
+KEEPALIVED_TEMPLATE = os.path.abspath(
+    os.path.join(os.path.dirname(__file__),
+                 'templates/keepalived_base.template'))
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class KeepalivedJinjaTemplater:
+
+    def __init__(self, keepalived_template=None):
+        """Keepalived configuration generation
+
+        :param keepalived_template: Absolute path to keepalived Jinja template
+        """
+        super().__init__()
+        self.keepalived_template = (keepalived_template if
+                                    keepalived_template else
+                                    KEEPALIVED_TEMPLATE)
+        self._jinja_env = None
+
+    def get_template(self, template_file):
+        """Returns the specified Jinja configuration template."""
+        if not self._jinja_env:
+            template_loader = jinja2.FileSystemLoader(
+                searchpath=os.path.dirname(template_file))
+            self._jinja_env = jinja2.Environment(
+                autoescape=True,
+                loader=template_loader,
+                trim_blocks=True,
+                lstrip_blocks=True)
+        return self._jinja_env.get_template(os.path.basename(template_file))
+
+    def build_keepalived_config(self, loadbalancer, amphora, amp_net_config):
+        """Renders the loadbalancer keepalived configuration for Active/Standby
+
+        :param loadbalancer: A loadbalancer object
+        :param amphora: An amphora object
+        :param amp_net_config: The amphora network config, a dict
+        """
+        # Note on keepalived configuration: The current base configuration
+        # enforces Master election whenever a high priority VRRP instance
+        # starts advertising its presence. Accordingly, the fallback behavior
+        # - which I described in the blueprint - is the default behavior.
+        # Although this is a stable behavior, this can be undesirable for
+        # several backend services.
To disable the fallback behavior, we need + # to add the "nopreempt" flag in the backup instance section. + peers_ips = [] + + # Get the VIP subnet for the amphora + additional_vip_data = amp_net_config['additional_vip_data'] + vip_subnet = amp_net_config[constants.VIP_SUBNET] + + # Sort VIPs by their IP so we can guarantee interface_index matching + sorted_add_vips = sorted(additional_vip_data, + key=lambda x: x['ip_address']) + + # The primary VIP is always first in the list + vip_list = [{ + 'ip_address': loadbalancer.vip.ip_address, + 'subnet': vip_subnet + }] + sorted_add_vips + + # Handle the case of multiple IP family types + vrrp_addr = ipaddress.ip_address(amphora.vrrp_ip) + vrrp_ipv6 = vrrp_addr.version == 6 + + # Handle all VIPs: + rendered_vips = [] + for index, add_vip in enumerate(vip_list): + # Validate the VIP address and see if it is IPv6 + vip = add_vip['ip_address'] + vip_addr = ipaddress.ip_address(vip) + vip_ipv6 = vip_addr.version == 6 + vip_cidr = add_vip['subnet']['cidr'] + + # Normalize and validate the VIP subnet CIDR + vip_network_cidr = ipaddress.ip_network( + vip_cidr).with_prefixlen + + host_routes = add_vip['subnet'].get('host_routes', []) + + # Addresses that aren't the same family as the VRRP + # interface will be in the "excluded" block + rendered_vips.append({ + 'ip_address': vip, + 'network_cidr': vip_network_cidr, + 'ipv6': vip_ipv6, + 'interface_index': index, + 'gateway': add_vip['subnet']['gateway_ip'], + 'excluded': vip_ipv6 != vrrp_ipv6, + 'host_routes': host_routes + }) + + for amp in filter( + lambda amp: amp.status == constants.AMPHORA_ALLOCATED, + loadbalancer.amphorae): + if amp.vrrp_ip != amphora.vrrp_ip: + peers_ips.append(amp.vrrp_ip) + return self.get_template(self.keepalived_template).render( + {'vrrp_group_name': loadbalancer.vrrp_group.vrrp_group_name, + 'amp_intf': amphora.vrrp_interface, + 'amp_vrrp_id': amphora.vrrp_id, + 'amp_priority': amphora.vrrp_priority, + 'vrrp_garp_refresh': + CONF.keepalived_vrrp.vrrp_garp_refresh_interval, + 'vrrp_garp_refresh_repeat': + CONF.keepalived_vrrp.vrrp_garp_refresh_count, + 'vrrp_auth_type': loadbalancer.vrrp_group.vrrp_auth_type, + 'vrrp_auth_pass': loadbalancer.vrrp_group.vrrp_auth_pass, + 'amp_vrrp_ip': amphora.vrrp_ip, + 'peers_vrrp_ips': peers_ips, + 'advert_int': loadbalancer.vrrp_group.advert_int, + 'check_script_path': util.keepalived_check_script_path(), + 'vrrp_check_interval': + CONF.keepalived_vrrp.vrrp_check_interval, + 'vrrp_fail_count': CONF.keepalived_vrrp.vrrp_fail_count, + 'vrrp_success_count': + CONF.keepalived_vrrp.vrrp_success_count, + 'vips': rendered_vips}, + constants=constants) diff --git a/octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template b/octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template new file mode 100644 index 0000000000..d44f48cbda --- /dev/null +++ b/octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template @@ -0,0 +1,77 @@ +{# +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +#} +vrrp_script check_script { + script {{ check_script_path }} + interval {{ vrrp_check_interval }} + fall {{ vrrp_fail_count }} + rise {{ vrrp_success_count }} +} + +vrrp_instance {{ vrrp_group_name }} { + interface {{ amp_intf }} + virtual_router_id {{ amp_vrrp_id }} + priority {{ amp_priority }} + nopreempt + accept + garp_master_refresh {{ vrrp_garp_refresh }} + garp_master_refresh_repeat {{ vrrp_garp_refresh_repeat }} + advert_int {{ advert_int }} + authentication { + auth_type {{ vrrp_auth_type }} + auth_pass {{ vrrp_auth_pass }} + } + + unicast_src_ip {{ amp_vrrp_ip }} + unicast_peer { +{% for amp_vrrp_ip in peers_vrrp_ips %} + {{ amp_vrrp_ip }} +{% endfor %} + } + + virtual_ipaddress { +{% for vip in vips if not vip.excluded %} + {{ vip.ip_address }} +{% endfor %} + } + + virtual_ipaddress_excluded { +{% for vip in vips if vip.excluded %} + {{ vip.ip_address }} +{% endfor %} + } + + virtual_routes { +{% for vip in vips %} + {{ vip.network_cidr }} dev {{ amp_intf }} src {{ vip.ip_address }} scope link table 1 + {% if vip.gateway %} + default via {{ vip.gateway }} dev {{ amp_intf }} onlink table 1 + {% endif %} + {% for host_route in vip.host_routes %} + {{ host_route.destination }} dev {{ amp_intf }} gateway {{ host_route.nexthop }} onlink table 1 + {% endfor %} +{% endfor %} + } + + virtual_rules { +{% for vip in vips %} + from {{ vip.ip_address }}/{{ '128' if vip.ipv6 else '32' }} table 1 priority 100 +{% endfor %} + } + + track_script { + check_script + } +} diff --git a/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py b/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py new file mode 100644 index 0000000000..33a2e7bbf8 --- /dev/null +++ b/octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py @@ -0,0 +1,109 @@ +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from octavia.amphorae.drivers import driver_base +from octavia.amphorae.drivers.keepalived.jinja import jinja_cfg +from octavia.common import constants + +LOG = logging.getLogger(__name__) + + +class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin): + def __init__(self): + super().__init__() + + # The Mixed class must define a self.client object for the + # AmphoraApiClient + + def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora, + timeout_dict=None): + """Update amphora of the loadbalancer with a new VRRP configuration + + :param loadbalancer: loadbalancer object + :param amphorae_network_config: amphorae network configurations + :param amphora: The amphora object to update. + :param timeout_dict: Dictionary of timeout values for calls to the + amphora. May contain: req_conn_timeout, + req_read_timeout, conn_max_retries, + conn_retry_interval + """ + if amphora.status != constants.AMPHORA_ALLOCATED: + LOG.debug('update_vrrp_conf called for un-allocated amphora %s. 
' + 'Ignoring.', amphora.id) + return + + templater = jinja_cfg.KeepalivedJinjaTemplater() + + LOG.debug("Update amphora %s VRRP configuration.", amphora.id) + + self._populate_amphora_api_version(amphora, + timeout_dict=timeout_dict) + + # Generate Keepalived configuration from loadbalancer object + config = templater.build_keepalived_config( + loadbalancer, amphora, amphorae_network_config[amphora.id]) + self.clients[amphora.api_version].upload_vrrp_config(amphora, config) + + def stop_vrrp_service(self, loadbalancer): + """Stop the vrrp services running on the loadbalancer's amphorae + + :param loadbalancer: loadbalancer object + """ + LOG.info("Stop loadbalancer %s amphora VRRP Service.", + loadbalancer.id) + + for amp in filter( + lambda amp: amp.status == constants.AMPHORA_ALLOCATED, + loadbalancer.amphorae): + + self._populate_amphora_api_version(amp) + self.clients[amp.api_version].stop_vrrp(amp) + + def start_vrrp_service(self, amphora, timeout_dict=None): + """Start the VRRP services on an amphorae. + + :param amphora: amphora object + :param timeout_dict: Dictionary of timeout values for calls to the + amphora. May contain: req_conn_timeout, + req_read_timeout, conn_max_retries, + conn_retry_interval + """ + if amphora.status != constants.AMPHORA_ALLOCATED: + LOG.debug('start_vrrp_service called for un-allocated amphora %s. ' + 'Ignoring.', amphora.id) + return + + LOG.info("Start amphora %s VRRP Service.", amphora.id) + + self._populate_amphora_api_version(amphora, + timeout_dict=timeout_dict) + self.clients[amphora.api_version].start_vrrp(amphora, + timeout_dict=timeout_dict) + + def reload_vrrp_service(self, loadbalancer): + """Reload the VRRP services of all amphorae of the loadbalancer + + :param loadbalancer: loadbalancer object + """ + LOG.info("Reload loadbalancer %s amphora VRRP Service.", + loadbalancer.id) + + for amp in filter( + lambda amp: amp.status == constants.AMPHORA_ALLOCATED, + loadbalancer.amphorae): + + self._populate_amphora_api_version(amp) + self.clients[amp.api_version].reload_vrrp(amp) diff --git a/octavia/amphorae/drivers/noop_driver/__init__.py b/octavia/amphorae/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/amphorae/drivers/noop_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/amphorae/drivers/noop_driver/driver.py b/octavia/amphorae/drivers/noop_driver/driver.py new file mode 100644 index 0000000000..98906886da --- /dev/null +++ b/octavia/amphorae/drivers/noop_driver/driver.py @@ -0,0 +1,223 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import random + +from oslo_log import log as logging + +from octavia.amphorae.drivers import driver_base +from octavia.common import data_models +from octavia.db import api as db_apis +from octavia.db import repositories + +LOG = logging.getLogger(__name__) + + +class NoopManager: + + def __init__(self): + super().__init__() + self.amphoraconfig = {} + + def update_amphora_listeners(self, loadbalancer, amphora, timeout_dict): + amphora_id = amphora.id + for listener in loadbalancer.listeners: + LOG.debug("Amphora noop driver update_amphora_listeners, " + "listener %s, amphora %s, timeouts %s", listener.id, + amphora_id, timeout_dict) + self.amphoraconfig[(listener.id, amphora_id)] = ( + listener, amphora_id, timeout_dict, "update_amp") + + # Add some dummy stats to the DB when using noop driver + listener_stats_repo = repositories.ListenerStatisticsRepository() + stats_obj = data_models.ListenerStatistics( + listener_id=listener.id, + amphora_id=amphora.id, + bytes_in=random.randrange(1000000000), + bytes_out=random.randrange(1000000000), + active_connections=random.randrange(1000000000), + total_connections=random.randrange(1000000000), + request_errors=random.randrange(1000000000), + received_time=float(random.randrange(1000000000)), + ) + listener_stats_repo.replace(session=db_apis.get_session(), + stats_obj=stats_obj) + + def update(self, loadbalancer): + LOG.debug("Amphora %s no-op, update listener %s, vip %s", + self.__class__.__name__, + tuple(li.protocol_port for li in loadbalancer.listeners), + loadbalancer.vip.ip_address) + self.amphoraconfig[ + (tuple(li.protocol_port for li in loadbalancer.listeners), + loadbalancer.vip.ip_address)] = (loadbalancer.listeners, + loadbalancer.vip, + 'active') + + def start(self, loadbalancer, amphora=None, timeout_dict=None): + LOG.debug("Amphora %s no-op, start listeners, lb %s, amp %s " + "timeouts %s", self.__class__.__name__, loadbalancer.id, + amphora, timeout_dict) + self.amphoraconfig[ + (loadbalancer.id, amphora.id)] = (loadbalancer, amphora, + 'start') + + def reload(self, loadbalancer, amphora=None, timeout_dict=None): + LOG.debug("Amphora %s no-op, reload listeners, lb %s, amp %s, " + "timeouts %s", self.__class__.__name__, loadbalancer.id, + amphora, timeout_dict) + self.amphoraconfig[ + (loadbalancer.id, amphora.id)] = (loadbalancer, amphora, + 'reload') + + def delete(self, listener): + LOG.debug("Amphora %s no-op, delete listener %s, vip %s", + self.__class__.__name__, + listener.protocol_port, + listener.load_balancer.vip.ip_address) + self.amphoraconfig[(listener.protocol_port, + listener.load_balancer.vip.ip_address)] = ( + listener, listener.load_balancer.vip, 'delete') + + def get_info(self, amphora, raise_retry_exception=False): + LOG.debug("Amphora %s no-op, info amphora %s", + self.__class__.__name__, amphora.id) + self.amphoraconfig[amphora.id] = (amphora.id, 'get_info') + + def get_diagnostics(self, amphora): + LOG.debug("Amphora %s no-op, get diagnostics amphora %s", + self.__class__.__name__, amphora.id) + self.amphoraconfig[amphora.id] = (amphora.id, 'get_diagnostics') + + def finalize_amphora(self, amphora): 
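+        # No-op: record the call in self.amphoraconfig so tests can
+        # assert that it was invoked.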
+ LOG.debug("Amphora %s no-op, finalize amphora %s", + self.__class__.__name__, amphora.id) + self.amphoraconfig[amphora.id] = (amphora.id, 'finalize amphora') + + def post_network_plug(self, amphora, port, amphora_network_config): + LOG.debug("Amphora %s no-op, post network plug amphora %s, port %s, " + "amphora_network_config %s", self.__class__.__name__, + amphora.id, port.id, amphora_network_config) + self.amphoraconfig[amphora.id, port.id] = (amphora.id, port.id, + 'post_network_plug') + + def post_vip_plug(self, amphora, load_balancer, amphorae_network_config, + vrrp_port, vip_subnet, additional_vip_data=None): + LOG.debug("Amphora %s no-op, post vip plug load balancer %s", + self.__class__.__name__, load_balancer.id) + self.amphoraconfig[(load_balancer.id, id(amphorae_network_config))] = ( + load_balancer.id, amphorae_network_config, 'post_vip_plug') + + def upload_cert_amp(self, amphora, pem_file): + LOG.debug("Amphora %s no-op, upload cert amphora %s,with pem file %s", + self.__class__.__name__, amphora.id, pem_file) + self.amphoraconfig[amphora.id, pem_file] = (amphora.id, pem_file, + 'update_amp_cert_file') + + def update_amphora_agent_config(self, amphora, agent_config): + LOG.debug("Amphora %s no-op, update agent config amphora " + "%s, with agent config %s", + self.__class__.__name__, amphora.id, agent_config) + self.amphoraconfig[amphora.id, agent_config] = ( + amphora.id, agent_config, 'update_amphora_agent_config') + + def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): + LOG.debug("Amphora %s no-op, get interface from amphora %s for IP %s", + self.__class__.__name__, amphora.id, ip_address) + if ip_address == '198.51.100.99': + return "noop0" + return None + + +class NoopAmphoraLoadBalancerDriver( + driver_base.AmphoraLoadBalancerDriver, + driver_base.VRRPDriverMixin): + def __init__(self): + super().__init__() + self.driver = NoopManager() + + def update_amphora_listeners(self, loadbalancer, amphora, timeout_dict): + + self.driver.update_amphora_listeners(loadbalancer, amphora, + timeout_dict) + + def update(self, loadbalancer): + + self.driver.update(loadbalancer) + + def start(self, loadbalancer, amphora=None, timeout_dict=None): + + self.driver.start(loadbalancer, amphora, timeout_dict) + + def reload(self, loadbalancer, amphora=None, timeout_dict=None): + + self.driver.reload(loadbalancer, amphora, timeout_dict) + + def delete(self, listener): + + self.driver.delete(listener) + + def get_info(self, amphora, raise_retry_exception=False): + + self.driver.get_info(amphora, + raise_retry_exception=raise_retry_exception) + + def get_diagnostics(self, amphora): + + self.driver.get_diagnostics(amphora) + + def finalize_amphora(self, amphora): + + self.driver.finalize_amphora(amphora) + + def post_network_plug(self, amphora, port, amphora_network_config): + + self.driver.post_network_plug(amphora, port, amphora_network_config) + + def post_vip_plug(self, amphora, load_balancer, amphorae_network_config, + vrrp_port, vip_subnet, additional_vip_data=None): + + self.driver.post_vip_plug(amphora, + load_balancer, amphorae_network_config, + vrrp_port, vip_subnet, + additional_vip_data=additional_vip_data) + + def upload_cert_amp(self, amphora, pem_file): + + self.driver.upload_cert_amp(amphora, pem_file) + + def update_amphora_agent_config(self, amphora, agent_config): + self.driver.update_amphora_agent_config(amphora, agent_config) + + def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None): + return self.driver.get_interface_from_ip(amphora, 
ip_address, + timeout_dict) + + def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora, + timeout_dict=None): + pass + + def stop_vrrp_service(self, loadbalancer): + pass + + def start_vrrp_service(self, amphora, timeout_dict=None): + pass + + def reload_vrrp_service(self, loadbalancer): + pass + + def check(self, amphora, timeout_dict=None): + pass + + def set_interface_rules(self, amphora, ip_address, rules): + pass diff --git a/octavia/api/__init__.py b/octavia/api/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/api/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/api/app.py b/octavia/api/app.py new file mode 100644 index 0000000000..f5290a244a --- /dev/null +++ b/octavia/api/app.py @@ -0,0 +1,105 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
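+
+# For example (illustrative usage, not part of the module itself), a WSGI
+# server can load this API application with something like:
+#
+#     from octavia.api import app as api_app
+#     application = api_app.setup_app(argv=[])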
+ +import sys + +import keystonemiddleware.audit as audit_middleware +from oslo_config import cfg +from oslo_log import log as logging +from oslo_middleware import cors +from oslo_middleware import http_proxy_to_wsgi +from oslo_middleware import request_id +from oslo_middleware import sizelimit +from pecan import configuration as pecan_configuration +from pecan import make_app as pecan_make_app + +from octavia.api import config as app_config +from octavia.api.drivers import driver_factory +from octavia.common import config +from octavia.common import constants +from octavia.common import exceptions +from octavia.common import keystone +from octavia.common import service as octavia_service + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +def get_pecan_config(): + """Returns the pecan config.""" + filename = app_config.__file__.replace('.pyc', '.py') + return pecan_configuration.conf_from_file(filename) + + +def _init_drivers(): + """Initialize provider drivers.""" + for provider in CONF.api_settings.enabled_provider_drivers: + driver_factory.get_driver(provider) + + +def setup_app(pecan_config=None, debug=False, argv=None): + """Creates and returns a pecan wsgi app.""" + if argv is None: + argv = sys.argv + octavia_service.prepare_service(argv) + cfg.CONF.log_opt_values(LOG, logging.INFO) + + _init_drivers() + + if not pecan_config: + pecan_config = get_pecan_config() + pecan_configuration.set_config(dict(pecan_config), overwrite=True) + + return pecan_make_app( + pecan_config.app.root, + wrap_app=_wrap_app, + debug=debug, + hooks=pecan_config.app.hooks, + wsme=pecan_config.wsme + ) + + +def _wrap_app(app): + """Wraps wsgi app with additional middlewares.""" + app = request_id.RequestId(app) + + if CONF.audit.enabled: + try: + app = audit_middleware.AuditMiddleware( + app, + audit_map_file=CONF.audit.audit_map_file, + ignore_req_list=CONF.audit.ignore_req_list + ) + except (OSError, audit_middleware.PycadfAuditApiConfigError) as e: + raise exceptions.InputFileError( + file_name=CONF.audit.audit_map_file, + reason=e + ) + + if cfg.CONF.api_settings.auth_strategy == constants.KEYSTONE: + app = keystone.SkippingAuthProtocol(app, {}) + + app = http_proxy_to_wsgi.HTTPProxyToWSGI(app) + + # This should be the last middleware in the list (which results in + # it being the first in the middleware chain). This is to ensure + # that any errors thrown by other middleware, such as an auth + # middleware - are annotated with CORS headers, and thus accessible + # by the browser. + config.set_cors_middleware_defaults() + app = cors.CORS(app, cfg.CONF) + + app = sizelimit.RequestBodySizeLimiter(app, cfg.CONF) + + return app diff --git a/octavia/api/common/__init__.py b/octavia/api/common/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/api/common/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/octavia/api/common/hooks.py b/octavia/api/common/hooks.py new file mode 100644 index 0000000000..d7e65cd74d --- /dev/null +++ b/octavia/api/common/hooks.py @@ -0,0 +1,75 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from pecan import hooks +from webob import acceptparse +from webob import exc + +from octavia.api.common import pagination +from octavia.api.common import utils +from octavia.common import constants +from octavia.common import context +from octavia.i18n import _ + +_HEALTHCHECK_PATHS = ['/healthcheck', '/load-balancer/healthcheck'] + + +class ContentTypeHook(hooks.PecanHook): + """Force the request content type to JSON if that is acceptable.""" + + def on_route(self, state): + # Oslo healthcheck middleware has its own content type handling + # so we need to bypass the Octavia content type restrictions. + if state.request.path in _HEALTHCHECK_PATHS: + return + # TODO(johnsom) Testing for an empty string is a workaround for an + # openstacksdk bug present up to the initial + # antelope release of openstacksdk. This means the + # octavia dashboard would also be impacted. + # This can be removed once antelope is EOL. + # See: https://review.opendev.org/c/openstack/openstacksdk/+/876669 + if state.request.accept and state.request.accept.header_value != '': + best_matches = state.request.accept.acceptable_offers( + [constants.APPLICATION_JSON]) + if not best_matches: + # The API reference says we always respond with JSON + state.request.accept = acceptparse.create_accept_header( + constants.APPLICATION_JSON) + msg = _('Only content type %s is accepted.') + raise exc.HTTPNotAcceptable( + msg % constants.APPLICATION_JSON, + json_formatter=utils.json_error_formatter) + + # Force application/json with no other options for the request + state.request.accept = acceptparse.create_accept_header( + constants.APPLICATION_JSON) + + +class ContextHook(hooks.PecanHook): + """Configures a request context and attaches it to the request.""" + + def on_route(self, state): + context_obj = context.RequestContext.from_environ( + state.request.environ) + state.request.context['octavia_context'] = context_obj + + +class QueryParametersHook(hooks.PecanHook): + + def before(self, state): + if state.request.method != 'GET': + return + + state.request.context[ + constants.PAGINATION_HELPER] = pagination.PaginationHelper( + state.request.params.mixed()) diff --git a/octavia/api/common/pagination.py b/octavia/api/common/pagination.py new file mode 100644 index 0000000000..ec233c8e4a --- /dev/null +++ b/octavia/api/common/pagination.py @@ -0,0 +1,396 @@ +# Copyright 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import itertools + +from oslo_log import log as logging +from pecan import request +import sqlalchemy +from sqlalchemy.orm import aliased +import sqlalchemy.sql as sa_sql + +from octavia.api.common import types +from octavia.common.config import cfg +from octavia.common import constants +from octavia.common import exceptions +from octavia.db import base_models +from octavia.db import models + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class PaginationHelper: + """Class helping to interact with pagination functionality + + Pass this class to `db.repositories` to apply it on query + """ + _auxiliary_arguments = ('limit', 'marker', + 'sort', 'sort_key', 'sort_dir', + 'fields', 'page_reverse', + ) + + def __init__(self, params, sort_dir=constants.DEFAULT_SORT_DIR): + """Pagination Helper takes params and a default sort direction + + :param params: Contains the following: + limit: maximum number of items to return + marker: the last item of the previous page; we return + the next results after this value. + sort: array of attr by which results should be sorted + :param sort_dir: default direction to sort (asc, desc) + """ + self.marker = params.get('marker') + self.sort_dir = self._validate_sort_dir(sort_dir) + self.limit = self._parse_limit(params) + self.sort_keys = self._parse_sort_keys(params) + self.params = params + self.filters = None + self.page_reverse = params.get('page_reverse', 'False') + + @staticmethod + def _parse_limit(params): + if CONF.api_settings.pagination_max_limit == 'infinite': + page_max_limit = None + else: + page_max_limit = int(CONF.api_settings.pagination_max_limit) + limit = params.get('limit', page_max_limit) + try: + # Deal with limit being a string or int meaning 'Unlimited' + if limit == 'infinite' or int(limit) < 1: + limit = None + # If we don't have a max, just use whatever limit is specified + elif page_max_limit is None: + limit = int(limit) + # Otherwise, we need to compare against the max + else: + limit = min(int(limit), page_max_limit) + except ValueError as e: + raise exceptions.InvalidLimit(key=limit) from e + return limit + + def _parse_sort_keys(self, params): + sort_keys_dirs = [] + sort = params.get('sort') + sort_keys = params.get('sort_key') + if sort: + for sort_dir_key in sort.split(","): + comps = sort_dir_key.split(":") + if len(comps) == 1: # Use default sort order + sort_keys_dirs.append((comps[0], self.sort_dir)) + elif len(comps) == 2: + sort_keys_dirs.append( + (comps[0], self._validate_sort_dir(comps[1]))) + else: + raise exceptions.InvalidSortKey(key=comps) + elif sort_keys: + sort_keys = sort_keys.split(',') + sort_dirs = params.get('sort_dir') + if not sort_dirs: + sort_dirs = [self.sort_dir] * len(sort_keys) + else: + sort_dirs = sort_dirs.split(',') + + if len(sort_dirs) < len(sort_keys): + sort_dirs += [self.sort_dir] * (len(sort_keys) - + len(sort_dirs)) + for sk, sd in zip(sort_keys, sort_dirs): + sort_keys_dirs.append((sk, self._validate_sort_dir(sd))) + + return sort_keys_dirs + + def _parse_marker(self, session, model): + return 
session.query(model).filter_by(id=self.marker).one_or_none() + + @staticmethod + def _get_default_column_value(column_type): + """Return the default value of the column from the DB table + + In the PostgreSQL case, if no proper default value is set, a + psycopg2.DataError will be thrown. + """ + type_schema = { + 'datetime': None, + 'big_integer': 0, + 'integer': 0, + 'string': '' + } + + if isinstance(column_type, sa_sql.type_api.Variant): + return PaginationHelper._get_default_column_value(column_type.impl) + + return type_schema[column_type.__visit_name__] + + @staticmethod + def _validate_sort_dir(sort_dir): + sort_dir = sort_dir.lower() + if sort_dir not in constants.ALLOWED_SORT_DIR: + raise exceptions.InvalidSortDirection(key=sort_dir) + return sort_dir + + def _make_links(self, model_list): + if CONF.api_settings.api_base_uri: + path_url = (f"{CONF.api_settings.api_base_uri.rstrip('/')}" + f"{request.path}") + else: + path_url = request.path_url + links = [] + if model_list: + prev_attr = [f"limit={self.limit}"] + if self.params.get('sort'): + prev_attr.append(f"sort={self.params.get('sort')}") + if self.params.get('sort_key'): + prev_attr.append(f"sort_key={self.params.get('sort_key')}") + next_attr = copy.copy(prev_attr) + if self.marker: + prev_attr.append(f"marker={model_list[0].get('id')}") + prev_attr.append("page_reverse=True") + prev_link = { + "rel": "previous", + "href": f"{path_url}?{'&'.join(prev_attr)}" + } + links.append(prev_link) + # TODO(rm_work) Do we need to know when there are more vs exact? + # We safely know if we have a full page, but it might include the + # last element or it might not; it is unclear + if self.limit is None or len(model_list) >= self.limit: + next_attr.append(f"marker={model_list[-1].get('id')}") + next_link = { + "rel": "next", + "href": f"{path_url}?{'&'.join(next_attr)}" + } + links.append(next_link) + links = [types.PageType(**link) for link in links] + return links + + def _apply_tags_filtering(self, params, model, query): + if not getattr(model, "_tags", None): + return query + + if 'tags' in params: + tags = params.pop('tags') + + for tag in tags: + # This requires a multi-join to the tags table, + # so we must use aliases for each one. + tag_alias = aliased(base_models.Tags) + query = query.join(tag_alias, model._tags) + query = query.filter(tag_alias.tag == tag) + + if 'tags-any' in params: + tags = params.pop('tags-any') + tag_alias = aliased(base_models.Tags) + query = query.join(tag_alias, model._tags) + query = query.filter(tag_alias.tag.in_(tags)) + + if 'not-tags' in params: + tags = params.pop('not-tags') + subq = query.session.query(model.id) + for tag in tags: + tag_alias = aliased(base_models.Tags) + subq = subq.join(tag_alias, model._tags) + subq = subq.filter(tag_alias.tag == tag) + + query = query.filter(~model.id.in_(subq)) + + if 'not-tags-any' in params: + tags = params.pop('not-tags-any') + query = query.filter( + ~model._tags.any(base_models.Tags.tag.in_(tags))) + + return query + + @staticmethod + def _prepare_tags_list(param): + """Split comma-separated tags and return a flat list of tags.""" + if not isinstance(param, list): + param = [param] + return list(itertools.chain.from_iterable( + tag.split(',') for tag in param)) + + def apply(self, query, model, enforce_valid_params=True): + """Returns a query with sorting / pagination criteria added. + + Pagination works by requiring a unique sort_key specified by sort_keys. + (If sort_keys is not unique, then we risk looping through values.)
+ We use the last row in the previous page as the pagination 'marker'. + So we must return values that follow the passed marker in the order. + With a single-valued sort_key, this would be easy: sort_key > X. + With a compound-valued sort_key (k1, k2, k3), we must repeat the + comparison for each key to reproduce the lexicographical ordering: + (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) + We also have to cope with different sort directions. + Typically, the id of the last row is used as the client-facing + pagination marker; the actual marker object must then be fetched from + the db and passed in to us as the marker. + :param query: the query object to which we should add + paging/sorting/filtering + :param model: the ORM model class + :param enforce_valid_params: check for invalid entries in self.params + + :rtype: sqlalchemy.orm.query.Query + :returns: The query with sorting/pagination/filtering added. + """ + + # Add filtering + if CONF.api_settings.allow_filtering: + # Exclude (valid) arguments that are not used for data filtering + filter_params = {k: v for k, v in self.params.items() + if k not in self._auxiliary_arguments} + + secondary_query_filter = filter_params.pop( + "project_id", None) if (model == models.Amphora) else None + + # Translate arguments from API standard to data model's field name + filter_params = ( + model.__v2_wsme__.translate_dict_keys_to_data_model( + filter_params) + ) + if 'loadbalancer_id' in filter_params: + filter_params['load_balancer_id'] = filter_params.pop( + 'loadbalancer_id') + + # Pop the 'tags' related parameters off before handling the + # other filters. Then apply the 'tags' filters after the + # other filters have been applied. + tag_params = {} + if 'tags' in filter_params: + tag_params['tags'] = self._prepare_tags_list( + filter_params.pop('tags')) + if 'tags-any' in filter_params: + tag_params['tags-any'] = self._prepare_tags_list( + filter_params.pop('tags-any')) + if 'not-tags' in filter_params: + tag_params['not-tags'] = self._prepare_tags_list( + filter_params.pop('not-tags')) + if 'not-tags-any' in filter_params: + tag_params['not-tags-any'] = self._prepare_tags_list( + filter_params.pop('not-tags-any')) + + # Drop invalid arguments + self.filters = {k: v for (k, v) in filter_params.items() + if k in vars(model.__data_model__())} + + if enforce_valid_params and ( + len(self.filters) < len(filter_params) + ): + raise exceptions.InvalidFilterArgument() + + query = model.apply_filter(query, model, self.filters) + if secondary_query_filter is not None: + query = query.filter(model.load_balancer.has( + project_id=secondary_query_filter)) + + # Apply tags filtering for the models which support tags. + query = self._apply_tags_filtering(tag_params, model, query) + + # Add sorting + if CONF.api_settings.allow_sorting: + # Add default sort keys (if they are OK for the model) + keys_only = [k[0] for k in self.sort_keys] + for key in constants.DEFAULT_SORT_KEYS: + if key not in keys_only and hasattr(model, key): + self.sort_keys.append((key, self.sort_dir)) + + for current_sort_key, current_sort_dir in self.sort_keys: + # Translate sort_key from API standard to data model's name + current_sort_key = ( + model.__v2_wsme__.translate_key_to_data_model( + current_sort_key)) + sort_dir_func = { + constants.ASC: sqlalchemy.asc, + constants.DESC: sqlalchemy.desc, + }[current_sort_dir] + + try: + # The translated object may be a nested parameter + # such as vip.ip_address, so handle that case by + # joining with the nested table. + if '.'
in current_sort_key: + parent, child = current_sort_key.split('.') + parent_obj = getattr(model, parent) + query = query.join(parent_obj) + sort_key_attr = child + else: + sort_key_attr = getattr(model, current_sort_key) + except AttributeError as e: + raise exceptions.InvalidSortKey( + key=current_sort_key) from e + query = query.order_by(sort_dir_func(sort_key_attr)) + + # Add pagination + if CONF.api_settings.allow_pagination: + default = '' # Default to an empty string if NULL + if self.marker is not None: + marker_object = self._parse_marker(query.session, model) + if not marker_object: + raise exceptions.InvalidMarker(key=self.marker) + marker_values = [] + for sort_key, _ in self.sort_keys: + v = getattr(marker_object, sort_key) + if v is None: + v = default + marker_values.append(v) + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i in range(len(self.sort_keys)): + crit_attrs = [] + for j in range(i): + model_attr = getattr(model, self.sort_keys[j][0]) + default = PaginationHelper._get_default_column_value( + model_attr.property.columns[0].type) + attr = sa_sql.expression.case( + (model_attr.isnot(None), model_attr), + else_=default) + crit_attrs.append(attr == marker_values[j]) + + model_attr = getattr(model, self.sort_keys[i][0]) + default = PaginationHelper._get_default_column_value( + model_attr.property.columns[0].type) + attr = sa_sql.expression.case( + (model_attr.isnot(None), model_attr), + else_=default) + this_sort_dir = self.sort_keys[i][1] + if this_sort_dir == constants.DESC: + if self.page_reverse == "True": + crit_attrs.append(attr > marker_values[i]) + else: + crit_attrs.append(attr < marker_values[i]) + elif this_sort_dir == constants.ASC: + if self.page_reverse == "True": + crit_attrs.append(attr < marker_values[i]) + else: + crit_attrs.append(attr > marker_values[i]) + else: + raise exceptions.InvalidSortDirection( + key=this_sort_dir) + + criteria = sa_sql.and_(*crit_attrs) + criteria_list.append(criteria) + + f = sa_sql.or_(*criteria_list) + query = query.filter(f) + + if self.limit is not None: + query = query.limit(self.limit) + + model_list = query.all() + + links = None + if CONF.api_settings.allow_pagination: + links = self._make_links(model_list) + + return model_list, links diff --git a/octavia/api/common/types.py b/octavia/api/common/types.py new file mode 100644 index 0000000000..56ae72fbbf --- /dev/null +++ b/octavia/api/common/types.py @@ -0,0 +1,255 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
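The marker criteria that PaginationHelper.apply() assembles in SQLAlchemy above can be hard to picture. A standalone plain-Python sketch of the same lexicographic predicate from the docstring, (k1 > X1) or (k1 == X1 and k2 > X2) or ..., using dicts instead of ORM rows (names are illustrative, not part of the Octavia API):

    def follows_marker(row, marker, sort_keys):
        # sort_keys is [(key, 'asc' | 'desc'), ...]; returns True when
        # 'row' sorts strictly after 'marker' under that compound key.
        criteria = []
        for i, (key, direction) in enumerate(sort_keys):
            # All earlier keys must match the marker exactly...
            crit = [row[k] == marker[k] for k, _ in sort_keys[:i]]
            # ...and this key must be strictly past the marker value.
            if direction == 'asc':
                crit.append(row[key] > marker[key])
            else:
                crit.append(row[key] < marker[key])
            criteria.append(all(crit))
        return any(criteria)

    rows = [{'name': 'a', 'id': 1}, {'name': 'a', 'id': 2},
            {'name': 'b', 'id': 1}]
    marker = {'name': 'a', 'id': 1}
    page = [r for r in rows
            if follows_marker(r, marker, [('name', 'asc'), ('id', 'asc')])]
    assert page == [{'name': 'a', 'id': 2}, {'name': 'b', 'id': 1}]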
+ +import copy +import ipaddress + +from dateutil import parser +from wsme import types as wtypes + +from octavia.common import constants +from octavia.common import exceptions +from octavia.common import validate + + +class IPAddressType(wtypes.UserType): + basetype = str + name = 'ipaddress' + + @staticmethod + def validate(value): + """Validates whether value is an IPv4 or IPv6 address.""" + try: + wtypes.IPv4AddressType.validate(value) + return value + except ValueError: + try: + wtypes.IPv6AddressType.validate(value) + return value + except ValueError as e: + error = 'Value should be IPv4 or IPv6 format' + raise ValueError(error) from e + + +class CidrType(wtypes.UserType): + basetype = str + name = 'cidr' + + @staticmethod + def validate(value): + """Validates whether value is an IPv4 or IPv6 CIDR.""" + try: + return ipaddress.ip_network(value, strict=False).with_prefixlen + except Exception as e: + error = 'Value should be IPv4 or IPv6 CIDR format' + raise ValueError(error) from e + + +class AlpnProtocolType(wtypes.UserType): + basetype = str + name = 'alpn_protocol' + + @staticmethod + def validate(value): + """Validates whether value is a valid ALPN protocol ID.""" + if value in constants.SUPPORTED_ALPN_PROTOCOLS: + return value + error = 'Value should be a valid ALPN protocol ID' + raise ValueError(error) + + +class URLType(wtypes.UserType): + basetype = str + name = 'url' + + def __init__(self, require_scheme=True): + super().__init__() + self.require_scheme = require_scheme + + def validate(self, value): + try: + validate.url(http://github.com/value,%20require_scheme=self.require_scheme) + except exceptions.InvalidURL as e: + error = 'Value must be a valid URL string' + raise ValueError(error) from e + return value + + +class URLPathType(wtypes.UserType): + basetype = str + name = 'url_path' + + @staticmethod + def validate(value): + try: + validate.url_path(value) + except exceptions.InvalidURLPath as e: + error = 'Value must be a valid URL Path string' + raise ValueError(error) from e + return value + + +class BaseMeta(wtypes.BaseMeta): + def __new__(mcs, name, bases, dct): + def get_tenant_id(self): + tenant_id = getattr(self, '_tenant_id', wtypes.Unset) + # If tenant_id was explicitly set to Unset, return that + if tenant_id is wtypes.Unset and self._unset_tenant: + return tenant_id + # Otherwise, assume we can return project_id + return self.project_id + + def set_tenant_id(self, tenant_id): + self._tenant_id = tenant_id + + if tenant_id is wtypes.Unset: + # Record that tenant_id was explicitly Unset + self._unset_tenant = True + else: + # Reset 'unset' state, and update project_id as well + self._unset_tenant = False + self.project_id = tenant_id + + if 'project_id' in dct and 'tenant_id' not in dct: + dct['tenant_id'] = wtypes.wsproperty( + wtypes.StringType(max_length=36), + get_tenant_id, set_tenant_id) + # This will let us know if tenant_id was explicitly set to Unset + dct['_unset_tenant'] = False + return super().__new__(mcs, name, bases, dct) + + +class BaseType(wtypes.Base, metaclass=BaseMeta): + @classmethod + def _full_response(cls): + return False + + @classmethod + def from_data_model(cls, data_model, children=False): + """Converts data_model to Octavia WSME type. + + :param data_model: data model to convert from + :param children: convert child data models + """ + type_dict = data_model.to_dict() + # We need to have json convertible data for storing it in persistence + # jobboard backend. 
+ for k, v in type_dict.items(): + if ('_at' in k or 'expiration' in k) and v is not None: + type_dict[k] = parser.parse(v) + + if not hasattr(cls, '_type_to_model_map'): + return cls(**type_dict) + + dm_to_type_map = {value: key + for key, value in cls._type_to_model_map.items()} + + new_dict = copy.deepcopy(type_dict) + for key, value in type_dict.items(): + if isinstance(value, dict): + for child_key, child_value in value.items(): + if '.'.join([key, child_key]) in dm_to_type_map: + new_dict['_'.join([key, child_key])] = child_value + elif key in ['name', 'description'] and value is None: + new_dict[key] = '' + else: + if key in dm_to_type_map: + new_dict[dm_to_type_map[key]] = value + del new_dict[key] + return cls(**new_dict) + + @classmethod + def translate_dict_keys_to_data_model(cls, wsme_dict): + """Translate the keys from the wsme class type to the data_model.""" + if not hasattr(cls, '_type_to_model_map'): + return wsme_dict + res = {} + for (k, v) in wsme_dict.items(): + if k in cls._type_to_model_map: + k = cls._type_to_model_map[k] + if '.' in k: + parent, child = k.split('.') + if parent not in res: + res[parent] = {} + res[parent][child] = v + continue + res[k] = v + return res + + @classmethod + def translate_key_to_data_model(cls, key): + """Translate the key from the wsme class type to the data_model.""" + if not hasattr(cls, '_type_to_model_map') or ( + key not in cls._type_to_model_map): + return key + return cls._type_to_model_map[key] + + def to_dict(self, render_unsets=False): + """Converts Octavia WSME type to dictionary. + + :param render_unsets: If True, will convert items that are WSME Unset + types to None. If False, does not add the item. + """ + # Set project_id equal to tenant_id if project_id is unset and + # tenant_id is set + if hasattr(self, 'project_id') and hasattr(self, 'tenant_id'): + # pylint: disable=access-member-before-definition + if (isinstance(self.project_id, wtypes.UnsetType) and + not isinstance(self.tenant_id, wtypes.UnsetType)): + self.project_id = self.tenant_id + if hasattr(self, 'admin_state_up') and getattr( + self, 'admin_state_up') is None: + # This situation is hit when a request explicitly sets + # admin_state_up to null. If users set this field to null, + # then we treat it as False + self.admin_state_up = bool(self.admin_state_up) + wsme_dict = {} + for attr in dir(self): + if attr.startswith('_'): + continue + value = getattr(self, attr, None) + # TODO(blogan): Investigate wsme types handling the duality of + # tenant_id and project_id in a clean way. One way could be + # wsme.rest.json.fromjson and using the @fromjson.when_object + # decorator.
+ if attr == 'tenant_id': + continue + if value and callable(value): + continue + if value and isinstance(value, BaseType): + value = value.to_dict(render_unsets=render_unsets) + if value and isinstance(value, list): + value = [val.to_dict(render_unsets=render_unsets) + if isinstance(val, BaseType) else val + for val in value] + if isinstance(value, wtypes.UnsetType): + if render_unsets: + value = None + else: + continue + wsme_dict[attr] = value + return self.translate_dict_keys_to_data_model(wsme_dict) + + +class IdOnlyType(BaseType): + id = wtypes.wsattr(wtypes.UuidType(), mandatory=True) + + +class NameOnlyType(BaseType): + name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True) + + +class PageType(BaseType): + href = wtypes.StringType() + rel = wtypes.StringType() diff --git a/octavia/api/common/utils.py b/octavia/api/common/utils.py new file mode 100644 index 0000000000..c1b379cd99 --- /dev/null +++ b/octavia/api/common/utils.py @@ -0,0 +1,44 @@ +# Copyright 2022 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_middleware import request_id +import webob + +from octavia.common import constants + + +# Inspired by the OpenStack Placement service utils.py +def json_error_formatter(body, status, title, environ): + """A json_formatter for webob exceptions. + + Follows API-WG guidelines at + http://specs.openstack.org/openstack/api-wg/guidelines/errors.html + """ + # Clear out the html that webob sneaks in. + body = webob.exc.strip_tags(body) + # Get status code out of status message. webob's error formatter + # only passes entire status string. + status_code = int(status.split(None, 1)[0]) + error_dict = { + constants.STATUS: status_code, + constants.TITLE: title, + constants.DETAIL: body + } + + # If the request id middleware has had a chance to add an id, + # put it in the error response. + if request_id.ENV_REQUEST_ID in environ: + error_dict[constants.REQUEST_ID] = environ[request_id.ENV_REQUEST_ID] + + return {constants.ERRORS: [error_dict]} diff --git a/octavia/api/config.py b/octavia/api/config.py new file mode 100644 index 0000000000..dad6cb5b1d --- /dev/null +++ b/octavia/api/config.py @@ -0,0 +1,35 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from octavia.api.common import hooks + +# Pecan Application Configurations +# See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa +app = { + 'root': 'octavia.api.root_controller.RootController', + 'modules': ['octavia.api'], + 'hooks': [ + hooks.ContentTypeHook(), + hooks.ContextHook(), + hooks.QueryParametersHook()], + 'debug': False +} + +# WSME Configurations +# See https://wsme.readthedocs.org/en/latest/integrate.html#configuration +wsme = { + # Provider driver uses 501 if the driver is not installed. + # Don't dump a stack trace for 501s + 'debug': False +} diff --git a/octavia/api/drivers/__init__.py b/octavia/api/drivers/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/api/drivers/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/api/drivers/amphora_driver/__init__.py b/octavia/api/drivers/amphora_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/api/drivers/amphora_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/api/drivers/amphora_driver/availability_zone_schema.py b/octavia/api/drivers/amphora_driver/availability_zone_schema.py new file mode 100644 index 0000000000..a1288d0ee2 --- /dev/null +++ b/octavia/api/drivers/amphora_driver/availability_zone_schema.py @@ -0,0 +1,57 @@ +# Copyright 2018 Rackspace US Inc. All rights reserved. +# Copyright 2019 Verizon Media +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia.common import constants as consts + +# This is a JSON schema validation dictionary +# https://json-schema.org/latest/json-schema-validation.html +# +# Note: This is used to generate the amphora driver "supported availability +# zone metadata" dictionary. Each property should include a description +# for the user to understand what this availability zone setting does. 
+# +# Where possible, the property name should match the configuration file name +# for the setting. The configuration file setting is the default when a +# setting is not defined in an availability zone profile. + +SUPPORTED_AVAILABILITY_ZONE_SCHEMA = { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Octavia Amphora Driver Availability Zone Metadata Schema", + "description": "This schema is used to validate new availability zone " + "profiles submitted for use in an amphora driver " + "availability zone.", + "type": "object", + "additionalProperties": False, + "properties": { + consts.COMPUTE_ZONE: { + "type": "string", + "description": "The compute availability zone." + }, + consts.VOLUME_ZONE: { + "type": "string", + "description": "The volume availability zone." + }, + consts.MANAGEMENT_NETWORK: { + "type": "string", + "description": "The management network ID for the amphora." + }, + consts.VALID_VIP_NETWORKS: { + "type": "array", + "description": "List of network IDs that are allowed for VIP use. " + "This overrides/replaces the list of allowed " + "networks configured in `octavia.conf`." + } + } +} diff --git a/octavia/api/drivers/amphora_driver/flavor_schema.py b/octavia/api/drivers/amphora_driver/flavor_schema.py new file mode 100644 index 0000000000..8c3b2d7985 --- /dev/null +++ b/octavia/api/drivers/amphora_driver/flavor_schema.py @@ -0,0 +1,62 @@ +# Copyright 2018 Rackspace US Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia.common import constants as consts + +# This is a JSON schema validation dictionary +# https://json-schema.org/latest/json-schema-validation.html +# +# Note: This is used to generate the amphora driver "supported flavor +# metadata" dictionary. Each property should include a description +# for the user to understand what this flavor setting does. +# +# Where possible, the property name should match the configuration file name +# for the setting. The configuration file setting is the default when a +# setting is not defined in a flavor profile. + +SUPPORTED_FLAVOR_SCHEMA = { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Octavia Amphora Driver Flavor Metadata Schema", + "description": "This schema is used to validate new flavor profiles " + "submitted for use in an amphora driver flavor profile.", + "type": "object", + "additionalProperties": False, + "properties": { + consts.LOADBALANCER_TOPOLOGY: { + "type": "string", + "description": "The load balancer topology. One of: " + "SINGLE - One amphora per load balancer. " + "ACTIVE_STANDBY - Two amphora per load balancer.", + "enum": list(consts.SUPPORTED_LB_TOPOLOGIES) + }, + consts.COMPUTE_FLAVOR: { + "type": "string", + "description": "The compute driver flavor ID." + }, + consts.AMP_IMAGE_TAG: { + "type": "string", + "description": "The amphora image tag." + }, + consts.SRIOV_VIP: { + "type": "boolean", + "description": "When true, the VIP port will be created using an " + "SR-IOV VF port." 
+ }, + consts.ALLOW_MEMBER_SRIOV: { + "type": "boolean", + "description": "When true, users can request a member port be " + "SR-IOV enabled at member creation time." + } + } +} diff --git a/octavia/api/drivers/amphora_driver/v2/__init__.py b/octavia/api/drivers/amphora_driver/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/api/drivers/amphora_driver/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/api/drivers/amphora_driver/v2/driver.py b/octavia/api/drivers/amphora_driver/v2/driver.py new file mode 100644 index 0000000000..444adbcc0a --- /dev/null +++ b/octavia/api/drivers/amphora_driver/v2/driver.py @@ -0,0 +1,612 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
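To see how a flavor profile is checked against SUPPORTED_FLAVOR_SCHEMA above, here is a trimmed, runnable sketch using jsonschema directly. The literal property names assume consts.LOADBALANCER_TOPOLOGY and consts.COMPUTE_FLAVOR resolve to 'loadbalancer_topology' and 'compute_flavor'; the schema is abbreviated for illustration:

    from jsonschema import exceptions as js_exceptions
    from jsonschema import validate

    flavor_schema_sketch = {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "type": "object",
        "additionalProperties": False,
        "properties": {
            "loadbalancer_topology": {
                "type": "string",
                "enum": ["SINGLE", "ACTIVE_STANDBY"]},
            "compute_flavor": {"type": "string"},
        },
    }

    # A known key with an allowed value validates silently.
    validate({"loadbalancer_topology": "ACTIVE_STANDBY"},
             flavor_schema_sketch)

    # "additionalProperties": False rejects unknown keys.
    try:
        validate({"unknown_key": True}, flavor_schema_sketch)
    except js_exceptions.ValidationError as e:
        print(e.message)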
+ +from jsonschema import exceptions as js_exceptions +from jsonschema import validate + +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging as messaging +from stevedore import driver as stevedore_driver + +from octavia_lib.api.drivers import data_models as driver_dm +from octavia_lib.api.drivers import exceptions +from octavia_lib.api.drivers import provider_base as driver_base +from octavia_lib.common import constants as lib_consts + +from octavia.api.drivers.amphora_driver import availability_zone_schema +from octavia.api.drivers.amphora_driver import flavor_schema +from octavia.api.drivers import utils as driver_utils +from octavia.common import constants as consts +from octavia.common import data_models +from octavia.common import rpc +from octavia.common import utils +from octavia.db import api as db_apis +from octavia.db import repositories +from octavia.network import base as network_base + +CONF = cfg.CONF +CONF.import_group('oslo_messaging', 'octavia.common.config') +LOG = logging.getLogger(__name__) +AMPHORA_SUPPORTED_LB_ALGORITHMS = [ + consts.LB_ALGORITHM_ROUND_ROBIN, + consts.LB_ALGORITHM_SOURCE_IP, + consts.LB_ALGORITHM_LEAST_CONNECTIONS] + +AMPHORA_SUPPORTED_PROTOCOLS = [ + lib_consts.PROTOCOL_TCP, + lib_consts.PROTOCOL_HTTP, + lib_consts.PROTOCOL_HTTPS, + lib_consts.PROTOCOL_TERMINATED_HTTPS, + lib_consts.PROTOCOL_PROXY, + lib_consts.PROTOCOL_PROXYV2, + lib_consts.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP, + lib_consts.PROTOCOL_PROMETHEUS, +] + +VALID_L7POLICY_LISTENER_PROTOCOLS = [ + lib_consts.PROTOCOL_HTTP, + lib_consts.PROTOCOL_TERMINATED_HTTPS +] + + +class AmphoraProviderDriver(driver_base.ProviderDriver): + def __init__(self): + super().__init__() + self.target = messaging.Target( + namespace=consts.RPC_NAMESPACE_CONTROLLER_AGENT, + topic=consts.TOPIC_AMPHORA_V2, version="2.0", fanout=False) + self.client = rpc.get_client(self.target) + self.repositories = repositories.Repositories() + self.fernet = utils.get_server_certs_key_passphrases_fernet() + + def _validate_pool_algorithm(self, pool): + if pool.lb_algorithm not in AMPHORA_SUPPORTED_LB_ALGORITHMS: + msg = (f'Amphora provider does not support {pool.lb_algorithm} ' + f'algorithm.') + raise exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + + def _validate_listener_protocol(self, listener): + if listener.protocol not in AMPHORA_SUPPORTED_PROTOCOLS: + msg = ('Amphora provider does not support %s protocol. ' + 'Supported: %s' + % (listener.protocol, + ", ".join(AMPHORA_SUPPORTED_PROTOCOLS))) + raise exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + + def _validate_alpn_protocols(self, obj): + if not obj.alpn_protocols: + return + supported = consts.AMPHORA_SUPPORTED_ALPN_PROTOCOLS + not_supported = set(obj.alpn_protocols) - set(supported) + if not_supported: + msg = ('Amphora provider does not support %s ALPN protocol(s). 
' + 'Supported: %s' + % (", ".join(not_supported), ", ".join(supported))) + raise exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + + # Load Balancer + def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary, + additional_vip_dicts): + vip_obj = driver_utils.provider_vip_dict_to_vip_obj(vip_dictionary) + add_vip_objs = [ + driver_utils.provider_additional_vip_dict_to_additional_vip_obj( + add_vip) + for add_vip in additional_vip_dicts] + lb_obj = data_models.LoadBalancer(id=loadbalancer_id, + project_id=project_id, vip=vip_obj, + additional_vips=add_vip_objs) + + network_driver = utils.get_network_driver() + vip_network = network_driver.get_network( + vip_dictionary[lib_consts.VIP_NETWORK_ID]) + if not vip_network.port_security_enabled: + message = "Port security must be enabled on the VIP network." + raise exceptions.DriverError(user_fault_string=message, + operator_fault_string=message) + + try: + vip, add_vips = network_driver.allocate_vip(lb_obj) + except network_base.VIPInUseException as e: + message = str(e) + if getattr(e, 'orig_msg', None) is not None: + message = e.orig_msg + raise exceptions.Conflict(user_fault_string=message, + operator_fault_string=message) + except network_base.AllocateVIPException as e: + message = str(e) + if getattr(e, 'orig_msg', None) is not None: + message = e.orig_msg + raise exceptions.DriverError(user_fault_string=message, + operator_fault_string=message) + + LOG.info('Amphora provider created VIP port %s for load balancer %s.', + vip.port_id, loadbalancer_id) + vip_return_dict = driver_utils.vip_dict_to_provider_dict(vip.to_dict()) + add_return_dicts = [driver_utils.additional_vip_dict_to_provider_dict( + add_vip.to_dict()) for add_vip in add_vips] + return vip_return_dict, add_return_dicts + + # TODO(johnsom) convert this to octavia_lib constant flavor + # once octavia is transitioned to use octavia_lib + def loadbalancer_create(self, loadbalancer): + if loadbalancer.flavor == driver_dm.Unset: + loadbalancer.flavor = None + if loadbalancer.availability_zone == driver_dm.Unset: + loadbalancer.availability_zone = None + payload = {consts.LOADBALANCER: loadbalancer.to_dict(), + consts.FLAVOR: loadbalancer.flavor, + consts.AVAILABILITY_ZONE: loadbalancer.availability_zone} + self.client.cast({}, 'create_load_balancer', **payload) + + def loadbalancer_delete(self, loadbalancer, cascade=False): + payload = {consts.LOADBALANCER: loadbalancer.to_dict(), + 'cascade': cascade} + self.client.cast({}, 'delete_load_balancer', **payload) + + def loadbalancer_failover(self, loadbalancer_id): + payload = {consts.LOAD_BALANCER_ID: loadbalancer_id} + self.client.cast({}, 'failover_load_balancer', **payload) + + def loadbalancer_update(self, original_load_balancer, new_loadbalancer): + # Adapt the provider data model to the queue schema + lb_dict = new_loadbalancer.to_dict() + if 'admin_state_up' in lb_dict: + lb_dict['enabled'] = lb_dict.pop('admin_state_up') + # Put the qos_policy_id back under the vip element the controller + # expects + vip_qos_policy_id = lb_dict.pop('vip_qos_policy_id', None) + lb_dict.pop(consts.LOADBALANCER_ID) + if vip_qos_policy_id: + vip_dict = {"qos_policy_id": vip_qos_policy_id} + lb_dict["vip"] = vip_dict + + payload = {consts.ORIGINAL_LOADBALANCER: + original_load_balancer.to_dict(), + consts.LOAD_BALANCER_UPDATES: lb_dict} + self.client.cast({}, 'update_load_balancer', **payload) + + def _encrypt_tls_container_data(self, tls_container_data): + for key, val in 
tls_container_data.items(): + if isinstance(val, bytes): + tls_container_data[key] = self.fernet.encrypt(val) + elif isinstance(val, list): + encrypt_vals = [] + for i in val: + if isinstance(i, bytes): + encrypt_vals.append(self.fernet.encrypt(i)) + else: + encrypt_vals.append(i) + tls_container_data[key] = encrypt_vals + + def _encrypt_listener_dict(self, listener_dict): + # We need to encrypt the user cert/key data for sending it + # over messaging. + if listener_dict.get(consts.DEFAULT_TLS_CONTAINER_DATA, False): + container_data = listener_dict[consts.DEFAULT_TLS_CONTAINER_DATA] + self._encrypt_tls_container_data(container_data) + if listener_dict.get(consts.SNI_CONTAINER_DATA, False): + sni_list = [] + for sni_data in listener_dict[consts.SNI_CONTAINER_DATA]: + self._encrypt_tls_container_data(sni_data) + sni_list.append(sni_data) + if sni_list: + listener_dict[consts.SNI_CONTAINER_DATA] = sni_list + + # Listener + def listener_create(self, listener): + self._validate_listener_protocol(listener) + self._validate_alpn_protocols(listener) + payload = {consts.LISTENER: listener.to_dict()} + self._encrypt_listener_dict(payload[consts.LISTENER]) + + self.client.cast({}, 'create_listener', **payload) + + def listener_delete(self, listener): + payload = {consts.LISTENER: listener.to_dict()} + self.client.cast({}, 'delete_listener', **payload) + + def listener_update(self, old_listener, new_listener): + self._validate_alpn_protocols(new_listener) + original_listener = old_listener.to_dict() + listener_updates = new_listener.to_dict() + + self._encrypt_listener_dict(original_listener) + self._encrypt_listener_dict(listener_updates) + + payload = {consts.ORIGINAL_LISTENER: original_listener, + consts.LISTENER_UPDATES: listener_updates} + self.client.cast({}, 'update_listener', **payload) + + # Pool + def _pool_convert_to_dict(self, pool): + pool_dict = pool.to_dict(recurse=True) + if 'admin_state_up' in pool_dict: + pool_dict['enabled'] = pool_dict.pop('admin_state_up') + if 'tls_container_ref' in pool_dict: + pool_dict['tls_certificate_id'] = pool_dict.pop( + 'tls_container_ref') + pool_dict.pop('tls_container_data', None) + if 'ca_tls_container_ref' in pool_dict: + pool_dict['ca_tls_certificate_id'] = pool_dict.pop( + 'ca_tls_container_ref') + pool_dict.pop('ca_tls_container_data', None) + if 'crl_container_ref' in pool_dict: + pool_dict['crl_container_id'] = pool_dict.pop('crl_container_ref') + pool_dict.pop('crl_container_data', None) + return pool_dict + + def pool_create(self, pool): + self._validate_pool_algorithm(pool) + self._validate_alpn_protocols(pool) + payload = {consts.POOL: self._pool_convert_to_dict(pool)} + self.client.cast({}, 'create_pool', **payload) + + def pool_delete(self, pool): + payload = {consts.POOL: pool.to_dict(recurse=True)} + self.client.cast({}, 'delete_pool', **payload) + + def pool_update(self, old_pool, new_pool): + self._validate_alpn_protocols(new_pool) + if new_pool.lb_algorithm: + self._validate_pool_algorithm(new_pool) + pool_dict = self._pool_convert_to_dict(new_pool) + pool_dict.pop('pool_id') + payload = {consts.ORIGINAL_POOL: old_pool.to_dict(), + consts.POOL_UPDATES: pool_dict} + self.client.cast({}, 'update_pool', **payload) + + # Member + def member_create(self, member): + pool_id = member.pool_id + session = db_apis.get_session() + with session.begin(): + db_pool = self.repositories.pool.get(session, + id=pool_id) + self._validate_members(db_pool, [member]) + + payload = {consts.MEMBER: member.to_dict()} + self.client.cast({}, 'create_member', 
**payload) + + def member_delete(self, member): + payload = {consts.MEMBER: member.to_dict()} + self.client.cast({}, 'delete_member', **payload) + + def member_update(self, old_member, new_member): + original_member = old_member.to_dict() + member_updates = new_member.to_dict() + if 'admin_state_up' in member_updates: + member_updates['enabled'] = member_updates.pop('admin_state_up') + member_updates.pop(consts.MEMBER_ID) + payload = {consts.ORIGINAL_MEMBER: original_member, + consts.MEMBER_UPDATES: member_updates} + self.client.cast({}, 'update_member', **payload) + + def member_batch_update(self, pool_id, members): + # The DB should not have updated yet, so we can still use the pool + session = db_apis.get_session() + with session.begin(): + db_pool = self.repositories.pool.get(session, id=pool_id) + + self._validate_members(db_pool, members) + + old_members = db_pool.members + + old_member_ids = [m.id for m in old_members] + # The driver will always pass objects with IDs. + new_member_ids = [m.member_id for m in members] + + # Find members that are brand new or updated + new_members = [] + updated_members = [] + for m in members: + if m.member_id not in old_member_ids: + new_members.append(m) + else: + member_dict = m.to_dict(render_unsets=False) + member_dict['id'] = member_dict.pop('member_id') + if 'address' in member_dict: + member_dict['ip_address'] = member_dict.pop('address') + if 'admin_state_up' in member_dict: + member_dict['enabled'] = member_dict.pop('admin_state_up') + updated_members.append(member_dict) + + # Find members that are deleted + deleted_members = [] + for m in old_members: + if m.id not in new_member_ids: + deleted_members.append(m) + + payload = {'old_members': [m.to_dict() for m in deleted_members], + 'new_members': [m.to_dict() for m in new_members], + 'updated_members': updated_members} + self.client.cast({}, 'batch_update_members', **payload) + + def _validate_members(self, db_pool, members): + if db_pool.protocol in consts.LVS_PROTOCOLS: + # For SCTP/UDP LBs: + # Allow ipv4 member if there's at least one ipv4 VIP + # Allow ipv6 member if there's at least one ipv6 VIP + for member in members: + member_is_ipv6 = utils.is_ipv6(member.address) + + for listener in db_pool.listeners: + lb = listener.load_balancer + vips = [lb.vip] + vips.extend(lb.additional_vips) + lb_has_ipv4 = any(utils.is_ipv4(vip.ip_address) + for vip in vips) + lb_has_ipv6 = any(utils.is_ipv6(vip.ip_address) + for vip in vips) + + if ((member_is_ipv6 and not lb_has_ipv6) or + (not member_is_ipv6 and not lb_has_ipv4)): + msg = ("This provider doesn't support mixing IPv4 and " + "IPv6 addresses for its VIP and members in {} " + "load balancers.".format(db_pool.protocol)) + raise exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + + # Health Monitor + def health_monitor_create(self, healthmonitor): + payload = {consts.HEALTH_MONITOR: healthmonitor.to_dict()} + self.client.cast({}, 'create_health_monitor', **payload) + + def health_monitor_delete(self, healthmonitor): + payload = {consts.HEALTH_MONITOR: healthmonitor.to_dict()} + self.client.cast({}, 'delete_health_monitor', **payload) + + def health_monitor_update(self, old_healthmonitor, new_healthmonitor): + healthmon_dict = new_healthmonitor.to_dict() + if 'admin_state_up' in healthmon_dict: + healthmon_dict['enabled'] = healthmon_dict.pop('admin_state_up') + if 'max_retries_down' in healthmon_dict: + healthmon_dict['fall_threshold'] = healthmon_dict.pop( + 'max_retries_down') + if 'max_retries' in 
healthmon_dict: + healthmon_dict['rise_threshold'] = healthmon_dict.pop( + 'max_retries') + healthmon_dict.pop('healthmonitor_id') + + payload = {consts.ORIGINAL_HEALTH_MONITOR: old_healthmonitor.to_dict(), + consts.HEALTH_MONITOR_UPDATES: healthmon_dict} + self.client.cast({}, 'update_health_monitor', **payload) + + # L7 Policy + def l7policy_create(self, l7policy): + session = db_apis.get_session() + with session.begin(): + db_listener = self.repositories.listener.get( + session, id=l7policy.listener_id) + if db_listener.protocol not in VALID_L7POLICY_LISTENER_PROTOCOLS: + msg = ('%s protocol listeners do not support L7 policies' % ( + db_listener.protocol)) + raise exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + payload = {consts.L7POLICY: l7policy.to_dict()} + self.client.cast({}, 'create_l7policy', **payload) + + def l7policy_delete(self, l7policy): + payload = {consts.L7POLICY: l7policy.to_dict()} + self.client.cast({}, 'delete_l7policy', **payload) + + def l7policy_update(self, old_l7policy, new_l7policy): + l7policy_dict = new_l7policy.to_dict() + if 'admin_state_up' in l7policy_dict: + l7policy_dict['enabled'] = l7policy_dict.pop(consts.ADMIN_STATE_UP) + l7policy_dict.pop(consts.L7POLICY_ID) + + payload = {consts.ORIGINAL_L7POLICY: old_l7policy.to_dict(), + consts.L7POLICY_UPDATES: l7policy_dict} + self.client.cast({}, 'update_l7policy', **payload) + + # L7 Rule + def l7rule_create(self, l7rule): + payload = {consts.L7RULE: l7rule.to_dict()} + self.client.cast({}, 'create_l7rule', **payload) + + def l7rule_delete(self, l7rule): + payload = {consts.L7RULE: l7rule.to_dict()} + self.client.cast({}, 'delete_l7rule', **payload) + + def l7rule_update(self, old_l7rule, new_l7rule): + l7rule_dict = new_l7rule.to_dict() + if consts.ADMIN_STATE_UP in l7rule_dict: + l7rule_dict['enabled'] = l7rule_dict.pop(consts.ADMIN_STATE_UP) + l7rule_dict.pop(consts.L7RULE_ID) + + payload = {consts.ORIGINAL_L7RULE: old_l7rule.to_dict(), + consts.L7RULE_UPDATES: l7rule_dict} + self.client.cast({}, 'update_l7rule', **payload) + + # Flavor + def get_supported_flavor_metadata(self): + """Returns the valid flavor metadata keys and descriptions. + + This extracts the valid flavor metadata keys and descriptions + from the JSON validation schema and returns it as a dictionary. + + :return: Dictionary of flavor metadata keys and descriptions. + :raises DriverError: An unexpected error occurred. + """ + try: + props = flavor_schema.SUPPORTED_FLAVOR_SCHEMA['properties'] + return {k: v.get('description', '') for k, v in props.items()} + except Exception as e: + raise exceptions.DriverError( + user_fault_string='Failed to get the supported flavor ' + 'metadata due to: {}'.format(str(e)), + operator_fault_string='Failed to get the supported flavor ' + 'metadata due to: {}'.format(str(e))) + + def validate_flavor(self, flavor_dict): + """Validates flavor profile data. + + This will validate a flavor profile dataset against the flavor + settings the amphora driver supports. + + :param flavor_dict: The flavor dictionary to validate. + :type flavor: dict + :return: None + :raises DriverError: An unexpected error occurred. + :raises UnsupportedOptionError: If the driver does not support + one of the flavor settings. 
+ """ + try: + validate(flavor_dict, flavor_schema.SUPPORTED_FLAVOR_SCHEMA) + except js_exceptions.ValidationError as e: + error_object = '' + if e.relative_path: + error_object = f'{e.relative_path[0]} ' + raise exceptions.UnsupportedOptionError( + user_fault_string=f'{error_object}{e.message}', + operator_fault_string=str(e)) + except Exception as e: + raise exceptions.DriverError( + user_fault_string='Failed to validate the flavor metadata ' + 'due to: {}'.format(str(e)), + operator_fault_string='Failed to validate the flavor metadata ' + 'due to: {}'.format(str(e))) + compute_flavor = flavor_dict.get(consts.COMPUTE_FLAVOR, None) + if compute_flavor: + compute_driver = stevedore_driver.DriverManager( + namespace='octavia.compute.drivers', + name=CONF.controller_worker.compute_driver, + invoke_on_load=True + ).driver + + # TODO(johnsom) Fix this to raise a NotFound error + # when the octavia-lib supports it. + compute_driver.validate_flavor(compute_flavor) + + amp_image_tag = flavor_dict.get(consts.AMP_IMAGE_TAG, None) + if amp_image_tag: + image_driver = stevedore_driver.DriverManager( + namespace='octavia.image.drivers', + name=CONF.controller_worker.image_driver, + invoke_on_load=True + ).driver + + try: + image_driver.get_image_id_by_tag( + amp_image_tag, CONF.controller_worker.amp_image_owner_id) + except Exception as e: + raise exceptions.NotFound( + user_fault_string='Failed to find an image with tag {} ' + 'due to: {}'.format( + amp_image_tag, str(e)), + operator_fault_string='Failed to find an image with tag ' + '{} due to: {}'.format( + amp_image_tag, str(e))) + + # Availability Zone + def get_supported_availability_zone_metadata(self): + """Returns the valid availability zone metadata keys and descriptions. + + This extracts the valid availability zone metadata keys and + descriptions from the JSON validation schema and returns it as a + dictionary. + + :return: Dictionary of availability zone metadata keys and descriptions + :raises DriverError: An unexpected error occurred. + """ + try: + props = ( + availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA[ + 'properties']) + return {k: v.get('description', '') for k, v in props.items()} + except Exception as e: + raise exceptions.DriverError( + user_fault_string='Failed to get the supported availability ' + 'zone metadata due to: {}'.format(str(e)), + operator_fault_string='Failed to get the supported ' + 'availability zone metadata due to: ' + '{}'.format(str(e))) + + def validate_availability_zone(self, availability_zone_dict): + """Validates availability zone profile data. + + This will validate an availability zone profile dataset against the + availability zone settings the amphora driver supports. + + :param availability_zone_dict: The availability zone dict to validate. + :type availability_zone_dict: dict + :return: None + :raises DriverError: An unexpected error occurred. + :raises UnsupportedOptionError: If the driver does not support + one of the availability zone settings. 
+ """ + try: + validate( + availability_zone_dict, + availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA) + except js_exceptions.ValidationError as e: + error_object = '' + if e.relative_path: + error_object = f'{e.relative_path[0]} ' + raise exceptions.UnsupportedOptionError( + user_fault_string=f'{error_object}{e.message}', + operator_fault_string=str(e)) + except Exception as e: + raise exceptions.DriverError( + user_fault_string='Failed to validate the availability zone ' + 'metadata due to: {}'.format(str(e)), + operator_fault_string='Failed to validate the availability ' + 'zone metadata due to: {}'.format(str(e)) + ) + compute_zone = availability_zone_dict.get(consts.COMPUTE_ZONE, None) + if compute_zone: + compute_driver = stevedore_driver.DriverManager( + namespace='octavia.compute.drivers', + name=CONF.controller_worker.compute_driver, + invoke_on_load=True + ).driver + + # TODO(johnsom) Fix this to raise a NotFound error + # when the octavia-lib supports it. + compute_driver.validate_availability_zone(compute_zone) + + volume_zone = availability_zone_dict.get(consts.VOLUME_ZONE, None) + if volume_zone: + volume_driver = stevedore_driver.DriverManager( + namespace='octavia.volume.drivers', + name=CONF.controller_worker.volume_driver, + invoke_on_load=True + ).driver + + # TODO(johnsom) Fix this to raise a NotFound error + # when the octavia-lib supports it. + volume_driver.validate_availability_zone(volume_zone) + + check_nets = availability_zone_dict.get( + consts.VALID_VIP_NETWORKS, []) + management_net = availability_zone_dict.get( + consts.MANAGEMENT_NETWORK, None) + if management_net: + check_nets.append(management_net) + for check_net in check_nets: + network_driver = utils.get_network_driver() + + # TODO(johnsom) Fix this to raise a NotFound error + # when the octavia-lib supports it. + network_driver.get_network(check_net) diff --git a/octavia/api/drivers/driver_agent/__init__.py b/octavia/api/drivers/driver_agent/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/api/drivers/driver_agent/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/api/drivers/driver_agent/driver_get.py b/octavia/api/drivers/driver_agent/driver_get.py new file mode 100644 index 0000000000..3433b7e690 --- /dev/null +++ b/octavia/api/drivers/driver_agent/driver_get.py @@ -0,0 +1,90 @@ +# Copyright 2019 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from octavia_lib.common import constants as lib_consts + +from octavia.api.drivers import utils as driver_utils +from octavia.common import constants +from octavia.db import api as db_api +from octavia.db import repositories + + +def process_get(get_data): + session = db_api.get_session() + + if get_data[constants.OBJECT] == lib_consts.LOADBALANCERS: + lb_repo = repositories.LoadBalancerRepository() + with session.begin(): + db_lb = lb_repo.get(session, id=get_data[lib_consts.ID], + show_deleted=False) + if db_lb: + provider_lb = ( + driver_utils.db_loadbalancer_to_provider_loadbalancer(db_lb)) + return provider_lb.to_dict(recurse=True, render_unsets=True) + elif get_data[constants.OBJECT] == lib_consts.LISTENERS: + listener_repo = repositories.ListenerRepository() + with session.begin(): + db_listener = listener_repo.get( + session, id=get_data[lib_consts.ID], show_deleted=False) + if db_listener: + provider_listener = ( + driver_utils.db_listener_to_provider_listener(db_listener)) + return provider_listener.to_dict(recurse=True, render_unsets=True) + elif get_data[constants.OBJECT] == lib_consts.POOLS: + pool_repo = repositories.PoolRepository() + with session.begin(): + db_pool = pool_repo.get(session, id=get_data[lib_consts.ID], + show_deleted=False) + if db_pool: + provider_pool = ( + driver_utils.db_pool_to_provider_pool(db_pool)) + return provider_pool.to_dict(recurse=True, render_unsets=True) + elif get_data[constants.OBJECT] == lib_consts.MEMBERS: + member_repo = repositories.MemberRepository() + with session.begin(): + db_member = member_repo.get(session, id=get_data[lib_consts.ID], + show_deleted=False) + if db_member: + provider_member = ( + driver_utils.db_member_to_provider_member(db_member)) + return provider_member.to_dict(recurse=True, render_unsets=True) + elif get_data[constants.OBJECT] == lib_consts.HEALTHMONITORS: + hm_repo = repositories.HealthMonitorRepository() + with session.begin(): + db_hm = hm_repo.get(session, id=get_data[lib_consts.ID], + show_deleted=False) + if db_hm: + provider_hm = ( + driver_utils.db_HM_to_provider_HM(db_hm)) + return provider_hm.to_dict(recurse=True, render_unsets=True) + elif get_data[constants.OBJECT] == lib_consts.L7POLICIES: + l7policy_repo = repositories.L7PolicyRepository() + with session.begin(): + db_l7policy = l7policy_repo.get(session, + id=get_data[lib_consts.ID], + show_deleted=False) + if db_l7policy: + provider_l7policy = ( + driver_utils.db_l7policy_to_provider_l7policy(db_l7policy)) + return provider_l7policy.to_dict(recurse=True, render_unsets=True) + elif get_data[constants.OBJECT] == lib_consts.L7RULES: + l7rule_repo = repositories.L7RuleRepository() + with session.begin(): + db_l7rule = l7rule_repo.get(session, id=get_data[lib_consts.ID], + show_deleted=False) + if db_l7rule: + provider_l7rule = ( + driver_utils.db_l7rule_to_provider_l7rule(db_l7rule)) + return provider_l7rule.to_dict(recurse=True, render_unsets=True) + return {} diff --git a/octavia/api/drivers/driver_agent/driver_listener.py b/octavia/api/drivers/driver_agent/driver_listener.py new file mode 100644 index 0000000000..d7513b1a86 --- /dev/null +++ b/octavia/api/drivers/driver_agent/driver_listener.py @@ -0,0 +1,186 @@ +# Copyright 2018 Rackspace, US Inc. +# Copyright 2019 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import errno +import os +import socketserver +import threading + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_serialization import jsonutils + +from octavia.api.drivers.driver_agent import driver_get +from octavia.api.drivers.driver_agent import driver_updater + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def _recv(recv_socket): + size_str = b'' + char = recv_socket.recv(1) + while char != b'\n': + size_str += char + char = recv_socket.recv(1) + payload_size = int(size_str) + mv_buffer = memoryview(bytearray(payload_size)) + next_offset = 0 + while payload_size - next_offset > 0: + recv_size = recv_socket.recv_into(mv_buffer[next_offset:], + payload_size - next_offset) + next_offset += recv_size + return jsonutils.loads(mv_buffer.tobytes()) + + +class StatusRequestHandler(socketserver.BaseRequestHandler): + + def handle(self): + # Get the update data + try: + status = _recv(self.request) + except Exception: + LOG.exception("Error while receiving data.") + return + + # Process the update + updater = driver_updater.DriverUpdater() + response = updater.update_loadbalancer_status(status) + + # Send the response + json_data = jsonutils.dump_as_bytes(response) + len_str = f'{len(json_data)}\n'.encode() + try: + self.request.send(len_str) + self.request.sendall(json_data) + except Exception: + LOG.exception("Error while sending data.") + + +class StatsRequestHandler(socketserver.BaseRequestHandler): + + def handle(self): + # Get the update data + try: + stats = _recv(self.request) + except Exception: + LOG.exception("Error while receiving data.") + return + + # Process the update + updater = driver_updater.DriverUpdater() + response = updater.update_listener_statistics(stats) + + # Send the response + json_data = jsonutils.dump_as_bytes(response) + len_str = f'{len(json_data)}\n'.encode() + try: + self.request.send(len_str) + self.request.sendall(json_data) + except Exception: + LOG.exception("Error while sending data.") + + +class GetRequestHandler(socketserver.BaseRequestHandler): + + def handle(self): + # Get the data request + try: + get_data = _recv(self.request) + except Exception: + LOG.exception("Error while receiving data.") + return + + # Process the get + response = driver_get.process_get(get_data) + + # Send the response + json_data = jsonutils.dump_as_bytes(response) + len_str = f'{len(json_data)}\n'.encode() + try: + self.request.send(len_str) + self.request.sendall(json_data) + except Exception: + LOG.exception("Error while sending data.") + + +class ForkingUDSServer(socketserver.ForkingMixIn, + socketserver.UnixStreamServer): + pass + + +def _cleanup_socket_file(filename): + # Remove the socket file if it already exists + try: + os.remove(filename) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +def status_listener(exit_event): + _cleanup_socket_file(CONF.driver_agent.status_socket_path) + + with ForkingUDSServer(CONF.driver_agent.status_socket_path, + StatusRequestHandler) as server: + server.timeout = CONF.driver_agent.status_request_timeout + server.max_children = CONF.driver_agent.status_max_processes 
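+        # serve_forever() blocks, so it runs in a helper thread; this
+        # thread waits on exit_event and then calls shutdown() to stop
+        # the serve loop before the socket file is removed.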
+ + threading.Thread(target=server.serve_forever).start() + + exit_event.wait() + + LOG.info('Waiting for driver status listener to shutdown...') + server.shutdown() + LOG.info('Driver status listener shutdown finished.') + _cleanup_socket_file(CONF.driver_agent.status_socket_path) + + +def stats_listener(exit_event): + _cleanup_socket_file(CONF.driver_agent.stats_socket_path) + + with ForkingUDSServer(CONF.driver_agent.stats_socket_path, + StatsRequestHandler) as server: + server.timeout = CONF.driver_agent.stats_request_timeout + server.max_children = CONF.driver_agent.stats_max_processes + + threading.Thread(target=server.serve_forever).start() + + exit_event.wait() + + LOG.info('Waiting for driver statistics listener to shutdown...') + server.shutdown() + LOG.info('Driver statistics listener shutdown finished.') + _cleanup_socket_file(CONF.driver_agent.stats_socket_path) + + +def get_listener(exit_event): + _cleanup_socket_file(CONF.driver_agent.get_socket_path) + + with ForkingUDSServer(CONF.driver_agent.get_socket_path, + GetRequestHandler) as server: + server.timeout = CONF.driver_agent.get_request_timeout + server.max_children = CONF.driver_agent.get_max_processes + + threading.Thread(target=server.serve_forever).start() + + exit_event.wait() + + LOG.info('Waiting for driver get listener to shutdown...') + server.shutdown() + LOG.info('Driver get listener shutdown finished.') + _cleanup_socket_file(CONF.driver_agent.get_socket_path) + LOG.info("UDS server was closed and socket was cleaned up.") diff --git a/octavia/api/drivers/driver_agent/driver_updater.py b/octavia/api/drivers/driver_agent/driver_updater.py new file mode 100644 index 0000000000..9f6dab0124 --- /dev/null +++ b/octavia/api/drivers/driver_agent/driver_updater.py @@ -0,0 +1,235 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
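+
+# DriverUpdater persists status and statistics reports received from
+# provider driver agents: provisioning/operating status changes are
+# written to the database, and an object reaching DELETED also triggers
+# a quota decrement and, for load balancers, deallocation of an
+# Octavia-owned VIP port.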
+ +import time + +from octavia_lib.api.drivers import exceptions as driver_exceptions +from octavia_lib.common import constants as lib_consts +from oslo_log import log as logging +from oslo_utils import excutils + +from octavia.common import constants as consts +from octavia.common import data_models +from octavia.common import utils +from octavia.db import api as db_apis +from octavia.db import repositories as repo +from octavia.statistics import stats_base + +LOG = logging.getLogger(__name__) + + +class DriverUpdater: + + def __init__(self, **kwargs): + self.repos = repo.Repositories() + self.loadbalancer_repo = repo.LoadBalancerRepository() + self.listener_repo = repo.ListenerRepository() + self.pool_repo = repo.PoolRepository() + self.health_mon_repo = repo.HealthMonitorRepository() + self.member_repo = repo.MemberRepository() + self.l7policy_repo = repo.L7PolicyRepository() + self.l7rule_repo = repo.L7RuleRepository() + self.listener_stats_repo = repo.ListenerStatisticsRepository() + + self.db_session = db_apis.get_session() + super().__init__(**kwargs) + + def _check_for_lb_vip_deallocate(self, repo, lb_id): + with self.db_session.begin(): + lb = repo.get(self.db_session, id=lb_id) + if lb.vip.octavia_owned: + vip = lb.vip + # We need a backreference + vip.load_balancer = lb + # Only lookup the network driver if we have a VIP to deallocate + network_driver = utils.get_network_driver() + network_driver.deallocate_vip(vip) + + def _decrement_quota(self, repo, object_name, record_id): + lock_session = self.db_session + lock_session.begin() + db_object = repo.get(lock_session, id=record_id) + if db_object is None: + lock_session.rollback() + msg = ('{} with ID of {} is not present in the ' + 'database, it might have already been deleted. ' + 'Skipping quota update.'.format( + object_name, record_id)) + raise driver_exceptions.NotFound(msg) + try: + if db_object.provisioning_status == consts.DELETED: + LOG.info('%(name)s with ID of %(id)s is already in the ' + 'DELETED state. Skipping quota update.', + {'name': object_name, 'id': record_id}) + lock_session.rollback() + return + self.repos.decrement_quota(lock_session, + repo.model_class.__data_model__, + db_object.project_id) + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error('Failed to decrement %(name)s quota for ' + 'project: %(proj)s the project may have excess ' + 'quota in use.', {'proj': db_object.project_id, + 'name': object_name}) + lock_session.rollback() + + def _process_status_update(self, repo, object_name, record, + delete_record=False): + # Zero it out so that if the ID is missing from a record we do not + # report the last LB as the failed record in the exception + record_id = None + try: + record_id = record['id'] + record_kwargs = {} + prov_status = record.get(consts.PROVISIONING_STATUS, None) + if prov_status: + if prov_status == consts.DELETED: + if object_name == consts.LOADBALANCERS: + self._check_for_lb_vip_deallocate(repo, record_id) + + try: + self._decrement_quota(repo, object_name, record_id) + except driver_exceptions.NotFound: + # prov_status is DELETED and the object no longer + # exists in the DB, ignore the update. 
+                        return
+
+                if delete_record and object_name != consts.LOADBALANCERS:
+                    with self.db_session.begin():
+                        repo.delete(self.db_session, id=record_id)
+                    return
+
+                record_kwargs[consts.PROVISIONING_STATUS] = prov_status
+            op_status = record.get(consts.OPERATING_STATUS, None)
+            if op_status:
+                record_kwargs[consts.OPERATING_STATUS] = op_status
+            if prov_status or op_status:
+                with self.db_session.begin():
+                    repo.update(self.db_session, record_id, **record_kwargs)
+        except Exception as e:
+            # We need to raise a failure here to notify the driver it is
+            # sending bad status data.
+            raise driver_exceptions.UpdateStatusError(
+                fault_string=str(e), status_object_id=record_id,
+                status_object=object_name)
+
+    def update_loadbalancer_status(self, status):
+        """Update load balancer status.
+
+        :param status: dictionary defining the provisioning status and
+            operating status for load balancer objects, including pools,
+            members, listeners, L7 policies, and L7 rules.
+            id (string): ID for the object.
+            provisioning_status (string): Provisioning status for the object.
+            operating_status (string): Operating status for the object.
+        :type status: dict
+        :returns: dict with a status code; on failure it also carries the
+            fault string and the offending object type and ID.
+        """
+        try:
+            members = status.pop(consts.MEMBERS, [])
+            for member in members:
+                self._process_status_update(self.member_repo, consts.MEMBERS,
+                                            member, delete_record=True)
+
+            health_mons = status.pop(consts.HEALTHMONITORS, [])
+            for health_mon in health_mons:
+                self._process_status_update(
+                    self.health_mon_repo, consts.HEALTHMONITORS, health_mon,
+                    delete_record=True)
+
+            pools = status.pop(consts.POOLS, [])
+            for pool in pools:
+                self._process_status_update(self.pool_repo, consts.POOLS,
+                                            pool, delete_record=True)
+
+            l7rules = status.pop(consts.L7RULES, [])
+            for l7rule in l7rules:
+                self._process_status_update(self.l7rule_repo, consts.L7RULES,
+                                            l7rule, delete_record=True)
+
+            l7policies = status.pop(consts.L7POLICIES, [])
+            for l7policy in l7policies:
+                self._process_status_update(
+                    self.l7policy_repo, consts.L7POLICIES, l7policy,
+                    delete_record=True)
+
+            listeners = status.pop(lib_consts.LISTENERS, [])
+            for listener in listeners:
+                self._process_status_update(
+                    self.listener_repo, lib_consts.LISTENERS, listener,
+                    delete_record=True)
+
+            lbs = status.pop(consts.LOADBALANCERS, [])
+            for lb in lbs:
+                self._process_status_update(self.loadbalancer_repo,
+                                            consts.LOADBALANCERS, lb)
+        except driver_exceptions.UpdateStatusError as e:
+            return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED,
+                    lib_consts.FAULT_STRING: e.fault_string,
+                    lib_consts.STATUS_OBJECT: e.status_object,
+                    lib_consts.STATUS_OBJECT_ID: e.status_object_id}
+        except Exception as e:
+            return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED,
+                    lib_consts.FAULT_STRING: str(e)}
+        return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_OK}
+
+    def update_listener_statistics(self, statistics):
+        """Update listener statistics.
+
+        :param statistics: Statistics for listeners:
+            id (string): ID for listener.
+            active_connections (int): Number of currently active connections.
+            bytes_in (int): Total bytes received.
+            bytes_out (int): Total bytes sent.
+            request_errors (int): Total requests not fulfilled.
+            total_connections (int): The total connections handled.
+        :type statistics: dict
+        :returns: dict with a status code; on failure it also carries the
+            fault string and the stats object type.
+        """
+        listener_stats = statistics.get(lib_consts.LISTENERS, [])
+        stats_objects = []
+        for stat in listener_stats:
+            try:
+                stats_obj = data_models.ListenerStatistics(
+                    listener_id=stat['id'],
+                    bytes_in=stat['bytes_in'],
+                    bytes_out=stat['bytes_out'],
+                    active_connections=stat['active_connections'],
+                    total_connections=stat['total_connections'],
+                    request_errors=stat['request_errors'],
+                    received_time=time.time()
+                )
+                stats_objects.append(stats_obj)
+            except Exception as e:
+                return {
+                    lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED,
+                    lib_consts.FAULT_STRING: str(e),
+                    lib_consts.STATS_OBJECT: lib_consts.LISTENERS}
+
+        # Provider drivers other than the amphora driver do not have
+        # an amphora ID, use the listener ID again here to meet the
+        # constraint requirement.
+        try:
+            if stats_objects:
+                stats_base.update_stats_via_driver(stats_objects)
+        except Exception as e:
+            return {
+                lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED,
+                lib_consts.FAULT_STRING: str(e),
+                lib_consts.STATS_OBJECT: lib_consts.LISTENERS}
+        return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_OK}
diff --git a/octavia/api/drivers/driver_factory.py b/octavia/api/drivers/driver_factory.py
new file mode 100644
index 0000000000..b67fee2d2d
--- /dev/null
+++ b/octavia/api/drivers/driver_factory.py
@@ -0,0 +1,50 @@
+# Copyright 2018 Rackspace, US Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from stevedore import driver as stevedore_driver
+from wsme import types as wtypes
+
+from octavia.common import exceptions
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+def get_driver(provider):
+    # If this came in None it must be a load balancer that existed before
+    # provider support was added. These must be of type 'amphora' and not
+    # whatever the current "default" is set to.
+    if isinstance(provider, wtypes.UnsetType):
+        provider = CONF.api_settings.default_provider_driver
+    elif not provider:
+        provider = 'amphora'
+
+    if provider not in CONF.api_settings.enabled_provider_drivers:
+        LOG.warning("Requested provider driver '%s' was not enabled in the "
+                    "configuration file.", provider)
+        raise exceptions.ProviderNotEnabled(prov=provider)
+
+    try:
+        driver = stevedore_driver.DriverManager(
+            namespace='octavia.api.drivers',
+            name=provider,
+            invoke_on_load=True).driver
+        driver.name = provider
+    except Exception as e:
+        LOG.error('Unable to load provider driver %s due to: %s',
+                  provider, str(e))
+        raise exceptions.ProviderNotFound(prov=provider)
+    return driver
diff --git a/octavia/api/drivers/noop_driver/__init__.py b/octavia/api/drivers/noop_driver/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/api/drivers/noop_driver/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/api/drivers/noop_driver/agent.py b/octavia/api/drivers/noop_driver/agent.py new file mode 100644 index 0000000000..b6e6385663 --- /dev/null +++ b/octavia/api/drivers/noop_driver/agent.py @@ -0,0 +1,26 @@ +# Copyright 2019 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import time + +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + + +def noop_provider_agent(exit_event): + LOG.info('No-Op provider agent has started.') + while not exit_event.is_set(): + time.sleep(1) + LOG.info('No-Op provider agent is exiting.') diff --git a/octavia/api/drivers/noop_driver/driver.py b/octavia/api/drivers/noop_driver/driver.py new file mode 100644 index 0000000000..48f6cf246b --- /dev/null +++ b/octavia/api/drivers/noop_driver/driver.py @@ -0,0 +1,375 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
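+
+# The no-op provider driver logs every call it receives and records the
+# arguments in an in-memory dict (driverconfig) keyed by object ID, so
+# tests can verify which operation the API layer dispatched without
+# touching a real backend.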
+ +from oslo_log import log as logging +from oslo_utils import uuidutils + +from octavia_lib.api.drivers import data_models +from octavia_lib.api.drivers import provider_base as driver_base + +from octavia.api.drivers import utils as driver_utils + +LOG = logging.getLogger(__name__) + + +class NoopManager: + def __init__(self): + super().__init__() + self.driverconfig = {} + + # Load Balancer + def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary, + additional_vip_dicts): + LOG.debug('Provider %s no-op, create_vip_port loadbalancer %s', + self.__class__.__name__, loadbalancer_id) + + self.driverconfig[loadbalancer_id] = (loadbalancer_id, project_id, + vip_dictionary, + additional_vip_dicts, + 'create_vip_port') + + vip_address = vip_dictionary.get('vip_address', '198.0.2.5') + vip_network_id = vip_dictionary.get('vip_network_id', + uuidutils.generate_uuid()) + vip_port_id = vip_dictionary.get('vip_port_id', + uuidutils.generate_uuid()) + vip_subnet_id = vip_dictionary.get('vip_subnet_id', + uuidutils.generate_uuid()) + + vip = data_models.VIP(vip_address=vip_address, + vip_network_id=vip_network_id, + vip_port_id=vip_port_id, + vip_subnet_id=vip_subnet_id, + vip_sg_ids=vip_dictionary.get('vip_sg_ids', [])) + + vip_return_dict = vip.to_dict() + additional_vip_dicts = additional_vip_dicts or [] + add_return_dicts = [driver_utils.additional_vip_dict_to_provider_dict( + add_vip) for add_vip in additional_vip_dicts] + return vip_return_dict, add_return_dicts + + def loadbalancer_create(self, loadbalancer): + LOG.debug('Provider %s no-op, loadbalancer_create loadbalancer %s', + self.__class__.__name__, loadbalancer.loadbalancer_id) + + self.driverconfig[loadbalancer.loadbalancer_id] = ( + loadbalancer, 'loadbalancer_create') + + def loadbalancer_delete(self, loadbalancer, cascade=False): + loadbalancer_id = loadbalancer.loadbalancer_id + LOG.debug('Provider %s no-op, loadbalancer_delete loadbalancer %s', + self.__class__.__name__, loadbalancer_id) + + self.driverconfig[loadbalancer_id] = (loadbalancer_id, cascade, + 'loadbalancer_delete') + + def loadbalancer_failover(self, loadbalancer_id): + LOG.debug('Provider %s no-op, loadbalancer_failover loadbalancer %s', + self.__class__.__name__, loadbalancer_id) + + self.driverconfig[loadbalancer_id] = (loadbalancer_id, + 'loadbalancer_failover') + + def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): + LOG.debug('Provider %s no-op, loadbalancer_update loadbalancer %s ' + 'old: %s. new: %s', + self.__class__.__name__, new_loadbalancer.loadbalancer_id, + old_loadbalancer.to_dict(), new_loadbalancer.to_dict()) + + self.driverconfig[new_loadbalancer.loadbalancer_id] = ( + new_loadbalancer, 'loadbalancer_update') + + # Listener + def listener_create(self, listener): + LOG.debug('Provider %s no-op, listener_create listener %s', + self.__class__.__name__, listener.listener_id) + + self.driverconfig[listener.listener_id] = (listener, 'listener_create') + + def listener_delete(self, listener): + listener_id = listener.listener_id + LOG.debug('Provider %s no-op, listener_delete listener %s', + self.__class__.__name__, listener_id) + + self.driverconfig[listener_id] = (listener_id, 'listener_delete') + + def listener_update(self, old_listener, new_listener): + LOG.debug('Provider %s no-op, listener_update listener %s ' + 'old: %s. 
new: %s', + self.__class__.__name__, new_listener.listener_id, + old_listener.to_dict(), new_listener.to_dict()) + + self.driverconfig[new_listener.listener_id] = ( + new_listener, 'listener_update') + + # Pool + def pool_create(self, pool): + LOG.debug('Provider %s no-op, pool_create pool %s', + self.__class__.__name__, pool.pool_id) + + self.driverconfig[pool.pool_id] = (pool, 'pool_create') + + def pool_delete(self, pool): + pool_id = pool.pool_id + LOG.debug('Provider %s no-op, pool_delete pool %s', + self.__class__.__name__, pool_id) + + self.driverconfig[pool_id] = (pool_id, 'pool_delete') + + def pool_update(self, old_pool, new_pool): + LOG.debug('Provider %s no-op, pool_update pool %s ' + 'old: %s. new: %s', + self.__class__.__name__, new_pool.pool_id, + old_pool.to_dict(), new_pool.to_dict()) + + self.driverconfig[new_pool.pool_id] = ( + new_pool, 'pool_update') + + # Member + def member_create(self, member): + LOG.debug('Provider %s no-op, member_create member %s', + self.__class__.__name__, member.member_id) + + self.driverconfig[member.member_id] = (member, 'member_create') + + def member_delete(self, member): + member_id = member.member_id + LOG.debug('Provider %s no-op, member_delete member %s', + self.__class__.__name__, member_id) + + self.driverconfig[member_id] = (member_id, 'member_delete') + + def member_update(self, old_member, new_member): + LOG.debug('Provider %s no-op, member_update member %s ' + 'old: %s. new: %s', + self.__class__.__name__, new_member.member_id, + old_member.to_dict(), new_member.to_dict()) + + self.driverconfig[new_member.member_id] = ( + new_member, 'member_update') + + def member_batch_update(self, pool_id, members): + for member in members: + LOG.debug('Provider %s no-op, member_batch_update pool_id %s ' + 'member %s', + self.__class__.__name__, pool_id, member.member_id) + + self.driverconfig[member.member_id] = (member, + 'member_batch_update') + + # Health Monitor + def health_monitor_create(self, healthmonitor): + LOG.debug('Provider %s no-op, health_monitor_create healthmonitor %s', + self.__class__.__name__, healthmonitor.healthmonitor_id) + + self.driverconfig[healthmonitor.healthmonitor_id] = ( + healthmonitor, 'health_monitor_create') + + def health_monitor_delete(self, healthmonitor): + healthmonitor_id = healthmonitor.healthmonitor_id + LOG.debug('Provider %s no-op, health_monitor_delete healthmonitor %s', + self.__class__.__name__, healthmonitor_id) + + self.driverconfig[healthmonitor_id] = (healthmonitor_id, + 'health_monitor_delete') + + def health_monitor_update(self, old_healthmonitor, new_healthmonitor): + LOG.debug('Provider %s no-op, health_monitor_update healthmonitor %s ' + 'old: %s. 
new: %s', + self.__class__.__name__, new_healthmonitor.healthmonitor_id, + old_healthmonitor.to_dict(), new_healthmonitor.to_dict()) + + self.driverconfig[new_healthmonitor.healthmonitor_id] = ( + new_healthmonitor, 'health_monitor_update') + + # L7 Policy + def l7policy_create(self, l7policy): + LOG.debug('Provider %s no-op, l7policy_create l7policy %s', + self.__class__.__name__, l7policy.l7policy_id) + + self.driverconfig[l7policy.l7policy_id] = (l7policy, 'l7policy_create') + + def l7policy_delete(self, l7policy): + l7policy_id = l7policy.l7policy_id + LOG.debug('Provider %s no-op, l7policy_delete l7policy %s', + self.__class__.__name__, l7policy_id) + + self.driverconfig[l7policy_id] = (l7policy_id, 'l7policy_delete') + + def l7policy_update(self, old_l7policy, new_l7policy): + LOG.debug('Provider %s no-op, l7policy_update l7policy %s ' + 'old: %s. new: %s', + self.__class__.__name__, new_l7policy.l7policy_id, + old_l7policy.to_dict(), new_l7policy.to_dict()) + + self.driverconfig[new_l7policy.l7policy_id] = ( + new_l7policy, 'l7policy_update') + + # L7 Rule + def l7rule_create(self, l7rule): + LOG.debug('Provider %s no-op, l7rule_create l7rule %s', + self.__class__.__name__, l7rule.l7rule_id) + + self.driverconfig[l7rule.l7rule_id] = (l7rule, 'l7rule_create') + + def l7rule_delete(self, l7rule): + l7rule_id = l7rule.l7rule_id + LOG.debug('Provider %s no-op, l7rule_delete l7rule %s', + self.__class__.__name__, l7rule_id) + + self.driverconfig[l7rule_id] = (l7rule_id, 'l7rule_delete') + + def l7rule_update(self, old_l7rule, new_l7rule): + LOG.debug('Provider %s no-op, l7rule_update l7rule %s. ' + 'old: %s. new: %s', + self.__class__.__name__, new_l7rule.l7rule_id, + old_l7rule.to_dict(), new_l7rule.to_dict()) + + self.driverconfig[new_l7rule.l7rule_id] = (new_l7rule, 'l7rule_update') + + # Flavor + def get_supported_flavor_metadata(self): + LOG.debug('Provider %s no-op, get_supported_flavor_metadata', + self.__class__.__name__) + + return {"amp_image_tag": "The glance image tag to use for this load " + "balancer."} + + def validate_flavor(self, flavor_metadata): + LOG.debug('Provider %s no-op, validate_flavor metadata: %s', + self.__class__.__name__, flavor_metadata) + + flavor_hash = hash(frozenset(flavor_metadata)) + self.driverconfig[flavor_hash] = (flavor_metadata, 'validate_flavor') + + # Availability Zone + def get_supported_availability_zone_metadata(self): + LOG.debug( + 'Provider %s no-op, get_supported_availability_zone_metadata', + self.__class__.__name__) + + return {"compute_zone": "The compute availability zone to use for " + "this loadbalancer.", + "volume_zone": "The volume availability zone to use for " + "this loadbalancer."} + + def validate_availability_zone(self, availability_zone_metadata): + LOG.debug('Provider %s no-op, validate_availability_zone metadata: %s', + self.__class__.__name__, availability_zone_metadata) + + availability_zone_hash = hash(frozenset(availability_zone_metadata)) + self.driverconfig[availability_zone_hash] = ( + availability_zone_metadata, 'validate_availability_zone') + + +class NoopProviderDriver(driver_base.ProviderDriver): + def __init__(self): + super().__init__() + self.driver = NoopManager() + + # Load Balancer + def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary, + additional_vip_dicts): + return self.driver.create_vip_port(loadbalancer_id, project_id, + vip_dictionary, + additional_vip_dicts) + + def loadbalancer_create(self, loadbalancer): + self.driver.loadbalancer_create(loadbalancer) + + def 
loadbalancer_delete(self, loadbalancer, cascade=False): + self.driver.loadbalancer_delete(loadbalancer, cascade) + + def loadbalancer_failover(self, loadbalancer_id): + self.driver.loadbalancer_failover(loadbalancer_id) + + def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): + self.driver.loadbalancer_update(old_loadbalancer, new_loadbalancer) + + # Listener + def listener_create(self, listener): + self.driver.listener_create(listener) + + def listener_delete(self, listener): + self.driver.listener_delete(listener) + + def listener_update(self, old_listener, new_listener): + self.driver.listener_update(old_listener, new_listener) + + # Pool + def pool_create(self, pool): + self.driver.pool_create(pool) + + def pool_delete(self, pool): + self.driver.pool_delete(pool) + + def pool_update(self, old_pool, new_pool): + self.driver.pool_update(old_pool, new_pool) + + # Member + def member_create(self, member): + self.driver.member_create(member) + + def member_delete(self, member): + self.driver.member_delete(member) + + def member_update(self, old_member, new_member): + self.driver.member_update(old_member, new_member) + + def member_batch_update(self, pool_id, members): + self.driver.member_batch_update(pool_id, members) + + # Health Monitor + def health_monitor_create(self, healthmonitor): + self.driver.health_monitor_create(healthmonitor) + + def health_monitor_delete(self, healthmonitor): + self.driver.health_monitor_delete(healthmonitor) + + def health_monitor_update(self, old_healthmonitor, new_healthmonitor): + self.driver.health_monitor_update(old_healthmonitor, new_healthmonitor) + + # L7 Policy + def l7policy_create(self, l7policy): + self.driver.l7policy_create(l7policy) + + def l7policy_delete(self, l7policy): + self.driver.l7policy_delete(l7policy) + + def l7policy_update(self, old_l7policy, new_l7policy): + self.driver.l7policy_update(old_l7policy, new_l7policy) + + # L7 Rule + def l7rule_create(self, l7rule): + self.driver.l7rule_create(l7rule) + + def l7rule_delete(self, l7rule): + self.driver.l7rule_delete(l7rule) + + def l7rule_update(self, old_l7rule, new_l7rule): + self.driver.l7rule_update(old_l7rule, new_l7rule) + + # Flavor + def get_supported_flavor_metadata(self): + return self.driver.get_supported_flavor_metadata() + + def validate_flavor(self, flavor_metadata): + self.driver.validate_flavor(flavor_metadata) + + # Availability Zone + def get_supported_availability_zone_metadata(self): + return self.driver.get_supported_availability_zone_metadata() + + def validate_availability_zone(self, availability_zone_metadata): + self.driver.validate_availability_zone(availability_zone_metadata) diff --git a/octavia/api/drivers/utils.py b/octavia/api/drivers/utils.py new file mode 100644 index 0000000000..c5cdf75019 --- /dev/null +++ b/octavia/api/drivers/utils.py @@ -0,0 +1,630 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
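+
+# Translation helpers between Octavia database models and the octavia-lib
+# provider data models. The recurring pattern below: rename 'id' to
+# '<object>_id', map 'enabled' to 'admin_state_up', strip DB-only fields
+# and back references, and resolve certificate references into secret
+# data before objects are handed to a provider driver.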
+ +import copy + +from octavia_lib.api.drivers import data_models as driver_dm +from octavia_lib.api.drivers import exceptions as lib_exceptions +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_context import context as oslo_context +from oslo_log import log as logging +from oslo_utils import excutils +from stevedore import driver as stevedore_driver + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common.tls_utils import cert_parser +from octavia.db import api as db_api +from octavia.db import repositories +from octavia.i18n import _ + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +def call_provider(provider, driver_method, *args, **kwargs): + """Wrap calls to the provider driver to handle driver errors. + + This allows Octavia to return user friendly errors when a provider driver + has an issue. + + :param driver_method: Method in the driver to call. + :raises ProviderDriverError: Catch all driver error. + :raises ProviderNotImplementedError: The driver doesn't support this + action. + :raises ProviderUnsupportedOptionError: The driver doesn't support a + provided option. + """ + + try: + return driver_method(*args, **kwargs) + except lib_exceptions.Conflict as e: + LOG.info("Provider '%s' raised a conflict error: %s", + provider, e.operator_fault_string) + raise exceptions.VIPAddressConflict(msg=e.user_fault_string) + except lib_exceptions.DriverError as e: + LOG.exception("Provider '%s' raised a driver error: %s", + provider, e.operator_fault_string) + raise exceptions.ProviderDriverError(prov=provider, + user_msg=e.user_fault_string) + except (lib_exceptions.NotImplementedError, NotImplementedError) as e: + op_fault_string = ( + e.operator_fault_string + if hasattr(e, "operator_fault_string") + else _("This feature is not implemented by this provider.")) + usr_fault_string = ( + e.user_fault_string + if hasattr(e, "user_fault_string") + else _("This feature is not implemented by the provider.")) + LOG.info("Provider '%s' raised a not implemented error: %s", + provider, op_fault_string) + raise exceptions.ProviderNotImplementedError( + prov=provider, user_msg=usr_fault_string) + except lib_exceptions.UnsupportedOptionError as e: + LOG.info("Provider '%s' raised an unsupported option error: " + "%s", provider, e.operator_fault_string) + raise exceptions.ProviderUnsupportedOptionError( + prov=provider, user_msg=e.user_fault_string) + except Exception as e: + LOG.exception("Provider '%s' raised an unknown error: %s", + provider, str(e)) + raise exceptions.ProviderDriverError(prov=provider, user_msg=e) + + +def _base_to_provider_dict(current_dict, include_project_id=False): + new_dict = copy.deepcopy(current_dict) + if 'provisioning_status' in new_dict: + del new_dict['provisioning_status'] + if 'operating_status' in new_dict: + del new_dict['operating_status'] + if 'provider' in new_dict: + del new_dict['provider'] + if 'created_at' in new_dict: + del new_dict['created_at'] + if 'updated_at' in new_dict: + del new_dict['updated_at'] + if 'enabled' in new_dict: + new_dict['admin_state_up'] = new_dict.pop('enabled') + if 'project_id' in new_dict and not include_project_id: + del new_dict['project_id'] + if 'tenant_id' in new_dict: + del new_dict['tenant_id'] + if 'tags' in new_dict: + del new_dict['tags'] + if 'flavor_id' in new_dict: + del new_dict['flavor_id'] + if 'topology' in new_dict: + del new_dict['topology'] + if 'vrrp_group' in new_dict: + 
del new_dict['vrrp_group'] + if 'amphorae' in new_dict: + del new_dict['amphorae'] + if 'vip' in new_dict: + del new_dict['vip'] + if 'listeners' in new_dict: + del new_dict['listeners'] + if 'pools' in new_dict: + del new_dict['pools'] + if 'server_group_id' in new_dict: + del new_dict['server_group_id'] + return new_dict + + +# Note: The provider dict returned from this method will have provider +# data model objects in it. +def lb_dict_to_provider_dict(lb_dict, vip=None, add_vips=None, db_pools=None, + db_listeners=None, for_delete=False): + new_lb_dict = _base_to_provider_dict(lb_dict, include_project_id=True) + new_lb_dict['loadbalancer_id'] = new_lb_dict.pop('id') + if vip: + new_lb_dict['vip_address'] = vip.ip_address + new_lb_dict['vip_network_id'] = vip.network_id + new_lb_dict['vip_port_id'] = vip.port_id + new_lb_dict['vip_subnet_id'] = vip.subnet_id + new_lb_dict['vip_qos_policy_id'] = vip.qos_policy_id + new_lb_dict[lib_consts.VIP_SG_IDS] = vip.sg_ids + if 'flavor_id' in lb_dict and lb_dict['flavor_id']: + flavor_repo = repositories.FlavorRepository() + session = db_api.get_session() + with session.begin(): + new_lb_dict['flavor'] = flavor_repo.get_flavor_metadata_dict( + session, lb_dict['flavor_id']) + if add_vips: + new_lb_dict['additional_vips'] = db_additional_vips_to_provider_vips( + add_vips) + if db_pools: + new_lb_dict['pools'] = db_pools_to_provider_pools( + db_pools, for_delete=for_delete) + if db_listeners: + new_lb_dict['listeners'] = db_listeners_to_provider_listeners( + db_listeners, for_delete=for_delete) + return new_lb_dict + + +def db_loadbalancer_to_provider_loadbalancer(db_loadbalancer, + for_delete=False): + new_loadbalancer_dict = lb_dict_to_provider_dict( + db_loadbalancer.to_dict(recurse=True), + vip=db_loadbalancer.vip, + db_pools=db_loadbalancer.pools, + db_listeners=db_loadbalancer.listeners, for_delete=for_delete) + for unsupported_field in ['server_group_id', 'amphorae', + 'vrrp_group', 'topology', 'vip']: + if unsupported_field in new_loadbalancer_dict: + del new_loadbalancer_dict[unsupported_field] + provider_loadbalancer = driver_dm.LoadBalancer.from_dict( + new_loadbalancer_dict) + return provider_loadbalancer + + +def db_listeners_to_provider_listeners(db_listeners, for_delete=False): + provider_listeners = [] + for listener in db_listeners: + provider_listener = db_listener_to_provider_listener( + listener, for_delete=for_delete) + provider_listeners.append(provider_listener) + return provider_listeners + + +def db_listeners_to_provider_dicts_list_of_dicts(db_listeners, + for_delete=False): + listeners = db_listeners_to_provider_listeners( + db_listeners, for_delete=for_delete) + return [listener.to_dict() for listener in listeners] + + +def db_listener_to_provider_listener(db_listener, for_delete=False): + new_listener_dict = listener_dict_to_provider_dict( + db_listener.to_dict(recurse=True), for_delete=for_delete) + if ('default_pool' in new_listener_dict and + new_listener_dict['default_pool']): + provider_pool = db_pool_to_provider_pool(db_listener.default_pool, + for_delete=for_delete) + new_listener_dict['default_pool_id'] = provider_pool.pool_id + new_listener_dict['default_pool'] = provider_pool + if new_listener_dict.get('l7policies', None): + new_listener_dict['l7policies'] = ( + db_l7policies_to_provider_l7policies(db_listener.l7policies)) + provider_listener = driver_dm.Listener.from_dict(new_listener_dict) + return provider_listener + + +def _get_secret_data(cert_manager, project_id, secret_ref, for_delete=False): + """Get the 
secret from the certificate manager.
+
+    :returns: The secret data.
+    """
+    context = oslo_context.RequestContext(project_id=project_id)
+    try:
+        secret_data = cert_manager.get_secret(context, secret_ref)
+    except Exception as e:
+        LOG.warning('Unable to retrieve certificate: %s due to %s.',
+                    secret_ref, str(e))
+        if for_delete:
+            secret_data = None
+        else:
+            raise exceptions.CertificateRetrievalException(ref=secret_ref)
+    # We need to have json convertible data for storing it in the
+    # persistence jobboard backend.
+    if isinstance(secret_data, bytes):
+        return secret_data.decode()
+    return secret_data
+
+
+def listener_dict_to_provider_dict(listener_dict, for_delete=False):
+    new_listener_dict = _base_to_provider_dict(listener_dict,
+                                               include_project_id=True)
+    new_listener_dict['listener_id'] = new_listener_dict.pop('id')
+    if 'load_balancer_id' in new_listener_dict:
+        new_listener_dict['loadbalancer_id'] = new_listener_dict.pop(
+            'load_balancer_id')
+
+    # Pull the certs out of the certificate manager to pass to the provider
+    if 'tls_certificate_id' in new_listener_dict:
+        new_listener_dict['default_tls_container_ref'] = new_listener_dict.pop(
+            'tls_certificate_id')
+    if 'client_ca_tls_certificate_id' in new_listener_dict:
+        new_listener_dict['client_ca_tls_container_ref'] = (
+            new_listener_dict.pop('client_ca_tls_certificate_id'))
+    if 'client_crl_container_id' in new_listener_dict:
+        new_listener_dict['client_crl_container_ref'] = (
+            new_listener_dict.pop('client_crl_container_id'))
+    listener_obj = data_models.Listener(**listener_dict)
+    if (listener_obj.tls_certificate_id or listener_obj.sni_containers or
+            listener_obj.client_ca_tls_certificate_id):
+        SNI_objs = []
+        for sni in listener_obj.sni_containers:
+            if isinstance(sni, dict):
+                sni_obj = data_models.SNI(**sni)
+                SNI_objs.append(sni_obj)
+            elif isinstance(sni, str):
+                sni_obj = data_models.SNI(tls_container_id=sni)
+                SNI_objs.append(sni_obj)
+            else:
+                raise exceptions.ValidationException(
+                    detail=_('Invalid SNI container on listener'))
+        listener_obj.sni_containers = SNI_objs
+        cert_manager = stevedore_driver.DriverManager(
+            namespace='octavia.cert_manager',
+            name=CONF.certificates.cert_manager,
+            invoke_on_load=True,
+        ).driver
+        try:
+            cert_dict = cert_parser.load_certificates_data(cert_manager,
+                                                           listener_obj)
+        except Exception as e:
+            with excutils.save_and_reraise_exception() as ctxt:
+                LOG.warning('Unable to retrieve certificate(s) due to %s.',
+                            str(e))
+                if for_delete:
+                    ctxt.reraise = False
+                    cert_dict = {}
+        if 'tls_cert' in cert_dict and cert_dict['tls_cert']:
+            new_listener_dict['default_tls_container_data'] = (
+                cert_dict['tls_cert'].to_dict(recurse=True))
+        if 'sni_certs' in cert_dict and cert_dict['sni_certs']:
+            sni_data_list = []
+            for sni in cert_dict['sni_certs']:
+                sni_data_list.append(sni.to_dict(recurse=True))
+            new_listener_dict['sni_container_data'] = sni_data_list
+
+        if listener_obj.client_ca_tls_certificate_id:
+            cert = _get_secret_data(cert_manager, listener_obj.project_id,
+                                    listener_obj.client_ca_tls_certificate_id,
+                                    for_delete=for_delete)
+            new_listener_dict['client_ca_tls_container_data'] = cert
+        if listener_obj.client_crl_container_id:
+            crl_file = _get_secret_data(cert_manager, listener_obj.project_id,
+                                        listener_obj.client_crl_container_id,
+                                        for_delete=for_delete)
+            new_listener_dict['client_crl_container_data'] = crl_file
+
+    # Format the allowed_cidrs
+    if ('allowed_cidrs' in new_listener_dict and
+            new_listener_dict['allowed_cidrs'] and
+            'cidr' in
new_listener_dict['allowed_cidrs'][0]): + cidrs_dict_list = new_listener_dict.pop('allowed_cidrs') + new_listener_dict['allowed_cidrs'] = [cidr_dict['cidr'] for + cidr_dict in cidrs_dict_list] + + # Format the sni_containers -> sni_container_refs + sni_containers = new_listener_dict.pop('sni_containers', None) + if sni_containers: + new_listener_dict['sni_container_refs'] = [] + for sni in sni_containers: + if isinstance(sni, dict): + new_listener_dict['sni_container_refs'].append( + sni['tls_container_id']) + elif isinstance(sni, str): + new_listener_dict['sni_container_refs'].append(sni) + else: + raise exceptions.ValidationException( + detail=_('Invalid SNI container on listener')) + + # Remove the DB back references + if 'load_balancer' in new_listener_dict: + del new_listener_dict['load_balancer'] + if 'peer_port' in new_listener_dict: + del new_listener_dict['peer_port'] + if 'pools' in new_listener_dict: + del new_listener_dict['pools'] + if 'stats' in new_listener_dict: + del new_listener_dict['stats'] + + if ('default_pool' in new_listener_dict and + new_listener_dict['default_pool']): + pool = new_listener_dict.pop('default_pool') + new_listener_dict['default_pool'] = pool_dict_to_provider_dict( + pool, for_delete=for_delete) + provider_l7policies = [] + if 'l7policies' in new_listener_dict: + l7policies = new_listener_dict.pop('l7policies') or [] + for l7policy in l7policies: + provider_l7policy = l7policy_dict_to_provider_dict(l7policy) + provider_l7policies.append(provider_l7policy) + new_listener_dict['l7policies'] = provider_l7policies + return new_listener_dict + + +def db_additional_vips_to_provider_vips(db_add_vips): + provider_add_vips = [] + for add_vip in db_add_vips: + provider_add_vips.append( + additional_vip_dict_to_provider_dict(add_vip.to_dict())) + return provider_add_vips + + +def db_pools_to_provider_pools(db_pools, for_delete=False): + provider_pools = [] + for pool in db_pools: + provider_pools.append(db_pool_to_provider_pool(pool, + for_delete=for_delete)) + return provider_pools + + +def db_pool_to_provider_pool(db_pool, for_delete=False): + new_pool_dict = pool_dict_to_provider_dict(db_pool.to_dict(recurse=True), + for_delete=for_delete) + # Replace the sub-dicts with objects + if 'health_monitor' in new_pool_dict: + del new_pool_dict['health_monitor'] + if db_pool.health_monitor: + provider_healthmonitor = db_HM_to_provider_HM(db_pool.health_monitor) + new_pool_dict['healthmonitor'] = provider_healthmonitor + # Don't leave a 'members' None here, we want it to pass through to Unset + if new_pool_dict.get('members', None): + del new_pool_dict['members'] + if db_pool.members: + provider_members = db_members_to_provider_members(db_pool.members) + new_pool_dict['members'] = provider_members + db_listeners = db_pool.listeners + if db_listeners: + new_pool_dict['listener_id'] = db_listeners[0].id + return driver_dm.Pool.from_dict(new_pool_dict) + + +def pool_dict_to_provider_dict(pool_dict, for_delete=False): + new_pool_dict = _base_to_provider_dict(pool_dict, include_project_id=True) + new_pool_dict['pool_id'] = new_pool_dict.pop('id') + + # Pull the certs out of the certificate manager to pass to the provider + if 'tls_certificate_id' in new_pool_dict: + new_pool_dict['tls_container_ref'] = new_pool_dict.pop( + 'tls_certificate_id') + if 'ca_tls_certificate_id' in new_pool_dict: + new_pool_dict['ca_tls_container_ref'] = new_pool_dict.pop( + 'ca_tls_certificate_id') + if 'crl_container_id' in new_pool_dict: + new_pool_dict['crl_container_ref'] = 
new_pool_dict.pop( + 'crl_container_id') + + pool_obj = data_models.Pool(**pool_dict) + if (pool_obj.tls_certificate_id or pool_obj.ca_tls_certificate_id or + pool_obj.crl_container_id): + cert_manager = stevedore_driver.DriverManager( + namespace='octavia.cert_manager', + name=CONF.certificates.cert_manager, + invoke_on_load=True, + ).driver + try: + cert_dict = cert_parser.load_certificates_data(cert_manager, + pool_obj) + except Exception as e: + with excutils.save_and_reraise_exception() as ctxt: + LOG.warning('Unable to retrieve certificate(s) due to %s.', + str(e)) + if for_delete: + ctxt.reraise = False + cert_dict = {} + if 'tls_cert' in cert_dict and cert_dict['tls_cert']: + new_pool_dict['tls_container_data'] = ( + cert_dict['tls_cert'].to_dict(recurse=True)) + + if pool_obj.ca_tls_certificate_id: + cert = _get_secret_data(cert_manager, pool_obj.project_id, + pool_obj.ca_tls_certificate_id, + for_delete=for_delete) + new_pool_dict['ca_tls_container_data'] = cert + + if pool_obj.crl_container_id: + crl_file = _get_secret_data(cert_manager, pool_obj.project_id, + pool_obj.crl_container_id, + for_delete=for_delete) + new_pool_dict['crl_container_data'] = crl_file + + # Remove the DB back references + if ('session_persistence' in new_pool_dict and + new_pool_dict['session_persistence']): + if 'pool_id' in new_pool_dict['session_persistence']: + del new_pool_dict['session_persistence']['pool_id'] + if 'pool' in new_pool_dict['session_persistence']: + del new_pool_dict['session_persistence']['pool'] + if 'l7policies' in new_pool_dict: + del new_pool_dict['l7policies'] + if 'listeners' in new_pool_dict: + del new_pool_dict['listeners'] + if 'load_balancer' in new_pool_dict: + del new_pool_dict['load_balancer'] + if 'load_balancer_id' in new_pool_dict: + new_pool_dict['loadbalancer_id'] = new_pool_dict.pop( + 'load_balancer_id') + if 'health_monitor' in new_pool_dict: + hm = new_pool_dict.pop('health_monitor') + if hm: + new_pool_dict['healthmonitor'] = hm_dict_to_provider_dict(hm) + else: + new_pool_dict['healthmonitor'] = None + if 'members' in new_pool_dict and new_pool_dict['members']: + members = new_pool_dict.pop('members') + provider_members = [] + for member in members: + provider_member = member_dict_to_provider_dict(member) + provider_members.append(provider_member) + new_pool_dict['members'] = provider_members + return new_pool_dict + + +def db_members_to_provider_members(db_members): + provider_members = [] + for member in db_members: + provider_members.append(db_member_to_provider_member(member)) + return provider_members + + +def db_member_to_provider_member(db_member): + new_member_dict = member_dict_to_provider_dict(db_member.to_dict()) + if constants.REQUEST_SRIOV in new_member_dict: + request_sriov = new_member_dict.pop(constants.REQUEST_SRIOV) + if request_sriov: + new_member_dict[constants.VNIC_TYPE] = constants.VNIC_TYPE_DIRECT + else: + new_member_dict[constants.VNIC_TYPE] = constants.VNIC_TYPE_NORMAL + return driver_dm.Member.from_dict(new_member_dict) + + +def member_dict_to_provider_dict(member_dict): + new_member_dict = _base_to_provider_dict(member_dict, + include_project_id=True) + new_member_dict['member_id'] = new_member_dict.pop('id') + if 'ip_address' in new_member_dict: + new_member_dict['address'] = new_member_dict.pop('ip_address') + # Remove the DB back references + if 'pool' in new_member_dict: + del new_member_dict['pool'] + return new_member_dict + + +def db_HM_to_provider_HM(db_hm): + new_HM_dict = hm_dict_to_provider_dict(db_hm.to_dict()) + return 
driver_dm.HealthMonitor.from_dict(new_HM_dict) + + +def hm_dict_to_provider_dict(hm_dict): + new_hm_dict = _base_to_provider_dict(hm_dict, include_project_id=True) + new_hm_dict['healthmonitor_id'] = new_hm_dict.pop('id') + if 'fall_threshold' in new_hm_dict: + new_hm_dict['max_retries_down'] = new_hm_dict.pop('fall_threshold') + if 'rise_threshold' in new_hm_dict: + new_hm_dict['max_retries'] = new_hm_dict.pop('rise_threshold') + # Remove the DB back references + if 'pool' in new_hm_dict: + del new_hm_dict['pool'] + return new_hm_dict + + +def db_l7policies_to_provider_l7policies(db_l7policies): + provider_l7policies = [] + for l7policy in db_l7policies: + provider_l7policy = db_l7policy_to_provider_l7policy(l7policy) + provider_l7policies.append(provider_l7policy) + return provider_l7policies + + +def db_l7policy_to_provider_l7policy(db_l7policy): + new_l7policy_dict = l7policy_dict_to_provider_dict( + db_l7policy.to_dict(recurse=True)) + if 'l7rules' in new_l7policy_dict: + del new_l7policy_dict['l7rules'] + new_l7rules = db_l7rules_to_provider_l7rules(db_l7policy.l7rules) + new_l7policy_dict['rules'] = new_l7rules + return driver_dm.L7Policy.from_dict(new_l7policy_dict) + + +def l7policy_dict_to_provider_dict(l7policy_dict): + new_l7policy_dict = _base_to_provider_dict(l7policy_dict, + include_project_id=True) + new_l7policy_dict['l7policy_id'] = new_l7policy_dict.pop('id') + # Remove the DB back references + if 'listener' in new_l7policy_dict: + del new_l7policy_dict['listener'] + if 'redirect_pool' in new_l7policy_dict: + del new_l7policy_dict['redirect_pool'] + if 'l7rules' in new_l7policy_dict and new_l7policy_dict['l7rules']: + rules = new_l7policy_dict.pop('l7rules') + provider_rules = [] + for rule in rules: + provider_rule = l7rule_dict_to_provider_dict(rule) + provider_rules.append(provider_rule) + new_l7policy_dict['rules'] = provider_rules + return new_l7policy_dict + + +def db_l7rules_to_provider_l7rules(db_l7rules): + provider_l7rules = [] + for l7rule in db_l7rules: + provider_l7rule = db_l7rule_to_provider_l7rule(l7rule) + provider_l7rules.append(provider_l7rule) + return provider_l7rules + + +def db_l7rule_to_provider_l7rule(db_l7rule): + new_l7rule_dict = l7rule_dict_to_provider_dict(db_l7rule.to_dict()) + return driver_dm.L7Rule.from_dict(new_l7rule_dict) + + +def l7rule_dict_to_provider_dict(l7rule_dict): + new_l7rule_dict = _base_to_provider_dict(l7rule_dict, + include_project_id=True) + new_l7rule_dict['l7rule_id'] = new_l7rule_dict.pop('id') + # Remove the DB back references + if 'l7policy' in new_l7rule_dict: + del new_l7rule_dict['l7policy'] + return new_l7rule_dict + + +def vip_dict_to_provider_dict(vip_dict): + new_vip_dict = {} + if 'ip_address' in vip_dict: + new_vip_dict['vip_address'] = vip_dict['ip_address'] + if 'network_id' in vip_dict: + new_vip_dict['vip_network_id'] = vip_dict['network_id'] + if 'port_id' in vip_dict: + new_vip_dict['vip_port_id'] = vip_dict['port_id'] + if 'subnet_id' in vip_dict: + new_vip_dict['vip_subnet_id'] = vip_dict['subnet_id'] + if 'qos_policy_id' in vip_dict: + new_vip_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id'] + if constants.SG_IDS in vip_dict: + new_vip_dict[lib_consts.VIP_SG_IDS] = vip_dict[constants.SG_IDS] + if constants.OCTAVIA_OWNED in vip_dict: + new_vip_dict[constants.OCTAVIA_OWNED] = vip_dict[ + constants.OCTAVIA_OWNED] + return new_vip_dict + + +def additional_vip_dict_to_provider_dict(vip_dict): + new_vip_dict = {} + if 'ip_address' in vip_dict: + new_vip_dict['ip_address'] = vip_dict['ip_address'] + 
if 'network_id' in vip_dict: + new_vip_dict['network_id'] = vip_dict['network_id'] + if 'port_id' in vip_dict: + new_vip_dict['port_id'] = vip_dict['port_id'] + if 'subnet_id' in vip_dict: + new_vip_dict['subnet_id'] = vip_dict['subnet_id'] + return new_vip_dict + + +def provider_vip_dict_to_vip_obj(vip_dictionary): + vip_obj = data_models.Vip() + if 'vip_address' in vip_dictionary: + vip_obj.ip_address = vip_dictionary['vip_address'] + if 'vip_network_id' in vip_dictionary: + vip_obj.network_id = vip_dictionary['vip_network_id'] + if 'vip_port_id' in vip_dictionary: + vip_obj.port_id = vip_dictionary['vip_port_id'] + if 'vip_subnet_id' in vip_dictionary: + vip_obj.subnet_id = vip_dictionary['vip_subnet_id'] + if 'vip_qos_policy_id' in vip_dictionary: + vip_obj.qos_policy_id = vip_dictionary['vip_qos_policy_id'] + if lib_consts.VIP_SG_IDS in vip_dictionary: + vip_obj.sg_ids = vip_dictionary[lib_consts.VIP_SG_IDS] + if constants.OCTAVIA_OWNED in vip_dictionary: + vip_obj.octavia_owned = vip_dictionary[constants.OCTAVIA_OWNED] + return vip_obj + + +def provider_additional_vip_dict_to_additional_vip_obj(vip_dictionary): + vip_obj = data_models.AdditionalVip() + if 'ip_address' in vip_dictionary: + vip_obj.ip_address = vip_dictionary['ip_address'] + if 'network_id' in vip_dictionary: + vip_obj.network_id = vip_dictionary['network_id'] + if 'port_id' in vip_dictionary: + vip_obj.port_id = vip_dictionary['port_id'] + if 'subnet_id' in vip_dictionary: + vip_obj.subnet_id = vip_dictionary['subnet_id'] + return vip_obj diff --git a/octavia/api/healthcheck/healthcheck_plugins.py b/octavia/api/healthcheck/healthcheck_plugins.py new file mode 100644 index 0000000000..abf1693408 --- /dev/null +++ b/octavia/api/healthcheck/healthcheck_plugins.py @@ -0,0 +1,68 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import datetime + +from oslo_config import cfg +from oslo_middleware.healthcheck import pluginbase + +from octavia.db import api as db_apis +from octavia.db import healthcheck + +CONF = cfg.CONF + + +class OctaviaDBHealthcheck(pluginbase.HealthcheckBaseExtension): + + UNAVAILABLE_REASON = 'The Octavia database is unavailable.' 
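+
+    # These class attributes cache the last probe result so the database
+    # is checked at most once per healthcheck_refresh_interval; requests
+    # inside that window reuse the cached result.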
+ + last_check = None + last_result = None + last_message = None + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def healthcheck(self, server_port): + try: + if (self.last_check is not None and + ((datetime.datetime.now() - + self.last_check).total_seconds()) < + CONF.api_settings.healthcheck_refresh_interval): + result = self.last_result + message = self.last_message + else: + session = db_apis.get_session() + with session.begin(): + result, message = healthcheck.check_database_connection( + session) + self.last_check = datetime.datetime.now() + self.last_result = result + self.last_message = message + if result: + return OctaviaDBCheckResult(available=True, reason="OK") + else: + return OctaviaDBCheckResult(available=False, + reason=self.UNAVAILABLE_REASON, + details=message) + except Exception as e: + return OctaviaDBCheckResult(available=False, + reason=self.UNAVAILABLE_REASON, + details=str(e)) + + +class OctaviaDBCheckResult(pluginbase.HealthcheckResult): + """Result sub-class to provide a unique name in detail reports.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) diff --git a/octavia/api/root_controller.py b/octavia/api/root_controller.py new file mode 100644 index 0000000000..f06545a3c6 --- /dev/null +++ b/octavia/api/root_controller.py @@ -0,0 +1,160 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
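+ + # Root of the API WSGI app: serves the version discovery document at / + # and, when [api_settings] healthcheck_enabled is set, the oslo.middleware + # healthcheck endpoint at /healthcheck. A GET on / returns the standard + # OpenStack version list, for example (abridged, host is illustrative): + #   {"versions": [{"id": "v2.0", "status": "SUPPORTED", + #                  "updated": "2016-12-11T00:00:00Z", + #                  "links": [{"href": "http://lb.example.com/v2", + #                             "rel": "self"}]}, ...]}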
+ +from oslo_config import cfg +from oslo_log import log as logging +from oslo_middleware import healthcheck +from pecan import abort as pecan_abort +from pecan import expose as pecan_expose +from pecan import request as pecan_request +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.v2 import controllers as v2_controller + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class RootController: + """The controller with which the pecan wsgi app should be created.""" + + def __init__(self): + super().__init__() + setattr(self, 'v2.0', v2_controller.V2Controller()) + setattr(self, 'v2', v2_controller.V2Controller()) + if CONF.api_settings.healthcheck_enabled: + self.healthcheck_obj = healthcheck.Healthcheck.app_factory(None) + + # Run the oslo middleware healthcheck for /healthcheck + @pecan_expose('json') + @pecan_expose(content_type='text/plain') + @pecan_expose(content_type='text/html') + def healthcheck(self): # pylint: disable=inconsistent-return-statements + if CONF.api_settings.healthcheck_enabled: + if pecan_request.method not in ['GET', 'HEAD']: + pecan_abort(405) + return self.healthcheck_obj.process_request(pecan_request) + pecan_abort(404) + + def _add_a_version(self, versions, version, url_version, status, + timestamp, base_url): + versions.append({ + 'id': version, + 'status': status, + 'updated': timestamp, + 'links': [{ + 'href': base_url + url_version, + 'rel': 'self' + }] + }) + + @wsme_pecan.wsexpose(wtypes.text) + def index(self): + host_url = pecan_request.path_url + + if not host_url.endswith('/'): + host_url = f'{host_url}/' + + versions = [] + self._add_a_version(versions, 'v2.0', 'v2', 'SUPPORTED', + '2016-12-11T00:00:00Z', host_url) + self._add_a_version(versions, 'v2.1', 'v2', 'SUPPORTED', + '2018-04-20T00:00:00Z', host_url) + self._add_a_version(versions, 'v2.2', 'v2', 'SUPPORTED', + '2018-07-31T00:00:00Z', host_url) + self._add_a_version(versions, 'v2.3', 'v2', 'SUPPORTED', + '2018-12-18T00:00:00Z', host_url) + # amp statistics + self._add_a_version(versions, 'v2.4', 'v2', 'SUPPORTED', + '2018-12-19T00:00:00Z', host_url) + # Tags + self._add_a_version(versions, 'v2.5', 'v2', 'SUPPORTED', + '2019-01-21T00:00:00Z', host_url) + # Flavors + self._add_a_version(versions, 'v2.6', 'v2', 'SUPPORTED', + '2019-01-25T00:00:00Z', host_url) + # Amphora Config update + self._add_a_version(versions, 'v2.7', 'v2', 'SUPPORTED', + '2018-01-25T12:00:00Z', host_url) + # TLS client authentication + self._add_a_version(versions, 'v2.8', 'v2', 'SUPPORTED', + '2019-02-12T00:00:00Z', host_url) + # HTTP Redirect code + self._add_a_version(versions, 'v2.9', 'v2', 'SUPPORTED', + '2019-03-04T00:00:00Z', host_url) + # Healthmonitor host header + self._add_a_version(versions, 'v2.10', 'v2', 'SUPPORTED', + '2019-03-05T00:00:00Z', host_url) + # Additive batch member update + self._add_a_version(versions, 'v2.11', 'v2', 'SUPPORTED', + '2019-06-24T00:00:00Z', host_url) + # VIP ACL + self._add_a_version(versions, 'v2.12', 'v2', 'SUPPORTED', + '2019-09-11T00:00:00Z', host_url) + # SOURCE_IP_PORT algorithm + self._add_a_version(versions, 'v2.13', 'v2', 'SUPPORTED', + '2019-09-13T00:00:00Z', host_url) + # Availability Zones + self._add_a_version(versions, 'v2.14', 'v2', 'SUPPORTED', + '2019-11-10T00:00:00Z', host_url) + # TLS cipher options + self._add_a_version(versions, 'v2.15', 'v2', 'SUPPORTED', + '2020-03-10T00:00:00Z', host_url) + # Additional UDP Healthcheck Types (HTTP/TCP) + self._add_a_version(versions, 'v2.16', 'v2', 'SUPPORTED', + 
'2020-03-15T00:00:00Z', host_url) + # Listener TLS versions + self._add_a_version(versions, 'v2.17', 'v2', 'SUPPORTED', + '2020-04-29T00:00:00Z', host_url) + # Pool TLS versions + self._add_a_version(versions, 'v2.18', 'v2', 'SUPPORTED', + '2020-04-29T01:00:00Z', host_url) + # Add quota support to octavia's l7policy and l7rule + self._add_a_version(versions, 'v2.19', 'v2', 'SUPPORTED', + '2020-05-12T00:00:00Z', host_url) + # ALPN protocols (listener) + self._add_a_version(versions, 'v2.20', 'v2', 'SUPPORTED', + '2020-08-02T00:00:00Z', host_url) + # Amphora delete + self._add_a_version(versions, 'v2.21', 'v2', 'SUPPORTED', + '2020-09-03T00:00:00Z', host_url) + # Add PROXYV2 pool protocol + self._add_a_version(versions, 'v2.22', 'v2', 'SUPPORTED', + '2020-09-04T00:00:00Z', host_url) + # SCTP protocol + self._add_a_version(versions, 'v2.23', 'v2', 'SUPPORTED', + '2020-09-07T00:00:00Z', host_url) + # ALPN protocols (pool) + self._add_a_version(versions, 'v2.24', 'v2', 'SUPPORTED', + '2020-10-15T00:00:00Z', host_url) + # PROMETHEUS listeners + self._add_a_version(versions, 'v2.25', 'v2', 'SUPPORTED', + '2021-10-02T00:00:00Z', host_url) + # Additional VIPs + self._add_a_version(versions, 'v2.26', 'v2', 'SUPPORTED', + '2022-08-29T00:00:00Z', host_url) + # HTTP Strict Transport Security (HSTS) + self._add_a_version(versions, 'v2.27', 'v2', 'SUPPORTED', + '2023-05-05T00:00:00Z', host_url) + # Add VIP port vnic_type for SR-IOV + self._add_a_version(versions, 'v2.28', 'v2', 'SUPPORTED', + '2023-11-08T00:00:00Z', host_url) + # Add VIP SGs + self._add_a_version(versions, 'v2.29', 'v2', 'SUPPORTED', + '2024-10-15T00:00:00Z', host_url) + # Add member port SR-IOV support + self._add_a_version(versions, 'v2.30', 'v2', 'CURRENT', + '2025-02-26T00:00:00Z', host_url) + return {'versions': versions} diff --git a/octavia/api/v2/__init__.py b/octavia/api/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/api/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/api/v2/controllers/__init__.py b/octavia/api/v2/controllers/__init__.py new file mode 100644 index 0000000000..c9525b3ed4 --- /dev/null +++ b/octavia/api/v2/controllers/__init__.py @@ -0,0 +1,80 @@ +# Copyright 2016 Intel +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
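+ + # Wires the v2 resource controllers into the pecan routing tree. The same + # controller set is reachable under /v2 and the legacy /v2.0 path, with an + # lbaas alias for neutron-lbaas style URLs; the amphora admin resources + # hang off the octavia-specific /v2/octavia sub-tree.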
+ +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.v2.controllers import amphora +from octavia.api.v2.controllers import availability_zone_profiles +from octavia.api.v2.controllers import availability_zones +from octavia.api.v2.controllers import base +from octavia.api.v2.controllers import flavor_profiles +from octavia.api.v2.controllers import flavors +from octavia.api.v2.controllers import health_monitor +from octavia.api.v2.controllers import l7policy +from octavia.api.v2.controllers import listener +from octavia.api.v2.controllers import load_balancer +from octavia.api.v2.controllers import pool +from octavia.api.v2.controllers import provider +from octavia.api.v2.controllers import quotas + + +class BaseV2Controller(base.BaseController): + loadbalancers = None + listeners = None + pools = None + l7policies = None + healthmonitors = None + quotas = None + + def __init__(self): + super().__init__() + self.loadbalancers = load_balancer.LoadBalancersController() + self.listeners = listener.ListenersController() + self.pools = pool.PoolsController() + self.l7policies = l7policy.L7PolicyController() + self.healthmonitors = health_monitor.HealthMonitorController() + self.quotas = quotas.QuotasController() + self.providers = provider.ProviderController() + self.flavors = flavors.FlavorsController() + self.flavorprofiles = flavor_profiles.FlavorProfileController() + self.availabilityzones = ( + availability_zones.AvailabilityZonesController()) + self.availabilityzoneprofiles = ( + availability_zone_profiles.AvailabilityZoneProfileController()) + + @wsme_pecan.wsexpose(wtypes.text) + def get(self): + return "v2" + + +class OctaviaV2Controller(base.BaseController): + amphorae = None + + def __init__(self): + super().__init__() + self.amphorae = amphora.AmphoraController() + + @wsme_pecan.wsexpose(wtypes.text) + def get(self): + return "v2" + + +class V2Controller(BaseV2Controller): + lbaas = None + + def __init__(self): + super().__init__() + self.lbaas = BaseV2Controller() + self.octavia = OctaviaV2Controller() diff --git a/octavia/api/v2/controllers/amphora.py b/octavia/api/v2/controllers/amphora.py new file mode 100644 index 0000000000..e07ea2e6c0 --- /dev/null +++ b/octavia/api/v2/controllers/amphora.py @@ -0,0 +1,229 @@ +# Copyright 2014 Rackspace +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
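+ + # Amphora management API. Besides list, show and delete, the _lookup() + # hook below routes <amphora id>/config, <amphora id>/failover and + # <amphora id>/stats to dedicated sub-controllers.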
+ +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging as messaging +from oslo_utils import excutils +from pecan import expose as pecan_expose +from pecan import request as pecan_request +from sqlalchemy.orm import exc as sa_exception +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.v2.controllers import base +from octavia.api.v2.types import amphora as amp_types +from octavia.common import constants +from octavia.common import exceptions +from octavia.common import rpc + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class AmphoraController(base.BaseController): + RBAC_TYPE = constants.RBAC_AMPHORA + + def __init__(self): + super().__init__() + topic = cfg.CONF.oslo_messaging.topic + self.target = messaging.Target( + namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, + topic=topic, version="1.0", fanout=False) + self.client = rpc.get_client(self.target) + + @wsme_pecan.wsexpose(amp_types.AmphoraRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get_one(self, id, fields=None): + """Gets a single amphora's details.""" + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_amp = self._get_db_amp(context.session, id, show_deleted=False) + + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ONE) + + result = self._convert_db_to_type( + db_amp, amp_types.AmphoraResponse) + if fields is not None: + result = self._filter_fields([result], fields)[0] + return amp_types.AmphoraRootResponse(amphora=result) + + @wsme_pecan.wsexpose(amp_types.AmphoraeRootResponse, [wtypes.text], + ignore_extra_args=True) + def get_all(self, fields=None): + """Gets all amphorae.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ALL) + + with context.session.begin(): + db_amp, links = self.repositories.amphora.get_all_API_list( + context.session, show_deleted=False, + pagination_helper=pcontext.get(constants.PAGINATION_HELPER)) + result = self._convert_db_to_type( + db_amp, [amp_types.AmphoraResponse]) + if fields is not None: + result = self._filter_fields(result, fields) + return amp_types.AmphoraeRootResponse( + amphorae=result, amphorae_links=links) + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) + def delete(self, id): + """Deletes an amphora.""" + context = pecan_request.context.get('octavia_context') + + self._auth_validate_action(context, context.project_id, + constants.RBAC_DELETE) + + with context.session.begin(): + try: + self.repositories.amphora.test_and_set_status_for_delete( + context.session, id) + except sa_exception.NoResultFound as e: + raise exceptions.NotFound(resource='Amphora', id=id) from e + + LOG.info("Sending delete amphora %s to the queue.", id) + payload = {constants.AMPHORA_ID: id} + self.client.cast({}, 'delete_amphora', **payload) + + @pecan_expose() + def _lookup(self, amphora_id, *remainder): + """Overridden pecan _lookup method for custom routing. + + Currently it checks if this was a failover request and routes + the request to the FailoverController. 
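+ Config update and statistics requests are routed the same way, to the + AmphoraUpdateController and AmphoraStatsController respectively.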
+ """ + if amphora_id and remainder: + controller = remainder[0] + remainder = remainder[1:] + if controller == 'config': + return AmphoraUpdateController(amp_id=amphora_id), remainder + if controller == 'failover': + return FailoverController(amp_id=amphora_id), remainder + if controller == 'stats': + return AmphoraStatsController(amp_id=amphora_id), remainder + return None + + +class FailoverController(base.BaseController): + RBAC_TYPE = constants.RBAC_AMPHORA + + def __init__(self, amp_id): + super().__init__() + topic = constants.TOPIC_AMPHORA_V2 + version = "2.0" + self.target = messaging.Target( + namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, + topic=topic, version=version, fanout=False) + self.client = rpc.get_client(self.target) + self.amp_id = amp_id + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=202) + def put(self): + """Fails over an amphora""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + with context.session.begin(): + db_amp = self._get_db_amp(context.session, self.amp_id, + show_deleted=False) + + self._auth_validate_action( + context, db_amp.load_balancer.project_id, + constants.RBAC_PUT_FAILOVER) + + with context.session.begin(): + self.repositories.load_balancer.test_and_set_provisioning_status( + context.session, db_amp.load_balancer_id, + status=constants.PENDING_UPDATE, raise_exception=True) + + try: + LOG.info("Sending failover request for amphora %s to the " + "queue", self.amp_id) + payload = {constants.AMPHORA_ID: db_amp.id} + self.client.cast({}, 'failover_amphora', **payload) + except Exception: + with excutils.save_and_reraise_exception(reraise=False): + self.repositories.load_balancer.update( + context.session, db_amp.load_balancer.id, + provisioning_status=constants.ERROR) + + +class AmphoraUpdateController(base.BaseController): + RBAC_TYPE = constants.RBAC_AMPHORA + + def __init__(self, amp_id): + super().__init__() + + topic = constants.TOPIC_AMPHORA_V2 + version = "2.0" + self.target = messaging.Target( + namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, + topic=topic, version=version, fanout=False) + self.client = rpc.get_client(self.target) + self.amp_id = amp_id + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=202) + def put(self): + """Update amphora agent configuration""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + with context.session.begin(): + db_amp = self._get_db_amp(context.session, self.amp_id, + show_deleted=False) + + self._auth_validate_action( + context, db_amp.load_balancer.project_id, + constants.RBAC_PUT_CONFIG) + + try: + LOG.info("Sending amphora agent update request for amphora %s to " + "the queue.", self.amp_id) + payload = {constants.AMPHORA_ID: db_amp.id} + self.client.cast({}, 'update_amphora_agent_config', **payload) + except Exception: + with excutils.save_and_reraise_exception(reraise=True): + LOG.error("Unable to send amphora agent update request for " + "amphora %s to the queue.", self.amp_id) + + +class AmphoraStatsController(base.BaseController): + RBAC_TYPE = constants.RBAC_AMPHORA + + def __init__(self, amp_id): + super().__init__() + self.amp_id = amp_id + + @wsme_pecan.wsexpose(amp_types.StatisticsRootResponse, wtypes.text, + status_code=200) + def get(self): + context = pecan_request.context.get('octavia_context') + + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_STATS) + + with context.session.begin(): + stats = self.repositories.get_amphora_stats(context.session, + self.amp_id) + if not 
stats: + raise exceptions.NotFound(resource='Amphora stats for', + id=self.amp_id) + + wsme_stats = [] + for stat in stats: + wsme_stats.append(amp_types.AmphoraStatisticsResponse(**stat)) + return amp_types.StatisticsRootResponse(amphora_stats=wsme_stats) diff --git a/octavia/api/v2/controllers/availability_zone_profiles.py b/octavia/api/v2/controllers/availability_zone_profiles.py new file mode 100644 index 0000000000..bd963f01bb --- /dev/null +++ b/octavia/api/v2/controllers/availability_zone_profiles.py @@ -0,0 +1,247 @@ +# Copyright 2019 Verizon Media +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_db import exception as odb_exceptions +from oslo_log import log as logging +from oslo_serialization import jsonutils +from oslo_utils import excutils +from oslo_utils import uuidutils +from pecan import request as pecan_request +from sqlalchemy.orm import exc as sa_exception +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.drivers import driver_factory +from octavia.api.drivers import utils as driver_utils +from octavia.api.v2.controllers import base +from octavia.api.v2.types import availability_zone_profile as profile_types +from octavia.common import constants +from octavia.common import exceptions + +LOG = logging.getLogger(__name__) + + +class AvailabilityZoneProfileController(base.BaseController): + RBAC_TYPE = constants.RBAC_AVAILABILITY_ZONE_PROFILE + + def __init__(self): + super().__init__() + + @wsme_pecan.wsexpose(profile_types.AvailabilityZoneProfileRootResponse, + wtypes.text, [wtypes.text], ignore_extra_args=True) + def get_one(self, id, fields=None): + """Gets an Availability Zone Profile's detail.""" + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ONE) + if id == constants.NIL_UUID: + raise exceptions.NotFound(resource='Availability Zone Profile', + id=constants.NIL_UUID) + with context.session.begin(): + db_availability_zone_profile = ( + self._get_db_availability_zone_profile(context.session, id)) + result = self._convert_db_to_type( + db_availability_zone_profile, + profile_types.AvailabilityZoneProfileResponse) + if fields is not None: + result = self._filter_fields([result], fields)[0] + return profile_types.AvailabilityZoneProfileRootResponse( + availability_zone_profile=result) + + @wsme_pecan.wsexpose(profile_types.AvailabilityZoneProfilesRootResponse, + [wtypes.text], ignore_extra_args=True) + def get_all(self, fields=None): + """Lists all Availability Zone Profiles.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ALL) + with context.session.begin(): + db_availability_zone_profiles, links = ( + self.repositories.availability_zone_profile.get_all( + context.session, + pagination_helper=pcontext.get( + constants.PAGINATION_HELPER))) + result = self._convert_db_to_type( + db_availability_zone_profiles, + 
[profile_types.AvailabilityZoneProfileResponse]) + if fields is not None: + result = self._filter_fields(result, fields) + return profile_types.AvailabilityZoneProfilesRootResponse( + availability_zone_profiles=result, + availability_zone_profile_links=links) + + @wsme_pecan.wsexpose(profile_types.AvailabilityZoneProfileRootResponse, + body=profile_types.AvailabilityZoneProfileRootPOST, + status_code=201) + def post(self, availability_zone_profile_): + """Creates an Availability Zone Profile.""" + availability_zone_profile = ( + availability_zone_profile_.availability_zone_profile) + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_POST) + # Do a basic JSON validation on the metadata + try: + availability_zone_data_dict = jsonutils.loads( + availability_zone_profile.availability_zone_data) + except Exception as e: + raise exceptions.InvalidOption( + value=availability_zone_profile.availability_zone_data, + option=constants.AVAILABILITY_ZONE_DATA) from e + + # Validate that the provider driver supports the metadata + driver = driver_factory.get_driver( + availability_zone_profile.provider_name) + driver_utils.call_provider( + driver.name, driver.validate_availability_zone, + availability_zone_data_dict) + + context.session.begin() + try: + availability_zone_profile_dict = availability_zone_profile.to_dict( + render_unsets=True) + availability_zone_profile_dict['id'] = uuidutils.generate_uuid() + db_availability_zone_profile = ( + self.repositories.availability_zone_profile.create( + context.session, **availability_zone_profile_dict)) + context.session.commit() + except odb_exceptions.DBDuplicateEntry as e: + context.session.rollback() + raise exceptions.IDAlreadyExists() from e + except Exception: + with excutils.save_and_reraise_exception(): + context.session.rollback() + result = self._convert_db_to_type( + db_availability_zone_profile, + profile_types.AvailabilityZoneProfileResponse) + return profile_types.AvailabilityZoneProfileRootResponse( + availability_zone_profile=result) + + def _validate_update_azp(self, context, id, availability_zone_profile): + if availability_zone_profile.name is None: + raise exceptions.InvalidOption(value=None, option=constants.NAME) + if availability_zone_profile.provider_name is None: + raise exceptions.InvalidOption( + value=None, option=constants.PROVIDER_NAME) + if availability_zone_profile.availability_zone_data is None: + raise exceptions.InvalidOption( + value=None, option=constants.AVAILABILITY_ZONE_DATA) + + # Don't allow changes to the availability_zone_data or provider_name if + # it is in use. 
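+ # A profile that is referenced by an existing availability zone is + # locked: only its name may still be updated.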
+ if (not isinstance(availability_zone_profile.availability_zone_data, + wtypes.UnsetType) or + not isinstance(availability_zone_profile.provider_name, + wtypes.UnsetType)): + if self.repositories.availability_zone.count( + context.session, availability_zone_profile_id=id) > 0: + raise exceptions.ObjectInUse( + object='Availability Zone Profile', id=id) + + @wsme_pecan.wsexpose(profile_types.AvailabilityZoneProfileRootResponse, + wtypes.text, status_code=200, + body=profile_types.AvailabilityZoneProfileRootPUT) + def put(self, id, availability_zone_profile_): + """Updates an Availability Zone Profile.""" + availability_zone_profile = ( + availability_zone_profile_.availability_zone_profile) + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_PUT) + + with context.session.begin(): + self._validate_update_azp(context, id, availability_zone_profile) + if id == constants.NIL_UUID: + raise exceptions.NotFound(resource='Availability Zone Profile', + id=constants.NIL_UUID) + + if not isinstance(availability_zone_profile.availability_zone_data, + wtypes.UnsetType): + # Do a basic JSON validation on the metadata + try: + availability_zone_data_dict = jsonutils.loads( + availability_zone_profile.availability_zone_data) + except Exception as e: + raise exceptions.InvalidOption( + value=availability_zone_profile.availability_zone_data, + option=constants.AVAILABILITY_ZONE_DATA) from e + + if isinstance(availability_zone_profile.provider_name, + wtypes.UnsetType): + with context.session.begin(): + db_availability_zone_profile = ( + self._get_db_availability_zone_profile( + context.session, id)) + provider_driver = db_availability_zone_profile.provider_name + else: + provider_driver = availability_zone_profile.provider_name + + # Validate that the provider driver supports the metadata + driver = driver_factory.get_driver(provider_driver) + driver_utils.call_provider( + driver.name, driver.validate_availability_zone, + availability_zone_data_dict) + + context.session.begin() + try: + availability_zone_profile_dict = availability_zone_profile.to_dict( + render_unsets=False) + if availability_zone_profile_dict: + self.repositories.availability_zone_profile.update( + context.session, id, **availability_zone_profile_dict) + context.session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + context.session.rollback() + + # Force SQLAlchemy to query the DB, otherwise we get inconsistent + # results + context.session.expire_all() + with context.session.begin(): + db_availability_zone_profile = ( + self._get_db_availability_zone_profile(context.session, + id)) + result = self._convert_db_to_type( + db_availability_zone_profile, + profile_types.AvailabilityZoneProfileResponse) + return profile_types.AvailabilityZoneProfileRootResponse( + availability_zone_profile=result) + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) + def delete(self, availability_zone_profile_id): + """Deletes an Availability Zone Profile.""" + context = pecan_request.context.get('octavia_context') + + self._auth_validate_action(context, context.project_id, + constants.RBAC_DELETE) + if availability_zone_profile_id == constants.NIL_UUID: + raise exceptions.NotFound(resource='Availability Zone Profile', + id=constants.NIL_UUID) + # Don't allow it to be deleted if it is in use by an availability zone + with context.session.begin(): + if self.repositories.availability_zone.count( + context.session, +
availability_zone_profile_id=availability_zone_profile_id + ) > 0: + raise exceptions.ObjectInUse( + object='Availability Zone Profile', + id=availability_zone_profile_id) + try: + self.repositories.availability_zone_profile.delete( + context.session, id=availability_zone_profile_id) + except sa_exception.NoResultFound as e: + raise exceptions.NotFound( + resource='Availability Zone Profile', + id=availability_zone_profile_id) from e diff --git a/octavia/api/v2/controllers/availability_zones.py b/octavia/api/v2/controllers/availability_zones.py new file mode 100644 index 0000000000..b57aac5674 --- /dev/null +++ b/octavia/api/v2/controllers/availability_zones.py @@ -0,0 +1,180 @@ +# Copyright 2019 Verizon Media +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_db import api as oslo_db_api +from oslo_db import exception as odb_exceptions +from oslo_log import log as logging +from oslo_utils import excutils +from pecan import request as pecan_request +from sqlalchemy.orm import exc as sa_exception +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.v2.controllers import base +from octavia.api.v2.types import availability_zones as availability_zone_types +from octavia.common import constants +from octavia.common import exceptions +from octavia.db import api as db_api + +LOG = logging.getLogger(__name__) + + +class AvailabilityZonesController(base.BaseController): + RBAC_TYPE = constants.RBAC_AVAILABILITY_ZONE + + def __init__(self): + super().__init__() + + @wsme_pecan.wsexpose(availability_zone_types.AvailabilityZoneRootResponse, + wtypes.text, [wtypes.text], ignore_extra_args=True) + def get_one(self, name, fields=None): + """Gets an Availability Zone's detail.""" + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ONE) + if name == constants.NIL_UUID: + raise exceptions.NotFound(resource='Availability Zone', + id=constants.NIL_UUID) + with context.session.begin(): + db_availability_zone = self._get_db_availability_zone( + context.session, name) + result = self._convert_db_to_type( + db_availability_zone, + availability_zone_types.AvailabilityZoneResponse) + if fields is not None: + result = self._filter_fields([result], fields)[0] + return availability_zone_types.AvailabilityZoneRootResponse( + availability_zone=result) + + @wsme_pecan.wsexpose(availability_zone_types.AvailabilityZonesRootResponse, + [wtypes.text], ignore_extra_args=True) + def get_all(self, fields=None): + """Lists all Availability Zones.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ALL) + with context.session.begin(): + db_availability_zones, links = ( + self.repositories.availability_zone.get_all( + context.session, + pagination_helper=pcontext.get( + constants.PAGINATION_HELPER))) + result = self._convert_db_to_type( + db_availability_zones, + 
[availability_zone_types.AvailabilityZoneResponse]) + if fields is not None: + result = self._filter_fields(result, fields) + return availability_zone_types.AvailabilityZonesRootResponse( + availability_zones=result, availability_zones_links=links) + + @wsme_pecan.wsexpose(availability_zone_types.AvailabilityZoneRootResponse, + body=availability_zone_types.AvailabilityZoneRootPOST, + status_code=201) + def post(self, availability_zone_): + """Creates an Availability Zone.""" + availability_zone = availability_zone_.availability_zone + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_POST) + + context.session.begin() + try: + availability_zone_dict = availability_zone.to_dict( + render_unsets=True) + db_availability_zone = self.repositories.availability_zone.create( + context.session, **availability_zone_dict) + context.session.commit() + except odb_exceptions.DBDuplicateEntry as e: + context.session.rollback() + raise exceptions.RecordAlreadyExists( + field='availability zone', name=availability_zone.name) from e + except Exception: + with excutils.save_and_reraise_exception(): + context.session.rollback() + result = self._convert_db_to_type( + db_availability_zone, + availability_zone_types.AvailabilityZoneResponse) + return availability_zone_types.AvailabilityZoneRootResponse( + availability_zone=result) + + @wsme_pecan.wsexpose(availability_zone_types.AvailabilityZoneRootResponse, + wtypes.text, status_code=200, + body=availability_zone_types.AvailabilityZoneRootPUT) + def put(self, name, availability_zone_): + availability_zone = availability_zone_.availability_zone + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_PUT) + if name == constants.NIL_UUID: + raise exceptions.NotFound(resource='Availability Zone', + id=constants.NIL_UUID) + context.session.begin() + try: + availability_zone_dict = availability_zone.to_dict( + render_unsets=False) + if availability_zone_dict: + self.repositories.availability_zone.update( + context.session, name, **availability_zone_dict) + context.session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + context.session.rollback() + + # Force SQL alchemy to query the DB, otherwise we get inconsistent + # results + context.session.expire_all() + with context.session.begin(): + db_availability_zone = self._get_db_availability_zone( + context.session, name) + result = self._convert_db_to_type( + db_availability_zone, + availability_zone_types.AvailabilityZoneResponse) + return availability_zone_types.AvailabilityZoneRootResponse( + availability_zone=result) + + @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) + @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) + def delete(self, availability_zone_name): + """Deletes an Availability Zone""" + context = pecan_request.context.get('octavia_context') + + self._auth_validate_action(context, context.project_id, + constants.RBAC_DELETE) + if availability_zone_name == constants.NIL_UUID: + raise exceptions.NotFound(resource='Availability Zone', + id=constants.NIL_UUID) + serial_session = db_api.get_session() + serial_session.connection( + execution_options={'isolation_level': 'SERIALIZABLE'}) + try: + self.repositories.availability_zone.delete( + serial_session, name=availability_zone_name) + serial_session.commit() + # Handle when load balancers still reference this availability_zone + except 
odb_exceptions.DBReferenceError as e: + serial_session.rollback() + raise exceptions.ObjectInUse(object='Availability Zone', + id=availability_zone_name) from e + except sa_exception.NoResultFound as e: + serial_session.rollback() + raise exceptions.NotFound(resource='Availability Zone', + id=availability_zone_name) from e + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error( + 'Unknown availability_zone delete exception: %s', str(e)) + serial_session.rollback() + finally: + serial_session.close() diff --git a/octavia/api/v2/controllers/base.py b/octavia/api/v2/controllers/base.py new file mode 100644 index 0000000000..c2bea34876 --- /dev/null +++ b/octavia/api/v2/controllers/base.py @@ -0,0 +1,351 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cryptography.hazmat.backends import default_backend +from cryptography import x509 +from oslo_config import cfg +from oslo_log import log as logging +from pecan import request as pecan_request +from pecan import rest as pecan_rest +from stevedore import driver as stevedore_driver +from wsme import types as wtypes + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import policy +from octavia.db import repositories +from octavia.i18n import _ + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class BaseController(pecan_rest.RestController): + RBAC_TYPE = None + + def __init__(self): + super().__init__() + self.cert_manager = stevedore_driver.DriverManager( + namespace='octavia.cert_manager', + name=CONF.certificates.cert_manager, + invoke_on_load=True, + ).driver + + self.repositories = repositories.Repositories() + + @staticmethod + def _convert_db_to_type(db_entity, to_type, children=False): + """Converts a data model into an Octavia WSME type + + :param db_entity: data model to convert + :param to_type: converts db_entity to this type + """ + if isinstance(to_type, list): + to_type = to_type[0] + + def _convert(db_obj): + return to_type.from_data_model(db_obj, children=children) + if isinstance(db_entity, list): + converted = [_convert(db_obj) for db_obj in db_entity] + else: + converted = _convert(db_entity) + return converted + + @staticmethod + def _get_db_obj(session, repo, data_model, id, show_deleted=True, + limited_graph=False): + """Gets an object from the database and returns it.""" + db_obj = repo.get(session, id=id, show_deleted=show_deleted, + limited_graph=limited_graph) + if not db_obj: + LOG.debug('%(name)s %(id)s not found', + {'name': data_model._name(), 'id': id}) + raise exceptions.NotFound( + resource=data_model._name(), id=id) + return db_obj + + def _get_db_lb(self, session, id, show_deleted=True): + """Get a load balancer from the database.""" + return self._get_db_obj(session, self.repositories.load_balancer, + data_models.LoadBalancer, id, + show_deleted=show_deleted) + + def _get_db_listener(self, session, id, show_deleted=True): + """Get a listener from the 
database.""" + return self._get_db_obj(session, self.repositories.listener, + data_models.Listener, id, + show_deleted=show_deleted) + + def _get_listener_and_loadbalancer_id(self, db_l7policy): + """Get listener and loadbalancer ids from the l7policy db_model.""" + load_balancer_id = db_l7policy.listener.load_balancer_id + listener_id = db_l7policy.listener_id + return load_balancer_id, listener_id + + def _get_db_pool(self, session, id, show_deleted=True, + limited_graph=False): + """Get a pool from the database.""" + return self._get_db_obj(session, self.repositories.pool, + data_models.Pool, id, + show_deleted=show_deleted, + limited_graph=limited_graph) + + def _get_db_member(self, session, id, show_deleted=True): + """Get a member from the database.""" + return self._get_db_obj(session, self.repositories.member, + data_models.Member, id, + show_deleted=show_deleted) + + def _get_db_hm(self, session, id, show_deleted=True): + """Get a health monitor from the database.""" + return self._get_db_obj(session, self.repositories.health_monitor, + data_models.HealthMonitor, id, + show_deleted=show_deleted) + + def _get_db_flavor(self, session, id): + """Get a flavor from the database.""" + return self._get_db_obj(session, self.repositories.flavor, + data_models.Flavor, id) + + def _get_db_flavor_profile(self, session, id): + """Get a flavor profile from the database.""" + return self._get_db_obj(session, self.repositories.flavor_profile, + data_models.FlavorProfile, id) + + def _get_db_availability_zone(self, session, name): + """Get an availability zone from the database.""" + db_obj = self.repositories.availability_zone.get(session, name=name) + if not db_obj: + LOG.debug('%(obj_name)s %(name)s not found', + {'obj_name': data_models.AvailabilityZone._name(), + 'name': name}) + raise exceptions.NotFound( + resource=data_models.AvailabilityZone._name(), id=name) + return db_obj + + def _get_db_availability_zone_profile(self, session, id): + """Get an availability zone profile from the database.""" + return self._get_db_obj(session, + self.repositories.availability_zone_profile, + data_models.AvailabilityZoneProfile, id) + + def _get_db_l7policy(self, session, id, show_deleted=True): + """Get a L7 Policy from the database.""" + return self._get_db_obj(session, self.repositories.l7policy, + data_models.L7Policy, id, + show_deleted=show_deleted) + + def _get_db_l7rule(self, session, id, show_deleted=True): + """Get a L7 Rule from the database.""" + return self._get_db_obj(session, self.repositories.l7rule, + data_models.L7Rule, id, + show_deleted=show_deleted) + + def _get_db_amp(self, session, id, show_deleted=True): + """Gets an Amphora from the database.""" + return self._get_db_obj(session, self.repositories.amphora, + data_models.Amphora, id, + show_deleted=show_deleted) + + def _get_lb_project_id(self, session, id, show_deleted=True): + """Get the project_id of the load balancer from the database.""" + lb = self._get_db_obj(session, self.repositories.load_balancer, + data_models.LoadBalancer, id, + show_deleted=show_deleted) + return lb.project_id + + def _get_lb_project_id_provider(self, session, id, show_deleted=True): + """Get the project_id of the load balancer from the database.""" + lb = self._get_db_obj(session, self.repositories.load_balancer, + data_models.LoadBalancer, id, + show_deleted=show_deleted) + return lb.project_id, lb.provider + + def _get_l7policy_project_id(self, session, id, show_deleted=True): + """Get the project_id of the load balancer from the database.""" + 
l7policy = self._get_db_obj(session, self.repositories.l7policy, + data_models.L7Policy, id, + show_deleted=show_deleted) + return l7policy.project_id + + def _get_default_quotas(self, project_id): + """Gets the project's default quotas.""" + quotas = data_models.Quotas( + project_id=project_id, + load_balancer=CONF.quotas.default_load_balancer_quota, + listener=CONF.quotas.default_listener_quota, + pool=CONF.quotas.default_pool_quota, + health_monitor=CONF.quotas.default_health_monitor_quota, + member=CONF.quotas.default_member_quota, + l7policy=CONF.quotas.default_l7policy_quota, + l7rule=CONF.quotas.default_l7rule_quota) + return quotas + + def _get_db_quotas(self, session, project_id): + """Gets the project's quotas from the database, or the default quotas if none are set.""" + # At this point project_id should not ever be None or Unset + db_quotas = self.repositories.quotas.get( + session, project_id=project_id) + if not db_quotas: + LOG.debug("No custom quotas for project %s. Returning " + "defaults...", project_id) + db_quotas = self._get_default_quotas(project_id=project_id) + else: + # Fill in any that are using the configured defaults + if db_quotas.load_balancer is None: + db_quotas.load_balancer = (CONF.quotas. + default_load_balancer_quota) + if db_quotas.listener is None: + db_quotas.listener = CONF.quotas.default_listener_quota + if db_quotas.pool is None: + db_quotas.pool = CONF.quotas.default_pool_quota + if db_quotas.health_monitor is None: + db_quotas.health_monitor = (CONF.quotas. + default_health_monitor_quota) + if db_quotas.member is None: + db_quotas.member = CONF.quotas.default_member_quota + if db_quotas.l7policy is None: + db_quotas.l7policy = CONF.quotas.default_l7policy_quota + if db_quotas.l7rule is None: + db_quotas.l7rule = CONF.quotas.default_l7rule_quota + return db_quotas + + def _auth_get_all(self, context, project_id): + # Check authorization to list objects under all projects + action = f'{self.RBAC_TYPE}{constants.RBAC_GET_ALL_GLOBAL}' + target = {'project_id': project_id} + if not policy.get_enforcer().authorize(action, target, + context, do_raise=False): + # Not a global observer or admin + if project_id is None: + project_id = context.project_id + + # If we still don't know who it is, reject it.
+ if project_id is None: + raise exceptions.PolicyForbidden() + + # Check authorization to list objects under this project + self._auth_validate_action(context, project_id, + constants.RBAC_GET_ALL) + if project_id is None: + query_filter = {} + else: + query_filter = {'project_id': project_id} + return query_filter + + def _auth_validate_action(self, context, project_id, action): + # Check that the user is authorized to do an action in this object + action = f'{self.RBAC_TYPE}{action}' + target = {'project_id': project_id} + policy.get_enforcer().authorize(action, target, context) + + def _filter_fields(self, object_list, fields): + if CONF.api_settings.allow_field_selection: + for index, obj in enumerate(object_list): + members = self._get_attrs(obj) + for member in members: + if member not in fields: + setattr(obj, member, wtypes.Unset) + return object_list + + @staticmethod + def _get_attrs(obj): + attrs = [attr for attr in dir(obj) if not callable( + getattr(obj, attr)) and not attr.startswith("_")] + return attrs + + def _validate_tls_refs(self, tls_refs): + context = pecan_request.context.get('octavia_context') + bad_refs = [] + for ref in tls_refs: + try: + self.cert_manager.set_acls(context, ref) + self.cert_manager.get_cert(context, ref, check_only=True) + except exceptions.UnreadablePKCS12: + raise + except Exception: + bad_refs.append(ref) + + if bad_refs: + raise exceptions.CertificateRetrievalException(ref=bad_refs) + + def _validate_client_ca_and_crl_refs(self, client_ca_ref, crl_ref): + context = pecan_request.context.get('octavia_context') + bad_refs = [] + try: + self.cert_manager.set_acls(context, client_ca_ref) + ca_pem = self.cert_manager.get_secret(context, client_ca_ref) + except Exception: + bad_refs.append(client_ca_ref) + + pem_crl = None + if crl_ref: + try: + self.cert_manager.set_acls(context, crl_ref) + pem_crl = self.cert_manager.get_secret(context, crl_ref) + except Exception: + bad_refs.append(crl_ref) + if bad_refs: + raise exceptions.CertificateRetrievalException(ref=bad_refs) + + ca_cert = None + try: + # Test if it needs to be UTF-8 encoded + try: + ca_pem = ca_pem.encode('utf-8') + except AttributeError: + pass + ca_cert = x509.load_pem_x509_certificate(ca_pem, default_backend()) + except Exception as e: + raise exceptions.ValidationException(detail=_( + "The client authentication CA certificate is invalid. " + "It must be a valid x509 PEM format certificate. " + "Error: %s") % str(e)) + + # Validate the CRL is for the client CA + if pem_crl: + ca_pub_key = ca_cert.public_key() + crl = None + # Test if it needs to be UTF-8 encoded + try: + pem_crl = pem_crl.encode('utf-8') + except AttributeError: + pass + try: + crl = x509.load_pem_x509_crl(pem_crl, default_backend()) + except Exception as e: + raise exceptions.ValidationException(detail=_( + "The client authentication certificate revocation list " + "is invalid. It must be a valid x509 PEM format " + "certificate revocation list. 
Error: %s") % str(e)) + if not crl.is_signature_valid(ca_pub_key): + raise exceptions.ValidationException(detail=_( + "The CRL specified is not valid for client certificate " + "authority reference supplied.")) + + @staticmethod + def _validate_protocol(listener_protocol, pool_protocol): + proto_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP + for valid_pool_proto in proto_map[listener_protocol]: + if pool_protocol == valid_pool_proto: + return + detail = _("The pool protocol '%(pool_protocol)s' is invalid while " + "the listener protocol is '%(listener_protocol)s'.") % { + "pool_protocol": pool_protocol, + "listener_protocol": listener_protocol} + raise exceptions.ValidationException(detail=detail) diff --git a/octavia/api/v2/controllers/flavor_profiles.py b/octavia/api/v2/controllers/flavor_profiles.py new file mode 100644 index 0000000000..665be9cdac --- /dev/null +++ b/octavia/api/v2/controllers/flavor_profiles.py @@ -0,0 +1,221 @@ +# Copyright 2014 Rackspace +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_db import exception as odb_exceptions +from oslo_log import log as logging +from oslo_serialization import jsonutils +from oslo_utils import excutils +from oslo_utils import uuidutils +from pecan import request as pecan_request +from sqlalchemy.orm import exc as sa_exception +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.drivers import driver_factory +from octavia.api.drivers import utils as driver_utils +from octavia.api.v2.controllers import base +from octavia.api.v2.types import flavor_profile as profile_types +from octavia.common import constants +from octavia.common import exceptions + +LOG = logging.getLogger(__name__) + + +class FlavorProfileController(base.BaseController): + RBAC_TYPE = constants.RBAC_FLAVOR_PROFILE + + def __init__(self): + super().__init__() + + @wsme_pecan.wsexpose(profile_types.FlavorProfileRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get_one(self, id, fields=None): + """Gets a flavor profile's detail.""" + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ONE) + if id == constants.NIL_UUID: + raise exceptions.NotFound(resource='Flavor profile', + id=constants.NIL_UUID) + with context.session.begin(): + db_flavor_profile = self._get_db_flavor_profile(context.session, + id) + result = self._convert_db_to_type(db_flavor_profile, + profile_types.FlavorProfileResponse) + if fields is not None: + result = self._filter_fields([result], fields)[0] + return profile_types.FlavorProfileRootResponse(flavorprofile=result) + + @wsme_pecan.wsexpose(profile_types.FlavorProfilesRootResponse, + [wtypes.text], ignore_extra_args=True) + def get_all(self, fields=None): + """Lists all flavor profiles.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ALL) 
+ with context.session.begin(): + db_flavor_profiles, links = ( + self.repositories.flavor_profile.get_all( + context.session, + pagination_helper=pcontext.get( + constants.PAGINATION_HELPER))) + result = self._convert_db_to_type( + db_flavor_profiles, [profile_types.FlavorProfileResponse]) + if fields is not None: + result = self._filter_fields(result, fields) + return profile_types.FlavorProfilesRootResponse( + flavorprofiles=result, flavorprofile_links=links) + + @wsme_pecan.wsexpose(profile_types.FlavorProfileRootResponse, + body=profile_types.FlavorProfileRootPOST, + status_code=201) + def post(self, flavor_profile_): + """Creates a flavor Profile.""" + flavorprofile = flavor_profile_.flavorprofile + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_POST) + # Do a basic JSON validation on the metadata + try: + flavor_data_dict = jsonutils.loads(flavorprofile.flavor_data) + except Exception as e: + raise exceptions.InvalidOption( + value=flavorprofile.flavor_data, + option=constants.FLAVOR_DATA) from e + + # Validate that the provider driver supports the metadata + driver = driver_factory.get_driver(flavorprofile.provider_name) + driver_utils.call_provider(driver.name, driver.validate_flavor, + flavor_data_dict) + + context.session.begin() + try: + flavorprofile_dict = flavorprofile.to_dict(render_unsets=True) + flavorprofile_dict['id'] = uuidutils.generate_uuid() + db_flavor_profile = self.repositories.flavor_profile.create( + context.session, **flavorprofile_dict) + context.session.commit() + except odb_exceptions.DBDuplicateEntry as e: + context.session.rollback() + raise exceptions.IDAlreadyExists() from e + except Exception: + with excutils.save_and_reraise_exception(): + context.session.rollback() + result = self._convert_db_to_type( + db_flavor_profile, profile_types.FlavorProfileResponse) + return profile_types.FlavorProfileRootResponse(flavorprofile=result) + + def _validate_update_fp(self, context, id, flavorprofile): + if flavorprofile.name is None: + raise exceptions.InvalidOption(value=None, option=constants.NAME) + if flavorprofile.provider_name is None: + raise exceptions.InvalidOption(value=None, + option=constants.PROVIDER_NAME) + if flavorprofile.flavor_data is None: + raise exceptions.InvalidOption(value=None, + option=constants.FLAVOR_DATA) + + # Don't allow changes to the flavor_data or provider_name if it + # is in use. 
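+ # A profile that is referenced by an existing flavor is locked: only + # its name may still be updated.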
+ if (not isinstance(flavorprofile.flavor_data, wtypes.UnsetType) or + not isinstance(flavorprofile.provider_name, wtypes.UnsetType)): + if self.repositories.flavor.count(context.session, + flavor_profile_id=id) > 0: + raise exceptions.ObjectInUse(object='Flavor profile', id=id) + + @wsme_pecan.wsexpose(profile_types.FlavorProfileRootResponse, + wtypes.text, status_code=200, + body=profile_types.FlavorProfileRootPUT) + def put(self, id, flavor_profile_): + """Updates a flavor Profile.""" + flavorprofile = flavor_profile_.flavorprofile + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_PUT) + + with context.session.begin(): + self._validate_update_fp(context, id, flavorprofile) + if id == constants.NIL_UUID: + raise exceptions.NotFound(resource='Flavor profile', + id=constants.NIL_UUID) + + if not isinstance(flavorprofile.flavor_data, wtypes.UnsetType): + # Do a basic JSON validation on the metadata + try: + flavor_data_dict = jsonutils.loads(flavorprofile.flavor_data) + except Exception as e: + raise exceptions.InvalidOption( + value=flavorprofile.flavor_data, + option=constants.FLAVOR_DATA) from e + + if isinstance(flavorprofile.provider_name, wtypes.UnsetType): + with context.session.begin(): + db_flavor_profile = self._get_db_flavor_profile( + context.session, id) + provider_driver = db_flavor_profile.provider_name + else: + provider_driver = flavorprofile.provider_name + + # Validate that the provider driver supports the metadata + driver = driver_factory.get_driver(provider_driver) + driver_utils.call_provider(driver.name, driver.validate_flavor, + flavor_data_dict) + + context.session.begin() + try: + flavorprofile_dict = flavorprofile.to_dict(render_unsets=False) + if flavorprofile_dict: + self.repositories.flavor_profile.update(context.session, id, + **flavorprofile_dict) + context.session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + context.session.rollback() + + # Force SQL alchemy to query the DB, otherwise we get inconsistent + # results + context.session.expire_all() + with context.session.begin(): + db_flavor_profile = self._get_db_flavor_profile(context.session, + id) + result = self._convert_db_to_type( + db_flavor_profile, profile_types.FlavorProfileResponse) + return profile_types.FlavorProfileRootResponse(flavorprofile=result) + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) + def delete(self, flavor_profile_id): + """Deletes a Flavor Profile""" + context = pecan_request.context.get('octavia_context') + + self._auth_validate_action(context, context.project_id, + constants.RBAC_DELETE) + + if flavor_profile_id == constants.NIL_UUID: + raise exceptions.NotFound(resource='Flavor profile', + id=constants.NIL_UUID) + + # Don't allow it to be deleted if it is in use by a flavor + with context.session.begin(): + if self.repositories.flavor.count( + context.session, flavor_profile_id=flavor_profile_id) > 0: + raise exceptions.ObjectInUse(object='Flavor profile', + id=flavor_profile_id) + try: + self.repositories.flavor_profile.delete(context.session, + id=flavor_profile_id) + except sa_exception.NoResultFound as e: + raise exceptions.NotFound( + resource='Flavor profile', id=flavor_profile_id) from e diff --git a/octavia/api/v2/controllers/flavors.py b/octavia/api/v2/controllers/flavors.py new file mode 100644 index 0000000000..ae6fe029ce --- /dev/null +++ b/octavia/api/v2/controllers/flavors.py @@ -0,0 +1,165 @@ +# Copyright 2014 Rackspace +# Copyright 
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_db import api as oslo_db_api
+from oslo_db import exception as odb_exceptions
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_utils import uuidutils
+from pecan import request as pecan_request
+from sqlalchemy.orm import exc as sa_exception
+from wsme import types as wtypes
+from wsmeext import pecan as wsme_pecan
+
+from octavia.api.v2.controllers import base
+from octavia.api.v2.types import flavors as flavor_types
+from octavia.common import constants
+from octavia.common import exceptions
+from octavia.db import api as db_api
+
+LOG = logging.getLogger(__name__)
+
+
+class FlavorsController(base.BaseController):
+    RBAC_TYPE = constants.RBAC_FLAVOR
+
+    def __init__(self):
+        super().__init__()
+
+    @wsme_pecan.wsexpose(flavor_types.FlavorRootResponse, wtypes.text,
+                         [wtypes.text], ignore_extra_args=True)
+    def get_one(self, id, fields=None):
+        """Gets a flavor's details."""
+        context = pecan_request.context.get('octavia_context')
+        self._auth_validate_action(context, context.project_id,
+                                   constants.RBAC_GET_ONE)
+        if id == constants.NIL_UUID:
+            raise exceptions.NotFound(resource='Flavor', id=constants.NIL_UUID)
+        with context.session.begin():
+            db_flavor = self._get_db_flavor(context.session, id)
+        result = self._convert_db_to_type(db_flavor,
+                                          flavor_types.FlavorResponse)
+        if fields is not None:
+            result = self._filter_fields([result], fields)[0]
+        return flavor_types.FlavorRootResponse(flavor=result)
+
+    @wsme_pecan.wsexpose(flavor_types.FlavorsRootResponse,
+                         [wtypes.text], ignore_extra_args=True)
+    def get_all(self, fields=None):
+        """Lists all flavors."""
+        pcontext = pecan_request.context
+        context = pcontext.get('octavia_context')
+        self._auth_validate_action(context, context.project_id,
+                                   constants.RBAC_GET_ALL)
+        with context.session.begin():
+            db_flavors, links = self.repositories.flavor.get_all(
+                context.session,
+                pagination_helper=pcontext.get(constants.PAGINATION_HELPER))
+        result = self._convert_db_to_type(
+            db_flavors, [flavor_types.FlavorResponse])
+        if fields is not None:
+            result = self._filter_fields(result, fields)
+        return flavor_types.FlavorsRootResponse(
+            flavors=result, flavors_links=links)
+
+    @wsme_pecan.wsexpose(flavor_types.FlavorRootResponse,
+                         body=flavor_types.FlavorRootPOST, status_code=201)
+    def post(self, flavor_):
+        """Creates a flavor."""
+        flavor = flavor_.flavor
+        context = pecan_request.context.get('octavia_context')
+        self._auth_validate_action(context, context.project_id,
+                                   constants.RBAC_POST)
+
+        # TODO(johnsom) Validate the flavor profile ID
+
+        context.session.begin()
+        try:
+            flavor_dict = flavor.to_dict(render_unsets=True)
+            flavor_dict['id'] = uuidutils.generate_uuid()
+            db_flavor = self.repositories.flavor.create(context.session,
+                                                        **flavor_dict)
+            context.session.commit()
+        except odb_exceptions.DBDuplicateEntry as e:
+            context.session.rollback()
+            raise exceptions.RecordAlreadyExists(field='flavor',
+                                                 name=flavor.name) from e
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                context.session.rollback()
+        result = self._convert_db_to_type(db_flavor,
+                                          flavor_types.FlavorResponse)
+        return flavor_types.FlavorRootResponse(flavor=result)
+
+    @wsme_pecan.wsexpose(flavor_types.FlavorRootResponse,
+                         wtypes.text, status_code=200,
+                         body=flavor_types.FlavorRootPUT)
+    def put(self, id, flavor_):
+        """Updates a flavor."""
+        flavor = flavor_.flavor
+        context = pecan_request.context.get('octavia_context')
+        self._auth_validate_action(context, context.project_id,
+                                   constants.RBAC_PUT)
+        if id == constants.NIL_UUID:
+            raise exceptions.NotFound(resource='Flavor', id=constants.NIL_UUID)
+        context.session.begin()
+        try:
+            flavor_dict = flavor.to_dict(render_unsets=False)
+            if flavor_dict:
+                self.repositories.flavor.update(context.session, id,
+                                                **flavor_dict)
+            context.session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                context.session.rollback()
+
+        # Force SQLAlchemy to query the DB, otherwise we get inconsistent
+        # results
+        context.session.expire_all()
+        with context.session.begin():
+            db_flavor = self._get_db_flavor(context.session, id)
+        result = self._convert_db_to_type(db_flavor,
+                                          flavor_types.FlavorResponse)
+        return flavor_types.FlavorRootResponse(flavor=result)
+
+    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+    @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
+    def delete(self, flavor_id):
+        """Deletes a flavor."""
+        context = pecan_request.context.get('octavia_context')
+
+        self._auth_validate_action(context, context.project_id,
+                                   constants.RBAC_DELETE)
+        if flavor_id == constants.NIL_UUID:
+            raise exceptions.NotFound(resource='Flavor', id=constants.NIL_UUID)
+        serial_session = db_api.get_session()
+        serial_session.begin()
+        # Run the delete in a SERIALIZABLE transaction so a concurrent
+        # load balancer create cannot slip a new reference to this flavor
+        # past the in-use check enforced by the foreign key.
+        serial_session.connection(
+            execution_options={'isolation_level': 'SERIALIZABLE'})
+        try:
+            self.repositories.flavor.delete(serial_session, id=flavor_id)
+            serial_session.commit()
+        # Handle when load balancers still reference this flavor
+        except odb_exceptions.DBReferenceError as e:
+            serial_session.rollback()
+            raise exceptions.ObjectInUse(object='Flavor', id=flavor_id) from e
+        except sa_exception.NoResultFound as e:
+            serial_session.rollback()
+            raise exceptions.NotFound(resource='Flavor', id=flavor_id) from e
+        except Exception as e:
+            with excutils.save_and_reraise_exception():
+                LOG.error('Unknown flavor delete exception: %s', str(e))
+                serial_session.rollback()
+        finally:
+            serial_session.close()
diff --git a/octavia/api/v2/controllers/health_monitor.py b/octavia/api/v2/controllers/health_monitor.py
new file mode 100644
index 0000000000..7afc21f0db
--- /dev/null
+++ b/octavia/api/v2/controllers/health_monitor.py
@@ -0,0 +1,443 @@
+# Copyright 2014 Rackspace
+# Copyright 2016 Blue Box, an IBM Company
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
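+#
+# This controller follows the common v2 controller pattern: validate the
+# request, persist the change in the Octavia database, then dispatch the
+# resulting object to the configured provider driver.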
+ +from octavia_lib.api.drivers import data_models as driver_dm +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_db import exception as odb_exceptions +from oslo_log import log as logging +from oslo_utils import excutils +from pecan import request as pecan_request +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.drivers import driver_factory +from octavia.api.drivers import utils as driver_utils +from octavia.api.v2.controllers import base +from octavia.api.v2.types import health_monitor as hm_types +from octavia.common import constants as consts +from octavia.common import data_models +from octavia.common import exceptions +from octavia.db import prepare as db_prepare +from octavia.i18n import _ + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class HealthMonitorController(base.BaseController): + RBAC_TYPE = consts.RBAC_HEALTHMONITOR + + def __init__(self): + super().__init__() + + @wsme_pecan.wsexpose(hm_types.HealthMonitorRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get_one(self, id, fields=None): + """Gets a single healthmonitor's details.""" + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_hm = self._get_db_hm(context.session, id, show_deleted=False) + + self._auth_validate_action(context, db_hm.project_id, + consts.RBAC_GET_ONE) + + result = self._convert_db_to_type( + db_hm, hm_types.HealthMonitorResponse) + if fields is not None: + result = self._filter_fields([result], fields)[0] + return hm_types.HealthMonitorRootResponse(healthmonitor=result) + + @wsme_pecan.wsexpose(hm_types.HealthMonitorsRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get_all(self, project_id=None, fields=None): + """Gets all health monitors.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + + query_filter = self._auth_get_all(context, project_id) + + with context.session.begin(): + db_hm, links = self.repositories.health_monitor.get_all_API_list( + context.session, show_deleted=False, + pagination_helper=pcontext.get(consts.PAGINATION_HELPER), + **query_filter) + result = self._convert_db_to_type( + db_hm, [hm_types.HealthMonitorResponse]) + if fields is not None: + result = self._filter_fields(result, fields) + return hm_types.HealthMonitorsRootResponse( + healthmonitors=result, healthmonitors_links=links) + + def _get_affected_listener_ids(self, session, hm): + """Gets a list of all listeners this request potentially affects.""" + pool = self.repositories.pool.get(session, id=hm.pool_id) + listener_ids = [li.id for li in pool.listeners] + return listener_ids + + def _test_lb_and_listener_and_pool_statuses(self, session, hm): + """Verify load balancer is in a mutable state.""" + # We need to verify that any listeners referencing this pool are also + # mutable + pool = self.repositories.pool.get(session, id=hm.pool_id) + load_balancer_id = pool.load_balancer_id + # Check the parent is not locked for some reason (ERROR, etc.) 
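+        # The test-and-set below also moves the load balancer and any
+        # affected listeners to PENDING_UPDATE in one step, which is
+        # intended to lock out concurrent API modifications.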
+        if pool.provisioning_status not in consts.MUTABLE_STATUSES:
+            raise exceptions.ImmutableObject(resource='Pool', id=hm.pool_id)
+        if not self.repositories.test_and_set_lb_and_listeners_prov_status(
+                session, load_balancer_id,
+                consts.PENDING_UPDATE, consts.PENDING_UPDATE,
+                listener_ids=self._get_affected_listener_ids(session, hm),
+                pool_id=hm.pool_id):
+            LOG.info("Health Monitor cannot be created or modified because "
+                     "the Load Balancer is in an immutable state")
+            raise exceptions.ImmutableObject(resource='Load Balancer',
+                                             id=load_balancer_id)
+
+    def _validate_create_hm(self, lock_session, hm_dict):
+        """Validate creating health monitor on pool."""
+        mandatory_fields = (consts.TYPE, consts.DELAY, consts.TIMEOUT,
+                            consts.POOL_ID)
+        for field in mandatory_fields:
+            if hm_dict.get(field, None) is None:
+                raise exceptions.InvalidOption(value='None', option=field)
+        # MAX_RETRIES is renamed to rise_threshold in the data model, so
+        # handle it specially here.
+        if hm_dict.get(consts.RISE_THRESHOLD, None) is None:
+            raise exceptions.InvalidOption(value='None',
+                                           option=consts.MAX_RETRIES)
+
+        if hm_dict[consts.TYPE] not in (consts.HEALTH_MONITOR_HTTP,
+                                        consts.HEALTH_MONITOR_HTTPS):
+            if hm_dict.get(consts.HTTP_METHOD, None):
+                raise exceptions.InvalidOption(
+                    value=consts.HTTP_METHOD, option='health monitors of '
+                    'type {}'.format(hm_dict[consts.TYPE]))
+            if hm_dict.get(consts.URL_PATH, None):
+                raise exceptions.InvalidOption(
+                    value=consts.URL_PATH, option='health monitors of '
+                    'type {}'.format(hm_dict[consts.TYPE]))
+            if hm_dict.get(consts.EXPECTED_CODES, None):
+                raise exceptions.InvalidOption(
+                    value=consts.EXPECTED_CODES, option='health monitors of '
+                    'type {}'.format(hm_dict[consts.TYPE]))
+        else:
+            if not hm_dict.get(consts.HTTP_METHOD, None):
+                hm_dict[consts.HTTP_METHOD] = (
+                    consts.HEALTH_MONITOR_HTTP_DEFAULT_METHOD)
+            if not hm_dict.get(consts.URL_PATH, None):
+                hm_dict[consts.URL_PATH] = (
+                    consts.HEALTH_MONITOR_DEFAULT_URL_PATH)
+            if not hm_dict.get(consts.EXPECTED_CODES, None):
+                hm_dict[consts.EXPECTED_CODES] = (
+                    consts.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES)
+
+        if hm_dict.get('domain_name') and not hm_dict.get('http_version'):
+            raise exceptions.ValidationException(
+                detail=_("'http_version' must be specified when 'domain_name' "
+                         "is provided."))
+
+        if hm_dict.get('http_version') and hm_dict.get('domain_name'):
+            if hm_dict['http_version'] < 1.1:
+                raise exceptions.InvalidOption(
+                    value=f"http_version {hm_dict['http_version']}",
+                    option='health monitors HTTP 1.1 domain name health check')
+
+        try:
+            ret = self.repositories.health_monitor.create(
+                lock_session, **hm_dict)
+            lock_session.flush()
+            return ret
+        except odb_exceptions.DBDuplicateEntry as e:
+            raise exceptions.DuplicateHealthMonitor() from e
+        except odb_exceptions.DBReferenceError as e:
+            raise exceptions.InvalidOption(value=hm_dict.get(e.key),
+                                           option=e.key) from e
+        except odb_exceptions.DBError as e:
+            raise exceptions.APIException() from e
+
+    def _validate_healthmonitor_request_for_udp_sctp(self, request,
+                                                     pool_protocol):
+        if request.type not in (
+                consts.HEALTH_MONITOR_UDP_CONNECT,
+                lib_consts.HEALTH_MONITOR_SCTP,
+                consts.HEALTH_MONITOR_TCP,
+                consts.HEALTH_MONITOR_HTTP):
+            raise exceptions.ValidationException(detail=_(
+                "The associated pool protocol is %(pool_protocol)s, so only "
+                "a %(types)s health monitor is supported.") % {
+                'pool_protocol': pool_protocol,
+                'types': '/'.join((consts.HEALTH_MONITOR_UDP_CONNECT,
+                                   lib_consts.HEALTH_MONITOR_SCTP,
+                                   consts.HEALTH_MONITOR_TCP,
+
consts.HEALTH_MONITOR_HTTP))}) + # check the delay value if the HM type is UDP-CONNECT + hm_is_type_udp = ( + request.type == consts.HEALTH_MONITOR_UDP_CONNECT) + conf_min_delay = ( + CONF.api_settings.udp_connect_min_interval_health_monitor) + if (hm_is_type_udp and + not isinstance(request.delay, wtypes.UnsetType) and + request.delay < conf_min_delay): + raise exceptions.ValidationException(detail=_( + "The request delay value %(delay)s should be larger than " + "%(conf_min_delay)s for %(type)s health monitor type.") % { + 'delay': request.delay, + 'conf_min_delay': conf_min_delay, + 'type': consts.HEALTH_MONITOR_UDP_CONNECT}) + + @wsme_pecan.wsexpose(hm_types.HealthMonitorRootResponse, + body=hm_types.HealthMonitorRootPOST, status_code=201) + def post(self, health_monitor_): + """Creates a health monitor on a pool.""" + context = pecan_request.context.get('octavia_context') + health_monitor = health_monitor_.healthmonitor + + with context.session.begin(): + pool = self._get_db_pool(context.session, health_monitor.pool_id) + + health_monitor.project_id, provider = ( + self._get_lb_project_id_provider(context.session, + pool.load_balancer_id)) + + self._auth_validate_action(context, health_monitor.project_id, + consts.RBAC_POST) + + if (not CONF.api_settings.allow_ping_health_monitors and + health_monitor.type == consts.HEALTH_MONITOR_PING): + raise exceptions.DisabledOption( + option='type', value=consts.HEALTH_MONITOR_PING) + + if pool.protocol in (lib_consts.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP): + self._validate_healthmonitor_request_for_udp_sctp(health_monitor, + pool.protocol) + else: + if health_monitor.type in (consts.HEALTH_MONITOR_UDP_CONNECT, + lib_consts.HEALTH_MONITOR_SCTP): + raise exceptions.ValidationException(detail=_( + "The %(type)s type is only supported for pools of type " + "%(protocols)s.") % { + 'type': health_monitor.type, + 'protocols': '/'.join((consts.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP))}) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + context.session.begin() + try: + if self.repositories.check_quota_met( + context.session, + data_models.HealthMonitor, + health_monitor.project_id): + raise exceptions.QuotaException( + resource=data_models.HealthMonitor._name()) + + hm_dict = db_prepare.create_health_monitor( + health_monitor.to_dict(render_unsets=True)) + + self._test_lb_and_listener_and_pool_statuses( + context.session, health_monitor) + db_hm = self._validate_create_hm(context.session, hm_dict) + + # Prepare the data for the driver data model + provider_healthmon = driver_utils.db_HM_to_provider_HM(db_hm) + + # Dispatch to the driver + LOG.info("Sending create Health Monitor %s to provider %s", + db_hm.id, driver.name) + driver_utils.call_provider( + driver.name, driver.health_monitor_create, provider_healthmon) + + context.session.commit() + except odb_exceptions.DBError as e: + context.session.rollback() + raise exceptions.InvalidOption( + value=hm_dict.get('type'), option='type') from e + except Exception: + with excutils.save_and_reraise_exception(): + context.session.rollback() + + with context.session.begin(): + db_hm = self._get_db_hm(context.session, db_hm.id) + result = self._convert_db_to_type( + db_hm, hm_types.HealthMonitorResponse) + return hm_types.HealthMonitorRootResponse(healthmonitor=result) + + def _graph_create(self, lock_session, hm_dict): + hm_dict = db_prepare.create_health_monitor(hm_dict) + db_hm = self._validate_create_hm(lock_session, hm_dict) + + return db_hm 
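+
+    # NOTE: the update validations below differ from _validate_create_hm:
+    # they must tolerate wtypes.Unset attributes, because a PUT body only
+    # carries the fields the caller wants to change.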
+ + def _validate_update_hm(self, db_hm, health_monitor): + if db_hm.type not in (consts.HEALTH_MONITOR_HTTP, + consts.HEALTH_MONITOR_HTTPS): + if health_monitor.http_method != wtypes.Unset: + raise exceptions.InvalidOption( + value=consts.HTTP_METHOD, option='health monitors of ' + 'type {}'.format(db_hm.type)) + if health_monitor.url_path != wtypes.Unset: + raise exceptions.InvalidOption( + value=consts.URL_PATH, option='health monitors of ' + 'type {}'.format(db_hm.type)) + if health_monitor.expected_codes != wtypes.Unset: + raise exceptions.InvalidOption( + value=consts.EXPECTED_CODES, option='health monitors of ' + 'type {}'.format(db_hm.type)) + if health_monitor.delay is None: + raise exceptions.InvalidOption(value=None, option=consts.DELAY) + if health_monitor.max_retries is None: + raise exceptions.InvalidOption(value=None, + option=consts.MAX_RETRIES) + if health_monitor.timeout is None: + raise exceptions.InvalidOption(value=None, option=consts.TIMEOUT) + + if health_monitor.domain_name and not ( + db_hm.http_version or health_monitor.http_version): + raise exceptions.ValidationException( + detail=_("'http_version' must be specified when 'domain_name' " + "is provided.")) + + if ((db_hm.http_version or health_monitor.http_version) and + (db_hm.domain_name or health_monitor.domain_name)): + http_version = health_monitor.http_version or db_hm.http_version + if http_version < 1.1: + raise exceptions.InvalidOption( + value=f'http_version {http_version}', + option='health monitors HTTP 1.1 domain name health check') + + def _set_default_on_none(self, health_monitor): + """Reset settings to their default values if None/null was passed in + + A None/null value can be passed in to clear a value. PUT values + that were not provided by the user have a type of wtypes.UnsetType. + If the user is attempting to clear values, they should either + be set to None (for example in the name field) or they should be + reset to their default values. + This method is intended to handle those values that need to be set + back to a default value. 
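+
+        Currently this applies to http_method, url_path, expected_codes
+        and max_retries_down.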
+ """ + if health_monitor.http_method is None: + health_monitor.http_method = ( + consts.HEALTH_MONITOR_HTTP_DEFAULT_METHOD) + if health_monitor.url_path is None: + health_monitor.url_path = ( + consts.HEALTH_MONITOR_DEFAULT_URL_PATH) + if health_monitor.expected_codes is None: + health_monitor.expected_codes = ( + consts.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES) + if health_monitor.max_retries_down is None: + health_monitor.max_retries_down = consts.DEFAULT_MAX_RETRIES_DOWN + + @wsme_pecan.wsexpose(hm_types.HealthMonitorRootResponse, wtypes.text, + body=hm_types.HealthMonitorRootPUT, status_code=200) + def put(self, id, health_monitor_): + """Updates a health monitor.""" + context = pecan_request.context.get('octavia_context') + health_monitor = health_monitor_.healthmonitor + with context.session.begin(): + db_hm = self._get_db_hm(context.session, id, show_deleted=False) + + pool = self._get_db_pool(context.session, db_hm.pool_id) + project_id, provider = self._get_lb_project_id_provider( + context.session, pool.load_balancer_id) + + self._auth_validate_action(context, project_id, consts.RBAC_PUT) + + self._validate_update_hm(db_hm, health_monitor) + # Validate health monitor update options for UDP/SCTP + if pool.protocol in (lib_consts.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP): + health_monitor.type = db_hm.type + self._validate_healthmonitor_request_for_udp_sctp(health_monitor, + pool.protocol) + + self._set_default_on_none(health_monitor) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + with context.session.begin(): + + self._test_lb_and_listener_and_pool_statuses(context.session, + db_hm) + + # Prepare the data for the driver data model + healthmon_dict = health_monitor.to_dict(render_unsets=False) + healthmon_dict['id'] = id + provider_healthmon_dict = ( + driver_utils.hm_dict_to_provider_dict(healthmon_dict)) + + # Also prepare the baseline object data + old_provider_healthmon = driver_utils.db_HM_to_provider_HM(db_hm) + + # Dispatch to the driver + LOG.info("Sending update Health Monitor %s to provider %s", + id, driver.name) + driver_utils.call_provider( + driver.name, driver.health_monitor_update, + old_provider_healthmon, + driver_dm.HealthMonitor.from_dict(provider_healthmon_dict)) + + # Update the database to reflect what the driver just accepted + health_monitor.provisioning_status = consts.PENDING_UPDATE + db_hm_dict = health_monitor.to_dict(render_unsets=False) + self.repositories.health_monitor.update(context.session, id, + **db_hm_dict) + + # Force SQL alchemy to query the DB, otherwise we get inconsistent + # results + context.session.expire_all() + with context.session.begin(): + db_hm = self._get_db_hm(context.session, id) + result = self._convert_db_to_type( + db_hm, hm_types.HealthMonitorResponse) + return hm_types.HealthMonitorRootResponse(healthmonitor=result) + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) + def delete(self, id): + """Deletes a health monitor.""" + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_hm = self._get_db_hm(context.session, id, show_deleted=False) + + pool = self._get_db_pool(context.session, db_hm.pool_id) + project_id, provider = self._get_lb_project_id_provider( + context.session, pool.load_balancer_id) + + self._auth_validate_action(context, project_id, consts.RBAC_DELETE) + + if db_hm.provisioning_status == consts.DELETED: + return + + # Load the driver early as it also provides validation + driver = 
+
+        with context.session.begin():
+
+            self._test_lb_and_listener_and_pool_statuses(context.session,
+                                                         db_hm)
+
+            self.repositories.health_monitor.update(
+                context.session, db_hm.id,
+                provisioning_status=consts.PENDING_DELETE)
+
+            LOG.info("Sending delete Health Monitor %s to provider %s",
+                     id, driver.name)
+            provider_healthmon = driver_utils.db_HM_to_provider_HM(db_hm)
+            driver_utils.call_provider(
+                driver.name, driver.health_monitor_delete, provider_healthmon)
diff --git a/octavia/api/v2/controllers/l7policy.py b/octavia/api/v2/controllers/l7policy.py
new file mode 100644
index 0000000000..ec156cd2dd
--- /dev/null
+++ b/octavia/api/v2/controllers/l7policy.py
@@ -0,0 +1,342 @@
+# Copyright 2016 Blue Box, an IBM Company
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from octavia_lib.api.drivers import data_models as driver_dm
+from octavia_lib.common import constants as lib_consts
+from oslo_config import cfg
+from oslo_db import exception as odb_exceptions
+from oslo_log import log as logging
+from oslo_utils import excutils
+from pecan import expose as pecan_expose
+from pecan import request as pecan_request
+from wsme import types as wtypes
+from wsmeext import pecan as wsme_pecan
+
+from octavia.api.drivers import driver_factory
+from octavia.api.drivers import utils as driver_utils
+from octavia.api.v2.controllers import base
+from octavia.api.v2.controllers import l7rule
+from octavia.api.v2.types import l7policy as l7policy_types
+from octavia.common import constants
+from octavia.common import data_models
+from octavia.common import exceptions
+from octavia.common import validate
+from octavia.db import prepare as db_prepare
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class L7PolicyController(base.BaseController):
+    RBAC_TYPE = constants.RBAC_L7POLICY
+
+    def __init__(self):
+        super().__init__()
+
+    @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse, wtypes.text,
+                         [wtypes.text], ignore_extra_args=True)
+    def get(self, id, fields=None):
+        """Gets a single l7policy's details."""
+        context = pecan_request.context.get('octavia_context')
+        with context.session.begin():
+            db_l7policy = self._get_db_l7policy(context.session, id,
+                                                show_deleted=False)
+
+        self._auth_validate_action(context, db_l7policy.project_id,
+                                   constants.RBAC_GET_ONE)
+
+        result = self._convert_db_to_type(
+            db_l7policy, l7policy_types.L7PolicyResponse)
+        if fields is not None:
+            result = self._filter_fields([result], fields)[0]
+        return l7policy_types.L7PolicyRootResponse(l7policy=result)
+
+    @wsme_pecan.wsexpose(l7policy_types.L7PoliciesRootResponse, wtypes.text,
+                         [wtypes.text], ignore_extra_args=True)
+    def get_all(self, project_id=None, fields=None):
+        """Lists all l7policies."""
+        pcontext = pecan_request.context
+        context = pcontext.get('octavia_context')
+
+        query_filter = self._auth_get_all(context, project_id)
+
+        with context.session.begin():
+            db_l7policies, links = self.repositories.l7policy.get_all_API_list(
+                context.session, show_deleted=False,
+                pagination_helper=pcontext.get(constants.PAGINATION_HELPER),
+                **query_filter)
+        result = self._convert_db_to_type(
+            db_l7policies, [l7policy_types.L7PolicyResponse])
+        if fields is not None:
+            result = self._filter_fields(result, fields)
+        return l7policy_types.L7PoliciesRootResponse(
+            l7policies=result, l7policies_links=links)
+
+    def _test_lb_and_listener_statuses(self, session, lb_id, listener_ids):
+        """Verify load balancer is in a mutable state."""
+        if not self.repositories.test_and_set_lb_and_listeners_prov_status(
+                session, lb_id,
+                constants.PENDING_UPDATE, constants.PENDING_UPDATE,
+                listener_ids=listener_ids):
+            LOG.info("L7Policy cannot be created or modified because the "
+                     "Load Balancer is in an immutable state")
+            raise exceptions.ImmutableObject(resource='Load Balancer',
+                                             id=lb_id)
+
+    def _validate_create_l7policy(self, lock_session, l7policy_dict):
+        try:
+            # Set the default HTTP redirect code here so it's explicit
+            if ((l7policy_dict.get('redirect_url') or
+                    l7policy_dict.get('redirect_prefix')) and
+                    not l7policy_dict.get('redirect_http_code')):
+                l7policy_dict['redirect_http_code'] = 302
+
+            return self.repositories.l7policy.create(lock_session,
+                                                     **l7policy_dict)
+        except odb_exceptions.DBDuplicateEntry as e:
+            raise exceptions.IDAlreadyExists() from e
+        except odb_exceptions.DBReferenceError as e:
+            raise exceptions.InvalidOption(value=l7policy_dict.get(e.key),
+                                           option=e.key) from e
+        except odb_exceptions.DBError as e:
+            raise exceptions.APIException() from e
+
+    @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse,
+                         body=l7policy_types.L7PolicyRootPOST, status_code=201)
+    def post(self, l7policy_):
+        """Creates an l7policy on a listener."""
+        l7policy = l7policy_.l7policy
+        context = pecan_request.context.get('octavia_context')
+
+        # Verify the parent listener exists
+        listener_id = l7policy.listener_id
+        with context.session.begin():
+            listener = self._get_db_listener(
+                context.session, listener_id)
+            load_balancer_id = listener.load_balancer_id
+            l7policy.project_id, provider = self._get_lb_project_id_provider(
+                context.session, load_balancer_id)
+
+        self._auth_validate_action(context, l7policy.project_id,
+                                   constants.RBAC_POST)
+
+        # PROMETHEUS listeners cannot have l7policies attached
+        if listener.protocol == lib_consts.PROTOCOL_PROMETHEUS:
+            raise exceptions.ListenerNoChildren(
+                protocol=lib_consts.PROTOCOL_PROMETHEUS)
+
+        # Make sure any pool specified by redirect_pool_id exists
+        if l7policy.redirect_pool_id:
+            with context.session.begin():
+                db_pool = self._get_db_pool(
+                    context.session, l7policy.redirect_pool_id)
+                self._validate_protocol(listener.protocol, db_pool.protocol)
+
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
+        lock_session = context.session
+        lock_session.begin()
+        try:
+            if self.repositories.check_quota_met(
+                    lock_session,
+                    data_models.L7Policy,
+                    l7policy.project_id):
+                raise exceptions.QuotaException(
+                    resource=data_models.L7Policy._name())
+
+            l7policy_dict = db_prepare.create_l7policy(
+                l7policy.to_dict(render_unsets=True),
+                load_balancer_id, listener_id)
+
+            self._test_lb_and_listener_statuses(
+                lock_session, lb_id=load_balancer_id,
+                listener_ids=[listener_id])
+            db_l7policy = self._validate_create_l7policy(
+                lock_session, l7policy_dict)
+
+            # Prepare the data for the driver data model
+            provider_l7policy = (
+                driver_utils.db_l7policy_to_provider_l7policy(db_l7policy))
+
+            # Dispatch to the driver
+            LOG.info("Sending create L7 Policy %s to provider %s",
+                     db_l7policy.id, driver.name)
+            driver_utils.call_provider(
+                driver.name, driver.l7policy_create, provider_l7policy)
+
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                lock_session.rollback()
+
+        with context.session.begin():
+            db_l7policy = self._get_db_l7policy(context.session,
+                                                db_l7policy.id)
+        result = self._convert_db_to_type(db_l7policy,
+                                          l7policy_types.L7PolicyResponse)
+        return l7policy_types.L7PolicyRootResponse(l7policy=result)
+
+    def _graph_create(self, lock_session, policy_dict):
+        load_balancer_id = policy_dict.pop('load_balancer_id', None)
+        listener_id = policy_dict['listener_id']
+        policy_dict = db_prepare.create_l7policy(
+            policy_dict, load_balancer_id, listener_id)
+        rules = policy_dict.pop('l7rules', []) or []
+        db_policy = self._validate_create_l7policy(lock_session, policy_dict)
+
+        new_rules = []
+        for r in rules:
+            r['project_id'] = db_policy.project_id
+            new_rules.append(
+                l7rule.L7RuleController(db_policy.id)._graph_create(
+                    lock_session, r))
+
+        db_policy.l7rules = new_rules
+        return db_policy
+
+    @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse,
+                         wtypes.text, body=l7policy_types.L7PolicyRootPUT,
+                         status_code=200)
+    def put(self, id, l7policy_):
+        """Updates an l7policy."""
+        l7policy = l7policy_.l7policy
+        context = pecan_request.context.get('octavia_context')
+        with context.session.begin():
+            db_l7policy = self._get_db_l7policy(context.session, id,
+                                                show_deleted=False)
+            load_balancer_id, listener_id = (
+                self._get_listener_and_loadbalancer_id(db_l7policy))
+            project_id, provider = self._get_lb_project_id_provider(
+                context.session, load_balancer_id)
+
+        self._auth_validate_action(context, project_id, constants.RBAC_PUT)
+
+        l7policy_dict = validate.sanitize_l7policy_api_args(
+            l7policy.to_dict(render_unsets=False))
+        # Reset renamed attributes
+        for attr, val in l7policy_types.L7PolicyPUT._type_to_model_map.items():
+            if val in l7policy_dict:
+                l7policy_dict[attr] = l7policy_dict.pop(val)
+        sanitized_l7policy = l7policy_types.L7PolicyPUT(**l7policy_dict)
+
+        with context.session.begin():
+            listener = self._get_db_listener(
+                context.session, db_l7policy.listener_id)
+        # Make sure any specified redirect_pool_id exists
+        if l7policy_dict.get('redirect_pool_id'):
+            with context.session.begin():
+                db_pool = self._get_db_pool(
+                    context.session, l7policy_dict['redirect_pool_id'])
+                self._validate_protocol(listener.protocol, db_pool.protocol)
+
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
+        with context.session.begin():
+            lock_session = context.session
+
+            self._test_lb_and_listener_statuses(lock_session,
+                                                lb_id=load_balancer_id,
+                                                listener_ids=[listener_id])
+
+            # Prepare the data for the driver data model
+            l7policy_dict = sanitized_l7policy.to_dict(render_unsets=False)
+            l7policy_dict['id'] = id
+            provider_l7policy_dict = (
+                driver_utils.l7policy_dict_to_provider_dict(l7policy_dict))
+
+            # Also prepare the baseline object data
+            old_provider_l7policy = (
+                driver_utils.db_l7policy_to_provider_l7policy(db_l7policy))
+
+            # Dispatch to the driver
+            LOG.info("Sending update L7 Policy %s to provider %s",
+                     id, driver.name)
+            driver_utils.call_provider(
+                driver.name, driver.l7policy_update,
+                old_provider_l7policy,
+                driver_dm.L7Policy.from_dict(provider_l7policy_dict))
+
+            # Update the database to reflect what the driver just accepted
+            sanitized_l7policy.provisioning_status = constants.PENDING_UPDATE
+            db_l7policy_dict = sanitized_l7policy.to_dict(render_unsets=False)
+            self.repositories.l7policy.update(lock_session, id,
+                                              **db_l7policy_dict)
+
+        # Force SQLAlchemy to query the DB, otherwise we get inconsistent
+        # results
+        context.session.expire_all()
+        with context.session.begin():
+            db_l7policy = self._get_db_l7policy(context.session, id)
+        result = self._convert_db_to_type(db_l7policy,
+                                          l7policy_types.L7PolicyResponse)
+        return l7policy_types.L7PolicyRootResponse(l7policy=result)
+
+    @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
+    def delete(self, id):
+        """Deletes an l7policy."""
+        context = pecan_request.context.get('octavia_context')
+        with context.session.begin():
+            db_l7policy = self._get_db_l7policy(context.session, id,
+                                                show_deleted=False)
+            load_balancer_id, listener_id = (
+                self._get_listener_and_loadbalancer_id(db_l7policy))
+            project_id, provider = self._get_lb_project_id_provider(
+                context.session, load_balancer_id)
+
+        self._auth_validate_action(context, project_id, constants.RBAC_DELETE)
+
+        if db_l7policy.provisioning_status == constants.DELETED:
+            return
+
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
+        with context.session.begin():
+            self._test_lb_and_listener_statuses(context.session,
+                                                lb_id=load_balancer_id,
+                                                listener_ids=[listener_id])
+            self.repositories.l7policy.update(
+                context.session, db_l7policy.id,
+                provisioning_status=constants.PENDING_DELETE)
+
+            LOG.info("Sending delete L7 Policy %s to provider %s",
+                     id, driver.name)
+            provider_l7policy = driver_utils.db_l7policy_to_provider_l7policy(
+                db_l7policy)
+            driver_utils.call_provider(driver.name, driver.l7policy_delete,
+                                       provider_l7policy)
+
+    @pecan_expose()
+    def _lookup(self, l7policy_id, *remainder):
+        """Overridden pecan _lookup method for custom routing.
+
+        Verifies that the l7policy passed in the url exists, and if so
+        decides to which controller, if any, control should be passed.
+        """
+        context = pecan_request.context.get('octavia_context')
+        if l7policy_id and remainder and remainder[0] == 'rules':
+            remainder = remainder[1:]
+            with context.session.begin():
+                db_l7policy = self.repositories.l7policy.get(
+                    context.session, id=l7policy_id)
+            if not db_l7policy:
+                LOG.info("L7Policy %s not found.", l7policy_id)
+                raise exceptions.NotFound(
+                    resource='L7Policy', id=l7policy_id)
+            return l7rule.L7RuleController(
+                l7policy_id=db_l7policy.id), remainder
+        return None
diff --git a/octavia/api/v2/controllers/l7rule.py b/octavia/api/v2/controllers/l7rule.py
new file mode 100644
index 0000000000..f30d639e0c
--- /dev/null
+++ b/octavia/api/v2/controllers/l7rule.py
@@ -0,0 +1,310 @@
+# Copyright 2016 Blue Box, an IBM Company
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from octavia_lib.api.drivers import data_models as driver_dm
+from oslo_db import exception as odb_exceptions
+from oslo_log import log as logging
+from oslo_utils import excutils
+from pecan import request as pecan_request
+from wsme import types as wtypes
+from wsmeext import pecan as wsme_pecan
+
+from octavia.api.drivers import driver_factory
+from octavia.api.drivers import utils as driver_utils
+from octavia.api.v2.controllers import base
+from octavia.api.v2.types import l7rule as l7rule_types
+from octavia.common import constants
+from octavia.common import data_models
+from octavia.common import exceptions
+from octavia.common import validate
+from octavia.db import prepare as db_prepare
+
+
+LOG = logging.getLogger(__name__)
+
+
+class L7RuleController(base.BaseController):
+    RBAC_TYPE = constants.RBAC_L7RULE
+
+    def __init__(self, l7policy_id):
+        super().__init__()
+        self.l7policy_id = l7policy_id
+
+    @wsme_pecan.wsexpose(l7rule_types.L7RuleRootResponse, wtypes.text,
+                         [wtypes.text], ignore_extra_args=True)
+    def get(self, id, fields=None):
+        """Gets a single l7rule's details."""
+        context = pecan_request.context.get('octavia_context')
+        with context.session.begin():
+            db_l7rule = self._get_db_l7rule(context.session, id,
+                                            show_deleted=False)
+
+        self._auth_validate_action(context, db_l7rule.project_id,
+                                   constants.RBAC_GET_ONE)
+
+        result = self._convert_db_to_type(
+            db_l7rule, l7rule_types.L7RuleResponse)
+        if fields is not None:
+            result = self._filter_fields([result], fields)[0]
+        return l7rule_types.L7RuleRootResponse(rule=result)
+
+    @wsme_pecan.wsexpose(l7rule_types.L7RulesRootResponse, [wtypes.text],
+                         ignore_extra_args=True)
+    def get_all(self, fields=None):
+        """Lists all l7rules of an l7policy."""
+        pcontext = pecan_request.context
+        context = pcontext.get('octavia_context')
+
+        with context.session.begin():
+            l7policy = self._get_db_l7policy(context.session, self.l7policy_id,
+                                             show_deleted=False)
+
+            self._auth_validate_action(context, l7policy.project_id,
+                                       constants.RBAC_GET_ALL)
+
+            db_l7rules, links = self.repositories.l7rule.get_all_API_list(
+                context.session, show_deleted=False,
+                l7policy_id=self.l7policy_id,
+                pagination_helper=pcontext.get(constants.PAGINATION_HELPER))
+        result = self._convert_db_to_type(
+            db_l7rules, [l7rule_types.L7RuleResponse])
+        if fields is not None:
+            result = self._filter_fields(result, fields)
+        return l7rule_types.L7RulesRootResponse(
+            rules=result, rules_links=links)
+
+    def _test_lb_listener_policy_statuses(self, session):
+        """Verify load balancer is in a mutable state."""
+        l7policy = self._get_db_l7policy(session, self.l7policy_id)
+        listener_id = l7policy.listener_id
+        load_balancer_id = l7policy.listener.load_balancer_id
+        # Check the parent is not locked for some reason (ERROR, etc.)
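+        # An L7 policy in a transitional or ERROR provisioning status is
+        # rejected here, before any locks are taken on the parent objects.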
+        if l7policy.provisioning_status not in constants.MUTABLE_STATUSES:
+            raise exceptions.ImmutableObject(resource='L7Policy',
+                                             id=self.l7policy_id)
+        if not self.repositories.test_and_set_lb_and_listeners_prov_status(
+                session, load_balancer_id,
+                constants.PENDING_UPDATE, constants.PENDING_UPDATE,
+                listener_ids=[listener_id], l7policy_id=self.l7policy_id):
+            LOG.info("L7Rule cannot be created or modified because the "
+                     "Load Balancer is in an immutable state")
+            raise exceptions.ImmutableObject(resource='Load Balancer',
+                                             id=load_balancer_id)
+
+    def _check_l7policy_max_rules(self, session):
+        """Checks to make sure the L7Policy doesn't have too many rules."""
+        count = self.repositories.l7rule.count(
+            session, l7policy_id=self.l7policy_id)
+        if count >= constants.MAX_L7RULES_PER_L7POLICY:
+            raise exceptions.TooManyL7RulesOnL7Policy(id=self.l7policy_id)
+
+    def _validate_create_l7rule(self, lock_session, l7rule_dict):
+        try:
+            ret = self.repositories.l7rule.create(lock_session, **l7rule_dict)
+            lock_session.flush()
+            return ret
+        except odb_exceptions.DBDuplicateEntry as e:
+            raise exceptions.IDAlreadyExists() from e
+        except odb_exceptions.DBReferenceError as e:
+            raise exceptions.InvalidOption(value=l7rule_dict.get(e.key),
+                                           option=e.key) from e
+        except odb_exceptions.DBError as e:
+            raise exceptions.APIException() from e
+
+    @wsme_pecan.wsexpose(l7rule_types.L7RuleRootResponse,
+                         body=l7rule_types.L7RuleRootPOST, status_code=201)
+    def post(self, rule_):
+        """Creates an l7rule on an l7policy."""
+        l7rule = rule_.rule
+        context = pecan_request.context.get('octavia_context')
+
+        with context.session.begin():
+            db_l7policy = self._get_db_l7policy(context.session,
+                                                self.l7policy_id,
+                                                show_deleted=False)
+            load_balancer_id, listener_id = (
+                self._get_listener_and_loadbalancer_id(db_l7policy))
+            l7rule.project_id, provider = self._get_lb_project_id_provider(
+                context.session, load_balancer_id)
+
+        self._auth_validate_action(context, l7rule.project_id,
+                                   constants.RBAC_POST)
+
+        try:
+            validate.l7rule_data(l7rule)
+        except Exception as e:
+            raise exceptions.L7RuleValidation(error=e)
+
+        self._check_l7policy_max_rules(context.session)
+
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
+        context.session.begin()
+        try:
+            if self.repositories.check_quota_met(
+                    context.session,
+                    data_models.L7Rule,
+                    l7rule.project_id):
+                raise exceptions.QuotaException(
+                    resource=data_models.L7Rule._name())
+
+            l7rule_dict = db_prepare.create_l7rule(
+                l7rule.to_dict(render_unsets=True), self.l7policy_id)
+
+            self._test_lb_listener_policy_statuses(context.session)
+
+            db_l7rule = self._validate_create_l7rule(context.session,
+                                                     l7rule_dict)
+
+            # Prepare the data for the driver data model
+            provider_l7rule = (
+                driver_utils.db_l7rule_to_provider_l7rule(db_l7rule))
+
+            # Dispatch to the driver
+            LOG.info("Sending create L7 Rule %s to provider %s",
+                     db_l7rule.id, driver.name)
+            driver_utils.call_provider(
+                driver.name, driver.l7rule_create, provider_l7rule)
+
+            context.session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                context.session.rollback()
+
+        with context.session.begin():
+            db_l7rule = self._get_db_l7rule(context.session, db_l7rule.id)
+        result = self._convert_db_to_type(db_l7rule,
+                                          l7rule_types.L7RuleResponse)
+        return l7rule_types.L7RuleRootResponse(rule=result)
+
+    def _graph_create(self, lock_session, rule_dict):
+        try:
+            validate.l7rule_data(l7rule_types.L7RulePOST(**rule_dict))
+        except Exception as e:
+            raise exceptions.L7RuleValidation(error=e)
+        rule_dict = db_prepare.create_l7rule(rule_dict, self.l7policy_id)
+        db_rule = self._validate_create_l7rule(lock_session, rule_dict)
+
+        return db_rule
+
+    @wsme_pecan.wsexpose(l7rule_types.L7RuleRootResponse,
+                         wtypes.text, body=l7rule_types.L7RuleRootPUT,
+                         status_code=200)
+    def put(self, id, l7rule_):
+        """Updates an l7rule."""
+        l7rule = l7rule_.rule
+        context = pecan_request.context.get('octavia_context')
+        with context.session.begin():
+            db_l7rule = self._get_db_l7rule(context.session, id,
+                                            show_deleted=False)
+            db_l7policy = self._get_db_l7policy(context.session,
+                                                self.l7policy_id,
+                                                show_deleted=False)
+            load_balancer_id, listener_id = (
+                self._get_listener_and_loadbalancer_id(db_l7policy))
+            project_id, provider = self._get_lb_project_id_provider(
+                context.session, load_balancer_id)
+
+        self._auth_validate_action(context, project_id, constants.RBAC_PUT)
+
+        # Handle the invert unset
+        if l7rule.invert is None:
+            l7rule.invert = False
+
+        new_l7rule = db_l7rule.to_dict()
+        new_l7rule.update(l7rule.to_dict())
+        new_l7rule = data_models.L7Rule.from_dict(new_l7rule)
+
+        try:
+            validate.l7rule_data(new_l7rule)
+        except Exception as e:
+            raise exceptions.L7RuleValidation(error=e)
+
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
+        with context.session.begin():
+            self._test_lb_listener_policy_statuses(context.session)
+
+            # Prepare the data for the driver data model
+            l7rule_dict = l7rule.to_dict(render_unsets=False)
+            l7rule_dict['id'] = id
+            provider_l7rule_dict = (
+                driver_utils.l7rule_dict_to_provider_dict(l7rule_dict))
+
+            # Also prepare the baseline object data
+            old_provider_l7rule = driver_utils.db_l7rule_to_provider_l7rule(
+                db_l7rule)
+
+            # Dispatch to the driver
+            LOG.info("Sending update L7 Rule %s to provider %s", id,
+                     driver.name)
+            driver_utils.call_provider(
+                driver.name, driver.l7rule_update,
+                old_provider_l7rule,
+                driver_dm.L7Rule.from_dict(provider_l7rule_dict))
+
+            # Update the database to reflect what the driver just accepted
+            l7rule.provisioning_status = constants.PENDING_UPDATE
+            db_l7rule_dict = l7rule.to_dict(render_unsets=False)
+            self.repositories.l7rule.update(context.session, id,
+                                            **db_l7rule_dict)
+
+        # Force SQLAlchemy to query the DB, otherwise we get inconsistent
+        # results
+        context.session.expire_all()
+        with context.session.begin():
+            db_l7rule = self._get_db_l7rule(context.session, id)
+        result = self._convert_db_to_type(db_l7rule,
+                                          l7rule_types.L7RuleResponse)
+        return l7rule_types.L7RuleRootResponse(rule=result)
+
+    @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
+    def delete(self, id):
+        """Deletes an l7rule."""
+        context = pecan_request.context.get('octavia_context')
+        with context.session.begin():
+            db_l7rule = self._get_db_l7rule(context.session, id,
+                                            show_deleted=False)
+
+            db_l7policy = self._get_db_l7policy(context.session,
+                                                self.l7policy_id,
+                                                show_deleted=False)
+            load_balancer_id, listener_id = (
+                self._get_listener_and_loadbalancer_id(db_l7policy))
+            project_id, provider = self._get_lb_project_id_provider(
+                context.session, load_balancer_id)
+
+        self._auth_validate_action(context, project_id, constants.RBAC_DELETE)
+
+        if db_l7rule.provisioning_status == constants.DELETED:
+            return
+
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
+        with context.session.begin():
+            self._test_lb_listener_policy_statuses(context.session)
+
+
self.repositories.l7rule.update( + context.session, db_l7rule.id, + provisioning_status=constants.PENDING_DELETE) + + LOG.info("Sending delete L7 Rule %s to provider %s", id, + driver.name) + provider_l7rule = ( + driver_utils.db_l7rule_to_provider_l7rule(db_l7rule)) + driver_utils.call_provider(driver.name, driver.l7rule_delete, + provider_l7rule) diff --git a/octavia/api/v2/controllers/listener.py b/octavia/api/v2/controllers/listener.py new file mode 100644 index 0000000000..51bc5f2e3c --- /dev/null +++ b/octavia/api/v2/controllers/listener.py @@ -0,0 +1,756 @@ +# Copyright 2014 Rackspace +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia_lib.api.drivers import data_models as driver_dm +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_db import exception as odb_exceptions +from oslo_log import log as logging +from oslo_utils import excutils +from pecan import expose as pecan_expose +from pecan import request as pecan_request +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.drivers import driver_factory +from octavia.api.drivers import utils as driver_utils +from octavia.api.v2.controllers import base +from octavia.api.v2.controllers import l7policy +from octavia.api.v2.types import listener as listener_types +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import stats +from octavia.common import utils as common_utils +from octavia.common import validate +from octavia.db import prepare as db_prepare +from octavia.i18n import _ + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class ListenersController(base.BaseController): + RBAC_TYPE = constants.RBAC_LISTENER + + def __init__(self): + super().__init__() + + @wsme_pecan.wsexpose(listener_types.ListenerRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get_one(self, id, fields=None): + """Gets a single listener's details.""" + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_listener = self._get_db_listener(context.session, id, + show_deleted=False) + + if not db_listener: + raise exceptions.NotFound(resource=data_models.Listener._name(), + id=id) + + self._auth_validate_action(context, db_listener.project_id, + constants.RBAC_GET_ONE) + + result = self._convert_db_to_type(db_listener, + listener_types.ListenerResponse) + if fields is not None: + result = self._filter_fields([result], fields)[0] + return listener_types.ListenerRootResponse(listener=result) + + @wsme_pecan.wsexpose(listener_types.ListenersRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get_all(self, project_id=None, fields=None): + """Lists all listeners.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + + query_filter = self._auth_get_all(context, project_id) + + with context.session.begin(): + db_listeners, 
links = self.repositories.listener.get_all_API_list( + context.session, show_deleted=False, + pagination_helper=pcontext.get(constants.PAGINATION_HELPER), + **query_filter) + result = self._convert_db_to_type( + db_listeners, [listener_types.ListenerResponse]) + if fields is not None: + result = self._filter_fields(result, fields) + return listener_types.ListenersRootResponse( + listeners=result, listeners_links=links) + + def _test_lb_and_listener_statuses( + self, session, lb_id, id=None, + listener_status=constants.PENDING_UPDATE): + """Verify load balancer is in a mutable state.""" + lb_repo = self.repositories.load_balancer + if id: + if not self.repositories.test_and_set_lb_and_listeners_prov_status( + session, lb_id, constants.PENDING_UPDATE, + listener_status, listener_ids=[id]): + LOG.info("Load Balancer %s is immutable.", lb_id) + db_lb = lb_repo.get(session, id=lb_id) + raise exceptions.ImmutableObject(resource=db_lb._name(), + id=lb_id) + else: + if not lb_repo.test_and_set_provisioning_status( + session, lb_id, constants.PENDING_UPDATE): + db_lb = lb_repo.get(session, id=lb_id) + LOG.info("Load Balancer %s is immutable.", db_lb.id) + raise exceptions.ImmutableObject(resource=db_lb._name(), + id=lb_id) + + def _validate_pool(self, session, lb_id, pool_id, listener_protocol): + """Validate pool given exists on same load balancer as listener.""" + db_pool = self.repositories.pool.get( + session, load_balancer_id=lb_id, id=pool_id) + if not db_pool: + raise exceptions.NotFound( + resource=data_models.Pool._name(), id=pool_id) + self._validate_protocol(listener_protocol, db_pool.protocol) + + def _has_tls_container_refs(self, listener_dict): + return (listener_dict.get('tls_certificate_id') or + listener_dict.get('client_ca_tls_container_id') or + listener_dict.get('sni_containers')) + + def _is_tls_or_insert_header(self, listener_dict): + return (self._has_tls_container_refs(listener_dict) or + listener_dict.get('insert_headers')) + + def _validate_insert_headers(self, insert_header_list, listener_protocol): + if (listener_protocol not in + constants.LISTENER_PROTOCOLS_SUPPORTING_HEADER_INSERTION): + raise exceptions.InvalidOption( + value='insert-headers', + option=f'a {listener_protocol} protocol listener.') + if list(set(insert_header_list) - ( + set(constants.SUPPORTED_HTTP_HEADERS + + constants.SUPPORTED_SSL_HEADERS))): + raise exceptions.InvalidOption( + value=insert_header_list, + option='insert_headers') + if not listener_protocol == constants.PROTOCOL_TERMINATED_HTTPS: + is_matched = len( + constants.SUPPORTED_SSL_HEADERS) > len( + list(set(constants.SUPPORTED_SSL_HEADERS) - set( + insert_header_list))) + if is_matched: + headers = [] + for header_name in insert_header_list: + if header_name in constants.SUPPORTED_SSL_HEADERS: + headers.append(header_name) + raise exceptions.InvalidOption( + value=headers, + option=f'{listener_protocol} protocol listener.') + + def _validate_cidr_compatible_with_vip(self, db_vip: data_models.Vip, + vips: list[str], + allowed_cidrs: list[str]): + if allowed_cidrs and db_vip.sg_ids: + msg = _("Allowed CIDRs are not allowed when using custom VIP " + "Security Groups.") + raise exceptions.ValidationException( + detail=msg) + + for cidr in allowed_cidrs: + for vip in vips: + # Check if CIDR IP version matches VIP IP version + if (common_utils.is_cidr_ipv6(cidr) == + common_utils.is_ipv6(vip)): + break + else: + msg = _("CIDR %(cidr)s IP version incompatible with all VIPs " + "%(vips)s IP version.") + raise exceptions.ValidationException( + 
detail=msg % {'cidr': cidr, 'vips': vips}) + + def _validate_create_listener(self, lock_session, listener_dict): + """Validate listener for wrong protocol or duplicate listeners + + Update the load balancer db when provisioning status changes. + """ + listener_protocol = listener_dict.get('protocol') + + _can_tls_offload = ( + listener_protocol == constants.PROTOCOL_TERMINATED_HTTPS or + (listener_protocol == constants.PROTOCOL_PROMETHEUS and + self._has_tls_container_refs(listener_dict))) + + if listener_dict and listener_dict.get('insert_headers'): + self._validate_insert_headers( + listener_dict['insert_headers'].keys(), listener_protocol) + + # Check for UDP/SCTP compatibility + if (listener_protocol in (constants.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP) and + self._is_tls_or_insert_header(listener_dict)): + raise exceptions.ValidationException( + detail=_("%s protocol listener does not " + "support TLS.") % listener_protocol) + + # Check for TLS disabled + if (not CONF.api_settings.allow_tls_terminated_listeners and + _can_tls_offload): + if listener_protocol == constants.PROTOCOL_PROMETHEUS: + msg = f"{listener_protocol} with TLS" + else: + msg = listener_protocol + raise exceptions.DisabledOption(value=msg, option='protocol') + + # Check for PROMETHEUS listeners disabled + if (not CONF.api_settings.allow_prometheus_listeners and + listener_protocol == lib_consts.PROTOCOL_PROMETHEUS): + raise exceptions.DisabledOption( + value=lib_consts.PROTOCOL_PROMETHEUS, option='protocol') + + # Check for certs when not TLS offload capable + if (not _can_tls_offload and + self._has_tls_container_refs(listener_dict)): + raise exceptions.ValidationException(detail=_( + "Certificate container references are not allowed on " + "%s protocol listeners.") % listener_protocol) + + # Make sure a base certificate exists if specifying a client ca + if (listener_dict.get('client_ca_tls_certificate_id') and + not (listener_dict.get('tls_certificate_id') or + listener_dict.get('sni_containers'))): + raise exceptions.ValidationException(detail=_( + "An SNI or default certificate container reference must " + "be provided with a client CA container reference.")) + + # Make sure a certificate container is specified for TLS protocols + if (_can_tls_offload and + not (listener_dict.get('tls_certificate_id') or + listener_dict.get('sni_containers'))): + if listener_protocol == constants.PROTOCOL_PROMETHEUS: + msg = f"{listener_protocol} with TLS" + else: + msg = listener_protocol + raise exceptions.ValidationException(detail=_( + "An SNI or default certificate container reference must " + "be provided for %s protocol listeners.") % msg) + + # Make sure we have a client CA cert if they enable client auth + if (listener_dict.get('client_authentication') != + constants.CLIENT_AUTH_NONE and not + listener_dict.get('client_ca_tls_certificate_id')): + raise exceptions.ValidationException(detail=_( + "Client authentication setting %s requires a client CA " + "container reference.") % + listener_dict.get('client_authentication')) + + # Make sure we have a client CA if they specify a CRL + if (listener_dict.get('client_crl_container_id') and + not listener_dict.get('client_ca_tls_certificate_id')): + raise exceptions.ValidationException(detail=_( + "A client authentication CA reference is required to " + "specify a client authentication revocation list.")) + + # Check TLS cipher prohibit list + if 'tls_ciphers' in listener_dict and listener_dict['tls_ciphers']: + rejected_ciphers = validate.check_cipher_prohibit_list( + 
listener_dict['tls_ciphers']) + if rejected_ciphers: + raise exceptions.ValidationException(detail=_( + 'The following ciphers have been prohibited by an ' + 'administrator: ' + ', '.join(rejected_ciphers))) + + # Validate the TLS containers + sni_containers = listener_dict.pop('sni_containers', []) + tls_refs = [sni['tls_container_id'] for sni in sni_containers] + if listener_dict.get('tls_certificate_id'): + tls_refs.append(listener_dict.get('tls_certificate_id')) + self._validate_tls_refs(tls_refs) + + # Validate the client CA cert and optional client CRL + if listener_dict.get('client_ca_tls_certificate_id'): + self._validate_client_ca_and_crl_refs( + listener_dict.get('client_ca_tls_certificate_id'), + listener_dict.get('client_crl_container_id', None)) + + # Validate that the L4 protocol (UDP, TCP or SCTP) is not already used + # for the specified protocol_port in this load balancer + pcontext = pecan_request.context + query_filter = { + 'project_id': listener_dict['project_id'], + 'load_balancer_id': listener_dict['load_balancer_id'], + 'protocol_port': listener_dict['protocol_port'] + } + + # Get listeners on the same load balancer that use the same + # protocol port + db_listeners = self.repositories.listener.get_all_API_list( + lock_session, show_deleted=False, + pagination_helper=pcontext.get(constants.PAGINATION_HELPER), + **query_filter)[0] + + if db_listeners: + l4_protocol = constants.L4_PROTOCOL_MAP[listener_protocol] + + # List supported protocols that share the same L4 protocol as our + # new listener + disallowed_protocols = [ + p + for p, l4_p in constants.L4_PROTOCOL_MAP.items() + if l4_p == l4_protocol + ] + + for db_l in db_listeners: + # Check if l4 protocol ports conflict + if db_l.protocol in disallowed_protocols: + raise exceptions.DuplicateListenerEntry( + protocol=db_l.protocol, + port=listener_dict.get('protocol_port')) + + # Validate allowed CIDRs + allowed_cidrs = listener_dict.get('allowed_cidrs', []) or [] + lb_id = listener_dict.get('load_balancer_id') + lb_db = self.repositories.load_balancer.get( + lock_session, id=lb_id) + vip_addresses = [lb_db.vip.ip_address] + vip_addresses.extend([vip.ip_address for vip in lb_db.additional_vips]) + self._validate_cidr_compatible_with_vip(lb_db.vip, + vip_addresses, allowed_cidrs) + + if _can_tls_offload: + # Validate TLS version list + validate.check_tls_version_list(listener_dict['tls_versions']) + # Validate TLS versions against minimum + validate.check_tls_version_min(listener_dict['tls_versions']) + # Validate ALPN protocol list + validate.check_alpn_protocols(listener_dict['alpn_protocols']) + + validate.check_hsts_options(listener_dict) + + try: + db_listener = self.repositories.listener.create( + lock_session, **listener_dict) + lock_session.flush() + if sni_containers: + for container in sni_containers: + sni_dict = {'listener_id': db_listener.id, + 'tls_container_id': container.get( + 'tls_container_id')} + self.repositories.sni.create(lock_session, **sni_dict) + lock_session.flush() + # DB listener needs to be refreshed + db_listener = self.repositories.listener.get( + lock_session, id=db_listener.id) + + return db_listener + except odb_exceptions.DBDuplicateEntry as e: + raise exceptions.DuplicateListenerEntry( + protocol=listener_dict.get('protocol'), + port=listener_dict.get('protocol_port')) from e + except odb_exceptions.DBError as e: + raise exceptions.InvalidOption(value=listener_dict.get('protocol'), + option='protocol') from e + + @wsme_pecan.wsexpose(listener_types.ListenerRootResponse, + 
body=listener_types.ListenerRootPOST, status_code=201) + def post(self, listener_): + """Creates a listener on a load balancer.""" + listener = listener_.listener + context = pecan_request.context.get('octavia_context') + + load_balancer_id = listener.loadbalancer_id + with context.session.begin(): + listener.project_id, provider = self._get_lb_project_id_provider( + context.session, load_balancer_id) + + self._auth_validate_action(context, listener.project_id, + constants.RBAC_POST) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + context.session.begin() + try: + if self.repositories.check_quota_met( + context.session, + data_models.Listener, + listener.project_id): + raise exceptions.QuotaException( + resource=data_models.Listener._name()) + + listener_dict = db_prepare.create_listener( + listener.to_dict(render_unsets=True), None) + + if listener_dict['default_pool_id']: + if (listener_dict.get('protocol') == + lib_consts.PROTOCOL_PROMETHEUS): + raise exceptions.ListenerNoChildren( + protocol=lib_consts.PROTOCOL_PROMETHEUS) + self._validate_pool(context.session, load_balancer_id, + listener_dict['default_pool_id'], + listener.protocol) + + self._test_lb_and_listener_statuses( + context.session, lb_id=load_balancer_id) + + db_listener = self._validate_create_listener( + context.session, listener_dict) + + # Prepare the data for the driver data model + provider_listener = ( + driver_utils.db_listener_to_provider_listener(db_listener)) + + # re-inject the sni container references lost due to SNI + # being a separate table in the DB + if listener.sni_container_refs != wtypes.Unset: + provider_listener.sni_container_refs = ( + listener.sni_container_refs) + + # Dispatch to the driver + LOG.info("Sending create Listener %s to provider %s", + db_listener.id, driver.name) + driver_utils.call_provider( + driver.name, driver.listener_create, provider_listener) + + context.session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + context.session.rollback() + + with context.session.begin(): + db_listener = self._get_db_listener(context.session, + db_listener.id) + result = self._convert_db_to_type(db_listener, + listener_types.ListenerResponse) + return listener_types.ListenerRootResponse(listener=result) + + def _graph_create(self, lock_session, listener_dict, + l7policies=None, pool_name_ids=None): + load_balancer_id = listener_dict['load_balancer_id'] + listener_dict = db_prepare.create_listener( + listener_dict, load_balancer_id) + l7policies = listener_dict.pop('l7policies', l7policies) + if listener_dict.get('default_pool_id'): + if listener_dict.get('protocol') == lib_consts.PROTOCOL_PROMETHEUS: + raise exceptions.ListenerNoChildren( + protocol=lib_consts.PROTOCOL_PROMETHEUS) + self._validate_pool(lock_session, load_balancer_id, + listener_dict['default_pool_id'], + listener_dict['protocol']) + db_listener = self._validate_create_listener( + lock_session, listener_dict) + + # Now create l7policies + new_l7ps = [] + + if (listener_dict.get('protocol') == lib_consts.PROTOCOL_PROMETHEUS and + l7policies): + raise exceptions.ListenerNoChildren( + protocol=lib_consts.PROTOCOL_PROMETHEUS) + + for l7p in l7policies: + l7p['project_id'] = db_listener.project_id + l7p['load_balancer_id'] = load_balancer_id + l7p['listener_id'] = db_listener.id + redirect_pool = l7p.pop('redirect_pool', None) + if redirect_pool: + pool_name = redirect_pool['name'] + pool_id = pool_name_ids.get(pool_name) + if not pool_id: + raise 
exceptions.SingleCreateDetailsMissing( + type='Pool', name=pool_name) + l7p['redirect_pool_id'] = pool_id + new_l7ps.append(l7policy.L7PolicyController()._graph_create( + lock_session, l7p)) + db_listener.l7policies = new_l7ps + return db_listener + + def _validate_listener_PUT(self, listener, db_listener): + _can_tls_offload = ( + db_listener.protocol == constants.PROTOCOL_TERMINATED_HTTPS or + (db_listener.protocol == constants.PROTOCOL_PROMETHEUS and + self._has_tls_container_refs(listener.to_dict()))) + + # TODO(rm_work): Do we need something like this? What do we do on an + # empty body for a PUT? + if not listener: + raise exceptions.ValidationException( + detail='No listener object supplied.') + + # Check for UDP/SCTP compatibility + if (db_listener.protocol in (constants.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP) and + self._is_tls_or_insert_header(listener.to_dict())): + raise exceptions.ValidationException(detail=_( + "%s protocol listener does not support TLS or header " + "insertion.") % db_listener.protocol) + + # Check for certs when not TLS offload capable + if (not _can_tls_offload and + self._has_tls_container_refs(listener.to_dict())): + raise exceptions.ValidationException(detail=_( + "Certificate container references are not allowed on " + "%s protocol listeners.") % db_listener.protocol) + + # Make sure we have a client CA cert if they enable client auth + if (listener.client_authentication not in + (wtypes.Unset, constants.CLIENT_AUTH_NONE) and + not (db_listener.client_ca_tls_certificate_id or + listener.client_ca_tls_container_ref)): + raise exceptions.ValidationException(detail=_( + "Client authentication setting %s requires a client CA " + "container reference.") % + listener.client_authentication) + + if listener.insert_headers: + self._validate_insert_headers( + list(listener.insert_headers.keys()), db_listener.protocol) + + sni_containers = listener.sni_container_refs or [] + tls_refs = list(sni_containers) + if listener.default_tls_container_ref: + tls_refs.append(listener.default_tls_container_ref) + self._validate_tls_refs(tls_refs) + + ca_ref = None + if (listener.client_ca_tls_container_ref and + listener.client_ca_tls_container_ref != wtypes.Unset): + ca_ref = listener.client_ca_tls_container_ref + elif db_listener.client_ca_tls_certificate_id: + ca_ref = db_listener.client_ca_tls_certificate_id + + crl_ref = None + if (listener.client_crl_container_ref and + listener.client_crl_container_ref != wtypes.Unset): + crl_ref = listener.client_crl_container_ref + elif db_listener.client_crl_container_id: + crl_ref = db_listener.client_crl_container_id + + if crl_ref and not ca_ref: + raise exceptions.ValidationException(detail=_( + "A client authentication CA reference is required to " + "specify a client authentication revocation list.")) + + if ca_ref or crl_ref: + self._validate_client_ca_and_crl_refs(ca_ref, crl_ref) + + # Validate allowed CIDRs + if (listener.allowed_cidrs and listener.allowed_cidrs != wtypes.Unset): + vip_addresses = [db_listener.load_balancer.vip.ip_address] + vip_addresses.extend( + [vip.ip_address + for vip in db_listener.load_balancer.additional_vips] + ) + self._validate_cidr_compatible_with_vip( + db_listener.load_balancer.vip, + vip_addresses, listener.allowed_cidrs) + + # Check TLS cipher prohibit list + if listener.tls_ciphers: + rejected_ciphers = validate.check_cipher_prohibit_list( + listener.tls_ciphers) + if rejected_ciphers: + raise exceptions.ValidationException(detail=_( + 'The following ciphers have been prohibited by an 
' + 'administrator: ' + ', '.join(rejected_ciphers))) + + if listener.tls_versions is not wtypes.Unset: + # Validate TLS version list + validate.check_tls_version_list(listener.tls_versions) + # Validate TLS versions against minimum + validate.check_tls_version_min(listener.tls_versions) + + if listener.alpn_protocols is not wtypes.Unset: + # Validate ALPN protocol list + validate.check_alpn_protocols(listener.alpn_protocols) + + validate.check_hsts_options_put(listener, db_listener) + + def _set_default_on_none(self, listener): + """Reset settings to their default values if None/null was passed in + + A None/null value can be passed in to clear a value. PUT values + that were not provided by the user have a type of wtypes.UnsetType. + If the user is attempting to clear values, they should either + be set to None (for example in the name field) or they should be + reset to their default values. + This method is intended to handle those values that need to be set + back to a default value. + """ + if listener.connection_limit is None: + listener.connection_limit = constants.DEFAULT_CONNECTION_LIMIT + if listener.timeout_client_data is None: + listener.timeout_client_data = ( + CONF.haproxy_amphora.timeout_client_data) + if listener.timeout_member_connect is None: + listener.timeout_member_connect = ( + CONF.haproxy_amphora.timeout_member_connect) + if listener.timeout_member_data is None: + listener.timeout_member_data = ( + CONF.haproxy_amphora.timeout_member_data) + if listener.timeout_tcp_inspect is None: + listener.timeout_tcp_inspect = ( + CONF.haproxy_amphora.timeout_tcp_inspect) + if listener.client_authentication is None: + listener.client_authentication = constants.CLIENT_AUTH_NONE + if listener.tls_ciphers is None: + listener.tls_ciphers = CONF.api_settings.default_listener_ciphers + if listener.tls_versions is None: + listener.tls_versions = ( + CONF.api_settings.default_listener_tls_versions) + if listener.alpn_protocols is None: + listener.alpn_protocols = ( + CONF.api_settings.default_listener_alpn_protocols) + if listener.hsts_include_subdomains is None: + listener.hsts_include_subdomains = False + if listener.hsts_preload is None: + listener.hsts_preload = False + + @wsme_pecan.wsexpose(listener_types.ListenerRootResponse, wtypes.text, + body=listener_types.ListenerRootPUT, status_code=200) + def put(self, id, listener_: listener_types.ListenerRootPUT): + """Updates a listener on a load balancer.""" + listener = listener_.listener + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_listener = self._get_db_listener(context.session, id, + show_deleted=False) + load_balancer_id = db_listener.load_balancer_id + + project_id, provider = self._get_lb_project_id_provider( + context.session, load_balancer_id) + + self._auth_validate_action(context, project_id, constants.RBAC_PUT) + + self._validate_listener_PUT(listener, db_listener) + + self._set_default_on_none(listener) + + if listener.default_pool_id: + if db_listener.protocol == lib_consts.PROTOCOL_PROMETHEUS: + raise exceptions.ListenerNoChildren( + protocol=lib_consts.PROTOCOL_PROMETHEUS) + with context.session.begin(): + self._validate_pool(context.session, load_balancer_id, + listener.default_pool_id, + db_listener.protocol) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + with context.session.begin(): + self._test_lb_and_listener_statuses(context.session, + load_balancer_id, id=id) + + # Prepare the data for the driver 
data model + listener_dict = listener.to_dict(render_unsets=False) + listener_dict['id'] = id + + provider_listener_dict = ( + driver_utils.listener_dict_to_provider_dict(listener_dict)) + + # Also prepare the baseline object data + old_provider_listener = ( + driver_utils.db_listener_to_provider_listener(db_listener, + for_delete=True)) + + # Dispatch to the driver + LOG.info("Sending update Listener %s to provider %s", id, + driver.name) + driver_utils.call_provider( + driver.name, driver.listener_update, + old_provider_listener, + driver_dm.Listener.from_dict(provider_listener_dict)) + + # Update the database to reflect what the driver just accepted + self.repositories.listener.update( + context.session, id, **listener.to_dict(render_unsets=False)) + + # Force SQL alchemy to query the DB, otherwise we get inconsistent + # results + context.session.expire_all() + with context.session.begin(): + db_listener = self._get_db_listener(context.session, id) + result = self._convert_db_to_type(db_listener, + listener_types.ListenerResponse) + return listener_types.ListenerRootResponse(listener=result) + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) + def delete(self, id): + """Deletes a listener from a load balancer.""" + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_listener = self._get_db_listener(context.session, id, + show_deleted=False) + load_balancer_id = db_listener.load_balancer_id + + project_id, provider = self._get_lb_project_id_provider( + context.session, load_balancer_id) + + self._auth_validate_action(context, project_id, constants.RBAC_DELETE) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + with context.session.begin(): + self._test_lb_and_listener_statuses( + context.session, load_balancer_id, + id=id, listener_status=constants.PENDING_DELETE) + + LOG.info("Sending delete Listener %s to provider %s", id, + driver.name) + provider_listener = ( + driver_utils.db_listener_to_provider_listener( + db_listener, for_delete=True)) + driver_utils.call_provider(driver.name, driver.listener_delete, + provider_listener) + + @pecan_expose() + def _lookup(self, id, *remainder): + """Overridden pecan _lookup method for custom routing. + + Currently it checks if this was a stats request and routes + the request to the StatsController. 
+ """ + if id and remainder and remainder[0] == 'stats': + return StatisticsController(listener_id=id), remainder[1:] + return None + + +class StatisticsController(base.BaseController, stats.StatsMixin): + RBAC_TYPE = constants.RBAC_LISTENER + + def __init__(self, listener_id): + super().__init__() + self.id = listener_id + + @wsme_pecan.wsexpose(listener_types.StatisticsRootResponse, wtypes.text, + status_code=200) + def get(self): + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_listener = self._get_db_listener(context.session, self.id, + show_deleted=False) + if not db_listener: + LOG.info("Listener %s not found.", id) + raise exceptions.NotFound( + resource=data_models.Listener._name(), + id=id) + + self._auth_validate_action(context, db_listener.project_id, + constants.RBAC_GET_STATS) + + listener_stats = self.get_listener_stats(context.session, self.id) + + result = self._convert_db_to_type( + listener_stats, listener_types.ListenerStatisticsResponse) + return listener_types.StatisticsRootResponse(stats=result) diff --git a/octavia/api/v2/controllers/load_balancer.py b/octavia/api/v2/controllers/load_balancer.py new file mode 100644 index 0000000000..3fe593532e --- /dev/null +++ b/octavia/api/v2/controllers/load_balancer.py @@ -0,0 +1,949 @@ +# Copyright 2014 Rackspace +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import ipaddress + +from octavia_lib.api.drivers import data_models as driver_dm +from oslo_config import cfg +from oslo_db import exception as odb_exceptions +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import strutils +from pecan import expose as pecan_expose +from pecan import request as pecan_request +from sqlalchemy.orm import exc as sa_exception +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.drivers import driver_factory +from octavia.api.drivers import utils as driver_utils +from octavia.api.v2.controllers import base +from octavia.api.v2.controllers import listener +from octavia.api.v2.controllers import pool +from octavia.api.v2.types import load_balancer as lb_types +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import stats +from octavia.common import utils +from octavia.common import validate +from octavia.db import prepare as db_prepare +from octavia.i18n import _ +from octavia.network import base as network_base + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class LoadBalancersController(base.BaseController): + RBAC_TYPE = constants.RBAC_LOADBALANCER + + def __init__(self): + super().__init__() + + @wsme_pecan.wsexpose(lb_types.LoadBalancerRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get_one(self, id, fields=None): + """Gets a single load balancer's details.""" + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + load_balancer = self._get_db_lb(context.session, id, + show_deleted=False) + + if not load_balancer: + raise exceptions.NotFound( + resource=data_models.LoadBalancer._name(), + id=id) + + self._auth_validate_action(context, load_balancer.project_id, + constants.RBAC_GET_ONE) + + result = self._convert_db_to_type( + load_balancer, lb_types.LoadBalancerResponse) + if fields is not None: + result = self._filter_fields([result], fields)[0] + return lb_types.LoadBalancerRootResponse(loadbalancer=result) + + @wsme_pecan.wsexpose(lb_types.LoadBalancersRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get_all(self, project_id=None, fields=None): + """Lists all load balancers.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + + query_filter = self._auth_get_all(context, project_id) + + with context.session.begin(): + load_balancers, links = ( + self.repositories.load_balancer.get_all_API_list( + context.session, show_deleted=False, + pagination_helper=pcontext.get( + constants.PAGINATION_HELPER), + **query_filter)) + result = self._convert_db_to_type( + load_balancers, [lb_types.LoadBalancerResponse]) + if fields is not None: + result = self._filter_fields(result, fields) + return lb_types.LoadBalancersRootResponse( + loadbalancers=result, loadbalancers_links=links) + + def _test_lb_status(self, session, id, lb_status=constants.PENDING_UPDATE): + """Verify load balancer is in a mutable state.""" + lb_repo = self.repositories.load_balancer + if not lb_repo.test_and_set_provisioning_status( + session, id, lb_status): + prov_status = lb_repo.get(session, id=id).provisioning_status + LOG.info("Invalid state %(state)s of loadbalancer resource %(id)s", + {"state": prov_status, "id": id}) + raise exceptions.LBPendingStateError( + state=prov_status, id=id) + + def _test_and_set_failover_prov_status(self, session, id): + lb_repo = self.repositories.load_balancer + if 
not lb_repo.set_status_for_failover(session, id, + constants.PENDING_UPDATE): + prov_status = lb_repo.get(session, id=id).provisioning_status + LOG.info("Invalid state %(state)s of loadbalancer resource %(id)s", + {"state": prov_status, "id": id}) + raise exceptions.LBPendingStateError( + state=prov_status, id=id) + + @staticmethod + def _validate_network_and_fill_or_validate_subnet(load_balancer, + context=None): + network = validate.network_exists_optionally_contains_subnet( + network_id=load_balancer.vip_network_id, + subnet_id=load_balancer.vip_subnet_id, + context=context) + if not load_balancer.vip_subnet_id: + network_driver = utils.get_network_driver() + if load_balancer.vip_address: + for subnet_id in network.subnets: + subnet = network_driver.get_subnet(subnet_id) + if validate.is_ip_member_of_cidr(load_balancer.vip_address, + subnet.cidr): + load_balancer.vip_subnet_id = subnet_id + break + if not load_balancer.vip_subnet_id: + raise exceptions.ValidationException(detail=_( + "Supplied network does not contain a subnet for " + "VIP address specified." + )) + else: + # If subnet and IP are not provided, pick the first subnet with + # enough available IPs, preferring ipv4 + if not network.subnets: + raise exceptions.ValidationException(detail=_( + "Supplied network does not contain a subnet." + )) + ip_avail = network_driver.get_network_ip_availability( + network) + if (CONF.controller_worker.loadbalancer_topology == + constants.TOPOLOGY_SINGLE): + num_req_ips = 2 + if (CONF.controller_worker.loadbalancer_topology == + constants.TOPOLOGY_ACTIVE_STANDBY): + num_req_ips = 3 + subnets = [subnet_id for subnet_id in network.subnets if + utils.subnet_ip_availability(ip_avail, subnet_id, + num_req_ips)] + if not subnets: + raise exceptions.ValidationException(detail=_( + "Subnet(s) in the supplied network do not contain " + "enough available IPs." + )) + for subnet_id in subnets: + # Use the first subnet, in case there are no ipv4 subnets + if not load_balancer.vip_subnet_id: + load_balancer.vip_subnet_id = subnet_id + subnet = network_driver.get_subnet(subnet_id) + if subnet.ip_version == 4: + load_balancer.vip_subnet_id = subnet_id + break + + @staticmethod + def _validate_port_and_fill_or_validate_subnet(load_balancer, + context=None): + port = validate.port_exists(port_id=load_balancer.vip_port_id, + context=context) + validate.check_port_in_use(port) + load_balancer.vip_network_id = port.network_id + + # Check whether the requested VIP port already has a qos_policy + # applied and, if so, store that policy id on the load balancer + # object. If 'vip_qos_policy_id' is specified in the request, it + # overrides any qos_policy applied on the vip_port.
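The subnet auto-selection in _validate_network_and_fill_or_validate_subnet above reduces to: keep only the subnets with enough free IPs for the topology (two for SINGLE, three for ACTIVE_STANDBY), then take the first IPv4 candidate, falling back to the first candidate of any IP version. A rough standalone restatement of that rule, with a stand-in Subnet type where the real code queries the network driver:

from dataclasses import dataclass


@dataclass
class Subnet:
    # Minimal stand-in for the network driver's subnet object.
    id: str
    ip_version: int
    free_ips: int


def pick_vip_subnet(subnets: list[Subnet], num_req_ips: int) -> str:
    candidates = [s for s in subnets if s.free_ips >= num_req_ips]
    if not candidates:
        raise ValueError("no subnet has enough available IPs")
    # Prefer the first IPv4 subnet; otherwise fall back to the first
    # candidate, which may be IPv6.
    for subnet in candidates:
        if subnet.ip_version == 4:
            return subnet.id
    return candidates[0].id

For example, pick_vip_subnet([Subnet('s6', 6, 9), Subnet('s4', 4, 9)], 3) returns 's4' even though the IPv6 subnet is listed first.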
+ port_qos_policy_id = port.qos_policy_id + if (port_qos_policy_id and + isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType)): + load_balancer.vip_qos_policy_id = port_qos_policy_id + + if load_balancer.vip_subnet_id: + # If we were provided a subnet_id, validate it exists and that + # there is a fixed_ip on the port that matches the provided subnet + validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id, + context=context) + for port_fixed_ip in port.fixed_ips: + if port_fixed_ip.subnet_id == load_balancer.vip_subnet_id: + load_balancer.vip_address = port_fixed_ip.ip_address + break # Just pick the first address found in the subnet + if not load_balancer.vip_address: + raise exceptions.ValidationException(detail=_( + "No VIP address found on the specified VIP port within " + "the specified subnet.")) + elif load_balancer.vip_address: + normalized_lb_ip = ipaddress.ip_address( + load_balancer.vip_address).compressed + for port_fixed_ip in port.fixed_ips: + normalized_port_ip = ipaddress.ip_address( + port_fixed_ip.ip_address).compressed + if normalized_port_ip == normalized_lb_ip: + load_balancer.vip_subnet_id = port_fixed_ip.subnet_id + break + if not load_balancer.vip_subnet_id: + raise exceptions.ValidationException(detail=_( + "Specified VIP address not found on the " + "specified VIP port.")) + elif len(port.fixed_ips) == 1: + # User provided only a port, get the subnet and address from it + load_balancer.vip_subnet_id = port.fixed_ips[0].subnet_id + load_balancer.vip_address = port.fixed_ips[0].ip_address + else: + raise exceptions.ValidationException(detail=_( + "VIP port's subnet could not be determined. Please " + "specify either a VIP subnet or address.")) + + @staticmethod + def _validate_subnets_share_network_but_no_duplicates(load_balancer): + # Validate that no subnet_id is used more than once + subnet_use_counts = {load_balancer.vip_subnet_id: 1} + for vip in load_balancer.additional_vips: + if vip.subnet_id in subnet_use_counts: + raise exceptions.ValidationException(detail=_( + 'Duplicate VIP subnet(s) specified. Only one IP can be ' + 'bound per subnet.')) + subnet_use_counts[vip.subnet_id] = 1 + + # Validate that all subnets belong to the same network + network_driver = utils.get_network_driver() + used_subnets = {} + for subnet_id in subnet_use_counts: + used_subnets[subnet_id] = network_driver.get_subnet(subnet_id) + all_networks = [subnet.network_id for subnet in used_subnets.values()] + if len(set(all_networks)) > 1: + raise exceptions.ValidationException(detail=_( + 'All VIP subnets must belong to the same network.' 
+ )) + # Fill the network_id for each additional_vip + for vip in load_balancer.additional_vips: + vip.network_id = used_subnets[vip.subnet_id].network_id + + def _validate_vip_request_object(self, load_balancer, context=None): + allowed_network_objects = [] + if CONF.networking.allow_vip_port_id: + allowed_network_objects.append('vip_port_id') + if CONF.networking.allow_vip_network_id: + allowed_network_objects.append('vip_network_id') + if CONF.networking.allow_vip_subnet_id: + allowed_network_objects.append('vip_subnet_id') + + msg = _("use of %(object)s is disallowed by this deployment's " + "configuration.") + if (load_balancer.vip_port_id and + not CONF.networking.allow_vip_port_id): + raise exceptions.ValidationException( + detail=msg % {'object': 'vip_port_id'}) + if (load_balancer.vip_network_id and + not CONF.networking.allow_vip_network_id): + raise exceptions.ValidationException( + detail=msg % {'object': 'vip_network_id'}) + if (load_balancer.vip_subnet_id and + not CONF.networking.allow_vip_subnet_id): + raise exceptions.ValidationException( + detail=msg % {'object': 'vip_subnet_id'}) + + if not (load_balancer.vip_port_id or + load_balancer.vip_network_id or + load_balancer.vip_subnet_id): + raise exceptions.VIPValidationException( + objects=', '.join(allowed_network_objects)) + + # Validate the port id + if load_balancer.vip_port_id: + self._validate_port_and_fill_or_validate_subnet(load_balancer, + context=context) + # If no port id, validate the network id (and subnet if provided) + elif load_balancer.vip_network_id: + self._validate_network_and_fill_or_validate_subnet(load_balancer, + context=context) + # Validate just the subnet id + elif load_balancer.vip_subnet_id: + subnet = validate.subnet_exists( + subnet_id=load_balancer.vip_subnet_id, context=context) + load_balancer.vip_network_id = subnet.network_id + if load_balancer.vip_qos_policy_id: + validate.qos_policy_exists( + qos_policy_id=load_balancer.vip_qos_policy_id) + + # Even though we've just validated the subnet or else retrieved its ID + # directly from the port, we might still be missing the network. 
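Condensing the branching in _validate_vip_request_object above: each VIP source can be disabled by deployment configuration, at least one source must be supplied, and a port id takes precedence over a network id, which takes precedence over a bare subnet id. A hedged sketch of just that precedence (the function and error wording are illustrative, not Octavia's):

def resolve_vip_source(vip, allowed_sources):
    # 'vip' is any object with vip_port_id / vip_network_id /
    # vip_subnet_id attributes; 'allowed_sources' mirrors the
    # networking.allow_vip_* configuration options.
    for attr in ('vip_port_id', 'vip_network_id', 'vip_subnet_id'):
        if getattr(vip, attr) and attr not in allowed_sources:
            raise ValueError(f"use of {attr} is disallowed by this "
                             "deployment's configuration.")
    if vip.vip_port_id:
        return 'port'
    if vip.vip_network_id:
        return 'network'
    if vip.vip_subnet_id:
        return 'subnet'
    raise ValueError("one of %s must be provided" %
                     ', '.join(sorted(allowed_sources)))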
+ if not load_balancer.vip_network_id: + subnet = validate.subnet_exists( + subnet_id=load_balancer.vip_subnet_id) + load_balancer.vip_network_id = subnet.network_id + + # Multi-VIP validation: subnets must be unique and share one network + self._validate_subnets_share_network_but_no_duplicates(load_balancer) + + # Validate optional security groups + if load_balancer.vip_sg_ids: + for sg_id in load_balancer.vip_sg_ids: + validate.security_group_exists(sg_id, context=context) + + def _validate_vnic_type(self, vnic_type: str, + load_balancer: lb_types.LoadBalancerPOST): + if (vnic_type == constants.VNIC_TYPE_DIRECT and + load_balancer.vip_sg_ids): + msg = _("VIP Security Groups are not allowed with VNIC direct " + "type") + raise exceptions.ValidationException(detail=msg) + + @staticmethod + def _create_vip_port_if_not_exist(load_balancer_db): + """Create vip port.""" + network_driver = utils.get_network_driver() + try: + return network_driver.allocate_vip(load_balancer_db) + except network_base.AllocateVIPException as e: + # Convert neutron style exception to octavia style + # if the error was API ready + if getattr(e, 'orig_code', None) is not None: + e.code = e.orig_code + if getattr(e, 'orig_msg', None) is not None: + e.message = e.orig_msg + e.msg = e.orig_msg + raise e + + def _get_provider(self, session, load_balancer): + """Decide on the provider for this load balancer.""" + + provider = None + if not isinstance(load_balancer.flavor_id, wtypes.UnsetType): + try: + with session.begin(): + provider = self.repositories.flavor.get_flavor_provider( + session, load_balancer.flavor_id) + except sa_exception.NoResultFound as e: + raise exceptions.ValidationException( + detail=_("Invalid flavor_id.")) from e + + # No provider specified and no flavor specified, use conf default + if (isinstance(load_balancer.provider, wtypes.UnsetType) and + not provider): + provider = CONF.api_settings.default_provider_driver + # Both provider and flavor specified, they must match + elif (not isinstance(load_balancer.provider, wtypes.UnsetType) and + provider): + if provider != load_balancer.provider: + raise exceptions.ProviderFlavorMismatchError( + flav=load_balancer.flavor_id, prov=load_balancer.provider) + # No flavor, but provider, use the provider specified + elif not provider: + provider = load_balancer.provider + # Otherwise, use the flavor provider we found above + + return provider + + def _apply_flavor_to_lb_dict(self, lock_session, driver, lb_dict): + + flavor_dict = {} + if 'flavor_id' in lb_dict: + try: + flavor_dict = ( + self.repositories.flavor.get_flavor_metadata_dict( + lock_session, lb_dict['flavor_id'])) + except sa_exception.NoResultFound as e: + raise exceptions.ValidationException( + detail=_("Invalid flavor_id.")) from e + + # Make sure the driver will still accept the flavor metadata + if flavor_dict: + driver_utils.call_provider(driver.name, driver.validate_flavor, + flavor_dict) + + # Apply the flavor settings to the load balancer + # Use the configuration file settings as defaults + lb_dict[constants.TOPOLOGY] = flavor_dict.get( + constants.LOADBALANCER_TOPOLOGY, + CONF.controller_worker.loadbalancer_topology) + + return flavor_dict + + def _validate_flavor(self, session, load_balancer): + if not isinstance(load_balancer.flavor_id, wtypes.UnsetType): + with session.begin(): + flavor = self.repositories.flavor.get( + session, id=load_balancer.flavor_id) + if not flavor: + raise exceptions.ValidationException( + detail=_("Invalid flavor_id.")) + if not flavor.enabled: + raise
exceptions.DisabledOption(option='flavor', + value=load_balancer.flavor_id) + + def _validate_and_return_az_dict(self, lock_session, driver, lb_dict): + + az_dict = {} + if 'availability_zone' in lb_dict: + try: + az = self.repositories.availability_zone.get( + lock_session, name=lb_dict['availability_zone']) + az_dict = ( + self.repositories.availability_zone + .get_availability_zone_metadata_dict(lock_session, az.name) + ) + except sa_exception.NoResultFound as e: + raise exceptions.ValidationException( + detail=_("Invalid availability_zone.")) from e + + # Make sure the driver will still accept the availability zone metadata + if az_dict: + try: + driver_utils.call_provider(driver.name, + driver.validate_availability_zone, + az_dict) + except NotImplementedError as e: + raise exceptions.ProviderNotImplementedError( + prov=driver.name, user_msg="This provider does not support" + " availability zones.") from e + + return az_dict + + def _validate_availability_zone(self, session, load_balancer): + if not isinstance(load_balancer.availability_zone, wtypes.UnsetType): + with session.begin(): + az = self.repositories.availability_zone.get( + session, name=load_balancer.availability_zone) + if not az: + raise exceptions.ValidationException( + detail=_("Invalid availability zone.")) + if not az.enabled: + raise exceptions.DisabledOption( + option='availability_zone', + value=load_balancer.availability_zone) + + @wsme_pecan.wsexpose(lb_types.LoadBalancerFullRootResponse, + body=lb_types.LoadBalancerRootPOST, status_code=201) + def post(self, load_balancer: lb_types.LoadBalancerRootPOST): + """Creates a load balancer.""" + load_balancer = load_balancer.loadbalancer + context = pecan_request.context.get('octavia_context') + + if not load_balancer.project_id and context.project_id: + load_balancer.project_id = context.project_id + + if not load_balancer.project_id: + raise exceptions.ValidationException(detail=_( + "Missing project ID in request where one is required. 
" + "An administrator should check the keystone settings " + "in the Octavia configuration.")) + + self._auth_validate_action(context, load_balancer.project_id, + constants.RBAC_POST) + if not isinstance(load_balancer.vip_sg_ids, wtypes.UnsetType): + self._auth_validate_action( + context, load_balancer.project_id, + f"{constants.RBAC_POST}:vip_sg_ids") + + self._validate_vip_request_object(load_balancer, context=context) + + self._validate_flavor(context.session, load_balancer) + + self._validate_availability_zone(context.session, load_balancer) + + provider = self._get_provider(context.session, load_balancer) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + lock_session = context.session + lock_session.begin() + try: + if self.repositories.check_quota_met( + lock_session, + data_models.LoadBalancer, + load_balancer.project_id): + raise exceptions.QuotaException( + resource=data_models.LoadBalancer._name()) + + db_lb, db_pools, db_lists = None, None, None + + lb_dict = db_prepare.create_load_balancer(load_balancer.to_dict( + render_unsets=False + )) + vip_dict = lb_dict.pop('vip', {}) + additional_vip_dicts = lb_dict.pop('additional_vips', []) + + # Make sure we store the right provider in the DB + lb_dict['provider'] = driver.name + + # NoneType can be weird here, have to force type a second time + listeners = lb_dict.pop('listeners', []) or [] + pools = lb_dict.pop('pools', []) or [] + + flavor_dict = self._apply_flavor_to_lb_dict(lock_session, driver, + lb_dict) + + az_dict = self._validate_and_return_az_dict(lock_session, driver, + lb_dict) + # Validate the network as soon as we have the AZ data + validate.network_allowed_by_config( + load_balancer.vip_network_id, + valid_networks=az_dict.get(constants.VALID_VIP_NETWORKS)) + + # Apply the anticipated vNIC type so the create will return the + # right vip_vnic_type + if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False): + vip_dict[constants.VNIC_TYPE] = constants.VNIC_TYPE_DIRECT + else: + vip_dict[constants.VNIC_TYPE] = constants.VNIC_TYPE_NORMAL + + self._validate_vnic_type(vip_dict[constants.VNIC_TYPE], + load_balancer) + + db_lb = self.repositories.create_load_balancer_and_vip( + lock_session, lb_dict, vip_dict, additional_vip_dicts) + + # Pass the flavor dictionary through for the provider drivers + # This is a "virtual" lb_dict item that includes the expanded + # flavor dict instead of just the flavor_id we store in the DB. + lb_dict['flavor'] = flavor_dict + + # Do the same with the availability_zone dict + lb_dict['availability_zone'] = az_dict + + # See if the provider driver wants to manage the VIP port + # This will still be called if the user provided a port to + # allow drivers to collect any required information about the + # VIP port. 
+ octavia_owned = False + try: + provider_vip_dict = driver_utils.vip_dict_to_provider_dict( + vip_dict) + provider_additional_vips = [ + driver_utils.additional_vip_dict_to_provider_dict(add_vip) + for add_vip in additional_vip_dicts] + vip_dict, additional_vip_dicts = driver_utils.call_provider( + driver.name, driver.create_vip_port, db_lb.id, + db_lb.project_id, provider_vip_dict, + provider_additional_vips) + vip = driver_utils.provider_vip_dict_to_vip_obj(vip_dict) + add_vips = [data_models.AdditionalVip(**add_vip) + for add_vip in additional_vip_dicts] + except exceptions.ProviderNotImplementedError: + # create vip port if not exist, driver didn't want to create + # the VIP port + vip, add_vips = self._create_vip_port_if_not_exist(db_lb) + LOG.info('Created VIP port %s for provider %s.', + vip.port_id, driver.name) + # If a port_id wasn't passed in and we made it this far + # we created the VIP + if 'port_id' not in vip_dict or not vip_dict['port_id']: + octavia_owned = True + + # Check if the driver claims octavia owns the VIP port. + if vip.octavia_owned: + octavia_owned = True + + self.repositories.vip.update( + lock_session, db_lb.id, ip_address=vip.ip_address, + port_id=vip.port_id, network_id=vip.network_id, + subnet_id=vip.subnet_id, octavia_owned=octavia_owned) + for add_vip in add_vips: + self.repositories.additional_vip.update( + lock_session, db_lb.id, ip_address=add_vip.ip_address, + port_id=add_vip.port_id, network_id=add_vip.network_id, + subnet_id=add_vip.subnet_id) + + if listeners or pools: + # expire_all is required here, it ensures that the loadbalancer + # will be re-fetched with its associated vip in _graph_create. + # without expire_all the vip attributes that have been updated + # just before this call may not be set correctly in the + # loadbalancer object. + lock_session.expire_all() + + db_pools, db_lists = self._graph_create( + lock_session, db_lb, listeners, pools) + + # Prepare the data for the driver data model + driver_lb_dict = driver_utils.lb_dict_to_provider_dict( + lb_dict, vip, add_vips, db_pools, db_lists) + + lock_session.flush() + + # Dispatch to the driver + LOG.info("Sending create Load Balancer %s to provider %s", + db_lb.id, driver.name) + driver_utils.call_provider( + driver.name, driver.loadbalancer_create, + driver_dm.LoadBalancer.from_dict(driver_lb_dict)) + + lock_session.commit() + except odb_exceptions.DBDuplicateEntry as e: + lock_session.rollback() + raise exceptions.IDAlreadyExists() from e + except Exception: + with excutils.save_and_reraise_exception(): + lock_session.rollback() + + with context.session.begin(): + db_lb = self._get_db_lb(context.session, db_lb.id) + + result = self._convert_db_to_type( + db_lb, lb_types.LoadBalancerFullResponse) + return lb_types.LoadBalancerFullRootResponse(loadbalancer=result) + + def _graph_create(self, session, db_lb, listeners, pools): + # Track which pools must have a full specification + pools_required = set() + # Look through listeners and find any extra pools, and move them to the + # top level so they are created first. 
+ for li in listeners: + default_pool = li.get('default_pool') + pool_name = ( + default_pool.get('name') if default_pool else None) + # All pools need to have a name so they can be referenced + if default_pool and not pool_name: + raise exceptions.ValidationException( + detail='Pools must be named when creating a fully ' + 'populated loadbalancer.') + # If a pool has more than a name, assume it's a full specification + # (but use >3 because it will also have "enabled" and "tls_enabled" + # as default) + if default_pool and len(default_pool) > 3: + pools.append(default_pool) + li['default_pool'] = {'name': pool_name} + # Otherwise, it's a reference and we record it and move on + elif default_pool: + pools_required.add(pool_name) + # We also need to check policy redirects + for policy in li.get('l7policies'): + redirect_pool = policy.get('redirect_pool') + pool_name = ( + redirect_pool.get('name') if redirect_pool else None) + # All pools need to have a name so they can be referenced + if redirect_pool and not pool_name: + raise exceptions.ValidationException( + detail='Pools must be named when creating a fully ' + 'populated loadbalancer.') + # If a pool has more than a name, assume it's a full spec + # (but use >3 because it will also have "enabled" and + # "tls_enabled" as default) + if redirect_pool and len(redirect_pool) > 3: + pool_name = redirect_pool['name'] + policy['redirect_pool'] = {'name': pool_name} + pools.append(redirect_pool) + # Otherwise, it's a reference and we record it and move on + elif redirect_pool: + pools_required.add(pool_name) + + # Make sure all pool names are unique. + pool_names = [p.get('name') for p in pools] + if len(set(pool_names)) != len(pool_names): + raise exceptions.ValidationException( + detail="Pool names must be unique when creating a fully " + "populated loadbalancer.") + # Make sure every reference is present in our spec list + for pool_ref in pools_required: + if pool_ref not in pool_names: + raise exceptions.ValidationException( + detail="Pool '{name}' was referenced but no full " + "definition was found.".format(name=pool_ref)) + + # Check quotas for pools. + if pools and self.repositories.check_quota_met( + session, data_models.Pool, db_lb.project_id, + count=len(pools)): + raise exceptions.QuotaException(resource=data_models.Pool._name()) + + # Now create all of the pools ahead of the listeners.
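Before that creation step, the hoisting and reference checks just performed can be summarized as: promote every inline pool definition to the top-level list, leave a name-only reference behind, then require that names are unique and that every reference resolves. A simplified sketch that ignores the defaulted 'enabled'/'tls_enabled' fields (hence the > 1 test where the code above needs > 3):

def hoist_default_pools(listeners, pools):
    required = set()
    for li in listeners:
        dp = li.get('default_pool')
        if not dp:
            continue
        if not dp.get('name'):
            raise ValueError("pools must be named in a full-graph create")
        if len(dp) > 1:
            # Full definition: promote it, keep only a name reference.
            pools.append(dp)
            li['default_pool'] = {'name': dp['name']}
        else:
            # Name-only: a reference that must resolve to a definition.
            required.add(dp['name'])
    names = [p.get('name') for p in pools]
    if len(set(names)) != len(names):
        raise ValueError("pool names must be unique")
    missing = required - set(names)
    if missing:
        raise ValueError(f"referenced but undefined pools: {missing}")
    return pools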
+ new_pools = [] + pool_name_ids = {} + for p in pools: + # Check that pools have mandatory attributes, since we have to + # bypass the normal validation layer to allow for name-only + for attr in ('protocol', 'lb_algorithm'): + if attr not in p: + raise exceptions.ValidationException( + detail="Pool definition for '{name}' missing required " + "attribute: {attr}".format(name=p['name'], + attr=attr)) + p['load_balancer_id'] = db_lb.id + p['project_id'] = db_lb.project_id + new_pool = (pool.PoolsController()._graph_create( + session, p)) + new_pools.append(new_pool) + pool_name_ids[new_pool.name] = new_pool.id + + # Now check quotas for listeners + if listeners and self.repositories.check_quota_met( + session, data_models.Listener, db_lb.project_id, + count=len(listeners)): + raise exceptions.QuotaException( + resource=data_models.Listener._name()) + + # Now create all of the listeners + new_lists = [] + for li in listeners: + default_pool = li.pop('default_pool', None) + # If there's a default pool, replace it with the ID + if default_pool: + pool_name = default_pool['name'] + pool_id = pool_name_ids.get(pool_name) + if not pool_id: + raise exceptions.SingleCreateDetailsMissing( + type='Pool', name=pool_name) + li['default_pool_id'] = pool_id + li['load_balancer_id'] = db_lb.id + li['project_id'] = db_lb.project_id + new_lists.append(listener.ListenersController()._graph_create( + session, li, pool_name_ids=pool_name_ids)) + + return new_pools, new_lists + + @wsme_pecan.wsexpose(lb_types.LoadBalancerRootResponse, + wtypes.text, status_code=200, + body=lb_types.LoadBalancerRootPUT) + def put(self, id, load_balancer): + """Updates a load balancer.""" + load_balancer = load_balancer.loadbalancer + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_lb = self._get_db_lb(context.session, id, show_deleted=False) + + self._auth_validate_action(context, db_lb.project_id, + constants.RBAC_PUT) + if not isinstance(load_balancer.vip_sg_ids, wtypes.UnsetType): + self._auth_validate_action(context, db_lb.project_id, + f"{constants.RBAC_PUT}:vip_sg_ids") + + if not isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType): + network_driver = utils.get_network_driver() + validate.qos_extension_enabled(network_driver) + if load_balancer.vip_qos_policy_id is not None: + if db_lb.vip.qos_policy_id != load_balancer.vip_qos_policy_id: + validate.qos_policy_exists(load_balancer.vip_qos_policy_id) + + if not isinstance(load_balancer.vip_sg_ids, wtypes.UnsetType): + if load_balancer.vip_sg_ids is None: + load_balancer.vip_sg_ids = [] + else: + for sg_id in load_balancer.vip_sg_ids: + validate.security_group_exists(sg_id, context=context) + + self._validate_vnic_type(db_lb.vip.vnic_type, load_balancer) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(db_lb.provider) + + with context.session.begin(): + self._test_lb_status(context.session, id) + + # Prepare the data for the driver data model + lb_dict = load_balancer.to_dict(render_unsets=False) + lb_dict['id'] = id + vip_dict = lb_dict.pop('vip', {}) + lb_dict = driver_utils.lb_dict_to_provider_dict(lb_dict) + if 'qos_policy_id' in vip_dict: + lb_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id'] + + # Also prepare the baseline object data + old_provider_lb = ( + driver_utils.db_loadbalancer_to_provider_loadbalancer( + db_lb, for_delete=True)) + + # Dispatch to the driver + LOG.info("Sending update Load Balancer %s to provider " + "%s", id, driver.name) + 
driver_utils.call_provider( + driver.name, driver.loadbalancer_update, + old_provider_lb, + driver_dm.LoadBalancer.from_dict(lb_dict)) + + db_lb_dict = load_balancer.to_dict(render_unsets=False) + if 'vip' in db_lb_dict: + db_vip_dict = db_lb_dict.pop('vip') + self.repositories.vip.update(context.session, id, + **db_vip_dict) + if db_lb_dict: + self.repositories.load_balancer.update(context.session, id, + **db_lb_dict) + + # Force SQL alchemy to query the DB, otherwise we get inconsistent + # results + context.session.expire_all() + with context.session.begin(): + db_lb = self._get_db_lb(context.session, id) + result = self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse) + return lb_types.LoadBalancerRootResponse(loadbalancer=result) + + @wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204) + def delete(self, id, cascade=False): + """Deletes a load balancer.""" + context = pecan_request.context.get('octavia_context') + cascade = strutils.bool_from_string(cascade) + with context.session.begin(): + db_lb = self._get_db_lb(context.session, id, show_deleted=False) + + self._auth_validate_action(context, db_lb.project_id, + constants.RBAC_DELETE) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(db_lb.provider) + + with context.session.begin(): + if (db_lb.listeners or db_lb.pools) and not cascade: + msg = _("Cannot delete Load Balancer %s - " + "it has children") % id + LOG.warning(msg) + raise exceptions.ValidationException(detail=msg) + self._test_lb_status(context.session, id, + lb_status=constants.PENDING_DELETE) + + LOG.info("Sending delete Load Balancer %s to provider %s", + id, driver.name) + provider_loadbalancer = ( + driver_utils.db_loadbalancer_to_provider_loadbalancer( + db_lb, for_delete=True)) + driver_utils.call_provider(driver.name, driver.loadbalancer_delete, + provider_loadbalancer, cascade) + + @pecan_expose() + def _lookup(self, id, *remainder): + """Overridden pecan _lookup method for custom routing. + + Currently it checks if this was a status request and routes + the request to the StatusController. + + 'statuses' is aliased here for backward compatibility with + neutron-lbaas LBaaS v2 API. 
+ """ + is_children = ( + id and remainder and ( + remainder[0] == 'status' or remainder[0] == 'statuses' or ( + remainder[0] == 'stats' or remainder[0] == 'failover' + ) + ) + ) + if is_children: + controller = remainder[0] + remainder = remainder[1:] + if controller in ('status', 'statuses'): + return StatusController(lb_id=id), remainder + if controller == 'stats': + return StatisticsController(lb_id=id), remainder + if controller == 'failover': + return FailoverController(lb_id=id), remainder + return None + + +class StatusController(base.BaseController): + RBAC_TYPE = constants.RBAC_LOADBALANCER + + def __init__(self, lb_id): + super().__init__() + self.id = lb_id + + @wsme_pecan.wsexpose(lb_types.StatusRootResponse, wtypes.text, + status_code=200) + def get(self): + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + load_balancer = self._get_db_lb(context.session, self.id, + show_deleted=False) + if not load_balancer: + LOG.info("Load balancer %s not found.", id) + raise exceptions.NotFound( + resource=data_models.LoadBalancer._name(), + id=id) + + self._auth_validate_action(context, load_balancer.project_id, + constants.RBAC_GET_STATUS) + + result = self._convert_db_to_type( + load_balancer, lb_types.LoadBalancerStatusResponse) + result = lb_types.StatusResponse(loadbalancer=result) + return lb_types.StatusRootResponse(statuses=result) + + +class StatisticsController(base.BaseController, stats.StatsMixin): + RBAC_TYPE = constants.RBAC_LOADBALANCER + + def __init__(self, lb_id): + super().__init__() + self.id = lb_id + + @wsme_pecan.wsexpose(lb_types.StatisticsRootResponse, wtypes.text, + status_code=200) + def get(self): + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + load_balancer = self._get_db_lb(context.session, self.id, + show_deleted=False) + if not load_balancer: + LOG.info("Load balancer %s not found.", id) + raise exceptions.NotFound( + resource=data_models.LoadBalancer._name(), + id=id) + + self._auth_validate_action(context, load_balancer.project_id, + constants.RBAC_GET_STATS) + + with context.session.begin(): + lb_stats = self.get_loadbalancer_stats(context.session, self.id) + + result = self._convert_db_to_type( + lb_stats, lb_types.LoadBalancerStatisticsResponse) + return lb_types.StatisticsRootResponse(stats=result) + + +class FailoverController(LoadBalancersController): + + def __init__(self, lb_id): + super().__init__() + self.lb_id = lb_id + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=202) + def put(self, **kwargs): + """Fails over a loadbalancer""" + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_lb = self._get_db_lb(context.session, self.lb_id, + show_deleted=False) + + self._auth_validate_action(context, db_lb.project_id, + constants.RBAC_PUT_FAILOVER) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(db_lb.provider) + + with context.session.begin(): + self._test_and_set_failover_prov_status(context.session, + self.lb_id) + LOG.info("Sending failover request for load balancer %s to the " + "provider %s", self.lb_id, driver.name) + driver_utils.call_provider( + driver.name, driver.loadbalancer_failover, self.lb_id) diff --git a/octavia/api/v2/controllers/member.py b/octavia/api/v2/controllers/member.py new file mode 100644 index 0000000000..aad93dfff9 --- /dev/null +++ b/octavia/api/v2/controllers/member.py @@ -0,0 +1,523 @@ +# Copyright 2014 Rackspace +# Copyright 2016 Blue Box, an 
IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia_lib.api.drivers import data_models as driver_dm +from oslo_db import exception as odb_exceptions +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import strutils +from pecan import request as pecan_request +from sqlalchemy.orm import exc as sa_exception +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.drivers import driver_factory +from octavia.api.drivers import utils as driver_utils +from octavia.api.v2.controllers import base +from octavia.api.v2.types import member as member_types +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import validate +from octavia.db import prepare as db_prepare +from octavia.i18n import _ + + +LOG = logging.getLogger(__name__) + + +class MemberController(base.BaseController): + RBAC_TYPE = constants.RBAC_MEMBER + + def __init__(self, pool_id): + super().__init__() + self.pool_id = pool_id + + @wsme_pecan.wsexpose(member_types.MemberRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get(self, id, fields=None): + """Gets a single pool member's details.""" + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_member = self._get_db_member(context.session, id, + show_deleted=False) + + self._auth_validate_action(context, db_member.project_id, + constants.RBAC_GET_ONE) + + self._validate_pool_id(id, db_member.pool_id) + + result = self._convert_db_to_type( + db_member, member_types.MemberResponse) + if fields is not None: + result = self._filter_fields([result], fields)[0] + return member_types.MemberRootResponse(member=result) + + @wsme_pecan.wsexpose(member_types.MembersRootResponse, [wtypes.text], + ignore_extra_args=True) + def get_all(self, fields=None): + """Lists all pool members of a pool.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + + with context.session.begin(): + pool = self._get_db_pool(context.session, self.pool_id, + show_deleted=False, limited_graph=True) + + self._auth_validate_action(context, pool.project_id, + constants.RBAC_GET_ALL) + + db_members, links = self.repositories.member.get_all_API_list( + context.session, show_deleted=False, + pool_id=self.pool_id, + pagination_helper=pcontext.get(constants.PAGINATION_HELPER), + limited_graph=True) + result = self._convert_db_to_type( + db_members, [member_types.MemberResponse]) + if fields is not None: + result = self._filter_fields(result, fields) + return member_types.MembersRootResponse( + members=result, members_links=links) + + def _get_affected_listener_ids(self, session, member=None): + """Gets a list of all listeners this request potentially affects.""" + if member: + listener_ids = [li.id for li in member.pool.listeners] + else: + pool = self._get_db_pool(session, self.pool_id) + listener_ids = [li.id for li in pool.listeners] + return listener_ids 
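The _test_lb_and_listener_and_pool_statuses helper defined next leans on test_and_set_lb_and_listeners_prov_status, whose essential property is that checking mutability and flipping the status to PENDING_UPDATE happen as one atomic step, so two concurrent API requests cannot both pass the gate. In Octavia this is a guarded SQL UPDATE; the in-memory analogue below makes the same point with a lock:

import threading


class ProvisioningGate:
    # Toy stand-in for a row's provisioning_status column.
    MUTABLE_STATUSES = ('ACTIVE',)

    def __init__(self, status='ACTIVE'):
        self._lock = threading.Lock()
        self.status = status

    def test_and_set(self, new_status='PENDING_UPDATE'):
        # Check-and-flip under a single lock; the database version
        # expresses the same idea as
        # UPDATE ... SET provisioning_status = 'PENDING_UPDATE'
        # WHERE provisioning_status = 'ACTIVE'.
        with self._lock:
            if self.status not in self.MUTABLE_STATUSES:
                return False  # the caller raises ImmutableObject
            self.status = new_status
            return True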
+ + def _test_lb_and_listener_and_pool_statuses(self, session, member=None): + """Verify load balancer is in a mutable state.""" + # We need to verify that any listeners referencing this member's + # pool are also mutable + pool = self._get_db_pool(session, self.pool_id) + # Check the parent is not locked for some reason (ERROR, etc.) + if pool.provisioning_status not in constants.MUTABLE_STATUSES: + raise exceptions.ImmutableObject(resource='Pool', id=self.pool_id) + load_balancer_id = pool.load_balancer_id + if not self.repositories.test_and_set_lb_and_listeners_prov_status( + session, load_balancer_id, + constants.PENDING_UPDATE, constants.PENDING_UPDATE, + listener_ids=self._get_affected_listener_ids(session, member), + pool_id=self.pool_id): + LOG.info("Member cannot be created or modified because the " + "Load Balancer is in an immutable state") + raise exceptions.ImmutableObject(resource='Load Balancer', + id=load_balancer_id) + + def _validate_create_member(self, lock_session, member_dict): + """Validate creating member on pool.""" + try: + ret = self.repositories.member.create(lock_session, **member_dict) + lock_session.flush() + return ret + except odb_exceptions.DBDuplicateEntry as e: + raise exceptions.DuplicateMemberEntry( + ip_address=member_dict.get('ip_address'), + port=member_dict.get('protocol_port')) from e + except odb_exceptions.DBReferenceError as e: + raise exceptions.InvalidOption(value=member_dict.get(e.key), + option=e.key) from e + except odb_exceptions.DBError as e: + raise exceptions.APIException() from e + return None + + def _validate_pool_id(self, member_id, db_member_pool_id): + if db_member_pool_id != self.pool_id: + raise exceptions.NotFound(resource='Member', id=member_id) + + @wsme_pecan.wsexpose(member_types.MemberRootResponse, + body=member_types.MemberRootPOST, status_code=201) + def post(self, member_): + """Creates a pool member on a pool.""" + member = member_.member + context = pecan_request.context.get('octavia_context') + + flavor_dict = {} + with context.session.begin(): + pool = self.repositories.pool.get(context.session, id=self.pool_id) + member.project_id, provider = self._get_lb_project_id_provider( + context.session, pool.load_balancer_id) + if pool.load_balancer.flavor_id: + try: + flavor_dict = ( + self.repositories.flavor.get_flavor_metadata_dict( + context.session, pool.load_balancer.flavor_id)) + except sa_exception.NoResultFound: + LOG.error("load balancer has a flavor ID: %s that was not " + "found in the database. 
Assuming no flavor.", + pool.load_balancer.flavor_id) + + self._auth_validate_action(context, member.project_id, + constants.RBAC_POST) + + validate.ip_not_reserved(member.address) + + # Validate member subnet + if (member.subnet_id and + not validate.subnet_exists(member.subnet_id, context=context)): + raise exceptions.NotFound(resource='Subnet', id=member.subnet_id) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + context.session.begin() + try: + if self.repositories.check_quota_met( + context.session, + data_models.Member, + member.project_id): + raise exceptions.QuotaException( + resource=data_models.Member._name()) + + db_member_dict = member.to_dict(render_unsets=True) + + # Validate and store port SR-IOV vnic_type + request_sriov = db_member_dict.pop('request_sriov') + if (request_sriov and not + flavor_dict.get(constants.ALLOW_MEMBER_SRIOV, False)): + raise exceptions.MemberSRIOVDisabled + if request_sriov: + db_member_dict[constants.VNIC_TYPE] = ( + constants.VNIC_TYPE_DIRECT) + else: + db_member_dict[constants.VNIC_TYPE] = ( + constants.VNIC_TYPE_NORMAL) + + member_dict = db_prepare.create_member(db_member_dict, + self.pool_id, + bool(pool.health_monitor)) + + self._test_lb_and_listener_and_pool_statuses(context.session) + + db_member = self._validate_create_member(context.session, + member_dict) + + # Prepare the data for the driver data model + provider_member = ( + driver_utils.db_member_to_provider_member(db_member)) + + # Dispatch to the driver + LOG.info("Sending create Member %s to provider %s", + db_member.id, driver.name) + driver_utils.call_provider( + driver.name, driver.member_create, provider_member) + + context.session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + context.session.rollback() + + with context.session.begin(): + db_member = self._get_db_member(context.session, db_member.id) + result = self._convert_db_to_type(db_member, + member_types.MemberResponse) + return member_types.MemberRootResponse(member=result) + + def _graph_create(self, lock_session, member_dict): + pool = self.repositories.pool.get(lock_session, id=self.pool_id) + + # Validate and store port SR-IOV vnic_type + request_sriov = member_dict.pop('request_sriov') + flavor_dict = {} + if pool.load_balancer.flavor_id: + try: + flavor_dict = ( + self.repositories.flavor.get_flavor_metadata_dict( + lock_session, pool.load_balancer.flavor_id)) + except sa_exception.NoResultFound: + LOG.error("load balancer has a flavor ID: %s that was not " + "found in the database. Assuming no flavor.", + pool.load_balancer.flavor_id) + if (request_sriov and not + flavor_dict.get(constants.ALLOW_MEMBER_SRIOV, False)): + raise exceptions.MemberSRIOVDisabled + + if request_sriov: + member_dict[constants.VNIC_TYPE] = constants.VNIC_TYPE_DIRECT + else: + member_dict[constants.VNIC_TYPE] = constants.VNIC_TYPE_NORMAL + + member_dict = db_prepare.create_member( + member_dict, self.pool_id, bool(pool.health_monitor)) + db_member = self._validate_create_member(lock_session, member_dict) + + return db_member + + def _set_default_on_none(self, member): + """Reset settings to their default values if None/null was passed in + + A None/null value can be passed in to clear a value. PUT values + that were not provided by the user have a type of wtypes.UnsetType. + If the user is attempting to clear values, they should either + be set to None (for example in the name field) or they should be + reset to their default values. 
+ This method is intended to handle those values that need to be set + back to a default value. + """ + if member.backup is None: + member.backup = False + if member.weight is None: + member.weight = constants.DEFAULT_WEIGHT + + @wsme_pecan.wsexpose(member_types.MemberRootResponse, + wtypes.text, body=member_types.MemberRootPUT, + status_code=200) + def put(self, id, member_): + """Updates a pool member.""" + member = member_.member + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_member = self._get_db_member(context.session, id, + show_deleted=False) + pool = self.repositories.pool.get(context.session, + id=db_member.pool_id) + project_id, provider = self._get_lb_project_id_provider( + context.session, pool.load_balancer_id) + + self._auth_validate_action(context, project_id, constants.RBAC_PUT) + + self._validate_pool_id(id, db_member.pool_id) + + self._set_default_on_none(member) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + with context.session.begin(): + self._test_lb_and_listener_and_pool_statuses(context.session, + member=db_member) + + # Prepare the data for the driver data model + member_dict = member.to_dict(render_unsets=False) + member_dict['id'] = id + provider_member_dict = ( + driver_utils.member_dict_to_provider_dict(member_dict)) + + # Also prepare the baseline object data + old_provider_member = driver_utils.db_member_to_provider_member( + db_member) + + # Dispatch to the driver + LOG.info("Sending update Member %s to provider %s", id, + driver.name) + driver_utils.call_provider( + driver.name, driver.member_update, + old_provider_member, + driver_dm.Member.from_dict(provider_member_dict)) + + # Update the database to reflect what the driver just accepted + member.provisioning_status = constants.PENDING_UPDATE + db_member_dict = member.to_dict(render_unsets=False) + self.repositories.member.update(context.session, id, + **db_member_dict) + + # Force SQL alchemy to query the DB, otherwise we get inconsistent + # results + context.session.expire_all() + with context.session.begin(): + db_member = self._get_db_member(context.session, id) + result = self._convert_db_to_type(db_member, + member_types.MemberResponse) + return member_types.MemberRootResponse(member=result) + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) + def delete(self, id): + """Deletes a pool member.""" + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_member = self._get_db_member(context.session, id, + show_deleted=False) + + pool = self.repositories.pool.get(context.session, + id=db_member.pool_id) + project_id, provider = self._get_lb_project_id_provider( + context.session, pool.load_balancer_id) + + self._auth_validate_action(context, project_id, constants.RBAC_DELETE) + + self._validate_pool_id(id, db_member.pool_id) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + with context.session.begin(): + self._test_lb_and_listener_and_pool_statuses(context.session, + member=db_member) + self.repositories.member.update( + context.session, db_member.id, + provisioning_status=constants.PENDING_DELETE) + + LOG.info("Sending delete Member %s to provider %s", id, + driver.name) + provider_member = ( + driver_utils.db_member_to_provider_member(db_member)) + driver_utils.call_provider(driver.name, driver.member_delete, + provider_member) + + +class MembersController(MemberController): + + def 
__init__(self, pool_id): + super().__init__(pool_id) + + @wsme_pecan.wsexpose(None, wtypes.text, + body=member_types.MembersRootPUT, status_code=202) + def put(self, additive_only=False, members_=None): + """Updates all members.""" + members = members_.members + additive_only = strutils.bool_from_string(additive_only) + context = pecan_request.context.get('octavia_context') + + with context.session.begin(): + db_pool = self._get_db_pool(context.session, self.pool_id) + + project_id, provider = self._get_lb_project_id_provider( + context.session, db_pool.load_balancer_id) + + # Check POST+PUT+DELETE since this operation is all of 'CUD' + self._auth_validate_action(context, project_id, constants.RBAC_POST) + self._auth_validate_action(context, project_id, constants.RBAC_PUT) + if not additive_only: + self._auth_validate_action(context, project_id, + constants.RBAC_DELETE) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + with context.session.begin(): + self._test_lb_and_listener_and_pool_statuses(context.session) + + # Reload the pool, the members may have been updated between the + # first query in this function and the lock of the loadbalancer + db_pool = self._get_db_pool(context.session, self.pool_id) + old_members = db_pool.members + + old_member_uniques = { + (m.ip_address, m.protocol_port): m.id for m in old_members} + new_member_uniques = [ + (m.address, m.protocol_port) for m in members] + + # Find members that are brand new or updated + new_members = [] + updated_members = [] + updated_member_uniques = set() + for m in members: + key = (m.address, m.protocol_port) + if key not in old_member_uniques: + validate.ip_not_reserved(m.address) + new_members.append(m) + else: + m.id = old_member_uniques[key] + if key in updated_member_uniques: + LOG.error("Member %s is updated multiple times in " + "the same batch request.", m.id) + raise exceptions.ValidationException( + detail=_("Member must be updated only once in the " + "same request.")) + updated_member_uniques.add(key) + updated_members.append(m) + + # Find members that are deleted + deleted_members = [] + for m in old_members: + if (m.ip_address, m.protocol_port) not in new_member_uniques: + deleted_members.append(m) + + if not (deleted_members or new_members or updated_members): + LOG.info("Member batch update is a noop, rolling back and " + "returning early.") + context.session.rollback() + return + + if additive_only: + member_count_diff = len(new_members) + else: + member_count_diff = len(new_members) - len(deleted_members) + if member_count_diff > 0 and self.repositories.check_quota_met( + context.session, data_models.Member, + db_pool.project_id, count=member_count_diff): + raise exceptions.QuotaException( + resource=data_models.Member._name()) + + provider_members = [] + valid_subnets = set() + # Create new members + for m in new_members: + # NOTE(mnaser): In order to avoid hitting the Neutron API hard + # when creating many new members, we cache the + # validation results. We also validate new + # members only since subnet ID is immutable. + # If the member doesn't have a subnet, or the subnet is + # already valid, move on. Run validate and add it to + # cache otherwise. + if m.subnet_id and m.subnet_id not in valid_subnets: + # If the subnet does not exist, + # raise an exception and get out. 
+ if not validate.subnet_exists( + m.subnet_id, context=context): + raise exceptions.NotFound( + resource='Subnet', id=m.subnet_id) + + # Mark the subnet as valid for future runs. + valid_subnets.add(m.subnet_id) + + m = m.to_dict(render_unsets=False) + m['project_id'] = db_pool.project_id + created_member = self._graph_create(context.session, m) + provider_member = driver_utils.db_member_to_provider_member( + created_member) + provider_members.append(provider_member) + # Update old members + for m in updated_members: + m.provisioning_status = constants.PENDING_UPDATE + m.project_id = db_pool.project_id + db_member_dict = m.to_dict(render_unsets=False) + db_member_dict.pop('id') + # We don't allow updating the vnic_type + # TODO(johnsom) Give the user an error once we change the + # wsme type for batch member update to not use + # the MemberPOST type + db_member_dict.pop(constants.REQUEST_SRIOV) + self.repositories.member.update( + context.session, m.id, **db_member_dict) + + m.pool_id = self.pool_id + provider_members.append( + driver_utils.db_member_to_provider_member(m)) + # Delete old members + for m in deleted_members: + if additive_only: + # Members are appended to the dict and their status remains + # unchanged, because they are logically "untouched". + db_member_dict = m.to_dict(render_unsets=False) + db_member_dict.pop('id') + m.pool_id = self.pool_id + provider_members.append( + driver_utils.db_member_to_provider_member(m)) + else: + # Members are changed to PENDING_DELETE and not passed. + self.repositories.member.update( + context.session, m.id, + provisioning_status=constants.PENDING_DELETE) + + # Dispatch to the driver + LOG.info("Sending Pool %s batch member update to provider %s", + db_pool.id, driver.name) + driver_utils.call_provider( + driver.name, driver.member_batch_update, db_pool.id, + provider_members) diff --git a/octavia/api/v2/controllers/pool.py b/octavia/api/v2/controllers/pool.py new file mode 100644 index 0000000000..9131a1aa19 --- /dev/null +++ b/octavia/api/v2/controllers/pool.py @@ -0,0 +1,559 @@ +# Copyright 2014 Rackspace +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
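
The batch member PUT in MembersController above (member.py) classifies each submitted member as new, updated, or deleted by keying both the existing and the requested sets on `(address, protocol_port)`. A self-contained sketch of that set arithmetic, with made-up sample data:

# Existing members keyed on (ip_address, protocol_port), as in
# old_member_uniques above; the values are their member ids.
old = {('10.0.0.5', 80): 'member-1', ('10.0.0.6', 80): 'member-2'}
# The (address, protocol_port) pairs submitted in the batch request.
new = [('10.0.0.5', 80), ('10.0.0.7', 80)]

created = [key for key in new if key not in old]       # brand new members
updated = [key for key in new if key in old]           # updated in place
deleted = [key for key in old if key not in set(new)]  # removed from request

print(created)  # [('10.0.0.7', 80)]
print(updated)  # [('10.0.0.5', 80)] -> resolves to 'member-1'
print(deleted)  # [('10.0.0.6', 80)] -> PENDING_DELETE unless additive_only
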
+ +from octavia_lib.api.drivers import data_models as driver_dm +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_db import exception as odb_exceptions +from oslo_log import log as logging +from oslo_utils import excutils +from pecan import expose as pecan_expose +from pecan import request as pecan_request +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.drivers import driver_factory +from octavia.api.drivers import utils as driver_utils +from octavia.api.v2.controllers import base +from octavia.api.v2.controllers import health_monitor +from octavia.api.v2.controllers import member +from octavia.api.v2.types import pool as pool_types +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import validate +from octavia.db import prepare as db_prepare +from octavia.i18n import _ + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class PoolsController(base.BaseController): + RBAC_TYPE = constants.RBAC_POOL + + def __init__(self): + super().__init__() + + @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get(self, id, fields=None): + """Gets a pool's details.""" + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_pool = self._get_db_pool(context.session, id, + show_deleted=False) + + self._auth_validate_action(context, db_pool.project_id, + constants.RBAC_GET_ONE) + + result = self._convert_db_to_type(db_pool, pool_types.PoolResponse) + if fields is not None: + result = self._filter_fields([result], fields)[0] + return pool_types.PoolRootResponse(pool=result) + + @wsme_pecan.wsexpose(pool_types.PoolsRootResponse, wtypes.text, + [wtypes.text], ignore_extra_args=True) + def get_all(self, project_id=None, fields=None): + """Lists all pools.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + + query_filter = self._auth_get_all(context, project_id) + + with context.session.begin(): + db_pools, links = self.repositories.pool.get_all_API_list( + context.session, show_deleted=False, + pagination_helper=pcontext.get(constants.PAGINATION_HELPER), + **query_filter) + result = self._convert_db_to_type(db_pools, [pool_types.PoolResponse]) + if fields is not None: + result = self._filter_fields(result, fields) + return pool_types.PoolsRootResponse(pools=result, pools_links=links) + + def _get_affected_listener_ids(self, pool): + """Gets a list of all listeners this request potentially affects.""" + listener_ids = [li.id for li in pool.listeners] + return listener_ids + + def _test_lb_and_listener_statuses(self, session, lb_id, listener_ids): + """Verify load balancer is in a mutable state.""" + # We need to verify that any listeners referencing this pool are also + # mutable + if not self.repositories.test_and_set_lb_and_listeners_prov_status( + session, lb_id, + constants.PENDING_UPDATE, constants.PENDING_UPDATE, + listener_ids=listener_ids): + LOG.info("Pool cannot be created or modified because the Load " + "Balancer is in an immutable state") + raise exceptions.ImmutableObject(resource=_('Load Balancer'), + id=lb_id) + + def _validate_create_pool(self, lock_session, pool_dict, listener_id=None): + """Validate creating pool on load balancer. + + Update database for load balancer and (optional) listener based on + provisioning status. 
+ """ + # Make sure we have a client CA if they specify a CRL + if (pool_dict.get('crl_container_id') and + not pool_dict.get('ca_tls_certificate_id')): + raise exceptions.ValidationException(detail=_( + "A CA certificate reference is required to " + "specify a revocation list.")) + + tls_certificate_id = pool_dict.get('tls_certificate_id', None) + tls_refs = [tls_certificate_id] if tls_certificate_id else [] + self._validate_tls_refs(tls_refs) + + # Validate the client CA cert and optional client CRL + if pool_dict.get('ca_tls_certificate_id'): + self._validate_client_ca_and_crl_refs( + pool_dict.get('ca_tls_certificate_id'), + pool_dict.get('crl_container_id', None)) + + # Check TLS cipher prohibit list + if 'tls_ciphers' in pool_dict and pool_dict['tls_ciphers']: + rejected_ciphers = validate.check_cipher_prohibit_list( + pool_dict['tls_ciphers']) + if rejected_ciphers: + raise exceptions.ValidationException(detail=_( + 'The following ciphers have been prohibited by an ' + 'administrator: ' + ', '.join(rejected_ciphers))) + + if pool_dict['tls_enabled']: + # Validate TLS version list + validate.check_tls_version_list(pool_dict['tls_versions']) + # Validate TLS versions against minimum + validate.check_tls_version_min(pool_dict['tls_versions']) + # Validate ALPN protocol list + validate.check_alpn_protocols(pool_dict['alpn_protocols']) + + try: + ret = self.repositories.create_pool_on_load_balancer( + lock_session, pool_dict, + listener_id=listener_id) + lock_session.flush() + return ret + except odb_exceptions.DBDuplicateEntry as e: + raise exceptions.IDAlreadyExists() from e + except odb_exceptions.DBReferenceError as e: + raise exceptions.InvalidOption(value=pool_dict.get(e.key), + option=e.key) from e + except odb_exceptions.DBError as e: + raise exceptions.APIException() from e + + def _is_only_specified_in_request(self, request, **kwargs): + request_attrs = [] + check_attrs = kwargs['check_exist_attrs'] + escaped_attrs = ['from_data_model', 'translate_key_to_data_model', + 'translate_dict_keys_to_data_model', 'to_dict'] + + for attr in dir(request): + if attr.startswith('_') or attr in escaped_attrs: + continue + request_attrs.append(attr) + + for req_attr in request_attrs: + if (getattr(request, req_attr) and req_attr not in check_attrs): + return False + return True + + def _validate_pool_request_for_udp_sctp(self, request): + if request.session_persistence: + if (request.session_persistence.type == + constants.SESSION_PERSISTENCE_SOURCE_IP and + not self._is_only_specified_in_request( + request.session_persistence, + check_exist_attrs=['type', 'persistence_timeout', + 'persistence_granularity'])): + raise exceptions.ValidationException(detail=_( + "session_persistence %s type for UDP and SCTP protocols " + "only accepts: type, persistence_timeout, " + "persistence_granularity.") % ( + constants.SESSION_PERSISTENCE_SOURCE_IP)) + if request.session_persistence.cookie_name: + raise exceptions.ValidationException(detail=_( + "Cookie names are not supported for %s pools.") % + "/".join((constants.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP))) + if request.session_persistence.type in [ + constants.SESSION_PERSISTENCE_HTTP_COOKIE, + constants.SESSION_PERSISTENCE_APP_COOKIE]: + raise exceptions.ValidationException(detail=_( + "Session persistence of type %(type)s is not supported " + "for %(protocol)s protocol pools.") % { + 'type': request.session_persistence.type, + 'protocol': "/".join((constants.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP))}) + + 
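
A sketch of the rule `_validate_pool_request_for_udp_sctp` enforces for session persistence: UDP/SCTP pools only accept SOURCE_IP persistence, and only the three listed attributes may carry values. The function below is an illustrative reimplementation over a plain dict, not the controller code itself:

ALLOWED_SOURCE_IP_ATTRS = {'type', 'persistence_timeout',
                           'persistence_granularity'}

def sp_valid_for_udp_sctp(sp):
    """Mirror the session_persistence checks for UDP/SCTP pools."""
    if sp.get('type') != 'SOURCE_IP':
        return False  # HTTP_COOKIE / APP_COOKIE are rejected outright
    populated = {key for key, value in sp.items() if value}
    return populated <= ALLOWED_SOURCE_IP_ATTRS

print(sp_valid_for_udp_sctp({'type': 'SOURCE_IP',
                             'persistence_timeout': 360}))  # True
print(sp_valid_for_udp_sctp({'type': 'SOURCE_IP',
                             'cookie_name': 'chocolate'}))  # False
print(sp_valid_for_udp_sctp({'type': 'HTTP_COOKIE'}))       # False
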
@wsme_pecan.wsexpose(pool_types.PoolRootResponse, + body=pool_types.PoolRootPOST, status_code=201) + def post(self, pool_): + """Creates a pool on a load balancer or listener. + + Note that this can optionally take a listener_id with which the pool + should be associated as the listener's default_pool. If specified, + the pool creation will fail if the listener specified already has + a default_pool. + """ + # For some API requests the listener_id will be passed in the + # pool_dict: + pool = pool_.pool + context = pecan_request.context.get('octavia_context') + listener = None + with context.session.begin(): + if pool.loadbalancer_id: + pool.project_id, provider = self._get_lb_project_id_provider( + context.session, pool.loadbalancer_id) + elif pool.listener_id: + listener = self.repositories.listener.get( + context.session, id=pool.listener_id) + pool.loadbalancer_id = listener.load_balancer_id + pool.project_id, provider = self._get_lb_project_id_provider( + context.session, pool.loadbalancer_id) + else: + msg = _("Must provide at least one of: " + "loadbalancer_id, listener_id") + raise exceptions.ValidationException(detail=msg) + + self._auth_validate_action(context, pool.project_id, + constants.RBAC_POST) + + if pool.listener_id and listener: + if listener.protocol == lib_consts.PROTOCOL_PROMETHEUS: + raise exceptions.ListenerNoChildren( + protocol=lib_consts.PROTOCOL_PROMETHEUS) + self._validate_protocol(listener.protocol, pool.protocol) + + if pool.protocol in (constants.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP): + self._validate_pool_request_for_udp_sctp(pool) + else: + if (pool.session_persistence and ( + pool.session_persistence.persistence_timeout or + pool.session_persistence.persistence_granularity)): + raise exceptions.ValidationException(detail=_( + "persistence_timeout and persistence_granularity " + "is only for UDP and SCTP protocol pools.")) + + if pool.session_persistence: + sp_dict = pool.session_persistence.to_dict(render_unsets=False) + validate.check_session_persistence(sp_dict) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + context.session.begin() + try: + if self.repositories.check_quota_met( + context.session, + data_models.Pool, + pool.project_id): + raise exceptions.QuotaException( + resource=data_models.Pool._name()) + + listener_repo = self.repositories.listener + pool_dict = db_prepare.create_pool( + pool.to_dict(render_unsets=True)) + + listener_id = pool_dict.pop('listener_id', None) + if listener_id: + if listener_repo.has_default_pool(context.session, + listener_id): + raise exceptions.DuplicatePoolEntry() + + self._test_lb_and_listener_statuses( + context.session, lb_id=pool_dict['load_balancer_id'], + listener_ids=[listener_id] if listener_id else []) + + db_pool = self._validate_create_pool( + context.session, pool_dict, listener_id) + + # Prepare the data for the driver data model + provider_pool = ( + driver_utils.db_pool_to_provider_pool(db_pool)) + + # Dispatch to the driver + LOG.info("Sending create Pool %s to provider %s", + db_pool.id, driver.name) + driver_utils.call_provider( + driver.name, driver.pool_create, provider_pool) + + context.session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + context.session.rollback() + + with context.session.begin(): + db_pool = self._get_db_pool(context.session, db_pool.id) + result = self._convert_db_to_type(db_pool, pool_types.PoolResponse) + return pool_types.PoolRootResponse(pool=result) + + def 
_graph_create(self, session, pool_dict):
+        load_balancer_id = pool_dict['load_balancer_id']
+        pool_dict = db_prepare.create_pool(
+            pool_dict, load_balancer_id)
+        members = pool_dict.pop('members', []) or []
+        hm = pool_dict.pop('health_monitor', None)
+        db_pool = self._validate_create_pool(
+            session, pool_dict)
+
+        # Check quotas for healthmonitors
+        if hm and self.repositories.check_quota_met(
+                session, data_models.HealthMonitor,
+                db_pool.project_id):
+            raise exceptions.QuotaException(
+                resource=data_models.HealthMonitor._name())
+
+        # Now possibly create a healthmonitor
+        if hm:
+            hm[constants.POOL_ID] = db_pool.id
+            hm[constants.PROJECT_ID] = db_pool.project_id
+            new_hm = health_monitor.HealthMonitorController()._graph_create(
+                session, hm)
+            if db_pool.protocol in (constants.PROTOCOL_UDP,
+                                    lib_consts.PROTOCOL_SCTP):
+                health_monitor.HealthMonitorController(
+                )._validate_healthmonitor_request_for_udp_sctp(new_hm,
+                                                               db_pool)
+            else:
+                if new_hm.type in (constants.HEALTH_MONITOR_UDP_CONNECT,
+                                   lib_consts.HEALTH_MONITOR_SCTP):
+                    raise exceptions.ValidationException(detail=_(
+                        "The %(type)s type is only supported for pools of "
+                        "type %(protocol)s.") % {
+                        'type': new_hm.type,
+                        'protocol': '/'.join((constants.PROTOCOL_UDP,
+                                              lib_consts.PROTOCOL_SCTP))})
+            db_pool.health_monitor = new_hm
+
+        # Now check quotas for members
+        if members and self.repositories.check_quota_met(
+                session, data_models.Member,
+                db_pool.project_id, count=len(members)):
+            raise exceptions.QuotaException(
+                resource=data_models.Member._name())
+
+        # Now create members
+        new_members = []
+        for m in members:
+            validate.ip_not_reserved(m["ip_address"])
+
+            m['project_id'] = db_pool.project_id
+            new_members.append(
+                member.MembersController(db_pool.id)._graph_create(
+                    session, m))
+        db_pool.members = new_members
+        return db_pool
+
+    def _validate_pool_PUT(self, pool, db_pool):
+
+        if db_pool.protocol in (constants.PROTOCOL_UDP,
+                                lib_consts.PROTOCOL_SCTP):
+            self._validate_pool_request_for_udp_sctp(pool)
+        else:
+            if (pool.session_persistence and (
+                    pool.session_persistence.persistence_timeout or
+                    pool.session_persistence.persistence_granularity)):
+                raise exceptions.ValidationException(detail=_(
+                    "persistence_timeout and persistence_granularity "
+                    "is only for UDP and SCTP protocol pools."))
+
+        if pool.session_persistence:
+            sp_dict = pool.session_persistence.to_dict(render_unsets=False)
+            validate.check_session_persistence(sp_dict)
+
+        crl_ref = None
+        # If we got a crl_ref and it's not unset, use it
+        if (pool.crl_container_ref and
+                pool.crl_container_ref != wtypes.Unset):
+            crl_ref = pool.crl_container_ref
+        # If we got Unset and a CRL exists in the DB, use the DB crl_ref
+        elif (db_pool.crl_container_id and
+                pool.crl_container_ref == wtypes.Unset):
+            crl_ref = db_pool.crl_container_id
+
+        ca_ref = None
+        db_ca_ref = db_pool.ca_tls_certificate_id
+        if pool.ca_tls_container_ref != wtypes.Unset:
+            if not pool.ca_tls_container_ref and db_ca_ref and crl_ref:
+                raise exceptions.ValidationException(detail=_(
+                    "A CA reference cannot be removed when a "
+                    "certificate revocation list is present."))
+
+            if not pool.ca_tls_container_ref and not db_ca_ref and crl_ref:
+                raise exceptions.ValidationException(detail=_(
+                    "A CA reference is required to "
+                    "specify a certificate revocation list."))
+        if pool.ca_tls_container_ref:
+            ca_ref = pool.ca_tls_container_ref
+        elif db_ca_ref and pool.ca_tls_container_ref == wtypes.Unset:
+            ca_ref = db_ca_ref
+        elif crl_ref and not db_ca_ref:
+            raise 
exceptions.ValidationException(detail=_( + "A CA reference is required to " + "specify a certificate revocation list.")) + + if pool.tls_container_ref: + self._validate_tls_refs([pool.tls_container_ref]) + + # Validate the client CA cert and optional client CRL + if ca_ref: + self._validate_client_ca_and_crl_refs(ca_ref, crl_ref) + + # Check TLS cipher prohibit list + if pool.tls_ciphers: + rejected_ciphers = validate.check_cipher_prohibit_list( + pool.tls_ciphers) + if rejected_ciphers: + raise exceptions.ValidationException(detail=_( + "The following ciphers have been prohibited by an " + "administrator: " + ', '.join(rejected_ciphers))) + + if pool.tls_versions is not wtypes.Unset: + # Validate TLS version list + validate.check_tls_version_list(pool.tls_versions) + # Validate TLS version against minimum + validate.check_tls_version_min(pool.tls_versions) + + if pool.alpn_protocols is not wtypes.Unset: + # Validate ALPN protocol list + validate.check_alpn_protocols(pool.alpn_protocols) + + @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text, + body=pool_types.PoolRootPut, status_code=200) + def put(self, id, pool_): + """Updates a pool on a load balancer.""" + pool = pool_.pool + context = pecan_request.context.get('octavia_context') + with context.session.begin(): + db_pool = self._get_db_pool(context.session, id, + show_deleted=False) + + project_id, provider = self._get_lb_project_id_provider( + context.session, db_pool.load_balancer_id) + + self._auth_validate_action(context, project_id, constants.RBAC_PUT) + + if pool.tls_versions is None: + pool.tls_versions = CONF.api_settings.default_pool_tls_versions + if pool.tls_ciphers is None: + pool.tls_ciphers = CONF.api_settings.default_pool_ciphers + + if (pool.session_persistence and + not pool.session_persistence.type and + db_pool.session_persistence and + db_pool.session_persistence.type): + pool.session_persistence.type = db_pool.session_persistence.type + + self._validate_pool_PUT(pool, db_pool) + + # Load the driver early as it also provides validation + driver = driver_factory.get_driver(provider) + + with context.session.begin(): + self._test_lb_and_listener_statuses( + context.session, lb_id=db_pool.load_balancer_id, + listener_ids=self._get_affected_listener_ids(db_pool)) + + # Prepare the data for the driver data model + pool_dict = pool.to_dict(render_unsets=False) + pool_dict['id'] = id + provider_pool_dict = ( + driver_utils.pool_dict_to_provider_dict(pool_dict)) + + # Also prepare the baseline object data + old_provider_pool = driver_utils.db_pool_to_provider_pool( + db_pool, for_delete=True) + + # Dispatch to the driver + LOG.info("Sending update Pool %s to provider %s", id, driver.name) + driver_utils.call_provider( + driver.name, driver.pool_update, + old_provider_pool, + driver_dm.Pool.from_dict(provider_pool_dict)) + + # Update the database to reflect what the driver just accepted + pool.provisioning_status = constants.PENDING_UPDATE + db_pool_dict = pool.to_dict(render_unsets=False) + self.repositories.update_pool_and_sp(context.session, id, + db_pool_dict) + + # Force SQL alchemy to query the DB, otherwise we get inconsistent + # results + context.session.expire_all() + with context.session.begin(): + db_pool = self._get_db_pool(context.session, id) + result = self._convert_db_to_type(db_pool, pool_types.PoolResponse) + return pool_types.PoolRootResponse(pool=result) + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) + def delete(self, id): + """Deletes a pool from a load balancer.""" + context 
= pecan_request.context.get('octavia_context')
+        with context.session.begin():
+            db_pool = self._get_db_pool(context.session, id,
+                                        show_deleted=False)
+
+            project_id, provider = self._get_lb_project_id_provider(
+                context.session, db_pool.load_balancer_id)
+
+        self._auth_validate_action(context, project_id, constants.RBAC_DELETE)
+
+        if db_pool.l7policies:
+            raise exceptions.PoolInUseByL7Policy(
+                id=db_pool.id, l7policy_id=db_pool.l7policies[0].id)
+
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
+        with context.session.begin():
+            self._test_lb_and_listener_statuses(
+                context.session, lb_id=db_pool.load_balancer_id,
+                listener_ids=self._get_affected_listener_ids(db_pool))
+            self.repositories.pool.update(
+                context.session, db_pool.id,
+                provisioning_status=constants.PENDING_DELETE)
+
+            LOG.info("Sending delete Pool %s to provider %s", id, driver.name)
+            provider_pool = (
+                driver_utils.db_pool_to_provider_pool(db_pool,
+                                                      for_delete=True))
+            driver_utils.call_provider(driver.name, driver.pool_delete,
+                                       provider_pool)
+
+    @pecan_expose()
+    def _lookup(self, pool_id, *remainder):
+        """Overridden pecan _lookup method for custom routing.
+
+        Verifies that the pool passed in the url exists, and if so decides
+        to which controller, if any, control should be passed.
+        """
+        context = pecan_request.context.get('octavia_context')
+        if pool_id and remainder and remainder[0] == 'members':
+            remainder = remainder[1:]
+            with context.session.begin():
+                db_pool = self.repositories.pool.get(context.session,
+                                                     id=pool_id)
+            if not db_pool:
+                LOG.info("Pool %s not found.", pool_id)
+                raise exceptions.NotFound(resource=data_models.Pool._name(),
+                                          id=pool_id)
+            if remainder:
+                return member.MemberController(pool_id=db_pool.id), remainder
+            return member.MembersController(pool_id=db_pool.id), remainder
+        return None
diff --git a/octavia/api/v2/controllers/provider.py b/octavia/api/v2/controllers/provider.py
new file mode 100644
index 0000000000..be340bcaa5
--- /dev/null
+++ b/octavia/api/v2/controllers/provider.py
@@ -0,0 +1,174 @@
+# Copyright 2018 Rackspace, US Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
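
The `_lookup` overrides in this patch (pool.py above, provider.py below) implement pecan's routing hook: pecan hands them the next URL segment plus the remaining segments, and they return the child controller that should continue dispatch. A schematic, framework-free rendition of the pool routing, with strings standing in for the real controller classes:

def lookup(pool_id, *remainder):
    # Mirrors PoolsController._lookup for
    # /v2/lbaas/pools/<pool_id>/members[/<member_id>] style paths.
    if pool_id and remainder and remainder[0] == 'members':
        remainder = remainder[1:]
        if remainder:  # a member id follows -> single-member controller
            return 'MemberController', remainder
        return 'MembersController', remainder  # collection endpoint
    return None

print(lookup('pool-1', 'members', 'member-9'))
# ('MemberController', ('member-9',))
print(lookup('pool-1', 'members'))
# ('MembersController', ())
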
+ +from octavia_lib.api.drivers import exceptions as lib_exceptions +from oslo_config import cfg +from oslo_log import log as logging +from pecan import expose as pecan_expose +from pecan import request as pecan_request +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.drivers import driver_factory +from octavia.api.v2.controllers import base +from octavia.api.v2.types import provider as provider_types +from octavia.common import constants +from octavia.common import exceptions + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class ProviderController(base.BaseController): + RBAC_TYPE = constants.RBAC_PROVIDER + + def __init__(self): + super().__init__() + + @wsme_pecan.wsexpose(provider_types.ProvidersRootResponse, [wtypes.text], + ignore_extra_args=True) + def get_all(self, fields=None): + """List enabled provider drivers and their descriptions.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ALL) + + enabled_providers = CONF.api_settings.enabled_provider_drivers + response_list = [ + provider_types.ProviderResponse(name=key, description=value) for + key, value in enabled_providers.items()] + if fields is not None: + response_list = self._filter_fields(response_list, fields) + return provider_types.ProvidersRootResponse(providers=response_list) + + @pecan_expose() + def _lookup(self, provider, *remainder): + """Overridden pecan _lookup method for custom routing. + + Currently it checks if this was a flavor capabilities request and + routes the request to the FlavorCapabilitiesController. + """ + if provider and remainder: + if remainder[0] == 'flavor_capabilities': + return (FlavorCapabilitiesController(provider=provider), + remainder[1:]) + if remainder[0] == 'availability_zone_capabilities': + return ( + AvailabilityZoneCapabilitiesController(provider=provider), + remainder[1:]) + return None + + +class FlavorCapabilitiesController(base.BaseController): + RBAC_TYPE = constants.RBAC_PROVIDER_FLAVOR + + def __init__(self, provider): + super().__init__() + self.provider = provider + + @wsme_pecan.wsexpose(provider_types.FlavorCapabilitiesResponse, + [wtypes.text], ignore_extra_args=True, + status_code=200) + def get_all(self, fields=None): + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ALL) + self.driver = driver_factory.get_driver(self.provider) + try: + metadata_dict = self.driver.get_supported_flavor_metadata() + except lib_exceptions.NotImplementedError as e: + LOG.warning('Provider %s get_supported_flavor_metadata() ' + 'reported: %s', self.provider, e.operator_fault_string) + raise exceptions.ProviderNotImplementedError( + prov=self.provider, user_msg=e.user_fault_string) + + # Apply any valid filters provided as URL parameters + name_filter = None + description_filter = None + pagination_helper = pecan_request.context.get( + constants.PAGINATION_HELPER) + if pagination_helper: + name_filter = pagination_helper.params.get(constants.NAME) + description_filter = pagination_helper.params.get( + constants.DESCRIPTION) + if name_filter: + metadata_dict = { + key: value for key, value in metadata_dict.items() if + key == name_filter} + if description_filter: + metadata_dict = { + key: value for key, value in metadata_dict.items() if + value == description_filter} + + response_list = [ + provider_types.ProviderResponse(name=key, 
description=value) for + key, value in metadata_dict.items()] + if fields is not None: + response_list = self._filter_fields(response_list, fields) + return provider_types.FlavorCapabilitiesResponse( + flavor_capabilities=response_list) + + +class AvailabilityZoneCapabilitiesController(base.BaseController): + RBAC_TYPE = constants.RBAC_PROVIDER_AVAILABILITY_ZONE + + def __init__(self, provider): + super().__init__() + self.provider = provider + + @wsme_pecan.wsexpose(provider_types.AvailabilityZoneCapabilitiesResponse, + [wtypes.text], ignore_extra_args=True, + status_code=200) + def get_all(self, fields=None): + context = pecan_request.context.get('octavia_context') + self._auth_validate_action(context, context.project_id, + constants.RBAC_GET_ALL) + self.driver = driver_factory.get_driver(self.provider) + try: + metadata_dict = ( + self.driver.get_supported_availability_zone_metadata()) + except lib_exceptions.NotImplementedError as e: + LOG.warning( + 'Provider %s get_supported_availability_zone_metadata() ' + 'reported: %s', self.provider, e.operator_fault_string) + raise exceptions.ProviderNotImplementedError( + prov=self.provider, user_msg=e.user_fault_string) + + # Apply any valid filters provided as URL parameters + name_filter = None + description_filter = None + pagination_helper = pecan_request.context.get( + constants.PAGINATION_HELPER) + if pagination_helper: + name_filter = pagination_helper.params.get(constants.NAME) + description_filter = pagination_helper.params.get( + constants.DESCRIPTION) + if name_filter: + metadata_dict = { + key: value for key, value in metadata_dict.items() if + key == name_filter} + if description_filter: + metadata_dict = { + key: value for key, value in metadata_dict.items() if + value == description_filter} + + response_list = [ + provider_types.ProviderResponse(name=key, description=value) for + key, value in metadata_dict.items()] + if fields is not None: + response_list = self._filter_fields(response_list, fields) + return provider_types.AvailabilityZoneCapabilitiesResponse( + availability_zone_capabilities=response_list) diff --git a/octavia/api/v2/controllers/quotas.py b/octavia/api/v2/controllers/quotas.py new file mode 100644 index 0000000000..8aec9a4451 --- /dev/null +++ b/octavia/api/v2/controllers/quotas.py @@ -0,0 +1,123 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
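
Both capabilities controllers above filter the metadata dict reported by a provider driver using optional name and description URL parameters taken from the pagination helper. A sketch of that filtering with a made-up capabilities dict:

def apply_filters(metadata, name=None, description=None):
    """Keep entries whose key matches name and whose value matches
    description, as in the dict comprehensions above."""
    if name:
        metadata = {k: v for k, v in metadata.items() if k == name}
    if description:
        metadata = {k: v for k, v in metadata.items() if v == description}
    return metadata

capabilities = {
    'loadbalancer_topology': 'Topology of the load balancer',
    'compute_flavor': 'Nova flavor to boot the amphora with',
}
print(apply_filters(capabilities, name='compute_flavor'))
# {'compute_flavor': 'Nova flavor to boot the amphora with'}
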
+ +from oslo_config import cfg +from pecan import expose as pecan_expose +from pecan import request as pecan_request +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + +from octavia.api.v2.controllers import base +from octavia.api.v2.types import quotas as quota_types +from octavia.common import constants +from octavia.common import exceptions + +CONF = cfg.CONF +CONF.import_group('quotas', 'octavia.common.config') + + +class QuotasController(base.BaseController): + RBAC_TYPE = constants.RBAC_QUOTA + + def __init__(self): + super().__init__() + + @wsme_pecan.wsexpose(quota_types.QuotaResponse, wtypes.text) + def get(self, project_id): + """Get a single project's quota details.""" + context = pecan_request.context.get('octavia_context') + + self._auth_validate_action(context, project_id, constants.RBAC_GET_ONE) + + db_quotas = self._get_db_quotas(context.session, project_id) + return self._convert_db_to_type(db_quotas, quota_types.QuotaResponse) + + @wsme_pecan.wsexpose(quota_types.QuotaAllResponse, + ignore_extra_args=True) + def get_all(self, project_id=None): + """List all non-default quotas.""" + pcontext = pecan_request.context + context = pcontext.get('octavia_context') + + query_filter = self._auth_get_all(context, project_id) + + db_quotas, links = self.repositories.quotas.get_all( + context.session, + pagination_helper=pcontext.get(constants.PAGINATION_HELPER), + **query_filter) + quotas = quota_types.QuotaAllResponse.from_data_model(db_quotas) + quotas.quotas_links = links + return quotas + + @wsme_pecan.wsexpose(quota_types.QuotaResponse, wtypes.text, + body=quota_types.QuotaPUT, status_code=202) + def put(self, project_id, quotas): + """Update any or all quotas for a project.""" + context = pecan_request.context.get('octavia_context') + + if not project_id: + raise exceptions.MissingAPIProjectID() + + self._auth_validate_action(context, project_id, constants.RBAC_PUT) + + quotas_dict = quotas.to_dict() + with context.session.begin(): + self.repositories.quotas.update(context.session, project_id, + **quotas_dict) + db_quotas = self._get_db_quotas(context.session, project_id) + return self._convert_db_to_type(db_quotas, quota_types.QuotaResponse) + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=202) + def delete(self, project_id): + """Reset a project's quotas to the default values.""" + context = pecan_request.context.get('octavia_context') + + if not project_id: + raise exceptions.MissingAPIProjectID() + + self._auth_validate_action(context, project_id, constants.RBAC_DELETE) + + with context.session.begin(): + self.repositories.quotas.delete(context.session, project_id) + db_quotas = self._get_db_quotas(context.session, project_id) + return self._convert_db_to_type(db_quotas, quota_types.QuotaResponse) + + @pecan_expose() + def _lookup(self, project_id, *remainder): + """Overridden pecan _lookup method for routing default endpoint.""" + if project_id and remainder and remainder[0] == 'default': + return QuotasDefaultController(project_id), '' + return None + + +class QuotasDefaultController(base.BaseController): + RBAC_TYPE = constants.RBAC_QUOTA + + def __init__(self, project_id): + super().__init__() + self.project_id = project_id + + @wsme_pecan.wsexpose(quota_types.QuotaResponse, wtypes.text) + def get(self): + """Get a project's default quota details.""" + context = pecan_request.context.get('octavia_context') + + if not self.project_id: + raise exceptions.MissingAPIProjectID() + + self._auth_validate_action(context, self.project_id, + 
constants.RBAC_GET_DEFAULTS) + + quotas = self._get_default_quotas(self.project_id) + return self._convert_db_to_type(quotas, quota_types.QuotaResponse) diff --git a/octavia/api/v2/types/__init__.py b/octavia/api/v2/types/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/api/v2/types/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/api/v2/types/amphora.py b/octavia/api/v2/types/amphora.py new file mode 100644 index 0000000000..140e442105 --- /dev/null +++ b/octavia/api/v2/types/amphora.py @@ -0,0 +1,78 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import types as wtypes + +from octavia.api.common import types + + +class BaseAmphoraType(types.BaseType): + _type_to_model_map = {'loadbalancer_id': 'load_balancer_id'} + _child_map = {} + + +class AmphoraResponse(BaseAmphoraType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + loadbalancer_id = wtypes.wsattr(wtypes.UuidType()) + compute_id = wtypes.wsattr(wtypes.UuidType()) + lb_network_ip = wtypes.wsattr(types.IPAddressType()) + vrrp_ip = wtypes.wsattr(types.IPAddressType()) + ha_ip = wtypes.wsattr(types.IPAddressType()) + vrrp_port_id = wtypes.wsattr(wtypes.UuidType()) + ha_port_id = wtypes.wsattr(wtypes.UuidType()) + cert_expiration = wtypes.wsattr(wtypes.datetime.datetime) + cert_busy = wtypes.wsattr(bool) + role = wtypes.wsattr(wtypes.StringType()) + status = wtypes.wsattr(wtypes.StringType()) + vrrp_interface = wtypes.wsattr(wtypes.StringType()) + vrrp_id = wtypes.wsattr(wtypes.IntegerType()) + vrrp_priority = wtypes.wsattr(wtypes.IntegerType()) + cached_zone = wtypes.wsattr(wtypes.StringType()) + created_at = wtypes.wsattr(wtypes.datetime.datetime) + updated_at = wtypes.wsattr(wtypes.datetime.datetime) + image_id = wtypes.wsattr(wtypes.UuidType()) + compute_flavor = wtypes.wsattr(wtypes.StringType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + amphorae = super().from_data_model( + data_model, children=children) + + return amphorae + + +class AmphoraRootResponse(types.BaseType): + amphora = wtypes.wsattr(AmphoraResponse) + + +class AmphoraeRootResponse(types.BaseType): + amphorae = wtypes.wsattr([AmphoraResponse]) + amphorae_links = wtypes.wsattr([types.PageType]) + + +class AmphoraStatisticsResponse(BaseAmphoraType): + """Defines which attributes are to show on stats response.""" + active_connections = 
wtypes.wsattr(wtypes.IntegerType()) + bytes_in = wtypes.wsattr(wtypes.IntegerType()) + bytes_out = wtypes.wsattr(wtypes.IntegerType()) + id = wtypes.wsattr(wtypes.UuidType()) + listener_id = wtypes.wsattr(wtypes.UuidType()) + loadbalancer_id = wtypes.wsattr(wtypes.UuidType()) + request_errors = wtypes.wsattr(wtypes.IntegerType()) + total_connections = wtypes.wsattr(wtypes.IntegerType()) + + +class StatisticsRootResponse(types.BaseType): + amphora_stats = wtypes.wsattr([AmphoraStatisticsResponse]) diff --git a/octavia/api/v2/types/availability_zone_profile.py b/octavia/api/v2/types/availability_zone_profile.py new file mode 100644 index 0000000000..c7a3dc2ad2 --- /dev/null +++ b/octavia/api/v2/types/availability_zone_profile.py @@ -0,0 +1,70 @@ +# Copyright 2019 Verizon Media +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import types as wtypes + +from octavia.api.common import types + + +class BaseAvailabilityZoneProfileType(types.BaseType): + _type_to_model_map = {} + _child_map = {} + + +class AvailabilityZoneProfileResponse(BaseAvailabilityZoneProfileType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + provider_name = wtypes.wsattr(wtypes.StringType()) + availability_zone_data = wtypes.wsattr(wtypes.StringType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + availability_zone_profile = super().from_data_model( + data_model, children=children) + return availability_zone_profile + + +class AvailabilityZoneProfileRootResponse(types.BaseType): + availability_zone_profile = wtypes.wsattr(AvailabilityZoneProfileResponse) + + +class AvailabilityZoneProfilesRootResponse(types.BaseType): + availability_zone_profiles = wtypes.wsattr( + [AvailabilityZoneProfileResponse]) + availability_zone_profile_links = wtypes.wsattr([types.PageType]) + + +class AvailabilityZoneProfilePOST(BaseAvailabilityZoneProfileType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True) + provider_name = wtypes.wsattr(wtypes.StringType(max_length=255), + mandatory=True) + availability_zone_data = wtypes.wsattr(wtypes.StringType(max_length=4096), + mandatory=True) + + +class AvailabilityZoneProfileRootPOST(types.BaseType): + availability_zone_profile = wtypes.wsattr(AvailabilityZoneProfilePOST) + + +class AvailabilityZoneProfilePUT(BaseAvailabilityZoneProfileType): + """Defines the attributes of a PUT request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + provider_name = wtypes.wsattr(wtypes.StringType(max_length=255)) + availability_zone_data = wtypes.wsattr(wtypes.StringType(max_length=4096)) + + +class AvailabilityZoneProfileRootPUT(types.BaseType): + availability_zone_profile = wtypes.wsattr(AvailabilityZoneProfilePUT) diff --git a/octavia/api/v2/types/availability_zones.py b/octavia/api/v2/types/availability_zones.py new file mode 100644 index 0000000000..61e429355c --- 
/dev/null +++ b/octavia/api/v2/types/availability_zones.py @@ -0,0 +1,68 @@ +# Copyright 2019 Verizon Media +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import types as wtypes + +from octavia.api.common import types + + +class BaseAvailabilityZoneType(types.BaseType): + _type_to_model_map = {} + _child_map = {} + + +class AvailabilityZoneResponse(BaseAvailabilityZoneType): + """Defines which attributes are to be shown on any response.""" + name = wtypes.wsattr(wtypes.StringType()) + description = wtypes.wsattr(wtypes.StringType()) + enabled = wtypes.wsattr(bool) + availability_zone_profile_id = wtypes.wsattr(wtypes.StringType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + availability_zone = super().from_data_model( + data_model, children=children) + return availability_zone + + +class AvailabilityZoneRootResponse(types.BaseType): + availability_zone = wtypes.wsattr(AvailabilityZoneResponse) + + +class AvailabilityZonesRootResponse(types.BaseType): + availability_zones = wtypes.wsattr([AvailabilityZoneResponse]) + availability_zones_links = wtypes.wsattr([types.PageType]) + + +class AvailabilityZonePOST(BaseAvailabilityZoneType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + enabled = wtypes.wsattr(bool, default=True) + availability_zone_profile_id = wtypes.wsattr(wtypes.UuidType(), + mandatory=True) + + +class AvailabilityZoneRootPOST(types.BaseType): + availability_zone = wtypes.wsattr(AvailabilityZonePOST) + + +class AvailabilityZonePUT(BaseAvailabilityZoneType): + """Defines the attributes of a PUT request.""" + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + enabled = wtypes.wsattr(bool) + + +class AvailabilityZoneRootPUT(types.BaseType): + availability_zone = wtypes.wsattr(AvailabilityZonePUT) diff --git a/octavia/api/v2/types/flavor_profile.py b/octavia/api/v2/types/flavor_profile.py new file mode 100644 index 0000000000..99a682449c --- /dev/null +++ b/octavia/api/v2/types/flavor_profile.py @@ -0,0 +1,69 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
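
As with the availability zone types above, every request and response body in these v2 types is wrapped in a root object keyed by the resource name ({"availability_zone": {...}}, {"flavorprofile": {...}}, and so on). A sketch of that convention, with plain dicts standing in for the wtypes classes:

def wrap(resource_key, resource):
    """Nest a serialized resource under its root key."""
    return {resource_key: resource}

availability_zone = {'name': 'az1', 'enabled': True,
                     'availability_zone_profile_id': 'profile-uuid'}
print(wrap('availability_zone', availability_zone))
# {'availability_zone': {'name': 'az1', 'enabled': True,
#                        'availability_zone_profile_id': 'profile-uuid'}}
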
+ +from wsme import types as wtypes + +from octavia.api.common import types + + +class BaseFlavorProfileType(types.BaseType): + _type_to_model_map = {} + _child_map = {} + + +class FlavorProfileResponse(BaseFlavorProfileType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + provider_name = wtypes.wsattr(wtypes.StringType()) + flavor_data = wtypes.wsattr(wtypes.StringType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + flavorprofile = super().from_data_model( + data_model, children=children) + return flavorprofile + + +class FlavorProfileRootResponse(types.BaseType): + flavorprofile = wtypes.wsattr(FlavorProfileResponse) + + +class FlavorProfilesRootResponse(types.BaseType): + flavorprofiles = wtypes.wsattr([FlavorProfileResponse]) + flavorprofile_links = wtypes.wsattr([types.PageType]) + + +class FlavorProfilePOST(BaseFlavorProfileType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True) + provider_name = wtypes.wsattr(wtypes.StringType(max_length=255), + mandatory=True) + flavor_data = wtypes.wsattr(wtypes.StringType(max_length=4096), + mandatory=True) + + +class FlavorProfileRootPOST(types.BaseType): + flavorprofile = wtypes.wsattr(FlavorProfilePOST) + + +class FlavorProfilePUT(BaseFlavorProfileType): + """Defines the attributes of a PUT request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + provider_name = wtypes.wsattr(wtypes.StringType(max_length=255)) + flavor_data = wtypes.wsattr(wtypes.StringType(max_length=4096)) + + +class FlavorProfileRootPUT(types.BaseType): + flavorprofile = wtypes.wsattr(FlavorProfilePUT) diff --git a/octavia/api/v2/types/flavors.py b/octavia/api/v2/types/flavors.py new file mode 100644 index 0000000000..d999c8271e --- /dev/null +++ b/octavia/api/v2/types/flavors.py @@ -0,0 +1,69 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from wsme import types as wtypes + +from octavia.api.common import types + + +class BaseFlavorType(types.BaseType): + _type_to_model_map = {} + _child_map = {} + + +class FlavorResponse(BaseFlavorType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + description = wtypes.wsattr(wtypes.StringType()) + enabled = wtypes.wsattr(bool) + flavor_profile_id = wtypes.wsattr(wtypes.StringType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + flavor = super().from_data_model( + data_model, children=children) + return flavor + + +class FlavorRootResponse(types.BaseType): + flavor = wtypes.wsattr(FlavorResponse) + + +class FlavorsRootResponse(types.BaseType): + flavors = wtypes.wsattr([FlavorResponse]) + flavors_links = wtypes.wsattr([types.PageType]) + + +class FlavorPOST(BaseFlavorType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + enabled = wtypes.wsattr(bool, default=True) + flavor_profile_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True) + + +class FlavorRootPOST(types.BaseType): + flavor = wtypes.wsattr(FlavorPOST) + + +class FlavorPUT(BaseFlavorType): + """Defines the attributes of a PUT request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + enabled = wtypes.wsattr(bool) + + +class FlavorRootPUT(types.BaseType): + flavor = wtypes.wsattr(FlavorPUT) diff --git a/octavia/api/v2/types/health_monitor.py b/octavia/api/v2/types/health_monitor.py new file mode 100644 index 0000000000..76723ff6f1 --- /dev/null +++ b/octavia/api/v2/types/health_monitor.py @@ -0,0 +1,182 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
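
The health monitor types just below rename several API attributes to their data model counterparts through `_type_to_model_map`. A sketch of that translation, using the mapping from `BaseHealthMonitorType`:

TYPE_TO_MODEL = {'admin_state_up': 'enabled',
                 'max_retries': 'rise_threshold',
                 'max_retries_down': 'fall_threshold'}

def to_model_dict(api_dict):
    """Rename API keys to database model field names; any key not in
    the map passes through unchanged."""
    return {TYPE_TO_MODEL.get(k, k): v for k, v in api_dict.items()}

print(to_model_dict({'admin_state_up': True, 'max_retries': 3, 'delay': 5}))
# {'enabled': True, 'rise_threshold': 3, 'delay': 5}
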
+ +from wsme import types as wtypes + +from octavia.api.common import types +from octavia.common import constants + + +class BaseHealthMonitorType(types.BaseType): + _type_to_model_map = {'admin_state_up': 'enabled', + 'max_retries': 'rise_threshold', + 'max_retries_down': 'fall_threshold'} + _child_map = {} + + +class HealthMonitorResponse(BaseHealthMonitorType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + type = wtypes.wsattr(wtypes.text) + delay = wtypes.wsattr(wtypes.IntegerType()) + timeout = wtypes.wsattr(wtypes.IntegerType()) + max_retries = wtypes.wsattr(wtypes.IntegerType()) + max_retries_down = wtypes.wsattr(wtypes.IntegerType()) + http_method = wtypes.wsattr(wtypes.text) + url_path = wtypes.wsattr(wtypes.text) + expected_codes = wtypes.wsattr(wtypes.text) + admin_state_up = wtypes.wsattr(bool) + project_id = wtypes.wsattr(wtypes.StringType()) + pools = wtypes.wsattr([types.IdOnlyType]) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + created_at = wtypes.wsattr(wtypes.datetime.datetime) + updated_at = wtypes.wsattr(wtypes.datetime.datetime) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) + http_version = wtypes.wsattr(float) + domain_name = wtypes.wsattr(wtypes.StringType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + healthmonitor = super().from_data_model( + data_model, children=children) + + if cls._full_response(): + del healthmonitor.pools + else: + healthmonitor.pools = [ + types.IdOnlyType.from_data_model(data_model.pool)] + return healthmonitor + + +class HealthMonitorFullResponse(HealthMonitorResponse): + @classmethod + def _full_response(cls): + return True + + +class HealthMonitorRootResponse(types.BaseType): + healthmonitor = wtypes.wsattr(HealthMonitorResponse) + + +class HealthMonitorsRootResponse(types.BaseType): + healthmonitors = wtypes.wsattr([HealthMonitorResponse]) + healthmonitors_links = wtypes.wsattr([types.PageType]) + + +class HealthMonitorPOST(BaseHealthMonitorType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + type = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_TYPES), + mandatory=True) + delay = wtypes.wsattr(wtypes.IntegerType(minimum=0), mandatory=True) + timeout = wtypes.wsattr(wtypes.IntegerType(minimum=0), mandatory=True) + max_retries_down = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES, + maximum=constants.MAX_HM_RETRIES), + default=constants.DEFAULT_MAX_RETRIES_DOWN) + max_retries = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES, + maximum=constants.MAX_HM_RETRIES), + mandatory=True) + http_method = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_HTTP_METHODS)) + url_path = wtypes.wsattr( + types.URLPathType()) + expected_codes = wtypes.wsattr( + wtypes.StringType(pattern=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$')) + admin_state_up = wtypes.wsattr(bool, default=True) + # TODO(johnsom) Remove after deprecation (R series) + project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) + pool_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + http_version = wtypes.wsattr( + wtypes.Enum(float, *constants.SUPPORTED_HTTP_VERSIONS)) + domain_name = wtypes.wsattr( + 
wtypes.StringType(min_length=1, max_length=255, + pattern=constants.DOMAIN_NAME_REGEX)) + + +class HealthMonitorRootPOST(types.BaseType): + healthmonitor = wtypes.wsattr(HealthMonitorPOST) + + +class HealthMonitorPUT(BaseHealthMonitorType): + """Defines attributes that are acceptable of a PUT request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + delay = wtypes.wsattr(wtypes.IntegerType(minimum=0)) + timeout = wtypes.wsattr(wtypes.IntegerType(minimum=0)) + max_retries_down = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES, + maximum=constants.MAX_HM_RETRIES)) + max_retries = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES, + maximum=constants.MAX_HM_RETRIES)) + http_method = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_HTTP_METHODS)) + url_path = wtypes.wsattr(types.URLPathType()) + expected_codes = wtypes.wsattr( + wtypes.StringType(pattern=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$')) + admin_state_up = wtypes.wsattr(bool) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + http_version = wtypes.wsattr( + wtypes.Enum(float, *constants.SUPPORTED_HTTP_VERSIONS)) + domain_name = wtypes.wsattr( + wtypes.StringType(min_length=1, max_length=255, + pattern=constants.DOMAIN_NAME_REGEX)) + + +class HealthMonitorRootPUT(types.BaseType): + healthmonitor = wtypes.wsattr(HealthMonitorPUT) + + +class HealthMonitorSingleCreate(BaseHealthMonitorType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + type = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_TYPES), + mandatory=True) + delay = wtypes.wsattr(wtypes.IntegerType(minimum=0), mandatory=True) + timeout = wtypes.wsattr(wtypes.IntegerType(minimum=0), mandatory=True) + max_retries_down = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES, + maximum=constants.MAX_HM_RETRIES), + default=constants.DEFAULT_MAX_RETRIES_DOWN) + max_retries = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES, + maximum=constants.MAX_HM_RETRIES), + mandatory=True) + http_method = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_HTTP_METHODS)) + url_path = wtypes.wsattr(types.URLPathType()) + expected_codes = wtypes.wsattr( + wtypes.StringType(pattern=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$')) + admin_state_up = wtypes.wsattr(bool, default=True) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + http_version = wtypes.wsattr( + wtypes.Enum(float, *constants.SUPPORTED_HTTP_VERSIONS)) + domain_name = wtypes.wsattr( + wtypes.StringType(min_length=1, max_length=255, + pattern=constants.DOMAIN_NAME_REGEX)) + + +class HealthMonitorStatusResponse(BaseHealthMonitorType): + """Defines which attributes are to be shown on status response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + type = wtypes.wsattr(wtypes.text) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) diff --git a/octavia/api/v2/types/l7policy.py b/octavia/api/v2/types/l7policy.py new file mode 100644 index 0000000000..f7e1be198a --- /dev/null +++ b/octavia/api/v2/types/l7policy.py @@ -0,0 +1,148 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import types as wtypes + +from octavia.api.common import types +from octavia.api.v2.types import l7rule +from octavia.api.v2.types import pool +from octavia.common import constants + + +class BaseL7PolicyType(types.BaseType): + _type_to_model_map = {'admin_state_up': 'enabled'} + _child_map = {} + + +class L7PolicyResponse(BaseL7PolicyType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + description = wtypes.wsattr(wtypes.StringType()) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + admin_state_up = wtypes.wsattr(bool) + project_id = wtypes.wsattr(wtypes.StringType()) + action = wtypes.wsattr(wtypes.StringType()) + listener_id = wtypes.wsattr(wtypes.UuidType()) + redirect_pool_id = wtypes.wsattr(wtypes.UuidType()) + redirect_url = wtypes.wsattr(wtypes.StringType()) + redirect_prefix = wtypes.wsattr(wtypes.StringType()) + position = wtypes.wsattr(wtypes.IntegerType()) + rules = wtypes.wsattr([types.IdOnlyType]) + created_at = wtypes.wsattr(wtypes.datetime.datetime) + updated_at = wtypes.wsattr(wtypes.datetime.datetime) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) + redirect_http_code = wtypes.wsattr(wtypes.IntegerType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + policy = super().from_data_model( + data_model, children=children) + + if cls._full_response(): + rule_model = l7rule.L7RuleFullResponse + else: + rule_model = types.IdOnlyType + policy.rules = [ + rule_model.from_data_model(i) for i in data_model.l7rules] + return policy + + +class L7PolicyFullResponse(L7PolicyResponse): + @classmethod + def _full_response(cls): + return True + + rules = wtypes.wsattr([l7rule.L7RuleFullResponse]) + + +class L7PolicyRootResponse(types.BaseType): + l7policy = wtypes.wsattr(L7PolicyResponse) + + +class L7PoliciesRootResponse(types.BaseType): + l7policies = wtypes.wsattr([L7PolicyResponse]) + l7policies_links = wtypes.wsattr([types.PageType]) + + +class L7PolicyPOST(BaseL7PolicyType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + # TODO(johnsom) Remove after deprecation (R series) + project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) + action = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_L7POLICY_ACTIONS), + mandatory=True) + redirect_pool_id = wtypes.wsattr(wtypes.UuidType()) + redirect_url = wtypes.wsattr(types.URLType()) + redirect_prefix = wtypes.wsattr(types.URLType()) + position = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_POLICY_POSITION, + maximum=constants.MAX_POLICY_POSITION), + default=constants.MAX_POLICY_POSITION) + listener_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True) + rules = wtypes.wsattr([l7rule.L7RuleSingleCreate]) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + 
redirect_http_code = wtypes.wsattr( + wtypes.Enum(int, *constants.SUPPORTED_L7POLICY_REDIRECT_HTTP_CODES)) + + +class L7PolicyRootPOST(types.BaseType): + l7policy = wtypes.wsattr(L7PolicyPOST) + + +class L7PolicyPUT(BaseL7PolicyType): + """Defines attributes that are acceptable of a PUT request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool) + action = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_L7POLICY_ACTIONS)) + redirect_pool_id = wtypes.wsattr(wtypes.UuidType()) + redirect_url = wtypes.wsattr(types.URLType()) + redirect_prefix = wtypes.wsattr(types.URLType()) + position = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_POLICY_POSITION, + maximum=constants.MAX_POLICY_POSITION)) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + redirect_http_code = wtypes.wsattr( + wtypes.Enum(int, *constants.SUPPORTED_L7POLICY_REDIRECT_HTTP_CODES)) + + +class L7PolicyRootPUT(types.BaseType): + l7policy = wtypes.wsattr(L7PolicyPUT) + + +class L7PolicySingleCreate(BaseL7PolicyType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + action = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_L7POLICY_ACTIONS), + mandatory=True) + redirect_pool = wtypes.wsattr(pool.PoolSingleCreate) + redirect_url = wtypes.wsattr(types.URLType()) + redirect_prefix = wtypes.wsattr(types.URLType()) + position = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_POLICY_POSITION, + maximum=constants.MAX_POLICY_POSITION), + default=constants.MAX_POLICY_POSITION) + rules = wtypes.wsattr([l7rule.L7RuleSingleCreate]) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + redirect_http_code = wtypes.wsattr( + wtypes.Enum(int, *constants.SUPPORTED_L7POLICY_REDIRECT_HTTP_CODES)) diff --git a/octavia/api/v2/types/l7rule.py b/octavia/api/v2/types/l7rule.py new file mode 100644 index 0000000000..11c2015fc8 --- /dev/null +++ b/octavia/api/v2/types/l7rule.py @@ -0,0 +1,121 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
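+# Illustrative sketch (values invented): an L7 policy that redirects
+# requests whose path starts with /images, combining the policy types
+# above with an inline rule. The Enum wsattrs reject any value outside
+# the SUPPORTED_* constants, so validation happens at assignment time.
+from octavia.api.v2.types import l7policy
+from octavia.api.v2.types import l7rule
+
+rule = l7rule.L7RuleSingleCreate(
+    type='PATH', compare_type='STARTS_WITH', value='/images')
+policy = l7policy.L7PolicyPOST(
+    name='redirect-images',
+    action='REDIRECT_TO_URL',
+    redirect_url='http://images.example.com/',
+    redirect_http_code=302,
+    position=1,
+    # Invented UUID of the listener this policy attaches to.
+    listener_id='023f2e34-7806-443b-bfae-16c324569a3d',
+    rules=[rule])
+request_body = l7policy.L7PolicyRootPOST(l7policy=policy)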
+ +from wsme import types as wtypes + +from octavia.api.common import types +from octavia.common import constants + + +class BaseL7Type(types.BaseType): + _type_to_model_map = {'admin_state_up': 'enabled'} + _child_map = {} + + +class L7RuleResponse(BaseL7Type): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + type = wtypes.wsattr(wtypes.StringType()) + compare_type = wtypes.wsattr(wtypes.StringType()) + key = wtypes.wsattr(wtypes.StringType()) + value = wtypes.wsattr(wtypes.StringType()) + invert = wtypes.wsattr(bool) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + created_at = wtypes.wsattr(wtypes.datetime.datetime) + updated_at = wtypes.wsattr(wtypes.datetime.datetime) + project_id = wtypes.wsattr(wtypes.StringType()) + admin_state_up = wtypes.wsattr(bool) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) + + @classmethod + def from_data_model(cls, data_model, children=False): + rule = super().from_data_model( + data_model, children=children) + return rule + + +class L7RuleFullResponse(L7RuleResponse): + @classmethod + def _full_response(cls): + return True + + +class L7RuleRootResponse(types.BaseType): + rule = wtypes.wsattr(L7RuleResponse) + + +class L7RulesRootResponse(types.BaseType): + rules = wtypes.wsattr([L7RuleResponse]) + rules_links = wtypes.wsattr([types.PageType]) + + +class L7RulePOST(BaseL7Type): + """Defines mandatory and optional attributes of a POST request.""" + type = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_L7RULE_TYPES), + mandatory=True) + compare_type = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_L7RULE_COMPARE_TYPES), + mandatory=True) + key = wtypes.wsattr(wtypes.StringType(max_length=255, + pattern=r'^[^\r\n]*$')) + value = wtypes.wsattr(wtypes.StringType(max_length=255, + pattern=r'^[^\r\n]*$'), + mandatory=True) + invert = wtypes.wsattr(bool, default=False) + admin_state_up = wtypes.wsattr(bool, default=True) + # TODO(johnsom) Remove after deprecation (R series) + project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + + +class L7RuleRootPOST(types.BaseType): + rule = wtypes.wsattr(L7RulePOST) + + +class L7RulePUT(BaseL7Type): + """Defines attributes that are acceptable of a PUT request.""" + type = wtypes.wsattr( + wtypes.Enum(str, + *constants.SUPPORTED_L7RULE_TYPES)) + compare_type = wtypes.wsattr( + wtypes.Enum(str, + *constants.SUPPORTED_L7RULE_COMPARE_TYPES)) + key = wtypes.wsattr(wtypes.StringType(max_length=255, + pattern=r'^[^\r\n]*$')) + value = wtypes.wsattr(wtypes.StringType(max_length=255, + pattern=r'^[^\r\n]*$')) + invert = wtypes.wsattr(bool) + admin_state_up = wtypes.wsattr(bool) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + + +class L7RuleRootPUT(types.BaseType): + rule = wtypes.wsattr(L7RulePUT) + + +class L7RuleSingleCreate(BaseL7Type): + """Defines mandatory and optional attributes of a POST request.""" + type = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_L7RULE_TYPES), + mandatory=True) + compare_type = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_L7RULE_COMPARE_TYPES), + mandatory=True) + key = wtypes.wsattr(wtypes.StringType(max_length=255)) + value = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True) + invert = wtypes.wsattr(bool, default=False) + admin_state_up = wtypes.wsattr(bool, default=True) + tags = 
wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) diff --git a/octavia/api/v2/types/listener.py b/octavia/api/v2/types/listener.py new file mode 100644 index 0000000000..81451e48ea --- /dev/null +++ b/octavia/api/v2/types/listener.py @@ -0,0 +1,306 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia_lib.common import constants as lib_constants +from wsme import types as wtypes + +from octavia.api.common import types +from octavia.api.v2.types import l7policy +from octavia.api.v2.types import pool +from octavia.common import constants + + +class BaseListenerType(types.BaseType): + _type_to_model_map = { + 'admin_state_up': 'enabled', + 'default_tls_container_ref': 'tls_certificate_id', + 'sni_container_refs': 'sni_containers', + 'client_ca_tls_container_ref': 'client_ca_tls_certificate_id', + 'client_crl_container_ref': 'client_crl_container_id'} + _child_map = {} + + +class ListenerResponse(BaseListenerType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + description = wtypes.wsattr(wtypes.StringType()) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + admin_state_up = wtypes.wsattr(bool) + protocol = wtypes.wsattr(wtypes.text) + protocol_port = wtypes.wsattr(wtypes.IntegerType()) + connection_limit = wtypes.wsattr(wtypes.IntegerType()) + default_tls_container_ref = wtypes.wsattr(wtypes.StringType()) + sni_container_refs = [wtypes.StringType()] + project_id = wtypes.wsattr(wtypes.StringType()) + default_pool_id = wtypes.wsattr(wtypes.UuidType()) + l7policies = wtypes.wsattr([types.IdOnlyType]) + insert_headers = wtypes.wsattr(wtypes.DictType(str, str)) + created_at = wtypes.wsattr(wtypes.datetime.datetime) + updated_at = wtypes.wsattr(wtypes.datetime.datetime) + loadbalancers = wtypes.wsattr([types.IdOnlyType]) + timeout_client_data = wtypes.wsattr(wtypes.IntegerType()) + timeout_member_connect = wtypes.wsattr(wtypes.IntegerType()) + timeout_member_data = wtypes.wsattr(wtypes.IntegerType()) + timeout_tcp_inspect = wtypes.wsattr(wtypes.IntegerType()) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) + client_ca_tls_container_ref = wtypes.StringType() + client_authentication = wtypes.wsattr(wtypes.StringType()) + client_crl_container_ref = wtypes.wsattr(wtypes.StringType()) + allowed_cidrs = wtypes.wsattr([types.CidrType()]) + tls_ciphers = wtypes.StringType() + tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) + alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType())) + hsts_max_age = wtypes.wsattr(wtypes.IntegerType()) + hsts_include_subdomains = wtypes.wsattr(bool) + hsts_preload = wtypes.wsattr(bool) + + @classmethod + def from_data_model(cls, data_model, children=False): + listener = super().from_data_model( + data_model, children=children) + + listener.sni_container_refs = [ + sni_c.tls_container_id for sni_c in 
data_model.sni_containers] + listener.allowed_cidrs = [ + c.cidr for c in data_model.allowed_cidrs] or None + if cls._full_response(): + del listener.loadbalancers + l7policy_type = l7policy.L7PolicyFullResponse + else: + listener.loadbalancers = [ + types.IdOnlyType.from_data_model(data_model.load_balancer)] + l7policy_type = types.IdOnlyType + + listener.l7policies = [ + l7policy_type.from_data_model(i) for i in data_model.l7policies] + + listener.tls_versions = data_model.tls_versions + listener.alpn_protocols = data_model.alpn_protocols + listener.hsts_max_age = data_model.hsts_max_age + listener.hsts_include_subdomains = data_model.hsts_include_subdomains + listener.hsts_preload = data_model.hsts_preload + + return listener + + +class ListenerFullResponse(ListenerResponse): + @classmethod + def _full_response(cls): + return True + + l7policies = wtypes.wsattr([l7policy.L7PolicyFullResponse]) + + +class ListenerRootResponse(types.BaseType): + listener = wtypes.wsattr(ListenerResponse) + + +class ListenersRootResponse(types.BaseType): + listeners = wtypes.wsattr([ListenerResponse]) + listeners_links = wtypes.wsattr([types.PageType]) + + +class ListenerPOST(BaseListenerType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + protocol = wtypes.wsattr(wtypes.Enum(str, + *lib_constants.LISTENER_SUPPORTED_PROTOCOLS), + mandatory=True) + protocol_port = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_PORT_NUMBER, + maximum=constants.MAX_PORT_NUMBER), mandatory=True) + connection_limit = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_CONNECTION_LIMIT), + default=constants.DEFAULT_CONNECTION_LIMIT) + default_tls_container_ref = wtypes.wsattr( + wtypes.StringType(max_length=255)) + sni_container_refs = [wtypes.StringType(max_length=255)] + # TODO(johnsom) Remove after deprecation (R series) + project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) + default_pool_id = wtypes.wsattr(wtypes.UuidType()) + default_pool = wtypes.wsattr(pool.PoolSingleCreate) + l7policies = wtypes.wsattr([l7policy.L7PolicySingleCreate], default=[]) + insert_headers = wtypes.wsattr( + wtypes.DictType(str, wtypes.StringType(max_length=255))) + loadbalancer_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True) + timeout_client_data = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + timeout_member_connect = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + timeout_member_data = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + timeout_tcp_inspect = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + client_ca_tls_container_ref = wtypes.StringType(max_length=255) + client_authentication = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES), + default=constants.CLIENT_AUTH_NONE) + client_crl_container_ref = wtypes.StringType(max_length=255) + allowed_cidrs = wtypes.wsattr([types.CidrType()]) + tls_ciphers = wtypes.StringType(max_length=2048) + tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType( + max_length=32))) + alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType())) + 
hsts_max_age = wtypes.wsattr(wtypes.IntegerType(minimum=0)) + hsts_include_subdomains = wtypes.wsattr(bool, default=False) + hsts_preload = wtypes.wsattr(bool, default=False) + + +class ListenerRootPOST(types.BaseType): + listener = wtypes.wsattr(ListenerPOST) + + +class ListenerPUT(BaseListenerType): + """Defines attributes that are acceptable of a PUT request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool) + connection_limit = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_CONNECTION_LIMIT)) + default_tls_container_ref = wtypes.wsattr( + wtypes.StringType(max_length=255)) + sni_container_refs = [wtypes.StringType(max_length=255)] + default_pool_id = wtypes.wsattr(wtypes.UuidType()) + insert_headers = wtypes.wsattr( + wtypes.DictType(str, wtypes.StringType(max_length=255))) + timeout_client_data = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + timeout_member_connect = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + timeout_member_data = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + timeout_tcp_inspect = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + client_ca_tls_container_ref = wtypes.StringType(max_length=255) + client_authentication = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES)) + client_crl_container_ref = wtypes.StringType(max_length=255) + allowed_cidrs = wtypes.wsattr([types.CidrType()]) + tls_ciphers = wtypes.StringType(max_length=2048) + tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType( + max_length=32))) + alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType())) + hsts_max_age = wtypes.wsattr(wtypes.IntegerType(minimum=0)) + hsts_include_subdomains = wtypes.wsattr(bool) + hsts_preload = wtypes.wsattr(bool) + + +class ListenerRootPUT(types.BaseType): + listener = wtypes.wsattr(ListenerPUT) + + +class ListenerSingleCreate(BaseListenerType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + protocol = wtypes.wsattr(wtypes.Enum(str, + *lib_constants.LISTENER_SUPPORTED_PROTOCOLS), + mandatory=True) + protocol_port = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_PORT_NUMBER, + maximum=constants.MAX_PORT_NUMBER), mandatory=True) + connection_limit = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_CONNECTION_LIMIT), + default=constants.DEFAULT_CONNECTION_LIMIT) + default_tls_container_ref = wtypes.wsattr( + wtypes.StringType(max_length=255)) + sni_container_refs = [wtypes.StringType(max_length=255)] + default_pool_id = wtypes.wsattr(wtypes.UuidType()) + default_pool = wtypes.wsattr(pool.PoolSingleCreate) + l7policies = wtypes.wsattr([l7policy.L7PolicySingleCreate], default=[]) + insert_headers = wtypes.wsattr( + wtypes.DictType(str, wtypes.StringType(max_length=255))) + timeout_client_data = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + timeout_member_connect = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + 
maximum=constants.MAX_TIMEOUT)) + timeout_member_data = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + timeout_tcp_inspect = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_TIMEOUT, + maximum=constants.MAX_TIMEOUT)) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + client_ca_tls_container_ref = wtypes.StringType(max_length=255) + client_authentication = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES), + default=constants.CLIENT_AUTH_NONE) + client_crl_container_ref = wtypes.StringType(max_length=255) + allowed_cidrs = wtypes.wsattr([types.CidrType()]) + tls_ciphers = wtypes.StringType(max_length=2048) + tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType( + max_length=32))) + alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType())) + hsts_max_age = wtypes.wsattr(wtypes.IntegerType()) + hsts_include_subdomains = wtypes.wsattr(bool, default=False) + hsts_preload = wtypes.wsattr(bool, default=False) + + +class ListenerStatusResponse(BaseListenerType): + """Defines which attributes are to be shown on status response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + pools = wtypes.wsattr([pool.PoolStatusResponse]) + + @classmethod + def from_data_model(cls, data_model, children=False): + listener = super().from_data_model( + data_model, children=children) + + pool_model = pool.PoolStatusResponse + listener.pools = [ + pool_model.from_data_model(i) for i in data_model.pools] + + if not listener.name: + listener.name = "" + + return listener + + +class ListenerStatisticsResponse(BaseListenerType): + """Defines which attributes are to show on stats response.""" + bytes_in = wtypes.wsattr(wtypes.IntegerType()) + bytes_out = wtypes.wsattr(wtypes.IntegerType()) + active_connections = wtypes.wsattr(wtypes.IntegerType()) + total_connections = wtypes.wsattr(wtypes.IntegerType()) + request_errors = wtypes.wsattr(wtypes.IntegerType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + result = super().from_data_model( + data_model, children=children) + return result + + +class StatisticsRootResponse(types.BaseType): + stats = wtypes.wsattr(ListenerStatisticsResponse) diff --git a/octavia/api/v2/types/load_balancer.py b/octavia/api/v2/types/load_balancer.py new file mode 100644 index 0000000000..7aaef74628 --- /dev/null +++ b/octavia/api/v2/types/load_balancer.py @@ -0,0 +1,214 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
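+# Illustrative sketch (values invented): a plain HTTP listener request as
+# the POST types above would accept it. protocol must be one of the
+# octavia-lib supported listener protocols, and allowed_cidrs entries are
+# validated by CidrType.
+from octavia.api.v2.types import listener
+
+web_listener = listener.ListenerPOST(
+    name='web-http',
+    protocol='HTTP',
+    protocol_port=80,
+    # -1 is the default connection limit, meaning unlimited.
+    connection_limit=-1,
+    allowed_cidrs=['192.0.2.0/24'],
+    # Invented UUID of the parent load balancer.
+    loadbalancer_id='607226db-27ef-4d41-ae89-f2a800e9c2db')
+request_body = listener.ListenerRootPOST(listener=web_listener)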
+ +from wsme import types as wtypes + +from octavia.api.common import types +from octavia.api.v2.types import listener +from octavia.api.v2.types import pool + + +class BaseLoadBalancerType(types.BaseType): + _type_to_model_map = {'vip_address': 'vip.ip_address', + 'vip_subnet_id': 'vip.subnet_id', + 'vip_port_id': 'vip.port_id', + 'vip_network_id': 'vip.network_id', + 'vip_qos_policy_id': 'vip.qos_policy_id', + 'vip_vnic_type': 'vip.vnic_type', + 'vip_sg_ids': 'vip.sg_ids', + 'admin_state_up': 'enabled'} + _child_map = {'vip': { + 'ip_address': 'vip_address', + 'subnet_id': 'vip_subnet_id', + 'port_id': 'vip_port_id', + 'network_id': 'vip_network_id', + 'qos_policy_id': 'vip_qos_policy_id', + 'vnic_type': 'vip_vnic_type', + 'sg_ids': 'vip_sg_ids'}} + + +class AdditionalVipsType(types.BaseType): + """Type for additional vips""" + subnet_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True) + ip_address = wtypes.wsattr(types.IPAddressType()) + port_id = wtypes.wsattr(wtypes.UuidType()) + + +class LoadBalancerResponse(BaseLoadBalancerType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + description = wtypes.wsattr(wtypes.StringType()) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + admin_state_up = wtypes.wsattr(bool) + project_id = wtypes.wsattr(wtypes.StringType()) + created_at = wtypes.wsattr(wtypes.datetime.datetime) + updated_at = wtypes.wsattr(wtypes.datetime.datetime) + vip_address = wtypes.wsattr(types.IPAddressType()) + vip_port_id = wtypes.wsattr(wtypes.UuidType()) + vip_subnet_id = wtypes.wsattr(wtypes.UuidType()) + vip_network_id = wtypes.wsattr(wtypes.UuidType()) + vip_sg_ids = wtypes.wsattr([wtypes.UuidType()]) + additional_vips = wtypes.wsattr([AdditionalVipsType]) + listeners = wtypes.wsattr([types.IdOnlyType]) + pools = wtypes.wsattr([types.IdOnlyType]) + provider = wtypes.wsattr(wtypes.StringType()) + flavor_id = wtypes.wsattr(wtypes.UuidType()) + vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType()) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) + availability_zone = wtypes.wsattr(wtypes.StringType()) + vip_vnic_type = wtypes.wsattr(wtypes.StringType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + result = super().from_data_model( + data_model, children=children) + if data_model.vip: + result.vip_subnet_id = data_model.vip.subnet_id + result.vip_port_id = data_model.vip.port_id + result.vip_address = data_model.vip.ip_address + result.vip_network_id = data_model.vip.network_id + result.vip_qos_policy_id = data_model.vip.qos_policy_id + result.vip_vnic_type = data_model.vip.vnic_type + result.vip_sg_ids = data_model.vip.sg_ids + result.additional_vips = [ + AdditionalVipsType.from_data_model(i) + for i in data_model.additional_vips] + if cls._full_response(): + listener_model = listener.ListenerFullResponse + pool_model = pool.PoolFullResponse + else: + listener_model = types.IdOnlyType + pool_model = types.IdOnlyType + result.listeners = [ + listener_model.from_data_model(i) for i in data_model.listeners] + result.pools = [ + pool_model.from_data_model(i) for i in data_model.pools] + + if not result.provider: + result.provider = "octavia" + + return result + + +class LoadBalancerFullResponse(LoadBalancerResponse): + @classmethod + def _full_response(cls): + return True + + listeners = wtypes.wsattr([listener.ListenerFullResponse]) + pools = 
wtypes.wsattr([pool.PoolFullResponse]) + + +class LoadBalancerRootResponse(types.BaseType): + loadbalancer = wtypes.wsattr(LoadBalancerResponse) + + +class LoadBalancerFullRootResponse(LoadBalancerRootResponse): + loadbalancer = wtypes.wsattr(LoadBalancerFullResponse) + + +class LoadBalancersRootResponse(types.BaseType): + loadbalancers = wtypes.wsattr([LoadBalancerResponse]) + loadbalancers_links = wtypes.wsattr([types.PageType]) + + +class LoadBalancerPOST(BaseLoadBalancerType): + """Defines mandatory and optional attributes of a POST request.""" + + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + vip_address = wtypes.wsattr(types.IPAddressType()) + vip_port_id = wtypes.wsattr(wtypes.UuidType()) + vip_subnet_id = wtypes.wsattr(wtypes.UuidType()) + vip_network_id = wtypes.wsattr(wtypes.UuidType()) + vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType()) + vip_sg_ids = wtypes.wsattr([wtypes.UuidType()]) + additional_vips = wtypes.wsattr([AdditionalVipsType], default=[]) + project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) + listeners = wtypes.wsattr([listener.ListenerSingleCreate], default=[]) + pools = wtypes.wsattr([pool.PoolSingleCreate], default=[]) + provider = wtypes.wsattr(wtypes.StringType(max_length=64)) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + flavor_id = wtypes.wsattr(wtypes.UuidType()) + availability_zone = wtypes.wsattr(wtypes.StringType(max_length=255)) + + +class LoadBalancerRootPOST(types.BaseType): + loadbalancer = wtypes.wsattr(LoadBalancerPOST) + + +class LoadBalancerPUT(BaseLoadBalancerType): + """Defines attributes that are acceptable of a PUT request.""" + + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType()) + admin_state_up = wtypes.wsattr(bool) + vip_sg_ids = wtypes.wsattr([wtypes.UuidType()]) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + + +class LoadBalancerRootPUT(types.BaseType): + loadbalancer = wtypes.wsattr(LoadBalancerPUT) + + +class LoadBalancerStatusResponse(BaseLoadBalancerType): + """Defines which attributes are to be shown on status response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + listeners = wtypes.wsattr([listener.ListenerStatusResponse]) + + @classmethod + def from_data_model(cls, data_model, children=False): + result = super().from_data_model( + data_model, children=children) + listener_model = listener.ListenerStatusResponse + result.listeners = [ + listener_model.from_data_model(i) for i in data_model.listeners] + if not result.name: + result.name = "" + + return result + + +class StatusResponse(wtypes.Base): + loadbalancer = wtypes.wsattr(LoadBalancerStatusResponse) + + +class StatusRootResponse(types.BaseType): + statuses = wtypes.wsattr(StatusResponse) + + +class LoadBalancerStatisticsResponse(BaseLoadBalancerType): + """Defines which attributes are to show on stats response.""" + bytes_in = wtypes.wsattr(wtypes.IntegerType()) + bytes_out = wtypes.wsattr(wtypes.IntegerType()) + active_connections = wtypes.wsattr(wtypes.IntegerType()) + total_connections = wtypes.wsattr(wtypes.IntegerType()) + request_errors = wtypes.wsattr(wtypes.IntegerType()) + + 
@classmethod + def from_data_model(cls, data_model, children=False): + result = super().from_data_model( + data_model, children=children) + return result + + +class StatisticsRootResponse(types.BaseType): + stats = wtypes.wsattr(LoadBalancerStatisticsResponse) diff --git a/octavia/api/v2/types/member.py b/octavia/api/v2/types/member.py new file mode 100644 index 0000000000..f73bc58fa9 --- /dev/null +++ b/octavia/api/v2/types/member.py @@ -0,0 +1,154 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import types as wtypes + +from octavia.api.common import types +from octavia.common import constants + + +class BaseMemberType(types.BaseType): + _type_to_model_map = {'admin_state_up': 'enabled', + 'address': 'ip_address'} + _child_map = {} + + +class MemberResponse(BaseMemberType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + admin_state_up = wtypes.wsattr(bool) + address = wtypes.wsattr(types.IPAddressType()) + protocol_port = wtypes.wsattr(wtypes.IntegerType()) + weight = wtypes.wsattr(wtypes.IntegerType()) + backup = wtypes.wsattr(bool) + subnet_id = wtypes.wsattr(wtypes.UuidType()) + project_id = wtypes.wsattr(wtypes.StringType()) + created_at = wtypes.wsattr(wtypes.datetime.datetime) + updated_at = wtypes.wsattr(wtypes.datetime.datetime) + monitor_address = wtypes.wsattr(types.IPAddressType()) + monitor_port = wtypes.wsattr(wtypes.IntegerType()) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) + vnic_type = wtypes.wsattr(wtypes.StringType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + member = super().from_data_model( + data_model, children=children) + return member + + +class MemberFullResponse(MemberResponse): + @classmethod + def _full_response(cls): + return True + + +class MemberRootResponse(types.BaseType): + member = wtypes.wsattr(MemberResponse) + + +class MembersRootResponse(types.BaseType): + members = wtypes.wsattr([MemberResponse]) + members_links = wtypes.wsattr([types.PageType]) + + +class MemberPOST(BaseMemberType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + address = wtypes.wsattr(types.IPAddressType(), mandatory=True) + protocol_port = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_PORT_NUMBER, maximum=constants.MAX_PORT_NUMBER), + mandatory=True) + weight = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_WEIGHT, maximum=constants.MAX_WEIGHT), + default=constants.DEFAULT_WEIGHT) + backup = wtypes.wsattr(bool, default=False) + subnet_id = wtypes.wsattr(wtypes.UuidType()) + # TODO(johnsom) Remove after deprecation (R series) + project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) + monitor_port = 
wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_PORT_NUMBER, maximum=constants.MAX_PORT_NUMBER), + default=None) + monitor_address = wtypes.wsattr(types.IPAddressType(), default=None) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + request_sriov = wtypes.wsattr(bool, default=False) + + +class MemberRootPOST(types.BaseType): + member = wtypes.wsattr(MemberPOST) + + +class MemberPUT(BaseMemberType): + """Defines attributes that are acceptable of a PUT request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool) + weight = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_WEIGHT, maximum=constants.MAX_WEIGHT)) + backup = wtypes.wsattr(bool) + monitor_port = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_PORT_NUMBER, maximum=constants.MAX_PORT_NUMBER)) + monitor_address = wtypes.wsattr(types.IPAddressType()) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + + +class MemberRootPUT(types.BaseType): + member = wtypes.wsattr(MemberPUT) + + +class MembersRootPUT(types.BaseType): + members = wtypes.wsattr([MemberPOST]) + + +class MemberSingleCreate(BaseMemberType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + address = wtypes.wsattr(types.IPAddressType(), mandatory=True) + protocol_port = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_PORT_NUMBER, maximum=constants.MAX_PORT_NUMBER), + mandatory=True) + weight = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_WEIGHT, maximum=constants.MAX_WEIGHT), + default=constants.DEFAULT_WEIGHT) + backup = wtypes.wsattr(bool, default=False) + subnet_id = wtypes.wsattr(wtypes.UuidType()) + monitor_port = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_PORT_NUMBER, maximum=constants.MAX_PORT_NUMBER)) + monitor_address = wtypes.wsattr(types.IPAddressType()) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + request_sriov = wtypes.wsattr(bool, default=False) + + +class MemberStatusResponse(BaseMemberType): + """Defines which attributes are to be shown on status response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + address = wtypes.wsattr(types.IPAddressType()) + protocol_port = wtypes.wsattr(wtypes.IntegerType()) + + @classmethod + def from_data_model(cls, data_model, children=False): + member = super().from_data_model( + data_model, children=children) + + if not member.name: + member.name = "" + + return member diff --git a/octavia/api/v2/types/pool.py b/octavia/api/v2/types/pool.py new file mode 100644 index 0000000000..d83478935d --- /dev/null +++ b/octavia/api/v2/types/pool.py @@ -0,0 +1,254 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
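+# Illustrative sketch (values invented): the *SingleCreate variants above
+# and the pool types defined below exist so that a whole object tree can
+# be expressed in one load balancer create request ("full tree" create),
+# with children nested instead of referenced by ID.
+from octavia.api.v2.types import listener as listener_types
+from octavia.api.v2.types import load_balancer
+from octavia.api.v2.types import member as member_types
+from octavia.api.v2.types import pool as pool_types
+
+web_member = member_types.MemberSingleCreate(
+    address='192.0.2.10', protocol_port=8080, weight=1)
+web_pool = pool_types.PoolSingleCreate(
+    name='web-pool', protocol='HTTP', lb_algorithm='ROUND_ROBIN',
+    members=[web_member])
+web_listener = listener_types.ListenerSingleCreate(
+    name='web', protocol='HTTP', protocol_port=80,
+    default_pool=web_pool)
+lb = load_balancer.LoadBalancerPOST(
+    name='web-lb',
+    # Invented UUID of the VIP subnet.
+    vip_subnet_id='0d4f6a08-60b7-44ab-8903-f7d76500eecc',
+    listeners=[web_listener])
+request_body = load_balancer.LoadBalancerRootPOST(loadbalancer=lb)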
+ +from octavia_lib.common import constants as lib_constants +from wsme import types as wtypes + +from octavia.api.common import types +from octavia.api.v2.types import health_monitor +from octavia.api.v2.types import member +from octavia.common import constants + + +class SessionPersistenceResponse(types.BaseType): + """Defines which attributes are to be shown on any response.""" + type = wtypes.wsattr(wtypes.text) + cookie_name = wtypes.wsattr(wtypes.text) + persistence_timeout = wtypes.wsattr(wtypes.IntegerType()) + persistence_granularity = wtypes.wsattr(types.IPAddressType()) + + +class SessionPersistencePOST(types.BaseType): + """Defines mandatory and optional attributes of a POST request.""" + type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES), + mandatory=True) + # pattern of invalid characters is based on + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie + cookie_name = wtypes.wsattr(wtypes.StringType( + max_length=255, pattern=r'^[^\s,;\\]+$'), + default=None) + persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None) + persistence_granularity = wtypes.wsattr(types.IPAddressType(), + default=None) + + +class SessionPersistencePUT(types.BaseType): + """Defines attributes that are acceptable of a PUT request.""" + type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES)) + # pattern of invalid characters is based on + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie + cookie_name = wtypes.wsattr(wtypes.StringType( + max_length=255, pattern=r'^[^\s,;\\]+$'), + default=None) + persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None) + persistence_granularity = wtypes.wsattr(types.IPAddressType(), + default=None) + + +class BasePoolType(types.BaseType): + _type_to_model_map = {'admin_state_up': 'enabled', + 'healthmonitor': 'health_monitor', + 'healthmonitor_id': 'health_monitor.id', + 'tls_container_ref': 'tls_certificate_id', + 'ca_tls_container_ref': 'ca_tls_certificate_id', + 'crl_container_ref': 'crl_container_id'} + + _child_map = {'health_monitor': {'id': 'healthmonitor_id'}} + + +class PoolResponse(BasePoolType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + description = wtypes.wsattr(wtypes.StringType()) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + admin_state_up = wtypes.wsattr(bool) + protocol = wtypes.wsattr(wtypes.text) + lb_algorithm = wtypes.wsattr(wtypes.text) + session_persistence = wtypes.wsattr(SessionPersistenceResponse) + project_id = wtypes.wsattr(wtypes.StringType()) + loadbalancers = wtypes.wsattr([types.IdOnlyType]) + listeners = wtypes.wsattr([types.IdOnlyType]) + created_at = wtypes.wsattr(wtypes.datetime.datetime) + updated_at = wtypes.wsattr(wtypes.datetime.datetime) + healthmonitor_id = wtypes.wsattr(wtypes.UuidType()) + members = wtypes.wsattr([types.IdOnlyType]) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) + tls_container_ref = wtypes.wsattr(wtypes.StringType()) + ca_tls_container_ref = wtypes.wsattr(wtypes.StringType()) + crl_container_ref = wtypes.wsattr(wtypes.StringType()) + tls_enabled = wtypes.wsattr(bool) + tls_ciphers = wtypes.wsattr(wtypes.StringType()) + tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) + alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType())) + + @classmethod + def from_data_model(cls, 
data_model, children=False): + pool = super().from_data_model( + data_model, children=children) + if data_model.session_persistence: + pool.session_persistence = ( + SessionPersistenceResponse.from_data_model( + data_model.session_persistence)) + + if cls._full_response(): + del pool.loadbalancers + member_model = member.MemberFullResponse + if data_model.health_monitor: + pool.healthmonitor = ( + health_monitor.HealthMonitorFullResponse + .from_data_model(data_model.health_monitor)) + else: + if data_model.load_balancer: + pool.loadbalancers = [ + types.IdOnlyType.from_data_model(data_model.load_balancer)] + else: + pool.loadbalancers = [] + member_model = types.IdOnlyType + if data_model.health_monitor: + pool.healthmonitor_id = data_model.health_monitor.id + pool.listeners = [ + types.IdOnlyType.from_data_model(i) for i in data_model.listeners] + pool.members = [ + member_model.from_data_model(i) for i in data_model.members] + + pool.tls_versions = data_model.tls_versions + pool.alpn_protocols = data_model.alpn_protocols + + return pool + + +class PoolFullResponse(PoolResponse): + @classmethod + def _full_response(cls): + return True + + members = wtypes.wsattr([member.MemberFullResponse]) + healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorFullResponse) + + +class PoolRootResponse(types.BaseType): + pool = wtypes.wsattr(PoolResponse) + + +class PoolsRootResponse(types.BaseType): + pools = wtypes.wsattr([PoolResponse]) + pools_links = wtypes.wsattr([types.PageType]) + + +class PoolPOST(BasePoolType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + listener_id = wtypes.wsattr(wtypes.UuidType()) + loadbalancer_id = wtypes.wsattr(wtypes.UuidType()) + protocol = wtypes.wsattr( + wtypes.Enum(str, *lib_constants.POOL_SUPPORTED_PROTOCOLS), + mandatory=True) + lb_algorithm = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS), + mandatory=True) + session_persistence = wtypes.wsattr(SessionPersistencePOST) + # TODO(johnsom) Remove after deprecation (R series) + project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) + healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate) + members = wtypes.wsattr([member.MemberSingleCreate]) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + tls_container_ref = wtypes.wsattr( + wtypes.StringType(max_length=255)) + ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) + crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) + tls_enabled = wtypes.wsattr(bool, default=False) + tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048)) + tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType( + max_length=32))) + alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType())) + + +class PoolRootPOST(types.BaseType): + pool = wtypes.wsattr(PoolPOST) + + +class PoolPUT(BasePoolType): + """Defines attributes that are acceptable of a PUT request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool) + lb_algorithm = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS)) + session_persistence = wtypes.wsattr(SessionPersistencePUT) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + 
tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) + ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) + crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) + tls_enabled = wtypes.wsattr(bool) + tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048)) + tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType( + max_length=32))) + alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType())) + + +class PoolRootPut(types.BaseType): + pool = wtypes.wsattr(PoolPUT) + + +class PoolSingleCreate(BasePoolType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + protocol = wtypes.wsattr( + wtypes.Enum(str, *lib_constants.POOL_SUPPORTED_PROTOCOLS)) + lb_algorithm = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS)) + session_persistence = wtypes.wsattr(SessionPersistencePOST) + healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate) + members = wtypes.wsattr([member.MemberSingleCreate]) + tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) + tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) + ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) + crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255)) + tls_enabled = wtypes.wsattr(bool, default=False) + tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048)) + tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType( + max_length=32))) + alpn_protocols = wtypes.wsattr(wtypes.ArrayType(types.AlpnProtocolType())) + + +class PoolStatusResponse(BasePoolType): + """Defines which attributes are to be shown on status response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + health_monitor = wtypes.wsattr( + health_monitor.HealthMonitorStatusResponse) + members = wtypes.wsattr([member.MemberStatusResponse]) + + @classmethod + def from_data_model(cls, data_model, children=False): + pool = super().from_data_model( + data_model, children=children) + + member_model = member.MemberStatusResponse + if data_model.health_monitor: + pool.health_monitor = ( + health_monitor.HealthMonitorStatusResponse.from_data_model( + data_model.health_monitor)) + pool.members = [ + member_model.from_data_model(i) for i in data_model.members] + + return pool diff --git a/octavia/api/v2/types/provider.py b/octavia/api/v2/types/provider.py new file mode 100644 index 0000000000..5a167115af --- /dev/null +++ b/octavia/api/v2/types/provider.py @@ -0,0 +1,34 @@ +# Copyright 2018 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
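+# Illustrative sketch (values invented): a pool with cookie-based session
+# persistence and an embedded health monitor, built from the pool types
+# above plus HealthMonitorSingleCreate. APP_COOKIE persistence requires a
+# cookie_name, which the SessionPersistencePOST pattern validates.
+from octavia.api.v2.types import health_monitor
+from octavia.api.v2.types import pool as pool_types
+
+hm = health_monitor.HealthMonitorSingleCreate(
+    type='HTTP', delay=5, timeout=3, max_retries=3,
+    url_path='/healthz')
+persistence = pool_types.SessionPersistencePOST(
+    type='APP_COOKIE', cookie_name='chocolate')
+web_pool = pool_types.PoolPOST(
+    name='web-pool',
+    protocol='HTTP',
+    lb_algorithm='ROUND_ROBIN',
+    session_persistence=persistence,
+    healthmonitor=hm,
+    # Invented UUID of the parent load balancer.
+    loadbalancer_id='57d4dcf8-96b0-4d8c-9cbe-a79c6f0b0c0e')
+request_body = pool_types.PoolRootPOST(pool=web_pool)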
+ +from wsme import types as wtypes + +from octavia.api.common import types + + +class ProviderResponse(types.BaseType): + name = wtypes.wsattr(wtypes.StringType()) + description = wtypes.wsattr(wtypes.StringType()) + + +class ProvidersRootResponse(types.BaseType): + providers = wtypes.wsattr([ProviderResponse]) + + +class FlavorCapabilitiesResponse(types.BaseType): + flavor_capabilities = wtypes.wsattr([ProviderResponse]) + + +class AvailabilityZoneCapabilitiesResponse(types.BaseType): + availability_zone_capabilities = wtypes.wsattr([ProviderResponse]) diff --git a/octavia/api/v2/types/quotas.py b/octavia/api/v2/types/quotas.py new file mode 100644 index 0000000000..eed7234dac --- /dev/null +++ b/octavia/api/v2/types/quotas.py @@ -0,0 +1,110 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import types as wtypes + +from octavia.api.common import types as base +from octavia.common import constants as consts + + +class QuotaBase(base.BaseType): + """Individual quota definitions.""" + loadbalancer = wtypes.wsattr(wtypes.IntegerType( + minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) + # Misspelled version, deprecated in Rocky + load_balancer = wtypes.wsattr(wtypes.IntegerType( + minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) + listener = wtypes.wsattr(wtypes.IntegerType( + minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) + member = wtypes.wsattr(wtypes.IntegerType( + minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) + pool = wtypes.wsattr(wtypes.IntegerType( + minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) + healthmonitor = wtypes.wsattr(wtypes.IntegerType( + minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) + # Misspelled version, deprecated in Rocky + health_monitor = wtypes.wsattr(wtypes.IntegerType( + minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) + l7policy = wtypes.wsattr(wtypes.IntegerType( + minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) + l7rule = wtypes.wsattr(wtypes.IntegerType( + minimum=consts.MIN_QUOTA, maximum=consts.MAX_QUOTA)) + + def to_dict(self, render_unsets=False): + quota_dict = super().to_dict(render_unsets) + if 'loadbalancer' in quota_dict: + quota_dict['load_balancer'] = quota_dict.pop('loadbalancer') + if 'healthmonitor' in quota_dict: + quota_dict['health_monitor'] = quota_dict.pop('healthmonitor') + return quota_dict + + +class QuotaResponse(base.BaseType): + """Wrapper object for quotas responses.""" + quota = wtypes.wsattr(QuotaBase) + + @classmethod + def from_data_model(cls, data_model, children=False): + quotas = super().from_data_model( + data_model, children=children) + quotas.quota = QuotaBase.from_data_model(data_model) + return quotas + + +class QuotaAllBase(base.BaseType): + """Wrapper object for get all quotas responses.""" + project_id = wtypes.wsattr(wtypes.StringType()) + loadbalancer = wtypes.wsattr(wtypes.IntegerType()) + # Misspelled version, deprecated in Rocky, remove in T + load_balancer = wtypes.wsattr(wtypes.IntegerType()) + listener = 
wtypes.wsattr(wtypes.IntegerType()) + member = wtypes.wsattr(wtypes.IntegerType()) + pool = wtypes.wsattr(wtypes.IntegerType()) + healthmonitor = wtypes.wsattr(wtypes.IntegerType()) + # Misspelled version, deprecated in Rocky, remove in T + health_monitor = wtypes.wsattr(wtypes.IntegerType()) + l7policy = wtypes.wsattr(wtypes.IntegerType()) + l7rule = wtypes.wsattr(wtypes.IntegerType()) + + _type_to_model_map = {'loadbalancer': 'load_balancer', + 'healthmonitor': 'health_monitor'} + _child_map = {} + + @classmethod + def from_data_model(cls, data_model, children=False): + quotas = super().from_data_model( + data_model, children=children) + # For backwards compatibility, remove in T + quotas.load_balancer = quotas.loadbalancer + # For backwards compatibility, remove in T + quotas.health_monitor = quotas.healthmonitor + return quotas + + +class QuotaAllResponse(base.BaseType): + quotas = wtypes.wsattr([QuotaAllBase]) + quotas_links = wtypes.wsattr([base.PageType]) + + @classmethod + def from_data_model(cls, data_model, children=False): + quotalist = QuotaAllResponse() + quotalist.quotas = [ + QuotaAllBase.from_data_model(obj) + for obj in data_model] + return quotalist + + +class QuotaPUT(base.BaseType): + """Overall object for quota PUT request.""" + quota = wtypes.wsattr(QuotaBase) diff --git a/octavia/certificates/__init__.py b/octavia/certificates/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/certificates/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/certificates/common/__init__.py b/octavia/certificates/common/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/certificates/common/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/certificates/common/auth/__init__.py b/octavia/certificates/common/auth/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/certificates/common/auth/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/certificates/common/auth/barbican_acl.py b/octavia/certificates/common/auth/barbican_acl.py new file mode 100644 index 0000000000..859f7d34cc --- /dev/null +++ b/octavia/certificates/common/auth/barbican_acl.py @@ -0,0 +1,99 @@ +# Copyright (c) 2014 Rackspace US, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Barbican ACL auth class for Barbican certificate handling +""" +from barbicanclient import client as barbican_client +from keystoneauth1 import session +from keystoneauth1 import token_endpoint + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils + +from octavia.certificates.common import barbican as barbican_common +from octavia.common import keystone + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class BarbicanACLAuth(barbican_common.BarbicanAuth): + _barbican_client = None + + @classmethod + def get_barbican_client(cls, project_id=None): + if not cls._barbican_client: + try: + ksession = keystone.KeystoneSession() + cls._barbican_client = barbican_client.Client( + session=ksession.get_session(), + region_name=CONF.certificates.region_name, + interface=CONF.certificates.endpoint_type + ) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception("Error creating Barbican client") + return cls._barbican_client + + @classmethod + def ensure_secret_access(cls, context, ref): + # get a normal session + ksession = keystone.KeystoneSession() + user_id = ksession.get_service_user_id() + + # use barbican client to set the ACLs + bc = cls.get_barbican_client_user_auth(context) + acl = bc.acls.get(ref) + read_oper = acl.get('read') + if user_id not in read_oper.users: + read_oper.users.append(user_id) + acl.submit() + + @classmethod + def revoke_secret_access(cls, context, ref): + # get a normal session + ksession = keystone.KeystoneSession() + user_id = ksession.get_service_user_id() + + # use barbican client to set the ACLs + bc = cls.get_barbican_client_user_auth(context) + acl = bc.acls.get(ref) + read_oper = acl.get('read') + if user_id in read_oper.users: + read_oper.users.remove(user_id) + acl.submit() + + @classmethod + def get_barbican_client_user_auth(cls, context): + barbican_endpoint = CONF.certificates.endpoint + if not barbican_endpoint: + ksession = keystone.KeystoneSession().get_session() + endpoint_data = ksession.get_endpoint_data( + service_type='key-manager', + region_name=CONF.certificates.region_name, + interface=CONF.certificates.endpoint_type) + barbican_endpoint = endpoint_data.catalog_url + + auth_token = token_endpoint.Token(barbican_endpoint, + context.auth_token) + + user_session = session.Session( + auth=auth_token, + verify=CONF.certificates.ca_certificates_file) + return barbican_client.Client( + session=user_session, + endpoint=barbican_endpoint) diff --git a/octavia/certificates/common/barbican.py 
b/octavia/certificates/common/barbican.py new file mode 100644 index 0000000000..7c2938470c --- /dev/null +++ b/octavia/certificates/common/barbican.py @@ -0,0 +1,88 @@ +# Copyright (c) 2014 Rackspace US, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Common classes for Barbican certificate handling +""" + +import abc + +from barbicanclient.v1 import containers +from oslo_utils import encodeutils + +from octavia.certificates.common import cert +from octavia.common.tls_utils import cert_parser +from octavia.i18n import _ + + +class BarbicanCert(cert.Cert): + """Representation of a Cert based on the Barbican CertificateContainer.""" + def __init__(self, cert_container): + if not isinstance(cert_container, containers.CertificateContainer): + raise TypeError(_("Retrieved Barbican Container is not of the " + "correct type (certificate).")) + self._cert_container = cert_container + + def get_certificate(self): + if self._cert_container.certificate: + return encodeutils.to_utf8( + self._cert_container.certificate.payload) + return None + + def get_intermediates(self): + if self._cert_container.intermediates: + intermediates = encodeutils.to_utf8( + self._cert_container.intermediates.payload) + return list(cert_parser.get_intermediates_pems(intermediates)) + return None + + def get_private_key(self): + if self._cert_container.private_key: + return encodeutils.to_utf8( + self._cert_container.private_key.payload) + return None + + def get_private_key_passphrase(self): + if self._cert_container.private_key_passphrase: + return encodeutils.to_utf8( + self._cert_container.private_key_passphrase.payload) + return None + + +class BarbicanAuth(metaclass=abc.ABCMeta): + @abc.abstractmethod + def get_barbican_client(self, project_id): + """Creates a Barbican client object. + + :param project_id: Project ID that the request will be used for + :return: a Barbican Client object + :raises Exception: if the client cannot be created + """ + + @abc.abstractmethod + def ensure_secret_access(self, context, ref): + """Do whatever steps are necessary to ensure future access to a secret. + + :param context: pecan context object + :param ref: Reference to a Barbican object + """ + + @abc.abstractmethod + def revoke_secret_access(self, context, ref): + """Revoke access of Octavia keystone user to a secret. + + :param context: pecan context object + :param ref: Reference to a Barbican object + """ diff --git a/octavia/certificates/common/cert.py b/octavia/certificates/common/cert.py new file mode 100644 index 0000000000..59f7285541 --- /dev/null +++ b/octavia/certificates/common/cert.py @@ -0,0 +1,36 @@ +# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
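# --- Editor's sketch (not part of the diff) ---------------------------------
# BarbicanAuth above is the contract loaded through the
# 'octavia.barbican_auth' stevedore namespace (BarbicanACLAuth in
# barbican_acl.py above is the in-tree implementation). A minimal alternative
# only has to supply the three abstract methods; the class below is
# illustrative only, not octavia API.
from octavia.certificates.common import barbican as barbican_common


class NoAclBarbicanAuth(barbican_common.BarbicanAuth):
    """Toy driver that performs no ACL management at all."""

    def get_barbican_client(self, project_id=None):
        # A real driver builds a keystone session and returns a
        # barbicanclient.client.Client, as BarbicanACLAuth does above.
        raise NotImplementedError('supply a barbican client here')

    def ensure_secret_access(self, context, ref):
        pass  # nothing to grant in this toy driver

    def revoke_secret_access(self, context, ref):
        pass  # nothing to revoke
# ---------------------------------------------------------------------------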
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + + +class Cert(metaclass=abc.ABCMeta): + """Base class to represent all certificates.""" + + @abc.abstractmethod + def get_certificate(self): + """Returns the certificate.""" + + @abc.abstractmethod + def get_intermediates(self): + """Returns the intermediate certificates as a list.""" + + @abc.abstractmethod + def get_private_key(self): + """Returns the private key for the certificate.""" + + @abc.abstractmethod + def get_private_key_passphrase(self): + """Returns the passphrase for the private key.""" diff --git a/octavia/certificates/common/local.py b/octavia/certificates/common/local.py new file mode 100644 index 0000000000..078db1c4ea --- /dev/null +++ b/octavia/certificates/common/local.py @@ -0,0 +1,105 @@ +# Copyright (c) 2014 Rackspace US, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Common classes for local filesystem certificate handling +""" +import os + +from oslo_config import cfg +from oslo_config import types + +from octavia.certificates.common import cert + +TLS_CERT_DEFAULT = os.environ.get( + 'OS_OCTAVIA_TLS_CA_CERT', '/etc/ssl/certs/ssl-cert-snakeoil.pem' +) +TLS_KEY_DEFAULT = os.environ.get( + 'OS_OCTAVIA_TLS_CA_KEY', '/etc/ssl/private/ssl-cert-snakeoil.key' +) +TLS_PKP_DEFAULT = os.environ.get('OS_OCTAVIA_CA_KEY_PASS') +TLS_PASS_AMPS_DEFAULT = os.environ.get('TLS_PASS_AMPS_DEFAULT', + 'insecure-key-do-not-use-this-key') + +TLS_DIGEST_DEFAULT = os.environ.get('OS_OCTAVIA_CA_SIGNING_DIGEST', 'sha256') +TLS_STORAGE_DEFAULT = os.environ.get( + 'OS_OCTAVIA_TLS_STORAGE', '/var/lib/octavia/certificates/' ) + + +certgen_opts = [ + cfg.StrOpt('ca_certificate', + default=TLS_CERT_DEFAULT, + help='Absolute path to the CA Certificate for signing. Defaults' + ' to env[OS_OCTAVIA_TLS_CA_CERT].'), + cfg.StrOpt('ca_private_key', + default=TLS_KEY_DEFAULT, + help='Absolute path to the Private Key for signing. Defaults' + ' to env[OS_OCTAVIA_TLS_CA_KEY].'), + cfg.StrOpt('ca_private_key_passphrase', + default=TLS_PKP_DEFAULT, + help='Passphrase for the Private Key. Defaults' + ' to env[OS_OCTAVIA_CA_KEY_PASS] or None.', + secret=True), + cfg.ListOpt('server_certs_key_passphrase', + default=[TLS_PASS_AMPS_DEFAULT], + item_type=types.String(regex=r'^[A-Za-z0-9\-_=]{32}$'), + help='List of passphrases for encrypting Amphora Certificates ' 'and Private Keys. The first passphrase in the list is used ' 'for encryption, while all other keys are used to decrypt ' 'previously encrypted data. Each key must be 32 ' 'base64(url)-compatible characters long.'
+ ' Defaults to env[TLS_PASS_AMPS_DEFAULT] or ' + 'a list with default key insecure-key-do-not-use-this-key', + required=True, + secret=True), + cfg.StrOpt('signing_digest', + default=TLS_DIGEST_DEFAULT, + help='Certificate signing digest. Defaults' + ' to env[OS_OCTAVIA_CA_SIGNING_DIGEST] or "sha256".'), + cfg.IntOpt('cert_validity_time', + default=30 * 24 * 60 * 60, + help="The validity time for the Amphora Certificates " + "(in seconds)."), +] + +certmgr_opts = [ + cfg.StrOpt('storage_path', + default=TLS_STORAGE_DEFAULT, + help='Absolute path to the certificate storage directory. ' + 'Defaults to env[OS_OCTAVIA_TLS_STORAGE].') +] + + +class LocalCert(cert.Cert): + """Representation of a Cert for local storage.""" + + def __init__(self, certificate, private_key, intermediates=None, + private_key_passphrase=None): + self.certificate = certificate + self.intermediates = intermediates + self.private_key = private_key + self.private_key_passphrase = private_key_passphrase + + def get_certificate(self): + return self.certificate + + def get_intermediates(self): + return self.intermediates + + def get_private_key(self): + return self.private_key + + def get_private_key_passphrase(self): + return self.private_key_passphrase diff --git a/octavia/certificates/common/pkcs12.py b/octavia/certificates/common/pkcs12.py new file mode 100644 index 0000000000..ad66f5af34 --- /dev/null +++ b/octavia/certificates/common/pkcs12.py @@ -0,0 +1,59 @@ +# Copyright (c) 2017 GoDaddy +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
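# --- Editor's usage sketch (not part of the diff) ---------------------------
# LocalCert above is a plain value object: it returns exactly what it was
# constructed with, and the optional pieces default to None. The
# certgen_opts/certmgr_opts lists are registered under the [certificates]
# group elsewhere in octavia (inferred from the CONF.certificates.* reads in
# this series); the PEM payloads below are dummies.
from octavia.certificates.common import local as local_common

demo = local_common.LocalCert(
    certificate=b'-----BEGIN CERTIFICATE-----\n...placeholder...',
    private_key=b'-----BEGIN PRIVATE KEY-----\n...placeholder...')
assert demo.get_certificate().startswith(b'-----BEGIN CERTIFICATE-----')
assert demo.get_intermediates() is None
assert demo.get_private_key_passphrase() is None
# ---------------------------------------------------------------------------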
+ +""" +Common classes for pkcs12 based certificate handling +""" + +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.serialization import pkcs12 + +from octavia.certificates.common import cert +from octavia.common import exceptions + + +class PKCS12Cert(cert.Cert): + """Representation of a Cert for local storage.""" + def __init__(self, certbag): + try: + p12 = pkcs12.load_pkcs12(certbag, None) + except (TypeError, ValueError) as e: + raise exceptions.UnreadablePKCS12(error=str(e)) + self.certificate = p12.cert + self.intermediates = p12.additional_certs + self.private_key = p12.key + + def get_certificate(self): + return self.certificate.certificate.public_bytes( + encoding=serialization.Encoding.PEM).strip() + + def get_intermediates(self): + if self.intermediates: + int_data = [ + ic.certificate.public_bytes( + encoding=serialization.Encoding.PEM).strip() + for ic in self.intermediates + ] + return int_data + return None + + def get_private_key(self): + return self.private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption()).strip() + + def get_private_key_passphrase(self): + return None diff --git a/octavia/certificates/generator/__init__.py b/octavia/certificates/generator/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/certificates/generator/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/certificates/generator/cert_gen.py b/octavia/certificates/generator/cert_gen.py new file mode 100644 index 0000000000..cc741f9213 --- /dev/null +++ b/octavia/certificates/generator/cert_gen.py @@ -0,0 +1,55 @@ +# Copyright (c) 2014 Rackspace US, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Certificate Generator API +""" +import abc + + +class CertGenerator(metaclass=abc.ABCMeta): + """Base Cert Generator Interface + + A Certificate Generator is responsible for generating private keys, + generating CSRs, and signing TLS certificates. + """ + + @abc.abstractmethod + def sign_cert(self, csr, validity): + """Generates a signed certificate from the provided CSR + + This call is designed to block until a signed certificate can be + returned. 
+ + :param csr: A Certificate Signing Request + :param validity: Valid for <validity> seconds from the current time + + :return: PEM Encoded Signed certificate + :raises Exception: If certificate signing fails + """ + + @abc.abstractmethod + def generate_cert_key_pair(self, cn, validity, bit_length, passphrase): + """Generates a private key and certificate pair + + :param cn: Common name to use for the Certificate + :param validity: Validity period for the Certificate + :param bit_length: Private key bit length + :param passphrase: Passphrase to use for encrypting the private key + + :return: octavia.certificates.common.Cert representation of the + certificate data + :raises Exception: If generation fails + """ diff --git a/octavia/certificates/generator/local.py b/octavia/certificates/generator/local.py new file mode 100644 index 0000000000..fb390e2097 --- /dev/null +++ b/octavia/certificates/generator/local.py @@ -0,0 +1,241 @@ +# Copyright (c) 2014 Rackspace US, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import datetime +import uuid + +from cryptography import exceptions as crypto_exceptions +from cryptography.hazmat import backends +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives import serialization +from cryptography import x509 +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import timeutils + +from octavia.certificates.common import local as local_common +from octavia.certificates.generator import cert_gen +from octavia.common import exceptions + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + + +class LocalCertGenerator(cert_gen.CertGenerator): + """Cert Generator Interface that signs certs locally.""" + + @classmethod + def _new_serial(cls): + return int(uuid.uuid4()) + + @classmethod + def _validate_cert(cls, ca_cert, ca_key, ca_key_pass): + if not ca_cert: + LOG.info("Using CA Certificate from config.") + try: + with open(CONF.certificates.ca_certificate, 'rb') as fp: + fp.read() + except OSError as e: + raise exceptions.CertificateGenerationException( + msg="Failed to load CA Certificate {}." + .format(CONF.certificates.ca_certificate) + ) from e + if not ca_key: + LOG.info("Using CA Private Key from config.") + try: + with open(CONF.certificates.ca_private_key, 'rb') as fp: + fp.read() + except OSError as e: + raise exceptions.CertificateGenerationException( + msg="Failed to load CA Private Key {}."
+ .format(CONF.certificates.ca_private_key) + ) from e + if not ca_key_pass: + ca_key_pass = CONF.certificates.ca_private_key_passphrase + if ca_key_pass: + LOG.info("Using CA Private Key Passphrase from config.") + else: + LOG.info("No Passphrase found for CA Private Key, not using " + "one.") + + @classmethod + def sign_cert(cls, csr, validity, ca_cert=None, ca_key=None, + ca_key_pass=None, ca_digest=None): + """Signs a certificate using our private CA based on the specified CSR + + The signed certificate will be valid from now until <validity> seconds + from now. + + :param csr: A Certificate Signing Request + :param validity: Valid for <validity> seconds from the current time + :param ca_cert: Signing Certificate (default: config) + :param ca_key: Signing Certificate Key (default: config) + :param ca_key_pass: Signing Certificate Key Pass (default: config) + :param ca_digest: Digest method to use for signing (default: config) + + :return: Signed certificate + :raises Exception: if certificate signing fails + """ + LOG.info("Signing a certificate request using OpenSSL locally.") + cls._validate_cert(ca_cert, ca_key, ca_key_pass) + if not ca_digest: + ca_digest = CONF.certificates.signing_digest + try: + algorithm = getattr(hashes, ca_digest.upper())() + except AttributeError as e: + raise crypto_exceptions.UnsupportedAlgorithm( + f"Supplied digest method not found: {ca_digest}" + ) from e + + if not ca_cert: + with open(CONF.certificates.ca_certificate, 'rb') as f: + ca_cert = f.read() + if not ca_key: + with open(CONF.certificates.ca_private_key, 'rb') as f: + ca_key = f.read() + if not ca_key_pass: + ca_key_pass = CONF.certificates.ca_private_key_passphrase + if ca_key_pass is not None: + ca_key_pass = ca_key_pass.encode('utf-8') + + try: + lo_cert = x509.load_pem_x509_certificate( + data=ca_cert, backend=backends.default_backend()) + lo_key = serialization.load_pem_private_key( + data=ca_key, password=ca_key_pass, + backend=backends.default_backend()) + lo_req = x509.load_pem_x509_csr(data=csr, + backend=backends.default_backend()) + new_cert = x509.CertificateBuilder() + new_cert = new_cert.serial_number(cls._new_serial()) + valid_from_datetime = timeutils.utcnow() + valid_to_datetime = (timeutils.utcnow() + + datetime.timedelta(seconds=validity)) + new_cert = new_cert.not_valid_before(valid_from_datetime) + new_cert = new_cert.not_valid_after(valid_to_datetime) + new_cert = new_cert.issuer_name(lo_cert.subject) + new_cert = new_cert.subject_name(lo_req.subject) + new_cert = new_cert.public_key(lo_req.public_key()) + new_cert = new_cert.add_extension( + x509.BasicConstraints(ca=False, path_length=None), + critical=True + ) + cn_str = lo_req.subject.get_attributes_for_oid( + x509.oid.NameOID.COMMON_NAME)[0].value + new_cert = new_cert.add_extension( + x509.SubjectAlternativeName([x509.DNSName(cn_str)]), + critical=False + ) + new_cert = new_cert.add_extension( + x509.KeyUsage( + digital_signature=True, + key_encipherment=True, + data_encipherment=True, + key_agreement=True, + content_commitment=False, + key_cert_sign=False, + crl_sign=False, + encipher_only=False, + decipher_only=False + ), + critical=True + ) + new_cert = new_cert.add_extension( + x509.ExtendedKeyUsage([ + x509.oid.ExtendedKeyUsageOID.SERVER_AUTH, + x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH + ]), + critical=True + ) + signed_cert = new_cert.sign(private_key=lo_key, + algorithm=algorithm, + backend=backends.default_backend()) + return signed_cert.public_bytes( + encoding=serialization.Encoding.PEM) + except Exception as e: +
LOG.error("Unable to sign certificate.") + raise exceptions.CertificateGenerationException(msg=e) + + @classmethod + def _generate_private_key(cls, bit_length=2048, passphrase=None): + pk = rsa.generate_private_key( + public_exponent=65537, + key_size=bit_length, + backend=backends.default_backend() + ) + if passphrase: + encryption = serialization.BestAvailableEncryption(passphrase) + else: + encryption = serialization.NoEncryption() + return pk.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=encryption, + ) + + @classmethod + def _generate_csr(cls, cn, private_key, passphrase=None): + pk = serialization.load_pem_private_key( + data=private_key, password=passphrase, + backend=backends.default_backend()) + csr = x509.CertificateSigningRequestBuilder().subject_name( + x509.Name([ + x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, cn), + ]) + ) + csr = csr.add_extension( + x509.BasicConstraints( + ca=False, + path_length=None + ), + critical=True + ) + csr = csr.add_extension( + x509.KeyUsage( + digital_signature=True, + key_encipherment=True, + data_encipherment=True, + key_agreement=True, + content_commitment=False, + key_cert_sign=False, + crl_sign=False, + encipher_only=False, + decipher_only=False + ), + critical=True + ) + csr = csr.add_extension( + x509.SubjectAlternativeName([x509.DNSName(cn)]), + critical=False + ) + signed_csr = csr.sign( + pk, + getattr(hashes, CONF.certificates.signing_digest.upper())(), + backends.default_backend()) + return signed_csr.public_bytes(serialization.Encoding.PEM) + + @classmethod + def generate_cert_key_pair(cls, cn, validity, bit_length=2048, + passphrase=None, **kwargs): + pk = cls._generate_private_key(bit_length, passphrase) + csr = cls._generate_csr(cn, pk, passphrase) + cert = cls.sign_cert(csr, validity, **kwargs) + cert_object = local_common.LocalCert( + certificate=cert, + private_key=pk, + private_key_passphrase=passphrase + ) + return cert_object diff --git a/octavia/certificates/manager/__init__.py b/octavia/certificates/manager/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/certificates/manager/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/certificates/manager/barbican.py b/octavia/certificates/manager/barbican.py new file mode 100644 index 0000000000..1cff008034 --- /dev/null +++ b/octavia/certificates/manager/barbican.py @@ -0,0 +1,192 @@ +# Copyright (c) 2014 Rackspace US, Inc +# Copyright (c) 2017 GoDaddy +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Cert manager implementation for Barbican using a single PKCS12 secret +""" +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.serialization import pkcs12 as c_pkcs12 +from cryptography import x509 +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import encodeutils +from oslo_utils import excutils +from stevedore import driver as stevedore_driver + +from octavia.certificates.common import pkcs12 +from octavia.certificates.manager import barbican_legacy +from octavia.certificates.manager import cert_mgr +from octavia.common import exceptions +from octavia.common.tls_utils import cert_parser + +LOG = logging.getLogger(__name__) + + +class BarbicanCertManager(cert_mgr.CertManager): + """Certificate Manager that wraps the Barbican client API.""" + + def __init__(self): + super().__init__() + self.auth = stevedore_driver.DriverManager( + namespace='octavia.barbican_auth', + name=cfg.CONF.certificates.barbican_auth, + invoke_on_load=True, + ).driver + + def store_cert(self, context, certificate, private_key, intermediates=None, + private_key_passphrase=None, expiration=None, + name="PKCS12 Certificate Bundle"): + """Stores a certificate in the certificate manager. + + :param context: Oslo context of the request + :param certificate: PEM encoded TLS certificate + :param private_key: private key for the supplied certificate + :param intermediates: ordered and concatenated intermediate certs + :param private_key_passphrase: optional passphrase for the supplied key + :param expiration: the expiration time of the cert in ISO 8601 format + :param name: a friendly name for the cert + + :returns: the container_ref of the stored cert + :raises Exception: if certificate storage fails + """ + connection = self.auth.get_barbican_client(context.project_id) + + LOG.info("Storing certificate secret '%s' in Barbican.", name) + + if private_key_passphrase: + raise exceptions.CertificateStorageException( + "Passphrase protected PKCS12 certificates are not supported.") + + x509_cert = x509.load_pem_x509_certificate(certificate) + x509_pk = serialization.load_pem_private_key(private_key, None) + cas = None + if intermediates: + cert_ints = list(cert_parser.get_intermediates_pems(intermediates)) + cas = [ + x509.load_pem_x509_certificate(ci) + for ci in cert_ints] + + try: + certificate_secret = connection.secrets.create( + payload=c_pkcs12.serialize_key_and_certificates( + name=encodeutils.safe_encode(name), + key=x509_pk, + cert=x509_cert, + cas=cas, + encryption_algorithm=serialization.NoEncryption() + ), + expiration=expiration, + name=name + ) + certificate_secret.store() + return certificate_secret.secret_ref + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error('Error storing certificate data: %s', str(e)) + return None + + def get_cert(self, context, cert_ref, resource_ref=None, check_only=False, + service_name=None): + """Retrieves the specified cert and registers as a consumer. 
+ + :param context: Oslo context of the request + :param cert_ref: the UUID of the cert to retrieve + :param resource_ref: Full HATEOAS reference to the consuming resource + :param check_only: Read Certificate data without registering + :param service_name: Friendly name for the consuming service + + :return: octavia.certificates.common.Cert representation of the + certificate data + :raises Exception: if certificate retrieval fails + """ + connection = self.auth.get_barbican_client(context.project_id) + + LOG.info('Loading certificate secret %s from Barbican.', cert_ref) + try: + cert_secret = connection.secrets.get(secret_ref=cert_ref) + return pkcs12.PKCS12Cert(cert_secret.payload) + except exceptions.UnreadablePKCS12: + raise + except Exception as e: + LOG.warning('Failed to load PKCS12Cert for secret %s with %s', + cert_ref, str(e)) + LOG.warning('Falling back to the barbican_legacy implementation.') + # If our get fails, try with the legacy driver. + # TODO(rm_work): Remove this code when the deprecation cycle for + # the legacy driver is complete. + legacy_mgr = barbican_legacy.BarbicanCertManager() + legacy_cert = legacy_mgr.get_cert( + context, cert_ref, resource_ref=resource_ref, + check_only=check_only, service_name=service_name + ) + return legacy_cert + + def delete_cert(self, context, cert_ref, resource_ref, service_name=None): + """Deregister as a consumer for the specified cert. + + :param context: Oslo context of the request + :param cert_ref: the UUID of the cert to retrieve + :param resource_ref: Full HATEOAS reference to the consuming resource + :param service_name: Friendly name for the consuming service + + :raises Exception: if deregistration fails + """ + # TODO(rm_work): We won't take any action on a delete in this driver, + # but for now try the legacy driver's delete and ignore failure. + try: + legacy_mgr = barbican_legacy.BarbicanCertManager(auth=self.auth) + legacy_mgr.delete_cert( + context, cert_ref, resource_ref, service_name=service_name) + except Exception: + # If the delete failed, it was probably because it isn't legacy + # (this will be fixed once Secrets have Consumer registration). + pass + + def set_acls(self, context, cert_ref): + LOG.debug('Setting project ACL for certificate secret...') + self.auth.ensure_secret_access(context, cert_ref) + # TODO(velizarx): Remove this code when the deprecation cycle for + # the legacy driver is complete. + legacy_mgr = barbican_legacy.BarbicanCertManager(auth=self.auth) + legacy_mgr.set_acls(context, cert_ref) + + def unset_acls(self, context, cert_ref): + LOG.debug('Unsetting project ACL for certificate secret...') + self.auth.revoke_secret_access(context, cert_ref) + # TODO(velizarx): Remove this code when the deprecation cycle for + # the legacy driver is complete. + legacy_mgr = barbican_legacy.BarbicanCertManager(auth=self.auth) + legacy_mgr.unset_acls(context, cert_ref) + + def get_secret(self, context, secret_ref): + """Retrieves a secret payload by reference. 
+ + :param context: Oslo context of the request + :param secret_ref: The secret reference ID + + :return: The secret payload + :raises CertificateStorageException: if retrieval fails + """ + connection = self.auth.get_barbican_client(context.project_id) + + LOG.info('Loading secret %s from Barbican.', secret_ref) + try: + secret = connection.secrets.get(secret_ref=secret_ref) + return secret.payload + except Exception as e: + LOG.error("Failed to access secret for %s due to: %s.", + secret_ref, str(e)) + raise exceptions.CertificateRetrievalException(ref=secret_ref) diff --git a/octavia/certificates/manager/barbican_legacy.py b/octavia/certificates/manager/barbican_legacy.py new file mode 100644 index 0000000000..67467ba877 --- /dev/null +++ b/octavia/certificates/manager/barbican_legacy.py @@ -0,0 +1,234 @@ +# Copyright (c) 2014 Rackspace US, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Legacy cert manager implementation for Barbican (container+secrets) +""" +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from stevedore import driver as stevedore_driver + +from octavia.certificates.common import barbican as barbican_common +from octavia.certificates.manager import cert_mgr +from octavia.common.tls_utils import cert_parser + +LOG = logging.getLogger(__name__) + + +class BarbicanCertManager(cert_mgr.CertManager): + """Certificate Manager that wraps the Barbican client API.""" + + def __init__(self, auth=None): + super().__init__() + if auth: + self.auth = auth + else: + self.auth = stevedore_driver.DriverManager( + namespace='octavia.barbican_auth', + name=cfg.CONF.certificates.barbican_auth, + invoke_on_load=True, + ).driver + + def store_cert(self, context, certificate, private_key, intermediates=None, + private_key_passphrase=None, expiration=None, name=None): + """Stores a certificate in the certificate manager. 
+ + :param context: Oslo context of the request + :param certificate: PEM encoded TLS certificate + :param private_key: private key for the supplied certificate + :param intermediates: ordered and concatenated intermediate certs + :param private_key_passphrase: optional passphrase for the supplied key + :param expiration: the expiration time of the cert in ISO 8601 format + :param name: a friendly name for the cert + + :returns: the container_ref of the stored cert + :raises Exception: if certificate storage fails + """ + connection = self.auth.get_barbican_client(context.project_id) + + LOG.info("Storing certificate container '%s' in Barbican.", name) + + certificate_secret = None + private_key_secret = None + intermediates_secret = None + pkp_secret = None + + try: + certificate_secret = connection.secrets.create( + payload=certificate, + expiration=expiration, + name="Certificate" + ) + private_key_secret = connection.secrets.create( + payload=private_key, + expiration=expiration, + name="Private Key" + ) + certificate_container = connection.containers.create_certificate( + name=name, + certificate=certificate_secret, + private_key=private_key_secret + ) + if intermediates: + intermediates_secret = connection.secrets.create( + payload=intermediates, + expiration=expiration, + name="Intermediates" + ) + certificate_container.intermediates = intermediates_secret + if private_key_passphrase: + pkp_secret = connection.secrets.create( + payload=private_key_passphrase, + expiration=expiration, + name="Private Key Passphrase" + ) + certificate_container.private_key_passphrase = pkp_secret + + certificate_container.store() + return certificate_container.container_ref + except Exception as e: + for i in [certificate_secret, private_key_secret, + intermediates_secret, pkp_secret]: + if i and i.secret_ref: + old_ref = i.secret_ref + try: + i.delete() + LOG.info('Deleted secret %s (%s) during rollback.', + i.name, old_ref) + except Exception: + LOG.warning('Failed to delete %s (%s) during ' + 'rollback. This might not be a problem.', + i.name, old_ref) + with excutils.save_and_reraise_exception(): + LOG.error('Error storing certificate data: %s', str(e)) + return None + + def get_cert(self, context, cert_ref, resource_ref=None, check_only=False, + service_name=None): + """Retrieves the specified cert and registers as a consumer. 
+ + :param context: Oslo context of the request + :param cert_ref: the UUID of the cert to retrieve + :param resource_ref: Full HATEOAS reference to the consuming resource + :param check_only: Read Certificate data without registering + :param service_name: Friendly name for the consuming service + + :return: octavia.certificates.common.Cert representation of the + certificate data + :raises Exception: if certificate retrieval fails + """ + connection = self.auth.get_barbican_client(context.project_id) + + LOG.info('Loading certificate container %s from Barbican.', cert_ref) + try: + if check_only: + cert_container = connection.containers.get( + container_ref=cert_ref + ) + else: + cert_container = connection.containers.register_consumer( + container_ref=cert_ref, + name=service_name, + url=resource_ref + ) + barbican_cert = barbican_common.BarbicanCert(cert_container) + + LOG.debug('Validating certificate data for %s.', cert_ref) + cert_parser.validate_cert( + barbican_cert.get_certificate(), + private_key=barbican_cert.get_private_key(), + private_key_passphrase=( + barbican_cert.get_private_key_passphrase()), + intermediates=barbican_cert.get_intermediates()) + LOG.debug('Certificate data validated for %s.', cert_ref) + + return barbican_cert + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error('Error getting cert %s: %s', cert_ref, str(e)) + return None + + def delete_cert(self, context, cert_ref, resource_ref, service_name=None): + """Deregister as a consumer for the specified cert. + + :param context: Oslo context of the request + :param cert_ref: the UUID of the cert to retrieve + :param resource_ref: Full HATEOAS reference to the consuming resource + :param service_name: Friendly name for the consuming service + + :raises Exception: if deregistration fails + """ + connection = self.auth.get_barbican_client(context.project_id) + + LOG.info('Deregistering as a consumer of %s in Barbican.', cert_ref) + try: + connection.containers.remove_consumer( + container_ref=cert_ref, + name=service_name, + url=resource_ref + ) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error('Error deregistering as a consumer of %s: %s', + cert_ref, str(e)) + + def set_acls(self, context, cert_ref): + connection = self.auth.get_barbican_client(context.project_id) + try: + cert_container = connection.containers.get( + container_ref=cert_ref + ) + except Exception: + # If the containers.get failed, it was probably because it isn't + # legacy so we will skip this step + return + self.auth.ensure_secret_access( + context, cert_container.certificate.secret_ref) + self.auth.ensure_secret_access( + context, cert_container.private_key.secret_ref) + if cert_container.private_key_passphrase: + self.auth.ensure_secret_access( + context, + cert_container.private_key_passphrase.secret_ref) + if cert_container.intermediates: + self.auth.ensure_secret_access( + context, cert_container.intermediates.secret_ref) + + def unset_acls(self, context, cert_ref): + connection = self.auth.get_barbican_client(context.project_id) + try: + cert_container = connection.containers.get( + container_ref=cert_ref + ) + except Exception: + # If the containers.get failed, it was probably because it isn't + # legacy so we will skip this step + return + self.auth.revoke_secret_access( + context, cert_container.certificate.secret_ref) + self.auth.revoke_secret_access( + context, cert_container.private_key.secret_ref) + if cert_container.private_key_passphrase: + 
self.auth.revoke_secret_access( + context, + cert_container.private_key_passphrase.secret_ref) + if cert_container.intermediates: + self.auth.revoke_secret_access( + context, cert_container.intermediates.secret_ref) + + def get_secret(self, context, secret_ref): + # The legacy driver doesn't need get_secret + return None diff --git a/octavia/certificates/manager/castellan_mgr.py b/octavia/certificates/manager/castellan_mgr.py new file mode 100644 index 0000000000..963c30fb90 --- /dev/null +++ b/octavia/certificates/manager/castellan_mgr.py @@ -0,0 +1,91 @@ +# Copyright (c) 2017 GoDaddy +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Cert manager implementation for Castellan +""" +from castellan.common.objects import opaque_data +from castellan import key_manager +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.serialization import pkcs12 as c_pkcs12 +from oslo_config import cfg +from oslo_log import log as logging + +from octavia.certificates.common import pkcs12 +from octavia.certificates.manager import cert_mgr +from octavia.common import exceptions + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + + +class CastellanCertManager(cert_mgr.CertManager): + """Certificate Manager for the Castellan library.""" + + def __init__(self): + super().__init__() + self.manager = key_manager.API(CONF) + + def store_cert(self, context, certificate, private_key, intermediates=None, + private_key_passphrase=None, expiration=None, + name="PKCS12 Certificate Bundle"): + if private_key_passphrase: + raise exceptions.CertificateStorageException( + "Passphrases protected PKCS12 certificates are not supported.") + + p12_data = opaque_data.OpaqueData( + c_pkcs12.serialize_key_and_certificates( + name=None, + key=private_key, + cert=certificate, + cas=intermediates, + encryption_algorithm=serialization.NoEncryption() + ), + name=name + ) + self.manager.store(context, p12_data) + + def get_cert(self, context, cert_ref, resource_ref=None, check_only=False, + service_name=None): + certbag = self.manager.get(context, cert_ref) + certbag_data = certbag.get_encoded() + cert = pkcs12.PKCS12Cert(certbag_data) + return cert + + def delete_cert(self, context, cert_ref, resource_ref, service_name=None): + # Delete is not a great name for this -- we don't delete anything + # in reality, we just do cleanup here. For castellan, none is required + pass + + def set_acls(self, context, cert_ref): + # We don't manage ACL based access for things retrieved via Castellan + # because we assume we have elevated access to the secret store. + pass + + def unset_acls(self, context, cert_ref): + # We don't manage ACL based access for things retrieved via Castellan + # because we assume we have elevated access to the secret store. 
+ pass + + def get_secret(self, context, secret_ref): + try: + certbag = self.manager.get(context, secret_ref) + certbag_data = certbag.get_encoded() + except Exception as e: + LOG.error("Failed to access secret for %s due to: %s.", + secret_ref, str(e)) + raise exceptions.CertificateRetrievalException(ref=secret_ref) + return certbag_data diff --git a/octavia/certificates/manager/cert_mgr.py b/octavia/certificates/manager/cert_mgr.py new file mode 100644 index 0000000000..5ec8856233 --- /dev/null +++ b/octavia/certificates/manager/cert_mgr.py @@ -0,0 +1,79 @@ +# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Certificate manager API +""" +import abc + + +class CertManager(metaclass=abc.ABCMeta): + """Base Cert Manager Interface + + A Cert Manager is responsible for managing certificates for TLS. + """ + + @abc.abstractmethod + def store_cert(self, context, certificate, private_key, intermediates=None, + private_key_passphrase=None, expiration=None, name=None): + """Stores (i.e., registers) a cert with the cert manager. + + This method stores the specified cert and returns its UUID that + identifies it within the cert manager. + If storage of the certificate data fails, a CertificateStorageException + should be raised. + """ + + @abc.abstractmethod + def get_cert(self, context, cert_ref, resource_ref=None, check_only=False, + service_name=None): + """Retrieves the specified cert. + + If check_only is True, don't perform any sort of registration. + If the specified cert does not exist, a CertificateStorageException + should be raised. + """ + + @abc.abstractmethod + def delete_cert(self, context, cert_ref, resource_ref, service_name=None): + """Deletes the specified cert. + + If the specified cert does not exist, a CertificateStorageException + should be raised. + """ + + @abc.abstractmethod + def set_acls(self, context, cert_ref): + """Adds ACLs so Octavia can access the cert objects. + + If the specified cert does not exist or the addition of ACLs fails for + any reason, a CertificateStorageException should be raised. + """ + + @abc.abstractmethod + def unset_acls(self, context, cert_ref): + """Remove ACLs so Octavia can access the cert objects. + + If the specified cert does not exist or the removal of ACLs fails for + any reason, a CertificateStorageException should be raised. + """ + + @abc.abstractmethod + def get_secret(self, context, secret_ref): + """Retrieves a secret payload by reference. + + If the specified secret does not exist, a CertificateStorageException + should be raised. + """ diff --git a/octavia/certificates/manager/local.py b/octavia/certificates/manager/local.py new file mode 100644 index 0000000000..03b38d357a --- /dev/null +++ b/octavia/certificates/manager/local.py @@ -0,0 +1,212 @@ +# Copyright (c) 2014 Rackspace US, Inc +# All Rights Reserved. 
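# --- Editor's sketch (not part of the diff) ---------------------------------
# Every manager in this series implements the CertManager ABC above, so
# callers stay driver-agnostic. Octavia selects the driver with stevedore;
# the 'octavia.cert_manager' namespace and the 'local_cert_manager' alias
# used below are assumptions based on this series (setup.cfg holds the
# authoritative entry points). Assumes [certificates] storage_path exists
# and is writable; the payloads are placeholders.
from stevedore import driver as stevedore_driver

cert_mgr_driver = stevedore_driver.DriverManager(
    namespace='octavia.cert_manager',
    name='local_cert_manager',
    invoke_on_load=True,
).driver
ref = cert_mgr_driver.store_cert(None, b'PEM cert placeholder',
                                 b'PEM key placeholder')
loaded = cert_mgr_driver.get_cert(None, ref)   # -> LocalCert
# ---------------------------------------------------------------------------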
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import os +import stat +import uuid + +from oslo_config import cfg +from oslo_log import log as logging + +from octavia.certificates.common import local as local_common +from octavia.certificates.manager import cert_mgr +from octavia.common import exceptions +from octavia.common.tls_utils import cert_parser + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class LocalCertManager(cert_mgr.CertManager): + """Cert Manager Interface that stores data locally.""" + + @staticmethod + def store_cert(context, certificate, private_key, intermediates=None, + private_key_passphrase=None, **kwargs): + """Stores (i.e., registers) a cert with the cert manager. + + This method stores the specified cert to the filesystem and returns + a UUID that can be used to retrieve it. + + :param context: Ignored in this implementation + :param certificate: PEM encoded TLS certificate + :param private_key: private key for the supplied certificate + :param intermediates: ordered and concatenated intermediate certs + :param private_key_passphrase: optional passphrase for the supplied key + + :returns: the UUID of the stored cert + :raises CertificateStorageException: if certificate storage fails + """ + cert_ref = str(uuid.uuid4()) + filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) + if isinstance(certificate, bytes): + certificate = certificate.decode('utf-8') + if isinstance(private_key, bytes): + private_key = private_key.decode('utf-8') + + LOG.info("Storing certificate data on the local filesystem.") + try: + filename_certificate = f"{filename_base}.crt" + flags = os.O_WRONLY | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR # mode 0600 + with os.fdopen(os.open( + filename_certificate, flags, mode), 'w') as cert_file: + cert_file.write(certificate) + + filename_private_key = f"{filename_base}.key" + with os.fdopen(os.open( + filename_private_key, flags, mode), 'w') as key_file: + key_file.write(private_key) + + if intermediates: + filename_intermediates = f"{filename_base}.int" + if isinstance(intermediates, bytes): + intermediates = intermediates.decode('utf-8') + with os.fdopen(os.open( + filename_intermediates, flags, mode), 'w') as int_file: + int_file.write(intermediates) + + if private_key_passphrase: + filename_pkp = f"{filename_base}.pass" + if isinstance(private_key_passphrase, bytes): + private_key_passphrase = private_key_passphrase.decode( + 'utf-8') + with os.fdopen(os.open( + filename_pkp, flags, mode), 'w') as pass_file: + pass_file.write(private_key_passphrase) + except OSError as ioe: + LOG.error("Failed to store certificate.") + # OSError has no .message attribute on Python 3; format the error + # into the exception's msg kwarg instead. + raise exceptions.CertificateStorageException( + msg=str(ioe)) from ioe + + return cert_ref + + @staticmethod + def get_cert(context, cert_ref, **kwargs): + """Retrieves the specified cert.
+ + :param context: Ignored in this implementation + :param cert_ref: the UUID of the cert to retrieve + + :return: octavia.certificates.common.Cert representation of the + certificate data + :raises CertificateStorageException: if certificate retrieval fails + """ + LOG.info("Loading certificate %s from the local filesystem.", cert_ref) + + filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) + + filename_certificate = f"{filename_base}.crt" + filename_private_key = f"{filename_base}.key" + filename_intermediates = f"{filename_base}.int" + filename_pkp = f"{filename_base}.pass" + + cert_data = {} + + flags = os.O_RDONLY + try: + with os.fdopen(os.open(filename_certificate, flags)) as cert_file: + cert_data['certificate'] = cert_file.read() + except OSError as e: + LOG.error("Failed to read certificate for %s.", cert_ref) + raise exceptions.CertificateStorageException( + msg="Certificate could not be read.") from e + try: + with os.fdopen(os.open(filename_private_key, flags)) as key_file: + cert_data['private_key'] = key_file.read() + except OSError as e: + LOG.error("Failed to read private key for %s", cert_ref) + raise exceptions.CertificateStorageException( + msg="Private Key could not be read.") from e + + try: + with os.fdopen(os.open(filename_intermediates, flags)) as int_file: + cert_data['intermediates'] = int_file.read() + cert_data['intermediates'] = list( + cert_parser.get_intermediates_pems(cert_data['intermediates'])) + except OSError: + pass + + try: + with os.fdopen(os.open(filename_pkp, flags)) as pass_file: + cert_data['private_key_passphrase'] = pass_file.read() + except OSError: + pass + + return local_common.LocalCert(**cert_data) + + @staticmethod + def delete_cert(context, cert_ref, **kwargs): + """Deletes the specified cert. + + :param context: Ignored in this implementation + :param cert_ref: the UUID of the cert to delete + + :raises CertificateStorageException: if certificate deletion fails + """ + LOG.info("Deleting certificate %s from the local filesystem.", + cert_ref) + + filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) + + filename_certificate = f"{filename_base}.crt" + filename_private_key = f"{filename_base}.key" + filename_intermediates = f"{filename_base}.int" + filename_pkp = f"{filename_base}.pass" + + try: + os.remove(filename_certificate) + os.remove(filename_private_key) + os.remove(filename_intermediates) + os.remove(filename_pkp) + except OSError as ioe: + LOG.error("Failed to delete certificate %s", cert_ref) + # OSError has no .message attribute on Python 3; format the error + # into the exception's msg kwarg instead. + raise exceptions.CertificateStorageException( + msg=str(ioe)) from ioe + + def set_acls(self, context, cert_ref): + # There is no security on this store, because it's really dumb + pass + + def unset_acls(self, context, cert_ref): + # There is no security on this store, because it's really dumb + pass + + @staticmethod + def get_secret(context, secret_ref): + """Retrieves a secret payload by reference.
diff --git a/octavia/certificates/manager/noop.py b/octavia/certificates/manager/noop.py
new file mode 100644
index 0000000000..f8b53caac6
--- /dev/null
+++ b/octavia/certificates/manager/noop.py
@@ -0,0 +1,106 @@
+# Copyright (c) 2023 Red Hat
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import uuid
+
+from oslo_log import log as logging
+
+from octavia.certificates.common import cert
+from octavia.certificates.common import local
+from octavia.certificates.manager import cert_mgr
+from octavia.common.tls_utils import cert_parser
+from octavia.tests.common import sample_certs
+
+LOG = logging.getLogger(__name__)
+
+
+class NoopCertManager(cert_mgr.CertManager):
+    """Cert manager implementation for no-op operations."""
+
+    def __init__(self):
+        super().__init__()
+        self._local_cert = None
+
+    @property
+    def local_cert(self):
+        if self._local_cert is None:
+            self._local_cert = self.store_cert(
+                None,
+                sample_certs.X509_CERT,
+                sample_certs.X509_CERT_KEY_ENCRYPTED,
+                sample_certs.X509_IMDS,
+                private_key_passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE)
+        return self._local_cert
+
+    def store_cert(self, context, certificate, private_key,
+                   intermediates=None, private_key_passphrase=None,
+                   **kwargs) -> cert.Cert:
+        """Stores (i.e., registers) a cert with the cert manager.
+
+        This no-op implementation does not touch the filesystem; it builds
+        and returns an in-memory Cert object from the supplied data.
+
+        :param context: Ignored in this implementation
+        :param certificate: PEM encoded TLS certificate
+        :param private_key: private key for the supplied certificate
+        :param intermediates: ordered and concatenated intermediate certs
+        :param private_key_passphrase: optional passphrase for the supplied
+                                       key
+
+        :returns: the in-memory Cert representation of the stored data
+        """
+        cert_ref = str(uuid.uuid4())
+        if isinstance(certificate, bytes):
+            certificate = certificate.decode('utf-8')
+        if isinstance(private_key, bytes):
+            private_key = private_key.decode('utf-8')
+
+        LOG.debug('Driver %s no-op, store_cert certificate %s, cert_ref %s',
+                  self.__class__.__name__, certificate, cert_ref)
+
+        cert_data = {'certificate': certificate, 'private_key': private_key}
+        if intermediates:
+            if isinstance(intermediates, bytes):
+                intermediates = intermediates.decode('utf-8')
+            cert_data['intermediates'] = list(
+                cert_parser.get_intermediates_pems(intermediates))
+        if private_key_passphrase:
+            if isinstance(private_key_passphrase, bytes):
+                private_key_passphrase = private_key_passphrase.decode(
+                    'utf-8')
+            cert_data['private_key_passphrase'] = private_key_passphrase
+
+        return local.LocalCert(**cert_data)
+
+    def get_cert(self, context, cert_ref, check_only=True, **kwargs) -> (
+            cert.Cert):
+        LOG.debug('Driver %s no-op, get_cert with cert_ref %s',
+                  self.__class__.__name__, cert_ref)
+        return self.local_cert
+
+    def delete_cert(self, context, cert_ref, resource_ref, service_name=None):
+        LOG.debug('Driver %s no-op, delete_cert with cert_ref %s',
+                  self.__class__.__name__, cert_ref)
+
+    def set_acls(self, context, cert_ref):
+        LOG.debug('Driver %s no-op, set_acls with cert_ref %s',
+                  self.__class__.__name__, cert_ref)
+
+    def unset_acls(self, context, cert_ref):
+        LOG.debug('Driver %s no-op, unset_acls with cert_ref %s',
+                  self.__class__.__name__, cert_ref)
+
+    def get_secret(self, context, secret_ref) -> cert.Cert:
+        LOG.debug('Driver %s no-op, get_secret with secret_ref %s',
+                  self.__class__.__name__, secret_ref)
+        return self.local_cert
diff --git a/octavia/cmd/__init__.py b/octavia/cmd/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/cmd/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
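A quick sketch of what the NoopCertManager introduced above gives a test (hypothetical snippet; it relies on the in-tree `sample_certs` data named in the imports):

```python
from octavia.certificates.manager import noop

mgr = noop.NoopCertManager()
# Any reference returns the same in-memory sample certificate, so tests can
# exercise cert-handling code paths without Barbican or filesystem access.
sample = mgr.get_cert(None, 'any-ref-at-all')
print(sample.get_certificate()[:40])
```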
diff --git a/octavia/cmd/agent.py b/octavia/cmd/agent.py
new file mode 100644
index 0000000000..6475e65229
--- /dev/null
+++ b/octavia/cmd/agent.py
@@ -0,0 +1,95 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# make sure PYTHONPATH includes the home directory if you didn't install
+
+import multiprocessing as multiproc
+import ssl
+import sys
+
+import gunicorn.app.base
+from oslo_config import cfg
+from oslo_reports import guru_meditation_report as gmr
+from oslo_reports import opts as gmr_opts
+
+from octavia.amphorae.backends.agent.api_server import server
+from octavia.amphorae.backends.health_daemon import health_daemon
+from octavia.common import service
+from octavia.common import utils
+from octavia import version
+
+
+CONF = cfg.CONF
+
+
+class AmphoraAgent(gunicorn.app.base.BaseApplication):
+    def __init__(self, app, options=None):
+        self.options = options or {}
+        self.application = app
+        super().__init__()
+
+    def load_config(self):
+        config = {key: value for key, value in self.options.items()
+                  if key in self.cfg.settings and value is not None}
+        for key, value in config.items():
+            self.cfg.set(key.lower(), value)
+
+    def load(self):
+        return self.application
+
+
+# start the amphora agent API server
+def main():
+    # Parse the config options and set up logging.
+    service.prepare_service(sys.argv)
+
+    gmr_opts.set_defaults(CONF)
+    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
+
+    # Set up a multiprocessing manager and queue to share between the
+    # health manager sender and the workers. This allows us to reload the
+    # configuration into the health manager sender process.
+    hm_queue = multiproc.Manager().Queue()
+
+    health_sender_proc = multiproc.Process(name='HM_sender',
+                                           target=health_daemon.run_sender,
+                                           args=(hm_queue,))
+    health_sender_proc.daemon = True
+    health_sender_proc.start()
+
+    # Instantiate the server class
+    server_instance = server.Server(hm_queue)
+
+    bind_ip_port = utils.ip_port_str(CONF.haproxy_amphora.bind_host,
+                                     CONF.haproxy_amphora.bind_port)
+    proto = CONF.amphora_agent.agent_tls_protocol.replace('.', '_')
+    options = {
+        'bind': bind_ip_port,
+        'workers': 1,
+        'timeout': CONF.amphora_agent.agent_request_read_timeout,
+        'certfile': CONF.amphora_agent.agent_server_cert,
+        'ca_certs': CONF.amphora_agent.agent_server_ca,
+        'cert_reqs': ssl.CERT_REQUIRED,
+        'ssl_version': getattr(ssl, f"PROTOCOL_{proto}"),
+        'preload_app': True,
+        'accesslog': '/var/log/amphora-agent.log',
+        'errorlog': '/var/log/amphora-agent.log',
+        'loglevel': 'debug',
+        'syslog': True,
+        'syslog_facility': (
+            f'local{CONF.amphora_agent.administrative_log_facility}'),
+        'syslog_addr': 'unix://run/rsyslog/octavia/log#dgram',
+    }
+    AmphoraAgent(server_instance.app, options).run()
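One detail worth calling out in the options above: gunicorn's `ssl_version` is resolved by rewriting the configured protocol name into the matching `ssl` module constant. A minimal standalone illustration (not part of the patch):

```python
import ssl


def resolve_ssl_version(agent_tls_protocol: str):
    # "TLSv1.2" becomes "TLSv1_2", matching the ssl constant naming, so
    # getattr() returns ssl.PROTOCOL_TLSv1_2; unknown protocol names raise
    # AttributeError.
    proto = agent_tls_protocol.replace('.', '_')
    return getattr(ssl, f"PROTOCOL_{proto}")


assert resolve_ssl_version("TLSv1.2") is ssl.PROTOCOL_TLSv1_2
```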
diff --git a/octavia/cmd/api.py b/octavia/cmd/api.py
new file mode 100644
index 0000000000..7dcc43ee2c
--- /dev/null
+++ b/octavia/cmd/api.py
@@ -0,0 +1,55 @@
+# Copyright 2014 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+from wsgiref import simple_server
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_reports import guru_meditation_report as gmr
+from oslo_reports import opts as gmr_opts
+
+from octavia.api import app as api_app
+from octavia.common import constants
+from octavia import version
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+def main():
+    # TODO(tkajinam): We should consider adding this to wsgi app too so that
+    # GMR can be used even when api is run by uwsgi/mod_wsgi/etc.
+    gmr_opts.set_defaults(CONF)
+    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
+
+    app = api_app.setup_app(argv=sys.argv)
+
+    host = CONF.api_settings.bind_host
+    port = CONF.api_settings.bind_port
+    LOG.info("Starting API server on %(host)s:%(port)s",
+             {"host": host, "port": port})
+    if CONF.api_settings.auth_strategy != constants.KEYSTONE:
+        LOG.warning('Octavia configuration [api_settings] auth_strategy is '
+                    'not set to "keystone". This is not a normal '
+                    'configuration and you may get "Missing project ID" '
+                    'errors from API calls.')
+    LOG.warning('You are running the Octavia API wsgi application using '
+                'simple_server. We do not recommend this outside of simple '
+                'testing. We recommend running the Octavia API wsgi '
+                'application with a more full-featured server such as '
+                'gunicorn or uWSGI.')
+    srv = simple_server.make_server(host, port, app)
+
+    srv.serve_forever()
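To make the simple_server warning above concrete, here is a hedged sketch of serving the same app under gunicorn instead, reusing the BaseApplication wrapper pattern from agent.py; the bind address and worker count are illustrative assumptions, not project defaults:

```python
import sys

import gunicorn.app.base

from octavia.api import app as api_app


class OctaviaAPI(gunicorn.app.base.BaseApplication):
    def __init__(self, app, options=None):
        self.options = options or {}
        self.application = app
        super().__init__()

    def load_config(self):
        # Copy only the options that gunicorn actually understands.
        for key, value in self.options.items():
            if key in self.cfg.settings and value is not None:
                self.cfg.set(key.lower(), value)

    def load(self):
        return self.application


if __name__ == '__main__':
    OctaviaAPI(api_app.setup_app(argv=sys.argv),
               {'bind': '127.0.0.1:9876', 'workers': 2}).run()
```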
diff --git a/octavia/cmd/driver_agent.py b/octavia/cmd/driver_agent.py
new file mode 100644
index 0000000000..67148b3807
--- /dev/null
+++ b/octavia/cmd/driver_agent.py
@@ -0,0 +1,171 @@
+# Copyright 2018 Rackspace, US Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from functools import partial
+import multiprocessing
+import os
+import signal
+import sys
+import time
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_reports import guru_meditation_report as gmr
+from oslo_reports import opts as gmr_opts
+import setproctitle
+from stevedore import enabled as stevedore_enabled
+
+from octavia.api.drivers.driver_agent import driver_listener
+from octavia.common import service
+from octavia import version
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+PROVIDER_AGENT_PROCESSES = []
+
+
+def _mutate_config(*args, **kwargs):
+    CONF.mutate_config_files()
+
+
+def _handle_mutate_config(status_proc_pid, stats_proc_pid, get_proc_pid,
+                          *args, **kwargs):
+    LOG.info("Driver agent received HUP signal, mutating config.")
+    _mutate_config()
+    os.kill(status_proc_pid, signal.SIGHUP)
+    os.kill(stats_proc_pid, signal.SIGHUP)
+    # The partial() in main() passes all three listener PIDs, so the get
+    # listener must be signaled to reload as well.
+    os.kill(get_proc_pid, signal.SIGHUP)
+
+
+def _check_if_provider_agent_enabled(extension):
+    return extension.name in CONF.driver_agent.enabled_provider_agents
+
+
+def _process_wrapper(exit_event, proc_name, function, agent_name=None):
+    signal.signal(signal.SIGINT, signal.SIG_IGN)
+    signal.signal(signal.SIGHUP, _mutate_config)
+    if agent_name:
+        process_title = f'octavia-driver-agent - {proc_name} -- {agent_name}'
+    else:
+        process_title = f'octavia-driver-agent - {proc_name}'
+    setproctitle.setproctitle(process_title)
+    while not exit_event.is_set():
+        try:
+            function(exit_event)
+        except Exception as e:
+            if agent_name:
+                LOG.exception('Provider agent "%s" raised exception: %s. '
+                              'Restarting the "%s" provider agent.',
+                              agent_name, str(e), agent_name)
+            else:
+                LOG.exception('%s raised exception: %s. '
+                              'Restarting %s.',
+                              proc_name, str(e), proc_name)
+            time.sleep(1)
+            continue
+        break
+
+
+def _start_provider_agents(exit_event):
+    extensions = stevedore_enabled.EnabledExtensionManager(
+        namespace='octavia.driver_agent.provider_agents',
+        check_func=_check_if_provider_agent_enabled)
+    for ext in extensions:
+        ext_process = multiprocessing.Process(
+            name=ext.name, target=_process_wrapper,
+            args=(exit_event, 'provider_agent', ext.plugin),
+            kwargs={'agent_name': ext.name})
+        PROVIDER_AGENT_PROCESSES.append(ext_process)
+        ext_process.start()
+        LOG.info('Started enabled provider agent: "%s" with PID: %d.',
+                 ext.name, ext_process.pid)
+
+
+def main():
+    service.prepare_service(sys.argv)
+    LOG.debug('Full set of CONF:')
+    CONF.log_opt_values(LOG, logging.DEBUG)
+
+    gmr_opts.set_defaults(CONF)
+    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
+
+    processes = []
+    exit_event = multiprocessing.Event()
+
+    status_listener_proc = multiprocessing.Process(
+        name='status_listener', target=_process_wrapper,
+        args=(exit_event, 'status_listener', driver_listener.status_listener))
+    processes.append(status_listener_proc)
+
+    LOG.info("Driver agent status listener process starts:")
+    status_listener_proc.start()
+
+    stats_listener_proc = multiprocessing.Process(
+        name='stats_listener', target=_process_wrapper,
+        args=(exit_event, 'stats_listener', driver_listener.stats_listener))
+    processes.append(stats_listener_proc)
+
+    LOG.info("Driver agent statistics listener process starts:")
+    stats_listener_proc.start()
+
+    get_listener_proc = multiprocessing.Process(
+        name='get_listener', target=_process_wrapper,
+        args=(exit_event, 'get_listener', driver_listener.get_listener))
+    processes.append(get_listener_proc)
+
+    LOG.info("Driver agent get listener process starts:")
+    get_listener_proc.start()
+
+    _start_provider_agents(exit_event)
+
+    def process_cleanup(*args, **kwargs):
+        LOG.info("Driver agent exiting due to signal.")
+        exit_event.set()
+        status_listener_proc.join()
+        stats_listener_proc.join()
+        get_listener_proc.join()
+
+        for proc in PROVIDER_AGENT_PROCESSES:
+            LOG.info('Waiting up to %s seconds for provider agent "%s" to '
+                     'shutdown.',
+                     CONF.driver_agent.provider_agent_shutdown_timeout,
+                     proc.name)
+            try:
+                proc.join(CONF.driver_agent.provider_agent_shutdown_timeout)
+                if proc.exitcode is None:
+                    # TODO(johnsom) Change to proc.kill() once
+                    # python 3.7 or newer only
+                    os.kill(proc.pid, signal.SIGKILL)
+                    LOG.warning(
+                        'Forcefully killed "%s" provider agent because it '
+                        'failed to shutdown in %s seconds.', proc.name,
+                        CONF.driver_agent.provider_agent_shutdown_timeout)
+            except Exception as e:
+                LOG.warning('Unknown error "%s" while shutting down "%s", '
+                            'ignoring and continuing shutdown process.',
+                            str(e), proc.name)
+            else:
+                LOG.info('Provider agent "%s" has successfully shutdown.',
+                         proc.name)
+
+    signal.signal(signal.SIGTERM, process_cleanup)
+    signal.signal(signal.SIGHUP, partial(
+        _handle_mutate_config, status_listener_proc.pid,
+        stats_listener_proc.pid, get_listener_proc.pid))
+
+    try:
+        for process in processes:
+            process.join()
+    except KeyboardInterrupt:
+        process_cleanup()
diff --git a/octavia/cmd/haproxy_vrrp_check.py b/octavia/cmd/haproxy_vrrp_check.py
new file mode 100644
index 0000000000..db675be647
--- /dev/null
+++ b/octavia/cmd/haproxy_vrrp_check.py
@@ -0,0 +1,67 @@
+# Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+import sys
+
+SOCKET_TIMEOUT = 5
+
+
+def get_status(sock_address):
+    """Query the haproxy stats socket.
+
+    VRRP will only fail over if the stats socket is not responding.
+
+    :param sock_address: unix socket file
+    :return: 0 if haproxy responded, 1 if it did not
+    """
+    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    s.settimeout(SOCKET_TIMEOUT)
+    s.connect(sock_address)
+    s.send(b'show stat -1 -1 -1\n')
+    data = b''
+    while True:
+        x = s.recv(1024)
+        if not x:
+            break
+        data += x
+    s.close()
+    # If we received nothing back, haproxy did not respond.
+    if not data:
+        return 1
+    return 0
+
+
+def health_check(sock_addresses):
+    """Invoke queries for all defined listeners
+
+    :param sock_addresses: list of unix socket files to query
+    :return: 0 if all listeners responded, otherwise the number of failures
+    """
+    status = 0
+    for address in sock_addresses:
+        status += get_status(address)
+    return status
+
+
+def main():
+    # usage: haproxy_vrrp_check.py <socket_file> [<socket_file> ...]
+    # Note: for performance, this script loads a minimal number of modules.
+    # Loading octavia modules or any other complex construct MUST be avoided.
+    listeners_sockets = sys.argv[1:]
+    try:
+        status = health_check(listeners_sockets)
+    except Exception:
+        sys.exit(1)
+    sys.exit(status)
diff --git a/octavia/cmd/health_checker.py b/octavia/cmd/health_checker.py
new file mode 100644
index 0000000000..c3e5d81633
--- /dev/null
+++ b/octavia/cmd/health_checker.py
@@ -0,0 +1,264 @@
+# Copyright 2020 Red Hat, Inc.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import struct +import sys +import time + +import random + + +# Adapted from https://opendev.org/openstack/os-ken/src/branch/ +# master/os_ken/lib/packet/sctp.py +def crc32c(data): + # from RFC 3309 + crc_c = [ + 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, + 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, + 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, + 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, + 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, + 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, + 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, + 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, + 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, + 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, + 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, + 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, + 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, + 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, + 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, + 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, + 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, + 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, + 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, + 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, + 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, + 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, + 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, + 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, + 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, + 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, + 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, + 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, + 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, + 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, + 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, + 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, + 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, + 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, + 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, + 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, + 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, + 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, + 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, + 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, + 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, + 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, + 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, + 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, + 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, + 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, + 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, + 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, + 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, + 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, + 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, + 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, + 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, + 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, + 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, + 0x9E902E7B, 
0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+        0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
+        0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+        0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
+        0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+        0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
+        0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+        0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
+        0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
+    ]
+
+    crc32 = 0xffffffff
+    for c in data:
+        crc32 = (crc32 >> 8) ^ crc_c[(crc32 ^ c) & 0xFF]
+    crc32 = (~crc32) & 0xffffffff
+    return struct.unpack(">I", struct.pack("<I", crc32))[0]
+        sys.exit(1)
+
+    try:
+        interface_cmd(interface_name, action)
+    except Exception as e:
+        print(f"Error: {e}")
+        sys.exit(2)
diff --git a/octavia/cmd/octavia_worker.py b/octavia/cmd/octavia_worker.py
new file mode 100644
index 0000000000..68804990aa
--- /dev/null
+++ b/octavia/cmd/octavia_worker.py
@@ -0,0 +1,40 @@
+# Copyright 2014 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+import cotyledon
+from cotyledon import oslo_config_glue
+from oslo_config import cfg
+from oslo_reports import guru_meditation_report as gmr
+from oslo_reports import opts as gmr_opts
+
+from octavia.common import service as octavia_service
+from octavia.controller.queue.v2 import consumer as consumer_v2
+from octavia import version
+
+CONF = cfg.CONF
+
+
+def main():
+    octavia_service.prepare_service(sys.argv)
+
+    gmr_opts.set_defaults(CONF)
+    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
+
+    sm = cotyledon.ServiceManager()
+    sm.add(consumer_v2.ConsumerService,
+           workers=CONF.controller_worker.workers, args=(CONF,))
+    oslo_config_glue.setup(sm, CONF, reload_method="mutate")
+    sm.run()
diff --git a/octavia/cmd/prometheus_proxy.py b/octavia/cmd/prometheus_proxy.py
new file mode 100644
index 0000000000..1c4a22a44e
--- /dev/null
+++ b/octavia/cmd/prometheus_proxy.py
@@ -0,0 +1,809 @@
+# Copyright 2022 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This prometheus-proxy is intended to abstract the prometheus metrics
+# exported from the reference provider driver load balancing engines (haproxy
+# and lvs) such that all of the provider drivers can expose a consistent set
+# of metrics. It also aligns the terms to be consistent with Octavia
+# terminology.
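The mapping table that follows drives a simple line-rewriting loop in `do_GET` further below: label substitutions (tuple[2]) are applied first, then the metric name is swapped for its Octavia equivalent (tuple[0]). A toy sketch of those mechanics, using one real mapping from the table (the single-entry dicts are illustrative stand-ins for METRIC_MAP):

```python
# Single-entry stand-ins for the full METRIC_MAP / label substitutions.
RENAMES = {"haproxy_frontend_bytes_in_total":
           "octavia_listener_bytes_in_total"}
LABEL_RENAMES = {"proxy=": "listener="}


def translate(line: str) -> str:
    for old, new in RENAMES.items():
        if line.startswith(old):
            # Labels first, then the metric name, mirroring the proxy.
            for old_label, new_label in LABEL_RENAMES.items():
                line = line.replace(old_label, new_label)
            return line.replace(old, new, 1)
    return line


print(translate('haproxy_frontend_bytes_in_total{proxy="lst1"} 1024'))
# -> octavia_listener_bytes_in_total{listener="lst1"} 1024
```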
+ +from http.server import SimpleHTTPRequestHandler +from http.server import ThreadingHTTPServer +import os +import signal +import sys +import threading +import time +import traceback +import urllib.request + +import psutil + +from octavia.amphorae.backends.utils import network_namespace +from octavia.common import constants as consts + +METRICS_URL = "http://127.0.0.1:9101/metrics" +PRINT_REJECTED = False +EXIT_EVENT = threading.Event() + +# A dictionary of prometheus metrics mappings. +# Key: The metric string to match +# Value: A tuple of replacement data. +# tuple[0]: The string to replace the key with. +# tuple[1]: If not None, the replacement HELP line for the metric. +# If tuple[1] is None, the key will be replaced in the HELP string. +# tuple[2]: If not None, includes a dictionary of additional substitutions. +# tuple[2] substitutions happen prior to the key replacement in tuple[0]. +METRIC_MAP = { + # Load balancer metrics + "haproxy_process_pool_allocated_bytes ": + ("octavia_memory_pool_allocated_bytes ", + "# HELP octavia_memory_pool_allocated_bytes Total amount of memory " + "allocated in the memory pools (in bytes).\n", None), + "haproxy_process_pool_used_bytes ": + ("octavia_memory_pool_used_bytes ", + "# HELP octavia_memory_pool_used_bytes Total amount of memory used " + "in the memory pools (in bytes).\n", None), + "haproxy_process_pool_failures_total ": + ("octavia_memory_pool_failures_total ", + "# HELP octavia_memory_pool_failures_total Total number of failed " + "memory pool allocations.\n", None), + "haproxy_process_max_connections ": + ("octavia_loadbalancer_max_connections ", None, None), + "haproxy_process_current_connections ": + ("octavia_loadbalancer_current_connections ", None, None), + # TODO(johnsom) consider adding in UDP + "haproxy_process_connections_total ": + ("octavia_loadbalancer_connections_total ", None, None), + # TODO(johnsom) consider adding in UDP (and update help string) + "haproxy_process_requests_total ": + ("octavia_loadbalancer_requests_total ", None, None), + "haproxy_process_max_ssl_connections ": + ("octavia_loadbalancer_max_ssl_connections ", None, None), + "haproxy_process_current_ssl_connections ": + ("octavia_loadbalancer_current_ssl_connections ", + "# HELP octavia_loadbalancer_current_ssl_connections Number of " + "active SSL connections.\n", None), + "haproxy_process_ssl_connections_total ": + ("octavia_loadbalancer_ssl_connections_total ", None, None), + "haproxy_process_current_connection_rate ": + ("octavia_loadbalancer_current_connection_rate ", None, None), + "haproxy_process_limit_connection_rate ": + ("octavia_loadbalancer_limit_connection_rate ", None, None), + "haproxy_process_max_connection_rate ": + ("octavia_loadbalancer_max_connection_rate ", None, None), + "haproxy_process_current_session_rate ": + ("octavia_loadbalancer_current_session_rate ", None, None), + "haproxy_process_limit_session_rate ": + ("octavia_loadbalancer_limit_session_rate ", None, None), + "haproxy_process_max_session_rate ": + ("octavia_loadbalancer_max_session_rate ", None, None), + "haproxy_process_current_ssl_rate ": + ("octavia_loadbalancer_current_ssl_rate ", None, None), + "haproxy_process_limit_ssl_rate ": + ("octavia_loadbalancer_limit_ssl_rate ", None, None), + "haproxy_process_max_ssl_rate ": + ("octavia_loadbalancer_max_ssl_rate ", None, None), + "haproxy_process_current_frontend_ssl_key_rate ": + ("octavia_loadbalancer_current_frontend_ssl_key_rate ", None, None), + "haproxy_process_max_frontend_ssl_key_rate ": + 
("octavia_loadbalancer_max_frontend_ssl_key_rate ", None, None), + "haproxy_process_frontend_ssl_reuse ": + ("octavia_loadbalancer_frontend_ssl_reuse ", None, None), + "haproxy_process_current_backend_ssl_key_rate ": + ("octavia_loadbalancer_current_backend_ssl_key_rate ", None, None), + "haproxy_process_max_backend_ssl_key_rate ": + ("octavia_loadbalancer_max_backend_ssl_key_rate ", None, None), + "haproxy_process_ssl_cache_lookups_total ": + ("octavia_loadbalancer_ssl_cache_lookups_total ", None, None), + "haproxy_process_ssl_cache_misses_total ": + ("octavia_loadbalancer_ssl_cache_misses_total ", None, None), + "haproxy_process_http_comp_bytes_in_total ": + ("octavia_loadbalancer_http_comp_bytes_in_total ", None, None), + "haproxy_process_http_comp_bytes_out_total ": + ("octavia_loadbalancer_http_comp_bytes_out_total ", None, None), + "haproxy_process_limit_http_comp ": + ("octavia_loadbalancer_limit_http_comp ", None, None), + "haproxy_process_listeners ": + ("octavia_loadbalancer_listeners ", None, None), + "haproxy_process_dropped_logs_total ": + ("octavia_loadbalancer_dropped_logs_total ", None, None), + + # Listener metrics + "haproxy_frontend_status ": + ("octavia_listener_status ", + "# HELP octavia_listener_status Current status of the listener.\n", + None), + "haproxy_frontend_status{": + ("octavia_listener_status{", None, {"proxy=": "listener="}), + "haproxy_frontend_current_sessions ": + ("octavia_listener_current_sessions ", None, None), + "haproxy_frontend_current_sessions{": + ("octavia_listener_current_sessions{", None, + {"proxy=": "listener="}), + "haproxy_frontend_max_sessions ": + ("octavia_listener_max_sessions ", None, None), + "haproxy_frontend_max_sessions{": + ("octavia_listener_max_sessions{", None, {"proxy=": "listener="}), + "haproxy_frontend_limit_sessions ": + ("octavia_listener_limit_sessions ", None, None), + "haproxy_frontend_limit_sessions{": + ("octavia_listener_limit_sessions{", None, {"proxy=": "listener="}), + "haproxy_frontend_sessions_total ": + ("octavia_listener_sessions_total ", None, None), + "haproxy_frontend_sessions_total{": + ("octavia_listener_sessions_total{", None, {"proxy=": "listener="}), + "haproxy_frontend_limit_session_rate ": + ("octavia_listener_limit_session_rate ", None, None), + "haproxy_frontend_limit_session_rate{": + ("octavia_listener_limit_session_rate{", None, + {"proxy=": "listener="}), + "haproxy_frontend_max_session_rate ": + ("octavia_listener_max_session_rate ", None, None), + "haproxy_frontend_max_session_rate{": + ("octavia_listener_max_session_rate{", None, + {"proxy=": "listener="}), + "haproxy_frontend_connections_rate_max ": + ("octavia_listener_connections_rate_max ", None, None), + "haproxy_frontend_connections_rate_max{": + ("octavia_listener_connections_rate_max{", None, + {"proxy=": "listener="}), + "haproxy_frontend_connections_total ": + ("octavia_listener_connections_total ", None, None), + "haproxy_frontend_connections_total{": + ("octavia_listener_connections_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_bytes_in_total ": + ("octavia_listener_bytes_in_total ", None, None), + "haproxy_frontend_bytes_in_total{": + ("octavia_listener_bytes_in_total{", None, {"proxy=": "listener="}), + "haproxy_frontend_bytes_out_total ": + ("octavia_listener_bytes_out_total ", None, None), + "haproxy_frontend_bytes_out_total{": + ("octavia_listener_bytes_out_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_requests_denied_total ": + ("octavia_listener_requests_denied_total ", None, None), + 
"haproxy_frontend_requests_denied_total{": + ("octavia_listener_requests_denied_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_responses_denied_total ": + ("octavia_listener_responses_denied_total ", None, None), + "haproxy_frontend_responses_denied_total{": + ("octavia_listener_responses_denied_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_request_errors_total ": + ("octavia_listener_request_errors_total ", None, None), + "haproxy_frontend_request_errors_total{": + ("octavia_listener_request_errors_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_denied_connections_total ": + ("octavia_listener_denied_connections_total ", + "# HELP octavia_listener_denied_connections_total Total number of " + "requests denied by connection rules.\n", None), + "haproxy_frontend_denied_connections_total{": + ("octavia_listener_denied_connections_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_denied_sessions_total ": + ("octavia_listener_denied_sessions_total ", + "# HELP octavia_listener_denied_sessions_total Total number of " + "requests denied by session rules.\n", None), + "haproxy_frontend_denied_sessions_total{": + ("octavia_listener_denied_sessions_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_failed_header_rewriting_total ": + ("octavia_listener_failed_header_rewriting_total ", + "# HELP octavia_listener_failed_header_rewriting_total Total number " + "of failed header rewriting rules.\n", None), + "haproxy_frontend_failed_header_rewriting_total{": + ("octavia_listener_failed_header_rewriting_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_http_requests_rate_max ": + ("octavia_listener_http_requests_rate_max ", None, None), + "haproxy_frontend_http_requests_rate_max{": + ("octavia_listener_http_requests_rate_max{", None, + {"proxy=": "listener="}), + "haproxy_frontend_http_requests_total ": + ("octavia_listener_http_requests_total ", None, None), + "haproxy_frontend_http_requests_total{": + ("octavia_listener_http_requests_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_http_responses_total ": + ("octavia_listener_http_responses_total ", None, None), + "haproxy_frontend_http_responses_total{": + ("octavia_listener_http_responses_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_intercepted_requests_total ": + ("octavia_listener_intercepted_requests_total ", None, None), + "haproxy_frontend_intercepted_requests_total{": + ("octavia_listener_intercepted_requests_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_http_cache_lookups_total ": + ("octavia_listener_http_cache_lookups_total ", None, None), + "haproxy_frontend_http_cache_lookups_total{": + ("octavia_listener_http_cache_lookups_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_http_cache_hits_total ": + ("octavia_listener_http_cache_hits_total ", None, None), + "haproxy_frontend_http_cache_hits_total{": + ("octavia_listener_http_cache_hits_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_http_comp_bytes_in_total ": + ("octavia_listener_http_comp_bytes_in_total ", None, None), + "haproxy_frontend_http_comp_bytes_in_total{": + ("octavia_listener_http_comp_bytes_in_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_http_comp_bytes_out_total ": + ("octavia_listener_http_comp_bytes_out_total ", None, None), + "haproxy_frontend_http_comp_bytes_out_total{": + ("octavia_listener_http_comp_bytes_out_total{", None, + {"proxy=": "listener="}), + 
"haproxy_frontend_http_comp_bytes_bypassed_total ": + ("octavia_listener_http_comp_bytes_bypassed_total ", None, None), + "haproxy_frontend_http_comp_bytes_bypassed_total{": + ("octavia_listener_http_comp_bytes_bypassed_total{", None, + {"proxy=": "listener="}), + "haproxy_frontend_http_comp_responses_total ": + ("octavia_listener_http_comp_responses_total ", None, None), + "haproxy_frontend_http_comp_responses_total{": + ("octavia_listener_http_comp_responses_total{", None, + {"proxy=": "listener="}), + + # Pool Metrics + "haproxy_backend_status ": + ("octavia_pool_status ", + "# HELP octavia_pool_status Current status of the pool.\n", None), + "haproxy_backend_status{": + ("octavia_pool_status{", None, {"proxy=": "pool="}), + "haproxy_backend_current_sessions ": + ("octavia_pool_current_sessions ", None, None), + "haproxy_backend_current_sessions{": + ("octavia_pool_current_sessions{", None, {"proxy=": "pool="}), + "haproxy_backend_max_sessions ": + ("octavia_pool_max_sessions ", None, None), + "haproxy_backend_max_sessions{": + ("octavia_pool_max_sessions{", None, {"proxy=": "pool="}), + "haproxy_backend_limit_sessions ": + ("octavia_pool_limit_sessions ", None, None), + "haproxy_backend_limit_sessions{": + ("octavia_pool_limit_sessions{", None, {"proxy=": "pool="}), + "haproxy_backend_sessions_total ": + ("octavia_pool_sessions_total ", None, None), + "haproxy_backend_sessions_total{": + ("octavia_pool_sessions_total{", None, {"proxy=": "pool="}), + "haproxy_backend_max_session_rate ": + ("octavia_pool_max_session_rate ", None, None), + "haproxy_backend_max_session_rate{": + ("octavia_pool_max_session_rate{", None, {"proxy=": "pool="}), + "haproxy_backend_last_session_seconds ": + ("octavia_pool_last_session_seconds ", + "# HELP octavia_pool_last_session_seconds Number of seconds since " + "last session assigned to a member.\n", None, None), + "haproxy_backend_last_session_seconds{": + ("octavia_pool_last_session_seconds{", None, {"proxy=": "pool="}), + "haproxy_backend_current_queue ": + ("octavia_pool_current_queue ", None, None), + "haproxy_backend_current_queue{": + ("octavia_pool_current_queue{", None, {"proxy=": "pool="}), + "haproxy_backend_max_queue ": + ("octavia_pool_max_queue ", None, None), + "haproxy_backend_max_queue{": + ("octavia_pool_max_queue{", None, {"proxy=": "pool="}), + "haproxy_backend_connection_attempts_total ": + ("octavia_pool_connection_attempts_total ", None, None), + "haproxy_backend_connection_attempts_total{": + ("octavia_pool_connection_attempts_total{", None, + {"proxy=": "pool="}), + "haproxy_backend_connection_reuses_total ": + ("octavia_pool_connection_reuses_total ", None, None), + "haproxy_backend_connection_reuses_total{": + ("octavia_pool_connection_reuses_total{", None, + {"proxy=": "pool="}), + "haproxy_backend_bytes_in_total ": + ("octavia_pool_bytes_in_total ", None, None), + "haproxy_backend_bytes_in_total{": + ("octavia_pool_bytes_in_total{", None, {"proxy=": "pool="}), + "haproxy_backend_bytes_out_total ": + ("octavia_pool_bytes_out_total ", None, None), + "haproxy_backend_bytes_out_total{": + ("octavia_pool_bytes_out_total{", None, {"proxy=": "pool="}), + "haproxy_backend_queue_time_average_seconds ": + ("octavia_pool_queue_time_average_seconds ", None, None), + "haproxy_backend_queue_time_average_seconds{": + ("octavia_pool_queue_time_average_seconds{", None, + {"proxy=": "pool="}), + "haproxy_backend_connect_time_average_seconds ": + ("octavia_pool_connect_time_average_seconds ", None, None), + 
"haproxy_backend_connect_time_average_seconds{": + ("octavia_pool_connect_time_average_seconds{", None, + {"proxy=": "pool="}), + "haproxy_backend_response_time_average_seconds ": + ("octavia_pool_response_time_average_seconds ", None, None), + "haproxy_backend_response_time_average_seconds{": + ("octavia_pool_response_time_average_seconds{", None, + {"proxy=": "pool="}), + "haproxy_backend_total_time_average_seconds ": + ("octavia_pool_total_time_average_seconds ", None, None), + "haproxy_backend_total_time_average_seconds{": + ("octavia_pool_total_time_average_seconds{", None, + {"proxy=": "pool="}), + "haproxy_backend_max_queue_time_seconds ": + ("octavia_pool_max_queue_time_seconds ", None, None), + "haproxy_backend_max_queue_time_seconds{": + ("octavia_pool_max_queue_time_seconds{", None, {"proxy=": "pool="}), + "haproxy_backend_max_connect_time_seconds ": + ("octavia_pool_max_connect_time_seconds ", None, None), + "haproxy_backend_max_connect_time_seconds{": + ("octavia_pool_max_connect_time_seconds{", None, + {"proxy=": "pool="}), + "haproxy_backend_max_response_time_seconds ": + ("octavia_pool_max_response_time_seconds ", + "# HELP octavia_pool_max_response_time_seconds Maximum observed " + "time spent waiting for a member response.\n", None), + "haproxy_backend_max_response_time_seconds{": + ("octavia_pool_max_response_time_seconds{", None, + {"proxy=": "pool="}), + "haproxy_backend_max_total_time_seconds ": + ("octavia_pool_max_total_time_seconds ", None, None), + "haproxy_backend_max_total_time_seconds{": + ("octavia_pool_max_total_time_seconds{", None, {"proxy=": "pool="}), + "haproxy_backend_requests_denied_total ": + ("octavia_pool_requests_denied_total ", None, None), + "haproxy_backend_requests_denied_total{": + ("octavia_pool_requests_denied_total{", None, {"proxy=": "pool="}), + "haproxy_backend_responses_denied_total ": + ("octavia_pool_responses_denied_total ", None, None), + "haproxy_backend_responses_denied_total{": + ("octavia_pool_responses_denied_total{", None, {"proxy=": "pool="}), + "haproxy_backend_connection_errors_total ": + ("octavia_pool_connection_errors_total ", None, None), + "haproxy_backend_connection_errors_total{": + ("octavia_pool_connection_errors_total{", None, + {"proxy=": "pool="}), + "haproxy_backend_response_errors_total ": + ("octavia_pool_response_errors_total ", None, None), + "haproxy_backend_response_errors_total{": + ("octavia_pool_response_errors_total{", None, {"proxy=": "pool="}), + "haproxy_backend_retry_warnings_total ": + ("octavia_pool_retry_warnings_total ", None, None), + "haproxy_backend_retry_warnings_total{": + ("octavia_pool_retry_warnings_total{", None, {"proxy=": "pool="}), + "haproxy_backend_redispatch_warnings_total ": + ("octavia_pool_redispatch_warnings_total ", None, None), + "haproxy_backend_redispatch_warnings_total{": + ("octavia_pool_redispatch_warnings_total{", None, + {"proxy=": "pool="}), + "haproxy_backend_failed_header_rewriting_total ": + ("octavia_pool_failed_header_rewriting_total ", None, None), + "haproxy_backend_failed_header_rewriting_total{": + ("octavia_pool_failed_header_rewriting_total{", None, + {"proxy=": "pool="}), + "haproxy_backend_client_aborts_total ": + ("octavia_pool_client_aborts_total ", None, None), + "haproxy_backend_client_aborts_total{": + ("octavia_pool_client_aborts_total{", None, {"proxy=": "pool="}), + "haproxy_backend_server_aborts_total ": + ("octavia_pool_member_aborts_total ", + "# HELP octavia_pool_member_aborts_total Total number of data " + "transfers aborted by the 
member.\n", None),
+    "haproxy_backend_server_aborts_total{":
+        ("octavia_pool_member_aborts_total{", None, {"proxy=": "pool="}),
+    "haproxy_backend_active_servers ":
+        ("octavia_pool_active_members ",
+         "# HELP octavia_pool_active_members Current number of active "
+         "members.\n", None),
+    "haproxy_backend_active_servers{":
+        ("octavia_pool_active_members{", None, {"proxy=": "pool="}),
+    "haproxy_backend_backup_servers ":
+        ("octavia_pool_backup_members ",
+         "# HELP octavia_pool_backup_members Current number of backup "
+         "members.\n", None),
+    "haproxy_backend_backup_servers{":
+        ("octavia_pool_backup_members{", None, {"proxy=": "pool="}),
+    "haproxy_backend_check_up_down_total ":
+        ("octavia_pool_check_up_down_total ", None, None),
+    "haproxy_backend_check_up_down_total{":
+        ("octavia_pool_check_up_down_total{", None, {"proxy=": "pool="}),
+    "haproxy_backend_check_last_change_seconds ":
+        ("octavia_pool_check_last_change_seconds ", None, None),
+    "haproxy_backend_check_last_change_seconds{":
+        ("octavia_pool_check_last_change_seconds{", None,
+         {"proxy=": "pool="}),
+    "haproxy_backend_downtime_seconds_total ":
+        ("octavia_pool_downtime_seconds_total ",
+         "# HELP octavia_pool_downtime_seconds_total Total downtime "
+         "(in seconds) for the pool.\n", None),
+    "haproxy_backend_downtime_seconds_total{":
+        ("octavia_pool_downtime_seconds_total{", None, {"proxy=": "pool="}),
+    "haproxy_backend_loadbalanced_total ":
+        ("octavia_pool_loadbalanced_total ",
+         "# HELP octavia_pool_loadbalanced_total Total number of times a "
+         "pool was selected, either for new sessions, or when "
+         "redispatching.\n", None),
+    "haproxy_backend_loadbalanced_total{":
+        ("octavia_pool_loadbalanced_total{", None, {"proxy=": "pool="}),
+    "haproxy_backend_http_requests_total ":
+        ("octavia_pool_http_requests_total ", None, None),
+    "haproxy_backend_http_requests_total{":
+        ("octavia_pool_http_requests_total{", None, {"proxy=": "pool="}),
+    "haproxy_backend_http_responses_total ":
+        ("octavia_pool_http_responses_total ", None, None),
+    "haproxy_backend_http_responses_total{":
+        ("octavia_pool_http_responses_total{", None, {"proxy=": "pool="}),
+    "haproxy_backend_http_cache_lookups_total ":
+        ("octavia_pool_http_cache_lookups_total ", None, None),
+    "haproxy_backend_http_cache_lookups_total{":
+        ("octavia_pool_http_cache_lookups_total{", None,
+         {"proxy=": "pool="}),
+    "haproxy_backend_http_cache_hits_total ":
+        ("octavia_pool_http_cache_hits_total ", None, None),
+    "haproxy_backend_http_cache_hits_total{":
+        ("octavia_pool_http_cache_hits_total{", None, {"proxy=": "pool="}),
+    "haproxy_backend_http_comp_bytes_in_total ":
+        ("octavia_pool_http_comp_bytes_in_total ", None, None),
+    "haproxy_backend_http_comp_bytes_in_total{":
+        ("octavia_pool_http_comp_bytes_in_total{", None,
+         {"proxy=": "pool="}),
+    "haproxy_backend_http_comp_bytes_out_total ":
+        ("octavia_pool_http_comp_bytes_out_total ", None, None),
+    "haproxy_backend_http_comp_bytes_out_total{":
+        ("octavia_pool_http_comp_bytes_out_total{", None,
+         {"proxy=": "pool="}),
+    "haproxy_backend_http_comp_bytes_bypassed_total ":
+        ("octavia_pool_http_comp_bytes_bypassed_total ", None, None),
+    "haproxy_backend_http_comp_bytes_bypassed_total{":
+        ("octavia_pool_http_comp_bytes_bypassed_total{", None,
+         {"proxy=": "pool="}),
+    "haproxy_backend_http_comp_responses_total ":
+        ("octavia_pool_http_comp_responses_total ", None, None),
+    "haproxy_backend_http_comp_responses_total{":
+        ("octavia_pool_http_comp_responses_total{", None,
+         {"proxy=": "pool="}),
+
+    # Member Metrics
"haproxy_server_status ": + ("octavia_member_status ", + "# HELP octavia_member_status Current status of the member.\n", None), + "haproxy_server_status{": + ("octavia_member_status{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_current_sessions ": + ("octavia_member_current_sessions ", None, None), + "haproxy_server_current_sessions{": + ("octavia_member_current_sessions{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_max_sessions ": + ("octavia_member_max_sessions ", None, None), + "haproxy_server_max_sessions{": + ("octavia_member_max_sessions{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_limit_sessions ": + ("octavia_member_limit_sessions ", None, None), + "haproxy_server_limit_sessions{": + ("octavia_member_limit_sessions{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_sessions_total ": + ("octavia_member_sessions_total ", None, None), + "haproxy_server_sessions_total{": + ("octavia_member_sessions_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_max_session_rate ": + ("octavia_member_max_session_rate ", None, None), + "haproxy_server_max_session_rate{": + ("octavia_member_max_session_rate{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_last_session_seconds ": + ("octavia_member_last_session_seconds ", + "# HELP octavia_member_last_session_seconds Number of seconds since " + "last session assigned to the member.\n", None), + "haproxy_server_last_session_seconds{": + ("octavia_member_last_session_seconds{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_current_queue ": + ("octavia_member_current_queue ", None, None), + "haproxy_server_current_queue{": + ("octavia_member_current_queue{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_max_queue ": + ("octavia_member_max_queue ", None, None), + "haproxy_server_max_queue{": + ("octavia_member_max_queue{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_queue_limit ": + ("octavia_member_queue_limit ", + "# HELP octavia_member_queue_limit Configured maxqueue for the " + "member (0 meaning no limit).\n", None), + "haproxy_server_queue_limit{": + ("octavia_member_queue_limit{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_bytes_in_total ": + ("octavia_member_bytes_in_total ", None, None), + "haproxy_server_bytes_in_total{": + ("octavia_member_bytes_in_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_bytes_out_total ": + ("octavia_member_bytes_out_total ", None, None), + "haproxy_server_bytes_out_total{": + ("octavia_member_bytes_out_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_queue_time_average_seconds ": + ("octavia_member_queue_time_average_seconds ", None, None), + "haproxy_server_queue_time_average_seconds{": + ("octavia_member_queue_time_average_seconds{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_connect_time_average_seconds ": + ("octavia_member_connect_time_average_seconds ", None, None), + "haproxy_server_connect_time_average_seconds{": + ("octavia_member_connect_time_average_seconds{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_response_time_average_seconds ": + ("octavia_member_response_time_average_seconds ", None, None), + "haproxy_server_response_time_average_seconds{": + ("octavia_member_response_time_average_seconds{", None, + {"proxy=": "pool=", "server=": 
"member="}), + "haproxy_server_total_time_average_seconds ": + ("octavia_member_total_time_average_seconds ", None, None), + "haproxy_server_total_time_average_seconds{": + ("octavia_member_total_time_average_seconds{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_max_queue_time_seconds ": + ("octavia_member_max_queue_time_seconds ", None, None), + "haproxy_server_max_queue_time_seconds{": + ("octavia_member_max_queue_time_seconds{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_max_connect_time_seconds ": + ("octavia_member_max_connect_time_seconds ", None, None), + "haproxy_server_max_connect_time_seconds{": + ("octavia_member_max_connect_time_seconds{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_max_response_time_seconds ": + ("octavia_member_max_response_time_seconds ", + "# HELP octavia_member_max_response_time_seconds Maximum observed " + "time spent waiting for a member response.\n", None), + "haproxy_server_max_response_time_seconds{": + ("octavia_member_max_response_time_seconds{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_max_total_time_seconds ": + ("octavia_member_max_total_time_seconds ", None, None), + "haproxy_server_max_total_time_seconds{": + ("octavia_member_max_total_time_seconds{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_connection_attempts_total ": + ("octavia_member_connection_attempts_total ", None, None), + "haproxy_server_connection_attempts_total{": + ("octavia_member_connection_attempts_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_connection_reuses_total ": + ("octavia_member_connection_reuses_total ", None, None), + "haproxy_server_connection_reuses_total{": + ("octavia_member_connection_reuses_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_responses_denied_total ": + ("octavia_member_responses_denied_total ", None, None), + "haproxy_server_responses_denied_total{": + ("octavia_member_responses_denied_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_connection_errors_total ": + ("octavia_member_connection_errors_total ", None, None), + "haproxy_server_connection_errors_total{": + ("octavia_member_connection_errors_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_response_errors_total ": + ("octavia_member_response_errors_total ", None, None), + "haproxy_server_response_errors_total{": + ("octavia_member_response_errors_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_retry_warnings_total ": + ("octavia_member_retry_warnings_total ", None, None), + "haproxy_server_retry_warnings_total{": + ("octavia_member_retry_warnings_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_redispatch_warnings_total ": + ("octavia_member_redispatch_warnings_total ", None, None), + "haproxy_server_redispatch_warnings_total{": + ("octavia_member_redispatch_warnings_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_failed_header_rewriting_total ": + ("octavia_member_failed_header_rewriting_total ", None, None), + "haproxy_server_failed_header_rewriting_total{": + ("octavia_member_failed_header_rewriting_total{", None, + {"proxy=": "pool=", "server=": "member="}), + "haproxy_server_client_aborts_total ": + ("octavia_member_client_aborts_total ", None, None), + "haproxy_server_client_aborts_total{": + ("octavia_member_client_aborts_total{", None, 
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_server_aborts_total ":
+        ("octavia_member_server_aborts_total ", None, None),
+    "haproxy_server_server_aborts_total{":
+        ("octavia_member_server_aborts_total{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_weight ":
+        ("octavia_member_weight ",
+         "# HELP octavia_member_weight Member weight.\n", None),
+    "haproxy_server_weight{":
+        ("octavia_member_weight{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_check_failures_total ":
+        ("octavia_member_check_failures_total ",
+         "# HELP octavia_member_check_failures_total Total number of failed "
+         "checks (only counts checks failed when the member is up).\n", None),
+    "haproxy_server_check_failures_total{":
+        ("octavia_member_check_failures_total{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_check_up_down_total ":
+        ("octavia_member_check_up_down_total ", None, None),
+    "haproxy_server_check_up_down_total{":
+        ("octavia_member_check_up_down_total{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_downtime_seconds_total ":
+        ("octavia_member_downtime_seconds_total ",
+         "# HELP octavia_member_downtime_seconds_total Total downtime (in "
+         "seconds) for the member.\n", None),
+    "haproxy_server_downtime_seconds_total{":
+        ("octavia_member_downtime_seconds_total{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_check_last_change_seconds ":
+        ("octavia_member_check_last_change_seconds ", None, None),
+    "haproxy_server_check_last_change_seconds{":
+        ("octavia_member_check_last_change_seconds{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_current_throttle ":
+        ("octavia_member_current_throttle ",
+         "# HELP octavia_member_current_throttle Current throttle percentage "
+         "for the member, when slowstart is active, or no value if not in "
+         "slowstart.\n", None),
+    "haproxy_server_current_throttle{":
+        ("octavia_member_current_throttle{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_loadbalanced_total ":
+        ("octavia_member_loadbalanced_total ",
+         "# HELP octavia_member_loadbalanced_total Total number of times a "
+         "member was selected, either for new sessions, or when "
+         "redispatching.\n", None),
+    "haproxy_server_loadbalanced_total{":
+        ("octavia_member_loadbalanced_total{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_http_responses_total ":
+        ("octavia_member_http_responses_total ", None, None),
+    "haproxy_server_http_responses_total{":
+        ("octavia_member_http_responses_total{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_idle_connections_current ":
+        ("octavia_member_idle_connections_current ", None, None),
+    "haproxy_server_idle_connections_current{":
+        ("octavia_member_idle_connections_current{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+    "haproxy_server_idle_connections_limit ":
+        ("octavia_member_idle_connections_limit ", None, None),
+    "haproxy_server_idle_connections_limit{":
+        ("octavia_member_idle_connections_limit{", None,
+         {"proxy=": "pool=", "server=": "member="}),
+}
+METRIC_KEYS = METRIC_MAP.keys()
+
+
+class PrometheusProxy(SimpleHTTPRequestHandler):
+
+    protocol_version = 'HTTP/1.1'
+
+    # No need to log every request through the proxy
+    def log_request(self, *args, **kwargs):
+        pass
+
+    def _add_cpu_utilization(self, metrics_buffer):
+        cpu_pcnt = (psutil.getloadavg()[0] / os.cpu_count()) * 100
+        metrics_buffer += ("# HELP octavia_loadbalancer_cpu
Load balancer " + "CPU utilization (percentage).\n") + metrics_buffer += "# TYPE octavia_loadbalancer_cpu gauge\n" + cpu_metric_string = f"octavia_loadbalancer_cpu {cpu_pcnt:.1f}\n" + metrics_buffer += cpu_metric_string + return metrics_buffer + + def _add_memory_utilization(self, metrics_buffer): + mem_pcnt = psutil.virtual_memory()[2] + metrics_buffer += ("# HELP octavia_loadbalancer_memory Load balancer " + "memory utilization (percentage).\n") + metrics_buffer += "# TYPE octavia_loadbalancer_memory gauge\n" + mem_metric_string = f"octavia_loadbalancer_memory {mem_pcnt:.1f}\n" + metrics_buffer += mem_metric_string + return metrics_buffer + + def do_GET(self): + metrics_buffer = "" + + metrics_buffer = self._add_cpu_utilization(metrics_buffer) + metrics_buffer = self._add_memory_utilization(metrics_buffer) + + try: + with urllib.request.urlopen(METRICS_URL) as source: # nosec + lines = source.readlines() + for line in lines: + line = line.decode("utf-8") + # Don't report metrics for the internal prometheus + # proxy loop. The user facing listener will still be + # reported. + if "prometheus-exporter" in line: + continue + match = next((x for x in METRIC_KEYS if x in line), False) + if match: + map_tuple = METRIC_MAP[match] + if map_tuple[1] and "HELP" in line: + metrics_buffer += map_tuple[1] + else: + if map_tuple[2] and not line.startswith("#"): + for key in map_tuple[2].keys(): + line = line.replace(key, + map_tuple[2][key]) + metrics_buffer += line.replace(match, + map_tuple[0]) + elif PRINT_REJECTED: + print(f"REJECTED: {line}") + except Exception as e: + print(str(e), flush=True) + traceback.print_tb(e.__traceback__) + self.send_response(502) + self.send_header("connection", "close") + self.end_headers() + return + + self.send_response(200) + self.send_header("cache-control", "no-cache") + self.send_header("content-type", "text/plain; version=0.0.4") + self.send_header("connection", "close") + self.end_headers() + self.wfile.write(metrics_buffer.encode("utf-8")) + + +class SignalHandler: + + def __init__(self): + signal.signal(signal.SIGINT, self.shutdown) + signal.signal(signal.SIGTERM, self.shutdown) + + def shutdown(self, *args): + EXIT_EVENT.set() + + +def shutdown_thread(http): + EXIT_EVENT.wait() + http.shutdown() + + +def main(): + global PRINT_REJECTED + try: + if sys.argv[1] == "--rejected": + PRINT_REJECTED = True + except Exception: + pass + + SignalHandler() + + while not EXIT_EVENT.is_set(): + # The amphora-haproxy network namespace may not be present, so handle + # it gracefully. + try: + with network_namespace.NetworkNamespace(consts.AMPHORA_NAMESPACE): + httpd = ThreadingHTTPServer(('127.0.0.1', 9102), + PrometheusProxy) + shutdownthread = threading.Thread(target=shutdown_thread, + args=(httpd,)) + shutdownthread.start() + + httpd.daemon_threads = True + print("Now serving on port 9102") + httpd.serve_forever() + except Exception: + time.sleep(1) diff --git a/octavia/cmd/status.py b/octavia/cmd/status.py new file mode 100644 index 0000000000..eb9f5a8248 --- /dev/null +++ b/octavia/cmd/status.py @@ -0,0 +1,96 @@ +# Copyright (c) 2018 NEC, Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_upgradecheck import common_checks
+from oslo_upgradecheck import upgradecheck
+from stevedore import driver as stevedore_driver
+
+# Need to import to load config
+from octavia.common import config  # noqa: F401 pylint: disable=unused-import
+from octavia.common import constants
+from octavia.controller.worker.v2 import taskflow_jobboard_driver as tsk_driver
+from octavia.i18n import _
+
+CONF = cfg.CONF
+
+
+class Checks(upgradecheck.UpgradeCommands):
+
+    """Contains upgrade checks
+
+    Various upgrade checks should be added as separate methods in this class
+    and added to the _upgrade_checks tuple.
+    """
+
+    def _check_persistence(self):
+        try:
+            pers_driver = tsk_driver.MysqlPersistenceDriver()
+            with pers_driver.get_persistence() as pers:
+                if pers.engine.dialect.name == 'sqlite':
+                    return upgradecheck.Result(
+                        upgradecheck.Code.WARNING,
+                        _('Persistence database is using the sqlite backend. '
+                          'Verify that the persistence_connection URL has '
+                          'been set properly.'))
+                return pers
+        except Exception:
+            return upgradecheck.Result(upgradecheck.Code.FAILURE,
+                                       _('Failed to connect to persistence '
+                                         'backend for AmphoraV2 provider.'))
+
+    def _check_jobboard(self, persistence):
+        try:
+            jobboard_driver = stevedore_driver.DriverManager(
+                namespace='octavia.worker.jobboard_driver',
+                name=CONF.task_flow.jobboard_backend_driver,
+                invoke_args=(persistence,),
+                invoke_on_load=True).driver
+            with jobboard_driver.job_board(persistence) as jb:
+                if jb.connected:
+                    return upgradecheck.Result(
+                        upgradecheck.Code.SUCCESS,
+                        _('Persistence database and Jobboard backend for '
+                          'AmphoraV2 provider configured.'))
+        except Exception:
+            # Return FAILURE later
+            pass
+
+        return upgradecheck.Result(
+            upgradecheck.Code.FAILURE,
+            _('Failed to connect to jobboard backend for AmphoraV2 provider. '
+              'Check jobboard configuration options in task_flow config '
+              'section.'))
+
+    def _check_amphorav2(self):
+        enabled_provider_drivers = CONF.api_settings.enabled_provider_drivers
+        if (constants.AMPHORA in enabled_provider_drivers or
+                constants.AMPHORAV2 in enabled_provider_drivers):
+            persistence = self._check_persistence()
+            if isinstance(persistence, upgradecheck.Result):
+                return persistence
+            return self._check_jobboard(persistence)
+        return upgradecheck.Result(upgradecheck.Code.SUCCESS,
+                                   _('AmphoraV2 provider is not enabled.'))
+
+    _upgrade_checks = (
+        (_('AmphoraV2 Check'), _check_amphorav2),
+        (_('Policy File JSON to YAML Migration'),
+         (common_checks.check_policy_json, {'conf': CONF})),
+    )
+
+
+def main():
+    return upgradecheck.main(
+        CONF, project='octavia', upgrade_command=Checks())
diff --git a/octavia/common/__init__.py b/octavia/common/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/common/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/common/base_taskflow.py b/octavia/common/base_taskflow.py new file mode 100644 index 0000000000..29736c3455 --- /dev/null +++ b/octavia/common/base_taskflow.py @@ -0,0 +1,268 @@ +# Copyright 2014-2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import concurrent.futures +import datetime +import functools +import time + +from oslo_config import cfg +from oslo_log import log +from oslo_utils import uuidutils +from taskflow.conductors.backends import impl_blocking +from taskflow import engines +from taskflow import exceptions as taskflow_exc +from taskflow.jobs.base import Job +from taskflow.listeners import base +from taskflow.listeners import logging +from taskflow.persistence import models +from taskflow import states + +from octavia.amphorae.driver_exceptions import exceptions as drv_exceptions +from octavia.common import exceptions + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + + +# We do not need to log retry exception information. Warning "Could not connect +# to instance" will be logged as usual. 
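+# An illustrative sketch of the effect (hypothetical call sites, shown here
+# only as documentation): a record logged with exc_info carrying one of the
+# expected retry exceptions, e.g.
+#     LOG.warning("Failed to connect, retrying", exc_info=sys.exc_info())
+# is suppressed entirely, while the same message logged without exc_info is
+# emitted as usual.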
+def retryMaskFilter(record):
+    if record.exc_info is not None and isinstance(
+            record.exc_info[1], (
+                drv_exceptions.AmpConnectionRetry,
+                exceptions.ComputeWaitTimeoutException)):
+        return False
+    return True
+
+
+LOG.logger.addFilter(retryMaskFilter)
+
+
+def _details_filter(obj):
+    if isinstance(obj, dict):
+        ret = {}
+        for key in obj:
+            if (key in ('certificate', 'private_key', 'passphrase') and
+                    isinstance(obj[key], str)):
+                ret[key] = '***'
+            elif key == 'intermediates' and isinstance(obj[key], list):
+                ret[key] = ['***'] * len(obj[key])
+            else:
+                ret[key] = _details_filter(obj[key])
+        return ret
+    if isinstance(obj, list):
+        return [_details_filter(e) for e in obj]
+    return obj
+
+
+class FilteredJob(Job):
+    def __str__(self):
+        # Override the default __str__ method from taskflow.job.base.Job,
+        # filter out private information from details
+        cls_name = type(self).__name__
+        details = _details_filter(self.details)
+        return "{}: {} (priority={}, uuid={}, details={})".format(
+            cls_name, self.name, self.priority,
+            self.uuid, details)
+
+
+class JobDetailsFilter(log.logging.Filter):
+    def filter(self, record):
+        # If the first arg is a Job, convert it now to a string with our
+        # custom method
+        if (isinstance(record.args, tuple) and record.args and
+                isinstance(record.args[0], Job)):
+            arg0 = record.args[0]
+            record.args = (FilteredJob.__str__(arg0),) + record.args[1:]
+        return True
+
+
+class BaseTaskFlowEngine:
+    """This is the task flow engine
+
+    Use this engine to start/load flows in the code.
+    """
+
+    def __init__(self):
+        # work around for https://bugs.python.org/issue7980
+        datetime.datetime.strptime('2014-06-19 22:47:16', '%Y-%m-%d %H:%M:%S')
+        self.executor = concurrent.futures.ThreadPoolExecutor(
+            max_workers=CONF.task_flow.max_workers)
+
+    def taskflow_load(self, flow, **kwargs):
+        eng = engines.load(
+            flow,
+            engine=CONF.task_flow.engine,
+            executor=self.executor,
+            never_resolve=CONF.task_flow.disable_revert,
+            **kwargs)
+        eng.compile()
+        eng.prepare()
+
+        return eng
+
+
+class ExtendExpiryListener(base.Listener):
+
+    def __init__(self, engine, job):
+        super().__init__(engine)
+        self.job = job
+
+    def _task_receiver(self, state, details):
+        self.job.extend_expiry(cfg.CONF.task_flow.jobboard_expiration_time)
+
+    def _flow_receiver(self, state, details):
+        self.job.extend_expiry(cfg.CONF.task_flow.jobboard_expiration_time)
+
+    def _retry_receiver(self, state, details):
+        self.job.extend_expiry(cfg.CONF.task_flow.jobboard_expiration_time)
+
+
+class DynamicLoggingConductor(impl_blocking.BlockingConductor):
+
+    def _listeners_from_job(self, job, engine):
+        listeners = super()._listeners_from_job(
+            job, engine)
+        listeners.append(logging.DynamicLoggingListener(engine, log=LOG))
+
+        return listeners
+
+    def _on_job_done(self, job, fut):
+        super()._on_job_done(job, fut)
+        # Double check that the job is complete.
+        if (not CONF.task_flow.jobboard_save_logbook and
+                job.state == states.COMPLETE):
+            LOG.debug("Job %s is complete. Cleaning up job logbook.",
+                      job.name)
+            if job.book:
+                try:
+                    self._persistence.get_connection().destroy_logbook(
+                        job.book.uuid)
+                except taskflow_exc.NotFound:
+                    LOG.debug("Logbook for job %s has already been "
+                              "cleaned up", job.name)
+
+
+class ExtendExpiryDynamicLoggingConductor(DynamicLoggingConductor):
+
+    def _listeners_from_job(self, job, engine):
+        listeners = super()._listeners_from_job(job, engine)
+        listeners.append(ExtendExpiryListener(engine, job))
+        return listeners
+
+
+class TaskFlowServiceController:
+
+    def __init__(self, driver):
+        self.driver = driver
+
+        # Install filter for taskflow executor logger
+        taskflow_logger = log.logging.getLogger(
+            "taskflow.conductors.backends.impl_executor")
+        taskflow_logger.addFilter(JobDetailsFilter())
+
+    def run_poster(self, flow_factory, *args, **kwargs):
+        with self.driver.persistence_driver.get_persistence() as persistence:
+            with self.driver.job_board(persistence) as job_board:
+                job_id = uuidutils.generate_uuid()
+                job_name = '-'.join([flow_factory.__name__, job_id])
+                job_logbook = models.LogBook(job_name)
+                flow_detail = models.FlowDetail(
+                    job_name, job_id)
+                job_details = {
+                    'store': kwargs.pop('store')
+                }
+                job_logbook.add(flow_detail)
+                persistence.get_connection().save_logbook(job_logbook)
+                engines.save_factory_details(flow_detail, flow_factory,
+                                             args, kwargs,
+                                             backend=persistence)
+
+                job_board.post(job_name, book=job_logbook,
+                               details=job_details)
+                self._wait_for_job(job_board)
+
+                return job_id
+
+    def _wait_for_job(self, job_board):
+        # Wait for the job to reach its complete state
+        for job in job_board.iterjobs():
+            LOG.debug("Waiting for job %s to finish", job.name)
+            job.wait()
+
+    def run_conductor(self, name):
+        with self.driver.persistence_driver.get_persistence() as persistence:
+            with self.driver.job_board(persistence) as board:
+                # Redis and etcd do not expire jobs by default, so jobs won't
+                # be resumed after a restart of the controller. Add an expiry
+                # for the board and use a special listener.
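+                # With the default jobboard_expiration_time of 30 seconds,
+                # the ExtendExpiryListener renews the claim on every task,
+                # flow and retry state transition, while the _waiter thread
+                # below tops up any job owned by this conductor whose
+                # remaining TTL drops below half of the expiration time,
+                # polling every expiration_time / 4 seconds.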
+                if (CONF.task_flow.jobboard_backend_driver in (
+                        'etcd_taskflow_driver',
+                        'redis_taskflow_driver')):
+                    conductor = ExtendExpiryDynamicLoggingConductor(
+                        name, board, persistence=persistence,
+                        engine=CONF.task_flow.engine,
+                        engine_options={
+                            'max_workers': CONF.task_flow.max_workers
+                        })
+                    if (CONF.task_flow.jobboard_backend_driver ==
+                            'redis_taskflow_driver'):
+                        # Hack for redis only:
+                        # The TTL of the jobs of the Redis Jobboard driver
+                        # can only be overridden by using the 'expiry'
+                        # parameter of the 'claim' function.
+                        # For the Etcd driver, the default TTL for all the
+                        # locks can be configured while creating the backend.
+                        board.claim = functools.partial(
+                            board.claim,
+                            expiry=CONF.task_flow.jobboard_expiration_time)
+                else:
+                    conductor = DynamicLoggingConductor(
+                        name, board, persistence=persistence,
+                        engine=CONF.task_flow.engine)
+
+                waiter_th = concurrent.futures.ThreadPoolExecutor(
+                    max_workers=1)
+                waiter_th.submit(self._waiter, conductor)
+
+                conductor.run()
+
+    def _extend_jobs(self, conductor, expiration_time):
+        conductor_name = conductor._name
+
+        with self.driver.persistence_driver.get_persistence() as persistence:
+            with self.driver.job_board(persistence) as board:
+                for job in board.iterjobs():
+                    try:
+                        owner = board.find_owner(job)
+                    except TypeError:
+                        # taskflow throws an exception if a job is not owned
+                        # (probably a bug in taskflow)
+                        continue
+                    # Only extend the expiry for jobs that are owned by our
+                    # conductor (from the same process)
+                    if owner == conductor_name:
+                        if job.expires_in() < expiration_time / 2:
+                            LOG.debug("Extend expiry for job %s", job.name)
+                            job.extend_expiry(expiration_time)
+
+    def _waiter(self, conductor):
+        expiration_time = CONF.task_flow.jobboard_expiration_time
+
+        while True:
+            self._extend_jobs(conductor, expiration_time)
+
+            time.sleep(expiration_time / 4)
diff --git a/octavia/common/clients.py b/octavia/common/clients.py
new file mode 100644
index 0000000000..f2986dcb60
--- /dev/null
+++ b/octavia/common/clients.py
@@ -0,0 +1,212 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinderclient import client as cinder_client
+from glanceclient import client as glance_client
+from keystoneauth1 import session
+from keystoneauth1 import token_endpoint
+from novaclient import api_versions
+from novaclient import client as nova_client
+import openstack
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from octavia.common import keystone
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+GLANCE_VERSION = '2'
+NOVA_VERSION = '2.15'
+CINDER_VERSION = '3'
+
+
+class NovaAuth:
+    nova_client = None
+
+    @classmethod
+    def get_nova_client(cls, region, service_name=None, endpoint=None,
+                        endpoint_type='publicURL', insecure=False,
+                        cacert=None):
+        """Create nova client object.
+
+        :param region: The region of the service
+        :param service_name: The name of the nova service in the catalog
+        :param endpoint: The endpoint of the service
+        :param endpoint_type: The type of the endpoint
+        :param insecure: Turn off certificate validation
+        :param cacert: CA Cert file path
+        :return: a Nova Client object.
+        :raises Exception: if the client cannot be created
+        """
+        ksession = keystone.KeystoneSession()
+        if not cls.nova_client:
+            kwargs = {'region_name': region,
+                      'session': ksession.get_session(),
+                      'endpoint_type': endpoint_type,
+                      'insecure': insecure}
+            if service_name:
+                kwargs['service_name'] = service_name
+            if endpoint:
+                kwargs['endpoint_override'] = endpoint
+            if cacert:
+                kwargs['cacert'] = cacert
+            try:
+                cls.nova_client = nova_client.Client(
+                    version=api_versions.APIVersion(NOVA_VERSION), **kwargs)
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    LOG.exception("Error creating Nova client.")
+        return cls.nova_client
+
+
+class NeutronAuth:
+    neutron_client = None
+
+    @classmethod
+    def get_neutron_client(cls):
+        """Create neutron client object."""
+        ksession = keystone.KeystoneSession('neutron')
+        if not cls.neutron_client:
+            sess = ksession.get_session()
+            kwargs = {'region_name': CONF.neutron.region_name}
+            # TODO(ricolin) The `interface` option doesn't take a list yet.
+            # We can move away from this when openstacksdk no longer depends
+            # on `interface`.
+            try:
+                interface = CONF.neutron.valid_interfaces[0]
+            except (TypeError, LookupError):
+                interface = CONF.neutron.valid_interfaces
+            if interface:
+                kwargs['interface'] = interface
+            if CONF.neutron.endpoint_override:
+                kwargs['network_endpoint_override'] = (
+                    CONF.neutron.endpoint_override)
+                if CONF.neutron.endpoint_override.startswith("https"):
+                    kwargs['insecure'] = CONF.neutron.insecure
+                    kwargs['cacert'] = CONF.neutron.cafile
+
+            conn = openstack.connection.Connection(
+                session=sess, **kwargs)
+            cls.neutron_client = conn
+        return cls.neutron_client
+
+    @classmethod
+    def get_user_neutron_client(cls, context):
+        """Get a neutron client for the request user.
+
+        It's possible that the token in the context is trust scoped,
+        which can't be used to initialize a keystone session.
+        We directly use the token and endpoint_url to initialize the
+        neutron client.
+        """
+        sess = keystone.KeystoneSession('neutron').get_session()
+        kwargs = {}
+        neutron_endpoint = CONF.neutron.endpoint_override
+        if neutron_endpoint is None:
+            endpoint_data = sess.get_endpoint_data(
+                service_type=(CONF.neutron.service_type or 'network'),
+                interface=CONF.neutron.valid_interfaces,
+                region_name=CONF.neutron.region_name)
+            neutron_endpoint = endpoint_data.catalog_url
+
+        neutron_cafile = getattr(CONF.neutron, "cafile", None)
+        insecure = getattr(CONF.neutron, "insecure", False)
+        kwargs['verify'] = not insecure
+        if neutron_cafile is not None and not insecure:
+            kwargs['verify'] = neutron_cafile
+        user_auth = token_endpoint.Token(neutron_endpoint, context.auth_token)
+        user_sess = session.Session(auth=user_auth, **kwargs)
+
+        conn = openstack.connection.Connection(
+            session=user_sess, oslo_conf=CONF)
+        return conn.network
+
+
+class GlanceAuth:
+    glance_client = None
+
+    @classmethod
+    def get_glance_client(cls, region, service_name=None, endpoint=None,
+                          endpoint_type='publicURL', insecure=False,
+                          cacert=None):
+        """Create glance client object.
+ + :param region: The region of the service + :param service_name: The name of the glance service in the catalog + :param endpoint: The endpoint of the service + :param endpoint_type: The endpoint_type of the service + :param insecure: Turn off certificate validation + :param cacert: CA Cert file path + :return: a Glance Client object. + :raises Exception: if the client cannot be created + """ + ksession = keystone.KeystoneSession() + if not cls.glance_client: + kwargs = {'region_name': region, + 'session': ksession.get_session(), + 'interface': endpoint_type} + if service_name: + kwargs['service_name'] = service_name + if endpoint: + kwargs['endpoint'] = endpoint + if endpoint.startswith("https"): + kwargs['insecure'] = insecure + kwargs['cacert'] = cacert + try: + cls.glance_client = glance_client.Client( + GLANCE_VERSION, **kwargs) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception("Error creating Glance client.") + return cls.glance_client + + +class CinderAuth: + cinder_client = None + + @classmethod + def get_cinder_client(cls, region, service_name=None, endpoint=None, + endpoint_type='publicURL', insecure=False, + cacert=None): + """Create cinder client object. + + :param region: The region of the service + :param service_name: The name of the cinder service in the catalog + :param endpoint: The endpoint of the service + :param endpoint_type: The endpoint type of the service + :param insecure: Turn off certificate validation + :param cacert: CA Cert file path + :return: a Cinder Client object + :raise Exception: if the client cannot be created + """ + ksession = keystone.KeystoneSession() + if not cls.cinder_client: + kwargs = {'region_name': region, + 'session': ksession.get_session(), + 'interface': endpoint_type} + if service_name: + kwargs['service_name'] = service_name + if endpoint: + kwargs['endpoint'] = endpoint + if endpoint.startswith("https"): + kwargs['insecure'] = insecure + kwargs['cacert'] = cacert + try: + cls.cinder_client = cinder_client.Client( + CINDER_VERSION, **kwargs + ) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception("Error creating Cinder client.") + return cls.cinder_client diff --git a/octavia/common/config.py b/octavia/common/config.py new file mode 100644 index 0000000000..aafece9a55 --- /dev/null +++ b/octavia/common/config.py @@ -0,0 +1,1094 @@ +# Copyright 2011 VMware, Inc., 2014 A10 Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
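+# A minimal octavia.conf sketch exercising a few of the option groups
+# registered below (illustrative values only; the jobboard settings assume
+# the Redis backend that is declared as the default further down, and are
+# not tuning recommendations):
+#
+#     [api_settings]
+#     bind_host = 127.0.0.1
+#     bind_port = 9876
+#
+#     [task_flow]
+#     jobboard_enabled = True
+#     jobboard_backend_driver = redis_taskflow_driver
+#     jobboard_backend_hosts = 127.0.0.1
+#     jobboard_backend_port = 6379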
+ +""" +Routines for configuring Octavia +""" + +import os +import sys + +from keystoneauth1 import loading as ks_loading +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_db import options as db_options +from oslo_log import log as logging +import oslo_messaging as messaging +from oslo_middleware import cors +from oslo_policy import opts as policy_opts + +from octavia.certificates.common import local +from octavia.common import constants +from octavia.common import utils +from octavia.common import validate +from octavia.i18n import _ +from octavia import version + +LOG = logging.getLogger(__name__) + +core_opts = [ + cfg.HostnameOpt('host', default=utils.get_hostname(), + sample_default='', + help=_("The hostname Octavia is running on")), + cfg.StrOpt('octavia_plugins', default='hot_plug_plugin', + help=_("Name of the controller plugin to use")), +] + +api_opts = [ + cfg.IPOpt('bind_host', default='127.0.0.1', + help=_("The host IP to bind to")), + cfg.PortOpt('bind_port', default=9876, + help=_("The port to bind to")), + cfg.StrOpt('auth_strategy', default=constants.KEYSTONE, + choices=[constants.NOAUTH, + constants.KEYSTONE, + constants.TESTING], + help=_("The auth strategy for API requests.")), + cfg.BoolOpt('allow_pagination', default=True, + help=_("Allow the usage of pagination")), + cfg.BoolOpt('allow_sorting', default=True, + help=_("Allow the usage of sorting")), + cfg.BoolOpt('allow_filtering', default=True, + help=_("Allow the usage of filtering")), + cfg.BoolOpt('allow_field_selection', default=True, + help=_("Allow the usage of field selection")), + cfg.StrOpt('pagination_max_limit', + default=str(constants.DEFAULT_PAGE_SIZE), + help=_("The maximum number of items returned in a single " + "response. The string 'infinite' or a negative " + "integer value means 'no limit'")), + cfg.StrOpt('api_base_uri', + help=_("Base URI for the API for use in pagination links. " + "This will be autodetected from the request if not " + "overridden here.")), + cfg.BoolOpt('allow_tls_terminated_listeners', default=True, + help=_("Allow users to create TLS Terminated listeners?")), + cfg.BoolOpt('allow_ping_health_monitors', default=True, + help=_("Allow users to create PING type Health Monitors?")), + cfg.BoolOpt('allow_prometheus_listeners', default=True, + help=_("Allow users to create PROMETHEUS type listeners?")), + cfg.DictOpt('enabled_provider_drivers', + help=_('A comma separated list of dictionaries of the ' + 'enabled provider driver names and descriptions. ' + 'Must match the driver name in the ' + 'octavia.api.drivers entrypoint.'), + default={'amphora': 'The Octavia Amphora driver.', + 'octavia': 'Deprecated alias of the Octavia Amphora ' + 'driver.', + }), + cfg.StrOpt('default_provider_driver', default='amphora', + help=_('Default provider driver.')), + cfg.IntOpt('udp_connect_min_interval_health_monitor', + default=3, + help=_("The minimum health monitor delay interval for the " + "UDP-CONNECT Health Monitor type. 
A negative integer "
+                      "value means 'no limit'.")),
+    cfg.BoolOpt('healthcheck_enabled', default=False,
+                help=_("When True, the oslo middleware healthcheck endpoint "
+                       "is enabled in the Octavia API.")),
+    cfg.IntOpt('healthcheck_refresh_interval', default=5,
+               help=_("The interval, in seconds, for which healthcheck "
+                      "plugins should cache results.")),
+    cfg.StrOpt('default_listener_ciphers',
+               default=constants.CIPHERS_OWASP_SUITE_B,
+               help=_("Default OpenSSL cipher string (colon-separated) for "
+                      "new TLS-enabled listeners.")),
+    cfg.StrOpt('default_pool_ciphers',
+               default=constants.CIPHERS_OWASP_SUITE_B,
+               help=_("Default OpenSSL cipher string (colon-separated) for "
+                      "new TLS-enabled pools.")),
+    cfg.StrOpt('tls_cipher_prohibit_list', default='',
+               deprecated_name='tls_cipher_blacklist',
+               help=_("Colon separated list of OpenSSL ciphers. "
+                      "Usage of these ciphers will be blocked.")),
+    cfg.ListOpt('default_listener_tls_versions',
+                default=constants.TLS_VERSIONS_OWASP_SUITE_B,
+                item_type=cfg.types.String(
+                    choices=constants.TLS_ALL_VERSIONS),
+                help=_('List of TLS versions to use for new TLS-enabled '
+                       'listeners.')),
+    cfg.ListOpt('default_pool_tls_versions',
+                default=constants.TLS_VERSIONS_OWASP_SUITE_B,
+                item_type=cfg.types.String(
+                    choices=constants.TLS_ALL_VERSIONS),
+                help=_('List of TLS versions to use for new TLS-enabled '
+                       'pools.')),
+    cfg.StrOpt('minimum_tls_version',
+               default=None,
+               choices=constants.TLS_ALL_VERSIONS + [None],
+               help=_('Minimum allowed TLS version for listeners and '
+                      'pools.')),
+    cfg.ListOpt('default_listener_alpn_protocols',
+                default=[lib_consts.ALPN_PROTOCOL_HTTP_2,
+                         lib_consts.ALPN_PROTOCOL_HTTP_1_1,
+                         lib_consts.ALPN_PROTOCOL_HTTP_1_0],
+                item_type=cfg.types.String(
+                    choices=constants.SUPPORTED_ALPN_PROTOCOLS),
+                help=_('List of ALPN protocols to use for new TLS-enabled '
+                       'listeners.')),
+    cfg.ListOpt('default_pool_alpn_protocols',
+                default=[lib_consts.ALPN_PROTOCOL_HTTP_2,
+                         lib_consts.ALPN_PROTOCOL_HTTP_1_1,
+                         lib_consts.ALPN_PROTOCOL_HTTP_1_0],
+                item_type=cfg.types.String(
+                    choices=constants.SUPPORTED_ALPN_PROTOCOLS),
+                help=_('List of ALPN protocols to use for new TLS-enabled '
+                       'pools.')),
+]
+
+# Options only used by the amphora agent
+amphora_agent_opts = [
+    cfg.StrOpt('agent_server_ca', default='/etc/octavia/certs/client_ca.pem',
+               help=_("The CA which signed the client certificates")),
+    cfg.StrOpt('agent_server_cert', default='/etc/octavia/certs/server.pem',
+               help=_("The server certificate for the agent server "
+                      "to use")),
+    cfg.StrOpt('agent_server_network_dir',
+               help=_("The directory where new network interfaces "
+                      "are located")),
+    cfg.IntOpt('agent_request_read_timeout', default=180,
+               help=_("The time in seconds to allow a request from the "
+                      "controller to run before terminating the socket.")),
+    cfg.StrOpt('agent_tls_protocol', default=lib_consts.TLS_VERSION_1_2,
+               help=_("Minimum TLS protocol for communication with the "
+                      "amphora agent."),
+               choices=constants.TLS_ALL_VERSIONS),
+
+    # Logging setup
+    cfg.ListOpt('admin_log_targets',
+                help=_('List of log server ip and port pairs for '
+                       'Administrative logs. Additional hosts are backup to '
+                       'the primary server. If none is '
+                       'specified remote logging is disabled. Example '
+                       '127.0.0.1:10514, 192.168.0.1:10514')),
+    cfg.ListOpt('tenant_log_targets',
+                help=_('List of log server ip and port pairs for '
+                       'tenant traffic logs. Additional hosts are backup to '
+                       'the primary server. If none is '
+                       'specified remote logging is disabled. 
Example ' + '127.0.0.1:10514, 192.168.0.1:10514')), + cfg.IntOpt('user_log_facility', default=0, min=0, max=7, + help=_('LOG_LOCAL facility number to use for user traffic ' + 'logs.')), + cfg.IntOpt('administrative_log_facility', default=1, min=0, max=7, + help=_('LOG_LOCAL facility number to use for amphora processes ' + 'logs.')), + cfg.StrOpt('log_protocol', default=lib_consts.PROTOCOL_UDP, + choices=[lib_consts.PROTOCOL_TCP, lib_consts.PROTOCOL_UDP], + help=_("The log forwarding transport protocol. One of UDP or " + "TCP.")), + cfg.IntOpt('log_retry_count', default=5, + help=_('The maximum attempts to retry connecting to the ' + 'logging host.')), + cfg.IntOpt('log_retry_interval', default=2, + help=_('The time, in seconds, to wait between retries ' + 'connecting to the logging host.')), + cfg.IntOpt('log_queue_size', default=10000, + help=_('The queue size (messages) to buffer log messages.')), + cfg.StrOpt('logging_template_override', + help=_('Custom logging configuration template.')), + cfg.BoolOpt('forward_all_logs', default=False, + help=_('When True, the amphora will forward all of the ' + 'system logs (except tenant traffic logs) to the ' + 'admin log target(s). When False, ' + 'only amphora specific admin logs will be forwarded.')), + cfg.BoolOpt('disable_local_log_storage', default=False, + help=_('When True, no logs will be written to the amphora ' + 'filesystem. When False, log files will be written to ' + 'the local filesystem.')), + + # Do not specify in octavia.conf, loaded at runtime + cfg.StrOpt('amphora_id', help=_("The amphora ID.")), + cfg.StrOpt('amphora_udp_driver', + default='keepalived_lvs', + help='The UDP API backend for amphora agent.', + deprecated_for_removal=True, + deprecated_reason=_('amphora-agent will not support any other ' + 'backend than keepalived_lvs.'), + deprecated_since='Wallaby'), +] + +compute_opts = [ + cfg.IntOpt('max_retries', default=15, + help=_('The maximum attempts to retry an action with the ' + 'compute service.')), + cfg.IntOpt('retry_interval', default=1, + help=_('Seconds to wait before retrying an action with the ' + 'compute service.')), + cfg.IntOpt('retry_backoff', default=1, + help=_('The seconds to backoff retry attempts.')), + cfg.IntOpt('retry_max', default=10, + help=_('The maximum interval in seconds between retry ' + 'attempts.')), +] + +networking_opts = [ + cfg.IntOpt('max_retries', default=15, + help=_('The maximum attempts to retry an action with the ' + 'networking service.')), + cfg.IntOpt('retry_interval', default=1, + help=_('Seconds to wait before retrying an action with the ' + 'networking service.')), + cfg.IntOpt('retry_backoff', default=1, + help=_('The seconds to backoff retry attempts.')), + cfg.IntOpt('retry_max', default=10, + help=_('The maximum interval in seconds between retry ' + 'attempts.')), + cfg.IntOpt('port_detach_timeout', default=300, + help=_('Seconds to wait for a port to detach from an ' + 'amphora.')), + cfg.BoolOpt('allow_vip_network_id', default=True, + help=_('Can users supply a network_id for their VIP?')), + cfg.BoolOpt('allow_vip_subnet_id', default=True, + help=_('Can users supply a subnet_id for their VIP?')), + cfg.BoolOpt('allow_vip_port_id', default=True, + help=_('Can users supply a port_id for their VIP?')), + cfg.ListOpt('valid_vip_networks', + help=_('List of network_ids that are valid for VIP ' + 'creation. 
If this field is empty, no validation '
+                       'is performed.')),
+    cfg.ListOpt('reserved_ips',
+                default=['169.254.169.254'],
+                item_type=cfg.types.IPAddress(),
+                help=_('List of IP addresses reserved from being used for '
+                       'member addresses. IPv6 addresses should be in '
+                       'expanded, uppercase form.')),
+    cfg.BoolOpt('allow_invisible_resource_usage', default=False,
+                help=_("When True, users can use network resources they "
+                       "cannot normally see as VIP or member subnets. Making "
+                       "this True may allow users to access resources on "
+                       "subnets they do not normally have access to via "
+                       "neutron RBAC policies.")),
+]
+
+health_manager_opts = [
+    cfg.IPOpt('bind_ip', default='127.0.0.1',
+              help=_('IP address the controller will listen on for '
+                     'heart beats')),
+    cfg.PortOpt('bind_port', default=5555,
+                help=_('Port number the controller will listen on '
+                       'for heart beats')),
+    cfg.IntOpt('failover_threads',
+               default=10,
+               help=_('Number of threads performing amphora failovers.')),
+    cfg.IntOpt('health_update_threads',
+               default=None,
+               help=_('Number of processes for amphora health update.')),
+    cfg.IntOpt('stats_update_threads',
+               default=None,
+               help=_('Number of processes for amphora stats update.')),
+    cfg.StrOpt('heartbeat_key',
+               mutable=True,
+               help=_('Key used to validate the amphora sending '
+                      'the message'), secret=True),
+    cfg.IntOpt('heartbeat_timeout',
+               default=60,
+               help=_('Interval, in seconds, to wait before failing over an '
+                      'amphora.')),
+    cfg.IntOpt('health_check_interval',
+               default=3,
+               help=_('Sleep time between health checks in seconds.')),
+    cfg.IntOpt('sock_rlimit', default=0,
+               help=_('Sets the value of the heartbeat recv buffer')),
+    cfg.IntOpt('failover_threshold', default=None,
+               help=_('Stop failovers if the count of simultaneously failed '
+                      'amphora reaches this number. This may prevent large '
+                      'scale accidental failover events, like in the case of '
+                      'network failures or read-only database issues.')),
+
+    # Used by the health manager on the amphora
+    cfg.ListOpt('controller_ip_port_list',
+                help=_('List of controller ip and port pairs for the '
+                       'heartbeat receivers. Example 127.0.0.1:5555, '
+                       '192.168.0.1:5555'),
+                mutable=True,
+                default=[]),
+    cfg.IntOpt('heartbeat_interval',
+               default=10,
+               mutable=True,
+               help=_('Sleep time between sending heartbeats.')),
+]
+
+oslo_messaging_opts = [
+    cfg.StrOpt('topic', help=_('Topic (i.e. 
Queue) Name')),
+]
+
+haproxy_amphora_opts = [
+    cfg.StrOpt('base_path',
+               default='/var/lib/octavia',
+               help=_('Base directory for amphora files.')),
+    cfg.StrOpt('base_cert_dir',
+               default='/var/lib/octavia/certs',
+               help=_('Base directory for cert storage.')),
+    cfg.StrOpt('haproxy_template', help=_('Custom haproxy template.')),
+    cfg.BoolOpt('connection_logging', default=True,
+                help=_('Set this to False to disable connection logging.')),
+    cfg.IntOpt('connection_max_retries',
+               default=120,
+               help=_('Retry threshold for connecting to amphorae.')),
+    cfg.IntOpt('connection_retry_interval',
+               default=5,
+               help=_('Retry timeout between connection attempts in '
+                      'seconds.')),
+    cfg.IntOpt('active_connection_max_retries',
+               default=15,
+               help=_('Retry threshold for connecting to active amphorae.')),
+    cfg.IntOpt('active_connection_retry_interval',
+               default=2,
+               deprecated_name='active_connection_rety_interval',
+               help=_('Retry timeout between connection attempts in '
+                      'seconds for active amphora.')),
+    cfg.IntOpt('failover_connection_max_retries',
+               default=2,
+               help=_('Retry threshold for connecting to an amphora in '
+                      'failover.')),
+    cfg.IntOpt('failover_connection_retry_interval',
+               default=5,
+               help=_('Retry timeout between connection attempts in '
+                      'seconds for amphora in failover.')),
+    cfg.IntOpt('build_rate_limit',
+               default=-1,
+               help=_('Number of amphorae that could be built per controller '
+                      'worker, simultaneously.')),
+    cfg.IntOpt('build_active_retries',
+               default=120,
+               help=_('Retry threshold for waiting for a build slot for '
+                      'an amphora.')),
+    cfg.IntOpt('build_retry_interval',
+               default=5,
+               help=_('Retry timeout between build attempts in '
+                      'seconds.')),
+    cfg.StrOpt('haproxy_stick_size', default='10k',
+               help=_('Size of the HAProxy stick table. 
Accepts k, m, g ' + 'suffixes.')), + cfg.StrOpt('user_log_format', + default='{{ project_id }} {{ lb_id }} %f %ci %cp %t %{+Q}r %ST ' + '%B %U %[ssl_c_verify] %{+Q}[ssl_c_s_dn] %b %s %Tt ' + '%tsc', + help=_('Log format string for user flow logging.')), + + # REST server + cfg.IPOpt('bind_host', default='::', # nosec + help=_("The host IP to bind to")), + cfg.PortOpt('bind_port', default=9443, + help=_("The port to bind to")), + cfg.StrOpt('lb_network_interface', + default='o-hm0', + help=_('Network interface through which to reach amphora, only ' + 'required if using IPv6 link local addresses.')), + cfg.StrOpt('haproxy_cmd', default='/usr/sbin/haproxy', + help=_("The full path to haproxy")), + cfg.IntOpt('respawn_count', default=2, + deprecated_for_removal=True, + deprecated_reason='upstart support has been removed and this ' + 'option is no longer used.', + help=_("The respawn count for haproxy's upstart script")), + cfg.IntOpt('respawn_interval', default=2, + deprecated_for_removal=True, + deprecated_reason='upstart support has been removed and this ' + 'option is no longer used.', + help=_("The respawn interval for haproxy's upstart script")), + cfg.FloatOpt('rest_request_conn_timeout', default=10, + help=_("The time in seconds to wait for a REST API " + "to connect.")), + cfg.FloatOpt('rest_request_read_timeout', default=60, + help=_("The time in seconds to wait for a REST API " + "response.")), + cfg.IntOpt('timeout_client_data', + default=constants.DEFAULT_TIMEOUT_CLIENT_DATA, + help=_('Frontend client inactivity timeout.')), + cfg.IntOpt('timeout_member_connect', + default=constants.DEFAULT_TIMEOUT_MEMBER_CONNECT, + help=_('Backend member connection timeout.')), + cfg.IntOpt('timeout_member_data', + default=constants.DEFAULT_TIMEOUT_MEMBER_DATA, + help=_('Backend member inactivity timeout.')), + cfg.IntOpt('timeout_tcp_inspect', + default=constants.DEFAULT_TIMEOUT_TCP_INSPECT, + help=_('Time to wait for TCP packets for content inspection.')), + # REST client + cfg.StrOpt('client_cert', default='/etc/octavia/certs/client.pem', + help=_("The client certificate to talk to the agent")), + cfg.StrOpt('server_ca', default='/etc/octavia/certs/server_ca.pem', + help=_("The ca which signed the server certificates")), + cfg.IntOpt('api_db_commit_retry_attempts', default=15, + help=_('The number of times the database action will be ' + 'attempted.')), + cfg.IntOpt('api_db_commit_retry_initial_delay', default=1, + help=_('The initial delay before a retry attempt.')), + cfg.IntOpt('api_db_commit_retry_backoff', default=1, + help=_('The time to backoff retry attempts.')), + cfg.IntOpt('api_db_commit_retry_max', default=5, + help=_('The maximum amount of time to wait between retry ' + 'attempts.')), + cfg.IntOpt('default_connection_limit', + default=constants.HAPROXY_DEFAULT_MAXCONN, + help=_('Default connection_limit for listeners, used when ' + 'setting "-1" or when unsetting connection_limit with ' + 'the listener API.')), +] + +controller_worker_opts = [ + cfg.IntOpt('workers', + default=1, min=1, + help='Number of workers for the controller-worker service.'), + cfg.IntOpt('amp_active_retries', + default=30, + help=_('Retry attempts to wait for Amphora to become active')), + cfg.IntOpt('amp_active_wait_sec', + default=10, + help=_('Seconds to wait between checks on whether an Amphora ' + 'has become active')), + cfg.StrOpt('amp_flavor_id', + default='', + help=_('Nova instance flavor id for the Amphora')), + cfg.StrOpt('amp_image_tag', + default='', + help=_('Glance image tag for the Amphora 
image to boot. '
+                      'Use this option to be able to update the image '
+                      'without reconfiguring Octavia.')),
+    cfg.StrOpt('amp_image_owner_id',
+               default='',
+               help=_('Restrict glance image selection to a specific '
+                      'owner ID. This is a recommended security setting.')),
+    cfg.StrOpt('amp_ssh_key_name',
+               default='',
+               help=_('Optional SSH keypair name, in nova, that will be used '
+                      'for the authorized_keys inside the amphora.')),
+    cfg.StrOpt('amp_timezone',
+               default='UTC',
+               help=_('The timezone to use in the Amphora as represented in '
+                      '/usr/share/zoneinfo.')),
+    cfg.ListOpt('amp_boot_network_list',
+                default=[],
+                help=_('List of networks to attach to the Amphorae. '
+                       'All networks defined in the list will '
+                       'be attached to each amphora.')),
+    cfg.ListOpt('amp_secgroup_list',
+                default=[],
+                help=_('List of security groups to attach to the Amphora.')),
+    cfg.StrOpt('client_ca',
+               default='/etc/octavia/certs/ca_01.pem',
+               help=_('Client CA for the amphora agent to use')),
+    cfg.StrOpt('amphora_driver',
+               default='amphora_haproxy_rest_driver',
+               help=_('Name of the amphora driver to use')),
+    cfg.StrOpt('compute_driver',
+               default='compute_nova_driver',
+               help=_('Name of the compute driver to use')),
+    cfg.StrOpt('network_driver',
+               default='allowed_address_pairs_driver',
+               help=_('Name of the network driver to use')),
+    cfg.StrOpt('volume_driver',
+               default=constants.VOLUME_NOOP_DRIVER,
+               choices=constants.SUPPORTED_VOLUME_DRIVERS,
+               help=_('Name of the volume driver to use')),
+    cfg.StrOpt('image_driver',
+               default='image_glance_driver',
+               choices=constants.SUPPORTED_IMAGE_DRIVERS,
+               help=_('Name of the image driver to use')),
+    cfg.StrOpt('distributor_driver',
+               default='distributor_noop_driver',
+               help=_('Name of the distributor driver to use')),
+    cfg.ListOpt('statistics_drivers', default=['stats_db'],
+                help=_('List of drivers for updating amphora statistics.')),
+    cfg.StrOpt('loadbalancer_topology',
+               default=constants.TOPOLOGY_SINGLE,
+               choices=constants.SUPPORTED_LB_TOPOLOGIES,
+               mutable=True,
+               help=_('Load balancer topology configuration. '
+                      'SINGLE - One amphora per load balancer. '
+                      'ACTIVE_STANDBY - Two amphora per load balancer.')),
+    cfg.BoolOpt('user_data_config_drive', default=False,
+                deprecated_for_removal=True,
+                deprecated_reason=_('User_data nova option is not used and '
+                                    'is too small to replace the '
+                                    'config_drive.'),
+                deprecated_since='Antelope(2023.1)',
+                help=_('If True, build cloud-init user-data that is passed '
+                       'to the config drive on Amphora boot instead of '
+                       'personality files. If False, utilize personality '
+                       'files.')),
+    cfg.IntOpt('amphora_delete_retries', default=5,
+               help=_('Number of times an amphora delete should be '
+                      'retried.')),
+    cfg.IntOpt('amphora_delete_retry_interval', default=5,
+               help=_('Time, in seconds, between amphora delete retries.')),
+    cfg.BoolOpt('event_notifications', default=True,
+                help=_('Enable octavia event notifications. 
See '
+                       'oslo_messaging_notifications section for additional '
+                       'requirements.')),
+    # 2000 attempts is around 2h45 with the default settings
+    cfg.IntOpt('db_commit_retry_attempts', default=2000,
+               help=_('The number of times the database action will be '
+                      'attempted.')),
+    cfg.IntOpt('db_commit_retry_initial_delay', default=1,
+               help=_('The initial delay before a retry attempt.')),
+    cfg.IntOpt('db_commit_retry_backoff', default=1,
+               help=_('The time to backoff retry attempts.')),
+    cfg.IntOpt('db_commit_retry_max', default=5,
+               help=_('The maximum amount of time to wait between retry '
+                      'attempts.')),
+]
+
+task_flow_opts = [
+    cfg.StrOpt('engine',
+               default='parallel',
+               choices=constants.SUPPORTED_TASKFLOW_ENGINE_TYPES,
+               help=_('TaskFlow engine to use.')),
+    cfg.IntOpt('max_workers',
+               default=5,
+               help=_('The maximum number of workers')),
+    cfg.BoolOpt('disable_revert', default=False,
+                help=_('If True, disables the controller worker taskflow '
+                       'flows from reverting. This will leave resources in '
+                       'an inconsistent state and should only be used for '
+                       'debugging purposes.')),
+    cfg.StrOpt('persistence_connection',
+               default='sqlite://', secret=True,
+               help='Persistence database, which will be used to store task '
+                    'states. Database connection url with db name'),
+    cfg.BoolOpt('jobboard_enabled', default=False,
+                help=_('If True, enables TaskFlow jobboard.')),
+    cfg.StrOpt('jobboard_backend_driver',
+               default='redis_taskflow_driver',
+               choices=[('redis_taskflow_driver',
+                         'Driver that will use Redis to store job states.'),
+                        ('zookeeper_taskflow_driver',
+                         'Driver that will use Zookeeper to store job '
+                         'states.'),
+                        ('etcd_taskflow_driver',
+                         'Driver that will use Etcd to store job states.')],
+               help='Jobboard backend driver that will monitor job state.'),
+    cfg.ListOpt('jobboard_backend_hosts', default=['127.0.0.1'],
+                help='Jobboard backend server host(s).'),
+    cfg.PortOpt('jobboard_backend_port', default=6379,
+                help='Jobboard backend server port'),
+    cfg.StrOpt('jobboard_backend_username',
+               help='Jobboard backend server user name'),
+    cfg.StrOpt('jobboard_backend_password', secret=True,
+               help='Jobboard backend server password'),
+    cfg.StrOpt('jobboard_backend_namespace', default='octavia_jobboard',
+               help='Jobboard name that should be used to store taskflow '
+                    'job id and claims for it.'),
+    cfg.IntOpt('jobboard_redis_backend_db',
+               default=0, min=0,
+               help='Database ID in redis server.'),
+    cfg.StrOpt('jobboard_redis_sentinel', default=None,
+               help='Sentinel name if it is used for Redis.'),
+    cfg.StrOpt('jobboard_redis_sentinel_username',
+               help='Redis Sentinel server user name'),
+    cfg.StrOpt('jobboard_redis_sentinel_password', secret=True,
+               help='Redis Sentinel server password'),
+    cfg.DictOpt('jobboard_redis_backend_ssl_options',
+                help='Redis jobboard backend ssl configuration options.',
+                default={'ssl': False,
+                         'ssl_keyfile': None,
+                         'ssl_certfile': None,
+                         'ssl_ca_certs': None,
+                         'ssl_cert_reqs': 'required'}),
+    cfg.DictOpt('jobboard_redis_sentinel_ssl_options',
+                help='Redis sentinel ssl configuration options.',
+                default={'ssl': False,
+                         'ssl_keyfile': None,
+                         'ssl_certfile': None,
+                         'ssl_ca_certs': None,
+                         'ssl_cert_reqs': 'required'}),
+    cfg.DictOpt('jobboard_zookeeper_ssl_options',
+                help='Zookeeper jobboard backend ssl configuration options.',
+                default={'use_ssl': False,
+                         'keyfile': None,
+                         'keyfile_password': None,
+                         'certfile': None,
+                         'verify_certs': True}),
+    cfg.DictOpt('jobboard_etcd_ssl_options',
+                help='Etcd jobboard backend ssl 
configuration options.',
+                default={'use_ssl': False,
+                         'ca_cert': None,
+                         'cert_key': None,
+                         'cert_cert': None}),
+    cfg.IntOpt('jobboard_etcd_timeout', default=None,
+               help='Timeout when communicating with the Etcd backend.'),
+    cfg.StrOpt('jobboard_etcd_api_path', default=None,
+               help='API Path of the Etcd server.'),
+    cfg.IntOpt('jobboard_expiration_time', default=30,
+               help='For backends like Redis, claiming a job requires '
+                    'setting an expiry: the number of seconds the claim '
+                    'should be retained for.'),
+    cfg.BoolOpt('jobboard_save_logbook', default=False,
+                help='Set this parameter to True if logbook info is '
+                     'required for analysis. By default the logbook is '
+                     'removed from the persistence backend when the job '
+                     'completes.'),
+]
+
+core_cli_opts = []
+
+certificate_opts = [
+    cfg.StrOpt('cert_manager',
+               default='barbican_cert_manager',
+               help='Name of the cert manager to use'),
+    cfg.StrOpt('cert_generator',
+               default='local_cert_generator',
+               help='Name of the cert generator to use'),
+    cfg.StrOpt('barbican_auth',
+               default='barbican_acl_auth',
+               help='Name of the Barbican authentication method to use'),
+    cfg.StrOpt('service_name',
+               help=_('The name of the certificate service in the keystone '
+                      'catalog')),
+    cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint '
+                                  'in the keystone catalog.')),
+    cfg.StrOpt('region_name',
+               help='Region in Identity service catalog to use for '
+                    'communication with the barbican service.'),
+    cfg.StrOpt('endpoint_type',
+               default='publicURL',
+               help='The endpoint_type to be used for barbican service.'),
+    cfg.StrOpt('ca_certificates_file',
+               help=_('CA certificates file path for the key manager service '
+                      '(such as Barbican).')),
+    cfg.BoolOpt('insecure',
+                default=False,
+                help=_('Disable certificate validation on SSL connections')),
+]
+
+house_keeping_opts = [
+    cfg.IntOpt('cleanup_interval',
+               default=30,
+               help=_('DB cleanup interval in seconds')),
+    cfg.IntOpt('amphora_expiry_age',
+               default=604800,
+               help=_('Amphora expiry age in seconds')),
+    cfg.IntOpt('load_balancer_expiry_age',
+               default=604800,
+               help=_('Load balancer expiry age in seconds')),
+    cfg.IntOpt('cert_interval',
+               default=3600,
+               help=_('Certificate check interval in seconds')),
+    # 14 days for cert expiry buffer
+    cfg.IntOpt('cert_expiry_buffer',
+               default=1209600,
+               help=_('Seconds until certificate expiration')),
+    cfg.IntOpt('cert_rotate_threads',
+               default=10,
+               help=_('Number of threads performing amphora certificate'
+                      ' rotation'))
+]
+
+keepalived_vrrp_opts = [
+    cfg.IntOpt('vrrp_advert_int',
+               default=1,
+               help=_('Amphora role and priority advertisement interval '
+                      'in seconds.')),
+    cfg.IntOpt('vrrp_check_interval',
+               default=5,
+               help=_('VRRP health check script run interval in seconds.')),
+    cfg.IntOpt('vrrp_fail_count',
+               default=2,
+               help=_('Number of successive failures before transition to a '
+                      'fail state.')),
+    cfg.IntOpt('vrrp_success_count',
+               default=2,
+               help=_('Number of consecutive successes before transition to '
+                      'a success state.')),
+    cfg.IntOpt('vrrp_garp_refresh_interval',
+               default=5,
+               help=_('Time in seconds between gratuitous ARP announcements '
+                      'from the MASTER.')),
+    cfg.IntOpt('vrrp_garp_refresh_count',
+               default=2,
+               help=_('Number of gratuitous ARP announcements to make on '
+                      'each refresh interval.'))
+
+]
+
+nova_opts = [
+    cfg.StrOpt('service_name',
+               help=_('The name of the nova service in the keystone '
+                      'catalog')),
+    cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint '
+                                  'in the keystone 
catalog.')),
+    cfg.StrOpt('region_name',
+               help=_('Region in Identity service catalog to use for '
+                      'communication with the OpenStack services.')),
+    cfg.StrOpt('endpoint_type', default='publicURL',
+               help=_('Endpoint interface in identity service to use')),
+    cfg.StrOpt('ca_certificates_file',
+               help=_('CA certificates file path')),
+    cfg.BoolOpt('insecure',
+                default=False,
+                help=_('Disable certificate validation on SSL connections')),
+    cfg.BoolOpt('enable_anti_affinity', default=False,
+                help=_('Flag to indicate if nova anti-affinity feature is '
+                       'turned on. This option is only used when creating '
+                       'amphorae in ACTIVE_STANDBY topology.')),
+    cfg.StrOpt('anti_affinity_policy', default=constants.ANTI_AFFINITY,
+               choices=[constants.ANTI_AFFINITY,
+                        constants.SOFT_ANTI_AFFINITY],
+               help=_('Sets the anti-affinity policy for nova')),
+    cfg.IntOpt('random_amphora_name_length', default=0,
+               help=_('If non-zero, generate a random name of the length '
+                      'provided for each amphora, in the format '
+                      '"a[A-Z0-9]*". '
+                      'Otherwise, the default name format will be used: '
+                      '"amphora-{UUID}".')),
+    cfg.StrOpt('availability_zone', default=None,
+               help=_('Availability zone to use for creating Amphorae')),
+]
+
+cinder_opts = [
+    cfg.StrOpt('service_name',
+               help=_('The name of the cinder service in the keystone '
+                      'catalog')),
+    cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint '
+                                  'in the keystone catalog.')),
+    cfg.StrOpt('region_name',
+               help=_('Region in Identity service catalog to use for '
+                      'communication with the OpenStack services.')),
+    cfg.StrOpt('endpoint_type', default='publicURL',
+               help=_('Endpoint interface in identity service to use')),
+    cfg.StrOpt('ca_certificates_file',
+               help=_('CA certificates file path')),
+    cfg.StrOpt('availability_zone', default=None,
+               help=_('Availability zone to use for creating Volume')),
+    cfg.BoolOpt('insecure',
+                default=False,
+                help=_('Disable certificate validation on SSL connections')),
+    cfg.IntOpt('volume_size', default=16, min=1,
+               help=_('Size of volume, in GB, for Amphora instance')),
+    cfg.StrOpt('volume_type', default=None,
+               help=_('Type of volume for Amphorae volume root disk')),
+    cfg.IntOpt('volume_create_retry_interval', default=5,
+               help=_('Interval, in seconds, between checks that a created '
+                      'volume has become available')),
+    cfg.IntOpt('volume_create_timeout', default=300,
+               help=_('Timeout to wait for volume creation success')),
+    cfg.IntOpt('volume_create_max_retries', default=5,
+               help=_('Maximum number of retries to create volume'))
+]
+
+neutron_opts = [
+    cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint '
+                                  'in the keystone catalog.'),
+               deprecated_for_removal=True,
+               deprecated_reason=_('The endpoint_override option defined by '
+                                   'keystoneauth1 is the new name for this '
+                                   'option.'),
+               deprecated_since='2023.2/Bobcat'),
+    cfg.StrOpt('endpoint_type', help=_('Endpoint interface in identity '
+                                       'service to use'),
+               deprecated_for_removal=True,
+               deprecated_reason=_('This option was replaced by the '
+                                   'valid_interfaces option defined by '
+                                   'keystoneauth.'),
+               deprecated_since='2023.2/Bobcat'),
+    cfg.StrOpt('ca_certificates_file',
+               help=_('CA certificates file path'),
+               deprecated_for_removal=True,
+               deprecated_reason=_('The cafile option defined by '
+                                   'keystoneauth1 is the new name for this '
+                                   'option.'),
+               deprecated_since='2023.2/Bobcat'),
+]
+
+glance_opts = [
+    cfg.StrOpt('service_name',
+               help=_('The name of the glance service in the '
+                      'keystone catalog')),
+    cfg.StrOpt('endpoint', help=_('A 
new endpoint to override the endpoint ' + 'in the keystone catalog.')), + cfg.StrOpt('region_name', + help=_('Region in Identity service catalog to use for ' + 'communication with the OpenStack services.')), + cfg.StrOpt('endpoint_type', default='publicURL', + help=_('Endpoint interface in identity service to use')), + cfg.StrOpt('ca_certificates_file', + help=_('CA certificates file path')), + cfg.BoolOpt('insecure', + default=False, + help=_('Disable certificate validation on SSL connections ')), +] + +quota_opts = [ + cfg.IntOpt('default_load_balancer_quota', + default=constants.QUOTA_UNLIMITED, + help=_('Default per project load balancer quota.')), + cfg.IntOpt('default_listener_quota', + default=constants.QUOTA_UNLIMITED, + help=_('Default per project listener quota.')), + cfg.IntOpt('default_member_quota', + default=constants.QUOTA_UNLIMITED, + help=_('Default per project member quota.')), + cfg.IntOpt('default_pool_quota', + default=constants.QUOTA_UNLIMITED, + help=_('Default per project pool quota.')), + cfg.IntOpt('default_health_monitor_quota', + default=constants.QUOTA_UNLIMITED, + help=_('Default per project health monitor quota.')), + cfg.IntOpt('default_l7policy_quota', + default=constants.QUOTA_UNLIMITED, + help=_('Default per project l7policy quota.')), + cfg.IntOpt('default_l7rule_quota', + default=constants.QUOTA_UNLIMITED, + help=_('Default per project l7rule quota.')), +] + +audit_opts = [ + cfg.BoolOpt('enabled', default=False, + help=_('Enable auditing of API requests')), + cfg.StrOpt('audit_map_file', + default='/etc/octavia/octavia_api_audit_map.conf', + help=_('Path to audit map file for octavia-api service. ' + 'Used only when API audit is enabled.')), + cfg.StrOpt('ignore_req_list', default='', + help=_('Comma separated list of REST API HTTP methods to be ' + 'ignored during audit. For example: auditing will not ' + 'be done on any GET or POST requests if this is set to ' + '"GET,POST". 
It is used only when API audit is ' + 'enabled.')), +] + +driver_agent_opts = [ + cfg.StrOpt('status_socket_path', + default='/var/run/octavia/status.sock', + help=_('Path to the driver status unix domain socket file.')), + cfg.StrOpt('stats_socket_path', + default='/var/run/octavia/stats.sock', + help=_('Path to the driver statistics unix domain socket ' + 'file.')), + cfg.StrOpt('get_socket_path', + default='/var/run/octavia/get.sock', + help=_('Path to the driver get unix domain socket file.')), + cfg.IntOpt('status_request_timeout', + default=5, + help=_('Time, in seconds, to wait for a status update ' + 'request.')), + cfg.IntOpt('status_max_processes', + default=50, + help=_('Maximum number of concurrent processes to use ' + 'servicing status updates.')), + cfg.IntOpt('stats_request_timeout', + default=5, + help=_('Time, in seconds, to wait for a statistics update ' + 'request.')), + cfg.IntOpt('stats_max_processes', + default=50, + help=_('Maximum number of concurrent processes to use ' + 'servicing statistics updates.')), + cfg.IntOpt('get_request_timeout', + default=5, + help=_('Time, in seconds, to wait for a get request.')), + cfg.IntOpt('get_max_processes', + default=50, + help=_('Maximum number of concurrent processes to use ' + 'servicing get requests.')), + cfg.FloatOpt('max_process_warning_percent', + default=0.75, min=0.01, max=0.99, + help=_('Percentage of max_processes (both status and stats) ' + 'in use to start logging warning messages about an ' + 'overloaded driver-agent.')), + cfg.IntOpt('provider_agent_shutdown_timeout', + default=60, + help=_('The time, in seconds, to wait for provider agents ' + 'to shutdown after the exit event has been set.')), + cfg.ListOpt('enabled_provider_agents', default=[], + help=_('List of enabled provider agents. The driver-agent ' + 'will launch these agents at startup.')) +] + +# Register the configuration options +cfg.CONF.register_opts(core_opts) +cfg.CONF.register_opts(api_opts, group='api_settings') +cfg.CONF.register_opts(amphora_agent_opts, group='amphora_agent') +cfg.CONF.register_opts(compute_opts, group='compute') +cfg.CONF.register_opts(networking_opts, group='networking') +cfg.CONF.register_opts(oslo_messaging_opts, group='oslo_messaging') +cfg.CONF.register_opts(haproxy_amphora_opts, group='haproxy_amphora') +cfg.CONF.register_opts(controller_worker_opts, group='controller_worker') +cfg.CONF.register_opts(keepalived_vrrp_opts, group='keepalived_vrrp') +cfg.CONF.register_opts(task_flow_opts, group='task_flow') +cfg.CONF.register_opts(house_keeping_opts, group='house_keeping') +cfg.CONF.register_opts(certificate_opts, group='certificates') +cfg.CONF.register_opts(health_manager_opts, group='health_manager') +cfg.CONF.register_opts(nova_opts, group='nova') +cfg.CONF.register_opts(cinder_opts, group='cinder') +cfg.CONF.register_opts(glance_opts, group='glance') +cfg.CONF.register_opts(neutron_opts, group='neutron') +cfg.CONF.register_opts(quota_opts, group='quotas') +cfg.CONF.register_opts(audit_opts, group='audit') +cfg.CONF.register_opts(driver_agent_opts, group='driver_agent') + +cfg.CONF.register_opts(local.certgen_opts, group='certificates') +cfg.CONF.register_opts(local.certmgr_opts, group='certificates') + +# Ensure that the control exchange is set correctly +messaging.set_transport_defaults(control_exchange='octavia') +_SQL_CONNECTION_DEFAULT = 'sqlite://' +# Update the default QueuePool parameters. 
These can be tweaked by the +# configuration variables - max_pool_size, max_overflow and pool_timeout +db_options.set_defaults(cfg.CONF, connection=_SQL_CONNECTION_DEFAULT, + max_pool_size=10, max_overflow=20, pool_timeout=10) + + +def register_ks_options(group): + ks_loading.register_auth_conf_options(cfg.CONF, group) + ks_loading.register_session_conf_options(cfg.CONF, group) + ks_loading.register_adapter_conf_options(cfg.CONF, group, + include_deprecated=False) + + +register_ks_options(constants.SERVICE_AUTH) +register_ks_options('neutron') + + +def register_cli_opts(): + cfg.CONF.register_cli_opts(core_cli_opts) + logging.register_options(cfg.CONF) + + +def handle_neutron_deprecations(): + # Apply neutron deprecated options to their new setting if needed + + # Basically: if the new option is not set and the value of the deprecated + # option is not the default, it means that the deprecated setting is still + # used in the config file: + # * convert it to a valid "new" value if needed + # * set it as the default for the new option + # Thus [neutron]. has an higher precedence than + # [neutron]. + loc = cfg.CONF.get_location('endpoint', 'neutron') + new_loc = cfg.CONF.get_location('endpoint_override', 'neutron') + if not new_loc and loc and loc.location != cfg.Locations.opt_default: + cfg.CONF.set_default('endpoint_override', cfg.CONF.neutron.endpoint, + 'neutron') + + loc = cfg.CONF.get_location('endpoint_type', 'neutron') + new_loc = cfg.CONF.get_location('valid_interfaces', 'neutron') + if not new_loc and loc and loc.location != cfg.Locations.opt_default: + endpoint_type = cfg.CONF.neutron.endpoint_type.replace('URL', '') + cfg.CONF.set_default('valid_interfaces', [endpoint_type], + 'neutron') + + loc = cfg.CONF.get_location('ca_certificates_file', 'neutron') + new_loc = cfg.CONF.get_location('cafile', 'neutron') + if not new_loc and loc and loc.location != cfg.Locations.opt_default: + cfg.CONF.set_default('cafile', cfg.CONF.neutron.ca_certificates_file, + 'neutron') + + +def init(args, **kwargs): + register_cli_opts() + cfg.CONF(args=args, project='octavia', + version=f'%prog {version.version_info.release_string()}', + **kwargs) + validate.check_default_tls_versions_min_conflict() + setup_remote_debugger() + validate.check_default_ciphers_prohibit_list_conflict() + + # Override default auth_type for plugins with the default from service_auth + auth_type = cfg.CONF.service_auth.auth_type + cfg.CONF.set_default('auth_type', auth_type, 'neutron') + + handle_neutron_deprecations() + + +def setup_logging(conf): + """Sets up the logging options for a log with supplied name. + + :param conf: a cfg.ConfOpts object + """ + ll = logging.get_default_log_levels() + logging.set_defaults(default_log_levels=ll) + product_name = "octavia" + logging.setup(conf, product_name) + LOG.info("Logging enabled!") + LOG.info("%(prog)s version %(version)s", + {'prog': sys.argv[0], + 'version': version.version_info.release_string()}) + LOG.debug("command line: %s", " ".join(sys.argv)) + + +def _enable_pydev(debugger_host, debugger_port): + try: + from pydev import pydevd # pylint: disable=import-outside-toplevel + except ImportError: + import pydevd # pylint: disable=import-outside-toplevel + + pydevd.settrace(debugger_host, + suspend=False, + port=int(debugger_port), + stdoutToServer=True, + stderrToServer=True) + + +def _enable_ptvsd(debuggger_host, debugger_port): + import ptvsd # pylint: disable=import-outside-toplevel + + # Allow other computers to attach to ptvsd at this IP address and port. 
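To make the environment-variable contract of setup_remote_debugger() concrete, a standalone sketch (the variable names follow the code below; the return shape is illustrative):

    import os

    def debugger_settings():
        # Mirrors the three variables read by setup_remote_debugger():
        # all of DEBUGGER_TYPE/HOST/PORT must be set for debugging to engage.
        dbg_type = os.environ.get('DEBUGGER_TYPE', 'pydev')
        host = os.environ.get('DEBUGGER_HOST')
        port = os.environ.get('DEBUGGER_PORT')
        if not dbg_type or not host or not port:
            return None
        return dbg_type, host, int(port)

    print(debugger_settings())  # None unless all DEBUGGER_* are exported
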
+ ptvsd.enable_attach(address=(debuggger_host, debugger_port), + redirect_output=True) + + # Pause the program until a remote debugger is attached + ptvsd.wait_for_attach() + + +def setup_remote_debugger(): + """Required setup for remote debugging.""" + + debugger_type = os.environ.get('DEBUGGER_TYPE', 'pydev') + debugger_host = os.environ.get('DEBUGGER_HOST') + debugger_port = os.environ.get('DEBUGGER_PORT') + + if not debugger_type or not debugger_host or not debugger_port: + return + + try: + LOG.warning("Connecting to remote debugger. Once connected, resume " + "the program on the debugger to continue with the " + "initialization of the service.") + if debugger_type == 'pydev': + _enable_pydev(debugger_host, debugger_port) + elif debugger_type == 'ptvsd': + _enable_ptvsd(debugger_host, debugger_port) + else: + LOG.exception('Debugger %(debugger)s is not supported', + debugger_type) + except Exception: + LOG.exception('Unable to join debugger, please make sure that the ' + 'debugger processes is listening on debug-host ' + '\'%(debug-host)s\' debug-port \'%(debug-port)s\'.', + {'debug-host': debugger_host, + 'debug-port': debugger_port}) + raise + + +def set_lib_defaults(): + """Update default value for configuration options from other namespace. + + Example, oslo lib config options. This is needed for + config generator tool to pick these default value changes. + https://docs.openstack.org/oslo.config/latest/cli/ + generator.html#modifying-defaults-from-other-namespaces + """ + + set_cors_middleware_defaults() + # TODO(gmann): Remove setting the default value of config policy_file + # once oslo_policy change the default value to 'policy.yaml'. + # https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49 + # Update default value of oslo.policy policy_file config option. + policy_opts.set_defaults(cfg.CONF, 'policy.yaml') + + +def set_cors_middleware_defaults(): + """Update default configuration options for oslo.middleware.""" + cors.set_defaults( + allow_headers=['X-Auth-Token', 'X-Openstack-Request-Id'], + allow_methods=['GET', 'PUT', 'POST', 'DELETE'], + expose_headers=['X-Auth-Token', 'X-Openstack-Request-Id'] + ) diff --git a/octavia/common/constants.py b/octavia/common/constants.py new file mode 100644 index 0000000000..957c8e6d1e --- /dev/null +++ b/octavia/common/constants.py @@ -0,0 +1,990 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from octavia_lib.common import constants as lib_consts + +############################################################################## +# Constants common to the provider drivers moved to +# octavia_lib.common.constants +# These are deprecated, to be removed in the 'U' release +############################################################################## +# 'loadbalancers' +LOADBALANCERS = lib_consts.LOADBALANCERS +# 'listeners' +LISTENERS = lib_consts.LISTENERS +# 'pools' +POOLS = lib_consts.POOLS +# HEALTHMONITORS = 'healthmonitors' +HEALTHMONITORS = lib_consts.HEALTHMONITORS +# 'members' +MEMBERS = lib_consts.MEMBERS +# 'l7policies' +L7POLICIES = lib_consts.L7POLICIES +# 'l7rules' +L7RULES = lib_consts.L7RULES + +# 'PING' +HEALTH_MONITOR_PING = lib_consts.HEALTH_MONITOR_PING +# 'TCP' +HEALTH_MONITOR_TCP = lib_consts.HEALTH_MONITOR_TCP +# 'HTTP' +HEALTH_MONITOR_HTTP = lib_consts.HEALTH_MONITOR_HTTP +# 'HTTPS' +HEALTH_MONITOR_HTTPS = lib_consts.HEALTH_MONITOR_HTTPS +# 'TLS-HELLO' +HEALTH_MONITOR_TLS_HELLO = lib_consts.HEALTH_MONITOR_TLS_HELLO +# 'UDP-CONNECT' +HEALTH_MONITOR_UDP_CONNECT = lib_consts.HEALTH_MONITOR_UDP_CONNECT +SUPPORTED_HEALTH_MONITOR_TYPES = lib_consts.SUPPORTED_HEALTH_MONITOR_TYPES + +# 'GET' +HEALTH_MONITOR_HTTP_METHOD_GET = lib_consts.HEALTH_MONITOR_HTTP_METHOD_GET +# 'HEAD' +HEALTH_MONITOR_HTTP_METHOD_HEAD = lib_consts.HEALTH_MONITOR_HTTP_METHOD_HEAD +# 'POST' +HEALTH_MONITOR_HTTP_METHOD_POST = lib_consts.HEALTH_MONITOR_HTTP_METHOD_POST +# 'PUT' +HEALTH_MONITOR_HTTP_METHOD_PUT = lib_consts.HEALTH_MONITOR_HTTP_METHOD_PUT +# 'DELETE' +HEALTH_MONITOR_HTTP_METHOD_DELETE = ( + lib_consts.HEALTH_MONITOR_HTTP_METHOD_DELETE) +# 'TRACE' +HEALTH_MONITOR_HTTP_METHOD_TRACE = lib_consts.HEALTH_MONITOR_HTTP_METHOD_TRACE +# 'OPTIONS' +HEALTH_MONITOR_HTTP_METHOD_OPTIONS = ( + lib_consts.HEALTH_MONITOR_HTTP_METHOD_OPTIONS) +# 'CONNECT' +HEALTH_MONITOR_HTTP_METHOD_CONNECT = ( + lib_consts.HEALTH_MONITOR_HTTP_METHOD_CONNECT) +# 'PATCH' +HEALTH_MONITOR_HTTP_METHOD_PATCH = lib_consts.HEALTH_MONITOR_HTTP_METHOD_PATCH +SUPPORTED_HEALTH_MONITOR_HTTP_METHODS = ( + lib_consts.SUPPORTED_HEALTH_MONITOR_HTTP_METHODS) + +# 'REJECT' +L7POLICY_ACTION_REJECT = lib_consts.L7POLICY_ACTION_REJECT +# 'REDIRECT_TO_URL' +L7POLICY_ACTION_REDIRECT_TO_URL = lib_consts.L7POLICY_ACTION_REDIRECT_TO_URL +# 'REDIRECT_TO_POOL' +L7POLICY_ACTION_REDIRECT_TO_POOL = lib_consts.L7POLICY_ACTION_REDIRECT_TO_POOL +# 'REDIRECT_PREFIX' +L7POLICY_ACTION_REDIRECT_PREFIX = lib_consts.L7POLICY_ACTION_REDIRECT_PREFIX +SUPPORTED_L7POLICY_ACTIONS = lib_consts.SUPPORTED_L7POLICY_ACTIONS + +# 'REGEX' +L7RULE_COMPARE_TYPE_REGEX = lib_consts.L7RULE_COMPARE_TYPE_REGEX +# 'STARTS_WITH' +L7RULE_COMPARE_TYPE_STARTS_WITH = lib_consts.L7RULE_COMPARE_TYPE_STARTS_WITH +# 'ENDS_WITH' +L7RULE_COMPARE_TYPE_ENDS_WITH = lib_consts.L7RULE_COMPARE_TYPE_ENDS_WITH +# 'CONTAINS' +L7RULE_COMPARE_TYPE_CONTAINS = lib_consts.L7RULE_COMPARE_TYPE_CONTAINS +# 'EQUAL_TO' +L7RULE_COMPARE_TYPE_EQUAL_TO = lib_consts.L7RULE_COMPARE_TYPE_EQUAL_TO +SUPPORTED_L7RULE_COMPARE_TYPES = lib_consts.SUPPORTED_L7RULE_COMPARE_TYPES + +# 'HOST_NAME' +L7RULE_TYPE_HOST_NAME = lib_consts.L7RULE_TYPE_HOST_NAME +# 'PATH' +L7RULE_TYPE_PATH = lib_consts.L7RULE_TYPE_PATH +# 'FILE_TYPE' +L7RULE_TYPE_FILE_TYPE = lib_consts.L7RULE_TYPE_FILE_TYPE +# 'HEADER' +L7RULE_TYPE_HEADER = lib_consts.L7RULE_TYPE_HEADER +# 'COOKIE' +L7RULE_TYPE_COOKIE = lib_consts.L7RULE_TYPE_COOKIE +# 'SSL_CONN_HAS_CERT' +L7RULE_TYPE_SSL_CONN_HAS_CERT = lib_consts.L7RULE_TYPE_SSL_CONN_HAS_CERT +# 
'SSL_VERIFY_RESULT' +L7RULE_TYPE_SSL_VERIFY_RESULT = lib_consts.L7RULE_TYPE_SSL_VERIFY_RESULT +# 'SSL_DN_FIELD' +L7RULE_TYPE_SSL_DN_FIELD = lib_consts.L7RULE_TYPE_SSL_DN_FIELD +SUPPORTED_L7RULE_TYPES = lib_consts.SUPPORTED_L7RULE_TYPES + +# 'ROUND_ROBIN' +LB_ALGORITHM_ROUND_ROBIN = lib_consts.LB_ALGORITHM_ROUND_ROBIN +# 'LEAST_CONNECTIONS' +LB_ALGORITHM_LEAST_CONNECTIONS = lib_consts.LB_ALGORITHM_LEAST_CONNECTIONS +# 'SOURCE_IP' +LB_ALGORITHM_SOURCE_IP = lib_consts.LB_ALGORITHM_SOURCE_IP +SUPPORTED_LB_ALGORITHMS = lib_consts.SUPPORTED_LB_ALGORITHMS + +# 'operating_status' +OPERATING_STATUS = lib_consts.OPERATING_STATUS +# 'ONLINE' +ONLINE = lib_consts.ONLINE +# 'OFFLINE' +OFFLINE = lib_consts.OFFLINE +# 'DEGRADED' +DEGRADED = lib_consts.DEGRADED +# 'ERROR' +ERROR = lib_consts.ERROR +# 'DRAINING' +DRAINING = lib_consts.DRAINING +# 'NO_MONITOR' +NO_MONITOR = lib_consts.NO_MONITOR +# 'operating_status' +SUPPORTED_OPERATING_STATUSES = lib_consts.SUPPORTED_OPERATING_STATUSES + +# 'TCP' +PROTOCOL_TCP = lib_consts.PROTOCOL_TCP +# 'UDP' +PROTOCOL_UDP = lib_consts.PROTOCOL_UDP +# 'HTTP' +PROTOCOL_HTTP = lib_consts.PROTOCOL_HTTP +# 'HTTPS' +PROTOCOL_HTTPS = lib_consts.PROTOCOL_HTTPS +# 'TERMINATED_HTTPS' +PROTOCOL_TERMINATED_HTTPS = lib_consts.PROTOCOL_TERMINATED_HTTPS +# 'PROXY' +PROTOCOL_PROXY = lib_consts.PROTOCOL_PROXY +SUPPORTED_PROTOCOLS = lib_consts.SUPPORTED_PROTOCOLS +# 'PROMETHEUS' +PROTOCOL_PROMETHEUS = lib_consts.PROTOCOL_PROMETHEUS + +# 'provisioning_status' +PROVISIONING_STATUS = lib_consts.PROVISIONING_STATUS +# Amphora has been allocated to a load balancer 'ALLOCATED' +AMPHORA_ALLOCATED = lib_consts.AMPHORA_ALLOCATED +# Amphora is being built 'BOOTING' +AMPHORA_BOOTING = lib_consts.AMPHORA_BOOTING +# 'FAILOVER_STOPPED'. Failover threshold level has been reached. 
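The re-export pattern used throughout this block reduces to the following standalone sketch (the class is a stand-in for the octavia_lib constants module, not the real import):

    class lib_consts:  # stand-in for octavia_lib.common.constants
        ACTIVE = 'ACTIVE'

    # Deprecated alias: old imports keep working, and both names refer
    # to the exact same object, so the two modules cannot drift apart.
    ACTIVE = lib_consts.ACTIVE
    assert ACTIVE is lib_consts.ACTIVE
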
+AMPHORA_FAILOVER_STOPPED = lib_consts.AMPHORA_FAILOVER_STOPPED +# 'ACTIVE' +ACTIVE = lib_consts.ACTIVE +# 'PENDING_DELETE' +PENDING_DELETE = lib_consts.PENDING_DELETE +# 'PENDING_UPDATE' +PENDING_UPDATE = lib_consts.PENDING_UPDATE +# 'PENDING_CREATE' +PENDING_CREATE = lib_consts.PENDING_CREATE +# 'DELETED' +DELETED = lib_consts.DELETED +SUPPORTED_PROVISIONING_STATUSES = lib_consts.SUPPORTED_PROVISIONING_STATUSES + +# 'SOURCE_IP' +SESSION_PERSISTENCE_SOURCE_IP = lib_consts.SESSION_PERSISTENCE_SOURCE_IP +# 'HTTP_COOKIE' +SESSION_PERSISTENCE_HTTP_COOKIE = lib_consts.SESSION_PERSISTENCE_HTTP_COOKIE +# 'APP_COOKIE' +SESSION_PERSISTENCE_APP_COOKIE = lib_consts.SESSION_PERSISTENCE_APP_COOKIE +SUPPORTED_SP_TYPES = lib_consts.SUPPORTED_SP_TYPES + +# List of HTTP headers which are supported for insertion +SUPPORTED_HTTP_HEADERS = lib_consts.SUPPORTED_HTTP_HEADERS + +# List of SSL headers for client certificate +SUPPORTED_SSL_HEADERS = lib_consts.SUPPORTED_SSL_HEADERS + +############################################################################### + +HEALTH_MONITOR_DEFAULT_EXPECTED_CODES = '200' +HEALTH_MONITOR_HTTP_DEFAULT_METHOD = lib_consts.HEALTH_MONITOR_HTTP_METHOD_GET +HEALTH_MONITOR_DEFAULT_URL_PATH = '/' +TYPE = 'type' +URL_PATH = 'url_path' +HTTP_METHOD = 'http_method' +HTTP_VERSION = 'http_version' +EXPECTED_CODES = 'expected_codes' +DELAY = 'delay' +TIMEOUT = 'timeout' +MAX_RETRIES = 'max_retries' +MAX_RETRIES_DOWN = 'max_retries_down' +RISE_THRESHOLD = 'rise_threshold' +DOMAIN_NAME = 'domain_name' + +UPDATE_STATS = 'UPDATE_STATS' +UPDATE_HEALTH = 'UPDATE_HEALTH' + +VALID_LISTENER_POOL_PROTOCOL_MAP = { + PROTOCOL_TCP: [PROTOCOL_HTTP, PROTOCOL_HTTPS, + PROTOCOL_PROXY, lib_consts.PROTOCOL_PROXYV2, PROTOCOL_TCP], + PROTOCOL_HTTP: [PROTOCOL_HTTP, PROTOCOL_PROXY, + lib_consts.PROTOCOL_PROXYV2], + PROTOCOL_HTTPS: [PROTOCOL_HTTPS, PROTOCOL_PROXY, + lib_consts.PROTOCOL_PROXYV2, PROTOCOL_TCP], + PROTOCOL_TERMINATED_HTTPS: [PROTOCOL_HTTP, PROTOCOL_PROXY, + lib_consts.PROTOCOL_PROXYV2], + PROTOCOL_UDP: [PROTOCOL_UDP], + lib_consts.PROTOCOL_SCTP: [lib_consts.PROTOCOL_SCTP], + lib_consts.PROTOCOL_PROMETHEUS: []} + +# API Integer Ranges +MIN_PORT_NUMBER = 1 +MAX_PORT_NUMBER = 65535 + +DEFAULT_CONNECTION_LIMIT = -1 +MIN_CONNECTION_LIMIT = -1 + +DEFAULT_WEIGHT = 1 +MIN_WEIGHT = 0 +MAX_WEIGHT = 256 + +DEFAULT_MAX_RETRIES_DOWN = 3 +MIN_HM_RETRIES = 1 +MAX_HM_RETRIES = 10 + +# 24 days: days d h m ms +MAX_TIMEOUT = 24 * 24 * 60 * 60 * 1000 +MIN_TIMEOUT = 0 + +DEFAULT_TIMEOUT_CLIENT_DATA = 50000 +DEFAULT_TIMEOUT_MEMBER_CONNECT = 5000 +DEFAULT_TIMEOUT_MEMBER_DATA = 50000 +DEFAULT_TIMEOUT_TCP_INSPECT = 0 + +MUTABLE_STATUSES = (lib_consts.ACTIVE,) +DELETABLE_STATUSES = (lib_consts.ACTIVE, lib_consts.ERROR) +FAILOVERABLE_STATUSES = (lib_consts.ACTIVE, lib_consts.ERROR) + +AMPHORA_VM = 'VM' +SUPPORTED_AMPHORA_TYPES = (AMPHORA_VM,) + +DISTINGUISHED_NAME_FIELD_REGEX = lib_consts.DISTINGUISHED_NAME_FIELD_REGEX + +# For redirect, only codes 301, 302, 303, 307 and 308 are # supported. +SUPPORTED_L7POLICY_REDIRECT_HTTP_CODES = [301, 302, 303, 307, 308] + +SUPPORTED_HTTP_VERSIONS = [1.0, 1.1] + +MIN_POLICY_POSITION = 1 +# Largest a 32-bit integer can be, which is a limitation +# here if you're using MySQL, as most probably are. 
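How a map like VALID_LISTENER_POOL_PROTOCOL_MAP is typically consulted, as a reduced sketch (only two protocols shown, with shortened names; the membership check is the point):

    VALID = {
        'HTTP': ['HTTP', 'PROXY', 'PROXYV2'],
        'UDP': ['UDP'],
    }

    def pool_protocol_ok(listener_proto, pool_proto):
        # A pool protocol is acceptable only if the listener's entry
        # lists it; unknown listener protocols accept nothing.
        return pool_proto in VALID.get(listener_proto, [])

    assert pool_protocol_ok('HTTP', 'PROXY')
    assert not pool_protocol_ok('UDP', 'HTTP')
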
This just needs +# to be larger than any existing rule position numbers which will +# definitely be the case with 2147483647 +MAX_POLICY_POSITION = 2147483647 + +# Testing showed haproxy config failed to parse after more than +# 53 rules per policy +MAX_L7RULES_PER_L7POLICY = 50 + +# See RFCs 2616, 2965, 6265, 7230: Should match characters valid in a +# http header or cookie name. +HTTP_HEADER_NAME_REGEX = r'\A[a-zA-Z0-9!#$%&\'*+-.^_`|~]+\Z' + +# See RFCs 2616, 2965, 6265: Should match characters valid in a cookie value. +HTTP_COOKIE_VALUE_REGEX = r'\A[a-zA-Z0-9!#$%&\'()*+-./:<=>?@[\]^_`{|}~]+\Z' + +# See RFC 7230: Should match characters valid in a header value. +HTTP_HEADER_VALUE_REGEX = (r'\A[a-zA-Z0-9' + r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]+\Z') + +# Also in RFC 7230: Should match characters valid in a header value +# when quoted with double quotes. +HTTP_QUOTED_HEADER_VALUE_REGEX = (r'\A"[a-zA-Z0-9 \t' + r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]*"\Z') + +DOMAIN_NAME_REGEX = ( + r'^(?=.{1,253}\.?$)(?:(?!-|[^.]+_)[A-Za-z0-9-_]{1,63}(?= 2: + feature_compatibility[lib_consts.PROTOCOL_PROMETHEUS] = True + # haproxy 2.2 requires insecure-fork-wanted for PING healthchecks + if versionutils.is_compatible("2.2.0", version, same_major=False): + feature_compatibility[constants.INSECURE_FORK] = True + + return self.render_loadbalancer_obj( + host_amphora, listeners, tls_certs=tls_certs, + socket_path=socket_path, + feature_compatibility=feature_compatibility, + amp_details=amp_details) + + def _get_template(self): + """Returns the specified Jinja configuration template.""" + global JINJA_ENV + if not JINJA_ENV: + template_loader = jinja2.FileSystemLoader( + searchpath=os.path.dirname(self.haproxy_template)) + JINJA_ENV = jinja2.Environment( + autoescape=True, + loader=template_loader, + trim_blocks=True, + lstrip_blocks=True) + JINJA_ENV.filters['hash_amp_id'] = octavia_utils.base64_sha1_string + return JINJA_ENV.get_template(os.path.basename(self.haproxy_template)) + + def _format_log_string(self, load_balancer, protocol): + log_format = CONF.haproxy_amphora.user_log_format.replace( + '{{ project_id }}', load_balancer.project_id) + log_format = log_format.replace('{{ lb_id }}', load_balancer.id) + + # Order of these filters matter. + # TODO(johnsom) Remove when HAProxy handles the format string + # with HTTP variables in TCP listeners. + # Currently it either throws an error or just fails + # to log the message. 
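Note that '{{ project_id }}' and '{{ lb_id }}' above are plain string tokens handled with str.replace(), not Jinja variables. A quick sketch with made-up values (the format string here is illustrative, not the shipped default):

    fmt = '{{ project_id }} {{ lb_id }} %ci %cp %t %{+Q}r %ST'
    fmt = fmt.replace('{{ project_id }}', 'project-a')
    fmt = fmt.replace('{{ lb_id }}', 'lb-1')
    # For non-HTTP listeners the HTTP-only fields are then blanked:
    fmt = fmt.replace('%{+Q}r', '-').replace('%ST', '-')
    print(fmt)  # project-a lb-1 %ci %cp %t - -
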
+ if protocol not in constants.HAPROXY_HTTP_PROTOCOLS: + log_format = log_format.replace('%{+Q}r', '-') + log_format = log_format.replace('%r', '-') + log_format = log_format.replace('%{+Q}ST', '-') + log_format = log_format.replace('%ST', '-') + + log_format = log_format.replace(' ', '\\ ') + return log_format + + def render_loadbalancer_obj(self, host_amphora, listeners, + tls_certs=None, socket_path=None, + feature_compatibility=None, + amp_details: Optional[dict] = None): + """Renders a templated configuration from a load balancer object + + :param host_amphora: The Amphora this configuration is hosted on + :param listener: The listener configuration + :param tls_certs: Dict of the TLS certificates for the listener + :param socket_path: The socket path for Haproxy process + :param amp_details: Detail information from the amphora + :return: Rendered configuration + """ + feature_compatibility = feature_compatibility or {} + loadbalancer = self._transform_loadbalancer( + host_amphora, + listeners[0].load_balancer, + listeners, + tls_certs, + feature_compatibility) + if not socket_path: + socket_path = (f'{self.base_amp_path}/' + f'{listeners[0].load_balancer.id}.sock') + state_file_path = '{}/{}/servers-state'.format( + self.base_amp_path, + listeners[0].load_balancer.id) if feature_compatibility.get( + constants.SERVER_STATE_FILE) else '' + prometheus_listener = any( + lsnr.protocol == lib_consts.PROTOCOL_PROMETHEUS for lsnr in + listeners) + require_insecure_fork = feature_compatibility.get( + constants.INSECURE_FORK) + enable_prometheus = prometheus_listener and feature_compatibility.get( + lib_consts.PROTOCOL_PROMETHEUS, False) + term_https_listener = any( + lsnr.protocol == lib_consts.PROTOCOL_TERMINATED_HTTPS for lsnr in + listeners) + + jinja_dict = { + 'loadbalancer': loadbalancer, + 'stats_sock': socket_path, + 'log_http': self.log_http, + 'log_server': self.log_server, + 'state_file': state_file_path, + 'administrative_log_facility': + CONF.amphora_agent.administrative_log_facility, + 'user_log_facility': + CONF.amphora_agent.user_log_facility, + 'connection_logging': self.connection_logging, + 'enable_prometheus': enable_prometheus, + 'require_insecure_fork': require_insecure_fork, + } + try: + # Enable cpu-pinning only if the amphora TuneD profile is active + if "amphora" in amp_details["active_tuned_profiles"].split(): + jinja_dict["cpu_count"] = int(amp_details["cpu_count"]) + except (KeyError, TypeError): + pass + + if term_https_listener: + try: + mem = amp_details["memory"] + # Account for 32 KB per established connection for each + # pair of HAProxy network sockets. Use 1024 as fallback + # because that is what ulimit -n typically returns. + max_conn_mem_kb = 32 * loadbalancer.get( + "global_connection_limit", 1024) + # LP #2119987: Use 2/5 of the remaining memory for SSL caches + ssl_cache_mem_kb = (mem["free"] + mem["buffers"] + + mem["cached"] - max_conn_mem_kb) * 2 // 5 + # A cache block uses about 250 bytes of data. + # The HAProxy default of ssl_cache (20000) would take up + # 5000 KB. We don't want to go below that. 
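A worked example of the SSL-cache sizing above, with illustrative memory numbers:

    # Values that would come from amp_details, in KB.
    free, buffers, cached = 300_000, 20_000, 80_000
    global_connection_limit = 10_000
    max_conn_mem_kb = 32 * global_connection_limit  # 32 KB per connection
    ssl_cache_mem_kb = (free + buffers + cached - max_conn_mem_kb) * 2 // 5
    # Each cache entry is ~250 bytes, so roughly 4 entries fit per KB;
    # the value handed to haproxy is therefore ssl_cache_mem_kb * 4.
    print(ssl_cache_mem_kb, ssl_cache_mem_kb * 4)  # 32000 128000
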
+ if ssl_cache_mem_kb > 5000: + jinja_dict["ssl_cache"] = ssl_cache_mem_kb * 4 + except (KeyError, TypeError): + pass + + return self._get_template().render( + jinja_dict, constants=constants, lib_consts=lib_consts) + + def _transform_loadbalancer(self, host_amphora, loadbalancer, listeners, + tls_certs, feature_compatibility): + """Transforms a load balancer into an object that will + + be processed by the templating system + """ + listener_transforms = [] + for listener in listeners: + if listener.protocol in constants.LVS_PROTOCOLS: + continue + listener_transforms.append(self._transform_listener( + listener, tls_certs, feature_compatibility, loadbalancer)) + additional_vips = [ + vip.ip_address for vip in loadbalancer.additional_vips] + + ret_value = { + 'id': loadbalancer.id, + 'vip_address': loadbalancer.vip.ip_address, + 'additional_vips': additional_vips, + 'listeners': listener_transforms, + 'topology': loadbalancer.topology, + 'enabled': loadbalancer.enabled, + 'peer_port': listeners[0].peer_port, + 'host_amphora': self._transform_amphora( + host_amphora, feature_compatibility), + 'amphorae': loadbalancer.amphorae + } + # NOTE(sbalukoff): Global connection limit should be a sum of all + # listeners' connection limits. + connection_limit_sum = 0 + for listener in listeners: + if not listener.enabled: + continue + if listener.protocol in constants.LVS_PROTOCOLS: + continue + if listener.connection_limit and listener.connection_limit > -1: + connection_limit_sum += listener.connection_limit + else: + connection_limit_sum += ( + CONF.haproxy_amphora.default_connection_limit) + # If there's a limit between 0 and MAX, set it, otherwise just set MAX + if 0 < connection_limit_sum < constants.HAPROXY_MAX_MAXCONN: + ret_value['global_connection_limit'] = connection_limit_sum + else: + ret_value['global_connection_limit'] = ( + constants.HAPROXY_MAX_MAXCONN) + return ret_value + + def _transform_amphora(self, amphora, feature_compatibility): + """Transform an amphora into an object that will + + be processed by the templating system. 
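The global connection limit computation reduces to the following sketch (simplified: the real code also skips disabled listeners and LVS protocols, and the two constants stand in for the configured values):

    HAPROXY_MAX_MAXCONN = 1_000_000    # illustrative cap
    DEFAULT_CONNECTION_LIMIT = 50_000  # stands in for the config default

    listeners = [{'connection_limit': -1}, {'connection_limit': 2_000}]
    total = sum(lsnr['connection_limit']
                if lsnr['connection_limit'] and lsnr['connection_limit'] > -1
                else DEFAULT_CONNECTION_LIMIT
                for lsnr in listeners)
    global_limit = (total if 0 < total < HAPROXY_MAX_MAXCONN
                    else HAPROXY_MAX_MAXCONN)
    print(global_limit)  # 52000
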
+ """ + return { + 'id': amphora.id, + 'lb_network_ip': amphora.lb_network_ip, + 'vrrp_ip': amphora.vrrp_ip, + 'ha_ip': amphora.ha_ip, + 'vrrp_port_id': amphora.vrrp_port_id, + 'ha_port_id': amphora.ha_port_id, + 'role': amphora.role, + 'status': amphora.status, + 'vrrp_interface': amphora.vrrp_interface, + 'vrrp_priority': amphora.vrrp_priority + } + + def _transform_listener(self, listener: models.Listener, tls_certs, + feature_compatibility, + loadbalancer): + """Transforms a listener into an object that will + + be processed by the templating system + """ + ret_value = { + 'id': listener.id, + 'protocol_port': listener.protocol_port, + 'protocol_mode': PROTOCOL_MAP[listener.protocol], + 'protocol': listener.protocol, + 'insert_headers': listener.insert_headers, + 'enabled': listener.enabled, + 'timeout_client_data': ( + listener.timeout_client_data or + CONF.haproxy_amphora.timeout_client_data), + 'timeout_member_connect': ( + listener.timeout_member_connect or + CONF.haproxy_amphora.timeout_member_connect), + 'timeout_member_data': ( + listener.timeout_member_data or + CONF.haproxy_amphora.timeout_member_data), + 'timeout_tcp_inspect': (listener.timeout_tcp_inspect or + CONF.haproxy_amphora.timeout_tcp_inspect), + lib_consts.PROTOCOL_PROMETHEUS: feature_compatibility.get( + lib_consts.PROTOCOL_PROMETHEUS, False) + } + if self.connection_logging: + ret_value['user_log_format'] = ( + self._format_log_string(loadbalancer, listener.protocol)) + if listener.connection_limit and listener.connection_limit > -1: + ret_value['connection_limit'] = listener.connection_limit + else: + ret_value['connection_limit'] = ( + CONF.haproxy_amphora.default_connection_limit) + + if listener.tls_certificate_id: + ret_value['crt_list_filename'] = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + loadbalancer.id, f'{listener.id}.pem') + + if tls_certs is not None: + if listener.client_ca_tls_certificate_id: + ret_value['client_ca_tls_path'] = '%s' % ( + os.path.join( + self.base_crt_dir, loadbalancer.id, + tls_certs[listener.client_ca_tls_certificate_id])) + ret_value['client_auth'] = CLIENT_AUTH_MAP.get( + listener.client_authentication) + + if listener.client_crl_container_id: + ret_value['client_crl_path'] = '%s' % ( + os.path.join(self.base_crt_dir, loadbalancer.id, + tls_certs[listener.client_crl_container_id])) + + tls_enabled = False + if listener.protocol in (constants.PROTOCOL_TERMINATED_HTTPS, + constants.PROTOCOL_PROMETHEUS): + tls_enabled = True + if listener.tls_ciphers is not None: + ret_value['tls_ciphers'] = listener.tls_ciphers + if listener.tls_versions is not None: + ret_value['tls_versions'] = listener.tls_versions + if listener.alpn_protocols is not None: + ret_value['alpn_protocols'] = ",".join(listener.alpn_protocols) + if listener.hsts_max_age is not None: + hsts_directives = f"max-age={listener.hsts_max_age};" + if listener.hsts_include_subdomains: + hsts_directives += " includeSubDomains;" + if listener.hsts_preload: + hsts_directives += " preload;" + ret_value['hsts_directives'] = hsts_directives + + pools = [] + pool_gen = (pool for pool in listener.pools if + pool.provisioning_status != constants.PENDING_DELETE) + for pool in pool_gen: + kwargs = {} + if tls_certs is not None and tls_certs.get(pool.id): + kwargs = {'pool_tls_certs': tls_certs.get(pool.id)} + pools.append(self._transform_pool( + pool, feature_compatibility, tls_enabled, **kwargs)) + ret_value['pools'] = pools + policy_gen = (policy for policy in listener.l7policies if + policy.provisioning_status != 
constants.PENDING_DELETE) + if listener.default_pool: + for pool in pools: + if pool['id'] == listener.default_pool.id: + ret_value['default_pool'] = pool + break + + l7policies = [self._transform_l7policy( + x, feature_compatibility, tls_enabled, tls_certs) + for x in policy_gen] + ret_value['l7policies'] = l7policies + return ret_value + + def _transform_pool(self, pool, feature_compatibility, + listener_tls_enabled, pool_tls_certs=None): + """Transforms a pool into an object that will + + be processed by the templating system + """ + proxy_protocol_version = None + if pool.protocol == constants.PROTOCOL_PROXY: + proxy_protocol_version = 1 + if pool.protocol == lib_consts.PROTOCOL_PROXYV2: + proxy_protocol_version = 2 + ret_value = { + 'id': pool.id, + 'protocol': PROTOCOL_MAP[pool.protocol], + 'proxy_protocol': proxy_protocol_version, + 'listener_tls_enabled': listener_tls_enabled, + 'lb_algorithm': BALANCE_MAP.get(pool.lb_algorithm, 'roundrobin'), + 'members': [], + 'health_monitor': '', + 'session_persistence': '', + 'enabled': pool.enabled, + 'operating_status': pool.operating_status, + 'stick_size': CONF.haproxy_amphora.haproxy_stick_size, + constants.HTTP_REUSE: feature_compatibility.get( + constants.HTTP_REUSE, False), + 'ca_tls_path': '', + 'crl_path': '', + 'tls_enabled': pool.tls_enabled + } + members_gen = (mem for mem in pool.members if + mem.provisioning_status != constants.PENDING_DELETE) + members = [self._transform_member(x, feature_compatibility) + for x in members_gen] + ret_value['members'] = members + health_mon = pool.health_monitor + if (health_mon and + health_mon.provisioning_status != constants.PENDING_DELETE): + ret_value['health_monitor'] = self._transform_health_monitor( + health_mon, feature_compatibility) + if pool.session_persistence: + ret_value[ + 'session_persistence'] = self._transform_session_persistence( + pool.session_persistence, feature_compatibility) + if (pool.tls_certificate_id and pool_tls_certs and + pool_tls_certs.get('client_cert')): + ret_value['client_cert'] = pool_tls_certs.get('client_cert') + if pool.tls_enabled is True: + if pool.tls_ciphers is not None: + ret_value['tls_ciphers'] = pool.tls_ciphers + if pool.tls_versions is not None: + ret_value['tls_versions'] = pool.tls_versions + if (pool.alpn_protocols is not None and + feature_compatibility.get(constants.POOL_ALPN, False)): + ret_value['alpn_protocols'] = ",".join(pool.alpn_protocols) + if (pool.ca_tls_certificate_id and pool_tls_certs and + pool_tls_certs.get('ca_cert')): + ret_value['ca_cert'] = pool_tls_certs.get('ca_cert') + if (pool.crl_container_id and pool_tls_certs and + pool_tls_certs.get('crl')): + ret_value['crl'] = pool_tls_certs.get('crl') + + return ret_value + + @staticmethod + def _transform_session_persistence(persistence, feature_compatibility): + """Transforms session persistence into an object that will + + be processed by the templating system + """ + return { + 'type': persistence.type, + 'cookie_name': persistence.cookie_name + } + + @staticmethod + def _transform_member(member, feature_compatibility): + """Transforms a member into an object that will + + be processed by the templating system + """ + return { + 'id': member.id, + 'address': member.ip_address, + 'protocol_port': member.protocol_port, + 'weight': member.weight, + 'enabled': member.enabled, + 'subnet_id': member.subnet_id, + 'operating_status': member.operating_status, + 'monitor_address': member.monitor_address, + 'monitor_port': member.monitor_port, + 'backup': member.backup + } + + def 
_transform_health_monitor(self, monitor, feature_compatibility): + """Transforms a health monitor into an object that will + + be processed by the templating system + """ + codes = None + if monitor.expected_codes: + codes = '|'.join(octavia_utils.expand_expected_codes( + monitor.expected_codes)) + return { + 'id': monitor.id, + 'type': monitor.type, + 'delay': monitor.delay, + 'timeout': monitor.timeout, + 'fall_threshold': monitor.fall_threshold, + 'rise_threshold': monitor.rise_threshold, + 'http_method': monitor.http_method, + 'url_path': monitor.url_path, + 'expected_codes': codes, + 'enabled': monitor.enabled, + 'http_version': monitor.http_version, + 'domain_name': monitor.domain_name, + } + + def _transform_l7policy(self, l7policy, feature_compatibility, + listener_tls_enabled, tls_certs=None): + """Transforms an L7 policy into an object that will + + be processed by the templating system + """ + ret_value = { + 'id': l7policy.id, + 'action': l7policy.action, + 'redirect_url': l7policy.redirect_url, + 'redirect_prefix': l7policy.redirect_prefix, + 'enabled': l7policy.enabled + } + if (l7policy.redirect_pool and + l7policy.redirect_pool.provisioning_status != + constants.PENDING_DELETE): + kwargs = {} + if tls_certs is not None and tls_certs.get( + l7policy.redirect_pool.id): + kwargs = {'pool_tls_certs': + tls_certs.get(l7policy.redirect_pool.id)} + ret_value['redirect_pool'] = self._transform_pool( + l7policy.redirect_pool, feature_compatibility, + listener_tls_enabled, **kwargs) + else: + ret_value['redirect_pool'] = None + if (l7policy.action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL, + constants.L7POLICY_ACTION_REDIRECT_PREFIX] and + l7policy.redirect_http_code): + ret_value['redirect_http_code'] = l7policy.redirect_http_code + else: + ret_value['redirect_http_code'] = None + rule_gen = (rule for rule in l7policy.l7rules if rule.enabled and + rule.provisioning_status != constants.PENDING_DELETE) + l7rules = [self._transform_l7rule(x, feature_compatibility) + for x in rule_gen] + ret_value['l7rules'] = l7rules + return ret_value + + def _transform_l7rule(self, l7rule, feature_compatibility): + """Transforms an L7 rule into an object that will + + be processed by the templating system + """ + return { + 'id': l7rule.id, + 'type': l7rule.type, + 'compare_type': l7rule.compare_type, + 'key': l7rule.key, + 'value': self._escape_haproxy_config_string(l7rule.value), + 'invert': l7rule.invert, + 'enabled': l7rule.enabled + } + + @staticmethod + def _escape_haproxy_config_string(value): + """Escapes certain characters in a given string such that + + haproxy will parse the string as a single value + """ + # Escape backslashes first + value = re.sub(r'\\', r'\\\\', value) + # Spaces next + value = re.sub(' ', '\\ ', value) + return value diff --git a/octavia/common/jinja/haproxy/combined_listeners/templates/base.j2 b/octavia/common/jinja/haproxy/combined_listeners/templates/base.j2 new file mode 100644 index 0000000000..e5090e1e22 --- /dev/null +++ b/octavia/common/jinja/haproxy/combined_listeners/templates/base.j2 @@ -0,0 +1,67 @@ +{# Copyright (c) 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
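The two-step escaping in _escape_haproxy_config_string() can be tried standalone; the order matters, since escaping spaces first would double the escaping backslashes too:

    import re

    def escape_haproxy_string(value):
        # Double backslashes first, then escape spaces, so haproxy
        # reads the whole value as a single token.
        value = re.sub(r'\\', r'\\\\', value)
        return re.sub(' ', '\\ ', value)

    print(escape_haproxy_string('a b\\c'))  # a\ b\\c
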
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +#} +# Configuration for loadbalancer {{ loadbalancer_id }} +global + daemon + user nobody + log {{ log_http | default('/run/rsyslog/octavia/log', true)}} local{{ user_log_facility }} + log {{ log_server | default('/run/rsyslog/octavia/log', true)}} local{{ administrative_log_facility }} notice + stats socket {{ sock_path }} mode 0666 level user + {% if state_file %} + server-state-file {{ state_file }} + {% endif %} + {% if loadbalancer.global_connection_limit is defined %} + maxconn {{ loadbalancer.global_connection_limit }} + {% endif %} + {% if ssl_cache is defined %} + tune.ssl.cachesize {{ ssl_cache }} + {% endif %} + {%- if cpu_count is defined and cpu_count > 1 %} + nbthread {{ cpu_count - 1 }} + cpu-map auto:1/1-{{ cpu_count - 1 }} 1-{{ cpu_count - 1 }} + {%- endif %} + {% set found_ns = namespace(found=false) %} + {% for listener in loadbalancer.listeners if listener.enabled %} + {% for pool in listener.pools if pool.enabled %} + {% if pool.health_monitor and pool.health_monitor.enabled and + pool.health_monitor.type == constants.HEALTH_MONITOR_PING and + found_ns.found == false %} + {% set found_ns.found = true %} + external-check + {% if require_insecure_fork %} + insecure-fork-wanted + {% endif %} + {% endif %} + {% endfor %} + {% endfor %} + +defaults + {% if connection_logging %} + log global + {% else %} + no log + {% endif %} + retries 3 + option redispatch + option splice-request + option splice-response + option http-keep-alive + +{% block peers %}{% endblock peers %} + +{% block prometheus %}{% endblock prometheus %} + +{% block proxies %}{% endblock proxies %} diff --git a/octavia/common/jinja/haproxy/combined_listeners/templates/haproxy.cfg.j2 b/octavia/common/jinja/haproxy/combined_listeners/templates/haproxy.cfg.j2 new file mode 100644 index 0000000000..31ec07818c --- /dev/null +++ b/octavia/common/jinja/haproxy/combined_listeners/templates/haproxy.cfg.j2 @@ -0,0 +1,61 @@ +{# Copyright (c) 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
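A worked example of the cpu-pinning lines rendered by the global section above: with the amphora TuneD profile active and cpu_count == 4, the template leaves CPU 0 free and pins the remaining threads:

    cpu_count = 4
    if cpu_count > 1:
        # Matches the base.j2 expressions {{ cpu_count - 1 }}.
        print(f"nbthread {cpu_count - 1}")                      # nbthread 3
        print(f"cpu-map auto:1/1-{cpu_count - 1} 1-{cpu_count - 1}")
        # cpu-map auto:1/1-3 1-3
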
+# +#} +{% extends 'base.j2' %} + + +{% from 'macros.j2' import frontend_macro, backend_macro %} +{% from 'macros.j2' import peers_macro %} + + +{% set loadbalancer_id = loadbalancer.id %} +{% set sock_path = stats_sock %} + + +{% block peers %} +{{ peers_macro(constants, loadbalancer) }} +{% endblock peers %} + +{% block prometheus %} +{% if enable_prometheus %} +frontend prometheus-exporter-internal-endpoint + bind 127.0.0.1:9101 + mode http + no log + http-request use-service prometheus-exporter if { path /metrics } + http-request reject + timeout http-request 5s + timeout client 5s +backend prometheus-exporter-internal + mode http + no log + balance first + timeout connect 5s + timeout server 5s + server prometheus-internal 127.0.0.1:9102 +{% endif %} +{% endblock prometheus %} + +{% block proxies %} + {% if loadbalancer.enabled %} + {% for listener in loadbalancer.listeners if listener.enabled %} + {{- frontend_macro(constants, lib_consts, listener, loadbalancer.vip_address, + loadbalancer.additional_vips) }} + {% for pool in listener.pools if pool.enabled %} + {{- backend_macro(constants, lib_consts, listener, pool, loadbalancer, state_file) }} + {% endfor %} + {% endfor %} + {% endif %} +{% endblock proxies %} diff --git a/octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 b/octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 new file mode 100644 index 0000000000..600085967a --- /dev/null +++ b/octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 @@ -0,0 +1,450 @@ +{# Copyright (c) 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
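The extends/block mechanism this template relies on can be demonstrated in a few lines of Python; this sketch uses in-memory templates instead of the real files, and the content is illustrative:

    import jinja2

    env = jinja2.Environment(loader=jinja2.DictLoader({
        'base.j2': 'global\n{% block proxies %}{% endblock %}',
        'haproxy.cfg.j2': ("{% extends 'base.j2' %}"
                           '{% block proxies %}frontend f1{% endblock %}'),
    }))
    # The child supplies only the proxies block; base.j2 provides the rest.
    print(env.get_template('haproxy.cfg.j2').render())
    # global
    # frontend f1
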
+# +#} +{% macro peers_macro(constants, loadbalancer) %} + {% if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY %} +peers {{ "%s_peers"|format(loadbalancer.id.replace("-", ""))|trim() }} + {% for amp in loadbalancer.amphorae if ( + amp.status == constants.AMPHORA_ALLOCATED) %} + {# HAProxy has peer name limitations, thus the hash filter #} + peer {{ amp.id|hash_amp_id|replace('=', '') }} {{ + amp.vrrp_ip }}:{{ constants.HAPROXY_BASE_PEER_PORT }} + {% endfor %} + {% endif %} +{% endmacro %} + + +{% macro bind_macro(constants, lib_consts, listener, lb_vip_address) %} + {% if listener.crt_list_filename is defined %} + {% set def_crt_opt = ("ssl crt-list %s"|format( + listener.crt_list_filename)|trim()) %} + {% else %} + {% set def_crt_opt = "" %} + {% endif %} + {% if listener.client_ca_tls_path and listener.client_auth %} + {% set client_ca_opt = "ca-file %s verify %s"|format(listener.client_ca_tls_path, listener.client_auth)|trim() %} + {% else %} + {% set client_ca_opt = "" %} + {% endif %} + {% if listener.client_crl_path and listener.client_ca_tls_path %} + {% set ca_crl_opt = "crl-file %s"|format(listener.client_crl_path)|trim() %} + {% else %} + {% set ca_crl_opt = "" %} + {% endif %} + {% if listener.tls_ciphers is defined %} + {% set ciphers_opt = "ciphers %s"|format(listener.tls_ciphers)|trim() %} + {% else %} + {% set ciphers_opt = "" %} + {% endif %} + {% set tls_versions_opt = "" %} + {% if listener.tls_versions is defined %} + {% if lib_consts.SSL_VERSION_3 not in listener.tls_versions %} + {% set tls_versions_opt = tls_versions_opt + " no-sslv3" %} + {% endif %} + {% if lib_consts.TLS_VERSION_1 not in listener.tls_versions %} + {% set tls_versions_opt = tls_versions_opt + " no-tlsv10" %} + {% endif %} + {% if lib_consts.TLS_VERSION_1_1 not in listener.tls_versions %} + {% set tls_versions_opt = tls_versions_opt + " no-tlsv11" %} + {% endif %} + {% if lib_consts.TLS_VERSION_1_2 not in listener.tls_versions %} + {% set tls_versions_opt = tls_versions_opt + " no-tlsv12" %} + {% endif %} + {% if lib_consts.TLS_VERSION_1_3 not in listener.tls_versions %} + {% set tls_versions_opt = tls_versions_opt + " no-tlsv13" %} + {% endif %} + {% endif %} + {% if listener.alpn_protocols is defined %} + {% set alpn_opt = "alpn %s"|format(listener.alpn_protocols)|trim() %} + {% else %} + {% set alpn_opt = "" %} + {% endif %} + +bind {{ lb_vip_address }}:{{ listener.protocol_port }} {{ +"%s %s %s %s%s %s"|format(def_crt_opt, client_ca_opt, ca_crl_opt, ciphers_opt, tls_versions_opt, alpn_opt)|trim() }} +{% endmacro %} + + +{% macro l7rule_compare_type_macro(constants, ctype, rtype=None) %} + {% if ctype == constants.L7RULE_COMPARE_TYPE_REGEX %} + {{- "-m reg" -}} + {% elif ctype == constants.L7RULE_COMPARE_TYPE_STARTS_WITH %} + {{- "-m beg" -}} + {% elif ctype == constants.L7RULE_COMPARE_TYPE_ENDS_WITH %} + {{- "-m end" -}} + {% elif ctype == constants.L7RULE_COMPARE_TYPE_CONTAINS %} + {{- "-m sub" -}} + {% elif ctype == constants.L7RULE_COMPARE_TYPE_EQUAL_TO %} + {# Specific handling for FILE_TYPE with EQUAL_TO, "path_end -m str" + # doesn't work with haproxy, "path_end" is enough for this type of + # comparison + # https://github.com/haproxy/haproxy/issues/2567 + #} + {% if rtype != constants.L7RULE_TYPE_FILE_TYPE %} + {{- "-m str" -}} + {% endif %} + {% endif %} +{% endmacro %} + + +{% macro l7rule_macro(constants, l7rule) %} + {% if l7rule.type == constants.L7RULE_TYPE_HOST_NAME %} + acl {{ l7rule.id }} req.hdr(host) -i {{ l7rule_compare_type_macro( + constants, l7rule.compare_type) 
}} {{ l7rule.value }} + {% elif l7rule.type == constants.L7RULE_TYPE_PATH %} + acl {{ l7rule.id }} path {{ l7rule_compare_type_macro( + constants, l7rule.compare_type) }} {{ l7rule.value }} + {% elif l7rule.type == constants.L7RULE_TYPE_FILE_TYPE %} + acl {{ l7rule.id }} path_end {{ l7rule_compare_type_macro( + constants, l7rule.compare_type, l7rule.type) }} {{ l7rule.value }} + {% elif l7rule.type == constants.L7RULE_TYPE_HEADER %} + acl {{ l7rule.id }} req.hdr({{ l7rule.key }}) {{ + l7rule_compare_type_macro( + constants, l7rule.compare_type) }} {{ l7rule.value }} + {% elif l7rule.type == constants.L7RULE_TYPE_COOKIE %} + acl {{ l7rule.id }} req.cook({{ l7rule.key }}) {{ + l7rule_compare_type_macro( + constants, l7rule.compare_type) }} {{ l7rule.value }} + {% elif l7rule.type == constants.L7RULE_TYPE_SSL_CONN_HAS_CERT %} + acl {{ l7rule.id }} ssl_c_used + {% elif l7rule.type == constants.L7RULE_TYPE_SSL_VERIFY_RESULT %} + acl {{ l7rule.id }} ssl_c_verify eq {{ l7rule.value }} + {% elif l7rule.type == constants.L7RULE_TYPE_SSL_DN_FIELD %} + acl {{ l7rule.id }} ssl_c_s_dn({{ l7rule.key }}) {{ + l7rule_compare_type_macro( + constants, l7rule.compare_type) }} {{ l7rule.value }} + {% endif %} +{% endmacro %} + + +{% macro l7rule_invert_macro(invert) %} + {% if invert %} + {{- "!" -}} + {% endif %} +{% endmacro %} + + +{% macro l7rule_list_macro(l7policy) %} + {% for l7rule in l7policy.l7rules %} + {{- " " -}}{{- l7rule_invert_macro(l7rule.invert) -}}{{- l7rule.id -}} + {% endfor %} +{% endmacro %} + + +{% macro l7policy_macro(constants, l7policy, listener) %} + {% for l7rule in l7policy.l7rules %} + {{- l7rule_macro(constants, l7rule) -}} + {% endfor %} + {% if l7policy.redirect_http_code %} + {% set redirect_http_code_opt = " code %s"|format( + l7policy.redirect_http_code) %} + {% else %} + {% set redirect_http_code_opt = "" %} + {% endif %} + {% if l7policy.action == constants.L7POLICY_ACTION_REJECT %} + http-request deny if{{ l7rule_list_macro(l7policy) }} + {% elif l7policy.action == constants.L7POLICY_ACTION_REDIRECT_TO_URL %} + redirect {{- redirect_http_code_opt }} location {{ l7policy.redirect_url }} if{{ l7rule_list_macro(l7policy) }} + {% elif l7policy.action == constants.L7POLICY_ACTION_REDIRECT_TO_POOL and l7policy.redirect_pool.enabled %} + use_backend {{ l7policy.redirect_pool.id }}:{{ listener.id }} if{{ l7rule_list_macro(l7policy) }} + {% elif l7policy.action == constants.L7POLICY_ACTION_REDIRECT_PREFIX %} + redirect {{- redirect_http_code_opt }} prefix {{ l7policy.redirect_prefix }} if{{ l7rule_list_macro(l7policy) }} + {% endif %} +{% endmacro %} + + +{% macro frontend_macro(constants, lib_consts, listener, lb_vip_address, additional_vips) %} +frontend {{ listener.id }} + {% if listener.connection_limit is defined %} + maxconn {{ listener.connection_limit }} + {% endif %} + {% if (listener.protocol.lower() == + constants.PROTOCOL_TERMINATED_HTTPS.lower()) %} + redirect scheme https if !{ ssl_fc } + {% if listener.hsts_directives is defined %} + http-response set-header Strict-Transport-Security "{{ listener.hsts_directives }}" + {% endif %} + {% endif %} + {{ bind_macro(constants, lib_consts, listener, lb_vip_address)|trim() }} + {% for add_vip in additional_vips %} + {{ bind_macro(constants, lib_consts, listener, add_vip)|trim() }} + {% endfor %} + mode {{ listener.protocol_mode }} + {% for l7policy in listener.l7policies if (l7policy.enabled and + l7policy.l7rules|length > 0) %} + {{- l7policy_macro(constants, l7policy, listener) -}} + {% endfor %} + {% if 
listener.default_pool and listener.default_pool.enabled %} + default_backend {{ listener.default_pool.id }}:{{ listener.id }} + {% endif %} + timeout client {{ listener.timeout_client_data }} + {% if listener.timeout_tcp_inspect %} + tcp-request inspect-delay {{ listener.timeout_tcp_inspect }} + {% endif %} + {% if listener.protocol == lib_consts.PROTOCOL_PROMETHEUS and + listener.get(lib_consts.PROTOCOL_PROMETHEUS, False) %} + default_backend prometheus-exporter-internal + {% endif %} + {% if listener.user_log_format is defined %} + log-format {{ listener.user_log_format }} + {% endif %} +{% endmacro %} + + +{% macro member_macro(constants, lib_consts, pool, member) %} + {% if pool.health_monitor and pool.health_monitor.enabled %} + {% if member.monitor_address %} + {% set monitor_addr_opt = " addr %s"|format(member.monitor_address) %} + {% else %} + {% set monitor_addr_opt = "" %} + {% endif %} + {% if member.monitor_port %} + {% set monitor_port_opt = " port %s"|format(member.monitor_port) %} + {% else %} + {% set monitor_port_opt = "" %} + {% endif %} + {% if pool.alpn_protocols is defined %} + {% set alpn_opt = " check-alpn %s"|format(pool.alpn_protocols) %} + {% else %} + {% set alpn_opt = "" %} + {% endif %} + {% if pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS %} + {% set monitor_ssl_opt = " check-ssl verify none" %} + {% else %} + {% set monitor_ssl_opt = "" %} + {% endif %} + {% set hm_opt = " check%s%s inter %ds fall %d rise %d%s%s"|format( + monitor_ssl_opt, alpn_opt, pool.health_monitor.delay, + pool.health_monitor.fall_threshold, + pool.health_monitor.rise_threshold, monitor_addr_opt, + monitor_port_opt) %} + {% else %} + {% set hm_opt = "" %} + {% endif %} + {% if (pool.session_persistence.type == + constants.SESSION_PERSISTENCE_HTTP_COOKIE) %} + {% set persistence_opt = " cookie %s"|format(member.id) %} + {% else %} + {% set persistence_opt = "" %} + {% endif %} + {% if pool.proxy_protocol == 1 %} + {% set proxy_protocol_opt = " send-proxy" %} + {% elif pool.proxy_protocol == 2 %} + {% if pool.listener_tls_enabled %} + {% set proxy_protocol_opt = " send-proxy-v2-ssl-cn" %} + {% else %} + {% set proxy_protocol_opt = " send-proxy-v2" %} + {% endif %} + {% else %} + {% set proxy_protocol_opt = "" %} + {% endif %} + {% if member.backup %} + {% set member_backup_opt = " backup" %} + {% else %} + {% set member_backup_opt = "" %} + {% endif %} + {% if member.enabled %} + {% set member_enabled_opt = "" %} + {% else %} + {% set member_enabled_opt = " disabled" %} + {% endif %} + {% if pool.tls_enabled %} + {% set def_opt_prefix = " ssl" %} + {% set def_sni_opt = " sni ssl_fc_sni" %} + {% else %} + {% set def_opt_prefix = "" %} + {% set def_sni_opt = "" %} + {% endif %} + {% if pool.client_cert and pool.tls_enabled %} + {% set def_crt_opt = " crt %s"|format(pool.client_cert) %} + {% else %} + {% set def_crt_opt = "" %} + {% endif %} + {% if pool.ca_cert and pool.tls_enabled %} + {% set ca_opt = " ca-file %s"|format(pool.ca_cert) %} + {% set def_verify_opt = " verify required" %} + {% if pool.crl %} + {% set crl_opt = " crl-file %s"|format(pool.crl) %} + {% else %} + {% set def_verify_opt = "" %} + {% endif %} + {% elif pool.tls_enabled %} + {% set def_verify_opt = " verify none" %} + {% endif %} + {% if pool.tls_ciphers is defined %} + {% set ciphers_opt = " ciphers %s"|format(pool.tls_ciphers) %} + {% else %} + {% set ciphers_opt = "" %} + {% endif %} + {% set tls_versions_opt = "" %} + {% if pool.tls_versions is defined %} + {% if lib_consts.SSL_VERSION_3 not in 
pool.tls_versions %} + {% set tls_versions_opt = tls_versions_opt + " no-sslv3" %} + {% endif %} + {% if lib_consts.TLS_VERSION_1 not in pool.tls_versions %} + {% set tls_versions_opt = tls_versions_opt + " no-tlsv10" %} + {% endif %} + {% if lib_consts.TLS_VERSION_1_1 not in pool.tls_versions %} + {% set tls_versions_opt = tls_versions_opt + " no-tlsv11" %} + {% endif %} + {% if lib_consts.TLS_VERSION_1_2 not in pool.tls_versions %} + {% set tls_versions_opt = tls_versions_opt + " no-tlsv12" %} + {% endif %} + {% if lib_consts.TLS_VERSION_1_3 not in pool.tls_versions %} + {% set tls_versions_opt = tls_versions_opt + " no-tlsv13" %} + {% endif %} + {% endif %} + {% if pool.alpn_protocols is defined %} + {% set alpn_opt = " alpn %s"|format(pool.alpn_protocols) %} + {% else %} + {% set alpn_opt = "" %} + {% endif %} + {{ "server %s %s:%d weight %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"|e|format( + member.id, member.address, member.protocol_port, member.weight, + hm_opt, persistence_opt, proxy_protocol_opt, member_backup_opt, + member_enabled_opt, def_opt_prefix, def_crt_opt, ca_opt, crl_opt, + def_verify_opt, def_sni_opt, ciphers_opt, tls_versions_opt, + alpn_opt)|trim() }} +{% endmacro %} + + +{% macro backend_macro(constants, lib_consts, listener, pool, loadbalancer, state_file) %} +backend {{ pool.id }}:{{ listener.id }} + {% if pool.proxy_protocol is not none %} + mode {{ listener.protocol_mode }} + {% else %} + mode {{ pool.protocol }} + {% endif %} + {% if pool.get(constants.HTTP_REUSE, False) and ( + pool.protocol.lower() == constants.PROTOCOL_HTTP.lower() or + (pool.protocol.lower() == constants.PROTOCOL_PROXY.lower() and + listener.protocol_mode.lower() == + constants.PROTOCOL_HTTP.lower()))%} + http-reuse safe + {% endif %} + balance {{ pool.lb_algorithm }} + {% if pool.session_persistence %} + {% if (pool.session_persistence.type == + constants.SESSION_PERSISTENCE_SOURCE_IP) %} + {% if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY %} + stick-table type ipv6 size {{ pool.stick_size }} peers {{ + "%s_peers"|format(loadbalancer.id.replace("-", ""))|trim() }} + {% else %} + stick-table type ipv6 size {{ pool.stick_size }} + {% endif %} + stick on src + {% elif (pool.session_persistence.type == + constants.SESSION_PERSISTENCE_APP_COOKIE) %} + {% if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY %} + stick-table type string len 64 size {{ + pool.stick_size }} peers {{ + "%s_peers"|format(loadbalancer.id.replace("-", ""))|trim() }} + {% else %} + stick-table type string len 64 size {{ pool.stick_size }} + {% endif %} + stick store-response res.cook({{ pool.session_persistence.cookie_name }}) + stick match req.cook({{ pool.session_persistence.cookie_name }}) + {% elif (pool.session_persistence.type == + constants.SESSION_PERSISTENCE_HTTP_COOKIE) %} + cookie SRV insert indirect nocache + {% endif %} + {% endif %} + {% if pool.health_monitor and pool.health_monitor.enabled %} + {% if state_file %} + load-server-state-from-file global + {% endif %} + timeout check {{ pool.health_monitor.timeout }}s + {% if (pool.health_monitor.type == + constants.HEALTH_MONITOR_HTTP or pool.health_monitor.type == + constants.HEALTH_MONITOR_HTTPS) %} + {% if pool.health_monitor.http_version %} + option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }} HTTP/{{ pool.health_monitor.http_version }} + {% if (pool.health_monitor.http_version == 1.1 and + pool.health_monitor.domain_name) %} + http-check send hdr Host {{ pool.health_monitor.domain_name }} + {% endif %} + 
{% else %} + option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }} + {% endif %} + http-check expect rstatus {{ pool.health_monitor.expected_codes }} + {% endif %} + {% if pool.health_monitor.type == constants.HEALTH_MONITOR_PING %} + option external-check + external-check command /var/lib/octavia/ping-wrapper.sh + {% endif %} + {% endif %} + {% if pool.protocol.lower() == constants.PROTOCOL_HTTP.lower() %} + {% if listener.insert_headers.get('X-Forwarded-For', + 'False').lower() == 'true' %} + option forwardfor + {% endif %} + {% if listener.insert_headers.get('X-Forwarded-Port', + 'False').lower() == 'true' %} + http-request set-header X-Forwarded-Port %[dst_port] + {% endif %} + {% endif %} + {% if listener.insert_headers.get('X-Forwarded-Proto', + 'False').lower() == 'true' %} + {% if listener.protocol.lower() == constants.PROTOCOL_HTTP.lower() %} + http-request set-header X-Forwarded-Proto http + {% elif listener.protocol.lower() == + constants.PROTOCOL_TERMINATED_HTTPS.lower() %} + http-request set-header X-Forwarded-Proto https + {% endif %} + {% endif %} + {% if listener.protocol.lower() == constants.PROTOCOL_TERMINATED_HTTPS.lower() %} + {% if listener.insert_headers.get('X-SSL-Client-Verify', + 'False').lower() == 'true' %} + http-request set-header X-SSL-Client-Verify %[ssl_c_verify] + {% endif %} + {% if listener.insert_headers.get('X-SSL-Client-Has-Cert', + 'False').lower() == 'true' %} + http-request set-header X-SSL-Client-Has-Cert %[ssl_c_used] + {% endif %} + {% if listener.insert_headers.get('X-SSL-Client-DN', + 'False').lower() == 'true' %} + http-request set-header X-SSL-Client-DN %{+Q}[ssl_c_s_dn] + {% endif %} + {% if listener.insert_headers.get('X-SSL-Client-CN', + 'False').lower() == 'true' %} + http-request set-header X-SSL-Client-CN %{+Q}[ssl_c_s_dn(cn)] + {% endif %} + {% if listener.insert_headers.get('X-SSL-Issuer', + 'False').lower() == 'true' %} + http-request set-header X-SSL-Issuer %{+Q}[ssl_c_i_dn] + {% endif %} + {% if listener.insert_headers.get('X-SSL-Client-SHA1', + 'False').lower() == 'true' %} + http-request set-header X-SSL-Client-SHA1 %{+Q}[ssl_c_sha1,hex] + {% endif %} + {% if listener.insert_headers.get('X-SSL-Client-Not-Before', + 'False').lower() == 'true' %} + http-request set-header X-SSL-Client-Not-Before %{+Q}[ssl_c_notbefore] + {% endif %} + {% if listener.insert_headers.get('X-SSL-Client-Not-After', + 'False').lower() == 'true' %} + http-request set-header X-SSL-Client-Not-After %{+Q}[ssl_c_notafter] + {% endif %} + {% endif %} + {% if listener.connection_limit is defined %} + fullconn {{ listener.connection_limit }} + {% endif %} + option allbackups + timeout connect {{ listener.timeout_member_connect }} + timeout server {{ listener.timeout_member_data }} + {% for member in pool.members %} + {{- member_macro(constants, lib_consts, pool, member) -}} + {% endfor %} +{% endmacro %} diff --git a/octavia/common/jinja/logging/__init__.py b/octavia/common/jinja/logging/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/common/jinja/logging/logging_jinja_cfg.py b/octavia/common/jinja/logging/logging_jinja_cfg.py new file mode 100644 index 0000000000..dabab6600b --- /dev/null +++ b/octavia/common/jinja/logging/logging_jinja_cfg.py @@ -0,0 +1,66 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
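The insert_headers flags consulted throughout the backend macro follow a string convention, sketched here with a made-up dict: values arrive as strings, so only 'True'/'true' enables a header and absence disables it.

    insert_headers = {'X-Forwarded-For': 'True'}  # values are strings
    xff_on = insert_headers.get('X-Forwarded-For', 'False').lower() == 'true'
    xfp_on = insert_headers.get('X-Forwarded-Proto', 'False').lower() == 'true'
    print(xff_on, xfp_on)  # True False
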
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import jinja2 + +from octavia.common.config import cfg +from octavia.common import constants + +CONF = cfg.CONF + +TEMPLATES_DIR = (os.path.dirname(os.path.realpath(__file__)) + + constants.LOGGING_TEMPLATES + '/') + + +class LoggingJinjaTemplater: + + def __init__(self, logging_templates=None): + self.logging_templates = logging_templates or TEMPLATES_DIR + template_loader = jinja2.FileSystemLoader(searchpath=os.path.dirname( + self.logging_templates)) + jinja_env = jinja2.Environment(loader=template_loader, autoescape=True) + self.logging_template = jinja_env.get_template( + constants.LOGGING_CONF_TEMPLATE) + + def build_logging_config(self): + admin_log_hosts = [] + for server in CONF.amphora_agent.admin_log_targets or []: + (host, port) = server.rsplit(':', 1) + admin_log_hosts.append({ + 'host': host, + 'port': port, + }) + tenant_log_hosts = [] + for server in CONF.amphora_agent.tenant_log_targets or []: + (host, port) = server.rsplit(':', 1) + tenant_log_hosts.append({ + 'host': host, + 'port': port, + }) + return self.logging_template.render( + {'admin_log_hosts': admin_log_hosts, + 'tenant_log_hosts': tenant_log_hosts, + 'protocol': CONF.amphora_agent.log_protocol, + 'retry_count': CONF.amphora_agent.log_retry_count, + 'retry_interval': CONF.amphora_agent.log_retry_interval, + 'queue_size': CONF.amphora_agent.log_queue_size, + 'forward_all_logs': CONF.amphora_agent.forward_all_logs, + 'disable_local_log_storage': + CONF.amphora_agent.disable_local_log_storage, + 'admin_log_facility': + CONF.amphora_agent.administrative_log_facility, + 'user_log_facility': CONF.amphora_agent.user_log_facility, + }) diff --git a/octavia/common/jinja/logging/templates/10-rsyslog.conf.template b/octavia/common/jinja/logging/templates/10-rsyslog.conf.template new file mode 100644 index 0000000000..3e35b42b2a --- /dev/null +++ b/octavia/common/jinja/logging/templates/10-rsyslog.conf.template @@ -0,0 +1,56 @@ +{# Copyright 2018 Rackspace, US Inc. +# Copyright 2019 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
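The log-target parsing in build_logging_config() below uses rsplit(':', 1) rather than split(':'), which keeps IPv6-style hosts containing colons intact while still splitting off the port; the addresses here are illustrative:

    for target in ('192.0.2.1:514', '[2001:db8::1]:10514'):
        host, port = target.rsplit(':', 1)
        print(host, port)
    # 192.0.2.1 514
    # [2001:db8::1] 10514
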
+#} +{#- Tenant traffic logs -#} +{%- if tenant_log_hosts -%} +ruleset(name="tenant_forwarding" queue.type="linkedList" queue.size="{{ queue_size }}") { +{%- for host in tenant_log_hosts %} + action(type="omfwd" + target="{{ host['host'] }}" + port="{{ host['port'] }}" + protocol="{{ protocol }}" + action.resumeRetryCount="{{ retry_count }}" + action.resumeInterval="{{ retry_interval }}" + {% if not loop.first %}action.execOnlyWhenPreviousIsSuspended="on"{%- endif -%} + ) +{%- endfor %} +} +local{{ user_log_facility }}.=info call tenant_forwarding +{% endif %} + +{#- Administrative logs -#} +{% if admin_log_hosts %} +ruleset(name="admin_forwarding" queue.type="linkedList" queue.size="{{ queue_size }}") { +{%- for host in admin_log_hosts %} + action(type="omfwd" + target="{{ host['host'] }}" + port="{{ host['port'] }}" + protocol="{{ protocol }}" + action.resumeRetryCount="{{ retry_count }}" + action.resumeInterval="{{ retry_interval }}" + {% if not loop.first %}action.execOnlyWhenPreviousIsSuspended="on"{%- endif -%} + ) +{%- endfor %} +} +{%- if forward_all_logs %} +*.*;local{{ user_log_facility }}.none call admin_forwarding +{% else %} +local{{ admin_log_facility }}.* call admin_forwarding +{%- endif -%} +{% endif %} + +{%- if disable_local_log_storage -%} +*.* stop +{%- endif -%} diff --git a/octavia/common/jinja/lvs/__init__.py b/octavia/common/jinja/lvs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/common/jinja/lvs/jinja_cfg.py b/octavia/common/jinja/lvs/jinja_cfg.py new file mode 100644 index 0000000000..e818af86a9 --- /dev/null +++ b/octavia/common/jinja/lvs/jinja_cfg.py @@ -0,0 +1,239 @@ +# Copyright (c) 2018 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
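As a quick, hedged illustration of how the logging templater and the rsyslog template above fit together, the sketch below (not part of this change) renders the template for a single tenant log target. It assumes an environment where octavia and its oslo dependencies are importable so that the amphora_agent options are registered; the target address is purely illustrative.

    from oslo_config import cfg

    from octavia.common.jinja.logging import logging_jinja_cfg

    # Each target is a 'host:port' string; build_logging_config() splits it
    # with rsplit(':', 1), so only the last colon separates host from port.
    cfg.CONF.set_override('tenant_log_targets', ['192.0.2.10:514'],
                          group='amphora_agent')

    templater = logging_jinja_cfg.LoggingJinjaTemplater()
    # Renders a 'tenant_forwarding' ruleset with one omfwd action that
    # forwards local<user_log_facility>.=info messages to 192.0.2.10:514.
    print(templater.build_logging_config())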
+ +import os + +import jinja2 +from octavia_lib.common import constants as lib_consts + +from octavia.common.config import cfg +from octavia.common import constants +from octavia.common import utils as octavia_utils +from octavia.db import models + + +CONF = cfg.CONF + +PROTOCOL_MAP = { + constants.PROTOCOL_UDP: 'udp', + lib_consts.PROTOCOL_SCTP: 'sctp' +} + +BALANCE_MAP = { + constants.LB_ALGORITHM_ROUND_ROBIN: 'wrr', + constants.LB_ALGORITHM_LEAST_CONNECTIONS: 'lc', + constants.LB_ALGORITHM_SOURCE_IP: 'sh' +} + +BASE_PATH = CONF.haproxy_amphora.base_path + +CHECK_SCRIPT_NAME = 'udp_check.sh' + +KEEPALIVED_LVS_TEMPLATE = os.path.abspath( + os.path.join(os.path.dirname(__file__), + 'templates/keepalivedlvs.cfg.j2')) + +JINJA_ENV = None + + +class LvsJinjaTemplater: + + def __init__(self, base_amp_path=None, keepalivedlvs_template=None): + """Keepalived LVS configuration generation + + :param base_amp_path: Base path for amphora data + :param keepalivedlvs_template: Absolute path to Jinja template + """ + + self.base_amp_path = base_amp_path or BASE_PATH + self.keepalivedlvs_template = (keepalivedlvs_template or + KEEPALIVED_LVS_TEMPLATE) + + def build_config(self, listener: models.Listener, **kwargs): + """Convert a logical configuration to the Keepalived LVS version + + :param listener: The listener configuration + :return: Rendered configuration + """ + return self.render_loadbalancer_obj(listener) + + def _get_template(self): + """Returns the specified Jinja configuration template.""" + global JINJA_ENV + if not JINJA_ENV: + template_loader = jinja2.FileSystemLoader( + searchpath=os.path.dirname(self.keepalivedlvs_template)) + JINJA_ENV = jinja2.Environment( + autoescape=True, + loader=template_loader, + trim_blocks=True, + lstrip_blocks=True, + extensions=['jinja2.ext.do']) + return JINJA_ENV.get_template(os.path.basename( + self.keepalivedlvs_template)) + + def render_loadbalancer_obj(self, listener, **kwargs): + """Renders a templated configuration from a load balancer object + + :param host_amphora: The Amphora this configuration is hosted on + :param listener: The listener configuration + :return: Rendered configuration + """ + loadbalancer = self._transform_loadbalancer( + listener.load_balancer, + listener) + return self._get_template().render( + {'loadbalancer': loadbalancer}, + constants=constants, + lib_consts=lib_consts) + + def _transform_loadbalancer(self, loadbalancer: models.LoadBalancer, + listener: models.Listener): + """Transforms a load balancer into an object that will + + be processed by the templating system + """ + t_listener = self._transform_listener(listener) + vips = [ + { + 'ip_address': vip.ip_address, + 'ip_version': octavia_utils.ip_version( + vip.ip_address), + } + for vip in [loadbalancer.vip] + loadbalancer.additional_vips + ] + ret_value = { + 'id': loadbalancer.id, + 'vips': vips, + 'listener': t_listener, + 'enabled': loadbalancer.enabled, + } + return ret_value + + def _transform_listener(self, listener): + """Transforms a listener into an object that will + + be processed by the templating system + """ + ret_value = { + 'id': listener.id, + 'protocol_port': listener.protocol_port, + 'protocol_mode': PROTOCOL_MAP[listener.protocol], + 'enabled': listener.enabled + } + if listener.connection_limit and listener.connection_limit > -1: + ret_value['connection_limit'] = listener.connection_limit + if (listener.default_pool and + listener.default_pool.provisioning_status != + constants.PENDING_DELETE): + ret_value['default_pool'] = self._transform_pool( + 
listener.default_pool) + return ret_value + + def _transform_pool(self, pool): + """Transforms a pool into an object that will + + be processed by the templating system + """ + ret_value = { + 'id': pool.id, + 'protocol': PROTOCOL_MAP[pool.protocol], + 'lb_algorithm': (BALANCE_MAP.get(pool.lb_algorithm) or + BALANCE_MAP[constants.LB_ALGORITHM_ROUND_ROBIN]), + 'members': [], + 'health_monitor': '', + 'session_persistence': '', + 'enabled': pool.enabled + } + members_gen = (mem for mem in pool.members if + mem.provisioning_status != constants.PENDING_DELETE) + members = [self._transform_member(x) for x in members_gen] + ret_value['members'] = members + if (pool.health_monitor and + pool.health_monitor.provisioning_status != + constants.PENDING_DELETE): + ret_value['health_monitor'] = self._transform_health_monitor( + pool.health_monitor) + if pool.session_persistence: + func = self._transform_session_persistence + ret_value['session_persistence'] = func( + pool.session_persistence) + return ret_value + + @staticmethod + def _transform_session_persistence(persistence): + """Transforms session persistence into an object that will + + be processed by the templating system + """ + return { + 'type': persistence.type, + 'persistence_timeout': persistence.persistence_timeout, + 'persistence_granularity': persistence.persistence_granularity + } + + @staticmethod + def _transform_member(member): + """Transforms a member into an object that will + + be processed by the templating system + """ + return { + 'id': member.id, + 'address': member.ip_address, + 'ip_version': octavia_utils.ip_version( + member.ip_address), + 'protocol_port': member.protocol_port, + 'weight': member.weight, + 'enabled': member.enabled, + 'monitor_address': member.monitor_address, + 'monitor_port': member.monitor_port + } + + def _get_default_lvs_check_script_path(self, monitor_type): + if monitor_type == constants.HEALTH_MONITOR_UDP_CONNECT: + return (CONF.haproxy_amphora.base_path + + '/lvs/check/' + CHECK_SCRIPT_NAME) + if monitor_type == lib_consts.HEALTH_MONITOR_SCTP: + return "amphora-health-checker sctp" + return None + + def _transform_health_monitor(self, monitor): + """Transforms a health monitor into an object that will + + be processed by the templating system + """ + return_val = { + 'id': monitor.id, + 'type': monitor.type, + 'delay': monitor.delay, + 'timeout': monitor.timeout, + 'enabled': monitor.enabled, + 'fall_threshold': monitor.fall_threshold, + 'check_script_path': ( + self._get_default_lvs_check_script_path(monitor.type)) + } + if monitor.type == constants.HEALTH_MONITOR_HTTP: + return_val.update({ + 'rise_threshold': monitor.rise_threshold, + 'url_path': monitor.url_path, + 'http_method': (monitor.http_method + if monitor.http_method == + constants.HEALTH_MONITOR_HTTP_METHOD_GET else + None), + 'expected_codes': (sorted(list( + octavia_utils.expand_expected_codes( + monitor.expected_codes))) + if monitor.expected_codes else [])}) + return return_val diff --git a/octavia/common/jinja/lvs/templates/base.j2 b/octavia/common/jinja/lvs/templates/base.j2 new file mode 100644 index 0000000000..b3a5d71773 --- /dev/null +++ b/octavia/common/jinja/lvs/templates/base.j2 @@ -0,0 +1,25 @@ +{# Copyright (c) 2018 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +#} +# Configuration for Loadbalancer {{ loadbalancer.id }} +{% if loadbalancer.listener.enabled %} +# Configuration for Listener {{ udp_listener_id }} +{% else %} +# Listener {{ udp_listener_id }} is disabled +{% endif %} + +{% block global_definitions %}{% endblock global_definitions %} + +{% block proxies %}{% endblock proxies %} diff --git a/octavia/common/jinja/lvs/templates/keepalivedlvs.cfg.j2 b/octavia/common/jinja/lvs/templates/keepalivedlvs.cfg.j2 new file mode 100644 index 0000000000..8371def549 --- /dev/null +++ b/octavia/common/jinja/lvs/templates/keepalivedlvs.cfg.j2 @@ -0,0 +1,29 @@ +{# Copyright (c) 2018 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +#} +{% extends 'base.j2' %} +{% from 'macros.j2' import virtualserver_macro %} +{% set udp_listener_id = loadbalancer.listener.id %} +{% block global_definitions %} +net_namespace {{ constants.AMPHORA_NAMESPACE }} +{% endblock global_definitions %} +{% block proxies %} + {% if loadbalancer.enabled and loadbalancer.listener.enabled %} + {{- virtualserver_macro(constants, lib_consts, + loadbalancer.listener, + loadbalancer.vips, + loadbalancer.listener.get('default_pool', None)) }} + {% endif %} +{% endblock proxies %} diff --git a/octavia/common/jinja/lvs/templates/macros.j2 b/octavia/common/jinja/lvs/templates/macros.j2 new file mode 100644 index 0000000000..f31008e701 --- /dev/null +++ b/octavia/common/jinja/lvs/templates/macros.j2 @@ -0,0 +1,165 @@ +{# Copyright (c) 2018 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +#} + +{%- macro lb_algo_macro(pool) -%} +lb_algo {{ pool.lb_algorithm }} +{%- endmacro -%} + +{% macro misc_path_macro(member, health_monitor) -%} +misc_path "{{ health_monitor.check_script_path }} {{ member.monitor_address|default(member.address, true) }} {{ member.monitor_port|default(member.protocol_port, true) }}" +{%- endmacro %} + +{%- macro misc_check_macro(pool, member, health_monitor) -%} +MISC_CHECK { + {{ misc_path_macro(member, health_monitor) }} + misc_timeout {{ pool.health_monitor.timeout }} + } +{%- endmacro -%} + +{%- macro sctp_check_macro(pool, member, health_monitor) -%} +MISC_CHECK { + misc_path "amphora-health-checker sctp -t {{ pool.health_monitor.timeout }} {{ member.monitor_address|default(member.address, true) }} {{ member.monitor_port|default(member.protocol_port, true) }}" + misc_timeout {{ (pool.health_monitor.timeout + 1) }} + } +{%- endmacro -%} + +{%- macro http_url_macro(health_monitor, health_monitor_status_code) %} +url { + path {{ health_monitor.url_path }} + status_code {{ health_monitor_status_code }} + } +{% endmacro -%} + +{%- macro http_get_macro(pool, member, health_monitor) -%} +HTTP_GET { + {% for status_code in health_monitor.expected_codes %} + {{ http_url_macro(health_monitor, status_code) -}} + {% endfor %} + connect_ip {{ member.monitor_address|default(member.address, true) }} + connect_port {{ member.monitor_port|default(member.protocol_port, true) }} + connect_timeout {{ health_monitor.timeout }} + } +{%- endmacro -%} + +{%- macro tcp_check_macro(pool, member, health_monitor) -%} +TCP_CHECK { + connect_ip {{ member.monitor_address|default(member.address, true) }} + connect_port {{ member.monitor_port|default(member.protocol_port, true) }} + connect_timeout {{ health_monitor.timeout }} + } +{%- endmacro -%} + +{% macro health_monitor_rs_macro(constants, lib_consts, pool, member) %} + {% if pool.health_monitor and pool.health_monitor.enabled %} + {% if pool.health_monitor.type == constants.HEALTH_MONITOR_UDP_CONNECT %} + {{ misc_check_macro(pool, member, pool.health_monitor) -}} + {% elif pool.health_monitor.type == lib_consts.HEALTH_MONITOR_SCTP %} + {{ sctp_check_macro(pool, member, pool.health_monitor) -}} + {% elif pool.health_monitor.type == constants.HEALTH_MONITOR_HTTP and pool.health_monitor.http_method == constants.HEALTH_MONITOR_HTTP_METHOD_GET %} + {{ http_get_macro(pool, member, pool.health_monitor) -}} + {% elif pool.health_monitor.type == constants.HEALTH_MONITOR_TCP %} + {{ tcp_check_macro(pool, member, pool.health_monitor) -}} + {% endif %} + {% endif %} +{% endmacro %} + +{% macro realserver_macro(constants, lib_consts, pool, member, listener) %} + {% if member.enabled %} + # Configuration for Member {{ member.id }} + real_server {{ member.address }} {{ member.protocol_port }} { + weight {{ member.weight }} + {% if listener.connection_limit %} + uthreshold {{ listener.connection_limit }} + {% endif %} +{{- health_monitor_rs_macro(constants, lib_consts, pool, member) }} + } + {% else %} + # Member {{ member.id }} is disabled + {% endif %} +{% endmacro %} + +{% macro health_monitor_vs_macro(default_pool) %} +{% if default_pool and default_pool.health_monitor and default_pool.health_monitor.enabled %} + delay_loop {{ default_pool.health_monitor.delay }} + delay_before_retry {{ default_pool.health_monitor.delay }} + {% if default_pool.health_monitor.fall_threshold %} + retry {{ default_pool.health_monitor.fall_threshold }} + {% endif %} +{% endif %} +{% endmacro %} + +{% macro virtualserver_macro(constants, lib_consts, 
listener, vips, default_pool) %} +{% if default_pool %} +{% for ip_version in (4, 6) %} +{%- set has_vip = namespace(found=False) %} +{%- for vip in vips %} + {%- if vip.ip_version == ip_version %} + {%- set has_vip.found = True %} + {%- endif %} +{%- endfor %} +{% if has_vip.found %} +virtual_server_group ipv{{ ip_version }}-group { + {% for vip in vips %} + {% if vip.ip_version == ip_version %} + {{ vip.ip_address }} {{ listener.protocol_port }} + {% endif %} + {% endfor %} +} + +virtual_server group ipv{{ ip_version }}-group { + {{ lb_algo_macro(default_pool) }} + lb_kind NAT + protocol {{ listener.protocol_mode.upper() }} + {% if default_pool.session_persistence and default_pool.session_persistence.type == constants.SESSION_PERSISTENCE_SOURCE_IP %} + {# Set our defined defaults here, as the built-in defaults proved #} + {# inconsistent in testing #} + {% if default_pool.session_persistence.persistence_timeout %} + persistence_timeout {{ default_pool.session_persistence.persistence_timeout }} + {% else %} + persistence_timeout 360 + {% endif %} + {% if default_pool.session_persistence.persistence_granularity %} + persistence_granularity {{ default_pool.session_persistence.persistence_granularity }} + {% elif ip_version == 4 %} + persistence_granularity 255.255.255.255 + {% else %} + persistence_granularity 128 + {% endif %} + {% endif %} +{{ health_monitor_vs_macro(default_pool) }} + +{% if default_pool.protocol.upper() in constants.LVS_PROTOCOLS %} + {% if default_pool.enabled %} + # Configuration for Pool {{ default_pool.id }} + {% else %} + # Pool {{ default_pool.id }} is disabled + {% endif %} + {% if default_pool.health_monitor and default_pool.health_monitor.enabled %} + # Configuration for HealthMonitor {{ default_pool.health_monitor.id }} + {% endif %} + {% if default_pool.enabled %} + {% for member in default_pool.members %} + {% if member.ip_version == ip_version %} + {{- realserver_macro(constants, lib_consts, default_pool, member, listener) }} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} +} +{% endif %} +{% endfor %} +{% endif %} +{% endmacro %} diff --git a/octavia/common/jinja/templates/user_data_config_drive.template b/octavia/common/jinja/templates/user_data_config_drive.template new file mode 100644 index 0000000000..6df3dff643 --- /dev/null +++ b/octavia/common/jinja/templates/user_data_config_drive.template @@ -0,0 +1,41 @@ +{# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +-#} +#cloud-config +# vim: syntax=yaml +# +# This configuration will take the user-data dict and build a cloud-init +# script utilizing the write_files module.
The user-data dict should be a +# Key Value pair where the Key is the path to store the file and the Value +# is the data to store at that location +# +# Example: +# {'/root/path/to/file.cfg': 'I'm a file, write things in me'} +{% if user_data -%} +write_files: +{%- for key, value in user_data.items() %} +- path: {{ key }} + content: | + {{ value|indent(8) }} +{%- endfor -%} +{%- endif -%} + +{# restart agent now that configurations are in place #} +runcmd: +- systemctl restart rsyslog +{% if user_data -%} +- service amphora-agent restart +{%- endif %} + +timezone: {{ timezone }} diff --git a/octavia/common/jinja/user_data_jinja_cfg.py b/octavia/common/jinja/user_data_jinja_cfg.py new file mode 100644 index 0000000000..0ee2439190 --- /dev/null +++ b/octavia/common/jinja/user_data_jinja_cfg.py @@ -0,0 +1,39 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import jinja2 + +from octavia.common.config import cfg +from octavia.common import constants + +CONF = cfg.CONF + +TEMPLATES_DIR = (os.path.dirname(os.path.realpath(__file__)) + + constants.TEMPLATES + '/') + + +class UserDataJinjaCfg: + + def __init__(self): + template_loader = jinja2.FileSystemLoader(searchpath=os.path.dirname( + TEMPLATES_DIR)) + jinja_env = jinja2.Environment(autoescape=True, loader=template_loader) + self.agent_template = jinja_env.get_template( + constants.USER_DATA_CONFIG_DRIVE_TEMPLATE) + + def build_user_data_config(self, user_data): + return self.agent_template.render( + user_data=user_data, timezone=CONF.controller_worker.amp_timezone) diff --git a/octavia/common/keystone.py b/octavia/common/keystone.py new file mode 100644 index 0000000000..a08361b5a3 --- /dev/null +++ b/octavia/common/keystone.py @@ -0,0 +1,133 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from keystoneauth1 import exceptions as ks_exceptions +from keystoneauth1 import loading as ks_loading +from keystonemiddleware import auth_token +from oslo_config import cfg +from oslo_log import log as logging + +from octavia.common import constants + +LOG = logging.getLogger(__name__) + +_NOAUTH_PATHS = ['/', '/load-balancer/', '/healthcheck', + '/load-balancer/healthcheck'] + + +class KeystoneSession: + + def __init__(self, section=constants.SERVICE_AUTH): + self._session = None + self._auth = None + + self.section = section + + def get_session(self, auth=None): + """Initializes a Keystone session. 
+ + :return: a Keystone Session object + """ + if auth: + # Do not use the singleton with custom auth params + return ks_loading.load_session_from_conf_options( + cfg.CONF, self.section, auth=auth) + + if not self._session: + self._session = ks_loading.load_session_from_conf_options( + cfg.CONF, self.section, auth=self.get_auth()) + + return self._session + + def get_auth(self): + if not self._auth: + try: + self._auth = ks_loading.load_auth_from_conf_options( + cfg.CONF, self.section) + except ks_exceptions.auth_plugins.MissingRequiredOptions as e: + if self.section == constants.SERVICE_AUTH: + raise e + # NOTE(gthiemonge): MissingRequiredOptions is raised: there is + # one or more missing auth options in the config file. It may + # be due to the migration from python-neutronclient to + # openstacksdk. + # With neutronclient, most of the auth settings were in + # [service_auth] with a few overrides in [neutron], + # but with openstacksdk, we have all the auth settings in the + # [neutron] section. In order to support smooth upgrades, in + # case those options are missing, we override the undefined + # options with the existing settings from [service_auth]. + + # This code should be removed when all the deployment tools set + # the correct options in [neutron] + + # The config options are lazily registered/loaded by keystone, + # it means that we cannot get/set them before invoking + # 'load_auth_from_conf_options' on 'service_auth'. + ks_loading.load_auth_from_conf_options( + cfg.CONF, constants.SERVICE_AUTH) + + config = getattr(cfg.CONF, self.section) + for opt in config: + # For each option in the [section] section, get its setting + # location, if the location is 'opt_default', it means that + # the option is not configured in the config file. + # if the option is also defined in [service_auth], the + # option of the [section] can be replaced by the one from + # [service_auth] + loc = cfg.CONF.get_location(opt, self.section) + if not loc or loc.location == cfg.Locations.opt_default: + if hasattr(cfg.CONF.service_auth, opt): + cur_value = getattr(config, opt) + value = getattr(cfg.CONF.service_auth, opt) + if value != cur_value: + log_value = (value if opt != "password" + else "") + LOG.debug("Overriding [%s].%s with '%s'", + self.section, opt, log_value) + cfg.CONF.set_override(opt, value, self.section) + + # Now we can call load_auth_from_conf_options for this specific + # service with the newly defined options. + self._auth = ks_loading.load_auth_from_conf_options( + cfg.CONF, self.section) + + return self._auth + + def get_service_user_id(self): + return self.get_auth().get_user_id(self.get_session()) + + +class SkippingAuthProtocol(auth_token.AuthProtocol): + """SkippingAuthProtocol to reach special endpoints + + Bypasses keystone authentication for special request paths, such + as the api version discovery path. + + Note: + SkippingAuthProtocol is lean customization + of :py:class:`keystonemiddleware.auth_token.AuthProtocol` + that disables keystone communication if the request path + is in the _NOAUTH_PATHS list. 
+ + """ + + def process_request(self, request): + path = request.path + if path in _NOAUTH_PATHS: + LOG.debug('Request path is %s and it does not require keystone ' + 'authentication', path) + return None # return NONE to reach actual logic + + return super().process_request(request) diff --git a/octavia/common/policy.py b/octavia/common/policy.py new file mode 100644 index 0000000000..328f0ccff1 --- /dev/null +++ b/octavia/common/policy.py @@ -0,0 +1,161 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Policy Engine For Octavia.""" +from oslo_config import cfg +from oslo_log import log as logging +from oslo_policy import policy as oslo_policy +from oslo_utils import excutils + +from octavia.common import exceptions +from octavia import policies + + +LOG = logging.getLogger(__name__) +OCTAVIA_POLICY = None + + +def get_enforcer(): + global OCTAVIA_POLICY + if OCTAVIA_POLICY is None: + LOG.debug('Loading octavia policy object.') + OCTAVIA_POLICY = Policy() + return OCTAVIA_POLICY + + +def reset(): + global OCTAVIA_POLICY + if OCTAVIA_POLICY: + OCTAVIA_POLICY.clear() + OCTAVIA_POLICY = None + + +class Policy(oslo_policy.Enforcer): + + def __init__(self, conf=cfg.CONF, policy_file=None, rules=None, + default_rule=None, use_conf=True, overwrite=True): + """Init an Enforcer class. + + :param context: A context object. + :param conf: A configuration object. + :param policy_file: Custom policy file to use, if none is + specified, ``conf.oslo_policy.policy_file`` + will be used. + :param rules: Default dictionary / Rules to use. It will be + considered just in the first instantiation. If + :meth:`load_rules` with ``force_reload=True``, + :meth:`clear` or :meth:`set_rules` with + ``overwrite=True`` is called this will be + overwritten. + :param default_rule: Default rule to use, conf.default_rule will + be used if none is specified. + :param use_conf: Whether to load rules from cache or config file. + :param overwrite: Whether to overwrite existing rules when reload + rules from config file. + """ + + super().__init__(conf, policy_file, rules, default_rule, use_conf, + overwrite) + + self.register_defaults(policies.list_rules()) + + def authorize(self, action, target, context, do_raise=True, exc=None): + """Verifies that the action is valid on the target in this context. + + :param context: The oslo context for this request. + :param action: string representing the action to be checked + this should be colon separated for clarity. + i.e. ``compute:create_instance``, + ``compute:attach_volume``, + ``volume:attach_volume`` + :param target: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. + ``{'project_id': context.project_id}`` + :param do_raise: if True (the default), raises PolicyForbidden; + if False, returns False + :param exc: Class of the exceptions to raise if the check fails. 
+ Any remaining arguments passed to :meth:`enforce` (both + positional and keyword arguments) will be passed to + the exception class. If not specified, + :class:`PolicyForbidden` will be used. + + :raises PolicyForbidden: if verification fails and do_raise is + True. If 'exc' is specified, an exception of that type is + raised instead. + + :return: returns a non-False value (not necessarily "True") if + authorized, and the exact value False if not authorized and + do_raise is False. + """ + credentials = context.to_policy_values() + # Inject is_admin into the credentials to allow override via + # config auth_strategy = constants.NOAUTH + credentials['is_admin'] = ( + credentials.get('is_admin') or context.is_admin) + + if not exc: + exc = exceptions.PolicyForbidden + + try: + return super().authorize( + action, target, credentials, do_raise=do_raise, exc=exc) + except oslo_policy.PolicyNotRegistered: + with excutils.save_and_reraise_exception(): + LOG.exception('Policy not registered') + except Exception: + credentials.pop('auth_token', None) + with excutils.save_and_reraise_exception(): + LOG.debug('Policy check for %(action)s failed with ' + 'credentials %(credentials)s', + {'action': action, 'credentials': credentials}) + return None + + def check_is_admin(self, context): + """Check whether the roles contain the 'admin' role according to + the policy settings. + + """ + credentials = context.to_dict() + result = False + try: + result = self.enforce('context_is_admin', credentials, credentials) + except oslo_policy.InvalidScope as e: + # This will happen if the token being used is system scoped + # when scope checking is enabled. + LOG.warning(str(e)) + return False + return result + + def get_rules(self): + return self.rules + + +@oslo_policy.register('is_admin') +class IsAdminCheck(oslo_policy.Check): + """An explicit check for is_admin.""" + + def __init__(self, kind, match): + """Initialize the check.""" + + self.expected = match.lower() == 'true' + + super().__init__(kind, str(self.expected)) + + def __call__(self, target, creds, enforcer): + """Determine whether is_admin matches the requested value.""" + + return creds['is_admin'] == self.expected + + +# This is used for the oslopolicy-policy-generator tool +def get_no_context_enforcer(): + return Policy() diff --git a/octavia/common/rpc.py b/octavia/common/rpc.py new file mode 100644 index 0000000000..9563a567c7 --- /dev/null +++ b/octavia/common/rpc.py @@ -0,0 +1,75 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
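A brief, hedged sketch (not part of this change) of how the custom is_admin check above evaluates, assuming octavia is importable; the credential dicts are illustrative:

    from octavia.common import policy

    # A policy rule fragment such as 'is_admin:True' is parsed into
    # IsAdminCheck('is_admin', 'True'); the check compares the creds value
    # against the boolean parsed from the match string.
    check = policy.IsAdminCheck('is_admin', 'True')
    assert check({}, {'is_admin': True}, enforcer=None)
    assert not check({}, {'is_admin': False}, enforcer=None)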
+ +from octavia_lib.i18n import _ +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging as messaging +from oslo_messaging.rpc import dispatcher + +LOG = logging.getLogger(__name__) +TRANSPORT = None +NOTIFICATION_TRANSPORT = None +NOTIFIER = None + + +def init(): + global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER + TRANSPORT = create_transport(get_transport_url()) + NOTIFICATION_TRANSPORT = messaging.get_notification_transport(cfg.CONF) + NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT) + + +def cleanup(): + global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER + if TRANSPORT is not None: + TRANSPORT.cleanup() + if NOTIFICATION_TRANSPORT is not None: + NOTIFICATION_TRANSPORT.cleanup() + TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None + + +def get_transport_url(http://github.com/url_str=None): + return messaging.TransportURL.parse(cfg.CONF, url_str) + + +def get_client(target, version_cap=None, serializer=None, + call_monitor_timeout=None): + + assert TRANSPORT is not None, _("'TRANSPORT' must not be None") + + return messaging.get_rpc_client( + TRANSPORT, target, version_cap=version_cap, + serializer=serializer, + call_monitor_timeout=call_monitor_timeout) + + +def get_server(target, endpoints, + access_policy=dispatcher.DefaultRPCAccessPolicy, + serializer=None): + assert TRANSPORT is not None, _("'TRANSPORT' must not be None") + + return messaging.get_rpc_server(TRANSPORT, + target, + endpoints, + serializer=serializer, + access_policy=access_policy) + + +def get_notifier(service=None, host=None, publisher_id=None): + assert NOTIFIER is not None, _("'NOTIFIER' must not be None") + + return NOTIFIER.prepare() + + +def create_transport(url): + return messaging.get_rpc_transport(cfg.CONF, url=url) diff --git a/octavia/common/service.py b/octavia/common/service.py new file mode 100644 index 0000000000..bfa6326a98 --- /dev/null +++ b/octavia/common/service.py @@ -0,0 +1,26 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from octavia.common import config +from octavia.common import rpc + + +def prepare_service(argv=None): + """Sets global config from config file and sets up logging.""" + argv = argv or [] + config.init(argv[1:]) + config.setup_logging(cfg.CONF) + rpc.init() diff --git a/octavia/common/stats.py b/octavia/common/stats.py new file mode 100644 index 0000000000..e662ba4d6d --- /dev/null +++ b/octavia/common/stats.py @@ -0,0 +1,68 @@ +# Copyright 2016 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from octavia.common import constants +from octavia.common import data_models +from octavia.db import repositories as repo + +LOG = logging.getLogger(__name__) + + +class StatsMixin: + + def __init__(self): + super().__init__() + self.listener_stats_repo = repo.ListenerStatisticsRepository() + self.repo_amphora = repo.AmphoraRepository() + self.repo_loadbalancer = repo.LoadBalancerRepository() + + def get_listener_stats(self, session, listener_id): + """Gets the listener statistics data_models object.""" + db_ls, _ = self.listener_stats_repo.get_all( + session, listener_id=listener_id) + if not db_ls: + LOG.warning("Listener Statistics for Listener %s were not found", + listener_id) + + statistics = data_models.ListenerStatistics(listener_id=listener_id) + + for db_l in db_ls: + statistics += db_l + + amp = self.repo_amphora.get(session, id=db_l.amphora_id) + # Amphora ID and Listener ID will be the same when the stats + # come from a provider driver other than the `amphora` driver. + # That case, and the case where the current amphora is + # ALLOCATED, are the only times we should include the *active* + # connections, because non-active amphorae report incorrect + # counts. + if (amp and amp.status == constants.AMPHORA_ALLOCATED) or ( + db_l.amphora_id == db_l.listener_id): + statistics.active_connections += db_l.active_connections + return statistics + + def get_loadbalancer_stats(self, session, loadbalancer_id): + statistics = data_models.LoadBalancerStatistics() + lb_db = self.repo_loadbalancer.get(session, id=loadbalancer_id) + + for listener in lb_db.listeners: + data = self.get_listener_stats(session, listener.id) + statistics.bytes_in += data.bytes_in + statistics.bytes_out += data.bytes_out + statistics.request_errors += data.request_errors + statistics.active_connections += data.active_connections + statistics.total_connections += data.total_connections + statistics.listeners.append(data) + return statistics diff --git a/octavia/common/tls_utils/__init__.py b/octavia/common/tls_utils/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/common/tls_utils/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/common/tls_utils/cert_parser.py b/octavia/common/tls_utils/cert_parser.py new file mode 100644 index 0000000000..47fd4d8823 --- /dev/null +++ b/octavia/common/tls_utils/cert_parser.py @@ -0,0 +1,430 @@ +# +# Copyright 2014 Rackspace. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +import hashlib + +from cryptography.hazmat import backends +from cryptography.hazmat.primitives import serialization +from cryptography import x509 +from oslo_context import context as oslo_context +from oslo_log import log as logging +from pyasn1.codec.der import decoder as der_decoder +from pyasn1.codec.der import encoder as der_encoder +from pyasn1_modules import rfc2315 + +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import utils as octavia_utils + +X509_BEG = b'-----BEGIN CERTIFICATE-----' +X509_END = b'-----END CERTIFICATE-----' +PKCS7_BEG = b'-----BEGIN PKCS7-----' +PKCS7_END = b'-----END PKCS7-----' + +LOG = logging.getLogger(__name__) + + +def validate_cert(certificate, private_key=None, + private_key_passphrase=None, intermediates=None): + """Validate that the certificate is a valid PEM encoded X509 object + + Optionally verify that the private key matches the certificate. + Optionally verify that the intermediates are valid X509 objects. + + :param certificate: A PEM encoded certificate + :param private_key: The private key for the certificate + :param private_key_passphrase: Passphrase for accessing the private key + :param intermediates: PEM or PKCS7 encoded intermediate certificates + :returns: boolean + """ + cert = _get_x509_from_pem_bytes(certificate) + if intermediates and not isinstance(intermediates, list): + # If the intermediates are in a list, then they are already loaded. + # Load the certificates to validate them, if they weren't already. 
+ list(get_intermediates_pems(intermediates)) + if private_key: + pkey = _read_private_key(private_key, + passphrase=private_key_passphrase) + pknum = pkey.public_key().public_numbers() + certnum = cert.public_key().public_numbers() + if pknum != certnum: + raise exceptions.MisMatchedKey + return True + + +def _read_private_key(private_key_pem, passphrase=None): + """Reads a private key PEM block and returns a RSAPrivatekey + + :param private_key_pem: The private key PEM block + :param passphrase: Optional passphrase needed to decrypt the private key + :returns: a RSAPrivatekey object + """ + if passphrase and isinstance(passphrase, str): + passphrase = passphrase.encode("utf-8") + if isinstance(private_key_pem, str): + private_key_pem = private_key_pem.encode('utf-8') + + try: + return serialization.load_pem_private_key(private_key_pem, passphrase, + backends.default_backend()) + except Exception as e: + LOG.exception("Passphrase required.") + raise exceptions.NeedsPassphrase from e + + +def prepare_private_key(private_key, passphrase=None): + """Prepares an unencrypted PEM-encoded private key for printing + + :param private_key: The private key in PEM format (encrypted or not) + :returns: The unencrypted private key in PEM format + """ + pk = _read_private_key(private_key, passphrase) + return pk.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption()).strip() + + +def get_intermediates_pems(intermediates=None): + """Split the input string into individual x509 text blocks + + :param intermediates: PEM or PKCS7 encoded intermediate certificates + :returns: A list of strings where each string represents an + X509 pem block surrounded by BEGIN CERTIFICATE, + END CERTIFICATE block tags + """ + if isinstance(intermediates, str): + try: + intermediates = intermediates.encode("utf-8") + except UnicodeDecodeError: + LOG.debug("Couldn't encode intermediates string, it was probably " + "in binary DER format.") + if X509_BEG in intermediates: + for x509Pem in _split_x509s(intermediates): + yield _prepare_x509_cert(_get_x509_from_pem_bytes(x509Pem)) + else: + for x509Pem in _parse_pkcs7_bundle(intermediates): + yield _prepare_x509_cert(_get_x509_from_der_bytes(x509Pem)) + + +def _prepare_x509_cert(cert=None): + """Prepares a PEM-encoded X509 certificate for printing + + :param intermediates: X509Certificate object + :returns: A PEM-encoded X509 certificate + """ + return cert.public_bytes(encoding=serialization.Encoding.PEM).strip() + + +def _split_x509s(xstr): + """Split the input string into individual x509 text blocks + + :param xstr: A large multi x509 certificate block + :returns: A list of strings where each string represents an + X509 pem block surrounded by BEGIN CERTIFICATE, + END CERTIFICATE block tags + """ + curr_pem_block = [] + inside_x509 = False + if isinstance(xstr, bytes): + xstr = xstr.decode('utf-8') + for line in xstr.replace("\r", "").split("\n"): + if inside_x509: + curr_pem_block.append(line) + if line == X509_END.decode('utf-8'): + yield octavia_utils.b("\n".join(curr_pem_block)) + curr_pem_block = [] + inside_x509 = False + continue + if line == X509_BEG.decode('utf-8'): + curr_pem_block.append(line) + inside_x509 = True + + +def _parse_pkcs7_bundle(pkcs7): + """Parse a PKCS7 certificate bundle in DER or PEM format + + :param pkcs7: A pkcs7 bundle in DER or PEM format + :returns: A list of individual DER-encoded certificates + """ + # Look for PEM encoding + if 
PKCS7_BEG in pkcs7: + try: + for substrate in _read_pem_blocks(pkcs7): + yield from _get_certs_from_pkcs7_substrate(substrate) + except Exception as e: + LOG.exception('Unreadable Certificate.') + raise exceptions.UnreadableCert from e + + # If no PEM encoding, assume this is DER encoded and try to decode + else: + yield from _get_certs_from_pkcs7_substrate(pkcs7) + + +def _read_pem_blocks(data): + """Parse a series of PEM-encoded blocks + + This method is based on pyasn1-modules.pem.readPemBlocksFromFile, but + eliminates the need to operate on a file handle and is a generator. + + :param data: A long text string containing one or more PEM-encoded blocks + :param markers: A tuple containing the test strings that indicate the + start and end of the PEM-encoded blocks + :returns: An ASN1 substrate suitable for DER decoding. + + """ + stSpam, stHam, stDump = 0, 1, 2 + startMarkers = {PKCS7_BEG.decode('utf-8'): 0} + stopMarkers = {PKCS7_END.decode('utf-8'): 0} + idx = -1 + state = stSpam + if isinstance(data, bytes): + data = data.decode('utf-8') + certLines = [] + for certLine in data.replace('\r', '').split('\n'): + if not certLine: + continue + certLine = certLine.strip() + if state == stSpam: + if certLine in startMarkers: + certLines = [] + idx = startMarkers[certLine] + state = stHam + continue + if state == stHam: + if certLine in stopMarkers and stopMarkers[certLine] == idx: + state = stDump + else: + certLines.append(certLine) + if state == stDump: + yield b''.join([base64.b64decode(x) for x in certLines]) + state = stSpam + + +def _get_certs_from_pkcs7_substrate(substrate): + """Extracts DER-encoded X509 certificates from a PKCS7 ASN1 DER substrate + + :param substrate: The substrate to be processed + :returns: A list of DER-encoded X509 certificates + """ + try: + contentInfo, _ = der_decoder.decode(substrate, + asn1Spec=rfc2315.ContentInfo()) + contentType = contentInfo.getComponentByName('contentType') + except Exception as e: + LOG.exception('Unreadable Certificate.') + raise exceptions.UnreadableCert from e + if contentType != rfc2315.signedData: + LOG.exception('Unreadable Certificate.') + raise exceptions.UnreadableCert + + try: + content, _ = der_decoder.decode( + contentInfo.getComponentByName('content'), + asn1Spec=rfc2315.SignedData()) + except Exception as e: + LOG.exception('Unreadable Certificate.') + raise exceptions.UnreadableCert from e + + for cert in content.getComponentByName('certificates'): + yield der_encoder.encode(cert) + + +def get_host_names(certificate): + """Extract the host names from the Pem encoded X509 certificate + + :param certificate: A PEM encoded certificate + :returns: A dictionary containing the following keys: + ['cn', 'dns_names'] + where 'cn' is the CN from the SubjectName of the + certificate, and 'dns_names' is a list of dNSNames + (possibly empty) from the SubjectAltNames of the certificate. + """ + if isinstance(certificate, str): + certificate = certificate.encode('utf-8') + host_names = {'cn': None, 'dns_names': []} + try: + cert = x509.load_pem_x509_certificate(certificate, + backends.default_backend()) + try: + cn = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[0] + host_names['cn'] = cn.value.lower() + except Exception as e: + LOG.debug(f'Unable to get CN from certificate due to: {e}. 
' + f'Assuming subject alternative names are present.') + try: + ext = cert.extensions.get_extension_for_oid( + x509.OID_SUBJECT_ALTERNATIVE_NAME + ) + host_names['dns_names'] = ext.value.get_values_for_type( + x509.DNSName) + except x509.ExtensionNotFound: + LOG.debug("%s extension not found", + x509.OID_SUBJECT_ALTERNATIVE_NAME) + + # Certs with no subject are valid as long as a subject alternative + # name is present. If both are missing, it is an invalid cert per + # the x.509 standard. + if not host_names['cn'] and not host_names['dns_names']: + LOG.warning('No CN or DNSName(s) found in certificate. The ' + 'certificate is invalid.') + raise exceptions.MissingCertSubject() + + return host_names + except exceptions.MissingCertSubject: + raise + except Exception as e: + LOG.exception('Unreadable Certificate.') + raise exceptions.UnreadableCert from e + + +def get_cert_expiration(certificate_pem): + """Extract the expiration date from the Pem encoded X509 certificate + + :param certificate_pem: Certificate in PEM format + :returns: Expiration date of certificate_pem + """ + try: + cert = x509.load_pem_x509_certificate(certificate_pem, + backends.default_backend()) + return cert.not_valid_after_utc + except Exception as e: + LOG.exception('Unreadable Certificate.') + raise exceptions.UnreadableCert from e + + +def _get_x509_from_pem_bytes(certificate_pem): + """Parse X509 data from a PEM encoded certificate + + :param certificate_pem: Certificate in PEM format + :returns: crypto high-level x509 data from the PEM string + """ + if isinstance(certificate_pem, str): + certificate_pem = certificate_pem.encode('utf-8') + try: + x509cert = x509.load_pem_x509_certificate(certificate_pem, + backends.default_backend()) + except Exception as e: + LOG.exception('Unreadable Certificate.') + raise exceptions.UnreadableCert from e + return x509cert + + +def _get_x509_from_der_bytes(certificate_der): + """Parse X509 data from a DER encoded certificate + + :param certificate_der: Certificate in DER format + :returns: crypto high-level x509 data from the DER-encoded certificate + """ + try: + x509cert = x509.load_der_x509_certificate(certificate_der, + backends.default_backend()) + except Exception as e: + LOG.exception('Unreadable Certificate.') + raise exceptions.UnreadableCert from e + return x509cert + + +def build_pem(tls_container): + """Concatenate TLS container fields to create a PEM + + encoded certificate file + + :param tls_container: Object container TLS certificates + :returns: Pem encoded certificate file + """ + pem = [tls_container.certificate] + if tls_container.private_key: + pem.append(tls_container.private_key) + if tls_container.intermediates: + pem.extend(tls_container.intermediates[:]) + return b'\n'.join(pem) + b'\n' + + +def load_certificates_data(cert_mngr, obj, context=None): + """Load TLS certificate data from the listener/pool. 
+ + return TLS_CERT and SNI_CERTS + """ + tls_cert = None + sni_certs = [] + if not context: + context = oslo_context.RequestContext(project_id=obj.project_id) + + if obj.tls_certificate_id: + try: + tls_cert = _map_cert_tls_container( + cert_mngr.get_cert(context, + obj.tls_certificate_id, + check_only=True)) + except exceptions.MissingCertSubject: + # This was logged below, so raise as is to provide a clear + # user error + raise + except Exception as e: + LOG.warning('Unable to retrieve certificate: %s due to %s.', + obj.tls_certificate_id, str(e)) + raise exceptions.CertificateRetrievalException( + ref=obj.tls_certificate_id) + + if hasattr(obj, 'sni_containers') and obj.sni_containers: + for sni_cont in obj.sni_containers: + try: + cert_container = _map_cert_tls_container( + cert_mngr.get_cert(context, + sni_cont.tls_container_id, + check_only=True)) + except Exception as e: + LOG.warning('Unable to retrieve certificate: %s due to %s.', + sni_cont.tls_container_id, str(e)) + raise exceptions.CertificateRetrievalException( + ref=sni_cont.tls_container_id) + sni_certs.append(cert_container) + return {'tls_cert': tls_cert, 'sni_certs': sni_certs} + + +def _map_cert_tls_container(cert): + certificate = cert.get_certificate() + private_key = cert.get_private_key() + private_key_passphrase = cert.get_private_key_passphrase() + intermediates = cert.get_intermediates() + if isinstance(certificate, str): + certificate = certificate.encode('utf-8') + if isinstance(private_key, str): + private_key = private_key.encode('utf-8') + if isinstance(private_key_passphrase, str): + private_key_passphrase = private_key_passphrase.encode('utf-8') + if intermediates: + intermediates = [ + (imd.encode('utf-8') if isinstance(imd, str) else imd) + for imd in intermediates + ] + else: + intermediates = [] + return data_models.TLSContainer( + # TODO(rm_work): applying nosec here because this is not intended to be + # secure, it's just a way to get a consistent ID. Changing this would + # break backwards compatibility with existing loadbalancers. + id=hashlib.sha1(certificate).hexdigest(), # nosec + primary_cn=get_primary_cn(certificate), + private_key=prepare_private_key(private_key, private_key_passphrase), + certificate=certificate, + intermediates=intermediates) + + +def get_primary_cn(tls_cert): + """Returns primary CN for Certificate.""" + return get_host_names(tls_cert)['cn'] diff --git a/octavia/common/utils.py b/octavia/common/utils.py new file mode 100644 index 0000000000..4ad78347df --- /dev/null +++ b/octavia/common/utils.py @@ -0,0 +1,217 @@ +# Copyright 2011, VMware, Inc., 2014 A10 Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Borrowed from nova code base, more utilities will be added/borrowed as and +# when needed. 
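One hedged aside before the utilities module: build_pem above only joins the TLS container fields with newlines and never parses them, so its behavior can be sketched with placeholder bytes (the SimpleNamespace stand-in and the PEM bodies are illustrative, not real certificates):

    from types import SimpleNamespace

    from octavia.common.tls_utils import cert_parser

    container = SimpleNamespace(
        certificate=b'-----BEGIN CERTIFICATE-----...',
        private_key=b'-----BEGIN PRIVATE KEY-----...',
        intermediates=[b'-----BEGIN CERTIFICATE-----...'])
    # Result: certificate, key, then each intermediate, newline separated,
    # with a trailing newline, forming a single PEM bundle.
    pem = cert_parser.build_pem(container)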
+ +"""Utilities and helper functions.""" + +import base64 +import hashlib +import ipaddress +import re +import socket +import typing + +from cryptography import fernet +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from stevedore import driver as stevedore_driver + +from octavia.common import constants +if typing.TYPE_CHECKING: + from octavia.network import base as network_base + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + + +def get_hostname(): + return socket.gethostname() + + +def base64_sha1_string(string_to_hash): + """Get a b64-encoded sha1 hash of a string. Not intended to be secure!""" + # TODO(rm_work): applying nosec here because this is not intended to be + # secure, it's just a way to get a consistent ID. Changing this would + # break backwards compatibility with existing loadbalancers. + hash_str = hashlib.sha1(string_to_hash.encode('utf-8')).digest() # nosec + b64_str = base64.b64encode(hash_str, str.encode('_-', 'ascii')) + b64_sha1 = b64_str.decode('UTF-8') + # https://github.com/haproxy/haproxy/issues/644 + return re.sub(r"^-", "x", b64_sha1) + + +def get_amphora_driver(): + amphora_driver = stevedore_driver.DriverManager( + namespace='octavia.amphora.drivers', + name=CONF.controller_worker.amphora_driver, + invoke_on_load=True + ).driver + return amphora_driver + + +def get_network_driver() -> 'network_base.AbstractNetworkDriver': + CONF.import_group('controller_worker', 'octavia.common.config') + network_driver = stevedore_driver.DriverManager( + namespace='octavia.network.drivers', + name=CONF.controller_worker.network_driver, + invoke_on_load=True + ).driver + return network_driver + + +def ip_version(ip_address): + ip = ipaddress.ip_address(ip_address) + return ip.version + + +def is_ipv4(ip_address): + """Check if ip address is IPv4 address.""" + ip = ipaddress.ip_address(ip_address) + return ip.version == 4 + + +def is_ipv6(ip_address): + """Check if ip address is IPv6 address.""" + ip = ipaddress.ip_address(ip_address) + return ip.version == 6 + + +def is_cidr_ipv6(cidr): + """Check if CIDR is IPv6 address with subnet prefix.""" + ip = ipaddress.ip_network(cidr, strict=False) + return ip.version == 6 + + +def is_ipv6_lla(ip_address): + """Check if ip address is IPv6 link local address.""" + ip = ipaddress.ip_address(ip_address) + return ip.version == 6 and ip.is_link_local + + +def ip_port_str(ip_address, port): + """Return IP port as string representation depending on address family.""" + ip = ipaddress.ip_address(ip_address) + if ip.version == 4: + return f"{ip}:{port}" + return f"[{ip}]:{port}" + + +def netmask_to_prefix(netmask): + return ipaddress.ip_network(f'0.0.0.0/{netmask}', strict=False).prefixlen + + +def ip_netmask_to_cidr(ip, netmask): + return ipaddress.ip_network(f'{ip}/{netmask}', strict=False).with_prefixlen + + +def get_vip_security_group_name(loadbalancer_id): + if loadbalancer_id: + return constants.VIP_SECURITY_GROUP_PREFIX + loadbalancer_id + return None + + +def get_compatible_value(value): + if isinstance(value, str): + value = value.encode('utf-8') + return value + + +def _get_compatible_server_certs_key_passphrases(): + key_opts = CONF.certificates.server_certs_key_passphrase + keys = [] + for key_opt in key_opts: + key = str(key_opt) + if isinstance(key, str): + key = key.encode('utf-8') + keys.append( + base64.urlsafe_b64encode(key)) + return keys + + +def get_server_certs_key_passphrases_fernet() -> fernet.MultiFernet: + """Get a cryptography.MultiFernet with loaded 
keys.""" + keys = [ + fernet.Fernet(x) for x in + _get_compatible_server_certs_key_passphrases()] + return fernet.MultiFernet(keys) + + +def subnet_ip_availability(nw_ip_avail, subnet_id, req_num_ips): + for subnet in nw_ip_avail.subnet_ip_availability: + if subnet['subnet_id'] == subnet_id: + return subnet['total_ips'] - subnet['used_ips'] >= req_num_ips + return None + + +def b(s): + return s.encode('utf-8') + + +def expand_expected_codes(codes): + """Expand the expected code string in set of codes. + + 200-204 -> 200, 201, 202, 204 + 200, 203 -> 200, 203 + """ + retval = set() + codes = re.split(', *', codes) + for code in codes: + if not code: + continue + if '-' in code: + low, hi = code.split('-')[:2] + retval.update( + str(i) for i in range(int(low), int(hi) + 1)) + else: + retval.add(code) + return retval + + +class exception_logger: + """Wrap a function and log raised exception + + :param logger: the logger to log the exception default is LOG.exception + + :returns: origin value if no exception raised; re-raise the exception if + any occurred + + """ + + def __init__(self, logger=None): + self.logger = logger + + def __call__(self, func): + if self.logger is None: + _LOG = logging.getLogger(func.__module__) + self.logger = _LOG.exception + + def call(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + with excutils.save_and_reraise_exception(): + self.logger(e) + return None + return call + + +def map_protocol_to_nftable_protocol(rule_dict): + rule_dict[constants.PROTOCOL] = ( + constants.L4_PROTOCOL_MAP[rule_dict[constants.PROTOCOL]]) + return rule_dict diff --git a/octavia/common/validate.py b/octavia/common/validate.py new file mode 100644 index 0000000000..e2c33bcdc7 --- /dev/null +++ b/octavia/common/validate.py @@ -0,0 +1,580 @@ +# Copyright 2016 Blue Box, an IBM Company +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Several handy validation functions that go beyond simple type checking. +Defined here so these can also be used at deeper levels than the API. 
+""" + + +import ipaddress +import re +import typing + +from oslo_config import cfg +from rfc3986 import uri_reference +from rfc3986 import validators +from wsme import types as wtypes + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import utils +from octavia.i18n import _ + +if typing.TYPE_CHECKING: + from octavia.common import context + +CONF = cfg.CONF +_ListenerPUT = 'octavia.api.v2.types.listener.ListenerPUT' + + +def url(http://github.com/url,%20require_scheme=True): + """Raises an error if the url doesn't look like a URL.""" + validator = validators.Validator() + if require_scheme: + validator.allow_schemes('http', 'https') + validator.require_presence_of('scheme', 'host') + validator.check_validity_of('scheme', 'host', 'path') + else: + validator.check_validity_of('path') + + try: + validator.validate(uri_reference(url)) + except Exception as e: + raise exceptions.InvalidURL(url=url) from e + return True + + +def url_path(url_path): + """Raises an error if the url_path doesn't look like a URL Path.""" + validator = validators.Validator().check_validity_of('path') + try: + p_url = uri_reference(url_path) + validator.validate(p_url) + + invalid_path = ( + re.search(r"\s", url_path) or + p_url.scheme or p_url.userinfo or p_url.host or + p_url.port or + p_url.path is None or + not p_url.path.startswith('/') + ) + + if invalid_path: + raise exceptions.InvalidURLPath(url_path=url_path) + except Exception as e: + raise exceptions.InvalidURLPath(url_path=url_path) from e + return True + + +def header_name(header, what=None): + """Raises an error if header does not look like an HTML header name.""" + p = re.compile(constants.HTTP_HEADER_NAME_REGEX) + if not p.match(header): + raise exceptions.InvalidString(what=what) + return True + + +def cookie_value_string(value, what=None): + """Raises an error if the value string contains invalid characters.""" + p = re.compile(constants.HTTP_COOKIE_VALUE_REGEX) + if not p.match(value): + raise exceptions.InvalidString(what=what) + return True + + +def header_value_string(value, what=None): + """Raises an error if the value string contains invalid characters.""" + p = re.compile(constants.HTTP_HEADER_VALUE_REGEX) + q = re.compile(constants.HTTP_QUOTED_HEADER_VALUE_REGEX) + if not p.match(value) and not q.match(value): + raise exceptions.InvalidString(what=what) + return True + + +def regex(regex): + """Raises an error if the string given is not a valid regex.""" + try: + re.compile(regex) + except Exception as e: + raise exceptions.InvalidRegex(e=str(e)) + return True + + +# Note that we can evaluate this outside the context of any L7 Policy because +# L7 rules must be internally consistent. 
+def l7rule_data(l7rule): + """Raises an error if the l7rule given is invalid in some way.""" + if not l7rule.value: + raise exceptions.InvalidL7Rule(msg=_('L7 rule type requires a value')) + if l7rule.type == constants.L7RULE_TYPE_HEADER: + if not l7rule.key: + raise exceptions.InvalidL7Rule(msg='L7 rule type requires a key') + header_name(l7rule.key, what='key') + if l7rule.compare_type == constants.L7RULE_COMPARE_TYPE_REGEX: + regex(l7rule.value) + elif l7rule.compare_type in ( + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + constants.L7RULE_COMPARE_TYPE_CONTAINS, + constants.L7RULE_COMPARE_TYPE_EQUAL_TO): + header_value_string(l7rule.value, what='header value') + else: + raise exceptions.InvalidL7Rule(msg='invalid comparison type ' + 'for rule type') + + elif l7rule.type == constants.L7RULE_TYPE_COOKIE: + if not l7rule.key: + raise exceptions.InvalidL7Rule(msg='L7 rule type requires a key') + header_name(l7rule.key, what='key') + if l7rule.compare_type == constants.L7RULE_COMPARE_TYPE_REGEX: + regex(l7rule.value) + elif l7rule.compare_type in ( + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + constants.L7RULE_COMPARE_TYPE_CONTAINS, + constants.L7RULE_COMPARE_TYPE_EQUAL_TO): + cookie_value_string(l7rule.value, what='cookie value') + else: + raise exceptions.InvalidL7Rule(msg='invalid comparison type ' + 'for rule type') + + elif l7rule.type in (constants.L7RULE_TYPE_HOST_NAME, + constants.L7RULE_TYPE_PATH): + if l7rule.compare_type in ( + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + constants.L7RULE_COMPARE_TYPE_CONTAINS, + constants.L7RULE_COMPARE_TYPE_EQUAL_TO): + header_value_string(l7rule.value, what='comparison value') + elif l7rule.compare_type == constants.L7RULE_COMPARE_TYPE_REGEX: + regex(l7rule.value) + else: + raise exceptions.InvalidL7Rule(msg='invalid comparison type ' + 'for rule type') + + elif l7rule.type == constants.L7RULE_TYPE_FILE_TYPE: + if l7rule.compare_type == constants.L7RULE_COMPARE_TYPE_REGEX: + regex(l7rule.value) + elif l7rule.compare_type == constants.L7RULE_COMPARE_TYPE_EQUAL_TO: + header_value_string(l7rule.value, what='comparison value') + else: + raise exceptions.InvalidL7Rule(msg='invalid comparison type ' + 'for rule type') + + elif l7rule.type in [constants.L7RULE_TYPE_SSL_CONN_HAS_CERT, + constants.L7RULE_TYPE_SSL_VERIFY_RESULT, + constants.L7RULE_TYPE_SSL_DN_FIELD]: + validate_l7rule_ssl_types(l7rule) + + else: + raise exceptions.InvalidL7Rule(msg='invalid rule type') + return True + + +def validate_l7rule_ssl_types(l7rule): + if not l7rule.type or l7rule.type not in [ + constants.L7RULE_TYPE_SSL_CONN_HAS_CERT, + constants.L7RULE_TYPE_SSL_VERIFY_RESULT, + constants.L7RULE_TYPE_SSL_DN_FIELD]: + return + + rule_type = None if l7rule.type == wtypes.Unset else l7rule.type + req_key = None if l7rule.key == wtypes.Unset else l7rule.key + req_value = None if l7rule.value == wtypes.Unset else l7rule.value + compare_type = (None if l7rule.compare_type == wtypes.Unset else + l7rule.compare_type) + msg = None + if rule_type == constants.L7RULE_TYPE_SSL_CONN_HAS_CERT: + # key and value are not allowed + if req_key: + # log error or raise + msg = f'L7rule type {rule_type} does not use the "key" field.' + elif req_value.lower() != 'true': + msg = f'L7rule value {req_value} is not a boolean True string.' 
+ elif compare_type != constants.L7RULE_COMPARE_TYPE_EQUAL_TO: + msg = 'L7rule type {} only supports the {} compare type.'.format( + rule_type, constants.L7RULE_COMPARE_TYPE_EQUAL_TO) + + if rule_type == constants.L7RULE_TYPE_SSL_VERIFY_RESULT: + if req_key: + # log or raise req_key not used + msg = f'L7rule type {rule_type} does not use the "key" field.' + elif not req_value.isdigit() or int(req_value) < 0: + # log or raise req_value must be int + msg = f'L7rule type {rule_type} needs a int value, which is >= 0' + elif compare_type != constants.L7RULE_COMPARE_TYPE_EQUAL_TO: + msg = 'L7rule type {} only supports the {} compare type.'.format( + rule_type, constants.L7RULE_COMPARE_TYPE_EQUAL_TO) + + if rule_type == constants.L7RULE_TYPE_SSL_DN_FIELD: + dn_regex = re.compile(constants.DISTINGUISHED_NAME_FIELD_REGEX) + if compare_type == constants.L7RULE_COMPARE_TYPE_REGEX: + regex(l7rule.value) + + if not req_key or not req_value: + # log or raise key and value must be specified. + msg = (f'L7rule type {rule_type} needs to specify a ' + f'key and a value.') + # log or raise the key must be splited by '-' + elif not dn_regex.match(req_key): + msg = 'Invalid L7rule distinguished name field.' + + if msg: + raise exceptions.InvalidL7Rule(msg=msg) + + +def sanitize_l7policy_api_args(l7policy, create=False): + """Validate and make consistent L7Policy API arguments. + + This method is mainly meant to sanitize L7 Policy create and update + API dictionaries, so that we strip 'None' values that don't apply for + our particular update. This method does *not* verify that any + redirect_pool_id exists in the database, but will raise an + error if a redirect_url doesn't look like a URL. + + :param l7policy: The L7 Policy dictionary we are sanitizing / validating + """ + if 'action' in l7policy.keys(): + if l7policy['action'] == constants.L7POLICY_ACTION_REJECT: + l7policy.update({'redirect_url': None}) + l7policy.update({'redirect_pool_id': None}) + l7policy.pop('redirect_pool', None) + elif l7policy['action'] == constants.L7POLICY_ACTION_REDIRECT_TO_URL: + if not l7policy.get('redirect_url'): + raise exceptions.InvalidL7PolicyArgs( + msg='redirect_url must not be None') + l7policy.update({'redirect_pool_id': None}) + l7policy.pop('redirect_pool', None) + elif l7policy['action'] == constants.L7POLICY_ACTION_REDIRECT_TO_POOL: + if (not l7policy.get('redirect_pool_id') and + not l7policy.get('redirect_pool')): + raise exceptions.InvalidL7PolicyArgs( + msg='redirect_pool_id or redirect_pool must not be None') + l7policy.update({'redirect_url': None}) + elif l7policy['action'] == constants.L7POLICY_ACTION_REDIRECT_PREFIX: + if not l7policy.get('redirect_prefix'): + raise exceptions.InvalidL7PolicyArgs( + msg='redirect_prefix must not be None') + else: + raise exceptions.InvalidL7PolicyAction( + action=l7policy['action']) + if ((l7policy.get('redirect_pool_id') or l7policy.get('redirect_pool')) and + (l7policy.get('redirect_url') or l7policy.get('redirect_prefix'))): + raise exceptions.InvalidL7PolicyArgs( + msg='Cannot specify redirect_pool_id and redirect_url or ' + 'redirect_prefix at the same time') + if l7policy.get('redirect_pool_id'): + l7policy.update({ + 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL}) + l7policy.update({'redirect_url': None}) + l7policy.pop('redirect_pool', None) + l7policy.update({'redirect_prefix': None}) + l7policy.update({'redirect_http_code': None}) + if l7policy.get('redirect_pool'): + l7policy.update({ + 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL}) + 
l7policy.update({'redirect_url': None}) + l7policy.pop('redirect_pool_id', None) + l7policy.update({'redirect_prefix': None}) + l7policy.update({'redirect_http_code': None}) + if l7policy.get('redirect_url'): + url(http://github.com/l7policy['redirect_url']) + l7policy.update({ + 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL}) + l7policy.update({'redirect_pool_id': None}) + l7policy.update({'redirect_prefix': None}) + l7policy.pop('redirect_pool', None) + if not l7policy.get('redirect_http_code'): + l7policy.update({'redirect_http_code': 302}) + if l7policy.get('redirect_prefix'): + url(http://github.com/l7policy['redirect_prefix']) + l7policy.update({ + 'action': constants.L7POLICY_ACTION_REDIRECT_PREFIX}) + l7policy.update({'redirect_pool_id': None}) + l7policy.update({'redirect_url': None}) + l7policy.pop('redirect_pool', None) + if not l7policy.get('redirect_http_code'): + l7policy.update({'redirect_http_code': 302}) + + # If we are creating, we need an action at this point + if create and 'action' not in l7policy.keys(): + raise exceptions.InvalidL7PolicyAction(action='http://github.com/None') + + # See if we have anything left after that... + if not l7policy.keys(): + raise exceptions.InvalidL7PolicyArgs(msg='Invalid update options') + return l7policy + + +def port_exists(port_id, context=None): + """Raises an exception when a port does not exist.""" + network_driver = utils.get_network_driver() + try: + port = network_driver.get_port(port_id, context=context) + except Exception as e: + raise exceptions.InvalidSubresource(resource='Port', id=port_id) from e + return port + + +def check_port_in_use(port): + """Raise an exception when a port is used.""" + if port.device_id: + raise exceptions.ValidationException(detail=_( + "Port %(port_id)s is already used by device %(device_id)s ") % + {'port_id': port.id, 'device_id': port.device_id}) + return False + + +def subnet_exists(subnet_id, context=None): + """Raises an exception when a subnet does not exist.""" + network_driver = utils.get_network_driver() + try: + subnet = network_driver.get_subnet(subnet_id, context=context) + except Exception as e: + raise exceptions.InvalidSubresource( + resource='Subnet', id=subnet_id) from e + return subnet + + +def qos_policy_exists(qos_policy_id): + network_driver = utils.get_network_driver() + qos_extension_enabled(network_driver) + try: + qos_policy = network_driver.get_qos_policy(qos_policy_id) + except Exception as e: + raise exceptions.InvalidSubresource( + resource='qos_policy', id=qos_policy_id) from e + return qos_policy + + +def qos_extension_enabled(network_driver): + if not network_driver.qos_enabled(): + raise exceptions.ValidationException(detail=_( + "VIP QoS policy is not allowed in this deployment.")) + + +def network_exists_optionally_contains_subnet(network_id, subnet_id=None, + context=None): + """Raises an exception when a network does not exist. + + If a subnet is provided, also validate the network contains that subnet. 
+ """ + network_driver = utils.get_network_driver() + try: + network = network_driver.get_network(network_id, context=context) + except Exception as e: + raise exceptions.InvalidSubresource( + resource='Network', id=network_id) from e + if subnet_id: + if not network.subnets or subnet_id not in network.subnets: + raise exceptions.InvalidSubresource(resource='Subnet', + id=subnet_id) + return network + + +def security_group_exists(sg_id: str, + context: 'context.RequestContext' = None): + """Raises an exception when a security group does not exist.""" + network_driver = utils.get_network_driver() + try: + network_driver.get_security_group_by_id(sg_id, + context=context) + except Exception as e: + raise exceptions.InvalidSubresource( + resource='Security Group', id=sg_id) from e + + +def network_allowed_by_config(network_id, valid_networks=None): + if CONF.networking.valid_vip_networks and not valid_networks: + valid_networks = CONF.networking.valid_vip_networks + if valid_networks: + valid_networks = map(str.lower, valid_networks) + if network_id.lower() not in valid_networks: + raise exceptions.ValidationException(detail=_( + 'Supplied VIP network_id is not allowed by the configuration ' + 'of this deployment.')) + + +def is_ip_member_of_cidr(address, cidr): + if ipaddress.ip_address(address) in ipaddress.ip_network(cidr): + return True + return False + + +def check_session_persistence(SP_dict): + try: + if SP_dict['cookie_name']: + if SP_dict['type'] != constants.SESSION_PERSISTENCE_APP_COOKIE: + raise exceptions.ValidationException(detail=_( + 'Field "cookie_name" can only be specified with session ' + 'persistence of type "APP_COOKIE".')) + bad_cookie_name = re.compile(r'[\x00-\x20\x22\x28-\x29\x2c\x2f' + r'\x3a-\x40\x5b-\x5d\x7b\x7d\x7f]+') + valid_chars = re.compile(r'[\x00-\xff]+') + if (bad_cookie_name.search(SP_dict['cookie_name']) or + not valid_chars.search(SP_dict['cookie_name'])): + raise exceptions.ValidationException(detail=_( + 'Supplied "cookie_name" is invalid.')) + if (SP_dict['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and + not SP_dict['cookie_name']): + raise exceptions.ValidationException(detail=_( + 'Field "cookie_name" must be specified when using the ' + '"APP_COOKIE" session persistence type.')) + except exceptions.ValidationException: + raise + except Exception as e: + raise exceptions.ValidationException(detail=_( + 'Invalid session_persistence provided.')) from e + + +def ip_not_reserved(ip_address): + ip_address = ( + ipaddress.ip_address(ip_address).exploded.upper()) + if ip_address in CONF.networking.reserved_ips: + raise exceptions.InvalidOption(value=ip_address, + option='member address') + + +def check_cipher_prohibit_list(cipherstring): + ciphers = cipherstring.split(':') + prohibit_list = CONF.api_settings.tls_cipher_prohibit_list.split(':') + rejected = [] + for cipher in ciphers: + if cipher in prohibit_list: + rejected.append(cipher) + return rejected + + +def check_default_ciphers_prohibit_list_conflict(): + listener_rejected = check_cipher_prohibit_list( + CONF.api_settings.default_listener_ciphers) + if listener_rejected: + raise exceptions.ValidationException( + detail=_('Default listener ciphers conflict with prohibit list. ' + 'Conflicting ciphers: ' + ', '.join(listener_rejected))) + + pool_rejected = check_cipher_prohibit_list( + CONF.api_settings.default_pool_ciphers) + if pool_rejected: + raise exceptions.ValidationException( + detail=_('Default pool ciphers conflict with prohibit list. 
' + 'Conflicting ciphers: ' + ', '.join(pool_rejected))) + + +def check_tls_version_list(versions): + if versions == []: + raise exceptions.ValidationException( + detail=_('Empty TLS version list. Either specify at least one TLS ' + 'version or remove this parameter to use the default.')) + + # Unset action + if versions is None: + return + + invalid_versions = [v for v in versions + if v not in constants.TLS_ALL_VERSIONS] + if invalid_versions: + raise exceptions.ValidationException( + detail=_('Invalid TLS versions: ' + ', '.join(invalid_versions))) + + +def check_tls_version_min(versions, message=None): + """Checks a TLS version string against the configured minimum.""" + + if not CONF.api_settings.minimum_tls_version: + return + + if not message: + message = _("Requested TLS versions are less than the minimum: ") + + min_ver_index = constants.TLS_ALL_VERSIONS.index( + CONF.api_settings.minimum_tls_version) + + rejected = [] + for ver in versions: + if constants.TLS_ALL_VERSIONS.index(ver) < min_ver_index: + rejected.append(ver) + if rejected: + raise exceptions.ValidationException(detail=( + message + ', '.join(rejected) + " < " + + CONF.api_settings.minimum_tls_version)) + + +def check_default_tls_versions_min_conflict(): + if not CONF.api_settings.minimum_tls_version: + return + + listener_message = _("Default listener TLS versions are less than the " + "minimum: ") + pool_message = _("Default pool TLS versions are less than the minimum: ") + + check_tls_version_min(CONF.api_settings.default_listener_tls_versions, + message=listener_message) + + check_tls_version_min(CONF.api_settings.default_pool_tls_versions, + message=pool_message) + + +def check_alpn_protocols(protocols): + if protocols == []: + raise exceptions.ValidationException( + detail=_('Empty ALPN protocol list. 
Either specify at least one ' + 'ALPN protocol or remove this parameter to use the ' + 'default.')) + + # Unset action + if protocols is None: + return + + invalid_protocols = [p for p in protocols + if p not in constants.SUPPORTED_ALPN_PROTOCOLS] + if invalid_protocols: + raise exceptions.ValidationException( + detail=_('Invalid ALPN protocol: ' + ', '.join(invalid_protocols))) + + +def check_hsts_options(listener: dict): + if ((listener.get('hsts_include_subdomains') or + listener.get('hsts_preload')) and + not isinstance(listener.get('hsts_max_age'), int)): + raise exceptions.ValidationException( + detail=_('HSTS configuration options hsts_include_subdomains and ' + 'hsts_preload only make sense if hsts_max_age is ' + 'set as well.')) + + if (isinstance(listener.get('hsts_max_age'), int) and + listener['protocol'] != constants.PROTOCOL_TERMINATED_HTTPS): + raise exceptions.ValidationException( + detail=_('The HSTS feature can only be used for listeners using ' + 'the TERMINATED_HTTPS protocol.')) + + +def check_hsts_options_put(listener: _ListenerPUT, + db_listener: data_models.Listener): + hsts_disabled = all(obj.hsts_max_age in [None, wtypes.Unset] for obj + in (db_listener, listener)) + if ((listener.hsts_include_subdomains or listener.hsts_preload) and + hsts_disabled): + raise exceptions.ValidationException( + detail=_('Cannot enable hsts_include_subdomains or hsts_preload ' + 'if hsts_max_age was not set as well.')) + + if (isinstance(listener.hsts_max_age, int) and + db_listener.protocol != constants.PROTOCOL_TERMINATED_HTTPS): + raise exceptions.ValidationException( + detail=_('The HSTS feature can only be used for listeners using ' + 'the TERMINATED_HTTPS protocol.')) diff --git a/octavia/compute/__init__.py b/octavia/compute/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/compute/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/compute/compute_base.py b/octavia/compute/compute_base.py new file mode 100644 index 0000000000..9097384d7f --- /dev/null +++ b/octavia/compute/compute_base.py @@ -0,0 +1,133 @@ +# Copyright 2011-2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
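Editor's note: `check_hsts_options` above enforces two invariants: the `hsts_include_subdomains`/`hsts_preload` flags are meaningless without `hsts_max_age`, and any HSTS setting requires a `TERMINATED_HTTPS` listener. A minimal sketch of those rules as plain predicates, decoupled from Octavia's exception types (`hsts_config_errors` is a hypothetical helper; the dict keys and protocol string mirror the code above):

```python
def hsts_config_errors(listener):
    """Illustrative re-statement of the two HSTS invariants."""
    errors = []
    max_age_set = isinstance(listener.get('hsts_max_age'), int)
    if ((listener.get('hsts_include_subdomains') or
            listener.get('hsts_preload')) and not max_age_set):
        errors.append('hsts_include_subdomains/hsts_preload need hsts_max_age')
    if max_age_set and listener.get('protocol') != 'TERMINATED_HTTPS':
        errors.append('HSTS is only valid for TERMINATED_HTTPS listeners')
    return errors

# preload without a max age trips the first invariant:
assert hsts_config_errors({'hsts_preload': True,
                           'protocol': 'TERMINATED_HTTPS'})
# a fully specified TERMINATED_HTTPS listener passes both:
assert not hsts_config_errors({'hsts_max_age': 31536000,
                               'protocol': 'TERMINATED_HTTPS'})
```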
+ +import abc + + +class ComputeBase(metaclass=abc.ABCMeta): + + @abc.abstractmethod + def build(self, name="amphora_name", amphora_flavor=None, + image_tag=None, image_owner=None, key_name=None, sec_groups=None, + network_ids=None, config_drive_files=None, user_data=None, + server_group_id=None, availability_zone=None): + """Build a new amphora. + + :param name: Optional name for Amphora + :param amphora_flavor: Optionally specify a flavor + :param image_tag: tag of the base image for the amphora instance + :param key_name: Optionally specify a keypair + :param sec_groups: Optionally specify list of security groups + :param network_ids: A list of network IDs to attach to the amphora + :param config_drive_files: An optional dict of files to overwrite on + the server upon boot. Keys are file names + (i.e. /etc/passwd) and values are the + file contents (either as a string or as + a file-like object). A maximum of five + entries is allowed, and each file must be + 10k or less. + :param user_data: Optional user data to pass to be exposed by the + metadata server this can be a file type object as + well or a string + :param server_group_id: Optional server group id(uuid) which is used + for anti_affinity feature + :param availability_zone: Availability zone data dict + + :raises ComputeBuildException: if compute failed to build amphora + :returns: UUID of amphora + """ + + @abc.abstractmethod + def delete(self, compute_id): + """Delete the specified amphora + + :param compute_id: The id of the amphora to delete + """ + + @abc.abstractmethod + def status(self, compute_id): + """Check whether the specified amphora is up + + :param compute_id: the ID of the desired amphora + :returns: The compute "status" response ("ONLINE" or "OFFLINE") + """ + + @abc.abstractmethod + def get_amphora(self, compute_id, management_network_id=None): + """Retrieve an amphora object + + :param compute_id: the compute id of the desired amphora + :param management_network_id: ID of the management network + :returns: the amphora object + :returns: fault message or None + """ + + @abc.abstractmethod + def create_server_group(self, name, policy): + """Create a server group object + + :param name: the name of the server group + :param policy: the policy of the server group + :returns: the server group object + """ + + @abc.abstractmethod + def delete_server_group(self, server_group_id): + """Delete a server group object + + :param server_group_id: the uuid of a server group + """ + + @abc.abstractmethod + def attach_network_or_port(self, compute_id, network_id=None, + ip_address=None, port_id=None): + """Connects an existing amphora to an existing network. + + :param compute_id: id of an amphora in the compute service + :param network_id: id of a network + :param ip_address: ip address to attempt to be assigned to interface + :param port_id: id of the neutron port + :return: nova interface + :raises: Exception + """ + + @abc.abstractmethod + def detach_port(self, compute_id, port_id): + """Disconnects an existing amphora from an existing port. + + :param compute_id: id of an amphora in the compute service + :param port_id: id of the port + :return: None + :raises: Exception + """ + + @abc.abstractmethod + def validate_flavor(self, flavor_id): + """Validates that a compute flavor exists. + + :param flavor_id: ID of the compute flavor. 
+ :return: None + :raises: NotFound + :raises: NotImplementedError + """ + + @abc.abstractmethod + def validate_availability_zone(self, availability_zone): + """Validates that a compute availability zone exists. + + :param availability_zone: Name of the compute availability zone. + :return: None + :raises: NotFound + :raises: NotImplementedError + """ diff --git a/octavia/compute/drivers/__init__.py b/octavia/compute/drivers/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/compute/drivers/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/compute/drivers/noop_driver/__init__.py b/octavia/compute/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/compute/drivers/noop_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/compute/drivers/noop_driver/driver.py b/octavia/compute/drivers/noop_driver/driver.py new file mode 100644 index 0000000000..abb47d5e06 --- /dev/null +++ b/octavia/compute/drivers/noop_driver/driver.py @@ -0,0 +1,200 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
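Editor's note: `ComputeBase` above is a pure interface: every operation is declared with `@abc.abstractmethod`, so a concrete driver (the no-op driver below, or the nova driver later in this patch) must implement the whole surface before it can even be instantiated. A toy sketch of that contract, trimmed to two methods (class names hypothetical):

```python
import abc

class ToyComputeBase(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def build(self, name):
        """Create a compute instance and return its ID."""

    @abc.abstractmethod
    def delete(self, compute_id):
        """Delete a compute instance."""

class PartialDriver(ToyComputeBase):
    def build(self, name):
        return 'fake-compute-id'
    # delete() intentionally not implemented

try:
    PartialDriver()
except TypeError as exc:
    # Python refuses to instantiate a class with unimplemented
    # abstract methods, which is what keeps drivers honest.
    print(exc)
```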
+ +from collections import namedtuple + +from oslo_log import log as logging +from oslo_utils import uuidutils +from sqlalchemy import Column +from sqlalchemy import create_engine +from sqlalchemy import MetaData +from sqlalchemy import String +from sqlalchemy import Table +from sqlalchemy import update + +from octavia.common import constants +from octavia.common import data_models +from octavia.compute import compute_base as driver_base +from octavia.network import data_models as network_models + +LOG = logging.getLogger(__name__) + + +NoopServerGroup = namedtuple('ServerGroup', ['id']) + + +class NoopManager: + def __init__(self): + super().__init__() + self.computeconfig = {} + + # Get a DB engine for the network no-op DB + # Required to update the ports when a port is attached to a compute + self.engine = create_engine('sqlite:////tmp/octavia-network-noop.db', + isolation_level='SERIALIZABLE') + metadata_obj = MetaData() + + self.interfaces_table = Table( + 'interfaces', + metadata_obj, + Column('port_id', String(36)), + Column('network_id', String(36)), + Column('compute_id', String(36)), + Column('vnic_type', String(6))) + + def build(self, name="amphora_name", amphora_flavor=None, + image_tag=None, image_owner=None, key_name=None, sec_groups=None, + network_ids=None, config_drive_files=None, user_data=None, + port_ids=None, server_group_id=None, availability_zone=None): + LOG.debug("Compute %s no-op, build name %s, amphora_flavor %s, " + "image_tag %s, image_owner %s, key_name %s, sec_groups %s, " + "network_ids %s, config_drive_files %s, user_data %s, " + "port_ids %s, server_group_id %s, availability_zone %s", + self.__class__.__name__, + name, amphora_flavor, image_tag, image_owner, + key_name, sec_groups, network_ids, config_drive_files, + user_data, port_ids, server_group_id, availability_zone) + self.computeconfig[(name, amphora_flavor, image_tag, + image_owner, key_name, user_data, + server_group_id)] = ( + name, amphora_flavor, + image_tag, image_owner, key_name, sec_groups, + network_ids, config_drive_files, user_data, port_ids, + server_group_id, 'build') + compute_id = uuidutils.generate_uuid() + return compute_id + + def delete(self, compute_id): + LOG.debug("Compute %s no-op, compute_id %s", + self.__class__.__name__, compute_id) + self.computeconfig[compute_id] = (compute_id, 'delete') + + def status(self, compute_id): + LOG.debug("Compute %s no-op, compute_id %s", + self.__class__.__name__, compute_id) + self.computeconfig[compute_id] = (compute_id, 'status') + return constants.UP + + def get_amphora(self, compute_id, management_network_id=None): + LOG.debug("Compute %s no-op, compute_id %s, management_network_id %s", + self.__class__.__name__, compute_id, management_network_id) + self.computeconfig[(compute_id, management_network_id)] = ( + compute_id, management_network_id, 'get_amphora') + return data_models.Amphora( + compute_id=compute_id, + status=constants.ACTIVE, + lb_network_ip='192.0.2.1' + ), None + + def create_server_group(self, name, policy): + LOG.debug("Create Server Group %s no-op, name %s, policy %s ", + self.__class__.__name__, name, policy) + self.computeconfig[(name, policy)] = (name, policy, 'create') + return NoopServerGroup(id=uuidutils.generate_uuid()) + + def delete_server_group(self, server_group_id): + LOG.debug("Delete Server Group %s no-op, id %s ", + self.__class__.__name__, server_group_id) + self.computeconfig[server_group_id] = (server_group_id, 'delete') + + def attach_network_or_port(self, compute_id, network_id=None, + 
ip_address=None, port_id=None): + LOG.debug("Compute %s no-op, attach_network_or_port compute_id %s," + "network_id %s, ip_address %s, port_id %s", + self.__class__.__name__, compute_id, + network_id, ip_address, port_id) + self.computeconfig[(compute_id, network_id, ip_address, port_id)] = ( + compute_id, network_id, ip_address, port_id, + 'attach_network_or_port') + + # Update the port in the network no-op DB + with self.engine.connect() as connection: + connection.execute(update(self.interfaces_table).where( + self.interfaces_table.c.port_id == port_id).values( + compute_id=compute_id)) + connection.commit() + + return network_models.Interface( + id=uuidutils.generate_uuid(), + compute_id=compute_id, + network_id=network_id, + fixed_ips=[], + port_id=uuidutils.generate_uuid(), + vnic_type=constants.VNIC_TYPE_NORMAL + ) + + def detach_port(self, compute_id, port_id): + LOG.debug("Compute %s no-op, detach_network compute_id %s, " + "port_id %s", + self.__class__.__name__, compute_id, port_id) + self.computeconfig[(compute_id, port_id)] = ( + compute_id, port_id, 'detach_port') + + def validate_flavor(self, flavor_id): + LOG.debug("Compute %s no-op, validate_flavor flavor_id %s", + self.__class__.__name__, flavor_id) + self.computeconfig[flavor_id] = (flavor_id, 'validate_flavor') + + def validate_availability_zone(self, availability_zone): + LOG.debug("Compute %s no-op, validate_availability_zone name %s", + self.__class__.__name__, availability_zone) + self.computeconfig[availability_zone] = ( + availability_zone, 'validate_availability_zone') + + +class NoopComputeDriver(driver_base.ComputeBase): + def __init__(self): + super().__init__() + self.driver = NoopManager() + + def build(self, name="amphora_name", amphora_flavor=None, + image_tag=None, image_owner=None, key_name=None, sec_groups=None, + network_ids=None, config_drive_files=None, user_data=None, + port_ids=None, server_group_id=None, availability_zone=None): + + compute_id = self.driver.build(name, amphora_flavor, + image_tag, image_owner, + key_name, sec_groups, network_ids, + config_drive_files, user_data, port_ids, + server_group_id, availability_zone) + return compute_id + + def delete(self, compute_id): + self.driver.delete(compute_id) + + def status(self, compute_id): + return self.driver.status(compute_id) + + def get_amphora(self, compute_id, management_network_id=None): + return self.driver.get_amphora(compute_id, management_network_id) + + def create_server_group(self, name, policy): + return self.driver.create_server_group(name, policy) + + def delete_server_group(self, server_group_id): + self.driver.delete_server_group(server_group_id) + + def attach_network_or_port(self, compute_id, network_id=None, + ip_address=None, port_id=None): + self.driver.attach_network_or_port(compute_id, network_id, ip_address, + port_id) + + def detach_port(self, compute_id, port_id): + self.driver.detach_port(compute_id, port_id) + + def validate_flavor(self, flavor_id): + self.driver.validate_flavor(flavor_id) + + def validate_availability_zone(self, availability_zone): + self.driver.validate_availability_zone(availability_zone) diff --git a/octavia/compute/drivers/nova_driver.py b/octavia/compute/drivers/nova_driver.py new file mode 100644 index 0000000000..8f901b59c4 --- /dev/null +++ b/octavia/compute/drivers/nova_driver.py @@ -0,0 +1,442 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import random +import string + +from novaclient import exceptions as nova_exceptions +from oslo_config import cfg +from oslo_log import log as logging +from stevedore import driver as stevedore_driver +import tenacity + +from octavia.common import clients +from octavia.common import constants +from octavia.common import data_models as models +from octavia.common import exceptions +from octavia.compute import compute_base + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + + +def _raise_compute_exception(retry_state): + LOG.exception("Error retrieving nova virtual machine.") + raise exceptions.ComputeGetException() + + +class VirtualMachineManager(compute_base.ComputeBase): + '''Compute implementation of virtual machines via nova.''' + + def __init__(self): + super().__init__() + # Must initialize nova api + self._nova_client = clients.NovaAuth.get_nova_client( + service_name=CONF.nova.service_name, + endpoint=CONF.nova.endpoint, + region=CONF.nova.region_name, + endpoint_type=CONF.nova.endpoint_type, + insecure=CONF.nova.insecure, + cacert=CONF.nova.ca_certificates_file) + self.manager = self._nova_client.servers + self.server_groups = self._nova_client.server_groups + self.flavor_manager = self._nova_client.flavors + self.availability_zone_manager = self._nova_client.availability_zones + self.volume_driver = stevedore_driver.DriverManager( + namespace='octavia.volume.drivers', + name=CONF.controller_worker.volume_driver, + invoke_on_load=True + ).driver + self.image_driver = stevedore_driver.DriverManager( + namespace='octavia.image.drivers', + name=CONF.controller_worker.image_driver, + invoke_on_load=True + ).driver + + def build(self, name="amphora_name", amphora_flavor=None, + image_tag=None, image_owner=None, key_name=None, sec_groups=None, + network_ids=None, port_ids=None, config_drive_files=None, + user_data=None, server_group_id=None, availability_zone=None): + '''Create a new virtual machine. + + :param name: optional name for amphora + :param amphora_flavor: image flavor for virtual machine + :param image_tag: image tag for virtual machine + :param key_name: keypair to add to the virtual machine + :param sec_groups: Security group IDs for virtual machine + :param network_ids: Network IDs to include on virtual machine + :param port_ids: Port IDs to include on virtual machine + :param config_drive_files: An optional dict of files to overwrite on + the server upon boot. Keys are file names + (i.e. /etc/passwd) and values are the file + contents (either as a string or as a + file-like object). A maximum of five + entries is allowed, and each file must be + 10k or less. 
+ :param user_data: Optional user data to pass to be exposed by the + metadata server this can be a file type object as + well or a string + :param server_group_id: Optional server group id(uuid) which is used + for anti_affinity feature + :param availability_zone: Availability zone data dict + :raises ComputeBuildException: if nova failed to build virtual machine + :returns: UUID of amphora + + ''' + + volume_id = None + try: + network_ids = network_ids or [] + port_ids = port_ids or [] + nics = [] + if network_ids: + nics.extend([{"net-id": net_id} for net_id in network_ids]) + if port_ids: + nics.extend([{"port-id": port_id} for port_id in port_ids]) + + server_group = None if server_group_id is None else { + "group": server_group_id} + + if availability_zone: + az_name = availability_zone.get(constants.COMPUTE_ZONE, + CONF.nova.availability_zone) + else: + az_name = CONF.nova.availability_zone + + image_id = self.image_driver.get_image_id_by_tag( + image_tag, image_owner) + + if CONF.nova.random_amphora_name_length: + r = random.SystemRandom() + name = "a{}".format("".join( + [r.choice(string.ascii_uppercase + string.digits) + for i in range(CONF.nova.random_amphora_name_length - 1)] + )) + block_device_mapping = {} + if (CONF.controller_worker.volume_driver != + constants.VOLUME_NOOP_DRIVER): + # creating volume + LOG.debug('Creating volume for amphora from image %s', + image_id) + volume_id = self.volume_driver.create_volume_from_image( + image_id, availability_zone) + LOG.debug('Created boot volume %s for amphora', volume_id) + # If use volume based, does not require image ID anymore + image_id = None + # Boot from volume with parameters: target device name = vda, + # device id = volume_id, device type and size unspecified, + # delete-on-terminate = true (volume will be deleted by Nova + # on instance termination) + block_device_mapping = {'vda': f'{volume_id}:::true'} + amphora = self.manager.create( + name=name, image=image_id, flavor=amphora_flavor, + block_device_mapping=block_device_mapping, + key_name=key_name, security_groups=sec_groups, + nics=nics, + files=config_drive_files, + userdata=user_data, + config_drive=True, + scheduler_hints=server_group, + availability_zone=az_name + ) + + return amphora.id + except Exception as e: + if (CONF.controller_worker.volume_driver != + constants.VOLUME_NOOP_DRIVER): + self.volume_driver.delete_volume(volume_id) + LOG.exception("Nova failed to build the instance due to: %s", + str(e)) + raise exceptions.ComputeBuildException(fault=e) + + def delete(self, compute_id): + '''Delete a virtual machine. + + :param compute_id: virtual machine UUID + ''' + try: + self.manager.delete(server=compute_id) + except nova_exceptions.NotFound: + LOG.warning("Nova instance with id: %s not found. " + "Assuming already deleted.", compute_id) + except Exception as e: + LOG.exception("Error deleting nova virtual machine.") + raise exceptions.ComputeDeleteException(compute_msg=str(e)) + + def status(self, compute_id): + '''Retrieve the status of a virtual machine. 
+ + :param compute_id: virtual machine UUID + :returns: constant of amphora status + ''' + try: + amphora, fault = self.get_amphora(compute_id) + if amphora and amphora.status == 'ACTIVE': + return constants.UP + except Exception as e: + LOG.exception("Error retrieving nova virtual machine status.") + raise exceptions.ComputeStatusException() from e + return constants.DOWN + + @tenacity.retry(retry=tenacity.retry_if_exception_type(), + stop=tenacity.stop_after_attempt(CONF.compute.max_retries), + retry_error_callback=_raise_compute_exception, + wait=tenacity.wait_fixed(CONF.compute.retry_interval)) + def get_amphora(self, compute_id, management_network_id=None): + '''Retrieve the information in nova of a virtual machine. + + :param compute_id: virtual machine UUID + :param management_network_id: ID of the management network + :returns: an amphora object + :returns: fault message or None + ''' + # utilize nova client ServerManager 'get' method to retrieve info + amphora = self.manager.get(compute_id) + return self._translate_amphora(amphora, management_network_id) + + def _translate_amphora(self, nova_response, management_network_id=None): + '''Convert a nova virtual machine into an amphora object. + + :param nova_response: JSON response from nova + :param management_network_id: ID of the management network + :returns: an amphora object + :returns: fault message or None + ''' + # Extract interfaces of virtual machine to populate desired amphora + # fields + + lb_network_ip = None + availability_zone = None + image_id = None + + if management_network_id: + boot_networks = [management_network_id] + else: + boot_networks = CONF.controller_worker.amp_boot_network_list + + try: + inf_list = nova_response.interface_list() + for interface in inf_list: + net_id = interface.net_id + # Pick the first fixed_ip if this is a boot network or if + # there are no boot networks configured (use default network) + if not boot_networks or net_id in boot_networks: + lb_network_ip = interface.fixed_ips[0]['ip_address'] + break + try: + availability_zone = getattr( + nova_response, 'OS-EXT-AZ:availability_zone') + except AttributeError: + LOG.info('No availability zone listed for server %s', + nova_response.id) + except Exception: + LOG.debug('Extracting virtual interfaces through nova ' + 'os-interfaces extension failed.') + + fault = getattr(nova_response, 'fault', None) + if (CONF.controller_worker.volume_driver == + constants.VOLUME_NOOP_DRIVER): + image_id = nova_response.image.get("id") + else: + try: + volumes = self._nova_client.volumes.get_server_volumes( + nova_response.id) + except Exception: + LOG.debug('Extracting volumes through nova ' + 'os-volumes extension failed.') + volumes = [] + if not volumes: + LOG.warning('Boot volume not found for volume backed ' + 'amphora instance %s ', nova_response.id) + else: + if len(volumes) > 1: + LOG.warning('Found more than one (%s) volumes ' + 'for amphora instance %s', + len(volumes), nova_response.id) + volume_id = volumes[0].volumeId + image_id = self.volume_driver.get_image_from_volume(volume_id) + + response = models.Amphora( + compute_id=nova_response.id, + status=nova_response.status, + lb_network_ip=lb_network_ip, + cached_zone=availability_zone, + image_id=image_id, + compute_flavor=nova_response.flavor.get("id") + ) + return response, fault + + def create_server_group(self, name, policy): + """Create a server group object + + :param name: the name of the server group + :param policy: the policy of the server group + :raises: Generic exception if the 
server group is not created
+        :returns: the server group object
+        """
+        kwargs = {'name': name,
+                  'policies': [policy]}
+        try:
+            server_group_obj = self.server_groups.create(**kwargs)
+            return server_group_obj
+        except Exception as e:
+            LOG.exception("Error creating server group instance.")
+            raise exceptions.ServerGroupObjectCreateException() from e
+
+    def delete_server_group(self, server_group_id):
+        """Delete a server group object
+
+        :param server_group_id: the uuid of a server group
+        :raises: Generic exception if the server group is not deleted
+        """
+        try:
+            self.server_groups.delete(server_group_id)
+
+        except nova_exceptions.NotFound:
+            LOG.warning("Server group instance with id: %s not found. "
+                        "Assuming already deleted.", server_group_id)
+        except Exception as e:
+            LOG.exception("Error deleting server group instance.")
+            raise exceptions.ServerGroupObjectDeleteException() from e
+
+    def attach_network_or_port(self, compute_id, network_id=None,
+                               ip_address=None, port_id=None):
+        """Attach a port or a network to an existing amphora
+
+        :param compute_id: id of an amphora in the compute service
+        :param network_id: id of a network
+        :param ip_address: ip address to attempt to be assigned to interface
+        :param port_id: id of the neutron port
+        :return: nova interface instance
+        :raises ComputePortInUseException: The port is in use somewhere else
+        :raises ComputeUnknownException: Unknown nova error
+        """
+        try:
+            interface = self.manager.interface_attach(
+                server=compute_id, net_id=network_id, fixed_ip=ip_address,
+                port_id=port_id)
+        except nova_exceptions.Conflict as e:
+            # The port is already in use.
+            if port_id:
+                # Check if the port we want is already attached
+                try:
+                    interfaces = self.manager.interface_list(compute_id)
+                    for interface in interfaces:
+                        if interface.id == port_id:
+                            return interface
+                except Exception as e:
+                    raise exceptions.ComputeUnknownException(exc=str(e))
+
+                raise exceptions.ComputePortInUseException(port=port_id)
+
+            # Nova should have created the port, so something is really
+            # wrong in nova if we get here.
+ raise exceptions.ComputeUnknownException(exc=str(e)) + except nova_exceptions.NotFound as e: + if 'Instance' in str(e): + raise exceptions.NotFound(resource='Instance', id=compute_id) + if 'Network' in str(e): + raise exceptions.NotFound(resource='Network', id=network_id) + if 'Port' in str(e): + raise exceptions.NotFound(resource='Port', id=port_id) + raise exceptions.NotFound(resource=str(e), id=compute_id) + except nova_exceptions.BadRequest as e: + if 'Failed to claim PCI device' in str(e): + message = ('Nova failed to claim PCI devices during ' + f'interface attach for port {port_id} on ' + f'instance {compute_id}') + LOG.error(message) + raise exceptions.ComputeNoResourcesException(message, + exc=str(e)) + raise + except nova_exceptions.ClientException as e: + if 'PortBindingFailed' in str(e): + message = ('Nova failed to bind the port during ' + f'interface attach for port {port_id} on ' + f'instance {compute_id}') + LOG.error(message) + raise exceptions.ComputeNoResourcesException(message, + exc=str(e)) + raise + except Exception as e: + LOG.error('Error attaching network %(network_id)s with ip ' + '%(ip_address)s and port %(port_id)s to amphora ' + '(compute_id: %(compute_id)s) ', + { + constants.COMPUTE_ID: compute_id, + constants.NETWORK_ID: network_id, + constants.IP_ADDRESS: ip_address, + constants.PORT_ID: port_id + }) + raise exceptions.ComputeUnknownException(exc=str(e)) + return interface + + def detach_port(self, compute_id, port_id): + """Detaches a port from an existing amphora. + + :param compute_id: id of an amphora in the compute service + :param port_id: id of the port + :return: None + """ + try: + self.manager.interface_detach(server=compute_id, + port_id=port_id) + except Exception: + LOG.error('Error detaching port %(port_id)s from amphora ' + 'with compute ID %(compute_id)s. ' + 'Skipping.', + { + 'port_id': port_id, + 'compute_id': compute_id + }) + + def validate_flavor(self, flavor_id): + """Validates that a flavor exists in nova. + + :param flavor_id: ID of the flavor to lookup in nova. + :raises: NotFound + :returns: None + """ + try: + self.flavor_manager.get(flavor_id) + except nova_exceptions.NotFound as e: + LOG.info('Flavor %s was not found in nova.', flavor_id) + raise exceptions.InvalidSubresource(resource='Nova flavor', + id=flavor_id) from e + except Exception as e: + LOG.exception('Nova reports a failure getting flavor details for ' + 'flavor ID %s: %s', flavor_id, str(e)) + raise + + def validate_availability_zone(self, availability_zone): + """Validates that an availability zone exists in nova. + + :param availability_zone: Name of the availability zone to lookup. + :raises: NotFound + :returns: None + """ + try: + compute_zones = [ + a.zoneName for a in self.availability_zone_manager.list( + detailed=False)] + if availability_zone not in compute_zones: + LOG.info('Availability zone %s was not found in nova. %s', + availability_zone, compute_zones) + raise exceptions.InvalidSubresource( + resource='Nova availability zone', id=availability_zone) + except Exception as e: + LOG.exception('Nova reports a failure getting listing ' + 'availability zones: %s', str(e)) + raise diff --git a/octavia/controller/__init__.py b/octavia/controller/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/controller/healthmanager/__init__.py b/octavia/controller/healthmanager/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/healthmanager/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/controller/healthmanager/health_manager.py b/octavia/controller/healthmanager/health_manager.py new file mode 100644 index 0000000000..1ba19c0253 --- /dev/null +++ b/octavia/controller/healthmanager/health_manager.py @@ -0,0 +1,164 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from concurrent import futures +import functools +import time + +from oslo_config import cfg +from oslo_db import exception as db_exc +from oslo_log import log as logging +from oslo_utils import excutils + +from octavia.common import constants +from octavia.controller.worker.v2 import controller_worker as cw2 +from octavia.db import api as db_api +from octavia.db import repositories as repo + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def wait_done_or_dead(futs, dead, check_timeout=1): + while True: + _done, not_done = futures.wait(futs, timeout=check_timeout) + if not not_done: + break + if dead.is_set(): + for fut in not_done: + # This may not actually be able to cancel, but try to + # if we can. + fut.cancel() + + +def update_stats_on_done(stats, fut): + # This utilizes the fact that python, non-primitive types are + # passed by reference (not by value)... 
+ stats['failover_attempted'] += 1 + try: + fut.result() + except futures.CancelledError: + stats['failover_cancelled'] += 1 + except Exception: + stats['failover_failed'] += 1 + + +class HealthManager: + def __init__(self, exit_event): + self.cw = cw2.ControllerWorker() + self.threads = CONF.health_manager.failover_threads + self.executor = futures.ThreadPoolExecutor(max_workers=self.threads) + self.amp_repo = repo.AmphoraRepository() + self.amp_health_repo = repo.AmphoraHealthRepository() + self.lb_repo = repo.LoadBalancerRepository() + self.dead = exit_event + + def _test_and_set_failover_prov_status(self, lock_session, lb_id): + if self.lb_repo.set_status_for_failover(lock_session, lb_id, + constants.PENDING_UPDATE): + return True + db_lb = self.lb_repo.get(lock_session, id=lb_id) + prov_status = db_lb.provisioning_status + LOG.warning("Load balancer %(id)s is in immutable state " + "%(state)s. Skipping failover.", + {"state": prov_status, "id": db_lb.id}) + return False + + def health_check(self): + """Check for stale amphorae and process them + + ... until either no more stale amphora were found or all executor + threads are busy. + """ + stats = { + 'failover_attempted': 0, + 'failover_failed': 0, + 'failover_cancelled': 0, + } + futs = [] + while not self.dead.is_set(): + amp_health = None + lock_session = None + try: + lock_session = db_api.get_session() + lock_session.begin() + amp_health = self.amp_health_repo.get_stale_amphora( + lock_session) + if amp_health: + amp = self.amp_repo.get(lock_session, + id=amp_health.amphora_id) + # If there is an associated LB, attempt to set it to + # PENDING_UPDATE. If it is already immutable, skip the + # amphora on this cycle + if amp and amp.load_balancer_id: + if not self._test_and_set_failover_prov_status( + lock_session, amp.load_balancer_id): + lock_session.rollback() + break + lock_session.commit() + except db_exc.DBDeadlock: + LOG.debug('Database reports deadlock. Skipping.') + lock_session.rollback() + amp_health = None + except db_exc.RetryRequest: + LOG.debug('Database is requesting a retry. Skipping.') + lock_session.rollback() + amp_health = None + except db_exc.DBConnectionError: + db_api.wait_for_connection(self.dead) + lock_session.rollback() + amp_health = None + if not self.dead.is_set(): + # amphora heartbeat timestamps should also be outdated + # while DB is unavailable and soon after DB comes back + # online. Sleeping off the full "heartbeat_timeout" + # interval to give the amps a chance to check in before + # we start failovers. 
+                    time.sleep(CONF.health_manager.heartbeat_timeout)
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    if lock_session:
+                        lock_session.rollback()
+
+            # No more stale amps found
+            if amp_health is None:
+                break
+
+            LOG.info("Stale amphora id: %s", amp_health.amphora_id)
+            fut = self.executor.submit(
+                self.cw.failover_amphora, amp_health.amphora_id, reraise=True)
+            fut.add_done_callback(
+                functools.partial(update_stats_on_done, stats)
+            )
+            futs.append(fut)
+            # All threads are/were busy
+            if len(futs) == self.threads:
+                break
+        if futs:
+            LOG.info("Waiting for %s failovers to finish",
+                     len(futs))
+            wait_done_or_dead(futs, self.dead)
+        if stats['failover_attempted'] > 0:
+            LOG.info("Attempted %s failovers of amphorae",
+                     stats['failover_attempted'])
+            LOG.info("Failed %s failovers of amphorae",
+                     stats['failover_failed'])
+            LOG.info("Cancelled %s failovers of amphorae",
+                     stats['failover_cancelled'])
+            happy_failovers = stats['failover_attempted']
+            happy_failovers -= stats['failover_cancelled']
+            happy_failovers -= stats['failover_failed']
+            LOG.info("Successfully completed %s failovers of amphorae",
+                     happy_failovers)
diff --git a/octavia/controller/housekeeping/__init__.py b/octavia/controller/housekeeping/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/controller/housekeeping/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
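
For reference, the concurrency pattern in health_check() above reduces to a
plain ThreadPoolExecutor whose futures tally their outcomes in a shared dict
through a done-callback, plus an exit Event that lets a dying process cancel
queued work. A minimal, self-contained sketch, not part of this patch
(fake_failover and the stats keys are illustrative stand-ins):

# Self-contained sketch of the health_check() concurrency pattern above.
# fake_failover stands in for ControllerWorker.failover_amphora.
import functools
import threading
import time
from concurrent import futures


def wait_done_or_dead(futs, dead, check_timeout=1):
    while True:
        _done, not_done = futures.wait(futs, timeout=check_timeout)
        if not not_done:
            break
        if dead.is_set():
            for fut in not_done:
                fut.cancel()  # best effort; running futures cannot be cancelled


def update_stats_on_done(stats, fut):
    # Mutates the shared dict in place; called once per finished future.
    stats['attempted'] += 1
    try:
        fut.result()
    except futures.CancelledError:
        stats['cancelled'] += 1
    except Exception:
        stats['failed'] += 1


def fake_failover(amp_id):
    time.sleep(0.1)


dead = threading.Event()
stats = {'attempted': 0, 'failed': 0, 'cancelled': 0}
with futures.ThreadPoolExecutor(max_workers=2) as executor:
    futs = []
    for amp_id in ('amp-1', 'amp-2', 'amp-3'):
        fut = executor.submit(fake_failover, amp_id)
        fut.add_done_callback(functools.partial(update_stats_on_done, stats))
        futs.append(fut)
    wait_done_or_dead(futs, dead)
print(stats)  # {'attempted': 3, 'failed': 0, 'cancelled': 0}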
diff --git a/octavia/controller/housekeeping/house_keeping.py b/octavia/controller/housekeeping/house_keeping.py
new file mode 100644
index 0000000000..c7e9cb1582
--- /dev/null
+++ b/octavia/controller/housekeeping/house_keeping.py
@@ -0,0 +1,100 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from concurrent import futures
+import datetime
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from sqlalchemy.orm import exc as sqlalchemy_exceptions
+
+from octavia.controller.worker.v2 import controller_worker as cw2
+from octavia.db import api as db_api
+from octavia.db import repositories as repo
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+class DatabaseCleanup:
+    def __init__(self):
+        self.amp_repo = repo.AmphoraRepository()
+        self.amp_health_repo = repo.AmphoraHealthRepository()
+        self.lb_repo = repo.LoadBalancerRepository()
+
+    def delete_old_amphorae(self):
+        """Checks the DB for old amphorae and deletes them by age."""
+        exp_age = datetime.timedelta(
+            seconds=CONF.house_keeping.amphora_expiry_age)
+
+        session = db_api.get_session()
+        with session.begin():
+            amp_ids = self.amp_repo.get_all_deleted_expiring(session,
+                                                             exp_age=exp_age)
+
+            for amp_id in amp_ids:
+                # If we're here, we already think the amp is expiring according
+                # to the amphora table. Now check it is expired in the health
+                # table.
+                # In this way, we ensure that amps aren't deleted unless they
+                # are both expired AND no longer receiving zombie heartbeats.
+                if self.amp_health_repo.check_amphora_health_expired(
+                        session, amp_id, exp_age):
+                    LOG.debug('Attempting to purge db record for Amphora ID: '
                              '%s', amp_id)
+                    self.amp_repo.delete(session, id=amp_id)
+                    try:
+                        self.amp_health_repo.delete(session, amphora_id=amp_id)
+                    except sqlalchemy_exceptions.NoResultFound:
+                        pass  # Best effort delete, this record might not exist
+                    LOG.info('Purged db record for Amphora ID: %s', amp_id)
+
+    def cleanup_load_balancers(self):
+        """Checks the DB for old load balancers and triggers their removal."""
+        exp_age = datetime.timedelta(
+            seconds=CONF.house_keeping.load_balancer_expiry_age)
+
+        session = db_api.get_session()
+        with session.begin():
+            lb_ids = self.lb_repo.get_all_deleted_expiring(session,
+                                                           exp_age=exp_age)
+
+            for lb_id in lb_ids:
+                LOG.info('Attempting to delete load balancer id: %s', lb_id)
+                self.lb_repo.delete(session, id=lb_id)
+                LOG.info('Deleted load balancer id: %s', lb_id)
+
+
+class CertRotation:
+    def __init__(self):
+        self.threads = CONF.house_keeping.cert_rotate_threads
+        self.cw = cw2.ControllerWorker()
+
+    def rotate(self):
+        """Check the amphora db table for expiring auth certs."""
+        amp_repo = repo.AmphoraRepository()
+
+        with futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
+            rotation_count = 0
+            while True:
+                session = db_api.get_session()
+                with session.begin():
+                    amp = amp_repo.get_cert_expiring_amphora(session)
+                    if not amp:
+                        break
+                    rotation_count += 1
+                    LOG.debug("Amphora %s has an expiring certificate",
+                              amp.id)
+                    executor.submit(self.cw.amphora_cert_rotation, amp.id)
+            if rotation_count > 0:
+                LOG.info("Rotated certificates for %s amphorae",
+                         rotation_count)
diff --git a/octavia/controller/queue/__init__.py b/octavia/controller/queue/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/controller/queue/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/controller/queue/v2/__init__.py b/octavia/controller/queue/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/queue/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/controller/queue/v2/consumer.py b/octavia/controller/queue/v2/consumer.py new file mode 100644 index 0000000000..b0e385aaff --- /dev/null +++ b/octavia/controller/queue/v2/consumer.py @@ -0,0 +1,72 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import cotyledon +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging as messaging +from oslo_messaging.rpc import dispatcher +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.common import rpc +from octavia.controller.queue.v2 import endpoints + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + + +class ConsumerService(cotyledon.Service): + + def __init__(self, worker_id, conf): + super().__init__(worker_id) + self.conf = conf + self.topic = constants.TOPIC_AMPHORA_V2 + self.server = conf.host + self.endpoints = [] + self.access_policy = dispatcher.DefaultRPCAccessPolicy + self.message_listener = None + + def run(self): + LOG.info('Starting V2 consumer...') + target = messaging.Target(topic=self.topic, server=self.server, + fanout=False) + self.endpoints = [endpoints.Endpoints()] + self.message_listener = rpc.get_server( + target, self.endpoints, + access_policy=self.access_policy + ) + self.message_listener.start() + if CONF.task_flow.jobboard_enabled: + for e in self.endpoints: + e.worker.services_controller.run_conductor( + f'octavia-task-flow-conductor-{uuidutils.generate_uuid()}') + + def terminate(self): + if self.message_listener: + LOG.info('Stopping V2 consumer...') + self.message_listener.stop() + + LOG.info('V2 Consumer successfully stopped. 
Waiting for ' + 'final messages to be processed...') + self.message_listener.wait() + if self.endpoints: + LOG.info('Shutting down V2 endpoint worker executors...') + for e in self.endpoints: + try: + e.worker.executor.shutdown() + except AttributeError: + pass + super().terminate() diff --git a/octavia/controller/queue/v2/endpoints.py b/octavia/controller/queue/v2/endpoints.py new file mode 100644 index 0000000000..e6bb40347c --- /dev/null +++ b/octavia/controller/queue/v2/endpoints.py @@ -0,0 +1,174 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging as messaging + +from octavia.common import constants +from octavia.controller.worker.v2 import controller_worker + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + + +class Endpoints: + + # API version history: + # 1.0 - Initial version. + # 2.0 - Provider driver format. + target = messaging.Target( + namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, + version='2.0') + + def __init__(self): + self.worker = controller_worker.ControllerWorker() + + def create_load_balancer(self, context, loadbalancer, + flavor=None, availability_zone=None): + LOG.info('Creating load balancer \'%s\'...', + loadbalancer[constants.LOADBALANCER_ID]) + self.worker.create_load_balancer(loadbalancer, flavor, + availability_zone) + + def update_load_balancer(self, context, original_load_balancer, + load_balancer_updates): + LOG.info('Updating load balancer \'%s\'...', + original_load_balancer.get(constants.LOADBALANCER_ID)) + self.worker.update_load_balancer(original_load_balancer, + load_balancer_updates) + + def delete_load_balancer(self, context, loadbalancer, cascade=False): + LOG.info('Deleting load balancer \'%s\'...', + loadbalancer.get(constants.LOADBALANCER_ID)) + self.worker.delete_load_balancer(loadbalancer, cascade) + + def failover_load_balancer(self, context, load_balancer_id): + LOG.info('Failing over amphora in load balancer \'%s\'...', + load_balancer_id) + self.worker.failover_loadbalancer(load_balancer_id) + + def failover_amphora(self, context, amphora_id): + LOG.info('Failing over amphora \'%s\'...', + amphora_id) + self.worker.failover_amphora(amphora_id) + + def create_listener(self, context, listener): + LOG.info('Creating listener \'%s\'...', + listener[constants.LISTENER_ID]) + self.worker.create_listener(listener) + + def update_listener(self, context, original_listener, listener_updates): + LOG.info('Updating listener \'%s\'...', + original_listener[constants.LISTENER_ID]) + self.worker.update_listener(original_listener, listener_updates) + + def delete_listener(self, context, listener): + LOG.info('Deleting listener \'%s\'...', + listener[constants.LISTENER_ID]) + self.worker.delete_listener(listener) + + def create_pool(self, context, pool): + LOG.info('Creating pool \'%s\'...', pool.get(constants.POOL_ID)) + self.worker.create_pool(pool) + + def update_pool(self, context, original_pool, pool_updates): + 
LOG.info('Updating pool \'%s\'...', + original_pool.get(constants.POOL_ID)) + self.worker.update_pool(original_pool, pool_updates) + + def delete_pool(self, context, pool): + LOG.info('Deleting pool \'%s\'...', pool.get(constants.POOL_ID)) + self.worker.delete_pool(pool) + + def create_health_monitor(self, context, health_monitor): + LOG.info('Creating health monitor \'%s\'...', health_monitor.get( + constants.HEALTHMONITOR_ID)) + self.worker.create_health_monitor(health_monitor) + + def update_health_monitor(self, context, original_health_monitor, + health_monitor_updates): + LOG.info('Updating health monitor \'%s\'...', + original_health_monitor.get(constants.HEALTHMONITOR_ID)) + self.worker.update_health_monitor(original_health_monitor, + health_monitor_updates) + + def delete_health_monitor(self, context, health_monitor): + LOG.info('Deleting health monitor \'%s\'...', health_monitor.get( + constants.HEALTHMONITOR_ID)) + self.worker.delete_health_monitor(health_monitor) + + def create_member(self, context, member): + LOG.info('Creating member \'%s\'...', member.get(constants.MEMBER_ID)) + self.worker.create_member(member) + + def update_member(self, context, original_member, member_updates): + LOG.info('Updating member \'%s\'...', original_member.get( + constants.MEMBER_ID)) + self.worker.update_member(original_member, member_updates) + + def batch_update_members(self, context, old_members, new_members, + updated_members): + updated_member_ids = [m.get(constants.ID) for m in updated_members] + new_member_ids = [m.get(constants.ID) for m in new_members] + old_member_ids = [m.get(constants.ID) for m in old_members] + LOG.info( + 'Batch updating members: old=\'%(old)s\', new=\'%(new)s\', ' + 'updated=\'%(updated)s\'...', + {'old': old_member_ids, 'new': new_member_ids, + 'updated': updated_member_ids}) + self.worker.batch_update_members( + old_members, new_members, updated_members) + + def delete_member(self, context, member): + LOG.info('Deleting member \'%s\'...', member.get(constants.MEMBER_ID)) + self.worker.delete_member(member) + + def create_l7policy(self, context, l7policy): + LOG.info('Creating l7policy \'%s\'...', + l7policy.get(constants.L7POLICY_ID)) + self.worker.create_l7policy(l7policy) + + def update_l7policy(self, context, original_l7policy, l7policy_updates): + LOG.info('Updating l7policy \'%s\'...', original_l7policy.get( + constants.L7POLICY_ID)) + self.worker.update_l7policy(original_l7policy, l7policy_updates) + + def delete_l7policy(self, context, l7policy): + LOG.info('Deleting l7policy \'%s\'...', l7policy.get( + constants.L7POLICY_ID)) + self.worker.delete_l7policy(l7policy) + + def create_l7rule(self, context, l7rule): + LOG.info('Creating l7rule \'%s\'...', l7rule.get(constants.L7RULE_ID)) + self.worker.create_l7rule(l7rule) + + def update_l7rule(self, context, original_l7rule, l7rule_updates): + LOG.info('Updating l7rule \'%s\'...', original_l7rule.get( + constants.L7RULE_ID)) + self.worker.update_l7rule(original_l7rule, l7rule_updates) + + def delete_l7rule(self, context, l7rule): + LOG.info('Deleting l7rule \'%s\'...', l7rule.get(constants.L7RULE_ID)) + self.worker.delete_l7rule(l7rule) + + def update_amphora_agent_config(self, context, amphora_id): + LOG.info('Updating amphora \'%s\' agent configuration...', + amphora_id) + self.worker.update_amphora_agent_config(amphora_id) + + def delete_amphora(self, context, amphora_id): + LOG.info('Deleting amphora \'%s\'...', amphora_id) + self.worker.delete_amphora(amphora_id) diff --git 
a/octavia/controller/worker/__init__.py b/octavia/controller/worker/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/worker/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/controller/worker/amphora_rate_limit.py b/octavia/controller/worker/amphora_rate_limit.py new file mode 100644 index 0000000000..302e38798c --- /dev/null +++ b/octavia/controller/worker/amphora_rate_limit.py @@ -0,0 +1,99 @@ +# Copyright 2016 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import time + +from oslo_config import cfg +from oslo_log import log as logging + +from octavia.common import exceptions +from octavia.db import api as db_apis +from octavia.db import repositories as repo + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF +CONF.import_group('haproxy_amphora', 'octavia.common.config') + + +class AmphoraBuildRateLimit: + + def __init__(self): + self.amp_build_slots_repo = repo.AmphoraBuildSlotsRepository() + self.amp_build_req_repo = repo.AmphoraBuildReqRepository() + + def add_to_build_request_queue(self, amphora_id, build_priority): + with db_apis.session().begin() as session: + self.amp_build_req_repo.add_to_build_queue( + session, + amphora_id=amphora_id, + priority=build_priority) + LOG.debug("Added build request for amphora %s to the queue", + amphora_id) + self.wait_for_build_slot(amphora_id) + LOG.info("Build slot for amphora %s is ready", amphora_id) + + def has_build_slot(self): + build_rate_limit = CONF.haproxy_amphora.build_rate_limit + session = db_apis.get_session() + with session.begin(): + used_build_slots = (self.amp_build_slots_repo + .get_used_build_slots_count(session)) + available_build_slots = build_rate_limit - used_build_slots + LOG.debug("Available build slots %d", available_build_slots) + return available_build_slots > 0 + + def has_highest_priority(self, amphora_id): + session = db_apis.get_session() + with session.begin(): + highest_priority_build_req = ( + self.amp_build_req_repo.get_highest_priority_build_req( + session)) + LOG.debug("Highest priority req: %s, Current req: %s", + highest_priority_build_req, amphora_id) + return amphora_id == highest_priority_build_req + + def update_build_status_and_available_build_slots(self, amphora_id): + session = db_apis.get_session() + with session.begin(): + self.amp_build_slots_repo.update_count(session, action='http://github.com/increment') + 
            self.amp_build_req_repo.update_req_status(session, amphora_id)
+
+    def remove_from_build_req_queue(self, amphora_id):
+        session = db_apis.get_session()
+        with session.begin():
+            self.amp_build_req_repo.delete(session, amphora_id=amphora_id)
+            self.amp_build_slots_repo.update_count(session,
+                                                   action='http://github.com/decrement')
+            LOG.debug("Removed request for %s from queue"
+                      " and released the build slot", amphora_id)
+
+    def remove_all_from_build_req_queue(self):
+        session = db_apis.get_session()
+        with session.begin():
+            self.amp_build_req_repo.delete_all(session)
+            self.amp_build_slots_repo.update_count(session, action='http://github.com/reset')
+            LOG.debug("Removed all the build requests and "
+                      "released the build slots")
+
+    def wait_for_build_slot(self, amphora_id):
+        LOG.debug("Waiting for a build slot")
+        for i in range(CONF.haproxy_amphora.build_active_retries):
+            if (self.has_build_slot() and
+                    self.has_highest_priority(amphora_id)):
+                self.update_build_status_and_available_build_slots(amphora_id)
+                return
+            time.sleep(CONF.haproxy_amphora.build_retry_interval)
+        self.remove_all_from_build_req_queue()
+        raise exceptions.ComputeBuildQueueTimeoutException()
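
The wait_for_build_slot() loop above is a bounded poll: check "a slot is
free and I have the highest priority" a fixed number of times, sleep between
attempts, then clear the queue and raise on timeout. A sketch under assumed
values, not part of this patch (the constants stand in for the
haproxy_amphora.build_active_retries/build_retry_interval options, and the
predicates for the repository calls):

# Sketch of the bounded polling in wait_for_build_slot().
import time

BUILD_ACTIVE_RETRIES = 5
BUILD_RETRY_INTERVAL = 0.2


class BuildQueueTimeout(Exception):
    pass


def wait_for_build_slot(has_slot, has_priority, claim_slot):
    for _ in range(BUILD_ACTIVE_RETRIES):
        if has_slot() and has_priority():
            claim_slot()  # mark the request building and consume a slot
            return
        time.sleep(BUILD_RETRY_INTERVAL)
    raise BuildQueueTimeout('no build slot became available')


polls = {'n': 0}


def has_slot():
    polls['n'] += 1
    return polls['n'] >= 3  # a slot frees up on the third poll


wait_for_build_slot(has_slot, lambda: True,
                    lambda: print('slot claimed'))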
diff --git a/octavia/controller/worker/task_utils.py b/octavia/controller/worker/task_utils.py
new file mode 100644
index 0000000000..9438f614f8
--- /dev/null
+++ b/octavia/controller/worker/task_utils.py
@@ -0,0 +1,305 @@
+# Copyright 2016 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Methods common to the controller worker tasks."""
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+import tenacity
+
+from octavia.common import constants
+from octavia.db import api as db_apis
+from octavia.db import repositories as repo
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class TaskUtils:
+    """Class of helper/utility methods used by tasks."""
+
+    status_update_retry = tenacity.retry(
+        retry=tenacity.retry_if_exception_type(Exception),
+        wait=tenacity.wait_incrementing(
+            CONF.controller_worker.db_commit_retry_initial_delay,
+            CONF.controller_worker.db_commit_retry_backoff,
+            CONF.controller_worker.db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.controller_worker.db_commit_retry_attempts),
+        after=tenacity.after_log(LOG, logging.DEBUG))
+
+    def __init__(self, **kwargs):
+        self.amphora_repo = repo.AmphoraRepository()
+        self.health_mon_repo = repo.HealthMonitorRepository()
+        self.listener_repo = repo.ListenerRepository()
+        self.loadbalancer_repo = repo.LoadBalancerRepository()
+        self.member_repo = repo.MemberRepository()
+        self.pool_repo = repo.PoolRepository()
+        self.amp_health_repo = repo.AmphoraHealthRepository()
+        self.l7policy_repo = repo.L7PolicyRepository()
+        self.l7rule_repo = repo.L7RuleRepository()
+        super().__init__(**kwargs)
+
+    def unmark_amphora_health_busy(self, amphora_id):
+        """Unmark the amphora_health record busy for an amphora.
+
+        NOTE: This should only be called from revert methods.
+
+        :param amphora_id: The amphora id to unmark busy
+        """
+        LOG.debug('Unmarking health monitoring busy on amphora: %s',
+                  amphora_id)
+        try:
+            with db_apis.session().begin() as session:
+                self.amp_health_repo.update(session,
+                                            amphora_id=amphora_id,
+                                            busy=False)
+        except Exception as e:
+            LOG.debug('Failed to update amphora health record %(amp)s '
+                      'due to: %(except)s',
+                      {'amp': amphora_id, 'except': str(e)})
+
+    def mark_amphora_status_error(self, amphora_id):
+        """Sets an amphora status to ERROR.
+
+        NOTE: This should only be called from revert methods.
+
+        :param amphora_id: Amphora ID to set the status to ERROR
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.amphora_repo.update(session,
+                                         id=amphora_id,
+                                         status=constants.ERROR)
+        except Exception as e:
+            LOG.error("Failed to update amphora %(amp)s "
+                      "status to ERROR due to: "
+                      "%(except)s", {'amp': amphora_id, 'except': str(e)})
+
+    def mark_health_mon_prov_status_error(self, health_mon_id):
+        """Sets a health monitor provisioning status to ERROR.
+
+        NOTE: This should only be called from revert methods.
+
+        :param health_mon_id: Health Monitor ID to set prov status to ERROR
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.health_mon_repo.update(
+                    session, id=health_mon_id,
+                    provisioning_status=constants.ERROR)
+        except Exception as e:
+            LOG.error("Failed to update health monitor %(health)s "
+                      "provisioning status to ERROR due to: "
+                      "%(except)s",
+                      {'health': health_mon_id, 'except': str(e)})
+
+    def mark_l7policy_prov_status_active(self, l7policy_id):
+        """Sets an L7 policy provisioning status to ACTIVE.
+
+        NOTE: This should only be called from revert methods.
+
+        :param l7policy_id: L7 Policy ID to set provisioning status to ACTIVE
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.l7policy_repo.update(session,
+                                          id=l7policy_id,
+                                          provisioning_status=constants.ACTIVE)
+        except Exception as e:
+            LOG.error("Failed to update l7policy %(l7p)s "
+                      "provisioning status to ACTIVE due to: "
+                      "%(except)s", {'l7p': l7policy_id, 'except': str(e)})
+
+    def mark_l7policy_prov_status_error(self, l7policy_id):
+        """Sets an L7 policy provisioning status to ERROR.
+
+        NOTE: This should only be called from revert methods.
+
+        :param l7policy_id: L7 Policy ID to set provisioning status to ERROR
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.l7policy_repo.update(session,
+                                          id=l7policy_id,
+                                          provisioning_status=constants.ERROR)
+        except Exception as e:
+            LOG.error("Failed to update l7policy %(l7p)s "
+                      "provisioning status to ERROR due to: "
+                      "%(except)s", {'l7p': l7policy_id, 'except': str(e)})
+
+    def mark_l7rule_prov_status_error(self, l7rule_id):
+        """Sets an L7 rule provisioning status to ERROR.
+
+        NOTE: This should only be called from revert methods.
+
+        :param l7rule_id: L7 Rule ID to set provisioning status to ERROR
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.l7rule_repo.update(session,
+                                        id=l7rule_id,
+                                        provisioning_status=constants.ERROR)
+        except Exception as e:
+            LOG.error("Failed to update l7rule %(l7r)s "
+                      "provisioning status to ERROR due to: "
+                      "%(except)s", {'l7r': l7rule_id, 'except': str(e)})
+
+    def mark_listener_prov_status_error(self, listener_id):
+        """Sets a listener provisioning status to ERROR.
+
+        NOTE: This should only be called from revert methods.
+
+        :param listener_id: Listener ID to set provisioning status to ERROR
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.listener_repo.update(session,
+                                          id=listener_id,
+                                          provisioning_status=constants.ERROR)
+        except Exception as e:
+            LOG.error("Failed to update listener %(list)s "
+                      "provisioning status to ERROR due to: "
+                      "%(except)s", {'list': listener_id, 'except': str(e)})
+
+    @status_update_retry
+    def mark_loadbalancer_prov_status_error(self, loadbalancer_id):
+        """Sets a load balancer provisioning status to ERROR.
+
+        NOTE: This should only be called from revert methods.
+
+        :param loadbalancer_id: Load balancer ID to set provisioning
+                                status to ERROR
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.loadbalancer_repo.update(
+                    session,
+                    id=loadbalancer_id,
+                    provisioning_status=constants.ERROR)
+        except Exception as e:
+            # Reraise for tenacity
+            with excutils.save_and_reraise_exception():
+                LOG.error("Failed to update load balancer %(lb)s "
+                          "provisioning status to ERROR due to: "
+                          "%(except)s", {'lb': loadbalancer_id,
+                                         'except': str(e)})
+
+    def mark_listener_prov_status_active(self, listener_id):
+        """Sets a listener provisioning status to ACTIVE.
+
+        NOTE: This should only be called from revert methods.
+
+        :param listener_id: Listener ID to set provisioning
+                            status to ACTIVE
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.listener_repo.update(session,
+                                          id=listener_id,
+                                          provisioning_status=constants.ACTIVE)
+        except Exception as e:
+            LOG.error("Failed to update listener %(list)s "
+                      "provisioning status to ACTIVE due to: "
+                      "%(except)s", {'list': listener_id, 'except': str(e)})
+
+    def mark_pool_prov_status_active(self, pool_id):
+        """Sets a pool provisioning status to ACTIVE.
+
+        NOTE: This should only be called from revert methods.
+
+        :param pool_id: Pool ID to set provisioning status to ACTIVE
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.pool_repo.update(session,
+                                      id=pool_id,
+                                      provisioning_status=constants.ACTIVE)
+        except Exception as e:
+            LOG.error("Failed to update pool %(pool)s provisioning status "
+                      "to ACTIVE due to: %(except)s", {'pool': pool_id,
+                                                       'except': str(e)})
+
+    @status_update_retry
+    def mark_loadbalancer_prov_status_active(self, loadbalancer_id):
+        """Sets a load balancer provisioning status to ACTIVE.
+
+        NOTE: This should only be called from revert methods.
+
+        :param loadbalancer_id: Load balancer ID to set provisioning
+                                status to ACTIVE
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.loadbalancer_repo.update(
+                    session,
+                    id=loadbalancer_id,
+                    provisioning_status=constants.ACTIVE)
+        except Exception as e:
+            # Reraise for tenacity
+            with excutils.save_and_reraise_exception():
+                LOG.error("Failed to update load balancer %(lb)s "
+                          "provisioning status to ACTIVE due to: "
+                          "%(except)s", {'lb': loadbalancer_id,
+                                         'except': str(e)})
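
The except blocks in the two load balancer markers above use oslo.utils'
save_and_reraise_exception so the revert path can log and still let the
original exception propagate to tenacity, which drives the retry. A minimal
sketch of that idiom, not part of this patch:

# Sketch of the excutils.save_and_reraise_exception idiom used above.
from oslo_utils import excutils


def update_or_raise():
    try:
        raise ValueError('db write failed')
    except Exception:
        with excutils.save_and_reraise_exception():
            print('logging before the exception is re-raised')


try:
    update_or_raise()
except ValueError as e:
    print('caller still sees:', e)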
+
+    def mark_member_prov_status_error(self, member_id):
+        """Sets a member provisioning status to ERROR.
+
+        NOTE: This should only be called from revert methods.
+
+        :param member_id: Member ID to set provisioning status to ERROR
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.member_repo.update(session,
+                                        id=member_id,
+                                        provisioning_status=constants.ERROR)
+        except Exception as e:
+            LOG.error("Failed to update member %(member)s "
+                      "provisioning status to ERROR due to: "
+                      "%(except)s", {'member': member_id, 'except': str(e)})
+
+    def mark_pool_prov_status_error(self, pool_id):
+        """Sets a pool provisioning status to ERROR.
+
+        NOTE: This should only be called from revert methods.
+
+        :param pool_id: Pool ID to set provisioning status to ERROR
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.pool_repo.update(session,
+                                      id=pool_id,
+                                      provisioning_status=constants.ERROR)
+        except Exception as e:
+            LOG.error("Failed to update pool %(pool)s "
+                      "provisioning status to ERROR due to: "
+                      "%(except)s", {'pool': pool_id, 'except': str(e)})
+
+    def get_current_loadbalancer_from_db(self, loadbalancer_id):
+        """Gets a load balancer from the DB.
+
+        :param loadbalancer_id: ID of the load balancer to get from the DB
+        """
+        try:
+            with db_apis.session().begin() as session:
+                return self.loadbalancer_repo.get(session,
+                                                  id=loadbalancer_id)
+        except Exception as e:
+            LOG.error("Failed to get loadbalancer %(loadbalancer)s "
+                      "due to: %(except)s",
+                      {'loadbalancer': loadbalancer_id, 'except': str(e)})
+        return None
diff --git a/octavia/controller/worker/v2/__init__.py b/octavia/controller/worker/v2/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/controller/worker/v2/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
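
The status_update_retry attribute in TaskUtils above shows a reusable
tenacity pattern: build one retry policy at class scope and decorate several
methods with it. A standalone sketch, not part of this patch (the wait and
stop numbers are hypothetical stand-ins for the
controller_worker.db_commit_retry_* config options):

# Sketch of the TaskUtils.status_update_retry pattern.
import logging
import tenacity

LOG = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)

status_update_retry = tenacity.retry(
    retry=tenacity.retry_if_exception_type(Exception),
    wait=tenacity.wait_incrementing(start=0.01, increment=0.01, max=0.05),
    stop=tenacity.stop_after_attempt(4),
    after=tenacity.after_log(LOG, logging.DEBUG))


class Repo:
    def __init__(self):
        self.calls = 0

    @status_update_retry
    def mark_active(self, lb_id):
        self.calls += 1
        if self.calls < 3:
            raise RuntimeError('transient DB hiccup')  # retried by tenacity
        print(f'marked {lb_id} ACTIVE after {self.calls} attempts')


Repo().mark_active('lb-1')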
diff --git a/octavia/controller/worker/v2/controller_worker.py b/octavia/controller/worker/v2/controller_worker.py
new file mode 100644
index 0000000000..fa1feb2164
--- /dev/null
+++ b/octavia/controller/worker/v2/controller_worker.py
@@ -0,0 +1,1300 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from octavia_lib.common import constants as lib_consts
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+from sqlalchemy.orm import exc as db_exceptions
+from stevedore import driver as stevedore_driver
+from taskflow.listeners import logging as tf_logging
+import tenacity
+
+from octavia.amphorae.driver_exceptions import exceptions as driver_exc
+from octavia.api.drivers import utils as provider_utils
+from octavia.common import base_taskflow
+from octavia.common import constants
+from octavia.common import exceptions
+from octavia.common import utils
+from octavia.controller.worker.v2.flows import flow_utils
+from octavia.controller.worker.v2 import taskflow_jobboard_driver as tsk_driver
+from octavia.db import api as db_apis
+from octavia.db import repositories as repo
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+# We do not need to log retry exception information. The warning "Could not
+# connect to instance" will be logged as usual.
+def retryMaskFilter(record):
+    if record.exc_info is not None and isinstance(
+            record.exc_info[1], (
+                driver_exc.AmpConnectionRetry,
+                exceptions.ComputeWaitTimeoutException)):
+        return False
+    return True
+
+
+LOG.logger.addFilter(retryMaskFilter)
+
+
+def _is_provisioning_status_pending_update(lb_obj):
+    return lb_obj.provisioning_status != constants.PENDING_UPDATE
+
+
+class ControllerWorker:
+
+    def __init__(self):
+
+        self._amphora_repo = repo.AmphoraRepository()
+        self._amphora_health_repo = repo.AmphoraHealthRepository()
+        self._health_mon_repo = repo.HealthMonitorRepository()
+        self._lb_repo = repo.LoadBalancerRepository()
+        self._listener_repo = repo.ListenerRepository()
+        self._member_repo = repo.MemberRepository()
+        self._pool_repo = repo.PoolRepository()
+        self._l7policy_repo = repo.L7PolicyRepository()
+        self._l7rule_repo = repo.L7RuleRepository()
+        self._flavor_repo = repo.FlavorRepository()
+        self._az_repo = repo.AvailabilityZoneRepository()
+
+        if CONF.task_flow.jobboard_enabled:
+            persistence = tsk_driver.MysqlPersistenceDriver()
+
+            self.jobboard_driver = stevedore_driver.DriverManager(
+                namespace='octavia.worker.jobboard_driver',
+                name=CONF.task_flow.jobboard_backend_driver,
+                invoke_args=(persistence,),
+                invoke_on_load=True).driver
+        else:
+            self.tf_engine = base_taskflow.BaseTaskFlowEngine()
+
+    @tenacity.retry(
+        retry=(
+            tenacity.retry_if_result(_is_provisioning_status_pending_update) |
+            tenacity.retry_if_exception_type()),
+        wait=tenacity.wait_incrementing(
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
+    def _get_db_obj_until_pending_update(self, repo, id):
+
+        session = db_apis.get_session()
+        with session.begin():
+            return repo.get(session, id=id)
+
+    @property
+    def services_controller(self):
+        return base_taskflow.TaskFlowServiceController(self.jobboard_driver)
+
+    def run_flow(self, func, *args, **kwargs):
+        if CONF.task_flow.jobboard_enabled:
+            self.services_controller.run_poster(func, *args, **kwargs)
+        else:
+            store = kwargs.pop('store', None)
+            tf = self.tf_engine.taskflow_load(
+                func(*args, **kwargs), store=store)
+            with tf_logging.DynamicLoggingListener(tf, log=LOG):
+                tf.run()
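
In the non-jobboard branch of run_flow() above, the store dict is how every
flow gets its inputs: taskflow resolves each task's execute() arguments from
the store by name. A toy flow showing that injection, not part of this patch
(PrintLB is hypothetical; the real flows come from flow_utils):

# Toy taskflow flow showing how a store dict feeds task arguments by name.
import taskflow.engines
from taskflow import task
from taskflow.patterns import linear_flow


class PrintLB(task.Task):
    # 'loadbalancer_id' is resolved from the engine store by name.
    def execute(self, loadbalancer_id):
        print('working on', loadbalancer_id)


flow = linear_flow.Flow('demo-flow').add(PrintLB())
taskflow.engines.load(flow, store={'loadbalancer_id': 'lb-1'}).run()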
+
+    def delete_amphora(self, amphora_id):
+        """Deletes an existing Amphora.
+
+        :param amphora_id: ID of the amphora to delete
+        :returns: None
+        :raises AmphoraNotFound: The referenced Amphora was not found
+        """
+        try:
+            session = db_apis.get_session()
+            with session.begin():
+                amphora = self._amphora_repo.get(session,
+                                                 id=amphora_id)
+                store = {constants.AMPHORA: amphora.to_dict()}
+            self.run_flow(
+                flow_utils.get_delete_amphora_flow,
+                store=store)
+        except Exception as e:
+            LOG.error('Failed to delete amphora %s due to: %s',
+                      amphora_id, str(e))
+            return
+        LOG.info('Finished deleting amphora %s.', amphora_id)
+
+    @tenacity.retry(
+        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
+        wait=tenacity.wait_incrementing(
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
+    def create_health_monitor(self, health_monitor):
+        """Creates a health monitor.
+
+        :param health_monitor: Provider health monitor dict
+        :returns: None
+        :raises NoResultFound: Unable to find the object
+        """
+        session = db_apis.get_session()
+        with session.begin():
+            db_health_monitor = self._health_mon_repo.get(
+                session,
+                id=health_monitor[constants.HEALTHMONITOR_ID])
+
+        if not db_health_monitor:
+            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
+                        '60 seconds.', 'healthmonitor',
+                        health_monitor[constants.HEALTHMONITOR_ID])
+            raise db_exceptions.NoResultFound
+
+        pool = db_health_monitor.pool
+        pool.health_monitor = db_health_monitor
+        load_balancer = pool.load_balancer
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+        listeners_dicts = provider_lb.get('listeners', [])
+
+        store = {constants.HEALTH_MON: health_monitor,
+                 constants.POOL_ID: pool.id,
+                 constants.LISTENERS: listeners_dicts,
+                 constants.LOADBALANCER_ID: load_balancer.id,
+                 constants.LOADBALANCER: provider_lb}
+        self.run_flow(
+            flow_utils.get_create_health_monitor_flow,
+            store=store)
+
+    def delete_health_monitor(self, health_monitor):
+        """Deletes a health monitor.
+
+        :param health_monitor: Provider health monitor dict
+        :returns: None
+        :raises HMNotFound: The referenced health monitor was not found
+        """
+        session = db_apis.get_session()
+        with session.begin():
+            db_health_monitor = self._health_mon_repo.get(
+                session,
+                id=health_monitor[constants.HEALTHMONITOR_ID])
+
+        pool = db_health_monitor.pool
+        load_balancer = pool.load_balancer
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+        listeners_dicts = provider_lb.get('listeners', [])
+
+        store = {constants.HEALTH_MON: health_monitor,
+                 constants.POOL_ID: pool.id,
+                 constants.LISTENERS: listeners_dicts,
+                 constants.LOADBALANCER_ID: load_balancer.id,
+                 constants.LOADBALANCER: provider_lb,
+                 constants.PROJECT_ID: load_balancer.project_id}
+        self.run_flow(
+            flow_utils.get_delete_health_monitor_flow,
+            store=store)
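
update_health_monitor() below, like the other update_* entry points, shares
one idiom: re-read the DB row until it enters PENDING_UPDATE, and on
tenacity.RetryError fall back to the last value read via
e.last_attempt.result(). Reduced to standalone form, not part of this patch
(fetch_status is hypothetical):

# Sketch of the "wait for PENDING_UPDATE, else use the last read" idiom.
import tenacity

PENDING_UPDATE = 'PENDING_UPDATE'


def fetch_status(lb_id):
    return 'ACTIVE'  # the row never reaches PENDING_UPDATE in this demo


def _not_pending_update(status):
    return status != PENDING_UPDATE


@tenacity.retry(
    retry=(tenacity.retry_if_result(_not_pending_update) |
           tenacity.retry_if_exception_type()),
    wait=tenacity.wait_fixed(0.01),
    stop=tenacity.stop_after_attempt(3))
def get_until_pending_update(lb_id):
    return fetch_status(lb_id)


try:
    status = get_until_pending_update('lb-1')
except tenacity.RetryError as e:
    status = e.last_attempt.result()  # proceed with the stale row
print(status)  # ACTIVE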
+
+    def update_health_monitor(self, original_health_monitor,
+                              health_monitor_updates):
+        """Updates a health monitor.
+
+        :param original_health_monitor: Provider health monitor dict
+        :param health_monitor_updates: Dict containing updated health monitor
+        :returns: None
+        :raises HMNotFound: The referenced health monitor was not found
+        """
+        try:
+            db_health_monitor = self._get_db_obj_until_pending_update(
+                self._health_mon_repo,
+                original_health_monitor[constants.HEALTHMONITOR_ID])
+        except tenacity.RetryError as e:
+            LOG.warning('Health monitor did not go into %s in 60 seconds. '
+                        'This is either due to an in-progress Octavia '
+                        'upgrade or an overloaded and failing database. '
+                        'Assuming an upgrade is in progress and continuing.',
+                        constants.PENDING_UPDATE)
+            db_health_monitor = e.last_attempt.result()
+
+        pool = db_health_monitor.pool
+
+        load_balancer = pool.load_balancer
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+        listeners_dicts = provider_lb.get('listeners', [])
+
+        store = {constants.HEALTH_MON: original_health_monitor,
+                 constants.POOL_ID: pool.id,
+                 constants.LISTENERS: listeners_dicts,
+                 constants.LOADBALANCER_ID: load_balancer.id,
+                 constants.LOADBALANCER: provider_lb,
+                 constants.UPDATE_DICT: health_monitor_updates}
+        self.run_flow(
+            flow_utils.get_update_health_monitor_flow,
+            store=store)
+
+    @tenacity.retry(
+        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
+        wait=tenacity.wait_incrementing(
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
+    def create_listener(self, listener):
+        """Creates a listener.
+
+        :param listener: A listener provider dictionary.
+        :returns: None
+        :raises NoResultFound: Unable to find the object
+        """
+        session = db_apis.get_session()
+        with session.begin():
+            db_listener = self._listener_repo.get(
+                session, id=listener[constants.LISTENER_ID])
+        if not db_listener:
+            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
+                        '60 seconds.', 'listener',
+                        listener[constants.LISTENER_ID])
+            raise db_exceptions.NoResultFound
+
+        load_balancer = db_listener.load_balancer
+        flavor_dict = {}
+        if load_balancer.flavor_id:
+            with session.begin():
+                flavor_dict = (
+                    self._flavor_repo.get_flavor_metadata_dict(
+                        session, load_balancer.flavor_id))
+        flavor_dict[constants.LOADBALANCER_TOPOLOGY] = load_balancer.topology
+
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+
+        store = {constants.LISTENERS: provider_lb['listeners'],
+                 constants.LOADBALANCER: provider_lb,
+                 constants.LOADBALANCER_ID: load_balancer.id}
+
+        self.run_flow(
+            flow_utils.get_create_listener_flow,
+            flavor_dict=flavor_dict, store=store)
+
+    def delete_listener(self, listener):
+        """Deletes a listener.
+
+        :param listener: A listener provider dictionary to delete
+        :returns: None
+        :raises ListenerNotFound: The referenced listener was not found
+        """
+        try:
+            db_lb = self._get_db_obj_until_pending_update(
+                self._lb_repo, listener[constants.LOADBALANCER_ID])
+        except tenacity.RetryError as e:
+            LOG.warning('Loadbalancer did not go into %s in 60 seconds. '
+                        'This is either due to an in-progress Octavia '
+                        'upgrade or an overloaded and failing database. '
+                        'Assuming an upgrade is in progress and continuing.',
+                        constants.PENDING_UPDATE)
+            db_lb = e.last_attempt.result()
+
+        flavor_dict = {}
+        if db_lb.flavor_id:
+            session = db_apis.get_session()
+            with session.begin():
+                flavor_dict = (
+                    self._flavor_repo.get_flavor_metadata_dict(
+                        session, db_lb.flavor_id))
+        flavor_dict[constants.LOADBALANCER_TOPOLOGY] = db_lb.topology
+
+        store = {constants.LISTENER: listener,
+                 constants.LOADBALANCER_ID:
+                     listener[constants.LOADBALANCER_ID],
+                 constants.PROJECT_ID: listener[constants.PROJECT_ID]}
+        self.run_flow(
+            flow_utils.get_delete_listener_flow, flavor_dict=flavor_dict,
+            store=store)
+
+    def update_listener(self, listener, listener_updates):
+        """Updates a listener.
+
+        :param listener: A listener provider dictionary to update
+        :param listener_updates: Dict containing updated listener attributes
+        :returns: None
+        :raises ListenerNotFound: The referenced listener was not found
+        """
+        try:
+            db_lb = self._get_db_obj_until_pending_update(
+                self._lb_repo, listener[constants.LOADBALANCER_ID])
+        except tenacity.RetryError as e:
+            LOG.warning('Loadbalancer did not go into %s in 60 seconds. '
+                        'This is either due to an in-progress Octavia '
+                        'upgrade or an overloaded and failing database. '
+                        'Assuming an upgrade is in progress and continuing.',
+                        constants.PENDING_UPDATE)
+            db_lb = e.last_attempt.result()
+
+        session = db_apis.get_session()
+        flavor_dict = {}
+        if db_lb.flavor_id:
+            with session.begin():
+                flavor_dict = (
+                    self._flavor_repo.get_flavor_metadata_dict(
+                        session, db_lb.flavor_id))
+        flavor_dict[constants.LOADBALANCER_TOPOLOGY] = db_lb.topology
+
+        store = {constants.LISTENER: listener,
+                 constants.UPDATE_DICT: listener_updates,
+                 constants.LOADBALANCER_ID: db_lb.id,
+                 constants.LISTENERS: [listener]}
+        self.run_flow(
+            flow_utils.get_update_listener_flow, flavor_dict=flavor_dict,
+            store=store)
+
+    @tenacity.retry(
+        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
+        wait=tenacity.wait_incrementing(
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
+    def create_load_balancer(self, loadbalancer, flavor=None,
+                             availability_zone=None):
+        """Creates a load balancer by allocating Amphorae.
+
+        First tries to allocate an existing Amphora in READY state.
+        If none are available it will attempt to build one specifically
+        for this load balancer.
+
+        :param loadbalancer: The dict of load balancer to create
+        :returns: None
+        :raises NoResultFound: Unable to find the object
+        """
+        session = db_apis.get_session()
+        with session.begin():
+            lb = self._lb_repo.get(session,
+                                   id=loadbalancer[constants.LOADBALANCER_ID])
+        if not lb:
+            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
+                        '60 seconds.', 'load_balancer',
+                        loadbalancer[constants.LOADBALANCER_ID])
+            raise db_exceptions.NoResultFound
+
+        store = {lib_consts.LOADBALANCER_ID:
+                     loadbalancer[lib_consts.LOADBALANCER_ID],
+                 constants.BUILD_TYPE_PRIORITY:
+                     constants.LB_CREATE_NORMAL_PRIORITY,
+                 lib_consts.FLAVOR: flavor,
+                 lib_consts.AVAILABILITY_ZONE: availability_zone}
+
+        topology = lb.topology
+        if (not CONF.nova.enable_anti_affinity or
+                topology == constants.TOPOLOGY_SINGLE):
+            store[constants.SERVER_GROUP_ID] = None
+
+        listeners_dicts = (
+            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
+                lb.listeners)
+        )
+
+        store[constants.UPDATE_DICT] = {
+            constants.TOPOLOGY: topology
+        }
+        self.run_flow(
+            flow_utils.get_create_load_balancer_flow,
+            topology, listeners=listeners_dicts, flavor_dict=flavor,
+            store=store)
+
+    def delete_load_balancer(self, load_balancer, cascade=False):
+        """Deletes a load balancer by de-allocating Amphorae.
+
+        :param load_balancer: Dict of the load balancer to delete
+        :returns: None
+        :raises LBNotFound: The referenced load balancer was not found
+        """
+        loadbalancer_id = load_balancer[constants.LOADBALANCER_ID]
+        session = db_apis.get_session()
+        with session.begin():
+            db_lb = self._lb_repo.get(session, id=loadbalancer_id)
+        store = {constants.LOADBALANCER: load_balancer,
+                 constants.LOADBALANCER_ID: loadbalancer_id,
+                 constants.SERVER_GROUP_ID: db_lb.server_group_id,
+                 constants.PROJECT_ID: db_lb.project_id}
+        if cascade:
+            listeners = flow_utils.get_listeners_on_lb(db_lb, True)
+            pools = flow_utils.get_pools_on_lb(db_lb, True)
+
+            self.run_flow(
+                flow_utils.get_cascade_delete_load_balancer_flow,
+                load_balancer, listeners, pools, store=store)
+        else:
+            self.run_flow(
+                flow_utils.get_delete_load_balancer_flow,
+                load_balancer, store=store)
+
+    def update_load_balancer(self, original_load_balancer,
+                             load_balancer_updates):
+        """Updates a load balancer.
+
+        :param original_load_balancer: Dict of the load balancer to update
+        :param load_balancer_updates: Dict containing updated load balancer
+        :returns: None
+        :raises LBNotFound: The referenced load balancer was not found
+        """
+
+        try:
+            self._get_db_obj_until_pending_update(
+                self._lb_repo,
+                original_load_balancer[constants.LOADBALANCER_ID])
+        except tenacity.RetryError:
+            LOG.warning('Load balancer did not go into %s in 60 seconds. '
+                        'This is either due to an in-progress Octavia '
+                        'upgrade or an overloaded and failing database. '
+                        'Assuming an upgrade is in progress and continuing.',
+                        constants.PENDING_UPDATE)
+
+        store = {constants.LOADBALANCER: original_load_balancer,
+                 constants.LOADBALANCER_ID:
+                     original_load_balancer[constants.LOADBALANCER_ID],
+                 constants.UPDATE_DICT: load_balancer_updates}
+
+        self.run_flow(
+            flow_utils.get_update_load_balancer_flow,
+            store=store)
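
The create_* entry points in this class all guard against reading ahead of
the API's own transaction: fetch the row, and if it is not there yet, raise
NoResultFound so the tenacity decorator re-polls with an incrementing wait.
A standalone sketch, not part of this patch (fetch_row and the NoResultFound
stand-in are hypothetical):

# Sketch of the "retry the read until the API's commit is visible" guard.
import tenacity


class NoResultFound(Exception):
    """Stand-in for sqlalchemy.orm.exc.NoResultFound."""


attempts = {'n': 0}


def fetch_row(object_id):
    attempts['n'] += 1
    return {'id': object_id} if attempts['n'] >= 3 else None


@tenacity.retry(
    retry=tenacity.retry_if_exception_type(NoResultFound),
    wait=tenacity.wait_incrementing(start=0.01, increment=0.01, max=0.05),
    stop=tenacity.stop_after_attempt(10))
def get_or_retry(object_id):
    row = fetch_row(object_id)
    if not row:
        raise NoResultFound  # the API worker has not committed yet
    return row


print(get_or_retry('member-1'))  # succeeds on the third poll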
+
+    @tenacity.retry(
+        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
+        wait=tenacity.wait_incrementing(
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
+    def create_member(self, member):
+        """Creates a pool member.
+
+        :param member: A member provider dictionary to create
+        :returns: None
+        :raises NoSuitablePool: Unable to find the node pool
+        """
+        session = db_apis.get_session()
+        with session.begin():
+            db_member = self._member_repo.get(session,
+                                              id=member[constants.MEMBER_ID])
+        if not db_member:
+            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
+                        '60 seconds.', 'member',
+                        member[constants.MEMBER_ID])
+            raise db_exceptions.NoResultFound
+
+        pool = db_member.pool
+        load_balancer = pool.load_balancer
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+        listeners_dicts = provider_lb.get('listeners', [])
+
+        store = {
+            constants.MEMBER: member,
+            constants.LISTENERS: listeners_dicts,
+            constants.LOADBALANCER_ID: load_balancer.id,
+            constants.LOADBALANCER: provider_lb,
+            constants.POOL_ID: pool.id}
+        if load_balancer.availability_zone:
+            with session.begin():
+                store[constants.AVAILABILITY_ZONE] = (
+                    self._az_repo.get_availability_zone_metadata_dict(
+                        session, load_balancer.availability_zone))
+        else:
+            store[constants.AVAILABILITY_ZONE] = {}
+
+        self.run_flow(
+            flow_utils.get_create_member_flow,
+            store=store)
+
+    def delete_member(self, member):
+        """Deletes a pool member.
+
+        :param member: A member provider dictionary to delete
+        :returns: None
+        :raises MemberNotFound: The referenced member was not found
+        """
+        session = db_apis.get_session()
+        with session.begin():
+            pool = self._pool_repo.get(session,
+                                       id=member[constants.POOL_ID])
+
+        load_balancer = pool.load_balancer
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+        listeners_dicts = provider_lb.get('listeners', [])
+
+        store = {
+            constants.MEMBER: member,
+            constants.LISTENERS: listeners_dicts,
+            constants.LOADBALANCER_ID: load_balancer.id,
+            constants.LOADBALANCER: provider_lb,
+            constants.POOL_ID: pool.id,
+            constants.PROJECT_ID: load_balancer.project_id}
+        if load_balancer.availability_zone:
+            with session.begin():
+                store[constants.AVAILABILITY_ZONE] = (
+                    self._az_repo.get_availability_zone_metadata_dict(
+                        session, load_balancer.availability_zone))
+        else:
+            store[constants.AVAILABILITY_ZONE] = {}
+
+        self.run_flow(
+            flow_utils.get_delete_member_flow,
+            store=store)
+
+    @tenacity.retry(
+        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
+        wait=tenacity.wait_incrementing(
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
+    def batch_update_members(self, old_members, new_members,
+                             updated_members):
+        session = db_apis.get_session()
+        with session.begin():
+            db_new_members = [
+                self._member_repo.get(
+                    session, id=member[constants.MEMBER_ID])
+                for member in new_members]
+        # The API may not have committed all of the new member records yet.
+        # Make sure we retry looking them up.
+        if None in db_new_members or len(db_new_members) != len(new_members):
+            LOG.warning('Failed to fetch one of the new members from DB. '
+                        'Retrying for up to 60 seconds.')
+            raise db_exceptions.NoResultFound
+
+        with session.begin():
+            updated_members = [
+                (provider_utils.db_member_to_provider_member(
+                    self._member_repo.get(session,
+                                          id=m.get(constants.ID))).to_dict(),
+                 m)
+                for m in updated_members]
+            provider_old_members = [
+                provider_utils.db_member_to_provider_member(
+                    self._member_repo.get(session,
+                                          id=m.get(constants.ID))).to_dict()
+                for m in old_members]
+            if old_members:
+                pool = self._pool_repo.get(
+                    session, id=old_members[0][constants.POOL_ID])
+            elif new_members:
+                pool = self._pool_repo.get(
+                    session, id=new_members[0][constants.POOL_ID])
+            else:
+                pool = self._pool_repo.get(
+                    session,
+                    id=updated_members[0][0][constants.POOL_ID])
+            load_balancer = pool.load_balancer
+
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+        listeners_dicts = provider_lb.get('listeners', [])
+
+        store = {
+            constants.LISTENERS: listeners_dicts,
+            constants.LOADBALANCER_ID: load_balancer.id,
+            constants.LOADBALANCER: provider_lb,
+            constants.POOL_ID: pool.id,
+            constants.PROJECT_ID: load_balancer.project_id}
+        if load_balancer.availability_zone:
+            with session.begin():
+                store[constants.AVAILABILITY_ZONE] = (
+                    self._az_repo.get_availability_zone_metadata_dict(
+                        session, load_balancer.availability_zone))
+        else:
+            store[constants.AVAILABILITY_ZONE] = {}
+
+        self.run_flow(
+            flow_utils.get_batch_update_members_flow,
+            provider_old_members, new_members, updated_members,
+            store=store)
+
+    def update_member(self, member, member_updates):
+        """Updates a pool member.
+
+        :param member: A member provider dictionary to update
+        :param member_updates: Dict containing updated member attributes
+        :returns: None
+        :raises MemberNotFound: The referenced member was not found
+        """
+
+        try:
+            db_member = self._get_db_obj_until_pending_update(
+                self._member_repo, member[constants.MEMBER_ID])
+        except tenacity.RetryError as e:
+            LOG.warning('Member did not go into %s in 60 seconds. '
+                        'This is either due to an in-progress Octavia '
+                        'upgrade or an overloaded and failing database. '
+                        'Assuming an upgrade is in progress and continuing.',
+                        constants.PENDING_UPDATE)
+            db_member = e.last_attempt.result()
+
+        pool = db_member.pool
+        load_balancer = pool.load_balancer
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+        listeners_dicts = provider_lb.get('listeners', [])
+        store = {
+            constants.MEMBER: member,
+            constants.LISTENERS: listeners_dicts,
+            constants.LOADBALANCER_ID: load_balancer.id,
+            constants.LOADBALANCER: provider_lb,
+            constants.POOL_ID: pool.id,
+            constants.UPDATE_DICT: member_updates}
+        if load_balancer.availability_zone:
+            session = db_apis.get_session()
+            with session.begin():
+                store[constants.AVAILABILITY_ZONE] = (
+                    self._az_repo.get_availability_zone_metadata_dict(
+                        session, load_balancer.availability_zone))
+        else:
+            store[constants.AVAILABILITY_ZONE] = {}
+
+        self.run_flow(
+            flow_utils.get_update_member_flow,
+            store=store)
+
+    @tenacity.retry(
+        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
+        wait=tenacity.wait_incrementing(
+            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
+            CONF.haproxy_amphora.api_db_commit_retry_backoff,
+            CONF.haproxy_amphora.api_db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.haproxy_amphora.api_db_commit_retry_attempts))
+    def create_pool(self, pool):
+        """Creates a node pool.
+
+        :param pool: Provider pool dict to create
+        :returns: None
+        :raises NoResultFound: Unable to find the object
+        """
+
+        # TODO(ataraday) It seems we need to get db pool here anyway to get
+        # proper listeners
+        session = db_apis.get_session()
+        with session.begin():
+            db_pool = self._pool_repo.get(session,
+                                          id=pool[constants.POOL_ID])
+        if not db_pool:
+            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
+                        '60 seconds.', 'pool', pool[constants.POOL_ID])
+            raise db_exceptions.NoResultFound
+
+        load_balancer = db_pool.load_balancer
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+        listeners_dicts = provider_lb.get('listeners', [])
+
+        store = {constants.POOL_ID: pool[constants.POOL_ID],
+                 constants.LISTENERS: listeners_dicts,
+                 constants.LOADBALANCER_ID: load_balancer.id,
+                 constants.LOADBALANCER: provider_lb}
+        self.run_flow(
+            flow_utils.get_create_pool_flow,
+            store=store)
+
+    def delete_pool(self, pool):
+        """Deletes a node pool.
+
+        :param pool: Provider pool dict to delete
+        :returns: None
+        :raises PoolNotFound: The referenced pool was not found
+        """
+        session = db_apis.get_session()
+        with session.begin():
+            db_pool = self._pool_repo.get(session,
+                                          id=pool[constants.POOL_ID])
+
+        load_balancer = db_pool.load_balancer
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+        listeners_dicts = provider_lb.get('listeners', [])
+
+        store = {constants.POOL_ID: pool[constants.POOL_ID],
+                 constants.LISTENERS: listeners_dicts,
+                 constants.LOADBALANCER: provider_lb,
+                 constants.LOADBALANCER_ID: load_balancer.id,
+                 constants.PROJECT_ID: db_pool.project_id}
+        self.run_flow(
+            flow_utils.get_delete_pool_flow,
+            store=store)
+
+    def update_pool(self, origin_pool, pool_updates):
+        """Updates a node pool.
+
+        :param origin_pool: Provider pool dict to update
+        :param pool_updates: Dict containing updated pool attributes
+        :returns: None
+        :raises PoolNotFound: The referenced pool was not found
+        """
+        try:
+            db_pool = self._get_db_obj_until_pending_update(
+                self._pool_repo, origin_pool[constants.POOL_ID])
+        except tenacity.RetryError as e:
+            LOG.warning('Pool did not go into %s in 60 seconds. '
+                        'This is either due to an in-progress Octavia '
+                        'upgrade or an overloaded and failing database. '
+                        'Assuming an upgrade is in progress and continuing.',
+                        constants.PENDING_UPDATE)
+            db_pool = e.last_attempt.result()
+
+        load_balancer = db_pool.load_balancer
+        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+            load_balancer).to_dict(recurse=True)
+        listeners_dicts = provider_lb.get('listeners', [])
+
+        store = {constants.POOL_ID: db_pool.id,
+                 constants.LISTENERS: listeners_dicts,
+                 constants.LOADBALANCER: provider_lb,
+                 constants.LOADBALANCER_ID: load_balancer.id,
+                 constants.UPDATE_DICT: pool_updates}
+        self.run_flow(
+            flow_utils.get_update_pool_flow,
+            store=store)
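
retryMaskFilter near the top of this module is an ordinary logging filter:
if a record carries exc_info whose exception is an expected retryable type,
the record is dropped so retry loops do not flood the logs. A standalone
sketch, not part of this patch (RetryableError is hypothetical):

# Standalone sketch of the retryMaskFilter technique used above.
import logging


class RetryableError(Exception):
    pass


def retry_mask_filter(record):
    # record.exc_info is set when logging with exc_info=True; returning
    # False drops the record.
    if record.exc_info is not None and isinstance(
            record.exc_info[1], RetryableError):
        return False
    return True


logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger('demo')
LOG.addFilter(retry_mask_filter)

try:
    raise RetryableError('amphora not reachable yet')
except RetryableError:
    LOG.warning('masked', exc_info=True)  # filtered out
LOG.warning('still visible')  # printed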
+ + :param l7policy: Provider dict of the l7policy to create + :returns: None + :raises NoResultFound: Unable to find the object + """ + session = db_apis.get_session() + with session.begin(): + db_l7policy = self._l7policy_repo.get( + session, id=l7policy[constants.L7POLICY_ID]) + if not db_l7policy: + LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' + '60 seconds.', 'l7policy', + l7policy[constants.L7POLICY_ID]) + raise db_exceptions.NoResultFound + + db_listener = db_l7policy.listener + + listeners_dicts = ( + provider_utils.db_listeners_to_provider_dicts_list_of_dicts( + [db_listener])) + + store = {constants.L7POLICY: l7policy, + constants.LISTENERS: listeners_dicts, + constants.LOADBALANCER_ID: db_listener.load_balancer.id + } + self.run_flow( + flow_utils.get_create_l7policy_flow, + store=store) + + def delete_l7policy(self, l7policy): + """Deletes an L7 policy. + + :param l7policy: Provider dict of the l7policy to delete + :returns: None + :raises L7PolicyNotFound: The referenced l7policy was not found + """ + session = db_apis.get_session() + with session.begin(): + db_listener = self._listener_repo.get( + session, id=l7policy[constants.LISTENER_ID]) + listeners_dicts = ( + provider_utils.db_listeners_to_provider_dicts_list_of_dicts( + [db_listener])) + + store = {constants.L7POLICY: l7policy, + constants.LISTENERS: listeners_dicts, + constants.LOADBALANCER_ID: db_listener.load_balancer.id + } + self.run_flow( + flow_utils.get_delete_l7policy_flow, + store=store) + + def update_l7policy(self, original_l7policy, l7policy_updates): + """Updates an L7 policy. + + :param l7policy: Provider dict of the l7policy to update + :param l7policy_updates: Dict containing updated l7policy attributes + :returns: None + :raises L7PolicyNotFound: The referenced l7policy was not found + """ + try: + db_l7policy = self._get_db_obj_until_pending_update( + self._l7policy_repo, original_l7policy[constants.L7POLICY_ID]) + except tenacity.RetryError as e: + LOG.warning('L7 policy did not go into %s in 60 seconds. ' + 'This either due to an in-progress Octavia upgrade ' + 'or an overloaded and failing database. Assuming ' + 'an upgrade is in progress and continuing.', + constants.PENDING_UPDATE) + db_l7policy = e.last_attempt.result() + + db_listener = db_l7policy.listener + + listeners_dicts = ( + provider_utils.db_listeners_to_provider_dicts_list_of_dicts( + [db_listener])) + + store = {constants.L7POLICY: original_l7policy, + constants.LISTENERS: listeners_dicts, + constants.LOADBALANCER_ID: db_listener.load_balancer.id, + constants.UPDATE_DICT: l7policy_updates} + self.run_flow( + flow_utils.get_update_l7policy_flow, + store=store) + + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( + CONF.haproxy_amphora.api_db_commit_retry_initial_delay, + CONF.haproxy_amphora.api_db_commit_retry_backoff, + CONF.haproxy_amphora.api_db_commit_retry_max), + stop=tenacity.stop_after_attempt( + CONF.haproxy_amphora.api_db_commit_retry_attempts)) + def create_l7rule(self, l7rule): + """Creates an L7 Rule. + + :param l7rule: Provider dict l7rule + :returns: None + :raises NoResultFound: Unable to find the object + """ + session = db_apis.get_session() + with session.begin(): + db_l7rule = self._l7rule_repo.get(session, + id=l7rule[constants.L7RULE_ID]) + if not db_l7rule: + LOG.warning('Failed to fetch %s %s from DB. 
Retrying for up to ' + '60 seconds.', 'l7rule', + l7rule[constants.L7RULE_ID]) + raise db_exceptions.NoResultFound + + db_l7policy = db_l7rule.l7policy + + load_balancer = db_l7policy.listener.load_balancer + + listeners_dicts = ( + provider_utils.db_listeners_to_provider_dicts_list_of_dicts( + [db_l7policy.listener])) + l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy( + db_l7policy) + + store = {constants.L7RULE: l7rule, + constants.L7POLICY: l7policy_dict.to_dict(), + constants.L7POLICY_ID: db_l7policy.id, + constants.LISTENERS: listeners_dicts, + constants.LOADBALANCER_ID: load_balancer.id + } + self.run_flow( + flow_utils.get_create_l7rule_flow, + store=store) + + def delete_l7rule(self, l7rule): + """Deletes an L7 rule. + + :param l7rule: Provider dict of the l7rule to delete + :returns: None + :raises L7RuleNotFound: The referenced l7rule was not found + """ + session = db_apis.get_session() + with session.begin(): + db_l7policy = self._l7policy_repo.get( + session, id=l7rule[constants.L7POLICY_ID]) + l7policy = provider_utils.db_l7policy_to_provider_l7policy(db_l7policy) + load_balancer = db_l7policy.listener.load_balancer + + listeners_dicts = ( + provider_utils.db_listeners_to_provider_dicts_list_of_dicts( + [db_l7policy.listener])) + + store = {constants.L7RULE: l7rule, + constants.L7POLICY: l7policy.to_dict(), + constants.LISTENERS: listeners_dicts, + constants.L7POLICY_ID: db_l7policy.id, + constants.LOADBALANCER_ID: load_balancer.id + } + self.run_flow( + flow_utils.get_delete_l7rule_flow, + store=store) + + def update_l7rule(self, original_l7rule, l7rule_updates): + """Updates an L7 rule. + + :param l7rule: Origin dict of the l7rule to update + :param l7rule_updates: Dict containing updated l7rule attributes + :returns: None + :raises L7RuleNotFound: The referenced l7rule was not found + """ + try: + db_l7rule = self._get_db_obj_until_pending_update( + self._l7rule_repo, original_l7rule[constants.L7RULE_ID]) + except tenacity.RetryError as e: + LOG.warning('L7 rule did not go into %s in 60 seconds. ' + 'This either due to an in-progress Octavia upgrade ' + 'or an overloaded and failing database. Assuming ' + 'an upgrade is in progress and continuing.', + constants.PENDING_UPDATE) + db_l7rule = e.last_attempt.result() + db_l7policy = db_l7rule.l7policy + load_balancer = db_l7policy.listener.load_balancer + + listeners_dicts = ( + provider_utils.db_listeners_to_provider_dicts_list_of_dicts( + [db_l7policy.listener])) + l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy( + db_l7policy) + + store = {constants.L7RULE: original_l7rule, + constants.L7POLICY: l7policy_dict.to_dict(), + constants.LISTENERS: listeners_dicts, + constants.L7POLICY_ID: db_l7policy.id, + constants.LOADBALANCER_ID: load_balancer.id, + constants.UPDATE_DICT: l7rule_updates} + self.run_flow( + flow_utils.get_update_l7rule_flow, + store=store) + + def failover_amphora(self, amphora_id, reraise=False): + """Perform failover operations for an amphora. + + Note: This expects the load balancer to already be in + provisioning_status=PENDING_UPDATE state. 
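+
+        A minimal calling sketch (illustrative only; the worker instance
+        name is assumed, not part of this patch)::
+
+            worker = ControllerWorker()
+            # The API layer has already set the LB to PENDING_UPDATE
+            worker.failover_amphora(amphora_id, reraise=True)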
+ + :param amphora_id: ID for amphora to failover + :param reraise: If enabled reraise any caught exception + :returns: None + :raises octavia.common.exceptions.NotFound: The referenced amphora was + not found + """ + amphora = None + try: + session = db_apis.get_session() + with session.begin(): + amphora = self._amphora_repo.get(session, + id=amphora_id) + if amphora is None: + LOG.error('Amphora failover for amphora %s failed because ' + 'there is no record of this amphora in the ' + 'database. Check that the [house_keeping] ' + 'amphora_expiry_age configuration setting is not ' + 'too short. Skipping failover.', amphora_id) + raise exceptions.NotFound(resource=constants.AMPHORA, + id=amphora_id) + + if amphora.status == constants.DELETED: + LOG.warning('Amphora %s is marked DELETED in the database but ' + 'was submitted for failover. Deleting it from the ' + 'amphora health table to exclude it from health ' + 'checks and skipping the failover.', amphora.id) + with session.begin(): + self._amphora_health_repo.delete(session, + amphora_id=amphora.id) + return + + loadbalancer = None + if amphora.load_balancer_id: + with session.begin(): + loadbalancer = self._lb_repo.get( + session, id=amphora.load_balancer_id) + lb_amp_count = None + if loadbalancer: + if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: + lb_amp_count = 2 + elif loadbalancer.topology == constants.TOPOLOGY_SINGLE: + lb_amp_count = 1 + + az_metadata = {} + flavor_dict = {} + lb_id = None + vip_dict = {} + additional_vip_dicts = [] + server_group_id = None + if loadbalancer: + lb_id = loadbalancer.id + # Even if the LB doesn't have a flavor, create one and + # pass through the topology. + flavor_dict = {} + if loadbalancer.flavor_id: + with session.begin(): + flavor_dict = ( + self._flavor_repo.get_flavor_metadata_dict( + session, loadbalancer.flavor_id)) + flavor_dict[constants.LOADBALANCER_TOPOLOGY] = ( + loadbalancer.topology) + if loadbalancer.availability_zone: + with session.begin(): + az_metadata = ( + self._az_repo.get_availability_zone_metadata_dict( + session, + loadbalancer.availability_zone)) + vip_dict = loadbalancer.vip.to_dict() + additional_vip_dicts = [ + av.to_dict() + for av in loadbalancer.additional_vips] + server_group_id = loadbalancer.server_group_id + provider_lb_dict = (provider_utils. 
+                                db_loadbalancer_to_provider_loadbalancer)(
+                loadbalancer).to_dict() if loadbalancer else loadbalancer
+
+            stored_params = {constants.AVAILABILITY_ZONE: az_metadata,
+                             constants.BUILD_TYPE_PRIORITY:
+                                 constants.LB_CREATE_FAILOVER_PRIORITY,
+                             constants.FLAVOR: flavor_dict,
+                             constants.LOADBALANCER: provider_lb_dict,
+                             constants.SERVER_GROUP_ID: server_group_id,
+                             constants.LOADBALANCER_ID: lb_id,
+                             constants.VIP: vip_dict,
+                             constants.ADDITIONAL_VIPS: additional_vip_dicts}
+
+            self.run_flow(
+                flow_utils.get_failover_amphora_flow,
+                amphora.to_dict(), lb_amp_count, flavor_dict=flavor_dict,
+                store=stored_params)
+
+            LOG.info("Successfully completed the failover for an amphora: %s",
+                     {"id": amphora_id,
+                      "load_balancer_id": lb_id,
+                      "lb_network_ip": amphora.lb_network_ip,
+                      "compute_id": amphora.compute_id,
+                      "role": amphora.role})
+
+        except Exception as e:
+            with excutils.save_and_reraise_exception(reraise=reraise):
+                LOG.exception("Amphora %s failover exception: %s",
+                              amphora_id, str(e))
+                with session.begin():
+                    self._amphora_repo.update(session,
+                                              amphora_id,
+                                              status=constants.ERROR)
+                    if amphora and amphora.load_balancer_id:
+                        self._lb_repo.update(
+                            session, amphora.load_balancer_id,
+                            provisioning_status=constants.ERROR)
+
+    @staticmethod
+    def _get_amphorae_for_failover(load_balancer):
+        """Returns an ordered list of amphora to failover.
+
+        :param load_balancer: The load balancer being failed over.
+        :returns: An ordered list of amphora to failover,
+                  first amp to failover is last in the list
+        :raises octavia.common.exceptions.InvalidTopology: LB has an unknown
+                topology.
+        """
+        if load_balancer.topology == constants.TOPOLOGY_SINGLE:
+            # In SINGLE topology, amp failover order does not matter
+            return [a.to_dict() for a in load_balancer.amphorae
+                    if a.status != constants.DELETED]
+
+        if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
+            # In Active/Standby we should prefer to fail over the standby
+            # amp first, in case the Active is still able to pass
+            # traffic.
+            # Note: The active amp can switch at any time and in less than a
+            # second, so this is "best effort".
+            amphora_driver = utils.get_amphora_driver()
+            timeout_dict = {
+                constants.CONN_MAX_RETRIES:
+                    CONF.haproxy_amphora.failover_connection_max_retries,
+                constants.CONN_RETRY_INTERVAL:
+                    CONF.haproxy_amphora.failover_connection_retry_interval}
+            amps = []
+            selected_amp = None
+            for amp in load_balancer.amphorae:
+                if amp.status == constants.DELETED:
+                    continue
+                if selected_amp is None:
+                    try:
+                        if amphora_driver.get_interface_from_ip(
+                                amp, load_balancer.vip.ip_address,
+                                timeout_dict):
+                            # This is a potential ACTIVE, add it to the list
+                            amps.append(amp.to_dict())
+                        else:
+                            # This one doesn't have the VIP IP, so start
+                            # failovers here.
+                            selected_amp = amp
+                            LOG.debug("Selected amphora %s as the initial "
+                                      "failover amphora.", amp.id)
+                    except Exception:
+                        # This amphora is broken, so start failovers here.
+                        selected_amp = amp
+                else:
+                    # We have already found a STANDBY, so add the rest to the
+                    # list without querying them.
+                    amps.append(amp.to_dict())
+            # Put the selected amphora at the end of the list so it is
+            # first to failover.
+            if selected_amp:
+                amps.append(selected_amp.to_dict())
+            return amps
+
+        LOG.error('Unknown load balancer topology found: %s, aborting '
+                  'failover.', load_balancer.topology)
+        raise exceptions.InvalidTopology(topology=load_balancer.topology)
+
+    def failover_loadbalancer(self, load_balancer_id):
+        """Perform failover operations for a load balancer.
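+
+        Amphorae are failed over in the order computed by
+        _get_amphorae_for_failover; for ACTIVE_STANDBY topologies the
+        suspected standby amphora is failed over first.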
+ + Note: This expects the load balancer to already be in + provisioning_status=PENDING_UPDATE state. + + :param load_balancer_id: ID for load balancer to failover + :returns: None + :raises octavia.common.exceptions.NotFound: The load balancer was not + found. + """ + try: + session = db_apis.get_session() + with session.begin(): + lb = self._lb_repo.get(session, + id=load_balancer_id) + if lb is None: + raise exceptions.NotFound(resource=constants.LOADBALANCER, + id=load_balancer_id) + + # Get the ordered list of amphorae to failover for this LB. + amps = self._get_amphorae_for_failover(lb) + + if lb.topology == constants.TOPOLOGY_SINGLE: + if len(amps) != 1: + LOG.warning('%d amphorae found on load balancer %s where ' + 'one should exist. Repairing.', len(amps), + load_balancer_id) + elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: + + if len(amps) != 2: + LOG.warning('%d amphorae found on load balancer %s where ' + 'two should exist. Repairing.', len(amps), + load_balancer_id) + else: + LOG.error('Unknown load balancer topology found: %s, aborting ' + 'failover!', lb.topology) + raise exceptions.InvalidTopology(topology=lb.topology) + + # We must provide a topology in the flavor definition + # here for the amphora to be created with the correct + # configuration. + flavor = {} + if lb.flavor_id: + with session.begin(): + flavor = self._flavor_repo.get_flavor_metadata_dict( + session, lb.flavor_id) + flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology + + if lb: + provider_lb_dict = ( + provider_utils.db_loadbalancer_to_provider_loadbalancer( + lb).to_dict()) + else: + provider_lb_dict = lb + + provider_lb_dict[constants.FLAVOR] = flavor + + stored_params = {constants.LOADBALANCER: provider_lb_dict, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_FAILOVER_PRIORITY, + constants.SERVER_GROUP_ID: lb.server_group_id, + constants.LOADBALANCER_ID: lb.id, + constants.FLAVOR: flavor} + + if lb.availability_zone: + with session.begin(): + stored_params[constants.AVAILABILITY_ZONE] = ( + self._az_repo.get_availability_zone_metadata_dict( + session, lb.availability_zone)) + else: + stored_params[constants.AVAILABILITY_ZONE] = {} + + self.run_flow( + flow_utils.get_failover_LB_flow, amps, provider_lb_dict, + store=stored_params) + + LOG.info('Failover of load balancer %s completed successfully.', + lb.id) + + except Exception as e: + with excutils.save_and_reraise_exception(reraise=False): + LOG.exception("LB %(lbid)s failover exception: %(exc)s", + {'lbid': load_balancer_id, 'exc': str(e)}) + with session.begin(): + self._lb_repo.update( + session, load_balancer_id, + provisioning_status=constants.ERROR) + + def amphora_cert_rotation(self, amphora_id): + """Perform cert rotation for an amphora. + + :param amphora_id: ID for amphora to rotate + :returns: None + :raises AmphoraNotFound: The referenced amphora was not found + """ + + session = db_apis.get_session() + with session.begin(): + amp = self._amphora_repo.get(session, + id=amphora_id) + LOG.info("Start amphora cert rotation, amphora's id is: %s", + amphora_id) + + store = {constants.AMPHORA: amp.to_dict(), + constants.AMPHORA_ID: amphora_id} + + self.run_flow( + flow_utils.cert_rotate_amphora_flow, + store=store) + LOG.info("Finished amphora cert rotation, amphora's id was: %s", + amphora_id) + + def update_amphora_agent_config(self, amphora_id): + """Update the amphora agent configuration. 
+
+        Note: This will update the amphora agent configuration file and
+        update the running configuration for mutable configuration
+        items.
+
+        :param amphora_id: ID of the amphora to update.
+        :returns: None
+        """
+        LOG.info("Start amphora agent configuration update, amphora's id "
+                 "is: %s", amphora_id)
+        session = db_apis.get_session()
+        with session.begin():
+            amp = self._amphora_repo.get(session, id=amphora_id)
+            lb = self._amphora_repo.get_lb_for_amphora(session,
+                                                       amphora_id)
+            flavor = {}
+            if lb.flavor_id:
+                flavor = self._flavor_repo.get_flavor_metadata_dict(
+                    session, lb.flavor_id)
+
+        store = {constants.AMPHORA: amp.to_dict(),
+                 constants.FLAVOR: flavor}
+
+        self.run_flow(
+            flow_utils.update_amphora_config_flow,
+            store=store)
+        LOG.info("Finished amphora agent configuration update, amphora's id "
+                 "was: %s", amphora_id)
diff --git a/octavia/controller/worker/v2/flows/__init__.py b/octavia/controller/worker/v2/flows/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/controller/worker/v2/flows/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/octavia/controller/worker/v2/flows/amphora_flows.py b/octavia/controller/worker/v2/flows/amphora_flows.py
new file mode 100644
index 0000000000..d412fe896d
--- /dev/null
+++ b/octavia/controller/worker/v2/flows/amphora_flows.py
@@ -0,0 +1,654 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
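+#
+# NOTE: A rough sketch of how these flow factories are consumed by the
+# controller worker, shown with taskflow's public engine helper
+# (illustrative only, not part of this patch):
+#
+#     from taskflow import engines
+#
+#     flow = AmphoraFlows().get_failover_amphora_flow(amp_dict, 2)
+#     engines.run(flow, store=stored_params)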
+# + +from oslo_config import cfg +from oslo_log import log as logging +from taskflow.patterns import linear_flow +from taskflow.patterns import unordered_flow + +from octavia.common import constants +from octavia.common import utils +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import cert_task +from octavia.controller.worker.v2.tasks import compute_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import network_tasks +from octavia.controller.worker.v2.tasks import retry_tasks +from octavia.controller.worker.v2.tasks import shim_tasks + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class AmphoraFlows: + + def get_amphora_for_lb_subflow(self, prefix, role): + """Create a new amphora for lb.""" + + sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW + create_amp_for_lb_subflow = linear_flow.Flow(sf_name) + create_amp_for_lb_subflow.add(database_tasks.CreateAmphoraInDB( + name=sf_name + '-' + constants.CREATE_AMPHORA_INDB, + requires=constants.LOADBALANCER_ID, + provides=constants.AMPHORA_ID)) + + create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask( + name=sf_name + '-' + constants.GENERATE_SERVER_PEM, + provides=constants.SERVER_PEM)) + + create_amp_for_lb_subflow.add( + database_tasks.UpdateAmphoraDBCertExpiration( + name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION, + requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) + + create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate( + name=sf_name + '-' + constants.CERT_COMPUTE_CREATE, + requires=(constants.AMPHORA_ID, constants.SERVER_PEM, + constants.BUILD_TYPE_PRIORITY, + constants.SERVER_GROUP_ID, + constants.FLAVOR, constants.AVAILABILITY_ZONE), + provides=constants.COMPUTE_ID)) + create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId( + name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID, + requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) + create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBootingInDB( + name=sf_name + '-' + constants.MARK_AMPHORA_BOOTING_INDB, + requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) + create_amp_for_lb_subflow.add(self._retry_compute_wait_flow(sf_name)) + create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraInfo( + name=sf_name + '-' + constants.UPDATE_AMPHORA_INFO, + requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), + provides=constants.AMPHORA)) + create_amp_for_lb_subflow.add(self._retry_flow(sf_name)) + create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize( + name=sf_name + '-' + constants.AMPHORA_FINALIZE, + requires=constants.AMPHORA)) + create_amp_for_lb_subflow.add( + database_tasks.MarkAmphoraAllocatedInDB( + name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB, + requires=(constants.AMPHORA, constants.LOADBALANCER_ID))) + if role == constants.ROLE_MASTER: + create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB( + name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB, + requires=constants.AMPHORA)) + elif role == constants.ROLE_BACKUP: + create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB( + name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB, + requires=constants.AMPHORA)) + elif role == constants.ROLE_STANDALONE: + create_amp_for_lb_subflow.add( + database_tasks.MarkAmphoraStandAloneInDB( + name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB, + requires=constants.AMPHORA)) 
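+        # At this point the subflow has created the DB record, generated
+        # the server certificate, booted the compute instance, waited for
+        # agent connectivity, finalized the amphora and recorded its role.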
+
+        return create_amp_for_lb_subflow
+
+    def _retry_compute_wait_flow(self, sf_name):
+        retry_task = sf_name + '-' + constants.COMPUTE_WAIT
+        retry_subflow = linear_flow.Flow(
+            sf_name + '-' + constants.COMPUTE_CREATE_RETRY_SUBFLOW,
+            retry=compute_tasks.ComputeRetry())
+        retry_subflow.add(
+            compute_tasks.ComputeWait(
+                name=retry_task,
+                requires=(constants.COMPUTE_ID, constants.AMPHORA_ID),
+                provides=constants.COMPUTE_OBJ))
+        return retry_subflow
+
+    def _retry_flow(self, sf_name):
+        retry_task = sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT
+        retry_subflow = linear_flow.Flow(
+            sf_name + '-' + constants.CREATE_AMPHORA_RETRY_SUBFLOW,
+            retry=amphora_driver_tasks.AmpRetry())
+        retry_subflow.add(
+            amphora_driver_tasks.AmphoraComputeConnectivityWait(
+                name=retry_task, requires=constants.AMPHORA,
+                inject={'raise_retry_exception': True}))
+        return retry_subflow
+
+    def get_delete_amphora_flow(
+            self, amphora,
+            retry_attempts=CONF.controller_worker.amphora_delete_retries,
+            retry_interval=(
+                CONF.controller_worker.amphora_delete_retry_interval)):
+        """Creates a subflow to delete an amphora and its port.
+
+        This flow is idempotent and safe to retry.
+
+        :param amphora: An amphora dict object.
+        :param retry_attempts: The number of times the flow is retried.
+        :param retry_interval: The time to wait, in seconds, between retries.
+        :returns: The subflow for deleting the amphora.
+        :raises AmphoraNotFound: The referenced Amphora was not found.
+        """
+        amphora_id = amphora[constants.ID]
+        delete_amphora_flow = linear_flow.Flow(
+            name=constants.DELETE_AMPHORA_FLOW + '-' + amphora_id,
+            retry=retry_tasks.SleepingRetryTimesController(
+                name='retry-' + constants.DELETE_AMPHORA_FLOW + '-' +
+                     amphora_id,
+                attempts=retry_attempts, interval=retry_interval))
+        delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
+            name=constants.AMPHORA_TO_ERROR_ON_REVERT + '-' + amphora_id,
+            inject={constants.AMPHORA: amphora}))
+        delete_amphora_flow.add(
+            database_tasks.MarkAmphoraPendingDeleteInDB(
+                name=constants.MARK_AMPHORA_PENDING_DELETE + '-' + amphora_id,
+                inject={constants.AMPHORA: amphora}))
+        delete_amphora_flow.add(database_tasks.MarkAmphoraHealthBusy(
+            name=constants.MARK_AMPHORA_HEALTH_BUSY + '-' + amphora_id,
+            inject={constants.AMPHORA: amphora}))
+        delete_amphora_flow.add(compute_tasks.ComputeDelete(
+            name=constants.DELETE_AMPHORA + '-' + amphora_id,
+            inject={constants.AMPHORA: amphora,
+                    constants.PASSIVE_FAILURE: True}))
+        delete_amphora_flow.add(network_tasks.DeleteAmphoraMemberPorts(
+            name=constants.DELETE_AMPHORA_MEMBER_PORTS + '-' + amphora_id,
+            inject={constants.AMPHORA_ID: amphora[constants.ID]}))
+        delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring(
+            name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora_id,
+            inject={constants.AMPHORA: amphora}))
+        delete_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB(
+            name=constants.MARK_AMPHORA_DELETED + '-' + amphora_id,
+            inject={constants.AMPHORA: amphora}))
+        if amphora.get(constants.VRRP_PORT_ID):
+            delete_amphora_flow.add(network_tasks.DeletePort(
+                name=(constants.DELETE_PORT + '-' + str(amphora_id) + '-' +
+                      str(amphora[constants.VRRP_PORT_ID])),
+                inject={constants.PORT_ID: amphora[constants.VRRP_PORT_ID],
+                        constants.PASSIVE_FAILURE: True}))
+        return delete_amphora_flow
+
+    def get_vrrp_subflow(self, prefix, timeout_dict=None,
+                         create_vrrp_group=True,
+                         get_amphorae_status=True, flavor_dict=None):
+        sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW
+        vrrp_subflow =
linear_flow.Flow(sf_name) + + # Optimization for failover flow. No reason to call this + # when configuring the secondary amphora. + if create_vrrp_group: + vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB( + name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, + requires=constants.LOADBALANCER_ID)) + + vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs( + name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, + requires=constants.LOADBALANCER_ID, + provides=constants.AMPHORAE_NETWORK_CONFIG)) + + if get_amphorae_status: + # Get the amphorae_status dict in case the caller hasn't fetched + # it yet. + vrrp_subflow.add( + amphora_driver_tasks.AmphoraeGetConnectivityStatus( + name=constants.AMPHORAE_GET_CONNECTIVITY_STATUS, + requires=constants.AMPHORAE, + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.TIMEOUT_DICT: timeout_dict}, + provides=constants.AMPHORAE_STATUS)) + + # VRRP update needs to be run on all amphora to update + # their peer configurations. So parallelize this with an + # unordered subflow. + update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow') + + # We have tasks to run in order, per amphora + amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow') + + amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( + name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.AMPHORA_INDEX: 0, + constants.TIMEOUT_DICT: timeout_dict}, + provides=constants.AMP_VRRP_INT)) + + amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate( + name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE, + requires=(constants.LOADBALANCER_ID, + constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, + constants.AMPHORAE_STATUS, constants.AMP_VRRP_INT), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.AMPHORA_INDEX: 0, + constants.TIMEOUT_DICT: timeout_dict})) + + if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False): + amp_0_subflow.add(database_tasks.GetAmphoraFirewallRules( + name=sf_name + '-0-' + constants.GET_AMPHORA_FIREWALL_RULES, + requires=(constants.AMPHORAE, + constants.AMPHORAE_NETWORK_CONFIG), + provides=constants.AMPHORA_FIREWALL_RULES, + inject={constants.AMPHORA_INDEX: 0})) + + amp_0_subflow.add(amphora_driver_tasks.SetAmphoraFirewallRules( + name=sf_name + '-0-' + constants.SET_AMPHORA_FIREWALL_RULES, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS, + constants.AMPHORA_FIREWALL_RULES), + inject={constants.AMPHORA_INDEX: 0, + constants.TIMEOUT_DICT: timeout_dict})) + + amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( + name=sf_name + '-0-' + constants.AMP_VRRP_START, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.AMPHORA_INDEX: 0, + constants.TIMEOUT_DICT: timeout_dict})) + + amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow') + + amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( + name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF, + requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.AMPHORA_INDEX: 1, + constants.TIMEOUT_DICT: timeout_dict}, + provides=constants.AMP_VRRP_INT)) + + amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate( + name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE, + 
requires=(constants.LOADBALANCER_ID,
+                      constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
+                      constants.AMPHORAE_STATUS, constants.AMP_VRRP_INT),
+            rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
+            inject={constants.AMPHORA_INDEX: 1,
+                    constants.TIMEOUT_DICT: timeout_dict}))
+
+        if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False):
+            amp_1_subflow.add(database_tasks.GetAmphoraFirewallRules(
+                name=sf_name + '-1-' + constants.GET_AMPHORA_FIREWALL_RULES,
+                requires=(constants.AMPHORAE,
+                          constants.AMPHORAE_NETWORK_CONFIG),
+                provides=constants.AMPHORA_FIREWALL_RULES,
+                inject={constants.AMPHORA_INDEX: 1}))
+
+            amp_1_subflow.add(amphora_driver_tasks.SetAmphoraFirewallRules(
+                name=sf_name + '-1-' + constants.SET_AMPHORA_FIREWALL_RULES,
+                requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS,
+                          constants.AMPHORA_FIREWALL_RULES),
+                inject={constants.AMPHORA_INDEX: 1,
+                        constants.TIMEOUT_DICT: timeout_dict}))
+
+        amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
+            name=sf_name + '-1-' + constants.AMP_VRRP_START,
+            requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS),
+            rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
+            inject={constants.AMPHORA_INDEX: 1,
+                    constants.TIMEOUT_DICT: timeout_dict}))
+
+        update_amps_subflow.add(amp_0_subflow)
+        update_amps_subflow.add(amp_1_subflow)
+
+        vrrp_subflow.add(update_amps_subflow)
+
+        return vrrp_subflow
+
+    def cert_rotate_amphora_flow(self):
+        """Implement rotation for an amphora's certificate.
+
+        1. Create a new certificate
+        2. Upload the cert to the amphora
+        3. Update the new certificate info in the database
+        4. Set the cert_busy flag back to false after rotation
+
+        :returns: The flow for rotating an amphora's certificate
+        """
+        rotated_amphora_flow = linear_flow.Flow(
+            constants.CERT_ROTATE_AMPHORA_FLOW)
+
+        rotated_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
+            requires=constants.AMPHORA))
+
+        # Create a new certificate; the returned value is the newly created
+        # certificate
+        rotated_amphora_flow.add(cert_task.GenerateServerPEMTask(
+            provides=constants.SERVER_PEM))
+
+        # Upload the new certificate to the amphora
+        rotated_amphora_flow.add(amphora_driver_tasks.AmphoraCertUpload(
+            requires=(constants.AMPHORA, constants.SERVER_PEM)))
+
+        # Update the new certificate info in the database
+        rotated_amphora_flow.add(database_tasks.UpdateAmphoraDBCertExpiration(
+            requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
+
+        # Set the cert_busy flag back to false after rotation
+        rotated_amphora_flow.add(database_tasks.UpdateAmphoraCertBusyToFalse(
+            requires=constants.AMPHORA_ID))
+
+        return rotated_amphora_flow
+
+    def update_amphora_config_flow(self):
+        """Creates a flow to update the amphora agent configuration.
+
+        :returns: The flow for updating an amphora
+        """
+        update_amphora_flow = linear_flow.Flow(
+            constants.UPDATE_AMPHORA_CONFIG_FLOW)
+
+        update_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
+            requires=constants.AMPHORA))
+
+        update_amphora_flow.add(amphora_driver_tasks.AmphoraConfigUpdate(
+            requires=(constants.AMPHORA, constants.FLAVOR)))
+
+        return update_amphora_flow
+
+    def get_amphora_for_lb_failover_subflow(
+            self, prefix, role=constants.ROLE_STANDALONE,
+            failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False,
+            flavor_dict=None, timeout_dict=None):
+        """Creates a new amphora that will be used in a failover flow.
+
+        :requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer
+        :provides: amphora_id, amphora
+        :param prefix: The flow name prefix to use on the flow and tasks.
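+        :param flavor_dict: Flavor metadata dict for the load balancer
+                            (optional).
+        :param timeout_dict: Dictionary of timeout values for the amphora
+                             requests (optional).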
+ :param role: The role this amphora will have in the topology. + :param failed_amp_vrrp_port_id: The base port ID of the failed amp. + :param is_vrrp_ipv6: True if the base port IP is IPv6. + :return: A Taskflow sub-flow that will create the amphora. + """ + + sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW + + amp_for_failover_flow = linear_flow.Flow(sf_name) + + # Try to allocate or boot an amphora instance (unconfigured) + amp_for_failover_flow.add(self.get_amphora_for_lb_subflow( + prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW, + role=role)) + + if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False): + amp_for_failover_flow.add(network_tasks.GetSubnetFromVIP( + name=prefix + '-' + constants.GET_SUBNET_FROM_VIP, + requires=constants.LOADBALANCER, + provides=constants.SUBNET)) + amp_for_failover_flow.add(network_tasks.CreateSRIOVBasePort( + name=prefix + '-' + constants.PLUG_VIP_AMPHORA, + requires=(constants.LOADBALANCER, constants.AMPHORA, + constants.SUBNET), + provides=constants.BASE_PORT)) + else: + # Create the VIP base (aka VRRP) port for the amphora. + amp_for_failover_flow.add(network_tasks.CreateVIPBasePort( + name=prefix + '-' + constants.CREATE_VIP_BASE_PORT, + requires=(constants.VIP, constants.VIP_SG_ID, + constants.AMPHORA_ID, + constants.ADDITIONAL_VIPS), + provides=constants.BASE_PORT)) + + # Attach the VIP base (aka VRRP) port to the amphora. + amp_for_failover_flow.add(compute_tasks.AttachPort( + name=prefix + '-' + constants.ATTACH_PORT, + requires=(constants.AMPHORA, constants.PORT), + rebind={constants.PORT: constants.BASE_PORT})) + + # Update the amphora database record with the VIP base port info. + amp_for_failover_flow.add(database_tasks.UpdateAmpFailoverDetails( + name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS, + requires=(constants.AMPHORA, constants.VIP, constants.BASE_PORT))) + + # Update the amphora networking for the plugged VIP port + amp_for_failover_flow.add(network_tasks.GetAmphoraNetworkConfigsByID( + name=prefix + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID, + requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID), + provides=constants.AMPHORAE_NETWORK_CONFIG)) + + # Disable the base (vrrp) port on the failed amphora + # This prevents a DAD failure when bringing up the new amphora. + # Keepalived will handle this for act/stdby. 
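+        # (DAD = IPv6 duplicate address detection; the new amphora cannot
+        # claim an IPv6 VIP while the failed port still answers for it.)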
+ if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id and + is_vrrp_ipv6): + amp_for_failover_flow.add(network_tasks.AdminDownPort( + name=prefix + '-' + constants.ADMIN_DOWN_PORT, + inject={constants.PORT_ID: failed_amp_vrrp_port_id})) + + amp_for_failover_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug( + name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG, + requires=(constants.AMPHORA, constants.LOADBALANCER, + constants.AMPHORAE_NETWORK_CONFIG))) + + if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False): + amp_for_failover_flow.add( + shim_tasks.AmphoraToAmphoraeWithVRRPIP( + name=prefix + '-' + constants.AMPHORA_TO_AMPHORAE_VRRP_IP, + requires=(constants.AMPHORA, constants.BASE_PORT), + provides=constants.NEW_AMPHORAE)) + amp_for_failover_flow.add(database_tasks.GetAmphoraFirewallRules( + name=prefix + '-' + constants.GET_AMPHORA_FIREWALL_RULES, + requires=(constants.AMPHORAE, + constants.AMPHORAE_NETWORK_CONFIG), + rebind={constants.AMPHORAE: constants.NEW_AMPHORAE}, + provides=constants.AMPHORA_FIREWALL_RULES, + inject={constants.AMPHORA_INDEX: 0})) + amp_for_failover_flow.add( + amphora_driver_tasks.AmphoraeGetConnectivityStatus( + name=(prefix + '-' + + constants.AMPHORAE_GET_CONNECTIVITY_STATUS), + requires=constants.AMPHORAE, + rebind={constants.AMPHORAE: constants.NEW_AMPHORAE}, + inject={constants.TIMEOUT_DICT: timeout_dict, + constants.NEW_AMPHORA_ID: constants.NIL_UUID}, + provides=constants.AMPHORAE_STATUS)) + amp_for_failover_flow.add( + amphora_driver_tasks.SetAmphoraFirewallRules( + name=prefix + '-' + constants.SET_AMPHORA_FIREWALL_RULES, + requires=(constants.AMPHORAE, + constants.AMPHORA_FIREWALL_RULES, + constants.AMPHORAE_STATUS), + rebind={constants.AMPHORAE: constants.NEW_AMPHORAE}, + inject={constants.AMPHORA_INDEX: 0, + constants.TIMEOUT_DICT: timeout_dict})) + + # Plug member ports + amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta( + name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA, + requires=(constants.LOADBALANCER, constants.AMPHORA, + constants.AVAILABILITY_ZONE), + provides=constants.DELTA)) + + amp_for_failover_flow.add(network_tasks.HandleNetworkDelta( + name=prefix + '-' + constants.HANDLE_NETWORK_DELTA, + requires=(constants.AMPHORA, constants.DELTA), + provides=constants.UPDATED_PORTS)) + + amp_for_failover_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( + name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG, + requires=(constants.LOADBALANCER, constants.UPDATED_PORTS))) + + return amp_for_failover_flow + + def get_failover_amphora_flow(self, failed_amphora, lb_amp_count, + flavor_dict=None): + """Get a Taskflow flow to failover an amphora. + + 1. Build a replacement amphora. + 2. Delete the old amphora. + 3. Update the amphorae listener configurations. + 4. Update the VRRP configurations if needed. + + :param failed_amphora: The amphora dict to failover. + :param lb_amp_count: The number of amphora on this load balancer. + :param flavor_dict: The load balancer flavor dictionary. + :returns: The flow that will provide the failover. 
+ """ + failover_amp_flow = linear_flow.Flow( + constants.FAILOVER_AMPHORA_FLOW) + + # Revert LB to provisioning_status ERROR if this flow goes wrong + failover_amp_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( + requires=constants.LOADBALANCER)) + + # Revert amphora to status ERROR if this flow goes wrong + failover_amp_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( + requires=constants.AMPHORA, + inject={constants.AMPHORA: failed_amphora})) + + if failed_amphora[constants.ROLE] in (constants.ROLE_MASTER, + constants.ROLE_BACKUP): + amp_role = 'master_or_backup' + elif failed_amphora[constants.ROLE] == constants.ROLE_STANDALONE: + amp_role = 'standalone' + else: + amp_role = 'undefined' + LOG.info("Performing failover for amphora: %s", + {"id": failed_amphora[constants.ID], + "load_balancer_id": failed_amphora.get( + constants.LOAD_BALANCER_ID), + "lb_network_ip": failed_amphora.get(constants.LB_NETWORK_IP), + "compute_id": failed_amphora.get(constants.COMPUTE_ID), + "role": amp_role}) + + failover_amp_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB( + requires=constants.AMPHORA, + inject={constants.AMPHORA: failed_amphora})) + + failover_amp_flow.add(database_tasks.MarkAmphoraHealthBusy( + requires=constants.AMPHORA, + inject={constants.AMPHORA: failed_amphora})) + + failover_amp_flow.add(network_tasks.GetVIPSecurityGroupID( + requires=constants.LOADBALANCER_ID, + provides=constants.VIP_SG_ID)) + + is_vrrp_ipv6 = False + if failed_amphora.get(constants.LOAD_BALANCER_ID): + if failed_amphora.get(constants.VRRP_IP): + is_vrrp_ipv6 = utils.is_ipv6(failed_amphora[constants.VRRP_IP]) + + # Get a replacement amphora and plug all of the networking. + # + # Do this early as the compute services have been observed to be + # unreliable. The community decided the chance that deleting first + # would open resources for an instance is less likely than the + # compute service failing to boot an instance for other reasons. + + # TODO(johnsom) Move this back out to run for spares after + # delete amphora API is available. + failover_amp_flow.add(self.get_amphora_for_lb_failover_subflow( + prefix=constants.FAILOVER_LOADBALANCER_FLOW, + role=failed_amphora[constants.ROLE], + failed_amp_vrrp_port_id=failed_amphora.get( + constants.VRRP_PORT_ID), + is_vrrp_ipv6=is_vrrp_ipv6, flavor_dict=flavor_dict)) + + failover_amp_flow.add( + self.get_delete_amphora_flow( + failed_amphora, + retry_attempts=CONF.controller_worker.amphora_delete_retries, + retry_interval=( + CONF.controller_worker.amphora_delete_retry_interval))) + failover_amp_flow.add( + database_tasks.DisableAmphoraHealthMonitoring( + requires=constants.AMPHORA, + inject={constants.AMPHORA: failed_amphora})) + + if not failed_amphora.get(constants.LOAD_BALANCER_ID): + # This is an unallocated amphora (bogus), we are done. 
+ return failover_amp_flow + + failover_amp_flow.add(database_tasks.GetLoadBalancer( + requires=constants.LOADBALANCER_ID, + inject={constants.LOADBALANCER_ID: + failed_amphora[constants.LOAD_BALANCER_ID]}, + provides=constants.LOADBALANCER)) + + failover_amp_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( + name=constants.GET_AMPHORAE_FROM_LB, + requires=constants.LOADBALANCER_ID, + inject={constants.LOADBALANCER_ID: + failed_amphora[constants.LOAD_BALANCER_ID]}, + provides=constants.AMPHORAE)) + + # Setup timeouts for our requests to the amphorae + timeout_dict = { + constants.CONN_MAX_RETRIES: + CONF.haproxy_amphora.active_connection_max_retries, + constants.CONN_RETRY_INTERVAL: + CONF.haproxy_amphora.active_connection_retry_interval} + + failover_amp_flow.add( + amphora_driver_tasks.AmphoraeGetConnectivityStatus( + name=constants.AMPHORAE_GET_CONNECTIVITY_STATUS, + requires=constants.AMPHORAE, + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.TIMEOUT_DICT: timeout_dict}, + provides=constants.AMPHORAE_STATUS)) + + # Listeners update needs to be run on all amphora to update + # their peer configurations. So parallelize this with an + # unordered subflow. + update_amps_subflow = unordered_flow.Flow( + constants.UPDATE_AMPS_SUBFLOW) + + for amp_index in range(0, lb_amp_count): + update_amps_subflow.add( + amphora_driver_tasks.AmphoraIndexListenerUpdate( + name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE, + requires=(constants.LOADBALANCER, constants.AMPHORAE, + constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.AMPHORA_INDEX: amp_index, + constants.TIMEOUT_DICT: timeout_dict})) + + failover_amp_flow.add(update_amps_subflow) + + # Configure and enable keepalived in the amphora + if lb_amp_count == 2: + failover_amp_flow.add( + self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW, + timeout_dict, create_vrrp_group=False, + get_amphorae_status=False, + flavor_dict=flavor_dict)) + + # Reload the listener. This needs to be done here because + # it will create the required haproxy check scripts for + # the VRRP deployed above. + # A "U" or newer amphora-agent will remove the need for this + # task here. + # TODO(johnsom) Remove this in the "W" cycle + reload_listener_subflow = unordered_flow.Flow( + constants.AMPHORA_LISTENER_RELOAD_SUBFLOW) + + for amp_index in range(0, lb_amp_count): + reload_listener_subflow.add( + amphora_driver_tasks.AmphoraIndexListenersReload( + name=(str(amp_index) + '-' + + constants.AMPHORA_RELOAD_LISTENER), + requires=(constants.LOADBALANCER, constants.AMPHORAE, + constants.AMPHORAE_STATUS), + rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}, + inject={constants.AMPHORA_INDEX: amp_index, + constants.TIMEOUT_DICT: timeout_dict})) + + failover_amp_flow.add(reload_listener_subflow) + + # Remove any extraneous ports + # Note: Nova sometimes fails to delete ports attached to an instance. + # For example, if you create an LB with a listener, then + # 'openstack server delete' the amphora, you will see the vrrp + # port attached to that instance will remain after the instance + # is deleted. 
+ # TODO(johnsom) Fix this as part of + # https://storyboard.openstack.org/#!/story/2007077 + + # Mark LB ACTIVE + failover_amp_flow.add( + database_tasks.MarkLBActiveInDB(mark_subobjects=True, + requires=constants.LOADBALANCER)) + + return failover_amp_flow diff --git a/octavia/controller/worker/v2/flows/flow_utils.py b/octavia/controller/worker/v2/flows/flow_utils.py new file mode 100644 index 0000000000..0a02e5aac7 --- /dev/null +++ b/octavia/controller/worker/v2/flows/flow_utils.py @@ -0,0 +1,182 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia.api.drivers import utils as provider_utils +from octavia.controller.worker.v2.flows import amphora_flows +from octavia.controller.worker.v2.flows import health_monitor_flows +from octavia.controller.worker.v2.flows import l7policy_flows +from octavia.controller.worker.v2.flows import l7rule_flows +from octavia.controller.worker.v2.flows import listener_flows +from octavia.controller.worker.v2.flows import load_balancer_flows +from octavia.controller.worker.v2.flows import member_flows +from octavia.controller.worker.v2.flows import pool_flows + + +LB_FLOWS = load_balancer_flows.LoadBalancerFlows() +AMP_FLOWS = amphora_flows.AmphoraFlows() +HM_FLOWS = health_monitor_flows.HealthMonitorFlows() +L7_POLICY_FLOWS = l7policy_flows.L7PolicyFlows() +L7_RULES_FLOWS = l7rule_flows.L7RuleFlows() +LISTENER_FLOWS = listener_flows.ListenerFlows() +M_FLOWS = member_flows.MemberFlows() +P_FLOWS = pool_flows.PoolFlows() + + +def get_create_load_balancer_flow(topology, listeners=None, flavor_dict=None): + return LB_FLOWS.get_create_load_balancer_flow( + topology, listeners=listeners, flavor_dict=flavor_dict) + + +def get_delete_load_balancer_flow(lb): + return LB_FLOWS.get_delete_load_balancer_flow(lb) + + +def get_listeners_on_lb(db_lb, for_delete=False): + """Get a list of the listeners on a load balancer. + + :param db_lb: A load balancer database model object. + :param for_delete: Skip errors on tls certs loading. + :returns: A list of provider dict format listeners. + """ + listener_dicts = [] + for listener in db_lb.listeners: + prov_listener = provider_utils.db_listener_to_provider_listener( + listener, for_delete) + listener_dicts.append(prov_listener.to_dict()) + return listener_dicts + + +def get_pools_on_lb(db_lb, for_delete=False): + """Get a list of the pools on a load balancer. + + :param db_lb: A load balancer database model object. + :param for_delete: Skip errors on tls certs loading. + :returns: A list of provider dict format pools. 
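+
+        Example (illustrative only; key name assumed from the provider
+        pool schema)::
+
+            pools = get_pools_on_lb(db_lb)
+            pool_ids = [p['pool_id'] for p in pools]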
+ """ + pool_dicts = [] + for pool in db_lb.pools: + prov_pool = provider_utils.db_pool_to_provider_pool(pool, for_delete) + pool_dicts.append(prov_pool.to_dict()) + return pool_dicts + + +def get_cascade_delete_load_balancer_flow(lb, listeners=(), pools=()): + return LB_FLOWS.get_cascade_delete_load_balancer_flow(lb, listeners, + pools) + + +def get_update_load_balancer_flow(): + return LB_FLOWS.get_update_load_balancer_flow() + + +def get_delete_amphora_flow(amphora, retry_attempts=None, retry_interval=None): + return AMP_FLOWS.get_delete_amphora_flow(amphora, retry_attempts, + retry_interval) + + +def get_failover_LB_flow(amps, lb): + return LB_FLOWS.get_failover_LB_flow(amps, lb) + + +def get_failover_amphora_flow(amphora_dict, lb_amp_count, flavor_dict=None): + return AMP_FLOWS.get_failover_amphora_flow(amphora_dict, lb_amp_count, + flavor_dict=flavor_dict) + + +def cert_rotate_amphora_flow(): + return AMP_FLOWS.cert_rotate_amphora_flow() + + +def update_amphora_config_flow(): + return AMP_FLOWS.update_amphora_config_flow() + + +def get_create_health_monitor_flow(): + return HM_FLOWS.get_create_health_monitor_flow() + + +def get_delete_health_monitor_flow(): + return HM_FLOWS.get_delete_health_monitor_flow() + + +def get_update_health_monitor_flow(): + return HM_FLOWS.get_update_health_monitor_flow() + + +def get_create_l7policy_flow(): + return L7_POLICY_FLOWS.get_create_l7policy_flow() + + +def get_delete_l7policy_flow(): + return L7_POLICY_FLOWS.get_delete_l7policy_flow() + + +def get_update_l7policy_flow(): + return L7_POLICY_FLOWS.get_update_l7policy_flow() + + +def get_create_l7rule_flow(): + return L7_RULES_FLOWS.get_create_l7rule_flow() + + +def get_delete_l7rule_flow(): + return L7_RULES_FLOWS.get_delete_l7rule_flow() + + +def get_update_l7rule_flow(): + return L7_RULES_FLOWS.get_update_l7rule_flow() + + +def get_create_listener_flow(flavor_dict=None): + return LISTENER_FLOWS.get_create_listener_flow(flavor_dict=flavor_dict) + + +def get_create_all_listeners_flow(flavor_dict=None): + return LISTENER_FLOWS.get_create_all_listeners_flow( + flavor_dict=flavor_dict) + + +def get_delete_listener_flow(flavor_dict=None): + return LISTENER_FLOWS.get_delete_listener_flow(flavor_dict=flavor_dict) + + +def get_update_listener_flow(flavor_dict=None): + return LISTENER_FLOWS.get_update_listener_flow(flavor_dict=flavor_dict) + + +def get_create_member_flow(): + return M_FLOWS.get_create_member_flow() + + +def get_delete_member_flow(): + return M_FLOWS.get_delete_member_flow() + + +def get_update_member_flow(): + return M_FLOWS.get_update_member_flow() + + +def get_batch_update_members_flow(old_members, new_members, updated_members): + return M_FLOWS.get_batch_update_members_flow(old_members, new_members, + updated_members) + + +def get_create_pool_flow(): + return P_FLOWS.get_create_pool_flow() + + +def get_delete_pool_flow(): + return P_FLOWS.get_delete_pool_flow() + + +def get_update_pool_flow(): + return P_FLOWS.get_update_pool_flow() diff --git a/octavia/controller/worker/v2/flows/health_monitor_flows.py b/octavia/controller/worker/v2/flows/health_monitor_flows.py new file mode 100644 index 0000000000..adee113459 --- /dev/null +++ b/octavia/controller/worker/v2/flows/health_monitor_flows.py @@ -0,0 +1,101 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks + + +class HealthMonitorFlows: + + def get_create_health_monitor_flow(self): + """Create a flow to create a health monitor + + :returns: The flow for creating a health monitor + """ + create_hm_flow = linear_flow.Flow(constants.CREATE_HEALTH_MONITOR_FLOW) + create_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( + requires=[constants.HEALTH_MON, + constants.LISTENERS, + constants.LOADBALANCER])) + create_hm_flow.add(database_tasks.MarkHealthMonitorPendingCreateInDB( + requires=constants.HEALTH_MON)) + create_hm_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + create_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB( + requires=constants.HEALTH_MON)) + create_hm_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL_ID)) + create_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return create_hm_flow + + def get_delete_health_monitor_flow(self): + """Create a flow to delete a health monitor + + :returns: The flow for deleting a health monitor + """ + delete_hm_flow = linear_flow.Flow(constants.DELETE_HEALTH_MONITOR_FLOW) + delete_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( + requires=[constants.HEALTH_MON, + constants.LISTENERS, + constants.LOADBALANCER])) + delete_hm_flow.add(database_tasks.MarkHealthMonitorPendingDeleteInDB( + requires=constants.HEALTH_MON)) + delete_hm_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + delete_hm_flow.add(database_tasks.DeleteHealthMonitorInDB( + requires=constants.HEALTH_MON)) + delete_hm_flow.add(database_tasks.DecrementHealthMonitorQuota( + requires=constants.PROJECT_ID)) + delete_hm_flow.add( + database_tasks.UpdatePoolMembersOperatingStatusInDB( + requires=constants.POOL_ID, + inject={constants.OPERATING_STATUS: constants.NO_MONITOR})) + delete_hm_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL_ID)) + delete_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return delete_hm_flow + + def get_update_health_monitor_flow(self): + """Create a flow to update a health monitor + + :returns: The flow for updating a health monitor + """ + update_hm_flow = linear_flow.Flow(constants.UPDATE_HEALTH_MONITOR_FLOW) + update_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( + requires=[constants.HEALTH_MON, + constants.LISTENERS, + constants.LOADBALANCER])) + update_hm_flow.add(database_tasks.MarkHealthMonitorPendingUpdateInDB( + requires=constants.HEALTH_MON)) + update_hm_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + update_hm_flow.add(database_tasks.UpdateHealthMonInDB( + requires=[constants.HEALTH_MON, constants.UPDATE_DICT])) + 
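+        # The amphorae have accepted the new configuration at this point;
+        # the remaining tasks flip the DB records back to ACTIVE.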
update_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB( + requires=constants.HEALTH_MON)) + update_hm_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL_ID)) + update_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return update_hm_flow diff --git a/octavia/controller/worker/v2/flows/l7policy_flows.py b/octavia/controller/worker/v2/flows/l7policy_flows.py new file mode 100644 index 0000000000..613e134635 --- /dev/null +++ b/octavia/controller/worker/v2/flows/l7policy_flows.py @@ -0,0 +1,91 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks + + +class L7PolicyFlows: + + def get_create_l7policy_flow(self): + """Create a flow to create an L7 policy + + :returns: The flow for creating an L7 policy + """ + create_l7policy_flow = linear_flow.Flow(constants.CREATE_L7POLICY_FLOW) + create_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( + requires=[constants.L7POLICY, + constants.LISTENERS, + constants.LOADBALANCER_ID])) + create_l7policy_flow.add(database_tasks.MarkL7PolicyPendingCreateInDB( + requires=constants.L7POLICY)) + create_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + create_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB( + requires=constants.L7POLICY)) + create_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return create_l7policy_flow + + def get_delete_l7policy_flow(self): + """Create a flow to delete an L7 policy + + :returns: The flow for deleting an L7 policy + """ + delete_l7policy_flow = linear_flow.Flow(constants.DELETE_L7POLICY_FLOW) + delete_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( + requires=[constants.L7POLICY, + constants.LISTENERS, + constants.LOADBALANCER_ID])) + delete_l7policy_flow.add(database_tasks.MarkL7PolicyPendingDeleteInDB( + requires=constants.L7POLICY)) + delete_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + delete_l7policy_flow.add(database_tasks.DeleteL7PolicyInDB( + requires=constants.L7POLICY)) + delete_l7policy_flow.add(database_tasks.DecrementL7policyQuota( + requires=constants.L7POLICY)) + delete_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return delete_l7policy_flow + + def get_update_l7policy_flow(self): + """Create a flow to update an L7 policy + + :returns: The flow for updating an L7 policy + """ + update_l7policy_flow = linear_flow.Flow(constants.UPDATE_L7POLICY_FLOW) + 
update_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( + requires=[constants.L7POLICY, + constants.LISTENERS, + constants.LOADBALANCER_ID])) + update_l7policy_flow.add(database_tasks.MarkL7PolicyPendingUpdateInDB( + requires=constants.L7POLICY)) + update_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + update_l7policy_flow.add(database_tasks.UpdateL7PolicyInDB( + requires=[constants.L7POLICY, constants.UPDATE_DICT])) + update_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB( + requires=constants.L7POLICY)) + update_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return update_l7policy_flow diff --git a/octavia/controller/worker/v2/flows/l7rule_flows.py b/octavia/controller/worker/v2/flows/l7rule_flows.py new file mode 100644 index 0000000000..7b23aba9f4 --- /dev/null +++ b/octavia/controller/worker/v2/flows/l7rule_flows.py @@ -0,0 +1,100 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks + + +class L7RuleFlows: + + def get_create_l7rule_flow(self): + """Create a flow to create an L7 rule + + :returns: The flow for creating an L7 rule + """ + create_l7rule_flow = linear_flow.Flow(constants.CREATE_L7RULE_FLOW) + create_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( + requires=[constants.L7RULE, + constants.L7POLICY_ID, + constants.LISTENERS, + constants.LOADBALANCER_ID])) + create_l7rule_flow.add(database_tasks.MarkL7RulePendingCreateInDB( + requires=constants.L7RULE)) + create_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + create_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB( + requires=constants.L7RULE)) + create_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( + requires=constants.L7POLICY)) + create_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return create_l7rule_flow + + def get_delete_l7rule_flow(self): + """Create a flow to delete an L7 rule + + :returns: The flow for deleting an L7 rule + """ + delete_l7rule_flow = linear_flow.Flow(constants.DELETE_L7RULE_FLOW) + delete_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( + requires=[constants.L7RULE, + constants.L7POLICY_ID, + constants.LISTENERS, + constants.LOADBALANCER_ID])) + delete_l7rule_flow.add(database_tasks.MarkL7RulePendingDeleteInDB( + requires=constants.L7RULE)) + delete_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + delete_l7rule_flow.add(database_tasks.DeleteL7RuleInDB( + requires=constants.L7RULE)) + 
delete_l7rule_flow.add(database_tasks.DecrementL7ruleQuota( + requires=constants.L7RULE)) + delete_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( + requires=constants.L7POLICY)) + delete_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return delete_l7rule_flow + + def get_update_l7rule_flow(self): + """Create a flow to update an L7 rule + + :returns: The flow for updating an L7 rule + """ + update_l7rule_flow = linear_flow.Flow(constants.UPDATE_L7RULE_FLOW) + update_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( + requires=[constants.L7RULE, + constants.L7POLICY_ID, + constants.LISTENERS, + constants.LOADBALANCER_ID])) + update_l7rule_flow.add(database_tasks.MarkL7RulePendingUpdateInDB( + requires=constants.L7RULE)) + update_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + update_l7rule_flow.add(database_tasks.UpdateL7RuleInDB( + requires=[constants.L7RULE, constants.UPDATE_DICT])) + update_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB( + requires=constants.L7RULE)) + update_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( + requires=constants.L7POLICY)) + update_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return update_l7rule_flow diff --git a/octavia/controller/worker/v2/flows/listener_flows.py b/octavia/controller/worker/v2/flows/listener_flows.py new file mode 100644 index 0000000000..262c2c5775 --- /dev/null +++ b/octavia/controller/worker/v2/flows/listener_flows.py @@ -0,0 +1,232 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow +from taskflow.patterns import unordered_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import network_tasks + + +class ListenerFlows: + + def get_create_listener_flow(self, flavor_dict=None): + """Create a flow to create a listener + + :returns: The flow for creating a listener + """ + create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW) + create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask( + requires=constants.LISTENERS)) + create_listener_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + create_listener_flow.add(network_tasks.UpdateVIP( + requires=constants.LISTENERS)) + + if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False): + create_listener_flow.add(*self._get_firewall_rules_subflow( + flavor_dict)) + + create_listener_flow.add(database_tasks. 
+ MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, + constants.LISTENERS))) + return create_listener_flow + + def get_create_all_listeners_flow(self, flavor_dict=None): + """Create a flow to create all listeners + + :returns: The flow for creating all listeners + """ + create_all_listeners_flow = linear_flow.Flow( + constants.CREATE_LISTENERS_FLOW) + create_all_listeners_flow.add( + database_tasks.GetListenersFromLoadbalancer( + requires=constants.LOADBALANCER, + provides=constants.LISTENERS)) + create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer( + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER)) + create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + create_all_listeners_flow.add(network_tasks.UpdateVIP( + requires=constants.LISTENERS)) + + if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False): + create_all_listeners_flow.add(*self._get_firewall_rules_subflow( + flavor_dict)) + + create_all_listeners_flow.add( + database_tasks.MarkHealthMonitorsOnlineInDB( + requires=constants.LOADBALANCER)) + return create_all_listeners_flow + + def get_delete_listener_flow(self, flavor_dict=None): + """Create a flow to delete a listener + + :returns: The flow for deleting a listener + """ + delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW) + delete_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask( + requires=constants.LISTENER)) + delete_listener_flow.add(amphora_driver_tasks.ListenerDelete( + requires=constants.LISTENER)) + delete_listener_flow.add(network_tasks.UpdateVIPForDelete( + requires=constants.LOADBALANCER_ID)) + delete_listener_flow.add(database_tasks.DeleteListenerInDB( + requires=constants.LISTENER)) + + if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False): + delete_listener_flow.add(*self._get_firewall_rules_subflow( + flavor_dict)) + + delete_listener_flow.add(database_tasks.DecrementListenerQuota( + requires=constants.PROJECT_ID)) + delete_listener_flow.add(database_tasks.MarkLBActiveInDBByListener( + requires=constants.LISTENER)) + + return delete_listener_flow + + def get_delete_listener_internal_flow(self, listener, flavor_dict=None): + """Create a flow to delete a listener and l7policies internally + + (will skip deletion on the amp and marking LB active) + + :returns: The flow for deleting a listener + """ + listener_id = listener[constants.LISTENER_ID] + delete_listener_flow = linear_flow.Flow( + constants.DELETE_LISTENER_FLOW + '-' + listener_id) + # Should cascade delete all L7 policies + delete_listener_flow.add(database_tasks.DeleteListenerInDB( + name='delete_listener_in_db_' + listener_id, + requires=constants.LISTENER, + inject={constants.LISTENER: listener})) + + # Currently the flavor_dict will always be None since there is + # no point updating the firewall rules when deleting the LB. + # However, this may be used for additional flows in the future, so + # adding this code for completeness. 
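+        # NOTE(editor): Purely as an illustration, a flavor_dict that would
+        # enable the SR-IOV branch below (keys inferred from how this module
+        # reads the dict; the values shown are examples only):
+        #
+        #   flavor_dict = {
+        #       constants.SRIOV_VIP: True,
+        #       constants.LOADBALANCER_TOPOLOGY:
+        #           constants.TOPOLOGY_ACTIVE_STANDBY,
+        #   }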
+ if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False): + delete_listener_flow.add(*self._get_firewall_rules_subflow( + flavor_dict)) + + delete_listener_flow.add(database_tasks.DecrementListenerQuota( + name='decrement_listener_quota_' + listener_id, + requires=constants.PROJECT_ID)) + + return delete_listener_flow + + def get_update_listener_flow(self, flavor_dict=None): + """Create a flow to update a listener + + :returns: The flow for updating a listener + """ + update_listener_flow = linear_flow.Flow(constants.UPDATE_LISTENER_FLOW) + update_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask( + requires=constants.LISTENER)) + update_listener_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + update_listener_flow.add(network_tasks.UpdateVIP( + requires=constants.LISTENERS)) + + if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False): + update_listener_flow.add(*self._get_firewall_rules_subflow( + flavor_dict)) + + update_listener_flow.add(database_tasks.UpdateListenerInDB( + requires=[constants.LISTENER, constants.UPDATE_DICT])) + update_listener_flow.add(database_tasks. + MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, + constants.LISTENERS))) + + return update_listener_flow + + def _get_firewall_rules_subflow(self, flavor_dict, timeout_dict=None): + """Creates a subflow that updates the firewall rules in the amphorae. + + :returns: The subflow for updating firewall rules in the amphorae. + """ + sf_name = constants.FIREWALL_RULES_SUBFLOW + fw_rules_subflow = linear_flow.Flow(sf_name) + + fw_rules_subflow.add(database_tasks.GetAmphoraeFromLoadbalancer( + name=sf_name + '-' + constants.GET_AMPHORAE_FROM_LB, + requires=constants.LOADBALANCER_ID, + provides=constants.AMPHORAE)) + + fw_rules_subflow.add( + amphora_driver_tasks.AmphoraeGetConnectivityStatus( + name=(sf_name + '-' + + constants.AMPHORAE_GET_CONNECTIVITY_STATUS), + requires=constants.AMPHORAE, + inject={constants.TIMEOUT_DICT: timeout_dict, + constants.NEW_AMPHORA_ID: constants.NIL_UUID}, + provides=constants.AMPHORAE_STATUS)) + + fw_rules_subflow.add(network_tasks.GetAmphoraeNetworkConfigs( + name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, + requires=constants.LOADBALANCER_ID, + provides=constants.AMPHORAE_NETWORK_CONFIG)) + + update_amps_subflow = unordered_flow.Flow( + constants.AMP_UPDATE_FW_SUBFLOW) + + amp_0_subflow = linear_flow.Flow('amp-0-fw-update') + + amp_0_subflow.add(database_tasks.GetAmphoraFirewallRules( + name=sf_name + '-0-' + constants.GET_AMPHORA_FIREWALL_RULES, + requires=(constants.AMPHORAE, constants.AMPHORAE_NETWORK_CONFIG), + provides=constants.AMPHORA_FIREWALL_RULES, + inject={constants.AMPHORA_INDEX: 0})) + + amp_0_subflow.add(amphora_driver_tasks.SetAmphoraFirewallRules( + name=sf_name + '-0-' + constants.SET_AMPHORA_FIREWALL_RULES, + requires=(constants.AMPHORAE, constants.AMPHORA_FIREWALL_RULES, + constants.AMPHORAE_STATUS), + inject={constants.AMPHORA_INDEX: 0, + constants.TIMEOUT_DICT: timeout_dict})) + + update_amps_subflow.add(amp_0_subflow) + + if (flavor_dict[constants.LOADBALANCER_TOPOLOGY] == + constants.TOPOLOGY_ACTIVE_STANDBY): + + amp_1_subflow = linear_flow.Flow('amp-1-fw-update') + + amp_1_subflow.add(database_tasks.GetAmphoraFirewallRules( + name=sf_name + '-1-' + constants.GET_AMPHORA_FIREWALL_RULES, + requires=(constants.AMPHORAE, + constants.AMPHORAE_NETWORK_CONFIG), + provides=constants.AMPHORA_FIREWALL_RULES, + inject={constants.AMPHORA_INDEX: 1})) + + 
amp_1_subflow.add(amphora_driver_tasks.SetAmphoraFirewallRules(
+                name=sf_name + '-1-' + constants.SET_AMPHORA_FIREWALL_RULES,
+                requires=(constants.AMPHORAE,
+                          constants.AMPHORA_FIREWALL_RULES,
+                          constants.AMPHORAE_STATUS),
+                inject={constants.AMPHORA_INDEX: 1,
+                        constants.TIMEOUT_DICT: timeout_dict}))
+
+            update_amps_subflow.add(amp_1_subflow)
+
+        fw_rules_subflow.add(update_amps_subflow)
+
+        return fw_rules_subflow
diff --git a/octavia/controller/worker/v2/flows/load_balancer_flows.py b/octavia/controller/worker/v2/flows/load_balancer_flows.py
new file mode 100644
index 0000000000..7603433e9d
--- /dev/null
+++ b/octavia/controller/worker/v2/flows/load_balancer_flows.py
@@ -0,0 +1,791 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+from oslo_config import cfg
+from oslo_log import log as logging
+from taskflow.patterns import linear_flow
+from taskflow.patterns import unordered_flow
+
+from octavia.common import constants
+from octavia.common import exceptions
+from octavia.common import utils
+from octavia.controller.worker.v2.flows import amphora_flows
+from octavia.controller.worker.v2.flows import listener_flows
+from octavia.controller.worker.v2.flows import member_flows
+from octavia.controller.worker.v2.flows import pool_flows
+from octavia.controller.worker.v2.tasks import amphora_driver_tasks
+from octavia.controller.worker.v2.tasks import compute_tasks
+from octavia.controller.worker.v2.tasks import database_tasks
+from octavia.controller.worker.v2.tasks import lifecycle_tasks
+from octavia.controller.worker.v2.tasks import network_tasks
+from octavia.controller.worker.v2.tasks import notification_tasks
+from octavia.db import api as db_apis
+from octavia.db import repositories as repo
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class LoadBalancerFlows:
+
+    def __init__(self):
+        self.amp_flows = amphora_flows.AmphoraFlows()
+        self.amphora_repo = repo.AmphoraRepository()
+        self.amphora_member_port_repo = repo.AmphoraMemberPortRepository()
+        self.listener_flows = listener_flows.ListenerFlows()
+        self.pool_flows = pool_flows.PoolFlows()
+        self.member_flows = member_flows.MemberFlows()
+        self.lb_repo = repo.LoadBalancerRepository()
+
+    def get_create_load_balancer_flow(self, topology, listeners=None,
+                                      flavor_dict=None):
+        """Creates a flow that allocates a load balancer.
+
+        :raises InvalidTopology: Invalid topology specified
+        :return: The flow for creating a load balancer.
+ """ + f_name = constants.CREATE_LOADBALANCER_FLOW + lb_create_flow = linear_flow.Flow(f_name) + + lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask( + requires=constants.LOADBALANCER_ID)) + + # allocate VIP - Saves the VIP IP(s) in neutron + lb_create_flow.add(database_tasks.ReloadLoadBalancer( + name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP, + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER + )) + lb_create_flow.add(network_tasks.AllocateVIP( + requires=constants.LOADBALANCER, + provides=(constants.VIP, constants.ADDITIONAL_VIPS))) + lb_create_flow.add(database_tasks.UpdateVIPAfterAllocation( + requires=(constants.LOADBALANCER_ID, constants.VIP), + provides=constants.LOADBALANCER)) + lb_create_flow.add(database_tasks.UpdateAdditionalVIPsAfterAllocation( + requires=(constants.LOADBALANCER_ID, constants.ADDITIONAL_VIPS), + provides=constants.LOADBALANCER)) + lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup( + requires=constants.LOADBALANCER_ID)) + lb_create_flow.add(network_tasks.GetSubnetFromVIP( + requires=constants.LOADBALANCER, + provides=constants.SUBNET)) + + if topology == constants.TOPOLOGY_ACTIVE_STANDBY: + lb_create_flow.add(*self._create_active_standby_topology( + flavor_dict=flavor_dict)) + elif topology == constants.TOPOLOGY_SINGLE: + lb_create_flow.add(*self._create_single_topology( + flavor_dict=flavor_dict)) + else: + LOG.error("Unknown topology: %s. Unable to build load balancer.", + topology) + raise exceptions.InvalidTopology(topology=topology) + + post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW + lb_create_flow.add( + self.get_post_lb_amp_association_flow(post_amp_prefix, topology, + flavor_dict=flavor_dict)) + + if listeners: + lb_create_flow.add( + *self._create_listeners_flow(flavor_dict=flavor_dict)) + + lb_create_flow.add( + database_tasks.MarkLBActiveInDB( + mark_subobjects=True, + requires=constants.LOADBALANCER + ) + ) + + if CONF.controller_worker.event_notifications: + lb_create_flow.add( + notification_tasks.SendCreateNotification( + requires=constants.LOADBALANCER + ) + ) + + return lb_create_flow + + def _create_single_topology(self, flavor_dict=None): + sf_name = (constants.ROLE_STANDALONE + '-' + + constants.AMP_PLUG_NET_SUBFLOW) + amp_for_lb_net_flow = linear_flow.Flow(sf_name) + amp_for_lb_flow = self.amp_flows.get_amphora_for_lb_subflow( + prefix=constants.ROLE_STANDALONE, + role=constants.ROLE_STANDALONE) + amp_for_lb_net_flow.add(amp_for_lb_flow) + amp_for_lb_net_flow.add(*self._get_amp_net_subflow( + sf_name, flavor_dict=flavor_dict)) + return amp_for_lb_net_flow + + def _create_active_standby_topology( + self, lf_name=constants.CREATE_LOADBALANCER_FLOW, + flavor_dict=None): + # When we boot up amphora for an active/standby topology, + # we should leverage the Nova anti-affinity capabilities + # to place the amphora on different hosts, also we need to check + # if anti-affinity-flag is enabled or not: + anti_affinity = CONF.nova.enable_anti_affinity + flows = [] + if anti_affinity: + # we need to create a server group first + flows.append( + compute_tasks.NovaServerGroupCreate( + name=lf_name + '-' + + constants.CREATE_SERVER_GROUP_FLOW, + requires=(constants.LOADBALANCER_ID), + provides=constants.SERVER_GROUP_ID)) + + # update server group id in lb table + flows.append( + database_tasks.UpdateLBServerGroupInDB( + name=lf_name + '-' + + constants.UPDATE_LB_SERVERGROUPID_FLOW, + requires=(constants.LOADBALANCER_ID, + constants.SERVER_GROUP_ID))) + + f_name = constants.CREATE_LOADBALANCER_FLOW 
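+        # NOTE(editor): The master and backup amphorae are built below inside
+        # an unordered flow, so a parallel taskflow engine may boot them
+        # concurrently; ordering is only guaranteed within each role's linear
+        # subflow. The composition pattern, as a minimal sketch:
+        #
+        #   amps = unordered_flow.Flow('amps')
+        #   amps.add(linear_flow.Flow('master-sub'),
+        #            linear_flow.Flow('backup-sub'))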
+ amps_flow = unordered_flow.Flow(f_name) + + master_sf_name = (constants.ROLE_MASTER + '-' + + constants.AMP_PLUG_NET_SUBFLOW) + master_amp_sf = linear_flow.Flow(master_sf_name) + master_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow( + prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER)) + master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name, + flavor_dict=flavor_dict)) + + backup_sf_name = (constants.ROLE_BACKUP + '-' + + constants.AMP_PLUG_NET_SUBFLOW) + backup_amp_sf = linear_flow.Flow(backup_sf_name) + backup_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow( + prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP)) + backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name, + flavor_dict=flavor_dict)) + + amps_flow.add(master_amp_sf, backup_amp_sf) + + return flows + [amps_flow] + + def _get_amp_net_subflow(self, sf_name, flavor_dict=None): + flows = [] + # If we have an SRIOV VIP, we need to setup a firewall in the amp + if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False): + flows.append(network_tasks.CreateSRIOVBasePort( + name=sf_name + '-' + constants.PLUG_VIP_AMPHORA, + requires=(constants.LOADBALANCER, constants.AMPHORA, + constants.SUBNET), + provides=constants.PORT_DATA)) + flows.append(compute_tasks.AttachPort( + name=sf_name + '-' + constants.ATTACH_PORT, + requires=(constants.AMPHORA), + rebind={constants.PORT: constants.PORT_DATA})) + flows.append(network_tasks.BuildAMPData( + name=sf_name + '-' + constants.BUILD_AMP_DATA, + requires=(constants.LOADBALANCER, constants.AMPHORA, + constants.PORT_DATA), + provides=constants.AMP_DATA)) + else: + flows.append(network_tasks.PlugVIPAmphora( + name=sf_name + '-' + constants.PLUG_VIP_AMPHORA, + requires=(constants.LOADBALANCER, constants.AMPHORA, + constants.SUBNET), + provides=constants.AMP_DATA)) + + flows.append(network_tasks.ApplyQosAmphora( + name=sf_name + '-' + constants.APPLY_QOS_AMP, + requires=(constants.LOADBALANCER, constants.AMP_DATA, + constants.UPDATE_DICT))) + flows.append(database_tasks.UpdateAmphoraVIPData( + name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA, + requires=constants.AMP_DATA)) + flows.append(network_tasks.GetAmphoraNetworkConfigs( + name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, + requires=(constants.LOADBALANCER, constants.AMPHORA), + provides=constants.AMPHORA_NETWORK_CONFIG)) + flows.append(amphora_driver_tasks.AmphoraPostVIPPlug( + name=sf_name + '-' + constants.AMP_POST_VIP_PLUG, + rebind={constants.AMPHORAE_NETWORK_CONFIG: + constants.AMPHORA_NETWORK_CONFIG}, + requires=(constants.LOADBALANCER, + constants.AMPHORAE_NETWORK_CONFIG))) + return flows + + def _create_listeners_flow(self, flavor_dict=None): + flows = [] + flows.append( + database_tasks.ReloadLoadBalancer( + name=constants.RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH, + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER + ) + ) + flows.append( + network_tasks.CalculateDelta( + requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), + provides=constants.DELTAS + ) + ) + flows.append( + network_tasks.HandleNetworkDeltas( + requires=constants.DELTAS, provides=constants.UPDATED_PORTS + ) + ) + flows.append( + network_tasks.GetAmphoraeNetworkConfigs( + requires=constants.LOADBALANCER_ID, + provides=constants.AMPHORAE_NETWORK_CONFIG + ) + ) + flows.append( + amphora_driver_tasks.AmphoraePostNetworkPlug( + requires=(constants.LOADBALANCER, constants.UPDATED_PORTS, + constants.AMPHORAE_NETWORK_CONFIG) + ) + ) + flows.append( + 
self.listener_flows.get_create_all_listeners_flow( + flavor_dict=flavor_dict) + ) + return flows + + def get_post_lb_amp_association_flow(self, prefix, topology, + flavor_dict=None): + """Reload the loadbalancer and create networking subflows for + + created/allocated amphorae. + :return: Post amphorae association subflow + """ + sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW + post_create_LB_flow = linear_flow.Flow(sf_name) + post_create_LB_flow.add( + database_tasks.ReloadLoadBalancer( + name=sf_name + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC, + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER)) + + if topology == constants.TOPOLOGY_ACTIVE_STANDBY: + post_create_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( + requires=constants.LOADBALANCER_ID, + provides=constants.AMPHORAE)) + vrrp_subflow = self.amp_flows.get_vrrp_subflow( + prefix, flavor_dict=flavor_dict) + post_create_LB_flow.add(vrrp_subflow) + + post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB( + requires=[constants.LOADBALANCER, constants.UPDATE_DICT])) + return post_create_LB_flow + + def _get_delete_listeners_flow(self, listeners, flavor_dict=None): + """Sets up an internal delete flow + + :param listeners: A list of listener dicts + :return: The flow for the deletion + """ + delete_listeners_flow = linear_flow.Flow('delete_listeners_flow') + delete_listeners_flow.add(network_tasks.UpdateVIPForDelete( + name='delete_update_vip', + requires=constants.LOADBALANCER_ID)) + + delete_internal_flow = unordered_flow.Flow('delete_listeners_flows') + for listener in listeners: + delete_internal_flow.add( + self.listener_flows.get_delete_listener_internal_flow( + listener, flavor_dict=flavor_dict)) + + delete_listeners_flow.add(delete_internal_flow) + return delete_listeners_flow + + def get_delete_load_balancer_flow(self, lb): + """Creates a flow to delete a load balancer. 
+
+        :returns: The flow for deleting a load balancer
+        """
+        return self._get_delete_load_balancer_flow(lb, False)
+
+    def _get_delete_pools_flow(self, pools):
+        """Sets up an internal pool delete flow
+
+        Because taskflow doesn't support loops, we add one delete subflow
+        per pool to an unordered flow.
+        :param pools: A list of pool dicts to delete
+        :return: The unordered flow for the pool deletions
+        """
+        pools_delete_flow = unordered_flow.Flow('pool_delete_flow')
+        for pool in pools:
+            pools_delete_flow.add(
+                self.pool_flows.get_delete_pool_flow_internal(
+                    pool[constants.POOL_ID]))
+        return pools_delete_flow
+
+    def _get_delete_load_balancer_flow(self, lb, cascade,
+                                       listeners=(), pools=()):
+        delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW)
+        delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
+            requires=constants.LOADBALANCER))
+        delete_LB_flow.add(compute_tasks.NovaServerGroupDelete(
+            requires=constants.SERVER_GROUP_ID))
+        delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy(
+            requires=constants.LOADBALANCER))
+        if cascade:
+            listeners_delete = self._get_delete_listeners_flow(listeners)
+            pools_delete = self._get_delete_pools_flow(pools)
+            delete_LB_flow.add(pools_delete)
+            delete_LB_flow.add(listeners_delete)
+        member_ports_delete = self.get_delete_member_ports_subflow(
+            lb[constants.LOADBALANCER_ID])
+        delete_LB_flow.add(member_ports_delete)
+        delete_LB_flow.add(network_tasks.UnplugVIP(
+            requires=constants.LOADBALANCER))
+        delete_LB_flow.add(network_tasks.DeallocateVIP(
+            requires=constants.LOADBALANCER))
+        delete_LB_flow.add(compute_tasks.DeleteAmphoraeOnLoadBalancer(
+            requires=constants.LOADBALANCER))
+        delete_LB_flow.add(database_tasks.MarkLBAmphoraeDeletedInDB(
+            requires=constants.LOADBALANCER))
+        delete_LB_flow.add(database_tasks.DisableLBAmphoraeHealthMonitoring(
+            requires=constants.LOADBALANCER))
+        delete_LB_flow.add(database_tasks.MarkLBDeletedInDB(
+            requires=constants.LOADBALANCER))
+        delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota(
+            requires=constants.PROJECT_ID))
+        if CONF.controller_worker.event_notifications:
+            delete_LB_flow.add(notification_tasks.SendDeleteNotification(
+                requires=constants.LOADBALANCER))
+        return delete_LB_flow
+
+    def get_cascade_delete_load_balancer_flow(self, lb, listeners, pools):
+        """Creates a flow to delete a load balancer.
+
+        :returns: The flow for deleting a load balancer
+        """
+        return self._get_delete_load_balancer_flow(lb, True,
+                                                   listeners=listeners,
+                                                   pools=pools)
+
+    def get_update_load_balancer_flow(self):
+        """Creates a flow to update a load balancer.
+
+        :returns: The flow for updating a load balancer
+        """
+        update_LB_flow = linear_flow.Flow(constants.UPDATE_LOADBALANCER_FLOW)
+        update_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
+            requires=constants.LOADBALANCER))
+        update_LB_flow.add(network_tasks.UpdateVIPSecurityGroup(
+            requires=constants.LOADBALANCER_ID,
+            provides=constants.VIP_SG_ID))
+        update_LB_flow.add(network_tasks.UpdateAmphoraSecurityGroup(
+            requires=constants.LOADBALANCER_ID))
+        update_LB_flow.add(network_tasks.ApplyQos(
+            requires=(constants.LOADBALANCER, constants.UPDATE_DICT)))
+        update_LB_flow.add(amphora_driver_tasks.ListenersUpdate(
+            requires=constants.LOADBALANCER_ID))
+        update_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
+            requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
+        update_LB_flow.add(database_tasks.MarkLBActiveInDB(
+            requires=constants.LOADBALANCER))
+        if CONF.controller_worker.event_notifications:
+            update_LB_flow.add(
+                notification_tasks.SendUpdateNotification(
+                    requires=constants.LOADBALANCER
+                )
+            )
+
+        return update_LB_flow
+
+    def get_failover_LB_flow(self, amps, lb):
+        """Fail over a load balancer.
+
+        1. Validate the VIP port is correct and present.
+        2. Build a replacement amphora.
+        3. Delete the failed amphora.
+        4. Configure the replacement amphora listeners.
+        5. Configure VRRP for the listeners.
+        6. Build the second replacement amphora.
+        7. Delete the second failed amphora.
+        8. Delete any extraneous amphorae.
+        9. Configure the listeners on the new amphorae.
+        10. Configure the VRRP on the new amphorae.
+        11. Reload the listener configurations to pick up VRRP changes.
+        12. Mark the load balancer back to ACTIVE.
+
+        :returns: The flow that will provide the failover.
+        """
+        lb_topology = lb[constants.FLAVOR][constants.LOADBALANCER_TOPOLOGY]
+        # Pick one amphora to be failed over if any exist.
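+        # NOTE(editor): ``amps`` is a list of amphora dicts. For
+        # illustration, the keys this method reads from a failed amphora
+        # (values shown are placeholders/documentation addresses):
+        #
+        #   {constants.ID: amphora_id,
+        #    constants.ROLE: constants.ROLE_MASTER,
+        #    constants.VRRP_IP: '2001:db8::10',
+        #    constants.VRRP_PORT_ID: vrrp_port_id,
+        #    constants.LB_NETWORK_IP: '192.0.2.10',
+        #    constants.COMPUTE_ID: compute_id}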
+ failed_amp = None + if amps: + failed_amp = amps.pop() + + failover_LB_flow = linear_flow.Flow( + constants.FAILOVER_LOADBALANCER_FLOW) + + # Revert LB to provisioning_status ERROR if this flow goes wrong + failover_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( + requires=constants.LOADBALANCER)) + + # Setup timeouts for our requests to the amphorae + timeout_dict = { + constants.CONN_MAX_RETRIES: + CONF.haproxy_amphora.active_connection_max_retries, + constants.CONN_RETRY_INTERVAL: + CONF.haproxy_amphora.active_connection_retry_interval} + + if failed_amp: + failed_amp_role = failed_amp.get(constants.ROLE) + if failed_amp_role in (constants.ROLE_MASTER, + constants.ROLE_BACKUP): + amp_role = 'master_or_backup' + elif failed_amp_role == constants.ROLE_STANDALONE: + amp_role = 'standalone' + else: + amp_role = 'undefined' + LOG.info("Performing failover for amphora: %s", + {"id": failed_amp.get(constants.ID), + "load_balancer_id": lb.get(constants.ID), + "lb_network_ip": failed_amp.get(constants.LB_NETWORK_IP), + "compute_id": failed_amp.get(constants.COMPUTE_ID), + "role": amp_role}) + + failover_LB_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB( + requires=constants.AMPHORA, + inject={constants.AMPHORA: failed_amp})) + + failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy( + requires=constants.AMPHORA, + inject={constants.AMPHORA: failed_amp})) + + # Check that the VIP port exists and is ok + failover_LB_flow.add( + network_tasks.AllocateVIPforFailover( + requires=constants.LOADBALANCER, + provides=(constants.VIP, constants.ADDITIONAL_VIPS))) + + # Update the database with the VIP information + failover_LB_flow.add(database_tasks.UpdateVIPAfterAllocation( + requires=(constants.LOADBALANCER_ID, constants.VIP), + provides=constants.LOADBALANCER)) + failover_LB_flow.add( + database_tasks.UpdateAdditionalVIPsAfterAllocation( + requires=(constants.LOADBALANCER_ID, + constants.ADDITIONAL_VIPS), + provides=constants.LOADBALANCER)) + + # Make sure the SG has the correct rules and re-apply to the + # VIP port. It is not used on the VIP port, but will help lock + # the SG as in use. + failover_LB_flow.add(network_tasks.UpdateVIPSecurityGroup( + requires=constants.LOADBALANCER_ID, provides=constants.VIP_SG_ID)) + + new_amp_role = constants.ROLE_STANDALONE + if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY: + new_amp_role = constants.ROLE_BACKUP + + # Get a replacement amphora and plug all of the networking. + # + # Do this early as the compute services have been observed to be + # unreliable. The community decided the chance that deleting first + # would open resources for an instance is less likely than the compute + # service failing to boot an instance for other reasons. 
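+        # NOTE(editor): The branch below chooses the replacement amphora's
+        # VIP address family from the failed amphora's VRRP address; as an
+        # illustration, octavia.common.utils.is_ipv6 behaves like:
+        #
+        #   utils.is_ipv6('192.0.2.10')    # False
+        #   utils.is_ipv6('2001:db8::10')  # True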
+ if failed_amp: + failed_vrrp_is_ipv6 = False + if failed_amp.get(constants.VRRP_IP): + failed_vrrp_is_ipv6 = utils.is_ipv6( + failed_amp[constants.VRRP_IP]) + failover_LB_flow.add( + self.amp_flows.get_amphora_for_lb_failover_subflow( + prefix=constants.FAILOVER_LOADBALANCER_FLOW, + role=new_amp_role, + failed_amp_vrrp_port_id=failed_amp.get( + constants.VRRP_PORT_ID), + is_vrrp_ipv6=failed_vrrp_is_ipv6, + flavor_dict=lb[constants.FLAVOR])) + else: + failover_LB_flow.add( + self.amp_flows.get_amphora_for_lb_failover_subflow( + prefix=constants.FAILOVER_LOADBALANCER_FLOW, + role=new_amp_role, flavor_dict=lb[constants.FLAVOR])) + + if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY: + failover_LB_flow.add(database_tasks.MarkAmphoraBackupInDB( + name=constants.MARK_AMP_BACKUP_INDB, + requires=constants.AMPHORA)) + + # Delete the failed amp + if failed_amp: + failover_LB_flow.add( + self.amp_flows.get_delete_amphora_flow(failed_amp)) + + # Update the data stored in the flow from the database + failover_LB_flow.add(database_tasks.ReloadLoadBalancer( + requires=constants.LOADBALANCER_ID, + provides=constants.LOADBALANCER)) + + # Configure the listener(s) + # We will run update on this amphora again later if this is + # an active/standby load balancer because we want this amp + # functional as soon as possible. It must run again to update + # the configurations for the new peers. + failover_LB_flow.add(amphora_driver_tasks.AmpListenersUpdate( + name=constants.AMP_LISTENER_UPDATE, + requires=(constants.LOADBALANCER, constants.AMPHORA), + inject={constants.TIMEOUT_DICT: timeout_dict})) + + # Bring up the new "backup" amphora VIP now to reduce the outage + # on the final failover. This dropped the outage from 8-9 seconds + # to less than one in my lab. + # This does mean some steps have to be repeated later to reconfigure + # for the second amphora as a peer. + if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY: + + failover_LB_flow.add(database_tasks.CreateVRRPGroupForLB( + name=new_amp_role + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, + requires=constants.LOADBALANCER_ID)) + + failover_LB_flow.add(network_tasks.GetAmphoraNetworkConfigsByID( + name=(new_amp_role + '-' + + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID), + requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID), + provides=constants.FIRST_AMP_NETWORK_CONFIGS)) + + failover_LB_flow.add( + amphora_driver_tasks.AmphoraUpdateVRRPInterface( + name=new_amp_role + '-' + constants.AMP_UPDATE_VRRP_INTF, + requires=constants.AMPHORA, + inject={constants.TIMEOUT_DICT: timeout_dict}, + provides=constants.FIRST_AMP_VRRP_INTERFACE)) + + failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPUpdate( + name=new_amp_role + '-' + constants.AMP_VRRP_UPDATE, + requires=(constants.LOADBALANCER_ID, constants.AMPHORA), + rebind={constants.AMPHORAE_NETWORK_CONFIG: + constants.FIRST_AMP_NETWORK_CONFIGS, + constants.AMP_VRRP_INT: + constants.FIRST_AMP_VRRP_INTERFACE}, + inject={constants.TIMEOUT_DICT: timeout_dict})) + + failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPStart( + name=new_amp_role + '-' + constants.AMP_VRRP_START, + requires=constants.AMPHORA, + inject={constants.TIMEOUT_DICT: timeout_dict})) + + # Start the listener. This needs to be done here because + # it will create the required haproxy check scripts for + # the VRRP deployed above. + # A "V" or newer amphora-agent will remove the need for this + # task here. 
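+            # NOTE(editor): A quick sketch of the ``rebind`` mechanism used
+            # by the VRRP tasks above: it maps a task's declared argument
+            # name to a differently named key in the flow storage, e.g.
+            #
+            #   # the task declares execute(self, amphorae_network_config)
+            #   # but should read the 'first amp' result instead:
+            #   rebind={constants.AMPHORAE_NETWORK_CONFIG:
+            #           constants.FIRST_AMP_NETWORK_CONFIGS}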
+ # TODO(johnsom) Remove this in the "X" cycle + failover_LB_flow.add(amphora_driver_tasks.ListenersStart( + name=new_amp_role + '-' + constants.AMP_LISTENER_START, + requires=(constants.LOADBALANCER, constants.AMPHORA))) + + # #### Work on standby amphora if needed ##### + + new_amp_role = constants.ROLE_MASTER + failed_amp = None + if amps: + failed_amp = amps.pop() + + if failed_amp: + failed_amp_role = failed_amp.get(constants.ROLE) + if failed_amp_role in (constants.ROLE_MASTER, + constants.ROLE_BACKUP): + amp_role = 'master_or_backup' + elif failed_amp_role == constants.ROLE_STANDALONE: + amp_role = 'standalone' + else: + amp_role = 'undefined' + LOG.info("Performing failover for amphora: %s", + {"id": failed_amp.get(constants.ID), + "load_balancer_id": lb.get(constants.ID), + "lb_network_ip": failed_amp.get( + constants.LB_NETWORK_IP), + "compute_id": failed_amp.get(constants.COMPUTE_ID), + "role": amp_role}) + + failover_LB_flow.add( + database_tasks.MarkAmphoraPendingDeleteInDB( + name=(new_amp_role + '-' + + constants.MARK_AMPHORA_PENDING_DELETE), + requires=constants.AMPHORA, + inject={constants.AMPHORA: failed_amp})) + + failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy( + name=(new_amp_role + '-' + + constants.MARK_AMPHORA_HEALTH_BUSY), + requires=constants.AMPHORA, + inject={constants.AMPHORA: failed_amp})) + + # Get a replacement amphora and plug all of the networking. + # + # Do this early as the compute services have been observed to be + # unreliable. The community decided the chance that deleting first + # would open resources for an instance is less likely than the + # compute service failing to boot an instance for other reasons. + failover_LB_flow.add( + self.amp_flows.get_amphora_for_lb_failover_subflow( + prefix=(new_amp_role + '-' + + constants.FAILOVER_LOADBALANCER_FLOW), + role=new_amp_role, + flavor_dict=lb[constants.FLAVOR])) + + failover_LB_flow.add(database_tasks.MarkAmphoraMasterInDB( + name=constants.MARK_AMP_MASTER_INDB, + requires=constants.AMPHORA)) + + # Delete the failed amp + if failed_amp: + failover_LB_flow.add( + self.amp_flows.get_delete_amphora_flow( + failed_amp)) + failover_LB_flow.add( + database_tasks.DisableAmphoraHealthMonitoring( + name=(new_amp_role + '-' + + constants.DISABLE_AMP_HEALTH_MONITORING), + requires=constants.AMPHORA, + inject={constants.AMPHORA: failed_amp})) + + # Remove any extraneous amphora + # Note: This runs in all topology situations. + # It should run before the act/stdby final listener update so + # that we don't bother attempting to update dead amphorae. + delete_extra_amps_flow = unordered_flow.Flow( + constants.DELETE_EXTRA_AMPHORAE_FLOW) + for amp in amps: + LOG.debug('Found extraneous amphora %s on load balancer %s. 
'
+                      'Deleting.', amp.get(constants.ID),
+                      lb.get(constants.ID))
+            delete_extra_amps_flow.add(
+                self.amp_flows.get_delete_amphora_flow(amp))
+
+        failover_LB_flow.add(delete_extra_amps_flow)
+
+        if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:
+            # Update the data stored in the flow from the database
+            failover_LB_flow.add(database_tasks.ReloadLoadBalancer(
+                name=new_amp_role + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC,
+                requires=constants.LOADBALANCER_ID,
+                provides=constants.LOADBALANCER))
+
+            failover_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
+                name=new_amp_role + '-' + constants.GET_AMPHORAE_FROM_LB,
+                requires=constants.LOADBALANCER_ID,
+                provides=constants.AMPHORAE))
+
+            failover_LB_flow.add(
+                amphora_driver_tasks.AmphoraeGetConnectivityStatus(
+                    name=(new_amp_role + '-' +
+                          constants.AMPHORAE_GET_CONNECTIVITY_STATUS),
+                    requires=constants.AMPHORAE,
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
+                    provides=constants.AMPHORAE_STATUS))
+
+            # The listeners update needs to run on all amphorae to update
+            # their peer configurations, so parallelize it with an
+            # unordered subflow.
+            update_amps_subflow = unordered_flow.Flow(
+                constants.UPDATE_AMPS_SUBFLOW)
+
+            # Set up parallel flows for each amp. We don't know the new amp
+            # details at flow creation time, so we set up a subflow for each
+            # amp on the LB; the tasks index into a list of amps to find the
+            # amphora each should work on.
+            update_amps_subflow.add(
+                amphora_driver_tasks.AmphoraIndexListenerUpdate(
+                    name=(constants.AMPHORA + '-0-' +
+                          constants.AMP_LISTENER_UPDATE),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE,
+                              constants.AMPHORAE_STATUS),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
+                    inject={constants.AMPHORA_INDEX: 0,
+                            constants.TIMEOUT_DICT: timeout_dict}))
+            update_amps_subflow.add(
+                amphora_driver_tasks.AmphoraIndexListenerUpdate(
+                    name=(constants.AMPHORA + '-1-' +
+                          constants.AMP_LISTENER_UPDATE),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE,
+                              constants.AMPHORAE_STATUS),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
+                    inject={constants.AMPHORA_INDEX: 1,
+                            constants.TIMEOUT_DICT: timeout_dict}))
+
+            failover_LB_flow.add(update_amps_subflow)
+
+            # Configure and enable keepalived in the amphora
+            failover_LB_flow.add(self.amp_flows.get_vrrp_subflow(
+                new_amp_role + '-' + constants.GET_VRRP_SUBFLOW,
+                timeout_dict, create_vrrp_group=False,
+                get_amphorae_status=False, flavor_dict=lb[constants.FLAVOR]))
+
+            # #### End of standby ####
+
+            # Reload the listener. This needs to be done here because
+            # it will create the required haproxy check scripts for
+            # the VRRP deployed above.
+            # A "V" or newer amphora-agent will remove the need for this
+            # task here.
+            # TODO(johnsom) Remove this in the "X" cycle
+            failover_LB_flow.add(
+                amphora_driver_tasks.AmphoraIndexListenersReload(
+                    name=(new_amp_role + '-' +
+                          constants.AMPHORA_RELOAD_LISTENER),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
+                    inject={constants.AMPHORA_INDEX: 1,
+                            constants.TIMEOUT_DICT: timeout_dict}))
+
+        # Remove any extraneous ports
+        # Note: Nova sometimes fails to delete ports attached to an instance.
+        #       For example, if you create an LB with a listener, then
+        #       'openstack server delete' the amphora, the vrrp port attached
+        #       to that instance will remain after the instance is deleted.
+ # TODO(johnsom) Fix this as part of + # https://storyboard.openstack.org/#!/story/2007077 + + # Mark LB ACTIVE + failover_LB_flow.add( + database_tasks.MarkLBActiveInDB(mark_subobjects=True, + requires=constants.LOADBALANCER)) + + return failover_LB_flow + + def get_delete_member_ports_subflow(self, load_balancer_id): + """A subflow that will delete all of the member ports on an LB + + :param load_balancer_id: A load balancer ID + :returns: A Taskflow flow + """ + port_delete_flow = unordered_flow.Flow('delete_member_ports') + + session = db_apis.get_session() + with session.begin(): + amps = self.amphora_repo.get_amphorae_ids_on_lb(session, + load_balancer_id) + for amp in amps: + with session.begin(): + ports = self.amphora_member_port_repo.get_port_ids(session, + amp) + for port in ports: + port_delete_flow.add( + network_tasks.DeletePort( + name='delete_member_port' + '-' + port, + inject={constants.PORT_ID: port})) + port_delete_flow.add( + database_tasks.DeleteAmpMemberPortInDB( + name='delete_member_port_in_db' + '-' + port, + inject={constants.PORT_ID: port})) + return port_delete_flow diff --git a/octavia/controller/worker/v2/flows/member_flows.py b/octavia/controller/worker/v2/flows/member_flows.py new file mode 100644 index 0000000000..ea3c84a1ba --- /dev/null +++ b/octavia/controller/worker/v2/flows/member_flows.py @@ -0,0 +1,217 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from taskflow.patterns import linear_flow +from taskflow.patterns import unordered_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks +from octavia.controller.worker.v2.tasks import network_tasks + + +class MemberFlows: + + def get_create_member_flow(self): + """Create a flow to create a member + + :returns: The flow for creating a member + """ + create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW) + create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( + requires=[constants.MEMBER, + constants.LISTENERS, + constants.LOADBALANCER, + constants.POOL_ID])) + create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB( + requires=constants.MEMBER)) + create_member_flow.add(network_tasks.CalculateDelta( + requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), + provides=constants.DELTAS)) + create_member_flow.add(network_tasks.HandleNetworkDeltas( + requires=(constants.DELTAS, constants.LOADBALANCER), + provides=constants.UPDATED_PORTS)) + create_member_flow.add(network_tasks.GetAmphoraeNetworkConfigs( + requires=constants.LOADBALANCER_ID, + provides=constants.AMPHORAE_NETWORK_CONFIG)) + create_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( + requires=(constants.LOADBALANCER, constants.UPDATED_PORTS, + constants.AMPHORAE_NETWORK_CONFIG))) + create_member_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + create_member_flow.add(database_tasks.MarkMemberActiveInDB( + requires=constants.MEMBER)) + create_member_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL_ID)) + create_member_flow.add(database_tasks. + MarkLBAndListenersActiveInDB( + requires=(constants.LISTENERS, + constants.LOADBALANCER_ID))) + + return create_member_flow + + def get_delete_member_flow(self): + """Create a flow to delete a member + + :returns: The flow for deleting a member + """ + delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW) + delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( + requires=[constants.MEMBER, + constants.LISTENERS, + constants.LOADBALANCER, + constants.POOL_ID])) + delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB( + requires=constants.MEMBER)) + delete_member_flow.add(network_tasks.CalculateDelta( + requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), + provides=constants.DELTAS)) + delete_member_flow.add(network_tasks.HandleNetworkDeltas( + requires=(constants.DELTAS, constants.LOADBALANCER), + provides=constants.UPDATED_PORTS)) + delete_member_flow.add(network_tasks.GetAmphoraeNetworkConfigs( + requires=constants.LOADBALANCER_ID, + provides=constants.AMPHORAE_NETWORK_CONFIG)) + delete_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( + requires=(constants.LOADBALANCER, constants.UPDATED_PORTS, + constants.AMPHORAE_NETWORK_CONFIG))) + delete_member_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + delete_member_flow.add(database_tasks.DeleteMemberInDB( + requires=constants.MEMBER)) + delete_member_flow.add(database_tasks.DecrementMemberQuota( + requires=constants.PROJECT_ID)) + delete_member_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL_ID)) + delete_member_flow.add(database_tasks. 
+ MarkLBAndListenersActiveInDB( + requires=(constants.LISTENERS, + constants.LOADBALANCER_ID))) + + return delete_member_flow + + def get_update_member_flow(self): + """Create a flow to update a member + + :returns: The flow for updating a member + """ + update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW) + update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( + requires=[constants.MEMBER, + constants.LISTENERS, + constants.LOADBALANCER, + constants.POOL_ID])) + update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB( + requires=constants.MEMBER)) + update_member_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + update_member_flow.add(database_tasks.UpdateMemberInDB( + requires=[constants.MEMBER, constants.UPDATE_DICT])) + update_member_flow.add(database_tasks.MarkMemberActiveInDB( + requires=constants.MEMBER)) + update_member_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL_ID)) + update_member_flow.add(database_tasks. + MarkLBAndListenersActiveInDB( + requires=(constants.LISTENERS, + constants.LOADBALANCER_ID))) + + return update_member_flow + + def get_batch_update_members_flow(self, old_members, new_members, + updated_members): + """Create a flow to batch update members + + :returns: The flow for batch updating members + """ + batch_update_members_flow = linear_flow.Flow( + constants.BATCH_UPDATE_MEMBERS_FLOW) + unordered_members_flow = unordered_flow.Flow( + constants.UNORDERED_MEMBER_UPDATES_FLOW) + unordered_members_active_flow = unordered_flow.Flow( + constants.UNORDERED_MEMBER_ACTIVE_FLOW) + + # Delete old members + unordered_members_flow.add( + lifecycle_tasks.MembersToErrorOnRevertTask( + inject={constants.MEMBERS: old_members}, + name=f'{constants.MEMBER_TO_ERROR_ON_REVERT_FLOW}-deleted')) + for m in old_members: + unordered_members_flow.add(database_tasks.DeleteMemberInDB( + inject={constants.MEMBER: m}, + name=f'{constants.DELETE_MEMBER_INDB}-' + f'{m[constants.MEMBER_ID]}')) + unordered_members_flow.add(database_tasks.DecrementMemberQuota( + requires=constants.PROJECT_ID, + name=f'{constants.DECREMENT_MEMBER_QUOTA_FLOW}-' + f'{m[constants.MEMBER_ID]}')) + + # Create new members + unordered_members_flow.add( + lifecycle_tasks.MembersToErrorOnRevertTask( + inject={constants.MEMBERS: new_members}, + name=f'{constants.MEMBER_TO_ERROR_ON_REVERT_FLOW}-created')) + for m in new_members: + unordered_members_active_flow.add( + database_tasks.MarkMemberActiveInDB( + inject={constants.MEMBER: m}, + name=f'{constants.MARK_MEMBER_ACTIVE_INDB}-' + f'{m[constants.MEMBER_ID]}')) + + # Update existing members + unordered_members_flow.add( + lifecycle_tasks.MembersToErrorOnRevertTask( + # updated_members is a list of (obj, dict), only pass `obj` + inject={constants.MEMBERS: [m[0] for m in updated_members]}, + name=f'{constants.MEMBER_TO_ERROR_ON_REVERT_FLOW}-updated')) + for m, um in updated_members: + um.pop(constants.ID, None) + unordered_members_active_flow.add( + database_tasks.MarkMemberActiveInDB( + inject={constants.MEMBER: m}, + name=f'{constants.MARK_MEMBER_ACTIVE_INDB}-' + f'{m[constants.MEMBER_ID]}')) + + batch_update_members_flow.add(unordered_members_flow) + + # Done, do real updates + batch_update_members_flow.add(network_tasks.CalculateDelta( + requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), + provides=constants.DELTAS)) + batch_update_members_flow.add(network_tasks.HandleNetworkDeltas( + requires=(constants.DELTAS, constants.LOADBALANCER), + 
provides=constants.UPDATED_PORTS)) + batch_update_members_flow.add(network_tasks.GetAmphoraeNetworkConfigs( + requires=constants.LOADBALANCER_ID, + provides=constants.AMPHORAE_NETWORK_CONFIG)) + batch_update_members_flow.add( + amphora_driver_tasks.AmphoraePostNetworkPlug( + requires=(constants.LOADBALANCER, constants.UPDATED_PORTS, + constants.AMPHORAE_NETWORK_CONFIG))) + + # Update the Listener (this makes the changes active on the Amp) + batch_update_members_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + + # Mark all the members ACTIVE here, then pool then LB/Listeners + batch_update_members_flow.add(unordered_members_active_flow) + batch_update_members_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL_ID)) + batch_update_members_flow.add( + database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LISTENERS, constants.LOADBALANCER_ID))) + + return batch_update_members_flow diff --git a/octavia/controller/worker/v2/flows/pool_flows.py b/octavia/controller/worker/v2/flows/pool_flows.py new file mode 100644 index 0000000000..e869b3c297 --- /dev/null +++ b/octavia/controller/worker/v2/flows/pool_flows.py @@ -0,0 +1,121 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.controller.worker.v2.tasks import lifecycle_tasks + + +class PoolFlows: + + def get_create_pool_flow(self): + """Create a flow to create a pool + + :returns: The flow for creating a pool + """ + create_pool_flow = linear_flow.Flow(constants.CREATE_POOL_FLOW) + create_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( + requires=[constants.POOL_ID, + constants.LISTENERS, + constants.LOADBALANCER])) + create_pool_flow.add(database_tasks.MarkPoolPendingCreateInDB( + requires=constants.POOL_ID)) + create_pool_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + create_pool_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL_ID)) + create_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return create_pool_flow + + def get_delete_pool_flow(self): + """Create a flow to delete a pool + + :returns: The flow for deleting a pool + """ + delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW) + delete_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( + requires=[constants.POOL_ID, + constants.LISTENERS, + constants.LOADBALANCER])) + delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB( + requires=constants.POOL_ID)) + delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota( + requires=constants.POOL_ID, provides=constants.POOL_CHILD_COUNT)) + delete_pool_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + delete_pool_flow.add(database_tasks.DeletePoolInDB( + requires=constants.POOL_ID)) + delete_pool_flow.add(database_tasks.DecrementPoolQuota( + requires=[constants.PROJECT_ID, constants.POOL_CHILD_COUNT])) + delete_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return delete_pool_flow + + def get_delete_pool_flow_internal(self, pool_id): + """Create a flow to delete a pool, etc. 
+ + :returns: The flow for deleting a pool + """ + delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW + '-' + + pool_id) + # health monitor should cascade + # members should cascade + delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB( + name='mark_pool_pending_delete_in_db_' + pool_id, + requires=constants.POOL_ID, + inject={constants.POOL_ID: pool_id})) + delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota( + name='count_pool_children_for_quota_' + pool_id, + requires=constants.POOL_ID, + provides=constants.POOL_CHILD_COUNT, + inject={constants.POOL_ID: pool_id})) + delete_pool_flow.add(database_tasks.DeletePoolInDB( + name='delete_pool_in_db_' + pool_id, + requires=constants.POOL_ID, + inject={constants.POOL_ID: pool_id})) + delete_pool_flow.add(database_tasks.DecrementPoolQuota( + name='decrement_pool_quota_' + pool_id, + requires=[constants.PROJECT_ID, constants.POOL_CHILD_COUNT])) + + return delete_pool_flow + + def get_update_pool_flow(self): + """Create a flow to update a pool + + :returns: The flow for updating a pool + """ + update_pool_flow = linear_flow.Flow(constants.UPDATE_POOL_FLOW) + update_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( + requires=[constants.POOL_ID, + constants.LISTENERS, + constants.LOADBALANCER])) + update_pool_flow.add(database_tasks.MarkPoolPendingUpdateInDB( + requires=constants.POOL_ID)) + update_pool_flow.add(amphora_driver_tasks.ListenersUpdate( + requires=constants.LOADBALANCER_ID)) + update_pool_flow.add(database_tasks.UpdatePoolInDB( + requires=[constants.POOL_ID, constants.UPDATE_DICT])) + update_pool_flow.add(database_tasks.MarkPoolActiveInDB( + requires=constants.POOL_ID)) + update_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( + requires=(constants.LOADBALANCER_ID, constants.LISTENERS))) + + return update_pool_flow diff --git a/octavia/controller/worker/v2/taskflow_jobboard_driver.py b/octavia/controller/worker/v2/taskflow_jobboard_driver.py new file mode 100644 index 0000000000..2bba8f053b --- /dev/null +++ b/octavia/controller/worker/v2/taskflow_jobboard_driver.py @@ -0,0 +1,161 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import contextlib + +from oslo_config import cfg +from oslo_log import log +from oslo_utils import netutils +from oslo_utils import strutils +from taskflow.jobs import backends as job_backends +from taskflow.persistence import backends as persistence_backends + +LOG = log.getLogger(__name__) +CONF = cfg.CONF + + +class JobboardTaskFlowDriver(metaclass=abc.ABCMeta): + + @abc.abstractmethod + def job_board(self, persistence): + """Setting up jobboard backend based on configuration setting. 
+ + :param persistence: taskflow persistence backend instance + :return: taskflow jobboard backend instance + """ + + +class MysqlPersistenceDriver: + + def __init__(self): + self.persistence_conf = { + 'connection': CONF.task_flow.persistence_connection, + 'max_pool_size': CONF.database.max_pool_size, + 'max_overflow': CONF.database.max_overflow, + 'pool_timeout': CONF.database.pool_timeout, + 'idle_timeout': CONF.database.connection_recycle_time + } + + def initialize(self): + # Run migrations once on service start. + backend = persistence_backends.fetch(self.persistence_conf) + with contextlib.closing(backend): + with contextlib.closing(backend.get_connection()) as connection: + connection.upgrade() + + @contextlib.contextmanager + def get_persistence(self): + # Rewrite taskflow get backend, so it won't run migrations on each call + backend = persistence_backends.fetch(self.persistence_conf) + with contextlib.closing(backend): + with contextlib.closing(backend.get_connection()) as conn: + conn.validate() + yield backend + + +class ZookeeperTaskFlowDriver(JobboardTaskFlowDriver): + + def __init__(self, persistence_driver): + self.persistence_driver = persistence_driver + + def job_board(self, persistence): + job_backends_hosts = ','.join( + [f'{host}:{CONF.task_flow.jobboard_backend_port}' + for host in CONF.task_flow.jobboard_backend_hosts]) + jobboard_backend_conf = { + 'board': 'zookeeper', + 'hosts': job_backends_hosts, + 'path': '/' + CONF.task_flow.jobboard_backend_namespace, + } + jobboard_backend_conf.update( + CONF.task_flow.jobboard_zookeeper_ssl_options) + return job_backends.backend(CONF.task_flow.jobboard_backend_namespace, + jobboard_backend_conf, + persistence=persistence) + + +class RedisTaskFlowDriver(JobboardTaskFlowDriver): + + def __init__(self, persistence_driver): + self.persistence_driver = persistence_driver + + def job_board(self, persistence): + + jobboard_backend_conf = { + 'board': 'redis', + 'host': CONF.task_flow.jobboard_backend_hosts[0], + 'port': CONF.task_flow.jobboard_backend_port, + 'db': CONF.task_flow.jobboard_redis_backend_db, + 'namespace': CONF.task_flow.jobboard_backend_namespace, + 'sentinel': CONF.task_flow.jobboard_redis_sentinel, + 'sentinel_fallbacks': [ + '%s:%d' % (netutils.escape_ipv6(host), + CONF.task_flow.jobboard_backend_port) + for host in CONF.task_flow.jobboard_backend_hosts[1:] + ] + } + if CONF.task_flow.jobboard_backend_username is not None: + jobboard_backend_conf['username'] = ( + CONF.task_flow.jobboard_backend_username) + if CONF.task_flow.jobboard_backend_password is not None: + jobboard_backend_conf['password'] = ( + CONF.task_flow.jobboard_backend_password) + jobboard_backend_conf.update( + CONF.task_flow.jobboard_redis_backend_ssl_options) + + sentinel_kwargs = CONF.task_flow.jobboard_redis_sentinel_ssl_options + if 'ssl' in sentinel_kwargs: + sentinel_kwargs['ssl'] = strutils.bool_from_string( + sentinel_kwargs['ssl']) + if CONF.task_flow.jobboard_redis_sentinel_username is not None: + sentinel_kwargs['username'] = ( + CONF.task_flow.jobboard_redis_sentinel_username) + if CONF.task_flow.jobboard_redis_sentinel_password is not None: + sentinel_kwargs['password'] = ( + CONF.task_flow.jobboard_redis_sentinel_password) + jobboard_backend_conf['sentinel_kwargs'] = sentinel_kwargs + + return job_backends.backend( + CONF.task_flow.jobboard_backend_namespace, + jobboard_backend_conf, + persistence=persistence) + + +class EtcdTaskFlowDriver(JobboardTaskFlowDriver): + + def __init__(self, persistence_driver): + 
self.persistence_driver = persistence_driver + + def job_board(self, persistence): + jobboard_backend_conf = { + 'board': 'etcd', + 'host': CONF.task_flow.jobboard_backend_hosts[0], + 'port': CONF.task_flow.jobboard_backend_port, + 'path': CONF.task_flow.jobboard_backend_namespace, + 'ttl': CONF.task_flow.jobboard_expiration_time, + } + if CONF.task_flow.jobboard_etcd_ssl_options['use_ssl']: + jobboard_backend_conf.update( + CONF.task_flow.jobboard_etcd_ssl_options) + jobboard_backend_conf.pop('use_ssl') + jobboard_backend_conf['protocol'] = 'https' + if CONF.task_flow.jobboard_etcd_timeout is not None: + jobboard_backend_conf['timeout'] = ( + CONF.task_flow.jobboard_etcd_timeout) + if CONF.task_flow.jobboard_etcd_api_path is not None: + jobboard_backend_conf['api_path'] = ( + CONF.task_flow.jobboard_etcd_api_path) + + return job_backends.backend(CONF.task_flow.jobboard_backend_namespace, + jobboard_backend_conf, + persistence=persistence) diff --git a/octavia/controller/worker/v2/tasks/__init__.py b/octavia/controller/worker/v2/tasks/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/controller/worker/v2/tasks/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py b/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py new file mode 100644 index 0000000000..1778465d9f --- /dev/null +++ b/octavia/controller/worker/v2/tasks/amphora_driver_tasks.py @@ -0,0 +1,790 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
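For context, here is a minimal sketch (not Octavia code) of how the jobboard drivers above are consumed: fetch a board through taskflow's jobs API, connect, and post a job for a worker to claim. The backend choice, connection values, and job name are illustrative placeholders.

from taskflow.jobs import backends as job_backends

jobboard_conf = {
    'board': 'redis',  # or 'zookeeper' / 'etcd', matching the drivers above
    'host': '127.0.0.1',  # placeholder host
    'port': 6379,
    'namespace': 'octavia_jobboard',
}
board = job_backends.backend('octavia_jobboard', jobboard_conf,
                             persistence=None)
board.connect()
try:
    # A controller posts work; a jobboard consumer elsewhere claims it.
    board.post('failover-amphora-example', details={'flow': 'failover'})
finally:
    board.close()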
+# + +import copy +from typing import List +from typing import Optional + +from oslo_config import cfg +from oslo_log import log as logging +from stevedore import driver as stevedore_driver +from taskflow import retry +from taskflow import task +from taskflow.types import failure + +from octavia.amphorae.backends.agent import agent_jinja_cfg +from octavia.amphorae.driver_exceptions import exceptions as driver_except +from octavia.common import constants +from octavia.common import utils +from octavia.controller.worker import task_utils as task_utilities +from octavia.db import api as db_apis +from octavia.db import repositories as repo +from octavia.network import data_models + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class BaseAmphoraTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.amphora_driver = stevedore_driver.DriverManager( + namespace='octavia.amphora.drivers', + name=CONF.controller_worker.amphora_driver, + invoke_on_load=True + ).driver + self.amphora_repo = repo.AmphoraRepository() + self.listener_repo = repo.ListenerRepository() + self.loadbalancer_repo = repo.LoadBalancerRepository() + self.task_utils = task_utilities.TaskUtils() + + +class AmpRetry(retry.Times): + + def on_failure(self, history, *args, **kwargs): + last_errors = history[-1][1] + max_retry_attempt = CONF.haproxy_amphora.connection_max_retries + for task_name, ex_info in last_errors.items(): + if len(history) <= max_retry_attempt: + # When taskflow persistence is enabled and flow/task state is + # saved in the backend. If flow(task) is restored(restart of + # worker,etc) we are getting ex_info as None - we need to RETRY + # task to check its real state. + if ex_info is None or ex_info._exc_info is None: + return retry.RETRY + excp = ex_info._exc_info[1] + if isinstance(excp, driver_except.AmpConnectionRetry): + return retry.RETRY + + return retry.REVERT_ALL + + +class AmpListenersUpdate(BaseAmphoraTask): + """Task to update the listeners on one amphora.""" + + def execute(self, loadbalancer, amphora, timeout_dict=None): + # Note, we don't want this to cause a revert as it may be used + # in a failover flow with both amps failing. Skip it and let + # health manager fix it. + # TODO(johnsom) Optimize this to use the dicts and not need the + # DB lookups + try: + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora[constants.ID]) + db_lb = self.loadbalancer_repo.get( + session, + id=loadbalancer[constants.LOADBALANCER_ID]) + self.amphora_driver.update_amphora_listeners( + db_lb, db_amp, timeout_dict) + except Exception as e: + LOG.error('Failed to update listeners on amphora %s. Skipping ' + 'this amphora as it is failing to update due to: %s', + db_amp.id, str(e)) + with session.begin(): + self.amphora_repo.update(session, db_amp.id, + status=constants.ERROR) + + +class AmphoraIndexListenerUpdate(BaseAmphoraTask): + """Task to update the listeners on one amphora.""" + + def execute(self, loadbalancer, amphora_index, amphorae, + amphorae_status: dict, new_amphora_id: str, timeout_dict=()): + # Note, we don't want this to cause a revert as it may be used + # in a failover flow with both amps failing. Skip it and let + # health manager fix it. 
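As an aside, a hedged sketch of how a retry controller such as AmpRetry above is wired into a flow: taskflow re-runs the guarded subflow while on_failure() returns retry.RETRY, and reverts everything once it returns retry.REVERT_ALL. The flow name and the use of plain retry.Times here are illustrative.

from taskflow import retry
from taskflow.patterns import linear_flow

# Any retry.Retry subclass (such as AmpRetry) can guard a subflow;
# retry.Times stands in for it in this sketch.
amp_update_subflow = linear_flow.Flow(
    'illustrative-amp-update-subflow',
    retry=retry.Times(3, name='retry-amp-update'))
# amp_update_subflow.add(AmpListenersUpdate(...)) would follow, and the
# subflow is then added to a parent flow like any other atom.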
+ + amphora_id = amphorae[amphora_index].get(constants.ID) + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping listener update because amphora %s " + "is not reachable.", amphora_id) + return + + try: + # TODO(johnsom) Optimize this to use the dicts and not need the + # DB lookups + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get( + session, + id=amphorae[amphora_index][constants.ID]) + db_lb = self.loadbalancer_repo.get( + session, + id=loadbalancer[constants.LOADBALANCER_ID]) + self.amphora_driver.update_amphora_listeners( + db_lb, db_amp, timeout_dict) + except Exception as e: + LOG.error('Failed to update listeners on amphora %s. Skipping ' + 'this amphora as it is failing to update due to: %s', + amphora_id, str(e)) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + session = db_apis.get_session() + with session.begin(): + self.amphora_repo.update(session, amphora_id, + status=constants.ERROR) + + +class ListenersUpdate(BaseAmphoraTask): + """Task to update amphora with all specified listeners' configurations.""" + + def execute(self, loadbalancer_id): + """Execute updates per listener for an amphora.""" + session = db_apis.get_session() + with session.begin(): + loadbalancer = self.loadbalancer_repo.get(session, + id=loadbalancer_id) + if loadbalancer: + self.amphora_driver.update(loadbalancer) + else: + LOG.error('Load balancer %s for listeners update not found. ' + 'Skipping update.', loadbalancer_id) + + def revert(self, loadbalancer_id, *args, **kwargs): + """Handle failed listeners updates.""" + + LOG.warning("Reverting listeners updates.") + session = db_apis.get_session() + with session.begin(): + loadbalancer = self.loadbalancer_repo.get(session, + id=loadbalancer_id) + for listener in loadbalancer.listeners: + self.task_utils.mark_listener_prov_status_error( + listener.id) + + +class ListenersStart(BaseAmphoraTask): + """Task to start all listeners on the vip.""" + + def execute(self, loadbalancer, amphora=None): + """Execute listener start routines for listeners on an amphora.""" + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + if db_lb.listeners: + if amphora is not None: + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora[constants.ID]) + else: + db_amp = amphora + self.amphora_driver.start(db_lb, db_amp) + LOG.debug("Started the listeners on the vip") + + def revert(self, loadbalancer, *args, **kwargs): + """Handle failed listeners starts.""" + + LOG.warning("Reverting listeners starts.") + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + for listener in db_lb.listeners: + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class AmphoraIndexListenersReload(BaseAmphoraTask): + """Task to reload all listeners on an amphora.""" + + def execute(self, loadbalancer, amphora_index, amphorae, + amphorae_status: dict, new_amphora_id: str, timeout_dict=None): + """Execute listener reload routines for listeners on an amphora.""" + if amphorae is None: + return + + amphora_id = amphorae[amphora_index].get(constants.ID) + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping listener reload 
because amphora %s " + "is not reachable.", amphora_id) + return + + # TODO(johnsom) Optimize this to use the dicts and not need the + # DB lookups + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get( + session, id=amphorae[amphora_index][constants.ID]) + db_lb = self.loadbalancer_repo.get( + session, + id=loadbalancer[constants.LOADBALANCER_ID]) + if db_lb.listeners: + try: + self.amphora_driver.reload(db_lb, db_amp, timeout_dict) + except Exception as e: + LOG.warning('Failed to reload listeners on amphora %s. ' + 'Skipping this amphora as it is failing to ' + 'reload due to: %s', amphora_id, str(e)) + # Update only the status of the newly created amphora during + # the failover + if amphora_id == new_amphora_id: + with session.begin(): + self.amphora_repo.update(session, amphora_id, + status=constants.ERROR) + + +class ListenerDelete(BaseAmphoraTask): + """Task to delete the listener on the vip.""" + + def execute(self, listener): + """Execute listener delete routines for an amphora.""" + session = db_apis.get_session() + with session.begin(): + db_listener = self.listener_repo.get( + session, id=listener[constants.LISTENER_ID]) + self.amphora_driver.delete(db_listener) + LOG.debug("Deleted the listener on the vip") + + def revert(self, listener, *args, **kwargs): + """Handle a failed listener delete.""" + + LOG.warning("Reverting listener delete.") + + self.task_utils.mark_listener_prov_status_error( + listener[constants.LISTENER_ID]) + + +class AmphoraGetInfo(BaseAmphoraTask): + """Task to get information on an amphora.""" + + def execute(self, amphora): + """Execute get_info routine for an amphora.""" + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora[constants.ID]) + self.amphora_driver.get_info(db_amp) + + +class AmphoraGetDiagnostics(BaseAmphoraTask): + """Task to get diagnostics on the amphora and the loadbalancers.""" + + def execute(self, amphora): + """Execute get_diagnostic routine for an amphora.""" + self.amphora_driver.get_diagnostics(amphora) + + +class AmphoraFinalize(BaseAmphoraTask): + """Task to finalize the amphora before any listeners are configured.""" + + def execute(self, amphora): + """Execute finalize_amphora routine.""" + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora.get(constants.ID)) + self.amphora_driver.finalize_amphora(db_amp) + LOG.debug("Finalized the amphora.") + + def revert(self, result, amphora, *args, **kwargs): + """Handle a failed amphora finalize.""" + if isinstance(result, failure.Failure): + return + LOG.warning("Reverting amphora finalize.") + self.task_utils.mark_amphora_status_error( + amphora.get(constants.ID)) + + +class AmphoraPostNetworkPlug(BaseAmphoraTask): + """Task to notify the amphora post network plug.""" + + def execute(self, amphora, ports, amphora_network_config): + """Execute post_network_plug routine.""" + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora[constants.ID]) + + for port in ports: + net = data_models.Network(**port.pop(constants.NETWORK)) + ips = port.pop(constants.FIXED_IPS) + fixed_ips = [] + for ip in ips: + subnet_arg = ip.pop(constants.SUBNET) + host_routes = subnet_arg.get('host_routes') + if host_routes: + subnet_arg['host_routes'] = [ + data_models.HostRoute(**hr) + for hr in host_routes + ] + fixed_ips.append(data_models.FixedIP( + subnet=data_models.Subnet(**subnet_arg), **ip)) + 
self.amphora_driver.post_network_plug( + db_amp, + data_models.Port(network=net, fixed_ips=fixed_ips, **port), + amphora_network_config) + + LOG.debug("post_network_plug called on compute instance " + "%(compute_id)s for port %(port_id)s", + {"compute_id": amphora[constants.COMPUTE_ID], + "port_id": port[constants.ID]}) + + def revert(self, result, amphora, *args, **kwargs): + """Handle a failed post network plug.""" + if isinstance(result, failure.Failure): + return + LOG.warning("Reverting post network plug.") + self.task_utils.mark_amphora_status_error(amphora.get(constants.ID)) + + +class AmphoraePostNetworkPlug(BaseAmphoraTask): + """Task to notify the amphorae post network plug.""" + + def execute(self, loadbalancer, updated_ports, amphorae_network_config): + """Execute post_network_plug routine.""" + amp_post_plug = AmphoraPostNetworkPlug() + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + for amphora in db_lb.amphorae: + if amphora.id in updated_ports: + amp_post_plug.execute(amphora.to_dict(), + updated_ports[amphora.id], + amphorae_network_config[amphora.id]) + + def revert(self, result, loadbalancer, updated_ports, *args, **kwargs): + """Handle a failed post network plug.""" + if isinstance(result, failure.Failure): + return + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + LOG.warning("Reverting post network plug.") + for amphora in filter( + lambda amp: amp.status == constants.AMPHORA_ALLOCATED, + db_lb.amphorae): + + self.task_utils.mark_amphora_status_error(amphora.id) + + +class AmphoraPostVIPPlug(BaseAmphoraTask): + """Task to notify the amphora post VIP plug.""" + + def execute(self, amphora, loadbalancer, amphorae_network_config): + """Execute post_vip_routine.""" + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora.get(constants.ID)) + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + vrrp_port = data_models.Port( + **amphorae_network_config[ + amphora.get(constants.ID)][constants.VRRP_PORT]) + # Required for noop-case + vip_arg = copy.deepcopy( + amphorae_network_config[ + amphora.get(constants.ID)][constants.VIP_SUBNET]) + if vip_arg: + host_routes = vip_arg.get('host_routes') + if host_routes: + vip_arg['host_routes'] = [ + data_models.HostRoute(**hr) + for hr in host_routes + ] + vip_subnet = data_models.Subnet(**vip_arg) + else: + vip_subnet = data_models.Subnet() + + additional_vip_data = [] + for add_vip in amphorae_network_config[ + amphora[constants.ID]]['additional_vip_data']: + + subnet_arg = copy.deepcopy(add_vip['subnet']) + subnet_arg['host_routes'] = [ + data_models.HostRoute(**hr) + for hr in subnet_arg['host_routes']] + subnet = data_models.Subnet(**subnet_arg) + + additional_vip_data.append( + data_models.AdditionalVipData( + ip_address=add_vip['ip_address'], + subnet=subnet)) + + self.amphora_driver.post_vip_plug( + db_amp, db_lb, amphorae_network_config, vrrp_port, + vip_subnet, additional_vip_data=additional_vip_data) + LOG.debug("Notified amphora of vip plug") + + def revert(self, result, amphora, loadbalancer, *args, **kwargs): + """Handle a failed amphora vip plug notification.""" + if isinstance(result, failure.Failure): + return + LOG.warning("Reverting post vip plug.") + self.task_utils.mark_amphora_status_error(amphora.get(constants.ID)) + 
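The dict-to-data-model rehydration done in AmphoraPostNetworkPlug above is a recurring pattern in these v2 tasks, since flows carry plain dicts between tasks. A standalone sketch, with made-up field values, assuming the octavia.network.data_models import at the top of this module:

port_dict = {
    'id': 'port-1',
    'network': {'id': 'net-1'},
    'fixed_ips': [{'ip_address': '10.0.0.5',
                   'subnet': {'id': 'subnet-1'}}],
}
net = data_models.Network(**port_dict.pop('network'))
fixed_ips = [
    data_models.FixedIP(subnet=data_models.Subnet(**ip.pop('subnet')), **ip)
    for ip in port_dict.pop('fixed_ips')
]
port = data_models.Port(network=net, fixed_ips=fixed_ips, **port_dict)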
+ +class AmphoraePostVIPPlug(BaseAmphoraTask): + """Task to notify the amphorae post VIP plug.""" + + def execute(self, loadbalancer, amphorae_network_config): + """Execute post_vip_plug across the amphorae.""" + amp_post_vip_plug = AmphoraPostVIPPlug() + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + for amphora in db_lb.amphorae: + amp_post_vip_plug.execute(amphora.to_dict(), + loadbalancer, + amphorae_network_config) + + +class AmphoraCertUpload(BaseAmphoraTask): + """Upload a certificate to the amphora.""" + + def execute(self, amphora, server_pem): + """Execute cert_update_amphora routine.""" + LOG.debug("Upload cert in amphora REST driver") + fer = utils.get_server_certs_key_passphrases_fernet() + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora.get(constants.ID)) + self.amphora_driver.upload_cert_amp( + db_amp, fer.decrypt(server_pem.encode('utf-8'))) + + +# TODO(johnsom) REMOVE ME! +class AmphoraUpdateVRRPInterface(BaseAmphoraTask): + """Task to get and update the VRRP interface device name from amphora.""" + + def execute(self, amphora, timeout_dict=None): + try: + # TODO(johnsom) Optimize this to use the dicts and not need the + # DB lookups + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora[constants.ID]) + interface = self.amphora_driver.get_interface_from_ip( + db_amp, db_amp.vrrp_ip, timeout_dict=timeout_dict) + except Exception as e: + # This can occur when an active/standby LB has no listener + LOG.error('Failed to get amphora VRRP interface on amphora ' + '%s. Skipping this amphora as it is failing due to: ' + '%s', amphora.get(constants.ID), str(e)) + with session.begin(): + self.amphora_repo.update(session, + amphora.get(constants.ID), + status=constants.ERROR) + return None + + with session.begin(): + self.amphora_repo.update(session, amphora[constants.ID], + vrrp_interface=interface) + return interface + + +class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask): + """Task to get and update the VRRP interface device name from amphora.""" + + def execute(self, amphora_index, amphorae, amphorae_status: dict, + new_amphora_id: str, timeout_dict=None): + amphora_id = amphorae[amphora_index][constants.ID] + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping VRRP interface update because amphora %s " + "is not reachable.", amphora_id) + return None + + try: + # TODO(johnsom) Optimize this to use the dicts and not need the + # DB lookups + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora_id) + interface = self.amphora_driver.get_interface_from_ip( + db_amp, db_amp.vrrp_ip, timeout_dict=timeout_dict) + except Exception as e: + # This can occur when an active/standby LB has no listener + LOG.error('Failed to get amphora VRRP interface on amphora ' + '%s. 
Skipping this amphora as it is failing due to: ' + '%s', amphora_id, str(e)) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + with session.begin(): + self.amphora_repo.update(session, amphora_id, + status=constants.ERROR) + return None + + with session.begin(): + self.amphora_repo.update(session, amphora_id, + vrrp_interface=interface) + return interface + + +class AmphoraVRRPUpdate(BaseAmphoraTask): + """Task to update the VRRP configuration of an amphora.""" + + def execute(self, loadbalancer_id, amphorae_network_config, amphora, + amp_vrrp_int, timeout_dict=None): + """Execute update_vrrp_conf.""" + # Note, we don't want this to cause a revert as it may be used + # in a failover flow with both amps failing. Skip it and let + # health manager fix it. + amphora_id = amphora[constants.ID] + try: + # TODO(johnsom) Optimize this to use the dicts and not need the + # DB lookups + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora_id) + loadbalancer = self.loadbalancer_repo.get(session, + id=loadbalancer_id) + db_amp.vrrp_interface = amp_vrrp_int + self.amphora_driver.update_vrrp_conf( + loadbalancer, amphorae_network_config, db_amp, timeout_dict) + except Exception as e: + LOG.error('Failed to update VRRP configuration amphora %s. ' + 'Skipping this amphora as it is failing to update due ' + 'to: %s', amphora_id, str(e)) + with session.begin(): + self.amphora_repo.update(session, amphora_id, + status=constants.ERROR) + + LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id) + + +class AmphoraIndexVRRPUpdate(BaseAmphoraTask): + """Task to update the VRRP configuration of an amphora.""" + + def execute(self, loadbalancer_id, amphorae_network_config, amphora_index, + amphorae, amphorae_status: dict, amp_vrrp_int: Optional[str], + new_amphora_id: str, timeout_dict=None): + """Execute update_vrrp_conf.""" + # Note, we don't want this to cause a revert as it may be used + # in a failover flow with both amps failing. Skip it and let + # health manager fix it. + amphora_id = amphorae[amphora_index][constants.ID] + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping VRRP configuration because amphora %s " + "is not reachable.", amphora_id) + return + + try: + # TODO(johnsom) Optimize this to use the dicts and not need the + # DB lookups + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora_id) + loadbalancer = self.loadbalancer_repo.get(session, + id=loadbalancer_id) + db_amp.vrrp_interface = amp_vrrp_int + self.amphora_driver.update_vrrp_conf( + loadbalancer, amphorae_network_config, db_amp, timeout_dict) + except Exception as e: + LOG.error('Failed to update VRRP configuration amphora %s. ' + 'Skipping this amphora as it is failing to update due ' + 'to: %s', amphora_id, str(e)) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + with session.begin(): + self.amphora_repo.update(session, amphora_id, + status=constants.ERROR) + return + LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id) + + +class AmphoraVRRPStart(BaseAmphoraTask): + """Task to start keepalived on an amphora. + + This will reload keepalived if it is already running. 
+ """ + + def execute(self, amphora, timeout_dict=None): + # TODO(johnsom) Optimize this to use the dicts and not need the + # DB lookups + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get( + session, id=amphora[constants.ID]) + self.amphora_driver.start_vrrp_service(db_amp, timeout_dict) + LOG.debug("Started VRRP on amphora %s.", amphora[constants.ID]) + + +class AmphoraIndexVRRPStart(BaseAmphoraTask): + """Task to start keepalived on an amphora. + + This will reload keepalived if it is already running. + """ + + def execute(self, amphora_index, amphorae, amphorae_status: dict, + new_amphora_id: str, timeout_dict=None): + # TODO(johnsom) Optimize this to use the dicts and not need the + # DB lookups + amphora_id = amphorae[amphora_index][constants.ID] + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping VRRP start because amphora %s " + "is not reachable.", amphora_id) + return + + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, id=amphora_id) + try: + self.amphora_driver.start_vrrp_service(db_amp, timeout_dict) + except Exception as e: + LOG.error('Failed to start VRRP on amphora %s. ' + 'Skipping this amphora as it is failing to start due ' + 'to: %s', amphora_id, str(e)) + # Update only the status of the newly created amphora during the + # failover + if amphora_id == new_amphora_id: + with session.begin(): + self.amphora_repo.update(session, amphora_id, + status=constants.ERROR) + return + LOG.debug("Started VRRP on amphora %s.", + amphorae[amphora_index][constants.ID]) + + +class AmphoraComputeConnectivityWait(BaseAmphoraTask): + """Task to wait for the compute instance to be up.""" + + def execute(self, amphora, raise_retry_exception=False): + """Execute get_info routine for an amphora until it responds.""" + try: + session = db_apis.get_session() + with session.begin(): + db_amphora = self.amphora_repo.get( + session, id=amphora.get(constants.ID)) + amp_info = self.amphora_driver.get_info( + db_amphora, raise_retry_exception=raise_retry_exception) + LOG.debug('Successfuly connected to amphora %s: %s', + amphora.get(constants.ID), amp_info) + except driver_except.TimeOutException: + LOG.error("Amphora compute instance failed to become reachable. 
" + "This either means the compute driver failed to fully " + "boot the instance inside the timeout interval or the " + "instance is not reachable via the lb-mgmt-net.") + with session.begin(): + self.amphora_repo.update(session, + amphora.get(constants.ID), + status=constants.ERROR) + raise + + +class AmphoraConfigUpdate(BaseAmphoraTask): + """Task to push a new amphora agent configuration to the amphora.""" + + def execute(self, amphora, flavor): + # Extract any flavor based settings + if flavor: + topology = flavor.get(constants.LOADBALANCER_TOPOLOGY, + CONF.controller_worker.loadbalancer_topology) + else: + topology = CONF.controller_worker.loadbalancer_topology + + # Build the amphora agent config + agent_cfg_tmpl = agent_jinja_cfg.AgentJinjaTemplater() + agent_config = agent_cfg_tmpl.build_agent_config( + amphora.get(constants.ID), topology) + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora[constants.ID]) + # Push the new configuration to the amphora + try: + self.amphora_driver.update_amphora_agent_config(db_amp, + agent_config) + except driver_except.AmpDriverNotImplementedError: + LOG.error('Amphora {} does not support agent configuration ' + 'update. Please update the amphora image for this ' + 'amphora. Skipping.'. + format(amphora.get(constants.ID))) + + +class AmphoraeGetConnectivityStatus(BaseAmphoraTask): + """Task that checks amphorae connectivity status. + + Check and return the connectivity status of both amphorae in ACTIVE STANDBY + load balancers + """ + + def execute(self, amphorae: List[dict], new_amphora_id: str, + timeout_dict=None): + amphorae_status = {} + + for amphora in amphorae: + amphora_id = amphora[constants.ID] + amphorae_status[amphora_id] = {} + + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, id=amphora_id) + + try: + # Verify if the amphora is reachable + self.amphora_driver.check(db_amp, timeout_dict=timeout_dict) + except Exception as e: + LOG.exception("Cannot get status for amphora %s", + amphora_id) + # In case it fails and the tested amphora is the newly created + # amphora, it's not a normal error handling, re-raise the + # exception + if amphora_id == new_amphora_id: + raise e + amphorae_status[amphora_id][constants.UNREACHABLE] = True + else: + amphorae_status[amphora_id][constants.UNREACHABLE] = False + + return amphorae_status + + +class SetAmphoraFirewallRules(BaseAmphoraTask): + """Task to push updated firewall ruls to an amphora.""" + + def execute(self, amphorae: List[dict], amphora_index: int, + amphora_firewall_rules: List[dict], amphorae_status: dict, + timeout_dict=None): + + if (amphora_firewall_rules and + amphora_firewall_rules[0].get('non-sriov-vip', False)): + # Not an SRIOV VIP, so skip setting firewall rules. + # This is already logged in GetAmphoraFirewallRules. 
+ return + + amphora_id = amphorae[amphora_index][constants.ID] + amphora_status = amphorae_status.get(amphora_id, {}) + if amphora_status.get(constants.UNREACHABLE): + LOG.warning("Skipping firewall rules update because amphora %s " + "is not reachable.", amphora_id) + return + + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, id=amphora_id) + + self.amphora_driver.set_interface_rules( + db_amp, + amphorae[amphora_index][constants.VRRP_IP], + amphora_firewall_rules, timeout_dict=timeout_dict) diff --git a/octavia/controller/worker/v2/tasks/cert_task.py b/octavia/controller/worker/v2/tasks/cert_task.py new file mode 100644 index 0000000000..ffdbf3d28b --- /dev/null +++ b/octavia/controller/worker/v2/tasks/cert_task.py @@ -0,0 +1,51 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo_config import cfg +from stevedore import driver as stevedore_driver +from taskflow import task + +from octavia.common import utils + +CONF = cfg.CONF + + +class BaseCertTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.cert_generator = stevedore_driver.DriverManager( + namespace='octavia.cert_generator', + name=CONF.certificates.cert_generator, + invoke_on_load=True, + ).driver + + +class GenerateServerPEMTask(BaseCertTask): + """Create the server certs for the agent comm + + Use the amphora_id for the CN + """ + + def execute(self, amphora_id): + cert = self.cert_generator.generate_cert_key_pair( + cn=amphora_id, + validity=CONF.certificates.cert_validity_time) + fer = utils.get_server_certs_key_passphrases_fernet() + + # storing in db requires conversion bytes to string + # (required for python3) + return fer.encrypt(cert.certificate + cert.private_key).decode('utf-8') diff --git a/octavia/controller/worker/v2/tasks/compute_tasks.py b/octavia/controller/worker/v2/tasks/compute_tasks.py new file mode 100644 index 0000000000..4c556f9fc4 --- /dev/null +++ b/octavia/controller/worker/v2/tasks/compute_tasks.py @@ -0,0 +1,372 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
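As a self-contained illustration of the Fernet handling in GenerateServerPEMTask above (and of the matching decryption in the compute tasks below), assuming only the cryptography library; the key and PEM bytes are dummies:

from cryptography.fernet import Fernet

fer = Fernet(Fernet.generate_key())  # stands in for the configured passphrase
pem_bytes = b'-----BEGIN CERTIFICATE-----\n...'
stored = fer.encrypt(pem_bytes).decode('utf-8')  # string form stored in the DB
restored = fer.decrypt(stored.encode('utf-8'))   # bytes pushed to the amphora
assert restored == pem_bytes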
+# + +import time + +from oslo_config import cfg +from oslo_log import log as logging +from stevedore import driver as stevedore_driver +from taskflow import retry +from taskflow import task +from taskflow.types import failure +import tenacity + +from octavia.amphorae.backends.agent import agent_jinja_cfg +from octavia.common import constants +from octavia.common import exceptions +from octavia.common.jinja.logging import logging_jinja_cfg +from octavia.common.jinja import user_data_jinja_cfg +from octavia.common import utils +from octavia.controller.worker import amphora_rate_limit +from octavia.db import api as db_apis +from octavia.db import repositories as repo + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class BaseComputeTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.compute = stevedore_driver.DriverManager( + namespace='octavia.compute.drivers', + name=CONF.controller_worker.compute_driver, + invoke_on_load=True + ).driver + self.loadbalancer_repo = repo.LoadBalancerRepository() + self.rate_limit = amphora_rate_limit.AmphoraBuildRateLimit() + + +class ComputeRetry(retry.Times): + + def on_failure(self, history, *args, **kwargs): + last_errors = history[-1][1] + max_retry_attempt = CONF.controller_worker.amp_active_retries + for task_name, ex_info in last_errors.items(): + if len(history) <= max_retry_attempt: + # When taskflow persistence is enabled and flow/task state is + # saved in the backend. If flow(task) is restored(restart of + # worker,etc) we are getting ex_info as None - we need to RETRY + # task to check its real state. + if ex_info is None or ex_info._exc_info is None: + return retry.RETRY + excp = ex_info._exc_info[1] + if isinstance(excp, exceptions.ComputeWaitTimeoutException): + return retry.RETRY + + return retry.REVERT_ALL + + +class ComputeCreate(BaseComputeTask): + """Create the compute instance for a new amphora.""" + + def execute(self, amphora_id, server_group_id, config_drive_files=None, + build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, + ports=None, flavor=None, availability_zone=None): + """Create an amphora + + :param availability_zone: availability zone metadata dictionary + + :returns: an amphora + """ + ports = ports or [] + network_ids = CONF.controller_worker.amp_boot_network_list[:] + config_drive_files = config_drive_files or {} + user_data = None + LOG.debug("Compute create execute for amphora with id %s", amphora_id) + + user_data_config_drive = CONF.controller_worker.user_data_config_drive + key_name = CONF.controller_worker.amp_ssh_key_name + + # Apply any Octavia flavor customizations + if flavor: + topology = flavor.get(constants.LOADBALANCER_TOPOLOGY, + CONF.controller_worker.loadbalancer_topology) + amp_compute_flavor = flavor.get( + constants.COMPUTE_FLAVOR, CONF.controller_worker.amp_flavor_id) + amp_image_tag = flavor.get( + constants.AMP_IMAGE_TAG, CONF.controller_worker.amp_image_tag) + else: + topology = CONF.controller_worker.loadbalancer_topology + amp_compute_flavor = CONF.controller_worker.amp_flavor_id + amp_image_tag = CONF.controller_worker.amp_image_tag + + if availability_zone: + amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK) + if amp_network: + network_ids = [amp_network] + try: + if CONF.haproxy_amphora.build_rate_limit != -1: + self.rate_limit.add_to_build_request_queue( + amphora_id, build_type_priority) + + agent_cfg = agent_jinja_cfg.AgentJinjaTemplater() + 
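A small sketch of the override precedence ComputeCreate applies above: a key set in the Octavia flavor wins over the operator config default, and an availability zone can further override the boot network. The literal values stand in for CONF settings and are not real defaults.

flavor = {'loadbalancer_topology': 'ACTIVE_STANDBY'}
availability_zone = {'management_network': 'az-mgmt-net-id'}

conf_topology = 'SINGLE'              # CONF.controller_worker.loadbalancer_topology
boot_networks = ['default-mgmt-net']  # CONF.controller_worker.amp_boot_network_list

topology = (flavor or {}).get('loadbalancer_topology', conf_topology)
amp_network = (availability_zone or {}).get('management_network')
if amp_network:
    boot_networks = [amp_network]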
config_drive_files['/etc/octavia/amphora-agent.conf'] = ( + agent_cfg.build_agent_config(amphora_id, topology)) + + logging_cfg = logging_jinja_cfg.LoggingJinjaTemplater( + CONF.amphora_agent.logging_template_override) + config_drive_files['/etc/rsyslog.d/10-rsyslog.conf'] = ( + logging_cfg.build_logging_config()) + + udtemplater = user_data_jinja_cfg.UserDataJinjaCfg() + user_data = udtemplater.build_user_data_config( + config_drive_files if user_data_config_drive else {}) + if user_data_config_drive: + config_drive_files = None + + compute_id = self.compute.build( + name="amphora-" + amphora_id, + amphora_flavor=amp_compute_flavor, + image_tag=amp_image_tag, + image_owner=CONF.controller_worker.amp_image_owner_id, + key_name=key_name, + sec_groups=CONF.controller_worker.amp_secgroup_list, + network_ids=network_ids, + port_ids=[port.id for port in ports], + config_drive_files=config_drive_files, + user_data=user_data, + server_group_id=server_group_id, + availability_zone=availability_zone) + + LOG.info("Server created with id: %s for amphora id: %s", + compute_id, amphora_id) + return compute_id + + except Exception: + LOG.exception("Compute create for amphora id: %s failed", + amphora_id) + raise + + def revert(self, result, amphora_id, *args, **kwargs): + """This method will revert the creation of the + + amphora. So it will just delete it in this flow + """ + if isinstance(result, failure.Failure): + return + compute_id = result + LOG.warning("Reverting compute create for amphora with id " + "%(amp)s and compute id: %(comp)s", + {'amp': amphora_id, 'comp': compute_id}) + try: + self.compute.delete(compute_id) + except Exception: + LOG.exception("Reverting compute create failed") + + +class CertComputeCreate(ComputeCreate): + def execute(self, amphora_id, server_pem, server_group_id, + build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, + ports=None, flavor=None, availability_zone=None): + """Create an amphora + + :param availability_zone: availability zone metadata dictionary + + :returns: an amphora + """ + + # load client certificate + with open(CONF.controller_worker.client_ca, + encoding='utf-8') as client_ca: + ca = client_ca.read() + + fer = utils.get_server_certs_key_passphrases_fernet() + config_drive_files = { + '/etc/octavia/certs/server.pem': fer.decrypt( + server_pem.encode("utf-8")).decode("utf-8"), + '/etc/octavia/certs/client_ca.pem': ca} + return super().execute( + amphora_id, config_drive_files=config_drive_files, + build_type_priority=build_type_priority, + server_group_id=server_group_id, ports=ports, flavor=flavor, + availability_zone=availability_zone) + + +class DeleteAmphoraeOnLoadBalancer(BaseComputeTask): + """Delete the amphorae on a load balancer. 
+ + Iterate through amphorae, deleting them + """ + + def execute(self, loadbalancer): + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + for amp in db_lb.amphorae: + # The compute driver will already handle NotFound + try: + self.compute.delete(amp.compute_id) + except Exception: + LOG.exception("Compute delete for amphora id: %s failed", + amp.id) + raise + + +class ComputeDelete(BaseComputeTask): + @tenacity.retry(retry=tenacity.retry_if_exception_type(), + stop=tenacity.stop_after_attempt(CONF.compute.max_retries), + wait=tenacity.wait_exponential( + multiplier=CONF.compute.retry_backoff, + min=CONF.compute.retry_interval, + max=CONF.compute.retry_max), reraise=True) + def execute(self, amphora, passive_failure=False): + + amphora_id = amphora.get(constants.ID) + compute_id = amphora[constants.COMPUTE_ID] + + # tenacity 8.5.0 moves statistics from the retry object to the function + try: + retry_statistics = self.execute.statistics + except AttributeError: + retry_statistics = self.execute.retry.statistics + if retry_statistics.get(constants.ATTEMPT_NUMBER, 1) == 1: + LOG.debug('Compute delete execute for amphora with ID %s and ' + 'compute ID: %s', amphora_id, compute_id) + else: + LOG.warning('Retrying compute delete of %s attempt %s of %s.', + compute_id, + retry_statistics[constants.ATTEMPT_NUMBER], + self.execute.retry.stop.max_attempt_number) + # Let the Taskflow engine know we are working and alive + # Don't use get with a default for 'attempt_number', we need to fail + # if that number is missing. + self.update_progress( + retry_statistics[constants.ATTEMPT_NUMBER] / + self.execute.retry.stop.max_attempt_number) + + try: + self.compute.delete(compute_id) + except Exception: + if (retry_statistics[constants.ATTEMPT_NUMBER] != + self.execute.retry.stop.max_attempt_number): + LOG.warning('Compute delete for amphora id: %s failed. ' + 'Retrying.', amphora_id) + raise + if passive_failure: + LOG.exception('Compute delete for compute ID: %s on amphora ' + 'ID: %s failed. This resource will be abandoned ' + 'and should manually be cleaned up once the ' + 'compute service is functional.', + compute_id, amphora_id) + else: + LOG.exception('Compute delete for compute ID: %s on amphora ' + 'ID: %s failed. The compute service has failed. 
' + 'Aborting and reverting.', compute_id, + amphora_id) + raise + + +class ComputeWait(BaseComputeTask): + """Wait for the compute driver to mark the amphora active.""" + + def execute(self, compute_id, amphora_id, availability_zone): + """Wait for the compute driver to mark the amphora active + + :param compute_id: virtual machine UUID + :param amphora_id: id of the amphora object + :param availability_zone: availability zone metadata dictionary + + :raises: Generic exception if the amphora is not active + :returns: An amphora object + """ + if availability_zone: + amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK) + else: + amp_network = None + + amp, fault = self.compute.get_amphora(compute_id, amp_network) + if amp.status == constants.ACTIVE: + if CONF.haproxy_amphora.build_rate_limit != -1: + self.rate_limit.remove_from_build_req_queue(amphora_id) + return amp.to_dict() + if amp.status == constants.ERROR: + raise exceptions.ComputeBuildException(fault=fault) + + time.sleep(CONF.controller_worker.amp_active_wait_sec) + raise exceptions.ComputeWaitTimeoutException(id=compute_id) + + +class NovaServerGroupCreate(BaseComputeTask): + def execute(self, loadbalancer_id): + """Create a server group by nova client api + + :param loadbalancer_id: will be used for server group's name + :param policy: will used for server group's policy + :raises: Generic exception if the server group is not created + :returns: server group's id + """ + + name = 'octavia-lb-' + loadbalancer_id + server_group = self.compute.create_server_group( + name, CONF.nova.anti_affinity_policy) + LOG.debug("Server Group created with id: %s for load balancer id: " + "%s", server_group.id, loadbalancer_id) + return server_group.id + + def revert(self, result, *args, **kwargs): + """This method will revert the creation of the + + :param result: here it refers to server group id + """ + server_group_id = result + LOG.warning("Reverting server group create with id:%s", + server_group_id) + try: + self.compute.delete_server_group(server_group_id) + except Exception as e: + LOG.error("Failed to delete server group. Resources may " + "still be in use for server group: %(sg)s due to " + "error: %(except)s", + {'sg': server_group_id, 'except': str(e)}) + + +class NovaServerGroupDelete(BaseComputeTask): + def execute(self, server_group_id): + if server_group_id is not None: + self.compute.delete_server_group(server_group_id) + + +class AttachPort(BaseComputeTask): + def execute(self, amphora, port): + """Attach a port to an amphora instance. + + :param amphora: The amphora to attach the port to. + :param port: The port to attach to the amphora. + :returns: None + """ + LOG.debug('Attaching port: %s to compute: %s', + port[constants.ID], amphora[constants.COMPUTE_ID]) + self.compute.attach_network_or_port(amphora[constants.COMPUTE_ID], + port_id=port[constants.ID]) + + def revert(self, amphora, port, *args, **kwargs): + """Revert our port attach. + + :param amphora: The amphora to detach the port from. + :param port: The port to attach to the amphora. 
+ """ + LOG.warning('Reverting port: %s attach to compute: %s', + port[constants.ID], amphora[constants.COMPUTE_ID]) + try: + self.compute.detach_port(amphora[constants.COMPUTE_ID], + port[constants.ID]) + except Exception as e: + LOG.error('Failed to detach port %s from compute %s for revert ' + 'due to %s.', port[constants.ID], + amphora[constants.COMPUTE_ID], str(e)) diff --git a/octavia/controller/worker/v2/tasks/database_tasks.py b/octavia/controller/worker/v2/tasks/database_tasks.py new file mode 100644 index 0000000000..448d196a9b --- /dev/null +++ b/octavia/controller/worker/v2/tasks/database_tasks.py @@ -0,0 +1,3087 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_db import exception as odb_exceptions +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import uuidutils +import sqlalchemy +from sqlalchemy.orm import exc +from taskflow import task +from taskflow.types import failure + +from octavia.api.drivers import utils as provider_utils +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common.tls_utils import cert_parser +from octavia.common import utils +from octavia.controller.worker import task_utils as task_utilities +from octavia.db import api as db_apis +from octavia.db import repositories as repo + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class BaseDatabaseTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + self.repos = repo.Repositories() + self.amphora_repo = repo.AmphoraRepository() + self.health_mon_repo = repo.HealthMonitorRepository() + self.listener_repo = repo.ListenerRepository() + self.loadbalancer_repo = repo.LoadBalancerRepository() + self.vip_repo = repo.VipRepository() + self.member_repo = repo.MemberRepository() + self.pool_repo = repo.PoolRepository() + self.amp_health_repo = repo.AmphoraHealthRepository() + self.l7policy_repo = repo.L7PolicyRepository() + self.l7rule_repo = repo.L7RuleRepository() + self.task_utils = task_utilities.TaskUtils() + self.amphora_member_port_repo = repo.AmphoraMemberPortRepository() + super().__init__(**kwargs) + + def _delete_from_amp_health(self, session, amphora_id): + """Delete the amphora_health record for an amphora. + + :param amphora_id: The amphora id to delete + """ + LOG.debug('Disabling health monitoring on amphora: %s', amphora_id) + try: + self.amp_health_repo.delete(session, + amphora_id=amphora_id) + except (sqlalchemy.orm.exc.NoResultFound, + sqlalchemy.orm.exc.UnmappedInstanceError): + LOG.debug('No existing amphora health record to delete ' + 'for amphora: %s, skipping.', amphora_id) + + def _mark_amp_health_busy(self, session, amphora_id): + """Mark the amphora_health record busy for an amphora. 
+ + :param amphora_id: The amphora id to mark busy + """ + LOG.debug('Marking health monitoring busy on amphora: %s', amphora_id) + try: + self.amp_health_repo.update(session, + amphora_id=amphora_id, + busy=True) + except (sqlalchemy.orm.exc.NoResultFound, + sqlalchemy.orm.exc.UnmappedInstanceError): + LOG.debug('No existing amphora health record to mark busy ' + 'for amphora: %s, skipping.', amphora_id) + + +class CreateAmphoraInDB(BaseDatabaseTask): + """Task to create an initial amphora in the Database.""" + + def execute(self, *args, loadbalancer_id=None, **kwargs): + """Creates a pending create amphora record in the database. + + :returns: The ID of the created amphora + """ + + with db_apis.session().begin() as session: + amphora = self.amphora_repo.create( + session, + id=uuidutils.generate_uuid(), + load_balancer_id=loadbalancer_id, + status=constants.PENDING_CREATE, + cert_busy=False) + if loadbalancer_id: + LOG.info("Created Amphora %s in DB for load balancer %s", + amphora.id, loadbalancer_id) + else: + LOG.info("Created Amphora %s in DB", amphora.id) + return amphora.id + + def revert(self, result, *args, **kwargs): + """Revert by storing the amphora in error state in the DB + + In a future version we might change the status to DELETED + if deleting the amphora was successful + + :param result: Id of created amphora. + :returns: None + """ + + if isinstance(result, failure.Failure): + # This task's execute failed, so nothing needed to be done to + # revert + return + + # At this point the revert is being called because another task that + # executed after this one failed, so we will need to do something; + # result is the amphora's id + + LOG.warning("Reverting create amphora in DB for amp id %s ", result) + + # Delete the amphora for now. May want to just update status later + with db_apis.session().begin() as session: + try: + self.amphora_repo.delete(session, id=result) + except Exception as e: + LOG.error("Failed to delete amphora %(amp)s " + "in the database due to: " + "%(except)s", {'amp': result, 'except': str(e)}) + try: + self.amp_health_repo.delete(session, amphora_id=result) + except Exception: + pass + + +class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask): + """Task to mark a list of amphorae deleted in the Database.""" + + def execute(self, loadbalancer): + """Update load balancer's amphorae statuses to DELETED in the database. + + :param loadbalancer: The load balancer whose amphorae should be + marked DELETED. + :returns: None + """ + with db_apis.session().begin() as session: + db_lb = self.repos.load_balancer.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + for amp in db_lb.amphorae: + LOG.debug("Marking amphora %s DELETED ", amp.id) + self.amphora_repo.update(session, + id=amp.id, status=constants.DELETED) + + +class DeleteHealthMonitorInDB(BaseDatabaseTask): + """Delete the health monitor in the DB. 
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, health_mon): + """Delete the health monitor in DB + + :param health_mon: The health monitor which should be deleted + :returns: None + """ + + LOG.debug("DB delete health monitor: %s ", + health_mon[constants.HEALTHMONITOR_ID]) + try: + with db_apis.session().begin() as session: + self.health_mon_repo.delete( + session, + id=health_mon[constants.HEALTHMONITOR_ID]) + except exc.NoResultFound: + # ignore if the HealthMonitor was not found + pass + + def revert(self, health_mon, *args, **kwargs): + """Mark the health monitor ERROR since the mark active couldn't happen + + :param health_mon: The health monitor which couldn't be deleted + :returns: None + """ + + LOG.warning("Reverting mark health monitor delete in DB " + "for health monitor with id %s", + health_mon[constants.HEALTHMONITOR_ID]) + with db_apis.session().begin() as session: + self.health_mon_repo.update( + session, + id=health_mon[constants.HEALTHMONITOR_ID], + provisioning_status=constants.ERROR) + + +class DeleteHealthMonitorInDBByPool(DeleteHealthMonitorInDB): + """Delete the health monitor in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool_id): + """Delete the health monitor in the DB. + + :param pool_id: ID of pool which health monitor should be deleted. + :returns: None + """ + with db_apis.session().begin() as session: + db_pool = self.pool_repo.get(session, + id=pool_id) + provider_hm = provider_utils.db_HM_to_provider_HM( + db_pool.health_monitor).to_dict() + super().execute( + provider_hm) + + def revert(self, pool_id, *args, **kwargs): + """Mark the health monitor ERROR since the mark active couldn't happen + + :param pool_id: ID of pool which health monitor couldn't be deleted + :returns: None + """ + with db_apis.session().begin() as session: + db_pool = self.pool_repo.get(session, + id=pool_id) + provider_hm = provider_utils.db_HM_to_provider_HM( + db_pool.health_monitor).to_dict() + super().revert( + provider_hm, *args, **kwargs) + + +class DeleteMemberInDB(BaseDatabaseTask): + """Delete the member in the DB. 
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Delete the member in the DB + + :param member: The member to be deleted + :returns: None + """ + + LOG.debug("DB delete member for id: %s ", member[constants.MEMBER_ID]) + with db_apis.session().begin() as session: + self.member_repo.delete(session, + id=member[constants.MEMBER_ID]) + + def revert(self, member, *args, **kwargs): + """Mark the member ERROR since the delete couldn't happen + + :param member: Member that failed to get deleted + :returns: None + """ + + LOG.warning("Reverting delete in DB for member id %s", + member[constants.MEMBER_ID]) + try: + with db_apis.session().begin() as session: + self.member_repo.update(session, + member[constants.MEMBER_ID], + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update member %(mem)s " + "provisioning_status to ERROR due to: %(except)s", + {'mem': member[constants.MEMBER_ID], 'except': str(e)}) + + +class DeleteListenerInDB(BaseDatabaseTask): + """Delete the listener in the DB.""" + + def execute(self, listener): + """Delete the listener in DB + + :param listener: The listener to delete + :returns: None + """ + LOG.debug("Delete in DB for listener id: %s", + listener[constants.LISTENER_ID]) + with db_apis.session().begin() as session: + self.listener_repo.delete(session, + id=listener[constants.LISTENER_ID]) + + def revert(self, listener, *args, **kwargs): + """Mark the listener ERROR since the listener didn't delete + + :param listener: Listener that failed to get deleted + :returns: None + """ + + # TODO(johnsom) Fix this, it doesn't revert anything + LOG.warning("Reverting mark listener delete in DB for listener id %s", + listener[constants.LISTENER_ID]) + + +class DeletePoolInDB(BaseDatabaseTask): + """Delete the pool in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool_id): + """Delete the pool in DB + + :param pool_id: The pool_id to be deleted + :returns: None + """ + + LOG.debug("Delete in DB for pool id: %s ", pool_id) + with db_apis.session().begin() as session: + self.pool_repo.delete(session, id=pool_id) + + def revert(self, pool_id, *args, **kwargs): + """Mark the pool ERROR since the delete couldn't happen + + :param pool_id: pool_id that failed to get deleted + :returns: None + """ + + LOG.warning("Reverting delete in DB for pool id %s", pool_id) + try: + with db_apis.session().begin() as session: + self.pool_repo.update(session, pool_id, + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update pool %(pool)s " + "provisioning_status to ERROR due to: %(except)s", + {'pool': pool_id, 'except': str(e)}) + + +class DeleteL7PolicyInDB(BaseDatabaseTask): + """Delete the L7 policy in the DB. 
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, l7policy):
+        """Delete the l7policy in DB
+
+        :param l7policy: The l7policy to be deleted
+        :returns: None
+        """
+
+        LOG.debug("Delete in DB for l7policy id: %s ",
+                  l7policy[constants.L7POLICY_ID])
+        with db_apis.session().begin() as session:
+            self.l7policy_repo.delete(session,
+                                      id=l7policy[constants.L7POLICY_ID])
+
+    def revert(self, l7policy, *args, **kwargs):
+        """Mark the l7policy ERROR since the delete couldn't happen
+
+        :param l7policy: L7 policy that failed to get deleted
+        :returns: None
+        """
+
+        LOG.warning("Reverting delete in DB for l7policy id %s",
+                    l7policy[constants.L7POLICY_ID])
+        try:
+            with db_apis.session().begin() as session:
+                self.l7policy_repo.update(session,
+                                          l7policy[constants.L7POLICY_ID],
+                                          provisioning_status=constants.ERROR)
+        except Exception as e:
+            LOG.error("Failed to update l7policy %(l7policy)s "
+                      "provisioning_status to ERROR due to: %(except)s",
+                      {'l7policy': l7policy[constants.L7POLICY_ID],
+                       'except': str(e)})
+
+
+class DeleteL7RuleInDB(BaseDatabaseTask):
+    """Delete the L7 rule in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, l7rule):
+        """Delete the l7rule in DB
+
+        :param l7rule: The l7rule to be deleted
+        :returns: None
+        """
+
+        LOG.debug("Delete in DB for l7rule id: %s",
+                  l7rule[constants.L7RULE_ID])
+        with db_apis.session().begin() as session:
+            self.l7rule_repo.delete(session,
+                                    id=l7rule[constants.L7RULE_ID])
+
+    def revert(self, l7rule, *args, **kwargs):
+        """Mark the l7rule ERROR since the delete couldn't happen
+
+        :param l7rule: L7 rule that failed to get deleted
+        :returns: None
+        """
+
+        LOG.warning("Reverting delete in DB for l7rule id %s",
+                    l7rule[constants.L7RULE_ID])
+        try:
+            with db_apis.session().begin() as session:
+                self.l7rule_repo.update(session,
+                                        l7rule[constants.L7RULE_ID],
+                                        provisioning_status=constants.ERROR)
+        except Exception as e:
+            LOG.error("Failed to update l7rule %(l7rule)s "
+                      "provisioning_status to ERROR due to: %(except)s",
+                      {'l7rule': l7rule[constants.L7RULE_ID],
+                       'except': str(e)})
+
+
+class DeleteAmpMemberPortInDB(BaseDatabaseTask):
+    """Delete an amphora member port record in the DB."""
+
+    def execute(self, port_id):
+        """Delete the amphora member port in DB
+
+        :param port_id: The port_id to be deleted
+        :returns: None
+        """
+
+        LOG.debug("Delete in DB for amphora member port %s", port_id)
+        with db_apis.session().begin() as session:
+            self.amphora_member_port_repo.delete(session, port_id=port_id)
+
+
+class ReloadAmphora(BaseDatabaseTask):
+    """Get an amphora object from the database."""
+
+    def execute(self, amphora):
+        """Get an amphora object from the database.
+
+        :param amphora: Dict of the amphora to look up
+        :returns: The amphora, as a dict
+        """
+
+        LOG.debug("Get amphora from DB for amphora id: %s ",
+                  amphora[constants.ID])
+        with db_apis.session().begin() as session:
+            return self.amphora_repo.get(
+                session, id=amphora[constants.ID]).to_dict()
+
+
+class ReloadLoadBalancer(BaseDatabaseTask):
+    """Get a load balancer object from the database."""
+
+    def execute(self, loadbalancer_id, *args, **kwargs):
+        """Get a load balancer object from the database.
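+
+        The result is returned in the provider (dict) representation so
+        that downstream tasks can consume it directly; roughly
+        (illustrative only)::
+
+            lb_dict = ReloadLoadBalancer().execute(lb_id)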
+
+        :param loadbalancer_id: The load balancer ID to lookup
+        :returns: The load balancer object
+        """
+
+        LOG.debug("Get load balancer from DB for load balancer id: %s ",
+                  loadbalancer_id)
+        with db_apis.session().begin() as session:
+            db_lb = self.loadbalancer_repo.get(session,
+                                               id=loadbalancer_id)
+            lb_dict = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+                db_lb)
+            return lb_dict.to_dict()
+
+
+class UpdateVIPAfterAllocation(BaseDatabaseTask):
+    """Update a VIP associated with a given load balancer."""
+
+    def execute(self, loadbalancer_id, vip):
+        """Update a VIP associated with a given load balancer.
+
+        :param loadbalancer_id: Id of a load balancer whose VIP should be
+               updated.
+        :param vip: data_models.Vip object with update data.
+        :returns: The load balancer object.
+        """
+        with db_apis.session().begin() as session:
+            self.repos.vip.update(session, loadbalancer_id,
+                                  port_id=vip[constants.PORT_ID],
+                                  subnet_id=vip[constants.SUBNET_ID],
+                                  ip_address=vip[constants.IP_ADDRESS])
+            db_lb = self.repos.load_balancer.get(session,
+                                                 id=loadbalancer_id)
+            prov_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+                db_lb)
+        LOG.info("Updated vip with port id %s, subnet id %s, ip address %s "
+                 "for load balancer %s",
+                 vip[constants.PORT_ID],
+                 vip[constants.SUBNET_ID],
+                 vip[constants.IP_ADDRESS],
+                 loadbalancer_id)
+        return prov_lb.to_dict()
+
+
+class UpdateAdditionalVIPsAfterAllocation(BaseDatabaseTask):
+    """Update the additional VIPs associated with a given load balancer."""
+
+    def execute(self, loadbalancer_id, additional_vips):
+        """Update additional VIPs associated with a given load balancer.
+
+        :param loadbalancer_id: Id of a load balancer whose additional VIPs
+               should be updated.
+        :param additional_vips: data_models.AdditionalVip object with update
+               data.
+        :returns: The load balancer object.
+        """
+        with db_apis.session().begin() as session:
+            for vip in additional_vips:
+                LOG.info("Updating additional VIP with subnet_id %s, "
+                         "ip_address %s for load balancer %s",
+                         vip[constants.SUBNET_ID], vip[constants.IP_ADDRESS],
+                         loadbalancer_id)
+                self.repos.additional_vip.update(
+                    session, loadbalancer_id,
+                    vip[constants.SUBNET_ID],
+                    ip_address=vip[constants.IP_ADDRESS],
+                    port_id=vip[constants.PORT_ID])
+            db_lb = self.repos.load_balancer.get(session,
+                                                 id=loadbalancer_id)
+            return provider_utils.db_loadbalancer_to_provider_loadbalancer(
+                db_lb).to_dict()
+
+
+class UpdateAmphoraeVIPData(BaseDatabaseTask):
+    """Update amphorae VIP data."""
+
+    def execute(self, amps_data):
+        """Update amphorae VIP data.
+
+        :param amps_data: Amphorae update dicts.
+        :returns: None
+        """
+        with db_apis.session().begin() as session:
+            for amp_data in amps_data:
+                self.repos.amphora.update(
+                    session,
+                    amp_data.get(constants.ID),
+                    vrrp_ip=amp_data[constants.VRRP_IP],
+                    ha_ip=amp_data[constants.HA_IP],
+                    vrrp_port_id=amp_data[constants.VRRP_PORT_ID],
+                    ha_port_id=amp_data[constants.HA_PORT_ID],
+                    vrrp_id=1)
+
+
+class UpdateAmphoraVIPData(BaseDatabaseTask):
+    """Update a single amphora's VIP data."""
+
+    def execute(self, amp_data):
+        """Update the amphora's VIP data.
+
+        :param amp_data: Amphora update dict.
+ :returns: None + """ + with db_apis.session().begin() as session: + self.repos.amphora.update( + session, + amp_data.get(constants.ID), + vrrp_ip=amp_data[constants.VRRP_IP], + ha_ip=amp_data[constants.HA_IP], + vrrp_port_id=amp_data[constants.VRRP_PORT_ID], + ha_port_id=amp_data[constants.HA_PORT_ID], + vrrp_id=1) + + +class UpdateAmpFailoverDetails(BaseDatabaseTask): + """Update amphora failover details in the database.""" + + def execute(self, amphora, vip, base_port): + """Update amphora failover details in the database. + + :param amphora: The amphora to update + :param vip: The VIP object associated with this amphora. + :param base_port: The base port object associated with the amphora. + :returns: None + """ + # role and vrrp_priority will be updated later. + with db_apis.session().begin() as session: + self.repos.amphora.update( + session, + amphora.get(constants.ID), + # TODO(johnsom) We should do a better job getting the fixed_ip + # as this could be a problem with dual stack. + # Fix this during the multi-vip patch. + vrrp_ip=( + base_port[constants.FIXED_IPS][0][constants.IP_ADDRESS]), + ha_ip=vip[constants.IP_ADDRESS], + vrrp_port_id=base_port[constants.ID], + ha_port_id=vip[constants.PORT_ID], + vrrp_id=1) + + +class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask): + """Associate failover amphora with loadbalancer in the database.""" + + def execute(self, amphora_id, loadbalancer_id): + """Associate failover amphora with loadbalancer in the database. + + :param amphora_id: Id of an amphora to update + :param loadbalancer_id: Id of a load balancer to be associated with + a given amphora. + :returns: None + """ + with db_apis.session().begin() as session: + self.repos.amphora.associate(session, + load_balancer_id=loadbalancer_id, + amphora_id=amphora_id) + + def revert(self, amphora_id, *args, **kwargs): + """Remove amphora-load balancer association. + + :param amphora_id: Id of an amphora that couldn't be associated + with a load balancer. + :returns: None + """ + try: + with db_apis.session().begin() as session: + self.repos.amphora.update(session, amphora_id, + loadbalancer_id=None) + except Exception as e: + LOG.error("Failed to update amphora %(amp)s " + "load balancer id to None due to: " + "%(except)s", {'amp': amphora_id, 'except': str(e)}) + + +class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask): + """Alter the amphora role and priority in DB.""" + + def _execute(self, amphora_id, amp_role, vrrp_priority): + """Alter the amphora role and priority in DB. + + :param amphora_id: Amphora ID to update. + :param amp_role: Amphora role to be set. + :param vrrp_priority: VRRP priority to set. + :returns: None + """ + LOG.debug("Mark %(role)s in DB for amphora: %(amp)s", + {constants.ROLE: amp_role, 'amp': amphora_id}) + with db_apis.session().begin() as session: + self.amphora_repo.update(session, amphora_id, role=amp_role, + vrrp_priority=vrrp_priority) + + def _revert(self, result, amphora_id, *args, **kwargs): + """Removes role and vrrp_priority association. + + :param result: Result of the association. + :param amphora_id: Amphora ID which role/vrrp_priority association + failed. 
+ :returns: None + """ + + if isinstance(result, failure.Failure): + return + + LOG.warning("Reverting amphora role in DB for amp id %(amp)s", + {'amp': amphora_id}) + try: + with db_apis.session().begin() as session: + self.amphora_repo.update(session, amphora_id, + role=None, vrrp_priority=None) + except Exception as e: + LOG.error("Failed to update amphora %(amp)s " + "role and vrrp_priority to None due to: " + "%(except)s", {'amp': amphora_id, 'except': str(e)}) + + +class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB): + """Alter the amphora role to: MASTER.""" + + def execute(self, amphora): + """Mark amphora as MASTER in db. + + :param amphora: Amphora to update role. + :returns: None + """ + amp_role = constants.ROLE_MASTER + self._execute(amphora[constants.ID], amp_role, + constants.ROLE_MASTER_PRIORITY) + + def revert(self, result, amphora, *args, **kwargs): + """Removes amphora role association. + + :param amphora: Amphora to update role. + :returns: None + """ + self._revert(result, amphora[constants.ID], *args, **kwargs) + + +class MarkAmphoraBackupInDB(_MarkAmphoraRoleAndPriorityInDB): + """Alter the amphora role to: Backup.""" + + def execute(self, amphora): + """Mark amphora as BACKUP in db. + + :param amphora: Amphora to update role. + :returns: None + """ + amp_role = constants.ROLE_BACKUP + self._execute(amphora[constants.ID], amp_role, + constants.ROLE_BACKUP_PRIORITY) + + def revert(self, result, amphora, *args, **kwargs): + """Removes amphora role association. + + :param amphora: Amphora to update role. + :returns: None + """ + self._revert(result, amphora[constants.ID], *args, **kwargs) + + +class MarkAmphoraStandAloneInDB(_MarkAmphoraRoleAndPriorityInDB): + """Alter the amphora role to: Standalone.""" + + def execute(self, amphora): + """Mark amphora as STANDALONE in db. + + :param amphora: Amphora to update role. + :returns: None + """ + amp_role = constants.ROLE_STANDALONE + self._execute(amphora[constants.ID], amp_role, None) + + def revert(self, result, amphora, *args, **kwargs): + """Removes amphora role association. + + :param amphora: Amphora to update role. + :returns: None + """ + self._revert(result, amphora[constants.ID], *args, **kwargs) + + +class MarkAmphoraAllocatedInDB(BaseDatabaseTask): + """Will mark an amphora as allocated to a load balancer in the database. + + Assume sqlalchemy made sure the DB got + retried sufficiently - so just abort + """ + + def execute(self, amphora, loadbalancer_id): + """Mark amphora as allocated to a load balancer in DB. + + :param amphora: Amphora to be updated. + :param loadbalancer_id: Id of a load balancer to which an amphora + should be allocated. + :returns: None + """ + + LOG.info('Mark ALLOCATED in DB for amphora: %(amp)s with ' + 'compute id: %(comp)s for load balancer: %(lb)s', + { + 'amp': amphora.get(constants.ID), + 'comp': amphora[constants.COMPUTE_ID], + 'lb': loadbalancer_id + }) + with db_apis.session().begin() as session: + self.amphora_repo.update( + session, + amphora.get(constants.ID), + status=constants.AMPHORA_ALLOCATED, + compute_id=amphora[constants.COMPUTE_ID], + lb_network_ip=amphora[constants.LB_NETWORK_IP], + load_balancer_id=loadbalancer_id) + + def revert(self, result, amphora, loadbalancer_id, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. + + :param result: Execute task result + :param amphora: Amphora that was updated. + :param loadbalancer_id: Id of a load balancer to which an amphora + failed to be allocated. 
+ :returns: None + """ + + if isinstance(result, failure.Failure): + return + + LOG.warning("Reverting mark amphora ready in DB for amp " + "id %(amp)s and compute id %(comp)s", + {'amp': amphora.get(constants.ID), + 'comp': amphora[constants.COMPUTE_ID]}) + self.task_utils.mark_amphora_status_error( + amphora.get(constants.ID)) + + +class MarkAmphoraBootingInDB(BaseDatabaseTask): + """Mark the amphora as booting in the database.""" + + def execute(self, amphora_id, compute_id): + """Mark amphora booting in DB. + + :param amphora_id: Id of the amphora to update + :param compute_id: Id of a compute on which an amphora resides + :returns: None + """ + + LOG.debug("Mark BOOTING in DB for amphora: %(amp)s with " + "compute id %(id)s", {'amp': amphora_id, + constants.ID: compute_id}) + with db_apis.session().begin() as session: + self.amphora_repo.update(session, amphora_id, + status=constants.AMPHORA_BOOTING, + compute_id=compute_id) + + def revert(self, result, amphora_id, compute_id, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. + + :param result: Execute task result + :param amphora_id: Id of the amphora that failed to update + :param compute_id: Id of a compute on which an amphora resides + :returns: None + """ + + if isinstance(result, failure.Failure): + return + + LOG.warning("Reverting mark amphora booting in DB for amp " + "id %(amp)s and compute id %(comp)s", + {'amp': amphora_id, 'comp': compute_id}) + try: + with db_apis.session().begin() as session: + self.amphora_repo.update(session, amphora_id, + status=constants.ERROR, + compute_id=compute_id) + except Exception as e: + LOG.error("Failed to update amphora %(amp)s " + "status to ERROR due to: " + "%(except)s", {'amp': amphora_id, 'except': str(e)}) + + +class MarkAmphoraDeletedInDB(BaseDatabaseTask): + """Mark the amphora deleted in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, amphora): + """Mark the amphora as deleted in DB. + + :param amphora: Amphora to be updated. + :returns: None + """ + + LOG.debug("Mark DELETED in DB for amphora: %(amp)s with " + "compute id %(comp)s", + {'amp': amphora[constants.ID], + 'comp': amphora[constants.COMPUTE_ID]}) + with db_apis.session().begin() as session: + self.amphora_repo.update(session, + amphora[constants.ID], + status=constants.DELETED) + + def revert(self, amphora, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. + + :param amphora: Amphora that was updated. + :returns: None + """ + + LOG.warning("Reverting mark amphora deleted in DB " + "for amp id %(amp)s and compute id %(comp)s", + {'amp': amphora[constants.ID], + 'comp': amphora[constants.COMPUTE_ID]}) + + self.task_utils.mark_amphora_status_error(amphora[constants.ID]) + + +class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask): + """Mark the amphora pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, amphora): + """Mark the amphora as pending delete in DB. + + :param amphora: Amphora to be updated. + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for amphora: %(amp)s " + "with compute id %(id)s", + {'amp': amphora[constants.ID], + 'id': amphora[constants.COMPUTE_ID]}) + with db_apis.session().begin() as session: + self.amphora_repo.update(session, + amphora[constants.ID], + status=constants.PENDING_DELETE) + + def revert(self, amphora, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. 
+ + :param amphora: Amphora that was updated. + :returns: None + """ + + LOG.warning("Reverting mark amphora pending delete in DB " + "for amp id %(amp)s and compute id %(comp)s", + {'amp': amphora[constants.ID], + 'comp': amphora[constants.COMPUTE_ID]}) + self.task_utils.mark_amphora_status_error(amphora[constants.ID]) + + +class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask): + """Mark the amphora pending update in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, amphora): + """Mark the amphora as pending update in DB. + + :param amphora: Amphora to be updated. + :returns: None + """ + + LOG.debug("Mark PENDING UPDATE in DB for amphora: %(amp)s " + "with compute id %(id)s", + {'amp': amphora.get(constants.ID), + 'id': amphora[constants.COMPUTE_ID]}) + with db_apis.session().begin() as session: + self.amphora_repo.update(session, + amphora.get(constants.ID), + status=constants.PENDING_UPDATE) + + def revert(self, amphora, *args, **kwargs): + """Mark the amphora as broken and ready to be cleaned up. + + :param amphora: Amphora that was updated. + :returns: None + """ + + LOG.warning("Reverting mark amphora pending update in DB " + "for amp id %(amp)s and compute id %(comp)s", + {'amp': amphora.get(constants.ID), + 'comp': amphora[constants.COMPUTE_ID]}) + self.task_utils.mark_amphora_status_error(amphora.get(constants.ID)) + + +class UpdateAmphoraComputeId(BaseDatabaseTask): + """Associate amphora with a compute in DB.""" + + def execute(self, amphora_id, compute_id): + """Associate amphora with a compute in DB. + + :param amphora_id: Id of the amphora to update + :param compute_id: Id of a compute on which an amphora resides + :returns: None + """ + + with db_apis.session().begin() as session: + self.amphora_repo.update(session, amphora_id, + compute_id=compute_id) + + +class UpdateAmphoraInfo(BaseDatabaseTask): + """Update amphora with compute instance details.""" + + def execute(self, amphora_id, compute_obj): + """Update amphora with compute instance details. + + :param amphora_id: Id of the amphora to update + :param compute_obj: Compute on which an amphora resides + :returns: Updated amphora object + """ + with db_apis.session().begin() as session: + self.amphora_repo.update( + session, amphora_id, + lb_network_ip=compute_obj[constants.LB_NETWORK_IP], + cached_zone=compute_obj[constants.CACHED_ZONE], + image_id=compute_obj[constants.IMAGE_ID], + compute_flavor=compute_obj[constants.COMPUTE_FLAVOR]) + return self.amphora_repo.get(session, + id=amphora_id).to_dict() + + +class UpdateAmphoraDBCertExpiration(BaseDatabaseTask): + """Update the amphora expiration date with new cert file date.""" + + def execute(self, amphora_id, server_pem): + """Update the amphora expiration date with new cert file date. 
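+
+        The incoming ``server_pem`` is Fernet-encrypted; the body below
+        roughly does (sketch of the logic that follows)::
+
+            fer = utils.get_server_certs_key_passphrases_fernet()
+            expiry = cert_parser.get_cert_expiration(
+                fer.decrypt(server_pem.encode("utf-8")))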
+
+        :param amphora_id: Id of the amphora to update
+        :param server_pem: Certificate in PEM format
+        :returns: None
+        """
+
+        LOG.debug("Update DB cert expiry date of amphora id: %s", amphora_id)
+
+        fer = utils.get_server_certs_key_passphrases_fernet()
+        cert_expiration = cert_parser.get_cert_expiration(
+            fer.decrypt(server_pem.encode("utf-8")))
+        LOG.debug("Certificate expiration date is %s ", cert_expiration)
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(session, amphora_id,
+                                     cert_expiration=cert_expiration)
+
+
+class UpdateAmphoraCertBusyToFalse(BaseDatabaseTask):
+    """Update the amphora cert_busy flag to be false."""
+
+    def execute(self, amphora_id):
+        """Update the amphora cert_busy flag to be false.
+
+        :param amphora_id: Id of the amphora to update
+        :returns: None
+        """
+
+        LOG.debug("Update cert_busy flag of amphora id %s to False",
+                  amphora_id)
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(session, amphora_id,
+                                     cert_busy=False)
+
+
+class MarkLBActiveInDB(BaseDatabaseTask):
+    """Mark the load balancer active in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def __init__(self, mark_subobjects=False, **kwargs):
+        super().__init__(**kwargs)
+        self.mark_subobjects = mark_subobjects
+
+    def execute(self, loadbalancer):
+        """Mark the load balancer as active in DB.
+
+        This also marks ACTIVE all sub-objects of the load balancer if
+        self.mark_subobjects is True.
+
+        :param loadbalancer: Load balancer object to be updated
+        :returns: None
+        """
+
+        if self.mark_subobjects:
+            LOG.debug("Marking all listeners of loadbalancer %s ACTIVE",
+                      loadbalancer[constants.LOADBALANCER_ID])
+            with db_apis.session().begin() as session:
+                db_lb = self.loadbalancer_repo.get(
+                    session, id=loadbalancer[constants.LOADBALANCER_ID])
+                for listener in db_lb.listeners:
+                    self._mark_listener_status(session, listener,
+                                               constants.ACTIVE)
+                for pool in db_lb.pools:
+                    self._mark_pool_status(session, pool, constants.ACTIVE)
+
+        LOG.info("Mark ACTIVE in DB for load balancer id: %s",
+                 loadbalancer[constants.LOADBALANCER_ID])
+        with db_apis.session().begin() as session:
+            self.loadbalancer_repo.update(
+                session, loadbalancer[constants.LOADBALANCER_ID],
+                provisioning_status=constants.ACTIVE)
+
+    def _mark_listener_status(self, session, listener, status):
+        self.listener_repo.update(session,
+                                  listener.id,
+                                  provisioning_status=status)
+        LOG.debug("Marking all l7policies of listener %s %s",
+                  listener.id, status)
+        for l7policy in listener.l7policies:
+            self._mark_l7policy_status(session, l7policy, status)
+
+        if listener.default_pool:
+            LOG.debug("Marking default pool of listener %s %s",
+                      listener.id, status)
+            self._mark_pool_status(session, listener.default_pool, status)
+
+    def _mark_l7policy_status(self, session, l7policy, status):
+        self.l7policy_repo.update(
+            session, l7policy.id,
+            provisioning_status=status)
+
+        LOG.debug("Marking all l7rules of l7policy %s %s",
+                  l7policy.id, status)
+        for l7rule in l7policy.l7rules:
+            self._mark_l7rule_status(session, l7rule, status)
+
+        if l7policy.redirect_pool:
+            LOG.debug("Marking redirect pool of l7policy %s %s",
+                      l7policy.id, status)
+            self._mark_pool_status(session, l7policy.redirect_pool, status)
+
+    def _mark_l7rule_status(self, session, l7rule, status):
+        self.l7rule_repo.update(
+            session, l7rule.id,
+            provisioning_status=status)
+
+    def _mark_pool_status(self, session, pool, status):
+        self.pool_repo.update(
+            session, pool.id,
+            provisioning_status=status)
+        if pool.health_monitor:
+            LOG.debug("Marking health monitor of pool %s %s", pool.id, status)
+            self._mark_hm_status(session, pool.health_monitor, status)
+
+        LOG.debug("Marking all members of pool %s %s", pool.id, status)
+        for member in pool.members:
+            self._mark_member_status(session, member, status)
+
+    def _mark_hm_status(self, session, hm, status):
+        self.health_mon_repo.update(
+            session, hm.id,
+            provisioning_status=status)
+
+    def _mark_member_status(self, session, member, status):
+        self.member_repo.update(
+            session, member.id,
+            provisioning_status=status)
+
+    def revert(self, loadbalancer, *args, **kwargs):
+        """Mark the load balancer as broken and ready to be cleaned up.
+
+        This also puts all sub-objects of the load balancer to ERROR state if
+        self.mark_subobjects is True
+
+        :param loadbalancer: Load balancer object that failed to update
+        :returns: None
+        """
+
+        if self.mark_subobjects:
+            LOG.debug("Marking all listeners and pools of loadbalancer %s"
+                      " ERROR", loadbalancer[constants.LOADBALANCER_ID])
+            with db_apis.session().begin() as session:
+                db_lb = self.loadbalancer_repo.get(
+                    session,
+                    id=loadbalancer[constants.LOADBALANCER_ID])
+                for listener in db_lb.listeners:
+                    try:
+                        self._mark_listener_status(session, listener,
+                                                   constants.ERROR)
+                    except Exception:
+                        LOG.warning("Error updating listener %s provisioning "
+                                    "status", listener.id)
+                for pool in db_lb.pools:
+                    try:
+                        self._mark_pool_status(session, pool, constants.ERROR)
+                    except Exception:
+                        LOG.warning("Error updating pool %s provisioning "
+                                    "status", pool.id)
+
+
+class MarkLBActiveInDBByListener(BaseDatabaseTask):
+    """Mark the load balancer active in the DB using a listener dict.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, listener):
+        """Mark the load balancer as active in DB.
+
+        :param listener: Listener dictionary
+        :returns: None
+        """
+
+        LOG.info("Mark ACTIVE in DB for load balancer id: %s",
+                 listener[constants.LOADBALANCER_ID])
+        with db_apis.session().begin() as session:
+            self.loadbalancer_repo.update(
+                session, listener[constants.LOADBALANCER_ID],
+                provisioning_status=constants.ACTIVE)
+
+
+class UpdateLBServerGroupInDB(BaseDatabaseTask):
+    """Update the server group id info for load balancer in DB."""
+
+    def execute(self, loadbalancer_id, server_group_id):
+        """Update the server group id info for load balancer in DB.
+
+        :param loadbalancer_id: Id of a load balancer to update
+        :param server_group_id: Id of a server group to associate with
+               the load balancer
+        :returns: None
+        """
+
+        LOG.debug("Server Group updated with id: %s for load balancer id: %s",
+                  server_group_id, loadbalancer_id)
+        with db_apis.session().begin() as session:
+            self.loadbalancer_repo.update(session,
+                                          id=loadbalancer_id,
+                                          server_group_id=server_group_id)
+
+    def revert(self, loadbalancer_id, server_group_id, *args, **kwargs):
+        """Remove server group information from a load balancer in DB.
+ + :param loadbalancer_id: Id of a load balancer that failed to update + :param server_group_id: Id of a server group that couldn't be + associated with the load balancer + :returns: None + """ + LOG.warning('Reverting Server Group updated with id: %(s1)s for ' + 'load balancer id: %(s2)s ', + {'s1': server_group_id, 's2': loadbalancer_id}) + try: + with db_apis.session().begin() as session: + self.loadbalancer_repo.update(session, + id=loadbalancer_id, + server_group_id=None) + except Exception as e: + LOG.error("Failed to update load balancer %(lb)s " + "server_group_id to None due to: " + "%(except)s", {'lb': loadbalancer_id, 'except': str(e)}) + + +class MarkLBDeletedInDB(BaseDatabaseTask): + """Mark the load balancer deleted in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer): + """Mark the load balancer as deleted in DB. + + :param loadbalancer: Load balancer object to be updated + :returns: None + """ + + LOG.debug("Mark DELETED in DB for load balancer id: %s", + loadbalancer[constants.LOADBALANCER_ID]) + with db_apis.session().begin() as session: + self.loadbalancer_repo.update( + session, loadbalancer[constants.LOADBALANCER_ID], + provisioning_status=constants.DELETED) + + +class MarkLBPendingDeleteInDB(BaseDatabaseTask): + """Mark the load balancer pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer): + """Mark the load balancer as pending delete in DB. + + :param loadbalancer: Load balancer object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for load balancer id: %s", + loadbalancer[constants.LOADBALANCER_ID]) + with db_apis.session().begin() as session: + self.loadbalancer_repo.update( + session, loadbalancer[constants.LOADBALANCER_ID], + provisioning_status=constants.PENDING_DELETE) + + +class MarkLBAndListenersActiveInDB(BaseDatabaseTask): + """Mark the load balancer and specified listeners active in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer_id, listeners): + """Mark the load balancer and listeners as active in DB. + + :param loadbalancer_id: The load balancer ID to be updated + :param listeners: Listener objects to be updated + :returns: None + """ + lb_id = None + if loadbalancer_id: + lb_id = loadbalancer_id + elif listeners: + lb_id = listeners[0][constants.LOADBALANCER_ID] + + if lb_id: + LOG.debug("Mark ACTIVE in DB for load balancer id: %s " + "and updating status for listener ids: %s", lb_id, + ', '.join([listener[constants.LISTENER_ID] + for listener in listeners])) + with db_apis.session().begin() as session: + self.loadbalancer_repo.update( + session, lb_id, provisioning_status=constants.ACTIVE) + for listener in listeners: + with db_apis.session().begin() as session: + self.listener_repo.prov_status_active_if_not_error( + session, listener[constants.LISTENER_ID]) + + def revert(self, loadbalancer_id, listeners, *args, **kwargs): + """Mark the load balancer and listeners as broken. 
+ + :param loadbalancer_id: The load balancer ID to be updated + :param listeners: Listener objects that failed to update + :returns: None + """ + lists = ', '.join([listener[constants.LISTENER_ID] + for listener in listeners]) + LOG.warning("Reverting mark listeners active in " + "DB for listener ids: " + "%(list)s", {'list': lists}) + + for listener in listeners: + self.task_utils.mark_listener_prov_status_error( + listener[constants.LISTENER_ID]) + + +class MarkListenerDeletedInDB(BaseDatabaseTask): + """Mark the listener deleted in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, listener): + """Mark the listener as deleted in DB + + :param listener: The listener to be marked deleted + :returns: None + """ + + LOG.debug("Mark DELETED in DB for listener id: %s ", listener.id) + with db_apis.session().begin() as session: + self.listener_repo.update(session, listener.id, + provisioning_status=constants.DELETED) + + def revert(self, listener, *args, **kwargs): + """Mark the listener ERROR since the delete couldn't happen + + :param listener: The listener that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting mark listener deleted in DB " + "for listener id %s", listener.id) + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class MarkListenerPendingDeleteInDB(BaseDatabaseTask): + """Mark the listener pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, listener): + """Mark the listener as pending delete in DB. + + :param listener: The listener to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for listener id: %s", + listener.id) + with db_apis.session().begin() as session: + self.listener_repo.update( + session, listener.id, + provisioning_status=constants.PENDING_DELETE) + + def revert(self, listener, *args, **kwargs): + """Mark the listener as broken and ready to be cleaned up. + + :param listener: The listener that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting mark listener pending delete in DB " + "for listener id %s", listener.id) + self.task_utils.mark_listener_prov_status_error(listener.id) + + +class UpdateLoadbalancerInDB(BaseDatabaseTask): + """Update the loadbalancer in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer, update_dict): + """Update the loadbalancer in the DB + + :param loadbalancer: The load balancer to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for loadbalancer id: %s ", + loadbalancer[constants.LOADBALANCER_ID]) + with db_apis.session().begin() as session: + if update_dict.get('vip'): + vip_dict = update_dict.pop('vip') + self.vip_repo.update(session, + loadbalancer[constants.LOADBALANCER_ID], + **vip_dict) + self.loadbalancer_repo.update( + session, loadbalancer[constants.LOADBALANCER_ID], + **update_dict) + + +class UpdateHealthMonInDB(BaseDatabaseTask): + """Update the health monitor in the DB. 
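+
+    A minimal usage sketch (argument values are illustrative only)::
+
+        UpdateHealthMonInDB().execute(
+            {constants.HEALTHMONITOR_ID: hm_id}, {'delay': 5})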
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, health_mon, update_dict): + """Update the health monitor in the DB + + :param health_mon: The health monitor to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for health monitor id: %s ", + health_mon[constants.HEALTHMONITOR_ID]) + with db_apis.session().begin() as session: + self.health_mon_repo.update(session, + health_mon[constants.HEALTHMONITOR_ID], + **update_dict) + + def revert(self, health_mon, *args, **kwargs): + """Mark the health monitor ERROR since the update couldn't happen + + :param health_mon: The health monitor that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update health monitor in DB " + "for health monitor id %s", + health_mon[constants.HEALTHMONITOR_ID]) + try: + with db_apis.session().begin() as session: + self.health_mon_repo.update( + session, + health_mon[constants.HEALTHMONITOR_ID], + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update health monitor %(hm)s " + "provisioning_status to ERROR due to: %(except)s", + {'hm': health_mon[constants.HEALTHMONITOR_ID], + 'except': str(e)}) + + +class UpdateListenerInDB(BaseDatabaseTask): + """Update the listener in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, listener, update_dict): + """Update the listener in the DB + + :param listener: The listener to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for listener id: %s ", + listener[constants.LISTENER_ID]) + with db_apis.session().begin() as session: + self.listener_repo.update(session, + listener[constants.LISTENER_ID], + **update_dict) + + def revert(self, listener, *args, **kwargs): + """Mark the listener ERROR since the update couldn't happen + + :param listener: The listener that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update listener in DB " + "for listener id %s", listener[constants.LISTENER_ID]) + self.task_utils.mark_listener_prov_status_error( + listener[constants.LISTENER_ID]) + + +class UpdateMemberInDB(BaseDatabaseTask): + """Update the member in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member, update_dict): + """Update the member in the DB + + :param member: The member to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for member id: %s ", member[constants.MEMBER_ID]) + with db_apis.session().begin() as session: + self.member_repo.update(session, + member[constants.MEMBER_ID], + **update_dict) + + def revert(self, member, *args, **kwargs): + """Mark the member ERROR since the update couldn't happen + + :param member: The member that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update member in DB " + "for member id %s", member[constants.MEMBER_ID]) + try: + with db_apis.session().begin() as session: + self.member_repo.update(session, + member[constants.MEMBER_ID], + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update member %(member)s provisioning_status " + "to ERROR due to: %(except)s", + {'member': member[constants.MEMBER_ID], + 'except': str(e)}) + + +class UpdatePoolInDB(BaseDatabaseTask): + """Update the pool in the DB. 
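+
+    Updates go through ``repos.update_pool_and_sp`` so session persistence
+    changes are applied in the same transaction; a minimal sketch (values
+    are illustrative only)::
+
+        UpdatePoolInDB().execute(pool_id, {'lb_algorithm': 'ROUND_ROBIN'})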
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool_id, update_dict): + """Update the pool in the DB + + :param pool_id: The pool_id to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for pool id: %s ", pool_id) + with db_apis.session().begin() as session: + self.repos.update_pool_and_sp(session, pool_id, + update_dict) + + def revert(self, pool_id, *args, **kwargs): + """Mark the pool ERROR since the update couldn't happen + + :param pool_id: The pool_id that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update pool in DB for pool id %s", pool_id) + try: + with db_apis.session().begin() as session: + self.repos.update_pool_and_sp( + session, pool_id, + {'provisioning_status': constants.ERROR}) + except Exception as e: + LOG.error("Failed to update pool %(pool)s provisioning_status to " + "ERROR due to: %(except)s", {'pool': pool_id, + 'except': str(e)}) + + +class UpdateL7PolicyInDB(BaseDatabaseTask): + """Update the L7 policy in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7policy, update_dict): + """Update the L7 policy in the DB + + :param l7policy: The L7 policy to be updated + :param update_dict: The dictionary of updates to apply + :returns: None + """ + + LOG.debug("Update DB for l7policy id: %s", + l7policy[constants.L7POLICY_ID]) + with db_apis.session().begin() as session: + self.l7policy_repo.update(session, + l7policy[constants.L7POLICY_ID], + **update_dict) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy ERROR since the update couldn't happen + + :param l7policy: L7 policy that couldn't be updated + :returns: None + """ + + LOG.warning("Reverting update l7policy in DB " + "for l7policy id %s", l7policy[constants.L7POLICY_ID]) + try: + with db_apis.session().begin() as session: + self.l7policy_repo.update(session, + l7policy[constants.L7POLICY_ID], + provisioning_status=constants.ERROR) + except Exception as e: + LOG.error("Failed to update l7policy %(l7p)s provisioning_status " + "to ERROR due to: %(except)s", + {'l7p': l7policy[constants.L7POLICY_ID], + 'except': str(e)}) + + +class UpdateL7RuleInDB(BaseDatabaseTask): + """Update the L7 rule in the DB. 
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, l7rule, update_dict):
+        """Update the L7 rule in the DB
+
+        :param l7rule: The L7 rule to be updated
+        :param update_dict: The dictionary of updates to apply
+        :returns: None
+        """
+
+        LOG.debug("Update DB for l7rule id: %s", l7rule[constants.L7RULE_ID])
+        with db_apis.session().begin() as session:
+            self.l7rule_repo.update(session,
+                                    l7rule[constants.L7RULE_ID],
+                                    **update_dict)
+
+    def revert(self, l7rule, *args, **kwargs):
+        """Mark the parent L7 policy ERROR since the update couldn't happen
+
+        :param l7rule: L7 rule that couldn't be updated
+        :returns: None
+        """
+
+        LOG.warning("Reverting update l7rule in DB "
+                    "for l7rule id %s", l7rule[constants.L7RULE_ID])
+        try:
+            with db_apis.session().begin() as session:
+                self.l7policy_repo.update(session,
+                                          l7rule[constants.L7POLICY_ID],
+                                          provisioning_status=constants.ERROR)
+        except Exception as e:
+            LOG.error("Failed to update l7policy %(l7p)s "
+                      "provisioning_status to ERROR due to: %(except)s",
+                      {'l7p': l7rule[constants.L7POLICY_ID],
+                       'except': str(e)})
+
+
+class GetAmphoraDetails(BaseDatabaseTask):
+    """Task to retrieve amphora network details."""
+
+    def execute(self, amphora):
+        """Retrieve amphora network details.
+
+        :param amphora: Amphora whose network details are required
+        :returns: Amphora data dict
+        """
+        with db_apis.session().begin() as session:
+            db_amp = self.amphora_repo.get(session,
+                                           id=amphora.get(constants.ID))
+            amphora.update({
+                constants.VRRP_IP: db_amp.vrrp_ip,
+                constants.HA_IP: db_amp.ha_ip,
+                constants.HA_PORT_ID: db_amp.ha_port_id,
+                constants.ROLE: db_amp.role,
+                constants.VRRP_ID: db_amp.vrrp_id,
+                constants.VRRP_PRIORITY: db_amp.vrrp_priority
+            })
+        return amphora
+
+
+class GetAmphoraeFromLoadbalancer(BaseDatabaseTask):
+    """Task to pull the amphorae from a loadbalancer."""
+
+    def execute(self, loadbalancer_id):
+        """Pull the amphorae from a loadbalancer.
+
+        :param loadbalancer_id: Load balancer ID to get amphorae from
+        :returns: A list of amphora dicts
+        """
+        amphorae = []
+        with db_apis.session().begin() as session:
+            db_lb = self.repos.load_balancer.get(session,
+                                                 id=loadbalancer_id)
+            for amp in db_lb.amphorae:
+                a = self.amphora_repo.get(session, id=amp.id,
+                                          show_deleted=False)
+                if a is None:
+                    continue
+                amphorae.append(a.to_dict())
+        return amphorae
+
+
+class GetListenersFromLoadbalancer(BaseDatabaseTask):
+    """Task to pull the listeners from a loadbalancer."""
+
+    def execute(self, loadbalancer):
+        """Pull the listeners from a loadbalancer.
+
+        :param loadbalancer: Load balancer whose listeners are required
+        :returns: A list of Listener objects
+        """
+        listeners = []
+        with db_apis.session().begin() as session:
+            db_lb = self.repos.load_balancer.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+            for listener in db_lb.listeners:
+                db_l = self.listener_repo.get(session, id=listener.id)
+                prov_listener = (
+                    provider_utils.db_listener_to_provider_listener(
+                        db_l))
+                listeners.append(prov_listener.to_dict())
+        return listeners
+
+
+class GetVipFromLoadbalancer(BaseDatabaseTask):
+    """Task to pull the vip from a loadbalancer."""
+
+    def execute(self, loadbalancer):
+        """Pull the vip from a loadbalancer.
+
+        :param loadbalancer: Load balancer whose VIP is required
+        :returns: VIP associated with a given load balancer
+        """
+        with db_apis.session().begin() as session:
+            db_lb = self.repos.load_balancer.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+            return db_lb.vip.to_dict(recurse=True)
+
+
+class GetLoadBalancer(BaseDatabaseTask):
+    """Get a load balancer object from the database."""
+
+    def execute(self, loadbalancer_id, *args, **kwargs):
+        """Get a load balancer object from the database.
+
+        :param loadbalancer_id: The load balancer ID to lookup
+        :returns: The load balancer object
+        """
+
+        LOG.debug("Get load balancer from DB for load balancer id: %s",
+                  loadbalancer_id)
+        with db_apis.session().begin() as session:
+            db_lb = self.loadbalancer_repo.get(session,
+                                               id=loadbalancer_id)
+            provider_lb = (
+                provider_utils.db_loadbalancer_to_provider_loadbalancer(
+                    db_lb))
+            return provider_lb.to_dict()
+
+
+class CreateVRRPGroupForLB(BaseDatabaseTask):
+    """Create a VRRP group for a load balancer."""
+
+    def execute(self, loadbalancer_id):
+        """Create a VRRP group for a load balancer.
+
+        :param loadbalancer_id: Load balancer ID for which a VRRP group
+               should be created
+        """
+        try:
+            with db_apis.session().begin() as session:
+                self.repos.vrrpgroup.create(
+                    session,
+                    load_balancer_id=loadbalancer_id,
+                    vrrp_group_name=str(loadbalancer_id).replace('-', ''),
+                    vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
+                    vrrp_auth_pass=(
+                        uuidutils.generate_uuid().replace('-', '')[0:7]),
+                    advert_int=CONF.keepalived_vrrp.vrrp_advert_int)
+        except odb_exceptions.DBDuplicateEntry:
+            LOG.debug('VRRP_GROUP entry already exists for load balancer, '
+                      'skipping create.')
+
+
+class DisableAmphoraHealthMonitoring(BaseDatabaseTask):
+    """Disable amphora health monitoring.
+
+    This disables amphora health monitoring by removing it from
+    the amphora_health table.
+    """
+
+    def execute(self, amphora):
+        """Disable health monitoring for an amphora
+
+        :param amphora: The amphora to disable health monitoring for
+        :returns: None
+        """
+        with db_apis.session().begin() as session:
+            self._delete_from_amp_health(session, amphora[constants.ID])
+
+
+class DisableLBAmphoraeHealthMonitoring(BaseDatabaseTask):
+    """Disable health monitoring on the LB amphorae.
+
+    This disables amphora health monitoring by removing it from
+    the amphora_health table for each amphora on a load balancer.
+    """
+
+    def execute(self, loadbalancer):
+        """Disable health monitoring for amphora on a load balancer
+
+        :param loadbalancer: The load balancer to disable health monitoring on
+        :returns: None
+        """
+        with db_apis.session().begin() as session:
+            db_lb = self.loadbalancer_repo.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+            for amphora in db_lb.amphorae:
+                self._delete_from_amp_health(session, amphora.id)
+
+
+class MarkAmphoraHealthBusy(BaseDatabaseTask):
+    """Mark amphora health monitoring busy.
+
+    This prevents amphora failover by marking the amphora busy in
+    the amphora_health table.
+    """
+
+    def execute(self, amphora):
+        """Mark amphora health monitoring busy
+
+        :param amphora: The amphora to mark amphora health busy
+        :returns: None
+        """
+        with db_apis.session().begin() as session:
+            self._mark_amp_health_busy(session, amphora[constants.ID])
+
+
+class MarkLBAmphoraeHealthBusy(BaseDatabaseTask):
+    """Mark amphorae health monitoring busy for the LB.
+
+    This prevents amphorae failover by marking each amphora of a given
+    load balancer busy in the amphora_health table.
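+
+    A minimal usage sketch (``lb_id`` is illustrative only)::
+
+        MarkLBAmphoraeHealthBusy().execute(
+            {constants.LOADBALANCER_ID: lb_id})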
+
+    """
+
+    def execute(self, loadbalancer):
+        """Marks amphorae health busy for each amphora on a load balancer
+
+        :param loadbalancer: The load balancer to mark amphorae health busy
+        :returns: None
+        """
+        with db_apis.session().begin() as session:
+            db_lb = self.loadbalancer_repo.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+            for amphora in db_lb.amphorae:
+                self._mark_amp_health_busy(session, amphora.id)
+
+
+class MarkHealthMonitorActiveInDB(BaseDatabaseTask):
+    """Mark the health monitor ACTIVE in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, health_mon):
+        """Mark the health monitor ACTIVE in DB.
+
+        :param health_mon: Health Monitor object to be updated
+        :returns: None
+        """
+
+        LOG.debug("Mark ACTIVE in DB for health monitor id: %s",
+                  health_mon[constants.HEALTHMONITOR_ID])
+        with db_apis.session().begin() as session:
+            db_health_mon = self.health_mon_repo.get(
+                session, id=health_mon[constants.HEALTHMONITOR_ID])
+            op_status = (constants.ONLINE if db_health_mon.enabled
+                         else constants.OFFLINE)
+            self.health_mon_repo.update(
+                session, health_mon[constants.HEALTHMONITOR_ID],
+                provisioning_status=constants.ACTIVE,
+                operating_status=op_status)
+
+    def revert(self, health_mon, *args, **kwargs):
+        """Mark the health monitor as broken
+
+        :param health_mon: Health Monitor object that failed to update
+        :returns: None
+        """
+
+        LOG.warning("Reverting mark health monitor ACTIVE in DB "
+                    "for health monitor id %s",
+                    health_mon[constants.HEALTHMONITOR_ID])
+        self.task_utils.mark_health_mon_prov_status_error(
+            health_mon[constants.HEALTHMONITOR_ID])
+
+
+class MarkHealthMonitorPendingCreateInDB(BaseDatabaseTask):
+    """Mark the health monitor pending create in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, health_mon):
+        """Mark the health monitor as pending create in DB.
+
+        :param health_mon: Health Monitor object to be updated
+        :returns: None
+        """
+
+        LOG.debug("Mark PENDING CREATE in DB for health monitor id: %s",
+                  health_mon[constants.HEALTHMONITOR_ID])
+        with db_apis.session().begin() as session:
+            self.health_mon_repo.update(
+                session, health_mon[constants.HEALTHMONITOR_ID],
+                provisioning_status=constants.PENDING_CREATE)
+
+    def revert(self, health_mon, *args, **kwargs):
+        """Mark the health monitor as broken
+
+        :param health_mon: Health Monitor object that failed to update
+        :returns: None
+        """
+
+        LOG.warning("Reverting mark health monitor pending create in DB "
+                    "for health monitor id %s",
+                    health_mon[constants.HEALTHMONITOR_ID])
+        self.task_utils.mark_health_mon_prov_status_error(
+            health_mon[constants.HEALTHMONITOR_ID])
+
+
+class MarkHealthMonitorPendingDeleteInDB(BaseDatabaseTask):
+    """Mark the health monitor pending delete in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, health_mon):
+        """Mark the health monitor as pending delete in DB.
+
+        :param health_mon: Health Monitor object to be updated
+        :returns: None
+        """
+
+        LOG.debug("Mark PENDING DELETE in DB for health monitor id: %s",
+                  health_mon[constants.HEALTHMONITOR_ID])
+        with db_apis.session().begin() as session:
+            self.health_mon_repo.update(
+                session, health_mon[constants.HEALTHMONITOR_ID],
+                provisioning_status=(constants.
+                                     PENDING_DELETE))
+
+    def revert(self, health_mon, *args, **kwargs):
+        """Mark the health monitor as broken
+
+        :param health_mon: Health Monitor object that failed to update
+        :returns: None
+        """
+
+        LOG.warning("Reverting mark health monitor pending delete in DB "
+                    "for health monitor id %s",
+                    health_mon[constants.HEALTHMONITOR_ID])
+        self.task_utils.mark_health_mon_prov_status_error(
+            health_mon[constants.HEALTHMONITOR_ID])
+
+
+class MarkHealthMonitorPendingUpdateInDB(BaseDatabaseTask):
+    """Mark the health monitor pending update in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, health_mon):
+        """Mark the health monitor as pending update in DB.
+
+        :param health_mon: Health Monitor object to be updated
+        :returns: None
+        """
+
+        LOG.debug("Mark PENDING UPDATE in DB for health monitor id: %s",
+                  health_mon[constants.HEALTHMONITOR_ID])
+        with db_apis.session().begin() as session:
+            self.health_mon_repo.update(
+                session, health_mon[constants.HEALTHMONITOR_ID],
+                provisioning_status=constants.PENDING_UPDATE)
+
+    def revert(self, health_mon, *args, **kwargs):
+        """Mark the health monitor as broken
+
+        :param health_mon: Health Monitor object that failed to update
+        :returns: None
+        """
+
+        LOG.warning("Reverting mark health monitor pending update in DB "
+                    "for health monitor id %s",
+                    health_mon[constants.HEALTHMONITOR_ID])
+        self.task_utils.mark_health_mon_prov_status_error(
+            health_mon[constants.HEALTHMONITOR_ID])
+
+
+class MarkHealthMonitorsOnlineInDB(BaseDatabaseTask):
+    """Mark all enabled health monitors of a load balancer ONLINE in the DB.
+
+    Walks the listeners of the load balancer and updates the operating
+    status of every enabled health monitor found on default pools and
+    l7policy redirect pools.
+    """
+
+    def execute(self, loadbalancer: dict):
+        """Mark all enabled health monitors Online
+
+        :param loadbalancer: Dictionary of a Load Balancer that has associated
+                             health monitors
+        :returns: None
+        """
+
+        with db_apis.session().begin() as session:
+            db_lb = self.loadbalancer_repo.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+
+            # Update the healthmonitors of either attached listeners or
+            # l7policies
+            hms_to_update = []
+
+            for listener in db_lb.listeners:
+                if (listener.default_pool and
+                        listener.default_pool.health_monitor):
+                    hm = listener.default_pool.health_monitor
+                    if hm.enabled:
+                        hms_to_update.append(hm.id)
+                for l7policy in listener.l7policies:
+                    if l7policy.redirect_pool and (
+                            l7policy.redirect_pool.health_monitor):
+                        hm = l7policy.redirect_pool.health_monitor
+                        if hm.enabled:
+                            hms_to_update.append(hm.id)
+
+            for hm_id in hms_to_update:
+                self.health_mon_repo.update(
+                    session, hm_id, operating_status=constants.ONLINE)
+
+
+class MarkL7PolicyActiveInDB(BaseDatabaseTask):
+    """Mark the l7policy ACTIVE in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, l7policy):
+        """Mark the l7policy ACTIVE in DB.
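+
+        The operating status is derived from the policy's ``enabled``
+        flag, roughly (sketch of the logic below)::
+
+            op_status = (constants.ONLINE if db_l7policy.enabled
+                         else constants.OFFLINE)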
+ + :param l7policy: L7Policy object to be updated + :returns: None + """ + + LOG.debug("Mark ACTIVE in DB for l7policy id: %s", + l7policy[constants.L7POLICY_ID]) + with db_apis.session().begin() as session: + db_l7policy = self.l7policy_repo.get( + session, id=l7policy[constants.L7POLICY_ID]) + op_status = (constants.ONLINE if db_l7policy.enabled + else constants.OFFLINE) + self.l7policy_repo.update(session, + l7policy[constants.L7POLICY_ID], + provisioning_status=constants.ACTIVE, + operating_status=op_status) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy as broken + + :param l7policy: L7Policy object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7policy ACTIVE in DB " + "for l7policy id %s", l7policy[constants.L7POLICY_ID]) + self.task_utils.mark_l7policy_prov_status_error( + l7policy[constants.L7POLICY_ID]) + + +class MarkL7PolicyPendingCreateInDB(BaseDatabaseTask): + """Mark the l7policy pending create in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7policy): + """Mark the l7policy as pending create in DB. + + :param l7policy: L7Policy object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING CREATE in DB for l7policy id: %s", + l7policy[constants.L7POLICY_ID]) + with db_apis.session().begin() as session: + self.l7policy_repo.update( + session, l7policy[constants.L7POLICY_ID], + provisioning_status=constants.PENDING_CREATE) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy as broken + + :param l7policy: L7Policy object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7policy pending create in DB " + "for l7policy id %s", l7policy[constants.L7POLICY_ID]) + self.task_utils.mark_l7policy_prov_status_error( + l7policy[constants.L7POLICY_ID]) + + +class MarkL7PolicyPendingDeleteInDB(BaseDatabaseTask): + """Mark the l7policy pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7policy): + """Mark the l7policy as pending delete in DB. + + :param l7policy: L7Policy object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for l7policy id: %s", + l7policy[constants.L7POLICY_ID]) + with db_apis.session().begin() as session: + self.l7policy_repo.update( + session, l7policy[constants.L7POLICY_ID], + provisioning_status=constants.PENDING_DELETE) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy as broken + + :param l7policy: L7Policy object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7policy pending delete in DB " + "for l7policy id %s", l7policy[constants.L7POLICY_ID]) + self.task_utils.mark_l7policy_prov_status_error( + l7policy[constants.L7POLICY_ID]) + + +class MarkL7PolicyPendingUpdateInDB(BaseDatabaseTask): + """Mark the l7policy pending update in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7policy): + """Mark the l7policy as pending update in DB. + + :param l7policy: L7Policy object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING UPDATE in DB for l7policy id: %s", + l7policy[constants.L7POLICY_ID]) + with db_apis.session().begin() as session: + self.l7policy_repo.update(session, + l7policy[constants.L7POLICY_ID], + provisioning_status=(constants. 
+ PENDING_UPDATE)) + + def revert(self, l7policy, *args, **kwargs): + """Mark the l7policy as broken + + :param l7policy: L7Policy object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7policy pending update in DB " + "for l7policy id %s", l7policy[constants.L7POLICY_ID]) + self.task_utils.mark_l7policy_prov_status_error( + l7policy[constants.L7POLICY_ID]) + + +class MarkL7RuleActiveInDB(BaseDatabaseTask): + """Mark the l7rule ACTIVE in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7rule): + """Mark the l7rule ACTIVE in DB. + + :param l7rule: L7Rule object to be updated + :returns: None + """ + + LOG.debug("Mark ACTIVE in DB for l7rule id: %s", + l7rule[constants.L7RULE_ID]) + with db_apis.session().begin() as session: + db_rule = self.l7rule_repo.get(session, + id=l7rule[constants.L7RULE_ID]) + op_status = (constants.ONLINE if db_rule.enabled + else constants.OFFLINE) + self.l7rule_repo.update(session, + l7rule[constants.L7RULE_ID], + provisioning_status=constants.ACTIVE, + operating_status=op_status) + + def revert(self, l7rule, *args, **kwargs): + """Mark the l7rule as broken + + :param l7rule: L7Rule object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7rule ACTIVE in DB " + "for l7rule id %s", l7rule[constants.L7RULE_ID]) + self.task_utils.mark_l7rule_prov_status_error( + l7rule[constants.L7RULE_ID]) + + +class MarkL7RulePendingCreateInDB(BaseDatabaseTask): + """Mark the l7rule pending create in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7rule): + """Mark the l7rule as pending create in DB. + + :param l7rule: L7Rule object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING CREATE in DB for l7rule id: %s", + l7rule[constants.L7RULE_ID]) + with db_apis.session().begin() as session: + self.l7rule_repo.update( + session, l7rule[constants.L7RULE_ID], + provisioning_status=constants.PENDING_CREATE) + + def revert(self, l7rule, *args, **kwargs): + """Mark the l7rule as broken + + :param l7rule: L7Rule object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7rule pending create in DB " + "for l7rule id %s", l7rule[constants.L7RULE_ID]) + self.task_utils.mark_l7rule_prov_status_error( + l7rule[constants.L7RULE_ID]) + + +class MarkL7RulePendingDeleteInDB(BaseDatabaseTask): + """Mark the l7rule pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7rule): + """Mark the l7rule as pending delete in DB. + + :param l7rule: L7Rule object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for l7rule id: %s", + l7rule[constants.L7RULE_ID]) + with db_apis.session().begin() as session: + self.l7rule_repo.update( + session, l7rule[constants.L7RULE_ID], + provisioning_status=constants.PENDING_DELETE) + + def revert(self, l7rule, *args, **kwargs): + """Mark the l7rule as broken + + :param l7rule: L7Rule object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7rule pending delete in DB " + "for l7rule id %s", l7rule[constants.L7RULE_ID]) + self.task_utils.mark_l7rule_prov_status_error( + l7rule[constants.L7RULE_ID]) + + +class MarkL7RulePendingUpdateInDB(BaseDatabaseTask): + """Mark the l7rule pending update in the DB. 
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, l7rule): + """Mark the l7rule as pending update in DB. + + :param l7rule: L7Rule object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING UPDATE in DB for l7rule id: %s", + l7rule[constants.L7RULE_ID]) + with db_apis.session().begin() as session: + self.l7rule_repo.update( + session, l7rule[constants.L7RULE_ID], + provisioning_status=constants.PENDING_UPDATE) + + def revert(self, l7rule, *args, **kwargs): + """Mark the l7rule as broken + + :param l7rule: L7Rule object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark l7rule pending update in DB " + "for l7rule id %s", l7rule[constants.L7RULE_ID]) + self.task_utils.mark_l7rule_prov_status_error( + l7rule[constants.L7RULE_ID]) + + +class MarkMemberActiveInDB(BaseDatabaseTask): + """Mark the member ACTIVE in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Mark the member ACTIVE in DB. + + :param member: Member object to be updated + :returns: None + """ + + LOG.debug("Mark ACTIVE in DB for member id: %s", + member[constants.MEMBER_ID]) + with db_apis.session().begin() as session: + self.member_repo.update(session, + member[constants.MEMBER_ID], + provisioning_status=constants.ACTIVE) + + def revert(self, member, *args, **kwargs): + """Mark the member as broken + + :param member: Member object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark member ACTIVE in DB " + "for member id %s", member[constants.MEMBER_ID]) + self.task_utils.mark_member_prov_status_error( + member[constants.MEMBER_ID]) + + +class MarkMemberPendingCreateInDB(BaseDatabaseTask): + """Mark the member pending create in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Mark the member as pending create in DB. + + :param member: Member object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING CREATE in DB for member id: %s", + member[constants.MEMBER_ID]) + with db_apis.session().begin() as session: + self.member_repo.update( + session, member[constants.MEMBER_ID], + provisioning_status=constants.PENDING_CREATE) + + def revert(self, member, *args, **kwargs): + """Mark the member as broken + + :param member: Member object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark member pending create in DB " + "for member id %s", member[constants.MEMBER_ID]) + self.task_utils.mark_member_prov_status_error( + member[constants.MEMBER_ID]) + + +class MarkMemberPendingDeleteInDB(BaseDatabaseTask): + """Mark the member pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Mark the member as pending delete in DB. 
+ + :param member: Member object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for member id: %s", + member[constants.MEMBER_ID]) + with db_apis.session().begin() as session: + self.member_repo.update( + session, member[constants.MEMBER_ID], + provisioning_status=constants.PENDING_DELETE) + + def revert(self, member, *args, **kwargs): + """Mark the member as broken + + :param member: Member object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark member pending delete in DB " + "for member id %s", member[constants.MEMBER_ID]) + self.task_utils.mark_member_prov_status_error( + member[constants.MEMBER_ID]) + + +class MarkMemberPendingUpdateInDB(BaseDatabaseTask): + """Mark the member pending update in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member): + """Mark the member as pending update in DB. + + :param member: Member object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING UPDATE in DB for member id: %s", + member[constants.MEMBER_ID]) + with db_apis.session().begin() as session: + self.member_repo.update( + session, member[constants.MEMBER_ID], + provisioning_status=constants.PENDING_UPDATE) + + def revert(self, member, *args, **kwargs): + """Mark the member as broken + + :param member: Member object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark member pending update in DB " + "for member id %s", member[constants.MEMBER_ID]) + self.task_utils.mark_member_prov_status_error( + member[constants.MEMBER_ID]) + + +class MarkPoolActiveInDB(BaseDatabaseTask): + """Mark the pool ACTIVE in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool_id): + """Mark the pool ACTIVE in DB. + + :param pool_id: pool_id to be updated + :returns: None + """ + + LOG.debug("Mark ACTIVE in DB for pool id: %s", + pool_id) + with db_apis.session().begin() as session: + self.pool_repo.update(session, + pool_id, + provisioning_status=constants.ACTIVE) + + def revert(self, pool_id, *args, **kwargs): + """Mark the pool as broken + + :param pool_id: pool_id that failed to update + :returns: None + """ + + LOG.warning("Reverting mark pool ACTIVE in DB for pool id %s", + pool_id) + self.task_utils.mark_pool_prov_status_error(pool_id) + + +class MarkPoolPendingCreateInDB(BaseDatabaseTask): + """Mark the pool pending create in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool_id): + """Mark the pool as pending create in DB. + + :param pool_id: pool_id of pool object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING CREATE in DB for pool id: %s", + pool_id) + with db_apis.session().begin() as session: + self.pool_repo.update(session, + pool_id, + provisioning_status=constants.PENDING_CREATE) + + def revert(self, pool_id, *args, **kwargs): + """Mark the pool as broken + + :param pool_id: pool_id of pool object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark pool pending create in DB " + "for pool id %s", pool_id) + self.task_utils.mark_pool_prov_status_error(pool_id) + + +class MarkPoolPendingDeleteInDB(BaseDatabaseTask): + """Mark the pool pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool_id): + """Mark the pool as pending delete in DB. 
+ + :param pool_id: pool_id of pool object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING DELETE in DB for pool id: %s", + pool_id) + with db_apis.session().begin() as session: + self.pool_repo.update(session, + pool_id, + provisioning_status=constants.PENDING_DELETE) + + def revert(self, pool_id, *args, **kwargs): + """Mark the pool as broken + + :param pool_id: pool_id of pool object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark pool pending delete in DB " + "for pool id %s", pool_id) + self.task_utils.mark_pool_prov_status_error(pool_id) + + +class MarkPoolPendingUpdateInDB(BaseDatabaseTask): + """Mark the pool pending update in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool_id): + """Mark the pool as pending update in DB. + + :param pool_id: pool_id of pool object to be updated + :returns: None + """ + + LOG.debug("Mark PENDING UPDATE in DB for pool id: %s", + pool_id) + with db_apis.session().begin() as session: + self.pool_repo.update(session, + pool_id, + provisioning_status=constants.PENDING_UPDATE) + + def revert(self, pool_id, *args, **kwargs): + """Mark the pool as broken + + :param pool_id: pool_id of pool object that failed to update + :returns: None + """ + + LOG.warning("Reverting mark pool pending update in DB " + "for pool id %s", pool_id) + self.task_utils.mark_pool_prov_status_error(pool_id) + + +class DecrementHealthMonitorQuota(BaseDatabaseTask): + """Decrements the health monitor quota for a project. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, project_id): + """Decrements the health monitor quota. + + :param project_id: The project_id to decrement the quota on. + :returns: None + """ + + LOG.debug("Decrementing health monitor quota for " + "project: %s ", project_id) + + lock_session = db_apis.get_session() + try: + self.repos.decrement_quota(lock_session, + data_models.HealthMonitor, + project_id) + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error('Failed to decrement health monitor quota for ' + 'project: %(proj)s the project may have excess ' + 'quota in use.', {'proj': project_id}) + lock_session.rollback() + + def revert(self, project_id, result, *args, **kwargs): + """Re-apply the quota + + :param project_id: The project_id to decrement the quota on. + :returns: None + """ + + LOG.warning('Reverting decrement quota for health monitor on project' + ' %(proj)s Project quota counts may be incorrect.', + {'proj': project_id}) + + # Increment the quota back if this task wasn't the failure + if not isinstance(result, failure.Failure): + + try: + session = db_apis.get_session() + try: + self.repos.check_quota_met(session, + data_models.HealthMonitor, + project_id) + session.commit() + except Exception: + session.rollback() + except Exception: + # Don't fail the revert flow + pass + + +class DecrementListenerQuota(BaseDatabaseTask): + """Decrements the listener quota for a project. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, project_id): + """Decrements the listener quota. + + :param project_id: The project_id to decrement the quota on. 
+        :returns: None
+        """
+
+        LOG.debug("Decrementing listener quota for "
+                  "project: %s ", project_id)
+
+        lock_session = db_apis.get_session()
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.Listener,
+                                       project_id)
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error('Failed to decrement listener quota for project: '
+                          '%(proj)s the project may have excess quota in use.',
+                          {'proj': project_id})
+                lock_session.rollback()
+
+    def revert(self, project_id, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param project_id: The project_id to decrement the quota on.
+        :returns: None
+        """
+        LOG.warning('Reverting decrement quota for listener on project '
+                    '%(proj)s Project quota counts may be incorrect.',
+                    {'proj': project_id})
+
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+
+            try:
+                session = db_apis.get_session()
+                try:
+                    self.repos.check_quota_met(session,
+                                               data_models.Listener,
+                                               project_id)
+                    session.commit()
+                except Exception:
+                    session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class DecrementLoadBalancerQuota(BaseDatabaseTask):
+    """Decrements the load balancer quota for a project.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, project_id):
+        """Decrements the load balancer quota.
+
+        :param project_id: Project id where quota should be reduced
+        :returns: None
+        """
+
+        LOG.debug("Decrementing load balancer quota for "
+                  "project: %s ", project_id)
+
+        lock_session = db_apis.get_session()
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.LoadBalancer,
+                                       project_id)
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error('Failed to decrement load balancer quota for '
+                          'project: %(proj)s the project may have excess '
+                          'quota in use.',
+                          {'proj': project_id})
+                lock_session.rollback()
+
+    def revert(self, project_id, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param project_id: The project id to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.warning('Reverting decrement quota for load balancer on project '
+                    '%(proj)s Project quota counts may be incorrect.',
+                    {'proj': project_id})
+
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+
+            try:
+                session = db_apis.get_session()
+                try:
+                    self.repos.check_quota_met(session,
+                                               data_models.LoadBalancer,
+                                               project_id)
+                    session.commit()
+                except Exception:
+                    session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class DecrementMemberQuota(BaseDatabaseTask):
+    """Decrements the member quota for a project.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, project_id):
+        """Decrements the member quota.
+
+        :param project_id: The project_id to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.debug("Decrementing member quota for "
+                  "project: %s ", project_id)
+
+        lock_session = db_apis.get_session()
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.Member,
+                                       project_id)
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error('Failed to decrement member quota for project: '
+                          '%(proj)s the project may have excess quota in use.',
+                          {'proj': project_id})
+                lock_session.rollback()
+
+    def revert(self, project_id, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param project_id: The project_id to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.warning('Reverting decrement quota for member on project %(proj)s '
+                    'Project quota counts may be incorrect.',
+                    {'proj': project_id})
+
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+
+            try:
+                session = db_apis.get_session()
+                try:
+                    self.repos.check_quota_met(session,
+                                               data_models.Member,
+                                               project_id)
+                    session.commit()
+                except Exception:
+                    session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class DecrementPoolQuota(BaseDatabaseTask):
+    """Decrements the pool quota for a project.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, project_id, pool_child_count):
+        """Decrements the pool quota.
+
+        :param project_id: The project_id to decrement the pool quota on
+        :param pool_child_count: Dict of health monitor and member counts
+        :returns: None
+        """
+
+        LOG.debug("Decrementing pool quota for "
+                  "project: %s ", project_id)
+
+        lock_session = db_apis.get_session()
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.Pool,
+                                       project_id)
+
+            # Pools cascade delete members and health monitors,
+            # so update the quota for those items as well.
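+            # For example, a pool that cascade-deletes one health monitor
+            # and two members is expected to arrive here with
+            # pool_child_count (built by CountPoolChildrenForQuota) equal
+            # to {'HM': 1, 'member': 2}, so the member quota below is
+            # decremented with quantity=2.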
+            if pool_child_count['HM'] > 0:
+                self.repos.decrement_quota(lock_session,
+                                           data_models.HealthMonitor,
+                                           project_id)
+            if pool_child_count['member'] > 0:
+                self.repos.decrement_quota(
+                    lock_session, data_models.Member,
+                    project_id, quantity=pool_child_count['member'])
+
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error('Failed to decrement pool quota for project: '
+                          '%(proj)s the project may have excess quota in use.',
+                          {'proj': project_id})
+                lock_session.rollback()
+
+    def revert(self, project_id, pool_child_count, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param project_id: The id of the project to decrement the quota on
+        :param pool_child_count: Dict of health monitor and member counts
+        :returns: None
+        """
+
+        LOG.warning('Reverting decrement quota for pool on project %(proj)s '
+                    'Project quota counts may be incorrect.',
+                    {'proj': project_id})
+
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+
+            # These are all independent to maximize the correction
+            # in case other quota actions have occurred
+            try:
+                session = db_apis.get_session()
+                try:
+                    self.repos.check_quota_met(session,
+                                               data_models.Pool,
+                                               project_id)
+                    session.commit()
+                except Exception:
+                    session.rollback()
+
+                # Attempt to increment back the health monitor quota
+                if pool_child_count['HM'] > 0:
+                    session = db_apis.get_session()
+                    try:
+                        self.repos.check_quota_met(session,
+                                                   data_models.HealthMonitor,
+                                                   project_id)
+                        session.commit()
+                    except Exception:
+                        session.rollback()
+
+                # Attempt to increment back the member quota
+                # These are separate calls to maximize the correction
+                # should other factors have increased the in use quota
+                # before this point in the revert flow
+                for i in range(pool_child_count['member']):
+                    session = db_apis.get_session()
+                    try:
+                        self.repos.check_quota_met(session,
+                                                   data_models.Member,
+                                                   project_id)
+                        session.commit()
+                    except Exception:
+                        session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class CountPoolChildrenForQuota(BaseDatabaseTask):
+    """Counts the pool child resources for quota management.
+
+    Since the children of pools are cleaned up by the sqlalchemy
+    cascade delete settings, we need to collect the quota counts
+    for the child objects early.
+
+    """
+
+    def execute(self, pool_id):
+        """Count the pool child resources for quota management
+
+        :param pool_id: pool_id of pool object to count children on
+        :returns: Dict of the health monitor and member counts for the pool
+        """
+        with db_apis.session().begin() as session:
+            hm_count, member_count = (
+                self.pool_repo.get_children_count(session, pool_id))
+
+        return {'HM': hm_count, 'member': member_count}
+
+
+class DecrementL7policyQuota(BaseDatabaseTask):
+    """Decrements the l7policy quota for a project.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, l7policy):
+        """Decrements the l7policy quota.
+
+        :param l7policy: The l7policy to decrement the quota on.
+        :returns: None
+        """
+        LOG.debug("Decrementing l7policy quota for "
+                  "project: %s ", l7policy[constants.PROJECT_ID])
+        lock_session = db_apis.get_session()
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.L7Policy,
+                                       l7policy[constants.PROJECT_ID])
+            db_l7policy = self.l7policy_repo.get(
+                lock_session,
+                id=l7policy[constants.L7POLICY_ID])
+
+            if db_l7policy and db_l7policy.l7rules:
+                self.repos.decrement_quota(lock_session,
+                                           data_models.L7Rule,
+                                           l7policy[constants.PROJECT_ID],
+                                           quantity=len(db_l7policy.l7rules))
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error('Failed to decrement l7policy quota for project: '
+                          '%(proj)s the project may have excess quota in use.',
+                          {'proj': l7policy[constants.PROJECT_ID]})
+                lock_session.rollback()
+
+    def revert(self, l7policy, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param l7policy: The l7policy to decrement the quota on.
+        :returns: None
+        """
+        LOG.warning('Reverting decrement quota for l7policy on project'
+                    ' %(proj)s Project quota counts may be incorrect.',
+                    {'proj': l7policy[constants.PROJECT_ID]})
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+            try:
+                session = db_apis.get_session()
+                try:
+                    self.repos.check_quota_met(session,
+                                               data_models.L7Policy,
+                                               l7policy[constants.PROJECT_ID])
+                    session.commit()
+                except Exception:
+                    session.rollback()
+                db_l7policy = self.l7policy_repo.get(
+                    session, id=l7policy[constants.L7POLICY_ID])
+                if db_l7policy:
+                    # Attempt to increment back the L7Rule quota
+                    for i in range(len(db_l7policy.l7rules)):
+                        session = db_apis.get_session()
+                        try:
+                            self.repos.check_quota_met(
+                                session, data_models.L7Rule,
+                                db_l7policy.project_id)
+                            session.commit()
+                        except Exception:
+                            session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class DecrementL7ruleQuota(BaseDatabaseTask):
+    """Decrements the l7rule quota for a project.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, l7rule):
+        """Decrements the l7rule quota.
+
+        :param l7rule: The l7rule to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.debug("Decrementing l7rule quota for "
+                  "project: %s ", l7rule[constants.PROJECT_ID])
+
+        lock_session = db_apis.get_session()
+        try:
+            self.repos.decrement_quota(lock_session,
+                                       data_models.L7Rule,
+                                       l7rule[constants.PROJECT_ID])
+            lock_session.commit()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error('Failed to decrement l7rule quota for project: '
+                          '%(proj)s the project may have excess quota in use.',
+                          {'proj': l7rule[constants.PROJECT_ID]})
+                lock_session.rollback()
+
+    def revert(self, l7rule, result, *args, **kwargs):
+        """Re-apply the quota
+
+        :param l7rule: The l7rule to decrement the quota on.
+        :returns: None
+        """
+
+        LOG.warning('Reverting decrement quota for l7rule on project %(proj)s '
+                    'Project quota counts may be incorrect.',
+                    {'proj': l7rule[constants.PROJECT_ID]})
+
+        # Increment the quota back if this task wasn't the failure
+        if not isinstance(result, failure.Failure):
+
+            try:
+                session = db_apis.get_session()
+                try:
+                    self.repos.check_quota_met(session,
+                                               data_models.L7Rule,
+                                               l7rule[constants.PROJECT_ID])
+                    session.commit()
+                except Exception:
+                    session.rollback()
+            except Exception:
+                # Don't fail the revert flow
+                pass
+
+
+class UpdatePoolMembersOperatingStatusInDB(BaseDatabaseTask):
+    """Updates the operating status of the members of a pool.
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, pool_id, operating_status): + """Update the members of a pool operating status in DB. + + :param pool_id: pool_id of pool object to be updated + :param operating_status: Operating status to set + :returns: None + """ + + LOG.debug("Updating member operating status to %(status)s in DB for " + "pool id: %(pool)s", {'status': operating_status, + 'pool': pool_id}) + with db_apis.session().begin() as session: + self.member_repo.update_pool_members( + session, pool_id, operating_status=operating_status) + + +class GetAmphoraFirewallRules(BaseDatabaseTask): + """Task to build firewall rules for the amphora.""" + + def execute(self, amphorae, amphora_index, amphorae_network_config): + this_amp_id = amphorae[amphora_index][constants.ID] + amp_net_config = amphorae_network_config[this_amp_id] + + lb_dict = amp_net_config[constants.AMPHORA]['load_balancer'] + vip_dict = lb_dict[constants.VIP] + + if vip_dict[constants.VNIC_TYPE] != constants.VNIC_TYPE_DIRECT: + LOG.debug('Load balancer VIP port is not SR-IOV enabled. Skipping ' + 'firewall rules update.') + return [{'non-sriov-vip': True}] + + session = db_apis.get_session() + with session.begin(): + rules = self.listener_repo.get_port_protocol_cidr_for_lb( + session, + amp_net_config[constants.AMPHORA][constants.LOAD_BALANCER_ID]) + + # If we are act/stdby, inject the VRRP firewall rule(s) + if lb_dict[constants.TOPOLOGY] == constants.TOPOLOGY_ACTIVE_STANDBY: + for amp_cfg in lb_dict[constants.AMPHORAE]: + if (amp_cfg[constants.ID] != this_amp_id and + amp_cfg[constants.STATUS] == + lib_consts.AMPHORA_ALLOCATED): + vrrp_ip = amp_cfg[constants.VRRP_IP] + vrrp_ip_ver = utils.ip_version(vrrp_ip) + + if vrrp_ip_ver == 4: + vrrp_ip_cidr = f'{vrrp_ip}/32' + elif vrrp_ip_ver == 6: + vrrp_ip_cidr = f'{vrrp_ip}/128' + else: + raise exceptions.InvalidIPAddress(ip_addr=vrrp_ip) + + rules.append({constants.PROTOCOL: constants.VRRP, + constants.CIDR: vrrp_ip_cidr, + constants.PORT: 112}) + LOG.debug('Amphora %s SR-IOV firewall rules: %s', this_amp_id, rules) + return rules diff --git a/octavia/controller/worker/v2/tasks/lifecycle_tasks.py b/octavia/controller/worker/v2/tasks/lifecycle_tasks.py new file mode 100644 index 0000000000..24dd694803 --- /dev/null +++ b/octavia/controller/worker/v2/tasks/lifecycle_tasks.py @@ -0,0 +1,254 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
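+
+# NOTE: The tasks below share one pattern: execute() is a deliberate no-op
+# checkpoint, and revert() marks the failed object ERROR while returning
+# its parent objects to ACTIVE so the load balancer is unlocked again. A
+# controller flow typically adds one of these tasks ahead of the real work
+# so that any later failure reverts through it, roughly like this (sketch
+# only, the flow name is hypothetical):
+#
+#     from taskflow.patterns import linear_flow
+#
+#     flow = linear_flow.Flow('example-listener-flow')
+#     flow.add(ListenersToErrorOnRevertTask(
+#         requires=[constants.LISTENERS]))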
+ +from taskflow import task + +from octavia.common import constants +from octavia.controller.worker import task_utils as task_utilities + + +class BaseLifecycleTask(task.Task): + """Base task to instantiate common classes.""" + + def __init__(self, **kwargs): + self.task_utils = task_utilities.TaskUtils() + super().__init__(**kwargs) + + +class AmphoraIDToErrorOnRevertTask(BaseLifecycleTask): + """Task to checkpoint Amphora lifecycle milestones.""" + + def execute(self, amphora_id): + pass + + def revert(self, amphora_id, *args, **kwargs): + self.task_utils.mark_amphora_status_error(amphora_id) + + +class AmphoraToErrorOnRevertTask(AmphoraIDToErrorOnRevertTask): + """Task to checkpoint Amphora lifecycle milestones.""" + + def execute(self, amphora): + pass + + def revert(self, amphora, *args, **kwargs): + super().revert( + amphora.get(constants.ID)) + + +class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask): + """Task to set a member to ERROR on revert.""" + + def execute(self, health_mon, listeners, loadbalancer): + pass + + def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs): + try: + self.task_utils.mark_health_mon_prov_status_error( + health_mon[constants.HEALTHMONITOR_ID]) + self.task_utils.mark_pool_prov_status_active( + health_mon[constants.POOL_ID]) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass + self.task_utils.mark_loadbalancer_prov_status_active( + loadbalancer[constants.LOADBALANCER_ID]) + + +class L7PolicyToErrorOnRevertTask(BaseLifecycleTask): + """Task to set a l7policy to ERROR on revert.""" + + def execute(self, l7policy, listeners, loadbalancer_id): + pass + + def revert(self, l7policy, listeners, loadbalancer_id, *args, **kwargs): + try: + self.task_utils.mark_l7policy_prov_status_error( + l7policy[constants.L7POLICY_ID]) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass + self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer_id) + + +class L7RuleToErrorOnRevertTask(BaseLifecycleTask): + """Task to set a l7rule to ERROR on revert.""" + + def execute(self, l7rule, l7policy_id, listeners, loadbalancer_id): + pass + + def revert(self, l7rule, l7policy_id, listeners, loadbalancer_id, *args, + **kwargs): + try: + self.task_utils.mark_l7rule_prov_status_error( + l7rule[constants.L7RULE_ID]) + self.task_utils.mark_l7policy_prov_status_active(l7policy_id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass + self.task_utils.mark_loadbalancer_prov_status_active( + loadbalancer_id) + + +class ListenerToErrorOnRevertTask(BaseLifecycleTask): + """Task to set a listener to ERROR on revert.""" + + 
def execute(self, listener): + pass + + def revert(self, listener, *args, **kwargs): + try: + self.task_utils.mark_listener_prov_status_error( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass + self.task_utils.mark_loadbalancer_prov_status_active( + listener[constants.LOADBALANCER_ID]) + + +class ListenersToErrorOnRevertTask(BaseLifecycleTask): + """Task to set a listener to ERROR on revert.""" + + def execute(self, listeners): + pass + + def revert(self, listeners, *args, **kwargs): + try: + for listener in listeners: + self.task_utils.mark_listener_prov_status_error( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass + self.task_utils.mark_loadbalancer_prov_status_active( + listeners[0][constants.LOADBALANCER_ID]) + + +class LoadBalancerIDToErrorOnRevertTask(BaseLifecycleTask): + """Task to set the load balancer to ERROR on revert.""" + + def execute(self, loadbalancer_id): + pass + + def revert(self, loadbalancer_id, *args, **kwargs): + self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id) + + +class LoadBalancerToErrorOnRevertTask(LoadBalancerIDToErrorOnRevertTask): + """Task to set the load balancer to ERROR on revert.""" + + def execute(self, loadbalancer): + pass + + def revert(self, loadbalancer, *args, **kwargs): + super().revert( + loadbalancer[constants.LOADBALANCER_ID]) + + +class MemberToErrorOnRevertTask(BaseLifecycleTask): + """Task to set a member to ERROR on revert.""" + + def execute(self, member, listeners, loadbalancer, pool_id): + pass + + def revert(self, member, listeners, loadbalancer, pool_id, *args, + **kwargs): + try: + self.task_utils.mark_member_prov_status_error( + member[constants.MEMBER_ID]) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + self.task_utils.mark_pool_prov_status_active(pool_id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass + self.task_utils.mark_loadbalancer_prov_status_active( + loadbalancer[constants.LOADBALANCER_ID]) + + +class MembersToErrorOnRevertTask(BaseLifecycleTask): + """Task to set members to ERROR on revert.""" + + def execute(self, members, listeners, loadbalancer, pool_id): + pass + + def revert(self, members, listeners, loadbalancer, pool_id, *args, + **kwargs): + try: + for m in members: + self.task_utils.mark_member_prov_status_error( + m[constants.MEMBER_ID]) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + self.task_utils.mark_pool_prov_status_active(pool_id) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass + self.task_utils.mark_loadbalancer_prov_status_active( + 
loadbalancer[constants.LOADBALANCER_ID]) + + +class PoolToErrorOnRevertTask(BaseLifecycleTask): + """Task to set a pool to ERROR on revert.""" + + def execute(self, pool_id, listeners, loadbalancer): + pass + + def revert(self, pool_id, listeners, loadbalancer, *args, **kwargs): + try: + self.task_utils.mark_pool_prov_status_error(pool_id) + for listener in listeners: + self.task_utils.mark_listener_prov_status_active( + listener[constants.LISTENER_ID]) + except Exception: + # Catching and skipping, errors are already reported by task_utils + # and we want to ensure that mark_loadbalancer_prov_status_active + # is called to unlock the LB (it will pass or it will fail after a + # very long timeout) + pass + self.task_utils.mark_loadbalancer_prov_status_active( + loadbalancer[constants.LOADBALANCER_ID]) diff --git a/octavia/controller/worker/v2/tasks/network_tasks.py b/octavia/controller/worker/v2/tasks/network_tasks.py new file mode 100644 index 0000000000..5577a8a32e --- /dev/null +++ b/octavia/controller/worker/v2/tasks/network_tasks.py @@ -0,0 +1,1157 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +import time + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from sqlalchemy.orm import exc as sa_exception +from taskflow import task +from taskflow.types import failure +import tenacity + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import utils +from octavia.controller.worker import task_utils +from octavia.db import api as db_apis +from octavia.db import repositories as repo +from octavia.i18n import _ +from octavia.network import base +from octavia.network import data_models as n_data_models + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class BaseNetworkTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._network_driver = None + self.task_utils = task_utils.TaskUtils() + self.loadbalancer_repo = repo.LoadBalancerRepository() + self.amphora_repo = repo.AmphoraRepository() + self.amphora_member_port_repo = repo.AmphoraMemberPortRepository() + + @property + def network_driver(self): + if self._network_driver is None: + self._network_driver = utils.get_network_driver() + return self._network_driver + + +class CalculateAmphoraDelta(BaseNetworkTask): + + default_provides = constants.DELTA + + def execute(self, loadbalancer, amphora, availability_zone): + LOG.debug("Calculating network delta for amphora id: %s", + amphora.get(constants.ID)) + + # Figure out what networks we want + # seed with lb network(s) + if (availability_zone and + availability_zone.get(constants.MANAGEMENT_NETWORK)): + management_nets = [ + availability_zone.get(constants.MANAGEMENT_NETWORK)] + else: + management_nets = CONF.controller_worker.amp_boot_network_list + + session = db_apis.get_session() + with 
session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + + desired_subnet_to_net_map = { + loadbalancer[constants.VIP_SUBNET_ID]: + loadbalancer[constants.VIP_NETWORK_ID] + } + + net_vnic_type_map = {} + for pool in db_lb.pools: + for member in pool.members: + if (member.subnet_id and + member.provisioning_status != + constants.PENDING_DELETE): + member_network = self.network_driver.get_subnet( + member.subnet_id).network_id + net_vnic_type_map[member_network] = getattr( + member, 'vnic_type', constants.VNIC_TYPE_NORMAL) + desired_subnet_to_net_map[member.subnet_id] = ( + member_network) + + desired_network_ids = set(desired_subnet_to_net_map.values()) + desired_subnet_ids = set(desired_subnet_to_net_map) + + # Calculate Network deltas + nics = self.network_driver.get_plugged_networks( + amphora[constants.COMPUTE_ID]) + # we don't have two nics in the same network + # Don't include the nics connected to the management network, we don't + # want to update these interfaces. + network_to_nic_map = { + nic.network_id: nic + for nic in nics + if nic.network_id not in management_nets} + + plugged_network_ids = set(network_to_nic_map) + + del_ids = plugged_network_ids - desired_network_ids + delete_nics = [n_data_models.Interface( + network_id=net_id, + port_id=network_to_nic_map[net_id].port_id) + for net_id in del_ids] + + add_ids = desired_network_ids - plugged_network_ids + add_nics = [n_data_models.Interface( + network_id=add_net_id, + fixed_ips=[ + n_data_models.FixedIP( + subnet_id=subnet_id) + for subnet_id, net_id in desired_subnet_to_net_map.items() + if net_id == add_net_id], + vnic_type=net_vnic_type_map[add_net_id]) + for add_net_id in add_ids] + + # Calculate member Subnet deltas + plugged_subnets = {} + for nic in network_to_nic_map.values(): + for fixed_ip in nic.fixed_ips or []: + plugged_subnets[fixed_ip.subnet_id] = nic.network_id + + plugged_subnet_ids = set(plugged_subnets) + del_subnet_ids = plugged_subnet_ids - desired_subnet_ids + add_subnet_ids = desired_subnet_ids - plugged_subnet_ids + + def _subnet_updates(subnet_ids, subnets): + updates = [] + for s in subnet_ids: + network_id = subnets[s] + nic = network_to_nic_map.get(network_id) + port_id = nic.port_id if nic else None + updates.append({ + constants.SUBNET_ID: s, + constants.NETWORK_ID: network_id, + constants.PORT_ID: port_id + }) + return updates + + add_subnets = _subnet_updates(add_subnet_ids, + desired_subnet_to_net_map) + del_subnets = _subnet_updates(del_subnet_ids, + plugged_subnets) + + delta = n_data_models.Delta( + amphora_id=amphora[constants.ID], + compute_id=amphora[constants.COMPUTE_ID], + add_nics=add_nics, delete_nics=delete_nics, + add_subnets=add_subnets, + delete_subnets=del_subnets) + return delta.to_dict(recurse=True) + + +class CalculateDelta(BaseNetworkTask): + """Task to calculate the delta between + + the nics on the amphora and the ones + we need. Returns a list for + plumbing them. + """ + + default_provides = constants.DELTAS + + def execute(self, loadbalancer, availability_zone): + """Compute which NICs need to be plugged + + for the amphora to become operational. 
+ + :param loadbalancer: the loadbalancer to calculate deltas for all + amphorae + :param availability_zone: availability zone metadata dict + + :returns: dict of octavia.network.data_models.Delta keyed off amphora + id + """ + + calculate_amp = CalculateAmphoraDelta() + deltas = {} + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + for amphora in filter( + lambda amp: amp.status == constants.AMPHORA_ALLOCATED, + db_lb.amphorae): + + delta = calculate_amp.execute(loadbalancer, amphora.to_dict(), + availability_zone) + deltas[amphora.id] = delta + return deltas + + +class GetPlumbedNetworks(BaseNetworkTask): + """Task to figure out the NICS on an amphora. + + This will likely move into the amphora driver + :returns: Array of networks + """ + + default_provides = constants.NICS + + def execute(self, amphora): + """Get plumbed networks for the amphora.""" + + LOG.debug("Getting plumbed networks for amphora id: %s", + amphora[constants.ID]) + + return self.network_driver.get_plugged_networks( + amphora[constants.COMPUTE_ID]) + + +class UnPlugNetworks(BaseNetworkTask): + """Task to unplug the networks + + Loop over all nics and unplug them + based on delta + """ + + def execute(self, amphora, delta): + """Unplug the networks.""" + + LOG.debug("Unplug network for amphora") + if not delta: + LOG.debug("No network deltas for amphora id: %s", + amphora[constants.ID]) + return + + for nic in delta[constants.DELETE_NICS]: + try: + self.network_driver.unplug_network( + amphora[constants.COMPUTE_ID], nic[constants.NETWORK_ID]) + except base.NetworkNotFound: + LOG.debug("Network %d not found", nic[constants.NETWORK_ID]) + except Exception: + LOG.exception("Unable to unplug network") + # TODO(xgerman) follow up if that makes sense + + +class GetMemberPorts(BaseNetworkTask): + + def execute(self, loadbalancer, amphora): + vip_port = self.network_driver.get_port(loadbalancer['vip_port_id']) + member_ports = [] + interfaces = self.network_driver.get_plugged_networks( + amphora[constants.COMPUTE_ID]) + for interface in interfaces: + port = self.network_driver.get_port(interface.port_id) + if vip_port.network_id == port.network_id: + continue + port.network = self.network_driver.get_network(port.network_id) + for fixed_ip in port.fixed_ips: + if amphora['lb_network_ip'] == fixed_ip.ip_address: + break + fixed_ip.subnet = self.network_driver.get_subnet( + fixed_ip.subnet_id) + # Only add the port to the list if the IP wasn't the mgmt IP + else: + member_ports.append(port) + return member_ports + + +class HandleNetworkDelta(BaseNetworkTask): + """Task to plug and unplug networks + + Plug or unplug networks based on delta + """ + + def _fill_port_info(self, port): + port.network = self.network_driver.get_network(port.network_id) + for fixed_ip in port.fixed_ips: + fixed_ip.subnet = self.network_driver.get_subnet( + fixed_ip.subnet_id) + + def _cleanup_port(self, port_id, compute_id): + try: + self.network_driver.delete_port(port_id) + except Exception: + LOG.error(f'Unable to delete port {port_id} after failing to plug ' + f'the port into compute {compute_id}. 
This port ' + f'may now be abandoned in neutron.') + + def execute(self, amphora, delta): + """Handle network plugging based off deltas.""" + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora.get(constants.ID)) + updated_ports = {} + for nic in delta[constants.ADD_NICS]: + network_id = nic[constants.NETWORK_ID] + subnet_id = nic[constants.FIXED_IPS][0][constants.SUBNET_ID] + + try: + port = self.network_driver.create_port( + network_id, + name=f'octavia-lb-member-{amphora.get(constants.ID)}', + vnic_type=nic[constants.VNIC_TYPE]) + except exceptions.NotFound as e: + if 'Network' in str(e): + raise base.NetworkNotFound(str(e)) + raise base.CreatePortException(str(e)) + except Exception as e: + message = _('Error creating a port on network {network_id}.' + ).format(network_id=network_id) + LOG.exception(message) + raise base.CreatePortException(message) from e + + try: + self.network_driver.plug_port(db_amp, port) + except exceptions.NotFound as e: + self._cleanup_port(port.id, db_amp.compute_id) + if 'Instance' in str(e): + raise base.AmphoraNotFound(str(e)) + raise base.PlugNetworkException(str(e)) + except Exception as e: + self._cleanup_port(port.id, db_amp.compute_id) + message = _('Error plugging amphora (compute_id: ' + '{compute_id}) into network {network_id}.').format( + compute_id=db_amp.compute_id, network_id=network_id) + LOG.exception(message) + raise base.PlugNetworkException(message) from e + with session.begin(): + self.amphora_member_port_repo.create( + session, port_id=port.id, + amphora_id=amphora.get(constants.ID), + network_id=network_id) + + self._fill_port_info(port) + updated_ports[port.network_id] = port.to_dict(recurse=True) + + for update in delta.get(constants.ADD_SUBNETS, []): + network_id = update[constants.NETWORK_ID] + # Get already existing port from Deltas or + # newly created port from updated_ports dict + port_id = (update[constants.PORT_ID] or + updated_ports[network_id][constants.ID]) + subnet_id = update[constants.SUBNET_ID] + # Avoid duplicated subnets + has_subnet = False + if network_id in updated_ports: + has_subnet = any( + fixed_ip[constants.SUBNET_ID] == subnet_id + for fixed_ip in updated_ports[network_id][ + constants.FIXED_IPS]) + if not has_subnet: + port = self.network_driver.plug_fixed_ip( + port_id=port_id, subnet_id=subnet_id) + self._fill_port_info(port) + updated_ports[network_id] = ( + port.to_dict(recurse=True)) + + for update in delta.get(constants.DELETE_SUBNETS, []): + network_id = update[constants.NETWORK_ID] + port_id = update[constants.PORT_ID] + subnet_id = update[constants.SUBNET_ID] + port = self.network_driver.unplug_fixed_ip( + port_id=port_id, subnet_id=subnet_id) + self._fill_port_info(port) + # In neutron, when removing an ipv6 subnet (with slaac) from a + # port, it just ignores it. 
+ # https://bugs.launchpad.net/neutron/+bug/1945156 + # When it happens, don't add the port to the updated_ports dict + has_subnet = any( + fixed_ip.subnet_id == subnet_id + for fixed_ip in port.fixed_ips) + if not has_subnet: + updated_ports[network_id] = ( + port.to_dict(recurse=True)) + + for nic in delta[constants.DELETE_NICS]: + network_id = nic[constants.NETWORK_ID] + try: + self.network_driver.unplug_network( + db_amp.compute_id, network_id) + except base.NetworkNotFound: + LOG.debug("Network %s not found", network_id) + except Exception: + LOG.exception("Unable to unplug network") + + port_id = nic[constants.PORT_ID] + try: + self.network_driver.delete_port(port_id) + except Exception: + LOG.exception("Unable to delete the port") + try: + with session.begin(): + self.amphora_member_port_repo.delete(session, + port_id=port_id) + except sa_exception.NoResultFound: + # Passively fail here for upgrade compatibility + LOG.warning("No Amphora member port records found for " + "port_id: %s", port_id) + + updated_ports.pop(network_id, None) + return {amphora[constants.ID]: list(updated_ports.values())} + + def revert(self, result, amphora, delta, *args, **kwargs): + """Handle a network plug or unplug failures.""" + + if isinstance(result, failure.Failure): + return + + if not delta: + return + + LOG.warning("Unable to plug networks for amp id %s", + delta['amphora_id']) + + for nic in delta[constants.ADD_NICS]: + try: + self.network_driver.unplug_network(delta[constants.COMPUTE_ID], + nic[constants.NETWORK_ID]) + except Exception: + LOG.exception("Unable to unplug network %s", + nic[constants.NETWORK_ID]) + + port_id = nic[constants.PORT_ID] + try: + self.network_driver.delete_port(port_id) + except Exception: + LOG.exception("Unable to delete port %s", port_id) + + +class HandleNetworkDeltas(BaseNetworkTask): + """Task to plug and unplug networks + + Loop through the deltas and plug or unplug + networks based on delta + """ + + def execute(self, deltas, loadbalancer): + """Handle network plugging based off deltas.""" + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + amphorae = {amp.id: amp for amp in db_lb.amphorae} + + updated_ports = {} + handle_delta = HandleNetworkDelta() + + for amp_id, delta in deltas.items(): + ret = handle_delta.execute(amphorae[amp_id].to_dict(), delta) + updated_ports.update(ret) + + return updated_ports + + def revert(self, result, deltas, *args, **kwargs): + """Handle a network plug or unplug failures.""" + + if isinstance(result, failure.Failure): + return + + if not deltas: + return + + for amp_id, delta in deltas.items(): + LOG.warning("Unable to plug networks for amp id %s", + delta[constants.AMPHORA_ID]) + for nic in delta[constants.ADD_NICS]: + try: + self.network_driver.unplug_network( + delta[constants.COMPUTE_ID], + nic[constants.NETWORK_ID]) + except Exception: + LOG.exception("Unable to unplug network %s", + nic[constants.NETWORK_ID]) + + port_id = nic[constants.PORT_ID] + try: + self.network_driver.delete_port(port_id) + except Exception: + LOG.exception("Unable to delete port %s", port_id) + + +class UpdateVIPSecurityGroup(BaseNetworkTask): + """Task to setup SG for LB.""" + + def execute(self, loadbalancer_id): + """Task to setup SG for LB.""" + + LOG.debug("Setting up VIP SG for load balancer id: %s", + loadbalancer_id) + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer_id) 
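+
+        # update_vip_sg creates or refreshes the security group that
+        # Octavia manages for the VIP port and returns its id; sg_id can
+        # be None (for example, when the network driver does not manage
+        # security groups), which the log message below allows for.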
+ + sg_id = self.network_driver.update_vip_sg(db_lb, db_lb.vip) + LOG.info("Set up VIP SG %s for load balancer %s complete", + sg_id if sg_id else "None", loadbalancer_id) + return sg_id + + +class UpdateAmphoraSecurityGroup(BaseNetworkTask): + """Task to update SGs for an Amphora.""" + + def execute(self, loadbalancer_id: str): + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer_id) + for amp in db_lb.amphorae: + self.network_driver.update_aap_port_sg(db_lb, + amp, + db_lb.vip) + + +class GetSubnetFromVIP(BaseNetworkTask): + """Task to plumb a VIP.""" + + def execute(self, loadbalancer): + """Plumb a vip to an amphora.""" + + LOG.debug("Getting subnet for LB: %s", + loadbalancer[constants.LOADBALANCER_ID]) + + subnet = self.network_driver.get_subnet(loadbalancer['vip_subnet_id']) + LOG.info("Got subnet %s for load balancer %s", + loadbalancer['vip_subnet_id'] if subnet else "None", + loadbalancer[constants.LOADBALANCER_ID]) + return subnet.to_dict() + + +class PlugVIPAmphora(BaseNetworkTask): + """Task to plumb a VIP.""" + + def execute(self, loadbalancer, amphora, subnet): + """Plumb a vip to an amphora.""" + + LOG.debug("Plumbing VIP for amphora id: %s", + amphora.get(constants.ID)) + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora.get(constants.ID)) + db_subnet = self.network_driver.get_subnet(subnet[constants.ID]) + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + amp_data = self.network_driver.plug_aap_port( + db_lb, db_lb.vip, db_amp, db_subnet) + return amp_data.to_dict() + + def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs): + """Handle a failure to plumb a vip.""" + if isinstance(result, failure.Failure): + return + lb_id = loadbalancer[constants.LOADBALANCER_ID] + LOG.warning("Unable to plug VIP for amphora id %s " + "load balancer id %s", + amphora.get(constants.ID), lb_id) + try: + session = db_apis.get_session() + with session.begin(): + db_amp = self.amphora_repo.get(session, + id=amphora.get(constants.ID)) + db_amp.vrrp_port_id = result[constants.VRRP_PORT_ID] + db_amp.ha_port_id = result[constants.HA_PORT_ID] + db_subnet = self.network_driver.get_subnet( + subnet[constants.ID]) + db_lb = self.loadbalancer_repo.get(session, id=lb_id) + self.network_driver.unplug_aap_port(db_lb.vip, + db_amp, db_subnet) + except Exception as e: + LOG.error( + 'Failed to unplug AAP port for load balancer: %s. ' + 'Resources may still be in use for VRRP port: %s. 
' + 'Due to error: %s', + lb_id, result[constants.VRRP_PORT_ID], str(e) + ) + + +class UnplugVIP(BaseNetworkTask): + """Task to unplug the vip.""" + + def execute(self, loadbalancer): + """Unplug the vip.""" + + LOG.debug("Unplug vip on amphora") + try: + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, + id=loadbalancer[constants.LOADBALANCER_ID]) + self.network_driver.unplug_vip(db_lb, db_lb.vip) + except Exception: + LOG.exception("Unable to unplug vip from load balancer %s", + loadbalancer[constants.LOADBALANCER_ID]) + + +class AllocateVIP(BaseNetworkTask): + """Task to allocate a VIP.""" + + def execute(self, loadbalancer): + """Allocate a vip to the loadbalancer.""" + + LOG.debug("Allocating vip with port id %s, subnet id %s, " + "ip address %s for load balancer %s", + loadbalancer[constants.VIP_PORT_ID], + loadbalancer[constants.VIP_SUBNET_ID], + loadbalancer[constants.VIP_ADDRESS], + loadbalancer[constants.LOADBALANCER_ID]) + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + vip, additional_vips = self.network_driver.allocate_vip(db_lb) + LOG.info("Allocated vip with port id %s, subnet id %s, ip address %s " + "for load balancer %s", + loadbalancer[constants.VIP_PORT_ID], + loadbalancer[constants.VIP_SUBNET_ID], + loadbalancer[constants.VIP_ADDRESS], + loadbalancer[constants.LOADBALANCER_ID]) + for add_vip in additional_vips: + LOG.debug('Allocated an additional VIP: subnet=%(subnet)s ' + 'ip_address=%(ip)s', {'subnet': add_vip.subnet_id, + 'ip': add_vip.ip_address}) + return (vip.to_dict(), + [additional_vip.to_dict() + for additional_vip in additional_vips]) + + def revert(self, result, loadbalancer, *args, **kwargs): + """Handle a failure to allocate vip.""" + + if isinstance(result, failure.Failure): + LOG.exception("Unable to allocate VIP") + return + vip, additional_vips = result + vip = data_models.Vip(**vip) + LOG.warning("Deallocating vip %s", vip.ip_address) + try: + self.network_driver.deallocate_vip(vip) + except Exception as e: + LOG.error("Failed to deallocate VIP. Resources may still " + "be in use from vip: %(vip)s due to error: %(except)s", + {'vip': vip.ip_address, 'except': str(e)}) + + +class AllocateVIPforFailover(AllocateVIP): + """Task to allocate/validate the VIP for a failover flow.""" + + def revert(self, result, loadbalancer, *args, **kwargs): + """Handle a failure to allocate vip.""" + + if isinstance(result, failure.Failure): + LOG.exception("Unable to allocate VIP") + return + vip, additional_vips = result + vip = data_models.Vip(**vip) + LOG.info("Failover revert is not deallocating vip %s because this is " + "a failover.", vip.ip_address) + + +class DeallocateVIP(BaseNetworkTask): + """Task to deallocate a VIP.""" + + def execute(self, loadbalancer): + """Deallocate a VIP.""" + + LOG.debug("Deallocating a VIP %s", loadbalancer[constants.VIP_ADDRESS]) + + # NOTE(blogan): this is kind of ugly but sufficient for now. Drivers + # will need access to the load balancer that the vip is/was attached + # to. However the data model serialization for the vip does not give a + # backref to the loadbalancer if accessed through the loadbalancer. 
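+        # The manual backref assignment below (vip.load_balancer = db_lb)
+        # restores that link so the driver can reach the parent load
+        # balancer from the vip object while deallocating it.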
+        session = db_apis.get_session()
+        with session.begin():
+            db_lb = self.loadbalancer_repo.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+            vip = db_lb.vip
+            vip.load_balancer = db_lb
+        self.network_driver.deallocate_vip(vip)
+
+
+class UpdateVIP(BaseNetworkTask):
+    """Task to update a VIP."""
+
+    def execute(self, listeners):
+        session = db_apis.get_session()
+        with session.begin():
+            loadbalancer = self.loadbalancer_repo.get(
+                session, id=listeners[0][constants.LOADBALANCER_ID])
+
+        LOG.debug("Updating VIP of load_balancer %s.", loadbalancer.id)
+
+        self.network_driver.update_vip(loadbalancer)
+
+
+class UpdateVIPForDelete(BaseNetworkTask):
+    """Task to update a VIP for listener delete flows."""
+
+    def execute(self, loadbalancer_id):
+        session = db_apis.get_session()
+        with session.begin():
+            loadbalancer = self.loadbalancer_repo.get(
+                session, id=loadbalancer_id)
+        LOG.debug("Updating VIP for listener delete on load_balancer %s.",
+                  loadbalancer.id)
+        self.network_driver.update_vip(loadbalancer, for_delete=True)
+
+
+class GetAmphoraNetworkConfigs(BaseNetworkTask):
+    """Task to retrieve amphora network details."""
+
+    def execute(self, loadbalancer, amphora=None):
+        LOG.debug("Retrieving vip network details.")
+        session = db_apis.get_session()
+        with session.begin():
+            db_amp = self.amphora_repo.get(session,
+                                           id=amphora.get(constants.ID))
+            db_lb = self.loadbalancer_repo.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+            db_configs = self.network_driver.get_network_configs(
+                db_lb, amphora=db_amp)
+            provider_dict = {}
+            for amp_id, amp_conf in db_configs.items():
+                # Do not serialize the load balancer class; it is unused
+                # later and can be omitted from the task results stored in
+                # the persistence DB.
+                provider_dict[amp_id] = amp_conf.to_dict(
+                    recurse=True, calling_classes=[data_models.LoadBalancer]
+                )
+            return provider_dict
+
+
+class GetAmphoraNetworkConfigsByID(BaseNetworkTask):
+    """Task to retrieve amphora network details by ID."""
+
+    def execute(self, loadbalancer_id, amphora_id=None):
+        LOG.debug("Retrieving vip network details.")
+        session = db_apis.get_session()
+        with session.begin():
+            loadbalancer = self.loadbalancer_repo.get(session,
+                                                      id=loadbalancer_id)
+            amphora = self.amphora_repo.get(session, id=amphora_id)
+            db_configs = self.network_driver.get_network_configs(loadbalancer,
+                                                                 amphora=amphora)
+            provider_dict = {}
+            for amp_id, amp_conf in db_configs.items():
+                # Do not serialize the load balancer class; it is unused
+                # later and can be omitted from the task results stored in
+                # the persistence DB.
+                provider_dict[amp_id] = amp_conf.to_dict(
+                    recurse=True, calling_classes=[data_models.LoadBalancer]
+                )
+            return provider_dict
+
+
+class GetAmphoraeNetworkConfigs(BaseNetworkTask):
+    """Task to retrieve amphorae network details."""
+
+    def execute(self, loadbalancer_id):
+        LOG.debug("Retrieving vip network details.")
+        session = db_apis.get_session()
+        with session.begin():
+            db_lb = self.loadbalancer_repo.get(
+                session, id=loadbalancer_id)
+            db_configs = self.network_driver.get_network_configs(db_lb)
+            provider_dict = {}
+            for amp_id, amp_conf in db_configs.items():
+                # Do not serialize the load balancer class; it is unused
+                # later and can be omitted from the task results stored in
+                # the persistence DB.
+                provider_dict[amp_id] = amp_conf.to_dict(
+                    recurse=True, calling_classes=[data_models.LoadBalancer]
+                )
+            return provider_dict
+
+
+class RetrievePortIDsOnAmphoraExceptLBNetwork(BaseNetworkTask):
+    """Task retrieving all the port ids on an amphora, except lb network."""
+
+    def execute(self, amphora):
+        LOG.debug("Retrieve all but the lb network port id on amphora %s.",
+                  amphora[constants.ID])
+
+        interfaces = self.network_driver.get_plugged_networks(
+            compute_id=amphora[constants.COMPUTE_ID])
+
+        ports = []
+        for interface_ in interfaces:
+            if interface_.port_id not in ports:
+                port = self.network_driver.get_port(port_id=interface_.port_id)
+                ips = port.fixed_ips
+                lb_network = False
+                for ip in ips:
+                    if ip.ip_address == amphora[constants.LB_NETWORK_IP]:
+                        lb_network = True
+                if not lb_network:
+                    ports.append(port)
+
+        return ports
+
+
+class PlugPorts(BaseNetworkTask):
+    """Task to plug neutron ports into a compute instance."""
+
+    def execute(self, amphora, ports):
+        session = db_apis.get_session()
+        with session.begin():
+            db_amp = self.amphora_repo.get(session,
+                                           id=amphora[constants.ID])
+        for port in ports:
+            LOG.debug('Plugging port ID: %(port_id)s into compute instance: '
+                      '%(compute_id)s.',
+                      {constants.PORT_ID: port.id,
+                       constants.COMPUTE_ID: amphora[constants.COMPUTE_ID]})
+            self.network_driver.plug_port(db_amp, port)
+
+
+class ApplyQos(BaseNetworkTask):
+    """Apply Quality of Service to the VIP"""
+
+    def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id,
+                                 is_revert=False, request_qos_id=None):
+        """Call network driver to apply QoS Policy on the vrrp ports."""
+
+        session = db_apis.get_session()
+        with session.begin():
+            if not amps_data:
+                db_lb = self.loadbalancer_repo.get(
+                    session,
+                    id=loadbalancer[constants.LOADBALANCER_ID])
+                amps_data = db_lb.amphorae
+
+        amps_data = [amp
+                     for amp in amps_data
+                     if amp.status == constants.AMPHORA_ALLOCATED]
+
+        apply_qos = ApplyQosAmphora()
+        for amp_data in amps_data:
+            apply_qos._apply_qos_on_vrrp_port(loadbalancer, amp_data.to_dict(),
+                                              qos_policy_id)
+
+    def execute(self, loadbalancer, amps_data=None, update_dict=None):
+        """Apply qos policy on the vrrp ports which are related with vip."""
+        session = db_apis.get_session()
+        with session.begin():
+            db_lb = self.loadbalancer_repo.get(
+                session,
+                id=loadbalancer[constants.LOADBALANCER_ID])
+
+        qos_policy_id = db_lb.vip.qos_policy_id
+        if not qos_policy_id and (
+                not update_dict or (
+                    'vip' not in update_dict or
+                    'qos_policy_id' not in update_dict[constants.VIP])):
+            return
+        if update_dict and update_dict.get(constants.VIP):
+            vip_dict = update_dict[constants.VIP]
+            if vip_dict.get(constants.QOS_POLICY_ID):
+                qos_policy_id = vip_dict[constants.QOS_POLICY_ID]
+
+        self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id)
+
+    def revert(self, result, loadbalancer, amps_data=None, update_dict=None,
+               *args, **kwargs):
+        """Handle a failure to apply QoS to the VIP."""
+
+        request_qos_id = loadbalancer['vip_qos_policy_id']
+        orig_lb = self.task_utils.get_current_loadbalancer_from_db(
+            loadbalancer[constants.LOADBALANCER_ID])
+        orig_qos_id = orig_lb.vip.qos_policy_id
+        if request_qos_id != orig_qos_id:
+            self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id,
+                                          is_revert=True,
+                                          request_qos_id=request_qos_id)
+
+
+class ApplyQosAmphora(BaseNetworkTask):
+    """Apply Quality of Service to the VIP"""
+
+    def _apply_qos_on_vrrp_port(self, loadbalancer, amp_data, qos_policy_id,
+                                is_revert=False, request_qos_id=None):
+        """Call network driver to apply QoS Policy on the vrrp ports."""
+        try:
+            self.network_driver.apply_qos_on_port(
+                qos_policy_id,
+                amp_data[constants.VRRP_PORT_ID])
+        except Exception:
+            if not is_revert:
+                raise
+            LOG.warning('Failed to undo qos policy %(qos_id)s '
+                        'on vrrp port: %(port)s from '
+                        'amphora: %(amp)s',
+                        {'qos_id': request_qos_id,
+                         'port': amp_data[constants.VRRP_PORT_ID],
+                         'amp': amp_data.get(constants.ID)})
+
+    def execute(self, loadbalancer, amp_data=None, update_dict=None):
+        """Apply qos policy on the vrrp ports which are related with vip."""
+        qos_policy_id = loadbalancer['vip_qos_policy_id']
+        if not qos_policy_id and (
+                update_dict and (
+                    'vip' not in update_dict or
+                    'qos_policy_id' not in update_dict[constants.VIP])):
+            return
+        self._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id)
+
+    def revert(self, result, loadbalancer, amp_data=None, update_dict=None,
+               *args, **kwargs):
+        """Handle a failure to apply QoS to the VIP."""
+        try:
+            request_qos_id = loadbalancer['vip_qos_policy_id']
+            orig_lb = self.task_utils.get_current_loadbalancer_from_db(
+                loadbalancer[constants.LOADBALANCER_ID])
+            orig_qos_id = orig_lb.vip.qos_policy_id
+            if request_qos_id != orig_qos_id:
+                self._apply_qos_on_vrrp_port(loadbalancer, amp_data,
+                                             orig_qos_id, is_revert=True,
+                                             request_qos_id=request_qos_id)
+        except Exception as e:
+            LOG.error('Failed to remove QoS policy: %s from port: %s due '
+                      'to error: %s', orig_qos_id,
+                      amp_data[constants.VRRP_PORT_ID], str(e))
+
+
+class DeletePort(BaseNetworkTask):
+    """Task to delete a network port."""
+
+    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
+                    stop=tenacity.stop_after_attempt(
+                        CONF.networking.max_retries),
+                    wait=tenacity.wait_exponential(
+                        multiplier=CONF.networking.retry_backoff,
+                        min=CONF.networking.retry_interval,
+                        max=CONF.networking.retry_max), reraise=True)
+    def execute(self, port_id, passive_failure=False):
+        """Delete the network port."""
+        if port_id is None:
+            return
+        # tenacity 8.5.0 moved the retry statistics from the retry object
+        # to the decorated function.
+        try:
+            retry_statistics = self.execute.statistics
+        except AttributeError:
+            retry_statistics = self.execute.retry.statistics
+
+        if retry_statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
+            LOG.debug("Deleting network port %s", port_id)
+        else:
+            LOG.warning('Retrying network port %s delete attempt %s of %s.',
+                        port_id,
+                        retry_statistics[constants.ATTEMPT_NUMBER],
+                        self.execute.retry.stop.max_attempt_number)
+        # Let the Taskflow engine know we are working and alive
+        # Don't use get with a default for 'attempt_number', we need to fail
+        # if that number is missing.
+        self.update_progress(
+            retry_statistics[constants.ATTEMPT_NUMBER] /
+            self.execute.retry.stop.max_attempt_number)
+        try:
+            self.network_driver.delete_port(port_id)
+        except Exception:
+            if (retry_statistics[constants.ATTEMPT_NUMBER] !=
+                    self.execute.retry.stop.max_attempt_number):
+                LOG.warning('Network port delete for port id: %s failed. '
+                            'Retrying.', port_id)
+                raise
+            if passive_failure:
+                LOG.exception('Network port delete for port ID: %s failed. '
+                              'This resource will be abandoned and should '
+                              'manually be cleaned up once the '
+                              'network service is functional.', port_id)
+                # Let's at least attempt to disable it so if the instance
+                # comes back from the dead it doesn't conflict with anything.
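Editor's note: the DeletePort task above wraps execute() in a tenacity retry and reads the attempt counter from the retry statistics. A minimal, self-contained sketch of that version-compatibility lookup (illustrative only, not part of the patch; flaky is a made-up function)::

    import tenacity

    @tenacity.retry(stop=tenacity.stop_after_attempt(3), reraise=True)
    def flaky():
        # Always fails, so all three attempts are consumed.
        raise RuntimeError('boom')

    try:
        flaky()
    except RuntimeError:
        pass

    # tenacity >= 8.5.0 exposes the statistics on the wrapped function;
    # older releases keep them on the .retry controller object.
    try:
        stats = flaky.statistics
    except AttributeError:
        stats = flaky.retry.statistics
    print(stats['attempt_number'])  # -> 3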
+ try: + self.network_driver.admin_down_port(port_id) + LOG.info('Successfully disabled (admin down) network port ' + '%s that failed to delete.', port_id) + except Exception: + LOG.warning('Attempt to disable (admin down) network port ' + '%s failed. The network service has failed. ' + 'Continuing.', port_id) + else: + LOG.exception('Network port delete for port ID: %s failed. ' + 'The network service has failed. ' + 'Aborting and reverting.', port_id) + raise + + +class DeleteAmphoraMemberPorts(BaseNetworkTask): + """Task to delete all of the member ports on an Amphora.""" + + def execute(self, amphora_id, passive_failure=False): + delete_port = DeletePort() + session = db_apis.get_session() + + with session.begin(): + ports = self.amphora_member_port_repo.get_port_ids( + session, amphora_id) + for port in ports: + delete_port.execute(port, passive_failure) + with session.begin(): + self.amphora_member_port_repo.delete(session, port_id=port) + + +class CreateVIPBasePort(BaseNetworkTask): + """Task to create the VIP base port for an amphora.""" + + @tenacity.retry(retry=tenacity.retry_if_exception_type(), + stop=tenacity.stop_after_attempt( + CONF.networking.max_retries), + wait=tenacity.wait_exponential( + multiplier=CONF.networking.retry_backoff, + min=CONF.networking.retry_interval, + max=CONF.networking.retry_max), reraise=True) + def execute(self, vip, vip_sg_id, amphora_id, additional_vips): + port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id + fixed_ips = [{constants.SUBNET_ID: vip[constants.SUBNET_ID]}] + sg_ids = [] + # NOTE(gthiemonge) clarification: + # - vip_sg_id is the ID of the SG created and managed by Octavia. + # - vip['sg_ids'] are the IDs of the SGs provided by the user. + if vip_sg_id: + sg_ids = [vip_sg_id] + if vip["sg_ids"]: + sg_ids += vip["sg_ids"] + secondary_ips = [vip[constants.IP_ADDRESS]] + for add_vip in additional_vips: + secondary_ips.append(add_vip[constants.IP_ADDRESS]) + port = self.network_driver.create_port( + vip[constants.NETWORK_ID], name=port_name, fixed_ips=fixed_ips, + secondary_ips=secondary_ips, + security_group_ids=sg_ids, + qos_policy_id=vip[constants.QOS_POLICY_ID]) + LOG.info('Created port %s with ID %s for amphora %s', + port_name, port.id, amphora_id) + return port.to_dict(recurse=True) + + def revert(self, result, vip, vip_sg_id, amphora_id, additional_vips, + *args, **kwargs): + if isinstance(result, failure.Failure): + return + try: + port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id + self.network_driver.delete_port(result[constants.ID]) + LOG.info('Deleted port %s with ID %s for amphora %s due to a ' + 'revert.', port_name, result[constants.ID], amphora_id) + except Exception as e: + LOG.error('Failed to delete port %s. Resources may still be in ' + 'use for a port intended for amphora %s due to error ' + '%s. Search for a port named %s', + result, amphora_id, str(e), port_name) + + +class AdminDownPort(BaseNetworkTask): + + def execute(self, port_id): + try: + self.network_driver.set_port_admin_state_up(port_id, False) + except base.PortNotFound: + return + for i in range(CONF.networking.max_retries): + port = self.network_driver.get_port(port_id) + if port.status == constants.DOWN: + LOG.debug('Disabled port: %s', port_id) + return + LOG.debug('Port %s is %s instead of DOWN, waiting.', + port_id, port.status) + time.sleep(CONF.networking.retry_interval) + LOG.error('Port %s failed to go DOWN. Port status is still %s. 
' + 'Ignoring and continuing.', port_id, port.status) + + def revert(self, result, port_id, *args, **kwargs): + if isinstance(result, failure.Failure): + return + try: + self.network_driver.set_port_admin_state_up(port_id, True) + except Exception as e: + LOG.error('Failed to bring port %s admin up on revert due to: %s.', + port_id, str(e)) + + +class GetVIPSecurityGroupID(BaseNetworkTask): + + def execute(self, loadbalancer_id): + sg_name = utils.get_vip_security_group_name(loadbalancer_id) + try: + security_group = self.network_driver.get_security_group(sg_name) + if security_group: + return security_group.id + except base.SecurityGroupNotFound: + with excutils.save_and_reraise_exception() as ctxt: + if self.network_driver.sec_grp_enabled: + LOG.error('VIP security group %s was not found.', sg_name) + else: + ctxt.reraise = False + return None + + +class CreateSRIOVBasePort(BaseNetworkTask): + """Task to create a SRIOV base port for an amphora.""" + + @tenacity.retry(retry=tenacity.retry_if_exception_type(), + stop=tenacity.stop_after_attempt( + CONF.networking.max_retries), + wait=tenacity.wait_exponential( + multiplier=CONF.networking.retry_backoff, + min=CONF.networking.retry_interval, + max=CONF.networking.retry_max), reraise=True) + def execute(self, loadbalancer, amphora, subnet): + session = db_apis.get_session() + with session.begin(): + db_lb = self.loadbalancer_repo.get( + session, id=loadbalancer[constants.LOADBALANCER_ID]) + port_name = constants.AMP_BASE_PORT_PREFIX + amphora[constants.ID] + fixed_ips = [{constants.SUBNET_ID: subnet[constants.ID]}] + addl_vips = [obj.ip_address for obj in db_lb.additional_vips] + addl_vips.append(loadbalancer[constants.VIP_ADDRESS]) + port = self.network_driver.create_port( + loadbalancer[constants.VIP_NETWORK_ID], + name=port_name, fixed_ips=fixed_ips, + secondary_ips=addl_vips, + qos_policy_id=loadbalancer[constants.VIP_QOS_POLICY_ID], + vnic_type=constants.VNIC_TYPE_DIRECT) + LOG.info('Created port %s with ID %s for amphora %s', + port_name, port.id, amphora[constants.ID]) + return port.to_dict(recurse=True) + + def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs): + if isinstance(result, failure.Failure): + return + try: + port_name = constants.AMP_BASE_PORT_PREFIX + amphora['id'] + self.network_driver.delete_port(result[constants.ID]) + LOG.info('Deleted port %s with ID %s for amphora %s due to a ' + 'revert.', port_name, result[constants.ID], amphora['id']) + except Exception as e: + LOG.error('Failed to delete port %s. Resources may still be in ' + 'use for a port intended for amphora %s due to error ' + '%s. 
Search for a port named %s',
+                      result, amphora['id'], str(e), port_name)
+
+
+class BuildAMPData(BaseNetworkTask):
+    """Glue task to store the AMP_DATA dict from network port information."""
+
+    def execute(self, loadbalancer, amphora, port_data):
+        amphora[constants.HA_IP] = loadbalancer[constants.VIP_ADDRESS]
+        amphora[constants.HA_PORT_ID] = loadbalancer[constants.VIP_PORT_ID]
+        amphora[constants.VRRP_ID] = 1
+        amphora[constants.VRRP_PORT_ID] = port_data[constants.ID]
+        amphora[constants.VRRP_IP] = port_data[
+            constants.FIXED_IPS][0][constants.IP_ADDRESS]
+        return amphora
diff --git a/octavia/controller/worker/v2/tasks/notification_tasks.py b/octavia/controller/worker/v2/tasks/notification_tasks.py
new file mode 100644
index 0000000000..7a8fba14f3
--- /dev/null
+++ b/octavia/controller/worker/v2/tasks/notification_tasks.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from oslo_log import log as logging
+from taskflow import task
+
+from octavia.common import constants  # noqa H306
+from octavia.common import context
+from octavia.common import rpc
+
+LOG = logging.getLogger(__name__)
+
+
+class BaseNotificationTask(task.Task):
+    event_type = None
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._rpc_notifier = rpc.get_notifier()
+
+    def execute(self, loadbalancer):
+        ctx = context.RequestContext(
+            project_id=loadbalancer[constants.PROJECT_ID])
+        LOG.debug(f"Sending rpc notification: {self.event_type} "
+                  f"{loadbalancer[constants.LOADBALANCER_ID]}")
+        self._rpc_notifier.info(
+            ctx,
+            self.event_type,
+            loadbalancer
+        )
+
+
+class SendUpdateNotification(BaseNotificationTask):
+    event_type = 'octavia.loadbalancer.update.end'
+
+
+class SendCreateNotification(BaseNotificationTask):
+    event_type = 'octavia.loadbalancer.create.end'
+
+
+class SendDeleteNotification(BaseNotificationTask):
+    event_type = 'octavia.loadbalancer.delete.end'
diff --git a/octavia/controller/worker/v2/tasks/retry_tasks.py b/octavia/controller/worker/v2/tasks/retry_tasks.py
new file mode 100644
index 0000000000..04efa852e9
--- /dev/null
+++ b/octavia/controller/worker/v2/tasks/retry_tasks.py
@@ -0,0 +1,73 @@
+# Copyright 2019 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from oslo_log import log as logging
+from taskflow import retry
+
+LOG = logging.getLogger(__name__)
+
+
+class SleepingRetryTimesController(retry.Times):
+    """A retry controller to attempt subflow retries a number of times.
+ + This retry controller overrides the Times on_failure to inject a + sleep interval between retries. + It also adds a log message when all of the retries are exhausted. + + :param attempts: number of attempts to retry the associated subflow + before giving up + :type attempts: int + :param name: Meaningful name for this atom, should be something that is + distinguishable and understandable for notification, + debugging, storing and any other similar purposes. + :param provides: A set, string or list of items that + this will be providing (or could provide) to others, used + to correlate and associate the thing/s this atom + produces, if it produces anything at all. + :param requires: A set or list of required inputs for this atom's + ``execute`` method. + :param rebind: A dict of key/value pairs used to define argument + name conversions for inputs to this atom's ``execute`` + method. + :param revert_all: when provided this will cause the full flow to revert + when the number of attempts that have been tried + has been reached (when false, it will only locally + revert the associated subflow) + :type revert_all: bool + :param interval: Interval, in seconds, between retry attempts. + :type interval: int + """ + + def __init__(self, attempts=1, name=None, provides=None, requires=None, + auto_extract=True, rebind=None, revert_all=False, interval=1): + super().__init__(attempts, name, provides, requires, auto_extract, + rebind, revert_all) + self._interval = interval + + def on_failure(self, history, *args, **kwargs): + if len(history) < self._attempts: + LOG.warning('%s attempt %s of %s failed. Sleeping %s seconds and ' + 'retrying.', + self.name[self.name.startswith('retry-') and + len('retry-'):], len(history), + self._attempts, self._interval) + time.sleep(self._interval) + return retry.RETRY + return self._revert_action + + def revert(self, history, *args, **kwargs): + LOG.error('%s retries with interval %s seconds have failed for %s. ' + 'Giving up.', len(history), self._interval, self.name) diff --git a/octavia/controller/worker/v2/tasks/shim_tasks.py b/octavia/controller/worker/v2/tasks/shim_tasks.py new file mode 100644 index 0000000000..b6b587fd71 --- /dev/null +++ b/octavia/controller/worker/v2/tasks/shim_tasks.py @@ -0,0 +1,28 @@ +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +from taskflow import task + +from octavia.common import constants + + +class AmphoraToAmphoraeWithVRRPIP(task.Task): + """A shim class to convert a single Amphora instance to a list.""" + + def execute(self, amphora: dict, base_port: dict): + # The VRRP_IP has not been stamped on the Amphora at this point in the + # flow, so inject it from our port create call in a previous task. 
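Editor's note: a short usage sketch for the retry controller above, attaching it to a taskflow subflow so each failed run sleeps before retrying (illustrative only; PingAmphora is a made-up task)::

    from taskflow import engines
    from taskflow.patterns import linear_flow
    from taskflow import task

    from octavia.controller.worker.v2.tasks import retry_tasks

    class PingAmphora(task.Task):
        def execute(self):
            raise RuntimeError('amphora not ready yet')

    flow = linear_flow.Flow(
        'wait-for-amphora-flow',
        retry=retry_tasks.SleepingRetryTimesController(
            name='retry-wait-for-amphora-flow', attempts=5, interval=2))
    flow.add(PingAmphora())
    # Sleeps 2 seconds between attempts; reverts after 5 failures.
    engines.run(flow)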
+ amphora[constants.VRRP_IP] = ( + base_port[constants.FIXED_IPS][0][constants.IP_ADDRESS]) + return [amphora] diff --git a/octavia/db/__init__.py b/octavia/db/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/db/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/db/api.py b/octavia/db/api.py new file mode 100644 index 0000000000..635df05a1d --- /dev/null +++ b/octavia/db/api.py @@ -0,0 +1,76 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import time + +from sqlalchemy.sql.expression import select + +from oslo_config import cfg +from oslo_db.sqlalchemy import enginefacade +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) +_FACADE = None + + +def _create_facade_lazily(): + global _FACADE + if _FACADE is None: + _FACADE = True + enginefacade.configure(sqlite_fk=True, expire_on_commit=True) + + +def _get_transaction_context(reader=False): + _create_facade_lazily() + # TODO(gthiemonge) Create and use new functions to get read-only sessions + if reader: + context = enginefacade.reader + else: + context = enginefacade.writer + return context + + +def _get_sessionmaker(reader=False): + context = _get_transaction_context(reader) + return context.get_sessionmaker() + + +def get_engine(): + context = _get_transaction_context() + return context.get_engine() + + +def get_session(): + """Helper method to grab session.""" + return _get_sessionmaker()() + + +def session(): + return _get_sessionmaker() + + +def wait_for_connection(exit_event): + """Helper method to wait for DB connection""" + down = True + while down and not exit_event.is_set(): + try: + LOG.debug('Trying to re-establish connection to database.') + get_engine().scalar(select([1])) + down = False + LOG.debug('Connection to database re-established.') + except Exception: + retry_interval = cfg.CONF.database.retry_interval + LOG.exception('Connection to database failed. Retrying in %s ' + 'seconds.', retry_interval) + time.sleep(retry_interval) diff --git a/octavia/db/base_models.py b/octavia/db/base_models.py new file mode 100644 index 0000000000..052aff9795 --- /dev/null +++ b/octavia/db/base_models.py @@ -0,0 +1,235 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from typing import Optional
+
+from wsme import types as wtypes
+
+from oslo_db.sqlalchemy import models
+from oslo_utils import strutils
+from oslo_utils import uuidutils
+import sqlalchemy as sa
+from sqlalchemy.orm import collections
+from sqlalchemy.orm import declarative_base
+
+from octavia.common import constants
+
+
+class OctaviaBase(models.ModelBase):
+
+    __data_model__ = None
+
+    @staticmethod
+    def _get_unique_key(obj):
+        """Returns a unique key for passed object for data model building."""
+        # First handle all objects with their own ID, then handle subordinate
+        # objects.
+        if obj.__class__.__name__ in ['Member', 'Pool', 'LoadBalancer',
+                                      'Listener', 'Amphora', 'L7Policy',
+                                      'L7Rule', 'Flavor', 'FlavorProfile',
+                                      'AvailabilityZoneProfile']:
+            return obj.__class__.__name__ + obj.id
+        if obj.__class__.__name__ in ['SessionPersistence', 'HealthMonitor']:
+            return obj.__class__.__name__ + obj.pool_id
+        if obj.__class__.__name__ in ['ListenerStatistics']:
+            return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
+        if obj.__class__.__name__ in ['ListenerCidr']:
+            return obj.__class__.__name__ + obj.listener_id + obj.cidr
+        if obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
+            return obj.__class__.__name__ + obj.load_balancer_id
+        if obj.__class__.__name__ in ['AmphoraHealth']:
+            return obj.__class__.__name__ + obj.amphora_id
+        if obj.__class__.__name__ in ['SNI']:
+            return (obj.__class__.__name__ +
+                    obj.listener_id + obj.tls_container_id)
+        if obj.__class__.__name__ in ['Quotas']:
+            return obj.__class__.__name__ + obj.project_id
+        if obj.__class__.__name__ in ['AvailabilityZone']:
+            return obj.__class__.__name__ + obj.name
+        if obj.__class__.__name__ in ['AdditionalVip']:
+            return (obj.__class__.__name__ +
+                    obj.load_balancer_id + obj.subnet_id)
+        if obj.__class__.__name__ in ['VipSecurityGroup']:
+            return obj.__class__.__name__ + obj.load_balancer_id + obj.sg_id
+        if obj.__class__.__name__ in ['AmphoraMemberPort']:
+            return obj.__class__.__name__ + obj.port_id
+        raise NotImplementedError
+
+    def to_data_model(
+            self, _graph_nodes=None, recursion_depth: Optional[int] = None):
+        """Converts to a data model graph.
+
+        In order to make the resulting data model graph usable no matter how
+        many internal references are followed, we generate a complete graph of
+        OctaviaBase nodes connected to the object passed to this method.
+
+        :param _graph_nodes: Used only for internal recursion of this
+                             method. Should not be called from the outside.
+                             Contains a dictionary of all OctaviaBase type
+                             objects in the generated graph
+        :param recursion_depth: Used only for configuring recursion. This
+                                option limits the recursion depth, which is
+                                useful when only the main node and its
+                                first-level relationships are needed; it
+                                avoids the cost of recursing through huge
+                                graphs when only the main object is
+                                necessary.
+ """ + _graph_nodes = _graph_nodes or {} + if not self.__data_model__: + raise NotImplementedError + dm_kwargs = {} + for column in self.__table__.columns: + dm_kwargs[column.name] = getattr(self, column.name) + + attr_names = [attr_name for attr_name in dir(self) + if not attr_name.startswith('_')] + # Appending early, as any unique ID should be defined already and + # the rest of this object will get filled out more fully later on, + # and we need to add ourselves to the _graph_nodes before we + # attempt recursion. + dm_self = self.__data_model__(**dm_kwargs) + dm_key = self._get_unique_key(dm_self) + _graph_nodes.update({dm_key: dm_self}) + new_depth = recursion_depth + need_recursion = recursion_depth is None or recursion_depth > 0 + # decrease depth of recursion on new recursion call + if new_depth: + new_depth -= 1 + for attr_name in attr_names: + attr = getattr(self, attr_name) + if (need_recursion and + isinstance(attr, OctaviaBase) and + attr.__class__): + # If this attr is already in the graph node list, just + # reference it there and don't recurse. + ukey = self._get_unique_key(attr) + if ukey in _graph_nodes.keys(): + setattr(dm_self, attr_name, _graph_nodes[ukey]) + else: + setattr(dm_self, attr_name, attr.to_data_model( + _graph_nodes=_graph_nodes, + recursion_depth=new_depth)) + elif isinstance(attr, (collections.InstrumentedList, list)): + setattr(dm_self, attr_name, []) + listref = getattr(dm_self, attr_name) + for item in attr: + if (need_recursion and + isinstance(item, OctaviaBase) and + item.__class__): + ukey = self._get_unique_key(item) + if ukey in _graph_nodes.keys(): + listref.append(_graph_nodes[ukey]) + else: + listref.append( + item.to_data_model(_graph_nodes=_graph_nodes, + recursion_depth=new_depth)) + elif not isinstance(item, OctaviaBase): + listref.append(item) + return dm_self + + @staticmethod + def apply_filter(query, model, filters): + # Convert boolean filters to proper type + for key in filters: + attr = getattr(model.__v2_wsme__, key, None) + if isinstance(attr, wtypes.wsattr) and attr.datatype == bool: + filters[key] = strutils.bool_from_string(filters[key]) + # Special case for 'enabled', it's 'admin_state_up' in the WSME class + # definition and the attribute has already been renamed to 'enabled' by + # a previous pagination filter + if constants.ENABLED in filters: + filters[constants.ENABLED] = strutils.bool_from_string( + filters[constants.ENABLED]) + + translated_filters = {} + child_map = {} + for attr, name_map in model.__v2_wsme__._child_map.items(): + for k, v in name_map.items(): + if attr in filters and k in filters[attr]: + child_map.setdefault(attr, {}).update( + {k: filters[attr].pop(k)}) + filters.pop(attr, None) + + for k, v in model.__v2_wsme__._type_to_model_map.items(): + if k in filters: + translated_filters[v] = filters.pop(k) + translated_filters.update(filters) + if translated_filters: + query = query.filter_by(**translated_filters) + for k, v in child_map.items(): + query = query.join(getattr(model, k)).filter_by(**v) + return query + + def __repr__(self): + params = sorted( + (k, getattr(self, k)) for k in self.__mapper__.columns.keys() + ) + params = ", ".join(f"{k}={v!r}" for k, v in params) + return f"{self.__class__.__name__}({params})" + + +class LookupTableMixin: + """Mixin to add to classes that are lookup tables.""" + name = sa.Column(sa.String(255), primary_key=True, nullable=False) + description = sa.Column(sa.String(255), nullable=True) + + +class IdMixin: + """Id mixin, add to subclasses that have an id.""" 
+ id = sa.Column(sa.String(36), primary_key=True, + default=uuidutils.generate_uuid) + + +class ProjectMixin: + """Tenant mixin, add to subclasses that have a project.""" + project_id = sa.Column(sa.String(36)) + + +class NameMixin: + """Name mixin to add to classes which need a name.""" + name = sa.Column(sa.String(255), nullable=True) + + +class TagMixin: + """Tags mixin to add to classes which need tags. + + The class must realize the specified db relationship as well. + """ + + @property + def tags(self): + if self._tags: + return [each_tag.tag for each_tag in self._tags] + return [] + + @tags.setter + def tags(self, values): + new_tags = [] + if values: + for tag in values: + tag_ref = Tags() + tag_ref.resource_id = self.id + tag_ref.tag = tag + new_tags.append(tag_ref) + self._tags = new_tags + + +BASE = declarative_base(cls=OctaviaBase) + + +class Tags(BASE): + __tablename__ = "tags" + + resource_id = sa.Column(sa.String(36), primary_key=True) + tag = sa.Column(sa.String(255), primary_key=True, index=True) diff --git a/octavia/db/healthcheck.py b/octavia/db/healthcheck.py new file mode 100644 index 0000000000..151dfd4c92 --- /dev/null +++ b/octavia/db/healthcheck.py @@ -0,0 +1,38 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from oslo_log import log as logging +from sqlalchemy import text + +from octavia.i18n import _ + +LOG = logging.getLogger(__name__) + + +def check_database_connection(session): + """This is a simple database connection check function. + + It will do a simple no-op query (low overhead) against the sqlalchemy + session passed in. + + :param session: A Sql Alchemy database session. + :returns: True if the connection check is successful, False if not. + """ + try: + session.execute(text('SELECT 1;')) + return True, None + except Exception as e: + message = _('Database health check failed due to: {err}.').format( + err=str(e)) + LOG.error(message) + return False, message diff --git a/octavia/db/migration/__init__.py b/octavia/db/migration/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/db/migration/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/db/migration/alembic.ini b/octavia/db/migration/alembic.ini new file mode 100644 index 0000000000..79daf221ff --- /dev/null +++ b/octavia/db/migration/alembic.ini @@ -0,0 +1,59 @@ +# A generic, single database configuration. 
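Editor's note: a minimal sketch of calling the health check from octavia/db/healthcheck.py (illustrative wiring, not part of the patch)::

    from octavia.db import api as db_apis
    from octavia.db import healthcheck

    session = db_apis.get_session()
    healthy, message = healthcheck.check_database_connection(session)
    if not healthy:
        # message carries the translated failure description.
        print(f'Database health check failed: {message}')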
+
+[alembic]
+# path to migration scripts
+script_location = %(here)s/alembic_migrations
+
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# max length of characters to apply to the
+# "slug" field
+#truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+sqlalchemy.url =
+
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/octavia/db/migration/alembic_migrations/README.rst b/octavia/db/migration/alembic_migrations/README.rst
new file mode 100644
index 0000000000..6f0d42bb35
--- /dev/null
+++ b/octavia/db/migration/alembic_migrations/README.rst
@@ -0,0 +1,8 @@
+The alembic_migrations/versions directory contains the migration scripts.
+
+Before running these migrations, ensure that the octavia database exists.
+
+To run migrations you must first be in the octavia/db/migration directory.
+
+To migrate to the most current version run:
+$ octavia-db-manage upgrade head
diff --git a/octavia/db/migration/alembic_migrations/env.py b/octavia/db/migration/alembic_migrations/env.py
new file mode 100644
index 0000000000..87a1c7fb08
--- /dev/null
+++ b/octavia/db/migration/alembic_migrations/env.py
@@ -0,0 +1,87 @@
+# Copyright 2014 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from alembic import context
+from sqlalchemy import create_engine
+from sqlalchemy import pool
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+try:
+    octavia_config = config.octavia_config
+except AttributeError:
+    print("Error: Please use the octavia-db-manage command for octavia"
+          " alembic actions.")
+    sys.exit(1)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = None
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline():
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well. By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+ + Calls to context.execute() here emit the given string to the + script output. + + """ + context.configure(url=octavia_config.database.connection, + target_metadata=target_metadata) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + engine = create_engine( + octavia_config.database.connection, + poolclass=pool.NullPool) + + connection = engine.connect() + context.configure( + connection=connection, + target_metadata=target_metadata) + + try: + with context.begin_transaction(): + context.run_migrations() + finally: + connection.close() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/octavia/db/migration/alembic_migrations/script.py.mako b/octavia/db/migration/alembic_migrations/script.py.mako new file mode 100644 index 0000000000..70836e08f8 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/script.py.mako @@ -0,0 +1,30 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision} +Create Date: ${create_date} + +""" + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} + +def upgrade(): + ${upgrades if upgrades else "pass"} diff --git a/octavia/db/migration/alembic_migrations/versions/034756a182a2_amphora_add_image_id.py b/octavia/db/migration/alembic_migrations/versions/034756a182a2_amphora_add_image_id.py new file mode 100644 index 0000000000..e3a61cbd9b --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/034756a182a2_amphora_add_image_id.py @@ -0,0 +1,35 @@ +# Copyright 2017 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""amphora add image id + +Revision ID: 034756a182a2 +Revises: 10d38216ad34 +Create Date: 2018-02-26 17:38:37.971677 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
+revision = '034756a182a2' +down_revision = '10d38216ad34' + + +def upgrade(): + op.add_column( + 'amphora', + sa.Column('image_id', sa.String(36), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/034b2dc2f3e0_modernize_l7policy_fields.py b/octavia/db/migration/alembic_migrations/versions/034b2dc2f3e0_modernize_l7policy_fields.py new file mode 100644 index 0000000000..b00ad108ef --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/034b2dc2f3e0_modernize_l7policy_fields.py @@ -0,0 +1,141 @@ +# Copyright 2017 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""modernize_l7policy_fields + +Revision ID: 034b2dc2f3e0 +Revises: fac584114642 +Create Date: 2017-04-01 05:44:43.400535 + +""" + +from alembic import op +import sqlalchemy as sa + +from octavia.common import constants + +# revision identifiers, used by Alembic. +revision = '034b2dc2f3e0' +down_revision = 'fac584114642' + + +def upgrade(): + # Add timing data + op.add_column( + 'l7policy', + sa.Column('created_at', sa.DateTime(), nullable=True) + ) + op.add_column( + 'l7policy', + sa.Column('updated_at', sa.DateTime(), nullable=True) + ) + + # Add project_id + op.add_column( + 'l7policy', + sa.Column('project_id', sa.String(36), nullable=True) + ) + + # Add new operating_status column, setting existing rows to ONLINE + op.add_column( + 'l7policy', + sa.Column('operating_status', sa.String(16), + nullable=False, server_default=constants.ONLINE) + ) + # Remove the default, as we don't actually want one + op.alter_column('l7policy', 'operating_status', + existing_type=sa.String(16), server_default=None) + # Add the foreign key for operating_status_name + op.create_foreign_key( + 'fk_l7policy_operating_status_name', 'l7policy', + 'operating_status', ['operating_status'], ['name'] + ) + + op.drop_constraint('fk_health_monitor_provisioning_status_name', + 'health_monitor', + type_='foreignkey') + + op.drop_constraint('fk_l7policy_provisioning_status_name', + 'l7policy', + type_='foreignkey') + + op.drop_constraint('fk_l7rule_provisioning_status_name', + 'l7rule', + type_='foreignkey') + + op.drop_constraint('fk_member_provisioning_status_name', + 'member', + type_='foreignkey') + + op.drop_constraint('fk_pool_provisioning_status_name', + 'pool', + type_='foreignkey') + + # provisioning_status was mistakenly added as nullable, the fix is similar + op.alter_column('l7policy', 'provisioning_status', nullable=False, + existing_type=sa.String(16), + server_default=constants.ACTIVE) + op.alter_column('l7policy', 'provisioning_status', + existing_type=sa.String(16), server_default=None) + + # Fix the rest of these that were also mistakenly set as nullable in: + # 9b5473976d6d_add_provisioning_status_to_objects.py + op.alter_column('health_monitor', 'provisioning_status', nullable=False, + existing_type=sa.String(16), + server_default=constants.ACTIVE) + op.alter_column('health_monitor', 'provisioning_status', + existing_type=sa.String(16), server_default=None) + + op.alter_column('member', 
'provisioning_status', nullable=False, + existing_type=sa.String(16), + server_default=constants.ACTIVE) + op.alter_column('member', 'provisioning_status', + existing_type=sa.String(16), server_default=None) + + op.alter_column('pool', 'provisioning_status', nullable=False, + existing_type=sa.String(16), + server_default=constants.ACTIVE) + op.alter_column('pool', 'provisioning_status', + existing_type=sa.String(16), server_default=None) + + op.alter_column('l7rule', 'provisioning_status', nullable=False, + existing_type=sa.String(16), + server_default=constants.ACTIVE) + op.alter_column('l7rule', 'provisioning_status', + existing_type=sa.String(16), server_default=None) + + op.create_foreign_key( + 'fk_health_monitor_provisioning_status_name', 'health_monitor', + 'provisioning_status', ['provisioning_status'], ['name'] + ) + + op.create_foreign_key( + 'fk_l7policy_provisioning_status_name', 'l7policy', + 'provisioning_status', ['provisioning_status'], ['name'] + ) + + op.create_foreign_key( + 'fk_l7rule_provisioning_status_name', 'l7rule', + 'provisioning_status', ['provisioning_status'], ['name'] + ) + + op.create_foreign_key( + 'fk_member_provisioning_status_name', 'member', + 'provisioning_status', ['provisioning_status'], ['name'] + ) + + op.create_foreign_key( + 'fk_pool_provisioning_status_name', 'pool', + 'provisioning_status', ['provisioning_status'], ['name'] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/0995c26fc506_add_failover_stopped_to_provisioning_.py b/octavia/db/migration/alembic_migrations/versions/0995c26fc506_add_failover_stopped_to_provisioning_.py new file mode 100644 index 0000000000..396b346f43 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/0995c26fc506_add_failover_stopped_to_provisioning_.py @@ -0,0 +1,41 @@ +# Copyright Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Add FAILOVER_STOPPED to provisioning_status table + +Revision ID: 0995c26fc506 +Revises: 31f7653ded67 +Create Date: 2022-03-24 04:53:10.768658 + +""" +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '0995c26fc506' +down_revision = '31f7653ded67' + + +def upgrade(): + insert_table = sa.sql.table( + 'provisioning_status', + sa.sql.column('name', sa.String), + sa.sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'FAILOVER_STOPPED'}, + ] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/0aee2b450512_extend_api_to_accept_qos_policy_id.py b/octavia/db/migration/alembic_migrations/versions/0aee2b450512_extend_api_to_accept_qos_policy_id.py new file mode 100644 index 0000000000..52d54bdbe2 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/0aee2b450512_extend_api_to_accept_qos_policy_id.py @@ -0,0 +1,37 @@ +# Copyright 2017 Huawei +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add QoS Policy ID column to VIP table + +Revision ID: 0aee2b450512 +Revises: bf171d0d91c3 +Create Date: 2017-02-07 20:47:52.405865 + +""" + + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '0aee2b450512' +down_revision = 'bf171d0d91c3' + + +def upgrade(): + op.add_column('vip', + sa.Column('qos_policy_id', + sa.String(length=36), + nullable=True, server_default=None)) diff --git a/octavia/db/migration/alembic_migrations/versions/0f242cf02c74_add_provider_column.py b/octavia/db/migration/alembic_migrations/versions/0f242cf02c74_add_provider_column.py new file mode 100644 index 0000000000..3af37c325d --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/0f242cf02c74_add_provider_column.py @@ -0,0 +1,37 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Add provider column + +Revision ID: 0f242cf02c74 +Revises: 0fd2c131923f +Create Date: 2018-04-23 16:22:26.971048 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '0f242cf02c74' +down_revision = '0fd2c131923f' + + +def upgrade(): + op.add_column( + 'load_balancer', + sa.Column('provider', sa.String(64), nullable=True) + ) + op.execute("UPDATE load_balancer set provider='amphora' where provider " + "is null") diff --git a/octavia/db/migration/alembic_migrations/versions/0fd2c131923f_add_timeout_fields_to_listener.py b/octavia/db/migration/alembic_migrations/versions/0fd2c131923f_add_timeout_fields_to_listener.py new file mode 100644 index 0000000000..40ffcf615d --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/0fd2c131923f_add_timeout_fields_to_listener.py @@ -0,0 +1,50 @@ +# Copyright 2018 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add timeout fields to listener + +Revision ID: 0fd2c131923f +Revises: ba35e0fb88e1 +Create Date: 2018-03-23 03:34:26.657254 + +""" + +from alembic import op +import sqlalchemy as sa + +from octavia.common import constants + +# revision identifiers, used by Alembic. 
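Editor's note: the provider migration above adds a nullable column and then backfills existing rows with an explicit UPDATE; note that a plain default= (as used by the timeout migration that follows) is only a client-side default and leaves pre-existing rows NULL. A hedged sketch of the equivalent backfill using a temporary server_default instead::

    def upgrade():
        # The server default populates existing rows in the same step...
        op.add_column(
            'load_balancer',
            sa.Column('provider', sa.String(64), nullable=True,
                      server_default='amphora'))
        # ...then drop it so future inserts must set the value explicitly.
        op.alter_column('load_balancer', 'provider',
                        existing_type=sa.String(64), server_default=None)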
+revision = '0fd2c131923f' +down_revision = 'ba35e0fb88e1' + + +def upgrade(): + op.add_column('listener', + sa.Column('timeout_client_data', + sa.Integer(), nullable=True, + default=constants.DEFAULT_TIMEOUT_CLIENT_DATA)) + op.add_column('listener', + sa.Column('timeout_member_connect', + sa.Integer(), nullable=True, + default=constants.DEFAULT_TIMEOUT_MEMBER_CONNECT)) + op.add_column('listener', + sa.Column('timeout_member_data', + sa.Integer(), nullable=True, + default=constants.DEFAULT_TIMEOUT_MEMBER_DATA)) + op.add_column('listener', + sa.Column('timeout_tcp_inspect', + sa.Integer(), nullable=True, + default=constants.DEFAULT_TIMEOUT_TCP_INSPECT)) diff --git a/octavia/db/migration/alembic_migrations/versions/10d38216ad34_add_timestamps_to_amphora.py b/octavia/db/migration/alembic_migrations/versions/10d38216ad34_add_timestamps_to_amphora.py new file mode 100644 index 0000000000..797529c6c5 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/10d38216ad34_add_timestamps_to_amphora.py @@ -0,0 +1,39 @@ +# Copyright 2018 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add timestamps to amphora + +Revision ID: 10d38216ad34 +Revises: 0aee2b450512 +Create Date: 2018-02-26 10:04:59.133772 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '10d38216ad34' +down_revision = '0aee2b450512' + + +def upgrade(): + op.add_column( + 'amphora', + sa.Column('created_at', sa.DateTime(), nullable=True) + ) + op.add_column( + 'amphora', + sa.Column('updated_at', sa.DateTime(), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/11e4bb2bb8ef_fix_ipv6_vip.py b/octavia/db/migration/alembic_migrations/versions/11e4bb2bb8ef_fix_ipv6_vip.py new file mode 100644 index 0000000000..63e09832ef --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/11e4bb2bb8ef_fix_ipv6_vip.py @@ -0,0 +1,30 @@ +# Copyright 2017 Rackspace, US Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Fix_IPv6_VIP + +Revision ID: 11e4bb2bb8ef +Revises: 211982b05afc +Create Date: 2019-01-28 08:35:35.333616 + +""" +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
+revision = '11e4bb2bb8ef' +down_revision = '211982b05afc' + + +def upgrade(): + op.alter_column('vip', 'ip_address', type_=sa.String(64)) diff --git a/octavia/db/migration/alembic_migrations/versions/13500e2e978d_update_url_and_name_size.py b/octavia/db/migration/alembic_migrations/versions/13500e2e978d_update_url_and_name_size.py new file mode 100644 index 0000000000..a8f3c4ad00 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/13500e2e978d_update_url_and_name_size.py @@ -0,0 +1,43 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +'''update url and name size + +Revision ID: 13500e2e978d +Revises: 4c094013699a +Create Date: 2014-09-18 16:07:04.859812 + +''' + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '13500e2e978d' +down_revision = '4c094013699a' + + +def upgrade(): + op.alter_column('provisioning_status', 'name', + existing_type=sa.String(255)) + op.alter_column('operating_status', 'name', + existing_type=sa.String(255)) + op.alter_column('health_monitor_type', 'name', + existing_type=sa.String(255)) + op.alter_column('protocol', 'name', + existing_type=sa.String(255)) + op.alter_column('algorithm', 'name', + existing_type=sa.String(255)) + op.alter_column('session_persistence_type', 'name', + existing_type=sa.String(255)) diff --git a/octavia/db/migration/alembic_migrations/versions/14892634e228_update_vip.py b/octavia/db/migration/alembic_migrations/versions/14892634e228_update_vip.py new file mode 100644 index 0000000000..a8a5006cfa --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/14892634e228_update_vip.py @@ -0,0 +1,38 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""update vip + +Revision ID: 14892634e228 +Revises: 3a1e1cdb7b27 +Create Date: 2015-01-10 00:53:57.798213 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
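Editor's note: two different alter_column idioms appear above. The IPv6 fix passes type_= to actually change the column type, while 13500e2e978d passes only existing_type=, which declares the current type so Alembic can render DDL correctly rather than changing anything. A short sketch of the distinction (illustrative)::

    # Changes the column type; existing_type helps backends like MySQL
    # render the full column specification correctly.
    op.alter_column('vip', 'ip_address',
                    existing_type=sa.String(36), type_=sa.String(64))

    # Declares the current type only; no type change is emitted.
    op.alter_column('vip', 'ip_address', existing_type=sa.String(64))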
+revision = '14892634e228' +down_revision = '3a1e1cdb7b27' + + +def upgrade(): + with op.batch_alter_table('vip') as batch_op: + batch_op.alter_column('subnet_id', new_column_name='network_id', + existing_type=sa.String(36)) + batch_op.alter_column('net_port_id', new_column_name='port_id', + existing_type=sa.String(36)) + batch_op.drop_column('floating_ip_id') + batch_op.drop_column('floating_ip_network_id') diff --git a/octavia/db/migration/alembic_migrations/versions/186509101b9b_add_server_group_id_to_loadbalancer.py b/octavia/db/migration/alembic_migrations/versions/186509101b9b_add_server_group_id_to_loadbalancer.py new file mode 100644 index 0000000000..b393fa280c --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/186509101b9b_add_server_group_id_to_loadbalancer.py @@ -0,0 +1,34 @@ +# Copyright 2016 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add_server_group_id_to_loadbalancer + +Revision ID: 186509101b9b +Revises: 29ff921a6eb +Create Date: 2016-01-25 15:12:52.489652 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '186509101b9b' +down_revision = '458c9ee2a011' + + +def upgrade(): + op.add_column('load_balancer', sa.Column('server_group_id', + sa.String(36), nullable=True)) diff --git a/octavia/db/migration/alembic_migrations/versions/1afc932f1ca2_l7rule_support_client_cert.py b/octavia/db/migration/alembic_migrations/versions/1afc932f1ca2_l7rule_support_client_cert.py new file mode 100644 index 0000000000..4d62e154d7 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/1afc932f1ca2_l7rule_support_client_cert.py @@ -0,0 +1,44 @@ +# Copyright 2018 Huawei +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Extend the l7rule type for support client certificate cases + +Revision ID: 1afc932f1ca2 +Revises: ffad172e98c1 +Create Date: 2018-10-03 20:47:52.405865 + +""" + + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. 
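Editor's note: the vip migration above uses batch_alter_table, which groups the renames and drops into a single copy-and-move table rebuild; that is what lets the same migration run on backends such as SQLite that cannot ALTER columns in place. A minimal sketch of the pattern (illustrative table and columns)::

    with op.batch_alter_table('example') as batch_op:
        batch_op.alter_column('old_name', new_column_name='new_name',
                              existing_type=sa.String(36))
        batch_op.drop_column('unused')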
+revision = '1afc932f1ca2' +down_revision = 'ffad172e98c1' + +new_fields = ['SSL_CONN_HAS_CERT', 'SSL_VERIFY_RESULT', 'SSL_DN_FIELD'] + + +def upgrade(): + + insert_table = sql.table( + 'l7rule_type', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + cows = [{'name': field} for field in new_fields] + op.bulk_insert(insert_table, cows) diff --git a/octavia/db/migration/alembic_migrations/versions/1e4c1d83044c_keepalived_configuration_datamodel.py b/octavia/db/migration/alembic_migrations/versions/1e4c1d83044c_keepalived_configuration_datamodel.py new file mode 100644 index 0000000000..67c67f2daf --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/1e4c1d83044c_keepalived_configuration_datamodel.py @@ -0,0 +1,85 @@ +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Keepalived configuration datamodel + +Revision ID: 1e4c1d83044c +Revises: 5a3ee5472c31 +Create Date: 2015-08-06 10:39:54.998797 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. +revision = '1e4c1d83044c' +down_revision = '5a3ee5472c31' + + +def upgrade(): + op.create_table( + 'vrrp_auth_method', + sa.Column('name', sa.String(36), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + insert_table = sql.table( + 'vrrp_auth_method', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'PASS'}, + {'name': 'AH'} + ] + ) + + op.create_table( + 'vrrp_group', + sa.Column('load_balancer_id', sa.String(36), nullable=False), + sa.Column('vrrp_group_name', sa.String(36), nullable=True), + sa.Column('vrrp_auth_type', sa.String(16), nullable=True), + sa.Column('vrrp_auth_pass', sa.String(36), nullable=True), + sa.Column('advert_int', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('load_balancer_id'), + sa.ForeignKeyConstraint(['load_balancer_id'], ['load_balancer.id'], + name='fk_vrrp_group_load_balancer_id'), + sa.ForeignKeyConstraint(['vrrp_auth_type'], + ['vrrp_auth_method.name'], + name='fk_load_balancer_vrrp_auth_method_name') + ) + + op.add_column( + 'listener', + sa.Column('peer_port', sa.Integer(), nullable=True) + ) + + op.add_column( + 'amphora', + sa.Column('vrrp_interface', sa.String(16), nullable=True) + ) + + op.add_column( + 'amphora', + sa.Column('vrrp_id', sa.Integer(), nullable=True) + ) + + op.add_column( + 'amphora', + sa.Column('vrrp_priority', sa.Integer(), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/211982b05afc_add_flavor_id_to_lb.py b/octavia/db/migration/alembic_migrations/versions/211982b05afc_add_flavor_id_to_lb.py new file mode 100644 index 0000000000..7d4f03de47 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/211982b05afc_add_flavor_id_to_lb.py @@ -0,0 +1,32 @@ +# Copyright 2018 Rackspace, US Inc. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""add_flavor_id_to_lb
+
+Revision ID: 211982b05afc
+Revises: b9c703669314
+Create Date: 2018-11-30 14:57:28.559884
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = '211982b05afc'
+down_revision = 'b9c703669314'
+
+
+def upgrade():
+    op.add_column('load_balancer',
+                  sa.Column('flavor_id', sa.String(36), nullable=True))
diff --git a/octavia/db/migration/alembic_migrations/versions/2351ea316465_adding_terminate_https_tls_ref_support.py b/octavia/db/migration/alembic_migrations/versions/2351ea316465_adding_terminate_https_tls_ref_support.py
new file mode 100644
index 0000000000..fae31486ed
--- /dev/null
+++ b/octavia/db/migration/alembic_migrations/versions/2351ea316465_adding_terminate_https_tls_ref_support.py
@@ -0,0 +1,48 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Adding TERMINATED_HTTPS support and TLS ref ID char length increase
+
+Revision ID: 2351ea316465
+Revises: 357d17a6d5ac
+Create Date: 2015-05-22 11:57:04.703910
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy import sql
+
+# revision identifiers, used by Alembic.
+revision = '2351ea316465'
+down_revision = '357d17a6d5ac'
+
+
+new_protocol = 'TERMINATED_HTTPS'
+
+
+def upgrade():
+    insert_table = sql.table(
+        'protocol',
+        sql.column('name', sa.String),
+        sql.column('description', sa.String)
+    )
+
+    op.bulk_insert(
+        insert_table,
+        [
+            {'name': new_protocol}
+        ]
+    )
+    op.alter_column('listener', 'tls_certificate_id',
+                    existing_type=sa.String(255), nullable=True)
diff --git a/octavia/db/migration/alembic_migrations/versions/256852d5ff7c_add_lb_network_ip_to_amphora.py b/octavia/db/migration/alembic_migrations/versions/256852d5ff7c_add_lb_network_ip_to_amphora.py
new file mode 100644
index 0000000000..5b0764228d
--- /dev/null
+++ b/octavia/db/migration/alembic_migrations/versions/256852d5ff7c_add_lb_network_ip_to_amphora.py
@@ -0,0 +1,33 @@
+# Copyright 2015 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add lb_network_ip to amphora + +Revision ID: 256852d5ff7c +Revises: 14892634e228 +Create Date: 2015-01-13 16:18:57.359290 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '256852d5ff7c' +down_revision = '14892634e228' + + +def upgrade(): + op.add_column('amphora', sa.Column('lb_network_ip', sa.String(64), + nullable=True)) diff --git a/octavia/db/migration/alembic_migrations/versions/27e54d00c3cd_add_monitor_address_and_port_to_member.py b/octavia/db/migration/alembic_migrations/versions/27e54d00c3cd_add_monitor_address_and_port_to_member.py new file mode 100644 index 0000000000..7ae6cd6ca7 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/27e54d00c3cd_add_monitor_address_and_port_to_member.py @@ -0,0 +1,42 @@ +# Copyright 2017 EayunStack, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add monitor address and port to member + +Revision ID: 27e54d00c3cd +Revises: 5309960964f8 +Create Date: 2017-05-01 23:12:16.695581 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '27e54d00c3cd' +down_revision = '5309960964f8' + + +def upgrade(): + op.add_column('member', + sa.Column('monitor_address', + sa.String(64), + nullable=True) + ) + op.add_column('member', + sa.Column('monitor_port', + sa.Integer(), + nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/298eac0640a7_add_amphora_vrrp_port_id_and_ha_port_id.py b/octavia/db/migration/alembic_migrations/versions/298eac0640a7_add_amphora_vrrp_port_id_and_ha_port_id.py new file mode 100644 index 0000000000..8cc563fad6 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/298eac0640a7_add_amphora_vrrp_port_id_and_ha_port_id.py @@ -0,0 +1,35 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Add vrrp_port_id and ha_port_id to amphora + +Revision ID: 298eac0640a7 +Revises: 4fe8240425b4 +Create Date: 2015-07-20 15:25:37.044098 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
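+# Alembic discovers the two module attributes below at import time and orders
+# upgrades by walking the down_revision chain; for example, the files in this
+# series link 4fe8240425b4 -> 298eac0640a7 -> 357d17a6d5ac.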
+revision = '298eac0640a7'
+down_revision = '4fe8240425b4'
+
+
+def upgrade():
+    op.add_column('amphora',
+                  sa.Column('vrrp_port_id', sa.String(36), nullable=True))
+    op.add_column('amphora',
+                  sa.Column('ha_port_id', sa.String(36), nullable=True))
diff --git a/octavia/db/migration/alembic_migrations/versions/29ff921a6eb_shared_pools.py b/octavia/db/migration/alembic_migrations/versions/29ff921a6eb_shared_pools.py
new file mode 100644
index 0000000000..93c8b1ead3
--- /dev/null
+++ b/octavia/db/migration/alembic_migrations/versions/29ff921a6eb_shared_pools.py
@@ -0,0 +1,76 @@
+# Copyright 2016 Blue Box, an IBM Company
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Shared pools
+
+Revision ID: 29ff921a6eb
+Revises: 43287cd10fef
+Create Date: 2015-12-09 10:32:12.712932
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = '29ff921a6eb'
+down_revision = '43287cd10fef'
+
+
+def upgrade():
+    conn = op.get_bind()
+    # Minimal examples of the tables we need to manipulate
+    listener = sa.sql.table(
+        'listener',
+        sa.sql.column('load_balancer_id', sa.String),
+        sa.sql.column('default_pool_id', sa.String))
+    pool = sa.sql.table(
+        'pool',
+        sa.sql.column('load_balancer_id', sa.String),
+        sa.sql.column('id', sa.String))
+
+    # This foreign key does not need to be unique anymore. To remove the
+    # uniqueness but keep the foreign key we have to do some juggling.
+    op.drop_constraint('fk_listener_pool_id', 'listener',
+                       type_='foreignkey')
+    op.drop_constraint('uq_listener_default_pool_id', 'listener',
+                       type_='unique')
+    op.create_foreign_key('fk_listener_pool_id', 'listener',
+                          'pool', ['default_pool_id'], ['id'])
+
+    op.add_column('pool',
+                  sa.Column('load_balancer_id', sa.String(36),
+                            sa.ForeignKey('load_balancer.id'),
+                            nullable=True))
+
+    # Populate this new column appropriately
+    select_obj = sa.select(listener.c.load_balancer_id,
+                           listener.c.default_pool_id).where(
+        # isnot() renders as SQL "IS NOT NULL"; a bare "is not None" here
+        # would be evaluated in Python and always be true.
+        listener.c.default_pool_id.isnot(None))
+    result = conn.execute(select_obj)
+    for row in result:
+        stmt = pool.update().values(load_balancer_id=row[0]).where(
+            pool.c.id == row[1])
+        op.execute(stmt)
+
+# For existing installations, the ETL above populates the new column
+# automatically. The equivalent manual procedure is:
+#
+# Get the output from this:
+#
+# SELECT default_pool_id, load_balancer_id l_id FROM listener WHERE
+# default_pool_id IS NOT NULL;
+#
+# Then for every row returned run:
+#
+# UPDATE pool SET load_balancer_id = l_id WHERE id = default_pool_id;
diff --git a/octavia/db/migration/alembic_migrations/versions/2ab994dd3ec2_add_listener_alpn_protocols_column.py b/octavia/db/migration/alembic_migrations/versions/2ab994dd3ec2_add_listener_alpn_protocols_column.py
new file mode 100644
index 0000000000..1bd6e43fce
--- /dev/null
+++ b/octavia/db/migration/alembic_migrations/versions/2ab994dd3ec2_add_listener_alpn_protocols_column.py
@@ -0,0 +1,35 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add listener alpn protocols column + +Revision ID: 2ab994dd3ec2 +Revises: 32e5c35b26a8 +Create Date: 2020-08-02 21:51:21.261087 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '2ab994dd3ec2' +down_revision = '32e5c35b26a8' + + +def upgrade(): + op.add_column( + 'listener', + sa.Column('alpn_protocols', sa.String(512), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/2ad093f6353f_add_listener_client_ca_tls_certificate_.py b/octavia/db/migration/alembic_migrations/versions/2ad093f6353f_add_listener_client_ca_tls_certificate_.py new file mode 100644 index 0000000000..a5db4c8764 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/2ad093f6353f_add_listener_client_ca_tls_certificate_.py @@ -0,0 +1,37 @@ +# Copyright 2018 Huawei +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add listener client_ca_tls_certificate_id column + +Revision ID: 2ad093f6353f +Revises: 11e4bb2bb8ef +Create Date: 2019-02-13 08:32:43.009997 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '2ad093f6353f' +down_revision = '11e4bb2bb8ef' + + +def upgrade(): + op.add_column( + 'listener', + sa.Column('client_ca_tls_certificate_id', sa.String(255), + nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/3097e55493ae_add_sg_id_to_vip_table.py b/octavia/db/migration/alembic_migrations/versions/3097e55493ae_add_sg_id_to_vip_table.py new file mode 100644 index 0000000000..332fee308e --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/3097e55493ae_add_sg_id_to_vip_table.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add sg_id to vip table + +Revision ID: 3097e55493ae +Revises: db2a73e82626 +Create Date: 2024-04-05 10:04:32.015445 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
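+# vip_security_group below is a plain association table: the composite
+# primary key on (load_balancer_id, sg_id) lets one load balancer carry
+# several security groups while preventing duplicate pairs.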
+revision = '3097e55493ae' +down_revision = 'db2a73e82626' + + +def upgrade(): + op.create_table( + "vip_security_group", + sa.Column("load_balancer_id", sa.String(36), nullable=False), + sa.Column("sg_id", sa.String(36), nullable=False), + sa.ForeignKeyConstraint(["load_balancer_id"], + ["vip.load_balancer_id"], + name="fk_vip_sg_vip_lb_id"), + sa.PrimaryKeyConstraint("load_balancer_id", "sg_id") + ) diff --git a/octavia/db/migration/alembic_migrations/versions/31f7653ded67_allow_multiple_vips_per_loadbalancer.py b/octavia/db/migration/alembic_migrations/versions/31f7653ded67_allow_multiple_vips_per_loadbalancer.py new file mode 100644 index 0000000000..1ab3ca9325 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/31f7653ded67_allow_multiple_vips_per_loadbalancer.py @@ -0,0 +1,44 @@ +# Copyright 2019 Verizon Media +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""allow multiple vips per loadbalancer + +Revision ID: 31f7653ded67 +Revises: 6ac558d7fc21 +Create Date: 2019-05-04 19:44:22.825499 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '31f7653ded67' +down_revision = '6ac558d7fc21' + + +def upgrade(): + op.create_table( + 'additional_vip', + sa.Column('load_balancer_id', sa.String(36), nullable=False, + index=True), + sa.Column('ip_address', sa.String(64), nullable=True), + sa.Column('port_id', sa.String(36), nullable=True), + sa.Column('subnet_id', sa.String(36), nullable=True), + sa.Column('network_id', sa.String(36), nullable=True), + sa.ForeignKeyConstraint(['load_balancer_id'], ['load_balancer.id'], + name='fk_add_vip_load_balancer_id'), + sa.PrimaryKeyConstraint('load_balancer_id', 'subnet_id', + name='pk_add_vip_load_balancer_subnet'), + ) diff --git a/octavia/db/migration/alembic_migrations/versions/32e5c35b26a8_add_l7policy_and_l7rule_quota.py b/octavia/db/migration/alembic_migrations/versions/32e5c35b26a8_add_l7policy_and_l7rule_quota.py new file mode 100644 index 0000000000..858ec6bb18 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/32e5c35b26a8_add_l7policy_and_l7rule_quota.py @@ -0,0 +1,40 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add l7policy and l7rule quota + +Revision ID: 32e5c35b26a8 +Revises: d3c8a090f3de +Create Date: 2018-08-10 09:13:59.383272 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
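+# These columns extend the quotas table (created by revision 3f8ff3be828e)
+# with limit and in-use counters for L7 policies and rules.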
+revision = '32e5c35b26a8' +down_revision = 'd3c8a090f3de' + + +def upgrade(): + op.add_column('quotas', + sa.Column('l7policy', sa.Integer(), nullable=True)) + op.add_column('quotas', + sa.Column('l7rule', sa.Integer(), nullable=True)) + op.add_column('quotas', + sa.Column('in_use_l7policy', sa.Integer(), nullable=True)) + op.add_column('quotas', + sa.Column('in_use_l7rule', sa.Integer(), nullable=True)) diff --git a/octavia/db/migration/alembic_migrations/versions/357d17a6d5ac_update_lb_and_amphora_data_model_for_.py b/octavia/db/migration/alembic_migrations/versions/357d17a6d5ac_update_lb_and_amphora_data_model_for_.py new file mode 100644 index 0000000000..8c1776c7bf --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/357d17a6d5ac_update_lb_and_amphora_data_model_for_.py @@ -0,0 +1,89 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""update lb and amphora data model for active passive + +Revision ID: 357d17a6d5ac +Revises: 298eac0640a7 +Create Date: 2015-07-16 17:41:49.029145 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. +revision = '357d17a6d5ac' +down_revision = '298eac0640a7' + + +def upgrade(): + op.create_table( + 'lb_topology', + sa.Column('name', sa.String(36), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + insert_table = sql.table( + 'lb_topology', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'SINGLE'}, + {'name': 'ACTIVE_STANDBY'} + ] + ) + + op.create_table( + 'amphora_roles', + sa.Column('name', sa.String(36), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + insert_table = sql.table( + 'amphora_roles', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'MASTER'}, + {'name': 'BACKUP'}, + {'name': 'STANDALONE'} + ] + ) + + op.add_column( + 'load_balancer', + sa.Column('topology', sa.String(36), + sa.ForeignKey('lb_topology.name', + name='fk_lb_topology_name'), + nullable=True) + ) + + op.add_column( + 'amphora', + sa.Column('role', sa.String(36), + sa.ForeignKey('amphora_roles.name', + name='fk_amphora_roles_name'), + nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/35dee79d5865_initial_create.py b/octavia/db/migration/alembic_migrations/versions/35dee79d5865_initial_create.py new file mode 100644 index 0000000000..bff883d329 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/35dee79d5865_initial_create.py @@ -0,0 +1,352 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +'''initial_create + +Revision ID: 35dee79d5865 +Revises: None +Create Date: 2014-08-15 11:01:14.897223 + +''' + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. +revision = '35dee79d5865' +down_revision = None + + +def upgrade(): + # Create lookup tables + op.create_table( + 'health_monitor_type', + sa.Column('name', sa.String(30), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + # Create temporary table for table data seeding + insert_table = sql.table( + 'health_monitor_type', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'HTTP'}, + {'name': 'HTTPS'}, + {'name': 'TCP'} + ] + ) + + op.create_table( + 'protocol', + sa.Column('name', sa.String(30), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + insert_table = sql.table( + 'protocol', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'HTTP'}, + {'name': 'HTTPS'}, + {'name': 'TCP'} + ] + ) + + op.create_table( + 'algorithm', + sa.Column('name', sa.String(30), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + insert_table = sql.table( + 'algorithm', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'ROUND_ROBIN'}, + {'name': 'LEAST_CONNECTIONS'}, + {'name': 'SOURCE_IP'} + ] + ) + + op.create_table( + 'session_persistence_type', + sa.Column('name', sa.String(30), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + insert_table = sql.table( + 'session_persistence_type', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'SOURCE_IP'}, + {'name': 'HTTP_COOKIE'}, + {'name': 'APP_COOKIE'} + ] + ) + + op.create_table( + 'provisioning_status', + sa.Column('name', sa.String(30), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + insert_table = sql.table( + 'provisioning_status', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'ACTIVE'}, + {'name': 'PENDING_CREATE'}, + {'name': 'PENDING_UPDATE'}, + {'name': 'PENDING_DELETE'}, + {'name': 'DELETED'}, + {'name': 'ERROR'} + ] + ) + + op.create_table( + 'operating_status', + sa.Column('name', sa.String(30), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + insert_table = sql.table( + 'operating_status', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'ONLINE'}, + {'name': 'OFFLINE'}, + {'name': 'DEGRADED'}, + {'name': 'ERROR'} + ] + ) + + op.create_table( + 'pool', + sa.Column('tenant_id', sa.String(255), nullable=True), + sa.Column('id', sa.String(36), nullable=False), + sa.Column('name', sa.String(255), nullable=True), + sa.Column('description', 
sa.String(255), nullable=True), + sa.Column('protocol', sa.String(16), nullable=False), + sa.Column('lb_algorithm', sa.String(16), nullable=False), + sa.Column('operating_status', sa.String(16), nullable=False), + sa.Column('enabled', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint('id'), + sa.ForeignKeyConstraint(['protocol'], + ['protocol.name'], + name='fk_pool_protocol_name'), + sa.ForeignKeyConstraint(['lb_algorithm'], + ['algorithm.name'], + name='fk_pool_algorithm_name'), + sa.ForeignKeyConstraint(['operating_status'], + ['operating_status.name'], + name='fk_pool_operating_status_name') + ) + + op.create_table( + 'health_monitor', + sa.Column('pool_id', sa.String(36), nullable=False), + sa.Column('type', sa.String(36), nullable=False), + sa.Column('delay', sa.Integer(), nullable=False), + sa.Column('timeout', sa.Integer(), nullable=False), + sa.Column('fall_threshold', sa.Integer(), nullable=False), + sa.Column('rise_threshold', sa.Integer(), nullable=False), + sa.Column('http_method', sa.String(16), nullable=True), + sa.Column('url_path', sa.String(255), nullable=True), + sa.Column('expected_codes', sa.String(64), nullable=True), + sa.Column('enabled', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint('pool_id'), + sa.ForeignKeyConstraint(['pool_id'], ['pool.id'], + name='fk_health_monitor_pool_id'), + sa.ForeignKeyConstraint( + ['type'], ['health_monitor_type.name'], + name='fk_health_monitor_health_monitor_type_name') + ) + + op.create_table( + 'session_persistence', + sa.Column('pool_id', sa.String(36), nullable=False), + sa.Column('type', sa.String(16), nullable=False), + sa.Column('cookie_name', sa.String(255), nullable=True), + sa.ForeignKeyConstraint( + ['type'], ['session_persistence_type.name'], + name='fk_session_persistence_session_persistence_type_name'), + sa.ForeignKeyConstraint(['pool_id'], ['pool.id'], + name='fk_session_persistence_pool_id'), + sa.PrimaryKeyConstraint('pool_id') + ) + + op.create_table( + 'member', + sa.Column('tenant_id', sa.String(255), nullable=True), + sa.Column('id', sa.String(36), nullable=False), + sa.Column('pool_id', sa.String(36), nullable=False), + sa.Column('subnet_id', sa.String(36), nullable=True), + sa.Column('address', sa.String(64), nullable=False), + sa.Column('protocol_port', sa.Integer(), nullable=False), + sa.Column('weight', sa.Integer(), nullable=True), + sa.Column('operating_status', sa.String(16), nullable=False), + sa.Column('enabled', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint('id'), + sa.ForeignKeyConstraint(['pool_id'], ['pool.id'], + name='fk_member_pool_id'), + sa.ForeignKeyConstraint(['operating_status'], + ['operating_status.name'], + name='fk_member_operating_status_name'), + sa.UniqueConstraint('pool_id', 'address', 'protocol_port', + name='uq_member_pool_id_address_protocol_port') + ) + + op.create_table( + 'load_balancer', + sa.Column('tenant_id', sa.String(255), nullable=True), + sa.Column('id', sa.String(36), nullable=False), + sa.Column('name', sa.String(255), nullable=True), + sa.Column('description', sa.String(255), nullable=True), + sa.Column('provisioning_status', sa.String(16), nullable=False), + sa.Column('operating_status', sa.String(16), nullable=False), + sa.Column('enabled', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint('id'), + sa.ForeignKeyConstraint( + ['provisioning_status'], ['provisioning_status.name'], + name='fk_load_balancer_provisioning_status_name'), + sa.ForeignKeyConstraint(['operating_status'], + ['operating_status.name'], + 
name='fk_load_balancer_operating_status_name') + ) + + op.create_table( + 'vip', + sa.Column('load_balancer_id', sa.String(36), nullable=False), + sa.Column('ip_address', sa.String(36), nullable=True), + sa.Column('net_port_id', sa.String(36), nullable=True), + sa.Column('subnet_id', sa.String(36), nullable=True), + sa.Column('floating_ip_id', sa.String(36), nullable=True), + sa.Column('floating_ip_network_id', sa.String(36), nullable=True), + sa.PrimaryKeyConstraint('load_balancer_id'), + sa.ForeignKeyConstraint(['load_balancer_id'], ['load_balancer.id'], + name='fk_vip_load_balancer_id') + ) + + op.create_table( + 'listener', + sa.Column('tenant_id', sa.String(255), nullable=True), + sa.Column('id', sa.String(36), nullable=False), + sa.Column('name', sa.String(255), nullable=True), + sa.Column('description', sa.String(255), nullable=True), + sa.Column('protocol', sa.String(16), nullable=False), + sa.Column('protocol_port', sa.Integer(), nullable=False), + sa.Column('connection_limit', sa.Integer(), nullable=True), + sa.Column('load_balancer_id', sa.String(36), nullable=True), + sa.Column('tls_certificate_id', sa.String(36), nullable=True), + sa.Column('default_pool_id', sa.String(36), nullable=True), + sa.Column('provisioning_status', sa.String(16), nullable=False), + sa.Column('operating_status', sa.String(16), nullable=False), + sa.Column('enabled', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint(['load_balancer_id'], ['load_balancer.id'], + name='fk_listener_load_balancer_id'), + sa.ForeignKeyConstraint(['default_pool_id'], ['pool.id'], + name='fk_listener_pool_id'), + sa.ForeignKeyConstraint(['protocol'], ['protocol.name'], + name='fk_listener_protocol_name'), + sa.ForeignKeyConstraint(['provisioning_status'], + ['provisioning_status.name'], + name='fk_listener_provisioning_status_name'), + sa.ForeignKeyConstraint(['operating_status'], + ['operating_status.name'], + name='fk_listener_operating_status_name'), + sa.UniqueConstraint('default_pool_id', + name='uq_listener_default_pool_id'), + sa.UniqueConstraint( + 'load_balancer_id', 'protocol_port', + name='uq_listener_load_balancer_id_protocol_port'), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'sni', + sa.Column('listener_id', sa.String(36), nullable=False), + sa.Column('tls_container_id', sa.String(36), nullable=False), + sa.Column('position', sa.Integer, nullable=True), + sa.ForeignKeyConstraint(['listener_id'], ['listener.id'], + name='fk_sni_listener_id'), + sa.PrimaryKeyConstraint('listener_id', 'tls_container_id') + ) + + op.create_table( + 'listener_statistics', + sa.Column('listener_id', sa.String(36), nullable=False), + sa.Column('bytes_in', sa.BigInteger(), nullable=False), + sa.Column('bytes_out', sa.BigInteger(), nullable=False), + sa.Column('active_connections', sa.Integer(), nullable=False), + sa.Column('total_connections', sa.BigInteger(), nullable=False), + sa.PrimaryKeyConstraint('listener_id'), + sa.ForeignKeyConstraint(['listener_id'], ['listener.id'], + name='fk_listener_statistics_listener_id') + ) + + op.create_table( + 'amphora', + # id should come from the service providing the amphora (i.e. 
nova) + sa.Column('id', sa.String(36), nullable=False, autoincrement=False), + sa.Column('host_id', sa.String(36), nullable=False), + sa.Column('status', sa.String(36), nullable=False), + sa.PrimaryKeyConstraint('id'), + sa.ForeignKeyConstraint( + ['status'], ['provisioning_status.name'], + name='fk_container_provisioning_status_name') + ) + + op.create_table( + 'load_balancer_amphora', + sa.Column('amphora_id', sa.String(36), nullable=False), + sa.Column('load_balancer_id', sa.String(36), nullable=False), + sa.ForeignKeyConstraint( + ['load_balancer_id'], ['load_balancer.id'], + name='fk_load_balancer_amphora_load_balancer_id'), + sa.ForeignKeyConstraint(['amphora_id'], + ['amphora.id'], + name='fk_load_balancer_amphora_id'), + sa.PrimaryKeyConstraint('amphora_id', 'load_balancer_id') + ) diff --git a/octavia/db/migration/alembic_migrations/versions/36b94648fef8_add_timestamp.py b/octavia/db/migration/alembic_migrations/versions/36b94648fef8_add_timestamp.py new file mode 100644 index 0000000000..c242e05ac0 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/36b94648fef8_add_timestamp.py @@ -0,0 +1,42 @@ +# Copyright 2016 Catalyst IT +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add timestamp + +Revision ID: 36b94648fef8 +Revises: 4d9cf7d32f2 +Create Date: 2016-04-21 10:45:32.278433 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '36b94648fef8' +down_revision = '4d9cf7d32f2' + +tables = ['member', 'pool', 'load_balancer', 'listener'] + + +def upgrade(): + for table in tables: + op.add_column( + table, + sa.Column('created_at', sa.DateTime(), nullable=True) + ) + op.add_column( + table, + sa.Column('updated_at', sa.DateTime(), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/392fb85b4419_add_primary_key_to_spares_pool.py b/octavia/db/migration/alembic_migrations/versions/392fb85b4419_add_primary_key_to_spares_pool.py new file mode 100644 index 0000000000..09e723d3db --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/392fb85b4419_add_primary_key_to_spares_pool.py @@ -0,0 +1,47 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add primary key to spares_pool + +Revision ID: 392fb85b4419 +Revises: 46d914b2a5e5 +Create Date: 2019-04-30 09:58:54.159823 + +""" + +from alembic import op +from sqlalchemy.engine import reflection + +from oslo_log import log as logging + + +# revision identifiers, used by Alembic. 
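+# This migration is deliberately conditional: it inspects the live schema and
+# creates the primary key only when the (updated) 46d914b2a5e5 revision has
+# not already done so, keeping old and new deployments consistent.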
+revision = '392fb85b4419' +down_revision = '46d914b2a5e5' + +LOG = logging.getLogger(__name__) + + +def upgrade(): + bind = op.get_bind() + inspector = reflection.Inspector.from_engine(bind.engine) + pk = inspector.get_pk_constraint('spares_pool') + if not pk['constrained_columns']: + op.create_primary_key( + 'pk_spares_pool', 'spares_pool', ['updated_at']) + else: + # Revision '46d914b2a5e5' has been updated to create the + # missing PK. Depending whether the env is already deployed or + # not we may or not have to add the primary key. + LOG.info("The primary key in spares_pool already exists, continuing.") diff --git a/octavia/db/migration/alembic_migrations/versions/3a1e1cdb7b27_rename_amphora_host_id.py b/octavia/db/migration/alembic_migrations/versions/3a1e1cdb7b27_rename_amphora_host_id.py new file mode 100644 index 0000000000..622f8116e9 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/3a1e1cdb7b27_rename_amphora_host_id.py @@ -0,0 +1,33 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""rename amphora host id + +Revision ID: 3a1e1cdb7b27 +Revises: 4faaa983e7a9 +Create Date: 2015-01-10 02:01:04.997336 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '3a1e1cdb7b27' +down_revision = '4faaa983e7a9' + + +def upgrade(): + op.alter_column('amphora', 'host_id', new_column_name='compute_id', + existing_type=sa.String(36), nullable=True) diff --git a/octavia/db/migration/alembic_migrations/versions/3b199c848b96_create_no_monitor_operational_status.py b/octavia/db/migration/alembic_migrations/versions/3b199c848b96_create_no_monitor_operational_status.py new file mode 100644 index 0000000000..f6175b974a --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/3b199c848b96_create_no_monitor_operational_status.py @@ -0,0 +1,34 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Create NO_MONITOR operational_status + +Revision ID: 3b199c848b96 +Revises: 543f5d8e4e56 +Create Date: 2015-09-03 17:11:03.724070 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
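+# Rather than declaring a temporary table, the upgrade below reflects the
+# existing operating_status table from the live connection (autoload_with)
+# and then seeds the new row through op.bulk_insert().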
+revision = '3b199c848b96' +down_revision = '543f5d8e4e56' + + +def upgrade(): + bind = op.get_bind() + md = sa.MetaData() + sa.Table('operating_status', md, autoload_with=bind) + op.bulk_insert(md.tables['operating_status'], [{'name': 'NO_MONITOR'}]) diff --git a/octavia/db/migration/alembic_migrations/versions/3e5b37a0bdb9_add_vrrp_ip_and_ha_ip_to_amphora.py b/octavia/db/migration/alembic_migrations/versions/3e5b37a0bdb9_add_vrrp_ip_and_ha_ip_to_amphora.py new file mode 100644 index 0000000000..6d306283d7 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/3e5b37a0bdb9_add_vrrp_ip_and_ha_ip_to_amphora.py @@ -0,0 +1,35 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Add vrrp_ip and ha_ip to amphora + +Revision ID: 3e5b37a0bdb9 +Revises: 92fe9857279 +Create Date: 2015-03-24 18:17:36.998604 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '3e5b37a0bdb9' +down_revision = '92fe9857279' + + +def upgrade(): + op.add_column('amphora', + sa.Column('vrrp_ip', sa.String(64), nullable=True)) + op.add_column('amphora', + sa.Column('ha_ip', sa.String(64), nullable=True)) diff --git a/octavia/db/migration/alembic_migrations/versions/3f8ff3be828e_create_quotas_table.py b/octavia/db/migration/alembic_migrations/versions/3f8ff3be828e_create_quotas_table.py new file mode 100644 index 0000000000..dc90ed1b69 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/3f8ff3be828e_create_quotas_table.py @@ -0,0 +1,45 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""create quotas table + +Revision ID: 3f8ff3be828e +Revises: 44a2414dd683 +Create Date: 2016-09-01 13:59:20.723621 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
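+# The quotas table is keyed by project_id; the limit columns are nullable so
+# an unset per-project quota can fall back to the operator-configured default
+# rather than meaning zero.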
+revision = '3f8ff3be828e' +down_revision = '44a2414dd683' + + +def upgrade(): + op.create_table( + 'quotas', + sa.Column('project_id', sa.String(36), primary_key=True, + nullable=False), + sa.Column('health_monitor', sa.Integer(), nullable=True), + sa.Column('load_balancer', sa.Integer(), nullable=True), + sa.Column('listener', sa.Integer(), nullable=True), + sa.Column('member', sa.Integer(), nullable=True), + sa.Column('pool', sa.Integer(), nullable=True), + sa.Column('in_use_health_monitor', sa.Integer(), nullable=True), + sa.Column('in_use_load_balancer', sa.Integer(), nullable=True), + sa.Column('in_use_listener', sa.Integer(), nullable=True), + sa.Column('in_use_member', sa.Integer(), nullable=True), + sa.Column('in_use_pool', sa.Integer(), nullable=True), + ) diff --git a/octavia/db/migration/alembic_migrations/versions/43287cd10fef_make_pool_lb_algorithm_larger.py b/octavia/db/migration/alembic_migrations/versions/43287cd10fef_make_pool_lb_algorithm_larger.py new file mode 100644 index 0000000000..7daf1879f2 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/43287cd10fef_make_pool_lb_algorithm_larger.py @@ -0,0 +1,43 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Make pool.lb_algorithm larger + +Revision ID: 43287cd10fef +Revises: 6abb04f24c5 +Create Date: 2016-01-14 10:05:27.803518 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '43287cd10fef' +down_revision = '6abb04f24c5' + + +def upgrade(): + op.drop_constraint( + 'fk_pool_algorithm_name', 'pool', + type_='foreignkey' + ) + op.alter_column('algorithm', 'name', nullable=False, + existing_type=sa.String(255)) + op.alter_column('pool', 'lb_algorithm', nullable=False, + existing_type=sa.String(255)) + op.create_foreign_key( + 'fk_pool_algorithm_name', 'pool', + 'algorithm', ['lb_algorithm'], ['name'] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/443fe6676637_add_network_id_to_vip.py b/octavia/db/migration/alembic_migrations/versions/443fe6676637_add_network_id_to_vip.py new file mode 100644 index 0000000000..51b042d092 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/443fe6676637_add_network_id_to_vip.py @@ -0,0 +1,32 @@ +# Copyright 2017 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Add a column network_id in table vip + +Revision ID: 443fe6676637 +Revises: 3f8ff3be828e +Create Date: 2017-02-06 15:21:25.637744 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '443fe6676637' +down_revision = '3f8ff3be828e' + + +def upgrade(): + op.add_column('vip', + sa.Column('network_id', sa.String(36), nullable=True)) diff --git a/octavia/db/migration/alembic_migrations/versions/44a2414dd683_adding_name_column_to_member_and_health_.py b/octavia/db/migration/alembic_migrations/versions/44a2414dd683_adding_name_column_to_member_and_health_.py new file mode 100644 index 0000000000..c41ce1d571 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/44a2414dd683_adding_name_column_to_member_and_health_.py @@ -0,0 +1,39 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""adding name column to member and health monitor + +Revision ID: 44a2414dd683 +Revises: c11292016060 +Create Date: 2016-12-19 13:14:58.879793 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '44a2414dd683' +down_revision = 'c11292016060' + + +tables = ['member', 'health_monitor'] + + +def upgrade(): + for table in tables: + op.add_column( + table, + sa.Column('name', sa.String(255), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/458c9ee2a011_l7_policies_and_rules.py b/octavia/db/migration/alembic_migrations/versions/458c9ee2a011_l7_policies_and_rules.py new file mode 100644 index 0000000000..9fea0360e2 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/458c9ee2a011_l7_policies_and_rules.py @@ -0,0 +1,149 @@ +# Copyright 2015 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""L7 Policies and Rules + +Revision ID: 458c9ee2a011 +Revises: 29ff921a6eb +Create Date: 2016-01-07 11:45:45.391851 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. 
+revision = '458c9ee2a011' +down_revision = '29ff921a6eb' + + +def upgrade(): + # L7 Rule Types + op.create_table( + 'l7rule_type', + sa.Column('name', sa.String(36), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + # Create temporary table for table data seeding + insert_table = sql.table( + 'l7rule_type', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'HOST_NAME'}, + {'name': 'PATH'}, + {'name': 'FILE_TYPE'}, + {'name': 'HEADER'}, + {'name': 'COOKIE'} + ] + ) + + # L7 Rule Compare Types + op.create_table( + 'l7rule_compare_type', + sa.Column('name', sa.String(36), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + insert_table = sql.table( + 'l7rule_compare_type', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'REGEX'}, + {'name': 'STARTS_WITH'}, + {'name': 'ENDS_WITH'}, + {'name': 'CONTAINS'}, + {'name': 'EQUAL_TO'} + ] + ) + + # L7 Policy Actions + op.create_table( + 'l7policy_action', + sa.Column('name', sa.String(36), primary_key=True), + sa.Column('description', sa.String(255), nullable=True) + ) + + insert_table = sql.table( + 'l7policy_action', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'REJECT'}, + {'name': 'REDIRECT_TO_URL'}, + {'name': 'REDIRECT_TO_POOL'} + ] + ) + + # L7 Policies + op.create_table( + 'l7policy', + sa.Column('id', sa.String(36), nullable=False), + sa.Column('name', sa.String(255), nullable=True), + sa.Column('description', sa.String(255), nullable=True), + sa.Column('listener_id', sa.String(36), nullable=False), + sa.Column('action', sa.String(36), nullable=False), + sa.Column('redirect_pool_id', sa.String(36), nullable=True), + sa.Column('redirect_url', sa.String(255), nullable=True), + sa.Column('position', sa.Integer, nullable=False), + sa.Column('enabled', sa.Boolean(), default=True, nullable=False), + + sa.PrimaryKeyConstraint('id'), + sa.ForeignKeyConstraint(['listener_id'], + ['listener.id'], + name='fk_l7policy_listener_id'), + sa.ForeignKeyConstraint(['redirect_pool_id'], + ['pool.id'], + name='fk_l7policy_pool_id'), + sa.ForeignKeyConstraint(['action'], + ['l7policy_action.name'], + name='fk_l7policy_l7policy_action_name') + ) + + # L7 Rules + op.create_table( + 'l7rule', + sa.Column('id', sa.String(36), nullable=False), + sa.Column('l7policy_id', sa.String(36), nullable=False), + sa.Column('type', sa.String(36), nullable=False), + sa.Column('compare_type', sa.String(36), nullable=False), + sa.Column('key', sa.String(255), nullable=True), + sa.Column('value', sa.String(255), nullable=False), + sa.Column('invert', sa.Boolean(), default=False, nullable=False), + sa.PrimaryKeyConstraint('id'), + sa.ForeignKeyConstraint(['l7policy_id'], + ['l7policy.id'], + name='fk_l7rule_l7policy_id'), + sa.ForeignKeyConstraint(['type'], + ['l7rule_type.name'], + name='fk_l7rule_l7rule_type_name'), + sa.ForeignKeyConstraint(['compare_type'], + ['l7rule_compare_type.name'], + name='fk_l7rule_l7rule_compare_type_name') + ) diff --git a/octavia/db/migration/alembic_migrations/versions/46d914b2a5e5_seed_the_spares_pool_table.py b/octavia/db/migration/alembic_migrations/versions/46d914b2a5e5_seed_the_spares_pool_table.py new file mode 100644 index 0000000000..32add94c5b --- /dev/null +++ 
b/octavia/db/migration/alembic_migrations/versions/46d914b2a5e5_seed_the_spares_pool_table.py @@ -0,0 +1,47 @@ +# Copyright 2019 Michael Johnson +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Seed the spares_pool table + +Revision ID: 46d914b2a5e5 +Revises: 6ffc710674ef +Create Date: 2019-04-03 14:03:25.596157 + +""" + + +import datetime + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '46d914b2a5e5' +down_revision = '6ffc710674ef' + + +def upgrade(): + # Create temporary table for table data seeding + insert_table = sa.table( + 'spares_pool', + sa.column('updated_at', sa.DateTime), + ) + + # Note: The date/time doesn't matter, we just need to seed the table. + op.bulk_insert( + insert_table, + [ + {'updated_at': datetime.datetime.now()} + ] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/48660b6643f0_add_new_states_for_amphora.py b/octavia/db/migration/alembic_migrations/versions/48660b6643f0_add_new_states_for_amphora.py new file mode 100644 index 0000000000..763dbbe3b7 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/48660b6643f0_add_new_states_for_amphora.py @@ -0,0 +1,47 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add new states for amphora + +Revision ID: 48660b6643f0 +Revises: 3e5b37a0bdb9 +Create Date: 2015-01-20 13:31:30.017959 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. +revision = '48660b6643f0' +down_revision = '3e5b37a0bdb9' + + +def upgrade(): + insert_table = sql.table( + 'provisioning_status', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'READY'}, + {'name': 'BOOTING'}, + {'name': 'ALLOCATED'} + ] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/4a6ec0ab7284_remove_fk_constraints_on_listener_.py b/octavia/db/migration/alembic_migrations/versions/4a6ec0ab7284_remove_fk_constraints_on_listener_.py new file mode 100644 index 0000000000..77f8f1e831 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/4a6ec0ab7284_remove_fk_constraints_on_listener_.py @@ -0,0 +1,37 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Remove FK constraints on listener_statistics because it will be cross-DB + +Revision ID: 4a6ec0ab7284 +Revises: 62816c232310 +Create Date: 2016-07-05 14:09:16.320931 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = '4a6ec0ab7284' +down_revision = '62816c232310' + + +def upgrade(): + # OpenStack has decided that "down" migrations are not supported. + # The downgrade() method has been omitted for this reason. + op.drop_constraint('fk_listener_statistics_listener_id', + 'listener_statistics', + type_='foreignkey') + op.drop_constraint('fk_listener_statistic_amphora_id', + 'listener_statistics', + type_='foreignkey') diff --git a/octavia/db/migration/alembic_migrations/versions/4aeb9e23ad43_add_draining_operating_status.py b/octavia/db/migration/alembic_migrations/versions/4aeb9e23ad43_add_draining_operating_status.py new file mode 100644 index 0000000000..b05e737e2d --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/4aeb9e23ad43_add_draining_operating_status.py @@ -0,0 +1,34 @@ +# Copyright 2017 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Add DRAINING operating status + +Revision ID: 4aeb9e23ad43 +Revises: e6672bda93bf +Create Date: 2017-07-27 00:54:07.128617 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '4aeb9e23ad43' +down_revision = 'e6672bda93bf' + + +def upgrade(): + bind = op.get_bind() + md = sa.MetaData() + sa.Table('operating_status', md, autoload_with=bind) + op.bulk_insert(md.tables['operating_status'], [{'name': 'DRAINING'}]) diff --git a/octavia/db/migration/alembic_migrations/versions/4c094013699a_update_load_balancer_amphora.py b/octavia/db/migration/alembic_migrations/versions/4c094013699a_update_load_balancer_amphora.py new file mode 100644 index 0000000000..5c744cdcdd --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/4c094013699a_update_load_balancer_amphora.py @@ -0,0 +1,47 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
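+# This migration collapses the original many-to-many load_balancer_amphora
+# association into a direct amphora.load_balancer_id foreign key and renames
+# the mis-named fk_container_* constraint left over from the initial create.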
+ +'''update load balancer amphora relationship + +Revision ID: 4c094013699a +Revises: 35dee79d5865 +Create Date: 2014-09-15 14:42:44.875448 + +''' + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '4c094013699a' +down_revision = '35dee79d5865' + + +def upgrade(): + op.add_column( + 'amphora', + sa.Column('load_balancer_id', sa.String(36), + sa.ForeignKey('load_balancer.id', + name='fk_amphora_load_balancer_id'), + nullable=True) + ) + op.drop_table('load_balancer_amphora') + op.drop_constraint( + 'fk_container_provisioning_status_name', 'amphora', + type_='foreignkey' + ) + op.create_foreign_key( + 'fk_amphora_provisioning_status_name', 'amphora', + 'provisioning_status', ['status'], ['name'] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/4d9cf7d32f2_insert_headers.py b/octavia/db/migration/alembic_migrations/versions/4d9cf7d32f2_insert_headers.py new file mode 100644 index 0000000000..20f75d4ea7 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/4d9cf7d32f2_insert_headers.py @@ -0,0 +1,32 @@ +# Copyright 2016 VMware +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Insert headers + +Revision ID: 4d9cf7d32f2 +Revises: 9bf4d21caaea +Create Date: 2016-02-21 17:16:22.316744 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '4d9cf7d32f2' +down_revision = '9bf4d21caaea' + + +def upgrade(): + op.add_column('listener', sa.Column('insert_headers', sa.PickleType())) diff --git a/octavia/db/migration/alembic_migrations/versions/4f65b4f91c39_amphora_add_flavor_id.py b/octavia/db/migration/alembic_migrations/versions/4f65b4f91c39_amphora_add_flavor_id.py new file mode 100644 index 0000000000..baca46b230 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/4f65b4f91c39_amphora_add_flavor_id.py @@ -0,0 +1,36 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""amphora add flavor id + +Revision ID: 4f65b4f91c39 +Revises: 80dba23a159f +Create Date: 2018-07-16 09:59:07.169894 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
+revision = '4f65b4f91c39' +down_revision = '80dba23a159f' + + +def upgrade(): + op.add_column( + 'amphora', + sa.Column('compute_flavor', sa.String(255), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/4faaa983e7a9_update_member_address_column.py b/octavia/db/migration/alembic_migrations/versions/4faaa983e7a9_update_member_address_column.py new file mode 100644 index 0000000000..affd32f7ee --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/4faaa983e7a9_update_member_address_column.py @@ -0,0 +1,33 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""update member address column + +Revision ID: 4faaa983e7a9 +Revises: 13500e2e978d +Create Date: 2014-09-29 11:22:16.565071 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '4faaa983e7a9' +down_revision = '13500e2e978d' + + +def upgrade(): + op.alter_column('member', 'address', new_column_name='ip_address', + existing_type=sa.String(64)) diff --git a/octavia/db/migration/alembic_migrations/versions/4fe8240425b4_update_vip_add_subnet_id.py b/octavia/db/migration/alembic_migrations/versions/4fe8240425b4_update_vip_add_subnet_id.py new file mode 100644 index 0000000000..a5b1459800 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/4fe8240425b4_update_vip_add_subnet_id.py @@ -0,0 +1,33 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""update vip add subnet id + +Revision ID: 4fe8240425b4 +Revises: 48660b6643f0 +Create Date: 2015-07-01 14:27:44.187179 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '4fe8240425b4' +down_revision = '48660b6643f0' + + +def upgrade(): + with op.batch_alter_table('vip') as batch_op: + batch_op.alter_column('network_id', new_column_name='subnet_id', + existing_type=sa.String(36)) diff --git a/octavia/db/migration/alembic_migrations/versions/52377704420e_add_timestamps_to_healthmonitor.py b/octavia/db/migration/alembic_migrations/versions/52377704420e_add_timestamps_to_healthmonitor.py new file mode 100644 index 0000000000..cfba5f2523 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/52377704420e_add_timestamps_to_healthmonitor.py @@ -0,0 +1,55 @@ +# Copyright 2017 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add timestamps and operating_status to healthmonitor + +Revision ID: 52377704420e +Revises: d85ca7258d21 +Create Date: 2017-04-13 08:58:18.078170 + +""" + +from alembic import op +import sqlalchemy as sa + +from octavia.common import constants + +# revision identifiers, used by Alembic. +revision = '52377704420e' +down_revision = 'd85ca7258d21' + + +def upgrade(): + op.add_column( + 'health_monitor', + sa.Column('created_at', sa.DateTime(), nullable=True) + ) + op.add_column( + 'health_monitor', + sa.Column('updated_at', sa.DateTime(), nullable=True) + ) + + op.add_column('health_monitor', + sa.Column('operating_status', + sa.String(16), + nullable=False, + server_default=constants.ONLINE) + ) + op.alter_column('health_monitor', 'operating_status', + existing_type=sa.String(16), server_default=None) + + op.create_foreign_key( + 'fk_health_monitor_operating_status_name', 'health_monitor', + 'operating_status', ['operating_status'], ['name'] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/5309960964f8_add_proxy_protocol_for_pool.py b/octavia/db/migration/alembic_migrations/versions/5309960964f8_add_proxy_protocol_for_pool.py new file mode 100644 index 0000000000..a3aa014b55 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/5309960964f8_add_proxy_protocol_for_pool.py @@ -0,0 +1,46 @@ +# Copyright 2017 EayunStack, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""add proxy protocol for pool + +Revision ID: 5309960964f8 +Revises: 52377704420e +Create Date: 2017-04-27 01:13:38.064697 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + + +# revision identifiers, used by Alembic. +revision = '5309960964f8' +down_revision = '52377704420e' + +new_protocol = 'PROXY' + + +def upgrade(): + insert_table = sql.table( + 'protocol', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': new_protocol} + ] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/543f5d8e4e56_add_a_column_busy_in_table_amphora_health.py b/octavia/db/migration/alembic_migrations/versions/543f5d8e4e56_add_a_column_busy_in_table_amphora_health.py new file mode 100644 index 0000000000..c286dec98f --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/543f5d8e4e56_add_a_column_busy_in_table_amphora_health.py @@ -0,0 +1,32 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
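The operating_status addition above demonstrates the standard two-step for adding a NOT NULL column to a table that already holds rows: create the column with a temporary server_default so existing rows are backfilled, then drop the default so application code must always supply a value. A compressed sketch with hypothetical names:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        # Step 1: the server_default backfills every existing row,
        # which is what makes nullable=False possible here.
        op.add_column('example_table',
                      sa.Column('status', sa.String(16), nullable=False,
                                server_default='ONLINE'))
        # Step 2: remove the default from the table definition.
        op.alter_column('example_table', 'status',
                        existing_type=sa.String(16),
                        server_default=None)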
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Add a column busy in table amphora health + +Revision ID: 543f5d8e4e56 +Revises: 2351ea316465 +Create Date: 2015-07-27 11:32:16.685383 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '543f5d8e4e56' +down_revision = '2351ea316465' + + +def upgrade(): + op.add_column('amphora_health', + sa.Column('busy', sa.Boolean(), nullable=False)) diff --git a/octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redirect_prefix.py b/octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redirect_prefix.py new file mode 100644 index 0000000000..57c4908570 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redirect_prefix.py @@ -0,0 +1,49 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add l7policy action redirect prefix + +Revision ID: 55874a4ceed6 +Revises: 76aacf2e176c +Create Date: 2018-09-09 20:35:38.780054 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. +revision = '55874a4ceed6' +down_revision = '76aacf2e176c' + + +def upgrade(): + # Add column redirect_prefix + op.add_column( + 'l7policy', + sa.Column('redirect_prefix', sa.String(255), nullable=True) + ) + insert_table = sql.table( + 'l7policy_action', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'REDIRECT_PREFIX'} + ] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__infor_in_amphora_table.py b/octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__infor_in_amphora_table.py new file mode 100644 index 0000000000..a00a76a6ae --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__infor_in_amphora_table.py @@ -0,0 +1,37 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
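A detail worth keeping straight in the boolean columns added around here: default= is a Python-side value that SQLAlchemy applies only to INSERTs it issues itself, while server_default= becomes part of the DDL, applies to any client, and backfills rows that exist when the column is added. A sketch of the contrast (hypothetical table and columns):

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        # Python-side only: no DEFAULT clause lands in the schema, and
        # pre-existing rows are not backfilled.
        op.add_column('example_table',
                      sa.Column('flag_a', sa.Boolean(), default=False,
                                nullable=True))
        # DDL-level: DEFAULT FALSE is in the table definition and
        # existing rows receive FALSE as the column is created.
        op.add_column('example_table',
                      sa.Column('flag_b', sa.Boolean(), nullable=False,
                                server_default=sa.sql.expression.false()))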
+"""add cert expiration info in amphora table + +Revision ID: 5a3ee5472c31 +Revises: 3b199c848b96 +Create Date: 2015-08-20 10:15:19.561066 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '5a3ee5472c31' +down_revision = '3b199c848b96' + + +def upgrade(): + op.add_column('amphora', + sa.Column('cert_expiration', sa.DateTime(timezone=True), + nullable=True) + ) + + op.add_column('amphora', sa.Column('cert_busy', sa.Boolean(), + nullable=False, default=False)) diff --git a/octavia/db/migration/alembic_migrations/versions/62816c232310_fix_migration_for_mysql_5_7.py b/octavia/db/migration/alembic_migrations/versions/62816c232310_fix_migration_for_mysql_5_7.py new file mode 100644 index 0000000000..df4a81bd91 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/62816c232310_fix_migration_for_mysql_5_7.py @@ -0,0 +1,33 @@ +# Copyright 2016 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Fix migration for MySQL 5.7 + +Revision ID: 62816c232310 +Revises: 36b94648fef8 +Create Date: 2016-06-07 12:59:21.059619 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '62816c232310' +down_revision = '36b94648fef8' + + +def upgrade(): + op.alter_column('sni', 'tls_container_id', type_=sa.String(128), + existing_type=sa.String(36), nullable=False) diff --git a/octavia/db/migration/alembic_migrations/versions/632152d2d32e_add_http_strict_transport_security_.py b/octavia/db/migration/alembic_migrations/versions/632152d2d32e_add_http_strict_transport_security_.py new file mode 100644 index 0000000000..5aafa88ea6 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/632152d2d32e_add_http_strict_transport_security_.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Add HTTP Strict Transport Security support + +Revision ID: 632152d2d32e +Revises: 0995c26fc506 +Create Date: 2023-04-19 13:36:44.015581 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = '632152d2d32e' +down_revision = '0995c26fc506' + + +def upgrade(): + op.add_column( + 'listener', + sa.Column('hsts_max_age', sa.Integer, nullable=True) + ) + op.add_column( + 'listener', + sa.Column('hsts_include_subdomains', sa.Boolean, nullable=True) + ) + op.add_column( + 'listener', + sa.Column('hsts_preload', sa.Boolean, nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_http_code.py b/octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_http_code.py new file mode 100644 index 0000000000..b974bc996d --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_http_code.py @@ -0,0 +1,36 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add L7policy Redirect http code + +Revision ID: 6742ca1b27c2 +Revises: a7f187cd221f +Create Date: 2018-12-13 09:35:38.780054 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '6742ca1b27c2' +down_revision = 'a7f187cd221f' + + +def upgrade(): + # Add column redirect_http_code + op.add_column( + 'l7policy', + sa.Column('redirect_http_code', sa.Integer(), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/6abb04f24c5_tenant_id_to_project_id.py b/octavia/db/migration/alembic_migrations/versions/6abb04f24c5_tenant_id_to_project_id.py new file mode 100644 index 0000000000..3e61886917 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/6abb04f24c5_tenant_id_to_project_id.py @@ -0,0 +1,40 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tenant id to project id + +Revision ID: 6abb04f24c5 +Revises: 1e4c1d83044c +Create Date: 2015-12-03 15:22:25.390595 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic.
+revision = '6abb04f24c5' +down_revision = '1e4c1d83044c' + + +def upgrade(): + op.alter_column('load_balancer', 'tenant_id', new_column_name='project_id', + existing_type=sa.String(36)) + op.alter_column('listener', 'tenant_id', new_column_name='project_id', + existing_type=sa.String(36)) + op.alter_column('pool', 'tenant_id', new_column_name='project_id', + existing_type=sa.String(36)) + op.alter_column('member', 'tenant_id', new_column_name='project_id', + existing_type=sa.String(36)) + op.add_column('health_monitor', sa.Column('project_id', sa.String(36))) diff --git a/octavia/db/migration/alembic_migrations/versions/6ac558d7fc21_add_prometheus_listener_protocol.py b/octavia/db/migration/alembic_migrations/versions/6ac558d7fc21_add_prometheus_listener_protocol.py new file mode 100644 index 0000000000..0d4e1af9de --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/6ac558d7fc21_add_prometheus_listener_protocol.py @@ -0,0 +1,45 @@ +# Copyright 2021 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Add prometheus listener protocol + +Revision ID: 6ac558d7fc21 +Revises: b8bd389cbae7 +Create Date: 2021-10-01 20:06:46.813842 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. +revision = '6ac558d7fc21' +down_revision = 'b8bd389cbae7' + +new_protocol = 'PROMETHEUS' + + +def upgrade(): + insert_table = sql.table( + 'protocol', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': new_protocol} + ] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/6ffc710674ef_spares_pool_table.py b/octavia/db/migration/alembic_migrations/versions/6ffc710674ef_spares_pool_table.py new file mode 100644 index 0000000000..a7ae9a3438 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/6ffc710674ef_spares_pool_table.py @@ -0,0 +1,35 @@ +# Copyright 2019 Michael Johnson +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Spares pool table + +Revision ID: 6ffc710674ef +Revises: 7432f1d4ea83 +Create Date: 2019-03-11 10:45:43.296236 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
+revision = '6ffc710674ef' +down_revision = '7432f1d4ea83' + + +def upgrade(): + op.create_table( + 'spares_pool', + sa.Column('updated_at', sa.DateTime(), primary_key=True, + server_default=sa.func.current_timestamp())) diff --git a/octavia/db/migration/alembic_migrations/versions/7432f1d4ea83_add_http_host_head_inject_for_http_health_check.py b/octavia/db/migration/alembic_migrations/versions/7432f1d4ea83_add_http_host_head_inject_for_http_health_check.py new file mode 100644 index 0000000000..c15722ea30 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/7432f1d4ea83_add_http_host_head_inject_for_http_health_check.py @@ -0,0 +1,39 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add http host head inject for http health check + +Revision ID: 7432f1d4ea83 +Revises: 6742ca1b27c2 +Create Date: 2018-09-09 20:35:38.780054 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '7432f1d4ea83' +down_revision = '6742ca1b27c2' + + +def upgrade(): + op.add_column( + 'health_monitor', + sa.Column('http_version', sa.Float(), nullable=True) + ) + op.add_column( + 'health_monitor', + sa.Column('domain_name', sa.String(255), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/74aae261694c_extend_pool_for_backend_ca_and_crl.py b/octavia/db/migration/alembic_migrations/versions/74aae261694c_extend_pool_for_backend_ca_and_crl.py new file mode 100644 index 0000000000..c472a4a571 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/74aae261694c_extend_pool_for_backend_ca_and_crl.py @@ -0,0 +1,36 @@ +# Copyright 2019 Rackspace US Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""extend pool for backend CA and CRL + +Revision ID: 74aae261694c +Revises: a1f689aecc1d +Create Date: 2019-02-27 09:22:24.779576 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic.
+revision = '74aae261694c' +down_revision = 'a1f689aecc1d' + + +def upgrade(): + op.add_column('pool', sa.Column('ca_tls_certificate_id', sa.String(255), + nullable=True)) + op.add_column('pool', sa.Column('crl_container_id', sa.String(255), + nullable=True)) diff --git a/octavia/db/migration/alembic_migrations/versions/76aacf2e176c_extend_support_udp_protocol.py b/octavia/db/migration/alembic_migrations/versions/76aacf2e176c_extend_support_udp_protocol.py new file mode 100644 index 0000000000..bbc8edb00b --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/76aacf2e176c_extend_support_udp_protocol.py @@ -0,0 +1,62 @@ +# Copyright 2018 Huawei +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Extend some necessary fields for udp support + +Revision ID: 76aacf2e176c +Revises: ebbcc72b4e5e +Create Date: 2018-01-01 20:47:52.405865 + +""" + + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. +revision = '76aacf2e176c' +down_revision = 'ebbcc72b4e5e' + +tables = ['protocol', 'health_monitor_type'] +new_fields = ['UDP', 'UDP-CONNECT'] + + +def upgrade(): + # New UDP protocol addition. + # New UDP-CONNECT health monitor type addition. + for table, new_field in zip(tables, new_fields): + insert_table = sql.table( + table, + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': new_field} + ] + ) + + # Add two new columns to the session_persistence table + op.add_column('session_persistence', + sa.Column('persistence_timeout', + sa.Integer(), + nullable=True, server_default=None)) + op.add_column('session_persistence', + sa.Column('persistence_granularity', + sa.String(length=64), + nullable=True, server_default=None)) diff --git a/octavia/db/migration/alembic_migrations/versions/7c36b277bfb0_add_listener_ciphers_column.py b/octavia/db/migration/alembic_migrations/versions/7c36b277bfb0_add_listener_ciphers_column.py new file mode 100644 index 0000000000..0c074875be --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/7c36b277bfb0_add_listener_ciphers_column.py @@ -0,0 +1,35 @@ +# Copyright 2020 Dawson Coleman +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add listener ciphers column + +Revision ID: 7c36b277bfb0 +Revises: 8ac4ed24df3a +Create Date: 2020-03-11 02:23:49.097485 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic.
+revision = '7c36b277bfb0' +down_revision = '8ac4ed24df3a' + + +def upgrade(): + op.add_column( + 'listener', + sa.Column('tls_ciphers', sa.String(2048), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/80dba23a159f_tags_support.py b/octavia/db/migration/alembic_migrations/versions/80dba23a159f_tags_support.py new file mode 100644 index 0000000000..5c791cba1c --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/80dba23a159f_tags_support.py @@ -0,0 +1,37 @@ +# Copyright 2018 Huawei +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""tags_support + +Revision ID: 80dba23a159f +Revises: 55874a4ceed6 +Create Date: 2018-10-15 15:29:27.258640 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '80dba23a159f' +down_revision = '55874a4ceed6' + + +def upgrade(): + op.create_table( + 'tags', + sa.Column('resource_id', sa.String(36), primary_key=True, + nullable=False), + sa.Column('tag', sa.String(255), primary_key=True, nullable=False, + index=True), + ) diff --git a/octavia/db/migration/alembic_migrations/versions/82b9402e71fd_update_vip_address_size.py b/octavia/db/migration/alembic_migrations/versions/82b9402e71fd_update_vip_address_size.py new file mode 100644 index 0000000000..05a82d27f3 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/82b9402e71fd_update_vip_address_size.py @@ -0,0 +1,33 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Update vip address size + +Revision ID: 82b9402e71fd +Revises: 4a6ec0ab7284 +Create Date: 2016-07-17 14:36:36.698870 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '82b9402e71fd' +down_revision = '4a6ec0ab7284' + + +def upgrade(): + op.alter_column('vip', 'ip_address', + existing_type=sa.String(64)) diff --git a/octavia/db/migration/alembic_migrations/versions/8ac4ed24df3a_add_availability_zone_to_lb.py b/octavia/db/migration/alembic_migrations/versions/8ac4ed24df3a_add_availability_zone_to_lb.py new file mode 100644 index 0000000000..34664f9eea --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/8ac4ed24df3a_add_availability_zone_to_lb.py @@ -0,0 +1,41 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
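The tags table above keys on the (resource_id, tag) pair simply by flagging both columns primary_key=True; Alembic folds them into one composite primary key, which doubles as the uniqueness guarantee for the association. The same shape on a hypothetical table:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        op.create_table(
            'example_tags',  # hypothetical association table
            # Every column marked primary_key=True joins the composite key.
            sa.Column('resource_id', sa.String(36), primary_key=True,
                      nullable=False),
            sa.Column('tag', sa.String(255), primary_key=True,
                      nullable=False, index=True),
        )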
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add availability_zone to lb + +Revision ID: 8ac4ed24df3a +Revises: c761c8a71579 +Create Date: 2019-11-13 08:37:39.392163 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '8ac4ed24df3a' +down_revision = 'c761c8a71579' + + +def upgrade(): + op.add_column('load_balancer', + sa.Column('availability_zone', + sa.String(255), + nullable=True) + ) + + op.create_foreign_key( + 'fk_load_balancer_availability_zone_name', 'load_balancer', + 'availability_zone', ['availability_zone'], ['name'] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/8b47b2546312_sctp_support.py b/octavia/db/migration/alembic_migrations/versions/8b47b2546312_sctp_support.py new file mode 100644 index 0000000000..8753f2b90b --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/8b47b2546312_sctp_support.py @@ -0,0 +1,46 @@ +# Copyright 2020 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""sctp support + +Revision ID: 8b47b2546312 +Revises: e6ee84f0abf3 +Create Date: 2020-06-26 09:26:45.397873 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + + +# revision identifiers, used by Alembic. +revision = '8b47b2546312' +down_revision = 'e6ee84f0abf3' + + +def upgrade(): + for table in ['protocol', 'health_monitor_type']: + insert_table = sql.table( + table, + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'SCTP'} + ] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/8c0851bdf6c3_change_tls_container_id_length_in_sni_.py b/octavia/db/migration/alembic_migrations/versions/8c0851bdf6c3_change_tls_container_id_length_in_sni_.py new file mode 100644 index 0000000000..ec65f4a406 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/8c0851bdf6c3_change_tls_container_id_length_in_sni_.py @@ -0,0 +1,33 @@ +# Copyright 2016 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
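The availability_zone change above pairs add_column with an explicitly named create_foreign_key; naming the constraint by hand keeps it identical across backends and makes any later drop_constraint deterministic. A sketch of the pairing with hypothetical table names:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        op.add_column('child_table',
                      sa.Column('parent_name', sa.String(255),
                                nullable=True))
        # A hand-picked name avoids relying on backend-generated ones.
        op.create_foreign_key('fk_child_table_parent_name',
                              'child_table', 'parent_table',
                              ['parent_name'], ['name'])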
+ +"""change_tls_container_id_length_in_sni_table + +Revision ID: 8c0851bdf6c3 +Revises: 186509101b9b +Create Date: 2016-03-23 19:08:53.148812 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '8c0851bdf6c3' +down_revision = '186509101b9b' + + +def upgrade(): + op.alter_column('sni', 'tls_container_id', type_=sa.String(128), + existing_type=sa.String(36), nullable=False) diff --git a/octavia/db/migration/alembic_migrations/versions/8db7a6443785_add_member_vnic_type.py b/octavia/db/migration/alembic_migrations/versions/8db7a6443785_add_member_vnic_type.py new file mode 100644 index 0000000000..ef20e9fce0 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/8db7a6443785_add_member_vnic_type.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Add member vnic_type + +Revision ID: 8db7a6443785 +Revises: 3097e55493ae +Create Date: 2024-03-29 20:34:37.263847 + +""" + +from alembic import op +import sqlalchemy as sa + +from octavia.common import constants + +# revision identifiers, used by Alembic. +revision = '8db7a6443785' +down_revision = '3097e55493ae' + + +def upgrade(): + op.add_column( + u'member', + sa.Column(u'vnic_type', sa.String(64), nullable=False, + server_default=constants.VNIC_TYPE_NORMAL) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/92fe9857279_create_healthmanager_table.py b/octavia/db/migration/alembic_migrations/versions/92fe9857279_create_healthmanager_table.py new file mode 100644 index 0000000000..a707bfd8ce --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/92fe9857279_create_healthmanager_table.py @@ -0,0 +1,38 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""create healthmanager table + +Revision ID: 92fe9857279 +Revises: 256852d5ff7c +Create Date: 2015-01-22 16:58:23.440247 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
+revision = '92fe9857279' +down_revision = '256852d5ff7c' + + +def upgrade(): + op.create_table( + 'amphora_health', + sa.Column('amphora_id', sa.String(36), nullable=False, + primary_key=True), + sa.Column('last_update', sa.DateTime(timezone=True), + nullable=False) + + ) diff --git a/octavia/db/migration/alembic_migrations/versions/9b5473976d6d_add_provisioning_status_to_objects.py b/octavia/db/migration/alembic_migrations/versions/9b5473976d6d_add_provisioning_status_to_objects.py new file mode 100644 index 0000000000..82cf9de2e6 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/9b5473976d6d_add_provisioning_status_to_objects.py @@ -0,0 +1,81 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Add provisioning_status to objects + +Revision ID: 9b5473976d6d +Revises: 82b9402e71fd +Create Date: 2016-09-20 21:46:26.843695 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '9b5473976d6d' +down_revision = '82b9402e71fd' + + +def upgrade(): + + op.add_column('health_monitor', + sa.Column('provisioning_status', + sa.String(16), + nullable=True) + ) + op.create_foreign_key( + 'fk_health_monitor_provisioning_status_name', 'health_monitor', + 'provisioning_status', ['provisioning_status'], ['name'] + ) + + op.add_column('l7policy', + sa.Column('provisioning_status', + sa.String(16), + nullable=True) + ) + op.create_foreign_key( + 'fk_l7policy_provisioning_status_name', 'l7policy', + 'provisioning_status', ['provisioning_status'], ['name'] + ) + + op.add_column('l7rule', + sa.Column('provisioning_status', + sa.String(16), + nullable=True) + ) + op.create_foreign_key( + 'fk_l7rule_provisioning_status_name', 'l7rule', + 'provisioning_status', ['provisioning_status'], ['name'] + ) + + op.add_column('member', + sa.Column('provisioning_status', + sa.String(16), + nullable=True) + ) + op.create_foreign_key( + 'fk_member_provisioning_status_name', 'member', + 'provisioning_status', ['provisioning_status'], ['name'] + ) + + op.add_column('pool', + sa.Column('provisioning_status', + sa.String(16), + nullable=True) + ) + op.create_foreign_key( + 'fk_pool_provisioning_status_name', 'pool', + 'provisioning_status', ['provisioning_status'], ['name'] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/9bf4d21caaea_adding_amphora_id_to_listener_.py b/octavia/db/migration/alembic_migrations/versions/9bf4d21caaea_adding_amphora_id_to_listener_.py new file mode 100644 index 0000000000..8e121fddba --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/9bf4d21caaea_adding_amphora_id_to_listener_.py @@ -0,0 +1,55 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
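The provisioning_status migration above repeats an identical add_column/create_foreign_key pair for five tables. Since upgrade() is ordinary Python, the same work could be folded into a loop; a sketch of that equivalent form (the committed revision spells each table out instead):

    import sqlalchemy as sa
    from alembic import op

    TABLES = ('health_monitor', 'l7policy', 'l7rule', 'member', 'pool')

    def upgrade():
        for table in TABLES:
            op.add_column(table,
                          sa.Column('provisioning_status', sa.String(16),
                                    nullable=True))
            op.create_foreign_key(
                f'fk_{table}_provisioning_status_name', table,
                'provisioning_status', ['provisioning_status'], ['name'])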
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""adding Amphora ID to listener_statistics table + +Revision ID: 9bf4d21caaea +Revises: 8c0851bdf6c3 +Create Date: 2016-05-02 07:50:12.888263 + +""" +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '9bf4d21caaea' +down_revision = '8c0851bdf6c3' + + +def upgrade(): + op.add_column('listener_statistics', + sa.Column('amphora_id', + sa.String(36), + nullable=False) + ) + + op.drop_constraint('fk_listener_statistics_listener_id', + 'listener_statistics', + type_='foreignkey') + op.drop_constraint('PRIMARY', + 'listener_statistics', + type_='primary') + + op.create_primary_key('pk_listener_statistics', 'listener_statistics', + ['listener_id', 'amphora_id']) + op.create_foreign_key('fk_listener_statistics_listener_id', + 'listener_statistics', + 'listener', + ['listener_id'], + ['id']) + op.create_foreign_key('fk_listener_statistic_amphora_id', + 'listener_statistics', + 'amphora', + ['amphora_id'], + ['id']) diff --git a/octavia/db/migration/alembic_migrations/versions/a1f689aecc1d_extend_pool_for_support_backend_reencryption.py b/octavia/db/migration/alembic_migrations/versions/a1f689aecc1d_extend_pool_for_support_backend_reencryption.py new file mode 100644 index 0000000000..390390a1a1 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/a1f689aecc1d_extend_pool_for_support_backend_reencryption.py @@ -0,0 +1,35 @@ +# Copyright 2018 Huawei +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Extend pool for support backend re-encryption + +Revision ID: a1f689aecc1d +Revises: 1afc932f1ca2 +Create Date: 2018-10-23 20:47:52.405865 + +""" + + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'a1f689aecc1d' +down_revision = '1afc932f1ca2' + + +def upgrade(): + op.add_column('pool', sa.Column('tls_certificate_id', sa.String(255), + nullable=True)) diff --git a/octavia/db/migration/alembic_migrations/versions/a5762a99609a_add_protocol_in_listener_keys.py b/octavia/db/migration/alembic_migrations/versions/a5762a99609a_add_protocol_in_listener_keys.py new file mode 100644 index 0000000000..c888de618d --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/a5762a99609a_add_protocol_in_listener_keys.py @@ -0,0 +1,35 @@ +# Copyright (c) 2019 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
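The listener_statistics rework above is the usual sequence for changing a primary key on MySQL: drop the foreign keys that depend on the old key, drop the key itself under its fixed MySQL name 'PRIMARY', create the new composite key, then restore the foreign keys. Compressed into a sketch with a hypothetical table:

    from alembic import op

    def upgrade():
        # FKs first: MySQL refuses to drop an index a FK still uses.
        op.drop_constraint('fk_example_stats_parent_id', 'example_stats',
                           type_='foreignkey')
        # On MySQL the primary key constraint is always named 'PRIMARY'.
        op.drop_constraint('PRIMARY', 'example_stats', type_='primary')
        op.create_primary_key('pk_example_stats', 'example_stats',
                              ['parent_id', 'other_id'])
        op.create_foreign_key('fk_example_stats_parent_id',
                              'example_stats', 'parent',
                              ['parent_id'], ['id'])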
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add protocol in listener keys + +Revision ID: a5762a99609a +Revises: 392fb85b4419 +Create Date: 2019-06-28 14:02:11.415292 + +""" + +from alembic import op + +# revision identifiers, used by Alembic. +revision = 'a5762a99609a' +down_revision = '392fb85b4419' + + +def upgrade(): + op.execute("ALTER TABLE `listener` " + "DROP INDEX `uq_listener_load_balancer_id_protocol_port`, " + "ADD UNIQUE KEY " + "`uq_listener_load_balancer_id_protocol_port` " + "(`load_balancer_id`, `protocol`, `protocol_port`)") diff --git a/octavia/db/migration/alembic_migrations/versions/a7f187cd221f_add_tls_boolean_type_for_reencryption.py b/octavia/db/migration/alembic_migrations/versions/a7f187cd221f_add_tls_boolean_type_for_reencryption.py new file mode 100644 index 0000000000..0abbbc0b2c --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/a7f187cd221f_add_tls_boolean_type_for_reencryption.py @@ -0,0 +1,37 @@ +# Copyright 2018 Huawei +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add tls boolean type for backend re-encryption + +Revision ID: a7f187cd221f +Revises: 74aae261694c +Create Date: 2018-11-01 20:47:52.405865 + +""" + + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'a7f187cd221f' +down_revision = '74aae261694c' + + +def upgrade(): + op.add_column('pool', + sa.Column('tls_enabled', sa.Boolean(), + server_default=sa.sql.expression.false(), + nullable=False)) diff --git a/octavia/db/migration/alembic_migrations/versions/b8bd389cbae7_update_default_value_in_l7rule_table.py b/octavia/db/migration/alembic_migrations/versions/b8bd389cbae7_update_default_value_in_l7rule_table.py new file mode 100644 index 0000000000..ba5614e42f --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/b8bd389cbae7_update_default_value_in_l7rule_table.py @@ -0,0 +1,37 @@ +# Copyright 2020 Yovole +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
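The unique-key change at the top of this hunk drops to raw SQL, presumably because the DROP INDEX and ADD UNIQUE KEY must happen inside a single ALTER TABLE: issued as separate statements, MySQL would reject the drop while the foreign key on load_balancer_id still needs an index (that rationale is inferred here, not stated in the patch). The single-statement technique on a hypothetical table:

    from alembic import op

    def upgrade():
        # One ALTER TABLE keeps an index available to the FK throughout.
        op.execute("ALTER TABLE `example` "
                   "DROP INDEX `uq_example_a_b`, "
                   "ADD UNIQUE KEY `uq_example_a_b` (`a`, `b`, `c`)")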
+# + +"""update default value in l7rule table + +Revision ID: b8bd389cbae7 +Revises: 8b47b2546312 +Create Date: 2020-12-03 13:40:00.520336 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'b8bd389cbae7' +down_revision = 'be9fdc039b51' + + +def upgrade(): + op.alter_column( + 'l7rule', + 'enabled', + existing_nullable=False, + server_default=sa.sql.expression.true()) diff --git a/octavia/db/migration/alembic_migrations/versions/b9c703669314_add_flavor_and_flavor_profile_table.py b/octavia/db/migration/alembic_migrations/versions/b9c703669314_add_flavor_and_flavor_profile_table.py new file mode 100644 index 0000000000..618f1011a7 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/b9c703669314_add_flavor_and_flavor_profile_table.py @@ -0,0 +1,53 @@ +# Copyright 2017 Walmart Stores Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add flavor and flavor_profile table + +Revision ID: b9c703669314 +Revises: 4f65b4f91c39 +Create Date: 2018-01-02 16:05:29.745457 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'b9c703669314' +down_revision = '4f65b4f91c39' + + +def upgrade(): + + op.create_table( + 'flavor_profile', + sa.Column('id', sa.String(36), nullable=False), + sa.Column('name', sa.String(255), nullable=False), + sa.Column('provider_name', sa.String(255), nullable=False), + sa.Column('flavor_data', sa.String(4096), nullable=False), + sa.PrimaryKeyConstraint('id')) + + op.create_table( + 'flavor', + sa.Column('id', sa.String(36), nullable=False), + sa.Column('name', sa.String(255), nullable=False), + sa.Column('description', sa.String(255), nullable=True), + sa.Column('enabled', sa.Boolean(), nullable=False), + sa.Column('flavor_profile_id', sa.String(36), nullable=False), + sa.ForeignKeyConstraint(['flavor_profile_id'], + ['flavor_profile.id'], + name='fk_flavor_flavor_profile_id'), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('name', + name='uq_flavor_name'),) diff --git a/octavia/db/migration/alembic_migrations/versions/ba35e0fb88e1_add_backup_field_to_member.py b/octavia/db/migration/alembic_migrations/versions/ba35e0fb88e1_add_backup_field_to_member.py new file mode 100644 index 0000000000..3d2ecc7f87 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/ba35e0fb88e1_add_backup_field_to_member.py @@ -0,0 +1,34 @@ +# Copyright 2016 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add backup field to member + +Revision ID: ba35e0fb88e1 +Revises: 034756a182a2 +Create Date: 2018-03-14 00:46:16.281857 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'ba35e0fb88e1' +down_revision = '034756a182a2' + + +def upgrade(): + op.add_column('member', sa.Column('backup', sa.Boolean(), + nullable=False, default=False)) diff --git a/octavia/db/migration/alembic_migrations/versions/be9fdc039b51_add_pool_alpn_protocols_column.py b/octavia/db/migration/alembic_migrations/versions/be9fdc039b51_add_pool_alpn_protocols_column.py new file mode 100644 index 0000000000..9f78b6fe27 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/be9fdc039b51_add_pool_alpn_protocols_column.py @@ -0,0 +1,35 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add pool alpn protocols column + +Revision ID: be9fdc039b51 +Revises: 8b47b2546312 +Create Date: 2020-09-15 09:30:00.521760 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'be9fdc039b51' +down_revision = '8b47b2546312' + + +def upgrade(): + op.add_column( + 'pool', + sa.Column('alpn_protocols', sa.String(512), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/bf171d0d91c3_amphora_add_cached_zone.py b/octavia/db/migration/alembic_migrations/versions/bf171d0d91c3_amphora_add_cached_zone.py new file mode 100644 index 0000000000..cc8acc7f5d --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/bf171d0d91c3_amphora_add_cached_zone.py @@ -0,0 +1,33 @@ +# Copyright 2017 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add cached_zone to amphora + +Revision ID: bf171d0d91c3 +Revises: 4aeb9e23ad43 +Create Date: 2017-10-06 12:07:34.290451 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
+revision = 'bf171d0d91c3' +down_revision = '4aeb9e23ad43' + + +def upgrade(): + op.add_column('amphora', sa.Column('cached_zone', sa.String(255), + nullable=True)) diff --git a/octavia/db/migration/alembic_migrations/versions/c11292016060_add_request_errors_for_stats.py b/octavia/db/migration/alembic_migrations/versions/c11292016060_add_request_errors_for_stats.py new file mode 100644 index 0000000000..2436ea3d19 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/c11292016060_add_request_errors_for_stats.py @@ -0,0 +1,34 @@ +# Copyright 2016 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""adding request error number to listener_statistics table + +Revision ID: c11292016060 +Revises: 9b5473976d6d +Create Date: 2016-08-12 03:37:38.656962 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'c11292016060' +down_revision = '9b5473976d6d' + + +def upgrade(): + op.add_column('listener_statistics', + sa.Column('request_errors', sa.BigInteger(), + nullable=False, default=0)) diff --git a/octavia/db/migration/alembic_migrations/versions/c761c8a71579_add_availability_zone_table.py b/octavia/db/migration/alembic_migrations/versions/c761c8a71579_add_availability_zone_table.py new file mode 100644 index 0000000000..9a3210fe41 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/c761c8a71579_add_availability_zone_table.py @@ -0,0 +1,71 @@ +# Copyright 2017 Walmart Stores Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add availability_zone table + +Revision ID: c761c8a71579 +Revises: e37941b010db +Create Date: 2019-11-11 18:53:15.428386 + +""" + +from alembic import op +import sqlalchemy as sa + +from octavia.common import constants + +# revision identifiers, used by Alembic. 
+revision = 'c761c8a71579' +down_revision = 'e37941b010db' + + +def upgrade(): + azp_table = op.create_table( + 'availability_zone_profile', + sa.Column('id', sa.String(36), nullable=False), + sa.Column('name', sa.String(255), nullable=False), + sa.Column('provider_name', sa.String(255), nullable=False), + sa.Column('availability_zone_data', sa.String(4096), nullable=False), + sa.PrimaryKeyConstraint('id')) + + op.bulk_insert( + azp_table, + [ + {'id': constants.NIL_UUID, 'name': 'DELETED-PLACEHOLDER', + 'provider_name': 'DELETED', 'availability_zone_data': '{}'}, + ] + ) + + az_table = op.create_table( + 'availability_zone', + sa.Column('name', sa.String(255), nullable=False), + sa.Column('description', sa.String(255), nullable=True), + sa.Column('enabled', sa.Boolean(), nullable=False), + sa.Column('availability_zone_profile_id', sa.String(36), + nullable=False), + sa.ForeignKeyConstraint(['availability_zone_profile_id'], + ['availability_zone_profile.id'], + name='fk_az_az_profile_id'), + sa.PrimaryKeyConstraint('name'),) + + op.bulk_insert( + az_table, + [ + {'name': constants.NIL_UUID, + 'description': 'Placeholder for DELETED LBs with DELETED ' + 'availability zones', + 'enabled': False, + 'availability_zone_profile_id': constants.NIL_UUID} + ] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/d3c8a090f3de_add_pool_tls_versions_column.py b/octavia/db/migration/alembic_migrations/versions/d3c8a090f3de_add_pool_tls_versions_column.py new file mode 100644 index 0000000000..7bbc659009 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/d3c8a090f3de_add_pool_tls_versions_column.py @@ -0,0 +1,35 @@ +# Copyright 2020 Dawson Coleman +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add pool tls versions column + +Revision ID: d3c8a090f3de +Revises: e5493ae5f9a7 +Create Date: 2020-04-21 13:17:10.861932 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'd3c8a090f3de' +down_revision = 'e5493ae5f9a7' + + +def upgrade(): + op.add_column( + 'pool', + sa.Column('tls_versions', sa.String(512), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/d85ca7258d21_modernize_l7rule.py b/octavia/db/migration/alembic_migrations/versions/d85ca7258d21_modernize_l7rule.py new file mode 100644 index 0000000000..b61c67b6c9 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/d85ca7258d21_modernize_l7rule.py @@ -0,0 +1,71 @@ +# Copyright 2017 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""modernize l7rule + +Revision ID: d85ca7258d21 +Revises: 034b2dc2f3e0 +Create Date: 2017-04-04 06:26:55.287198 + +""" + +from alembic import op +import sqlalchemy as sa + +from octavia.common import constants + +# revision identifiers, used by Alembic. +revision = 'd85ca7258d21' +down_revision = '034b2dc2f3e0' + + +def upgrade(): + # Add timing data + op.add_column( + 'l7rule', + sa.Column('created_at', sa.DateTime(), nullable=True) + ) + op.add_column( + 'l7rule', + sa.Column('updated_at', sa.DateTime(), nullable=True) + ) + + # Add project_id + op.add_column( + 'l7rule', + sa.Column('project_id', sa.String(36), nullable=True) + ) + + # Add enabled + op.add_column( + 'l7rule', + sa.Column('enabled', sa.Boolean(), + server_default=sa.sql.expression.true(), + nullable=False) + ) + + # Add new operating_status column, setting existing rows to ONLINE + op.add_column( + 'l7rule', + sa.Column('operating_status', sa.String(16), + nullable=False, server_default=constants.ONLINE) + ) + # Remove the default, as we don't actually want one + op.alter_column('l7rule', 'operating_status', + existing_type=sa.String(16), server_default=None) + # Add the foreign key for operating_status_name + op.create_foreign_key( + 'fk_l7rule_operating_status_name', 'l7rule', + 'operating_status', ['operating_status'], ['name'] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/da371b422669_allowed_cidr_for_listeners.py b/octavia/db/migration/alembic_migrations/versions/da371b422669_allowed_cidr_for_listeners.py new file mode 100644 index 0000000000..997fab164a --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/da371b422669_allowed_cidr_for_listeners.py @@ -0,0 +1,40 @@ +# Copyright 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Add CIDRs for listeners + +Revision ID: da371b422669 +Revises: a5762a99609a +Create Date: 2018-11-22 12:31:39.864238 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'da371b422669' +down_revision = 'a5762a99609a' + + +def upgrade(): + op.create_table( + 'listener_cidr', + sa.Column('listener_id', sa.String(36), nullable=False), + sa.Column('cidr', sa.String(64), nullable=False), + + sa.ForeignKeyConstraint(['listener_id'], + ['listener.id'], + name='fk_listener_cidr_listener_id'), + sa.PrimaryKeyConstraint('listener_id', 'cidr') + ) diff --git a/octavia/db/migration/alembic_migrations/versions/db2a73e82626_add_vnic_type_for_vip.py b/octavia/db/migration/alembic_migrations/versions/db2a73e82626_add_vnic_type_for_vip.py new file mode 100644 index 0000000000..dbe563b891 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/db2a73e82626_add_vnic_type_for_vip.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Add vnic_type for VIP + +Revision ID: db2a73e82626 +Revises: 632152d2d32e +Create Date: 2023-11-09 21:57:05.302435 + +""" + +from alembic import op +import sqlalchemy as sa + +from octavia.common import constants + +# revision identifiers, used by Alembic. +revision = 'db2a73e82626' +down_revision = '632152d2d32e' + + +def upgrade(): + op.add_column( + u'vip', + sa.Column(u'vnic_type', sa.String(64), nullable=False, + server_default=constants.VNIC_TYPE_NORMAL) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/dcf88e59aae4_add_lb_algorithm_source_ip_port.py b/octavia/db/migration/alembic_migrations/versions/dcf88e59aae4_add_lb_algorithm_source_ip_port.py new file mode 100644 index 0000000000..5ceff64f3b --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/dcf88e59aae4_add_lb_algorithm_source_ip_port.py @@ -0,0 +1,67 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add LB_ALGORITHM_SOURCE_IP_PORT + +Revision ID: dcf88e59aae4 +Revises: da371b422669 +Create Date: 2019-07-23 12:50:49.722003 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
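+# Besides inserting the SOURCE_IP_PORT row into the algorithm lookup +# table, migrate_pools() below rewrites lb_algorithm to SOURCE_IP_PORT for +# every pool that belongs to a load balancer using the 'ovn' provider.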
+revision = 'dcf88e59aae4' +down_revision = 'da371b422669' + + +def migrate_pools(): + conn = op.get_bind() + lb_table = sa.sql.table( + 'load_balancer', + sa.sql.column('id', sa.String), + sa.sql.column('provider', sa.String), + sa.sql.column('provisioning_status', sa.String)) + pool_table = sa.sql.table( + 'pool', + sa.sql.column('id', sa.String), + sa.sql.column('load_balancer_id', sa.String), + sa.sql.column('lb_algorithm', sa.String)) + + j = pool_table.join(lb_table, + pool_table.c.load_balancer_id == lb_table.c.id) + stmt = sa.select(pool_table.c.id).select_from(j).where( + lb_table.c.provider == 'ovn') + result = conn.execute(stmt) + + for row in result: + stmt = pool_table.update().values(lb_algorithm='SOURCE_IP_PORT').where( + pool_table.c.id == row[0]) + op.execute(stmt) + + +def upgrade(): + insert_table = sa.table( + 'algorithm', + sa.column('name', sa.String(255)), + ) + op.bulk_insert( + insert_table, + [ + {'name': 'SOURCE_IP_PORT'} + ] + ) + migrate_pools() diff --git a/octavia/db/migration/alembic_migrations/versions/e37941b010db_add_lb_flavor_constraint.py b/octavia/db/migration/alembic_migrations/versions/e37941b010db_add_lb_flavor_constraint.py new file mode 100644 index 0000000000..63b7745e70 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/e37941b010db_add_lb_flavor_constraint.py @@ -0,0 +1,78 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Add lb flavor ID constraint + +Revision ID: e37941b010db +Revises: dcf88e59aae4 +Create Date: 2019-10-31 10:09:37.869653 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +from octavia.common import constants + +# revision identifiers, used by Alembic. +revision = 'e37941b010db' +down_revision = 'dcf88e59aae4' + + +def upgrade(): + insert_table = sql.table( + 'flavor_profile', + sa.Column('id', sa.String(36), nullable=False), + sa.Column('name', sa.String(255), nullable=False), + sa.Column('provider_name', sa.String(255), nullable=False), + sa.Column('flavor_data', sa.String(4096), nullable=False), + ) + + op.bulk_insert( + insert_table, + [ + {'id': constants.NIL_UUID, 'name': 'DELETED-PLACEHOLDER', + 'provider_name': 'DELETED', 'flavor_data': '{}'}, + ] + ) + + insert_table = sql.table( + 'flavor', + sa.Column('id', sa.String(36), nullable=False), + sa.Column('name', sa.String(255), nullable=False), + sa.Column('description', sa.String(255), nullable=True), + sa.Column('enabled', sa.Boolean(), nullable=False), + sa.Column('flavor_profile_id', sa.String(36), nullable=False), + ) + + op.bulk_insert( + insert_table, + [ + {'id': constants.NIL_UUID, 'name': 'DELETED-PLACEHOLDER', + 'description': 'Placeholder for DELETED LBs with DELETED flavors', + 'enabled': False, 'flavor_profile_id': constants.NIL_UUID} + ] + ) + + # Make sure any existing load balancers with invalid flavor_id + # map to a valid flavor. + # Note: constant is not used here to not trigger security tool errors. 
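+ # The hard-coded UUID below is the NIL_UUID placeholder inserted above; + # the LEFT JOIN finds load balancers whose flavor_id no longer resolves + # to a flavor row and repoints them at the placeholder before the + # foreign key constraint is created.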
+ op.execute("UPDATE load_balancer LEFT JOIN flavor ON " + "load_balancer.flavor_id = flavor.id SET " + "load_balancer.flavor_id = " + "'00000000-0000-0000-0000-000000000000' WHERE " + "flavor.id IS NULL and load_balancer.flavor_id IS NOT NULL") + + op.create_foreign_key('fk_loadbalancer_flavor_id', 'load_balancer', + 'flavor', ['flavor_id'], ['id']) diff --git a/octavia/db/migration/alembic_migrations/versions/e5493ae5f9a7_add_listener_tls_versions_column.py b/octavia/db/migration/alembic_migrations/versions/e5493ae5f9a7_add_listener_tls_versions_column.py new file mode 100644 index 0000000000..d39f3527c4 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/e5493ae5f9a7_add_listener_tls_versions_column.py @@ -0,0 +1,35 @@ +# Copyright 2020 Dawson Coleman +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add listener tls versions column + +Revision ID: e5493ae5f9a7 +Revises: fbd705961c3a +Create Date: 2020-04-19 02:35:28.502424 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'e5493ae5f9a7' +down_revision = 'fbd705961c3a' + + +def upgrade(): + op.add_column( + 'listener', + sa.Column('tls_versions', sa.String(512), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/e6672bda93bf_add_ping_and_tlshello_monitor_types.py b/octavia/db/migration/alembic_migrations/versions/e6672bda93bf_add_ping_and_tlshello_monitor_types.py new file mode 100644 index 0000000000..bd8ec82684 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/e6672bda93bf_add_ping_and_tlshello_monitor_types.py @@ -0,0 +1,45 @@ +# Copyright 2017 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add ping and tls-hello monitor types + +Revision ID: e6672bda93bf +Revises: 27e54d00c3cd +Create Date: 2017-06-21 16:13:09.615651 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. 
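+# No schema change is needed for the new monitor types; PING and +# TLS-HELLO only require extra rows in the health_monitor_type lookup +# table.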
+revision = 'e6672bda93bf' +down_revision = '27e54d00c3cd' + + +def upgrade(): + insert_table = sql.table( + 'health_monitor_type', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'PING'}, + {'name': 'TLS-HELLO'} + ] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/e6ee84f0abf3_add_proxy_v2_pool_protocol.py b/octavia/db/migration/alembic_migrations/versions/e6ee84f0abf3_add_proxy_v2_pool_protocol.py new file mode 100644 index 0000000000..41a2e70a0c --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/e6ee84f0abf3_add_proxy_v2_pool_protocol.py @@ -0,0 +1,43 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Add PROXY v2 pool protocol + +Revision ID: e6ee84f0abf3 +Revises: 2ab994dd3ec2 +Create Date: 2020-08-24 11:12:46.745185 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + + +# revision identifiers, used by Alembic. +revision = 'e6ee84f0abf3' +down_revision = '2ab994dd3ec2' + + +def upgrade(): + insert_table = sql.table( + 'protocol', + sql.column('name', sa.String), + sql.column('description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'PROXYV2'} + ] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/ebbcc72b4e5e_add_octavia_owned_vip_column_to_vip_.py b/octavia/db/migration/alembic_migrations/versions/ebbcc72b4e5e_add_octavia_owned_vip_column_to_vip_.py new file mode 100644 index 0000000000..f13149e0b0 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/ebbcc72b4e5e_add_octavia_owned_vip_column_to_vip_.py @@ -0,0 +1,34 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Add Octavia owned VIP column to VIP table + +Revision ID: ebbcc72b4e5e +Revises: 0f242cf02c74 +Create Date: 2018-07-09 17:25:30.137527 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
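+# octavia_owned is added as a nullable Boolean so existing vip rows do not +# need to be backfilled when the column is created.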
+revision = 'ebbcc72b4e5e' +down_revision = '0f242cf02c74' + + +def upgrade(): + op.add_column( + 'vip', + sa.Column('octavia_owned', sa.Boolean(), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/f21ae3f21adc_add_client_auth_option.py b/octavia/db/migration/alembic_migrations/versions/f21ae3f21adc_add_client_auth_option.py new file mode 100644 index 0000000000..1436b273cd --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/f21ae3f21adc_add_client_auth_option.py @@ -0,0 +1,61 @@ +# Copyright 2018 Huawei +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add Client Auth options + +Revision ID: f21ae3f21adc +Revises: 2ad093f6353f +Create Date: 2018-10-01 20:47:52.405865 + +""" + + +from alembic import op +import sqlalchemy as sa + +from octavia.common import constants + +# revision identifiers, used by Alembic. +revision = 'f21ae3f21adc' +down_revision = '2ad093f6353f' + + +def upgrade(): + op.create_table( + 'client_authentication_mode', + sa.Column('name', sa.String(10), primary_key=True), + ) + + # Create temporary table for table data seeding + insert_table = sa.table( + 'client_authentication_mode', + sa.column('name', sa.String), + ) + + op.bulk_insert( + insert_table, + [ + {'name': constants.CLIENT_AUTH_NONE}, + {'name': constants.CLIENT_AUTH_OPTIONAL}, + {'name': constants.CLIENT_AUTH_MANDATORY} + ] + ) + + op.add_column( + 'listener', + sa.Column('client_authentication', sa.String(10), + sa.ForeignKey('client_authentication_mode.name'), + server_default=constants.CLIENT_AUTH_NONE, nullable=False) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/fabf4983846b_add_member_port_table.py b/octavia/db/migration/alembic_migrations/versions/fabf4983846b_add_member_port_table.py new file mode 100644 index 0000000000..c37862776e --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/fabf4983846b_add_member_port_table.py @@ -0,0 +1,40 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add_member_port_table + +Revision ID: fabf4983846b +Revises: 8db7a6443785 +Create Date: 2024-08-30 23:12:01.713217 + +""" +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
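+# amphora_member_port links member ports to amphorae: port_id is the +# primary key, and amphora_id is indexed and constrained by the foreign +# key to the amphora table created below.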
+revision = 'fabf4983846b' +down_revision = '8db7a6443785' + + +def upgrade(): + op.create_table( + 'amphora_member_port', + sa.Column('port_id', sa.String(36), primary_key=True), + sa.Column('amphora_id', sa.String(36), nullable=False, index=True), + sa.Column('network_id', sa.String(36)), + sa.Column('created_at', sa.DateTime()), + sa.Column('updated_at', sa.DateTime()) + ) + op.create_foreign_key( + 'fk_member_port_amphora_id', 'amphora_member_port', + 'amphora', ['amphora_id'], ['id'] + ) diff --git a/octavia/db/migration/alembic_migrations/versions/fac584114642_.py b/octavia/db/migration/alembic_migrations/versions/fac584114642_.py new file mode 100644 index 0000000000..fa5d3894c5 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/fac584114642_.py @@ -0,0 +1,56 @@ +# Copyright 2017 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add ID column to Healthmonitor table + +Revision ID: fac584114642 +Revises: fc5582da7d8a +Create Date: 2017-02-07 20:47:52.405865 + +""" + + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'fac584114642' +down_revision = 'fc5582da7d8a' + + +def upgrade(): + op.add_column('health_monitor', + sa.Column('id', + sa.String(length=36), + nullable=True, + )) + + op.drop_constraint('fk_health_monitor_pool_id', + 'health_monitor', + type_='foreignkey',) + + op.execute("UPDATE health_monitor SET id = pool_id") + + op.execute("ALTER TABLE health_monitor MODIFY id varchar(36) NOT NULL") + + op.execute("ALTER TABLE health_monitor DROP PRIMARY KEY," + "ADD PRIMARY KEY(id);") + + op.create_foreign_key('fk_health_monitor_pool_id', 'health_monitor', + 'pool', ['pool_id'], ['id']) + + op.create_index('uq_health_monitor_pool', + 'health_monitor', ['pool_id'], + unique=True) diff --git a/octavia/db/migration/alembic_migrations/versions/fbd705961c3a_add_pool_ciphers_column.py b/octavia/db/migration/alembic_migrations/versions/fbd705961c3a_add_pool_ciphers_column.py new file mode 100644 index 0000000000..7679e920e7 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/fbd705961c3a_add_pool_ciphers_column.py @@ -0,0 +1,35 @@ +# Copyright 2020 Dawson Coleman +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add pool ciphers column + +Revision ID: fbd705961c3a +Revises: 7c36b277bfb0 +Create Date: 2020-03-31 14:19:25.280946 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
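+# tls_ciphers is sized at 2048 characters so a complete cipher list string +# fits in a single column.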
+revision = 'fbd705961c3a' +down_revision = '7c36b277bfb0' + + +def upgrade(): + op.add_column( + 'pool', + sa.Column('tls_ciphers', sa.String(2048), nullable=True) + ) diff --git a/octavia/db/migration/alembic_migrations/versions/fc5582da7d8a_create_amphora_build_rate_limit_tables.py b/octavia/db/migration/alembic_migrations/versions/fc5582da7d8a_create_amphora_build_rate_limit_tables.py new file mode 100644 index 0000000000..832d4e8799 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/fc5582da7d8a_create_amphora_build_rate_limit_tables.py @@ -0,0 +1,64 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""create_amphora_build_rate_limit_tables + +Revision ID: fc5582da7d8a +Revises: 443fe6676637 +Create Date: 2016-04-07 19:42:28.171902 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + +# revision identifiers, used by Alembic. +revision = 'fc5582da7d8a' +down_revision = '443fe6676637' + + +def upgrade(): + op.create_table( + 'amphora_build_slots', + sa.Column('id', sa.Integer(), primary_key=True), + sa.Column('slots_used', sa.Integer(), default=0) + ) + + # Create temporary table for table data seeding + insert_table = sql.table( + 'amphora_build_slots', + sql.column('id', sa.Integer), + sql.column('slots_used', sa.Integer) + ) + + op.bulk_insert( + insert_table, + [ + {'id': 1, 'slots_used': 0} + ] + ) + + op.create_table( + 'amphora_build_request', + sa.Column('amphora_id', sa.String(36), nullable=True, + primary_key=True), + sa.Column('priority', sa.Integer()), + sa.Column('created_time', sa.DateTime(timezone=True), nullable=False), + sa.Column('status', sa.String(16), default='WAITING', nullable=False) + ) + + +def downgrade(): + pass diff --git a/octavia/db/migration/alembic_migrations/versions/ffad172e98c1_add_certificate_revoke_list_option.py b/octavia/db/migration/alembic_migrations/versions/ffad172e98c1_add_certificate_revoke_list_option.py new file mode 100644 index 0000000000..fce291f147 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/ffad172e98c1_add_certificate_revoke_list_option.py @@ -0,0 +1,36 @@ +# Copyright 2018 Huawei +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add certificate revocation list field + +Revision ID: ffad172e98c1 +Revises: f21ae3f21adc +Create Date: 2018-10-01 20:47:52.405865 + +""" + + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
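+# client_crl_container_id stores the reference to the certificate +# revocation list container used when validating client certificates.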
+revision = 'ffad172e98c1' +down_revision = 'f21ae3f21adc' + + +def upgrade(): + op.add_column('listener', + sa.Column('client_crl_container_id', sa.String(255), + nullable=True)) diff --git a/octavia/db/migration/cli.py b/octavia/db/migration/cli.py new file mode 100644 index 0000000000..bb37fee812 --- /dev/null +++ b/octavia/db/migration/cli.py @@ -0,0 +1,156 @@ +# Copyright (c) 2016 Catalyst IT Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from alembic import command as alembic_cmd +from alembic import config as alembic_cfg +from alembic import util as alembic_u +from oslo_config import cfg +from oslo_db import options +from oslo_log import log + +from octavia.controller.worker.v2 import taskflow_jobboard_driver +from octavia.i18n import _ + +CONF = cfg.CONF +options.set_defaults(CONF) +# Setting explicitly here needed for taskflow persistence successful +# initialization +options.set_defaults(CONF, max_pool_size=10, max_overflow=20, + pool_timeout=10) +log.set_defaults() +log.register_options(CONF) +log.setup(CONF, 'octavia-db-manage') + + +def do_alembic_command(config, cmd, *args, **kwargs): + try: + getattr(alembic_cmd, cmd)(config, *args, **kwargs) + except alembic_u.CommandError as e: + alembic_u.err(str(e)) + + +def do_check_migration(config, _cmd): + do_alembic_command(config, 'branches') + + +def add_alembic_subparser(sub, cmd): + return sub.add_parser(cmd, help=getattr(alembic_cmd, cmd).__doc__) + + +def do_upgrade(config, cmd): + if not CONF.command.revision and not CONF.command.delta: + raise SystemExit(_('You must provide a revision or relative delta')) + + revision = CONF.command.revision or '' + if '-' in revision: + raise SystemExit(_('Negative relative revision (downgrade) not ' + 'supported')) + + delta = CONF.command.delta + + if delta: + if '+' in revision: + raise SystemExit(_('Use either --delta or relative revision, ' + 'not both')) + if delta < 0: + raise SystemExit(_('Negative delta (downgrade) not supported')) + revision = '%s+%d' % (revision, delta) + + do_alembic_command(config, cmd, revision, sql=CONF.command.sql) + + +def no_downgrade(config, cmd): + raise SystemExit(_("Downgrade no longer supported")) + + +def do_stamp(config, cmd): + do_alembic_command(config, cmd, + CONF.command.revision, + sql=CONF.command.sql) + + +def do_revision(config, cmd): + do_alembic_command(config, cmd, + message=CONF.command.message, + autogenerate=CONF.command.autogenerate, + sql=CONF.command.sql) + + +def do_persistence_upgrade(config, cmd): + opt = cfg.StrOpt('persistence_connection', + default='sqlite://') + cfg.CONF.register_opts([opt], group='task_flow') + persistence = taskflow_jobboard_driver.MysqlPersistenceDriver() + persistence.initialize() + + +def add_command_parsers(subparsers): + for name in ['current', 'history', 'branches']: + parser = add_alembic_subparser(subparsers, name) + parser.set_defaults(func=do_alembic_command) + + help_text = (getattr(alembic_cmd, 'branches').__doc__ + + ' and validate head file') + parser = 
subparsers.add_parser('check_migration', help=help_text) + parser.set_defaults(func=do_check_migration) + + parser = add_alembic_subparser(subparsers, 'upgrade') + parser.add_argument('--delta', type=int) + parser.add_argument('--sql', action='http://github.com/store_true') + parser.add_argument('revision', nargs='?') + parser.set_defaults(func=do_upgrade) + + parser = subparsers.add_parser( + "upgrade_persistence", + help="Run migrations for persistence backend") + parser.set_defaults(func=do_persistence_upgrade) + + parser = subparsers.add_parser('downgrade', help="(No longer supported)") + parser.add_argument('None', nargs='?', help="Downgrade not supported") + parser.set_defaults(func=no_downgrade) + + parser = add_alembic_subparser(subparsers, 'stamp') + parser.add_argument('--sql', action='http://github.com/store_true') + parser.add_argument('revision') + parser.set_defaults(func=do_stamp) + + parser = add_alembic_subparser(subparsers, 'revision') + parser.add_argument('-m', '--message') + parser.add_argument('--autogenerate', action='http://github.com/store_true') + parser.add_argument('--sql', action='http://github.com/store_true') + parser.set_defaults(func=do_revision) + + +command_opt = cfg.SubCommandOpt('command', + title='Command', + help='Available commands', + handler=add_command_parsers) + +CONF.register_cli_opt(command_opt) + + +def main(): + config = alembic_cfg.Config( + os.path.join(os.path.dirname(__file__), 'alembic.ini') + ) + config.set_main_option('script_location', + 'octavia.db.migration:alembic_migrations') + # attach the octavia conf to the Alembic conf + config.octavia_config = CONF + + CONF(project='octavia') + CONF.command.func(config, CONF.command.name) diff --git a/octavia/db/models.py b/octavia/db/models.py new file mode 100644 index 0000000000..34efd2d669 --- /dev/null +++ b/octavia/db/models.py @@ -0,0 +1,1020 @@ +# Copyright 2014 Rackspace +# Copyright 2016 Blue Box, an IBM Company +# Copyright 2017 Walmart Stores Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
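+ +# SQLAlchemy models for the Octavia database. Each class below maps to a +# table created by the Alembic migrations above; status, protocol, +# algorithm and similar enumerations are modeled with +# base_models.LookupTableMixin. A rough usage sketch (the session helper +# named here is an assumption for illustration, not part of this module): +# +# from octavia.db import api as db_api +# session = db_api.get_session() +# lbs = session.query(LoadBalancer).filter_by(enabled=True).all()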
+ + +from oslo_db.sqlalchemy import models +import sqlalchemy as sa +from sqlalchemy.ext import orderinglist +from sqlalchemy import orm +from sqlalchemy.orm import Mapped +from sqlalchemy.orm import validates +from sqlalchemy.sql import func +from sqlalchemy_utils import ScalarListType + +from octavia.api.v2.types import amphora +from octavia.api.v2.types import availability_zone_profile +from octavia.api.v2.types import availability_zones +from octavia.api.v2.types import flavor_profile +from octavia.api.v2.types import flavors +from octavia.api.v2.types import health_monitor +from octavia.api.v2.types import l7policy +from octavia.api.v2.types import l7rule +from octavia.api.v2.types import listener +from octavia.api.v2.types import load_balancer +from octavia.api.v2.types import member +from octavia.api.v2.types import pool +from octavia.api.v2.types import quotas +from octavia.common import constants +from octavia.common import data_models +from octavia.db import base_models +from octavia.i18n import _ + + +class ProvisioningStatus(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "provisioning_status" + + +class OperatingStatus(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "operating_status" + + +class Protocol(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "protocol" + + +class Algorithm(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "algorithm" + + +class AmphoraRoles(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "amphora_roles" + + +class LBTopology(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "lb_topology" + + +class SessionPersistenceType(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "session_persistence_type" + + +class HealthMonitorType(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "health_monitor_type" + + +class VRRPAuthMethod(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "vrrp_auth_method" + + +class L7RuleType(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "l7rule_type" + + +class L7RuleCompareType(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "l7rule_compare_type" + + +class L7PolicyAction(base_models.BASE, base_models.LookupTableMixin): + + __tablename__ = "l7policy_action" + + +class AmphoraBuildSlots(base_models.BASE): + + __tablename__ = "amphora_build_slots" + + id = sa.Column(sa.Integer(), primary_key=True) + slots_used = sa.Column(sa.Integer()) + + +class AmphoraBuildRequest(base_models.BASE): + + __tablename__ = "amphora_build_request" + + amphora_id = sa.Column(sa.String(36), nullable=True, primary_key=True) + priority = sa.Column(sa.Integer()) + created_time = sa.Column(sa.DateTime, default=func.now(), nullable=False) + status = sa.Column(sa.String(16), default='WAITING', nullable=False) + + +class SessionPersistence(base_models.BASE): + + __data_model__ = data_models.SessionPersistence + + __tablename__ = "session_persistence" + + pool_id = sa.Column( + sa.String(36), + sa.ForeignKey("pool.id", name="fk_session_persistence_pool_id"), + nullable=False, + primary_key=True) + type = sa.Column( + sa.String(36), + sa.ForeignKey( + "session_persistence_type.name", + name="fk_session_persistence_session_persistence_type_name"), + nullable=False) + cookie_name = sa.Column(sa.String(255), nullable=True) + persistence_timeout = sa.Column(sa.Integer(), nullable=True) + persistence_granularity = sa.Column(sa.String(64), 
nullable=True) + pool = orm.relationship("Pool", uselist=False, + back_populates="session_persistence") + + +class ListenerStatistics(base_models.BASE): + + __data_model__ = data_models.ListenerStatistics + + __tablename__ = "listener_statistics" + + listener_id = sa.Column( + sa.String(36), + primary_key=True, + nullable=False) + amphora_id = sa.Column( + sa.String(36), + primary_key=True, + nullable=False) + bytes_in = sa.Column(sa.BigInteger, nullable=False) + bytes_out = sa.Column(sa.BigInteger, nullable=False) + active_connections = sa.Column(sa.Integer, nullable=False) + total_connections = sa.Column(sa.BigInteger, nullable=False) + request_errors = sa.Column(sa.BigInteger, nullable=False) + + @validates('bytes_in', 'bytes_out', + 'active_connections', 'total_connections', + 'request_errors') + def validate_non_negative_int(self, key, value): + if value < 0: + data = {'key': key, 'value': value} + raise ValueError(_('The %(key)s field can not have ' + 'negative value. ' + 'Current value is %(value)d.') % data) + return value + + def __iadd__(self, other): + if isinstance(other, (ListenerStatistics, + data_models.ListenerStatistics)): + self.bytes_in += other.bytes_in + self.bytes_out += other.bytes_out + self.request_errors += other.request_errors + self.total_connections += other.total_connections + else: + raise TypeError( # noqa: O342 + "unsupported operand type(s) for +=: '{}' and '{}'".format( + type(self), type(other))) + + return self + + +class Member(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, + models.TimestampMixin, base_models.NameMixin, + base_models.TagMixin): + + __data_model__ = data_models.Member + + __tablename__ = "member" + + __v2_wsme__ = member.MemberResponse + + __table_args__ = ( + sa.UniqueConstraint('pool_id', 'ip_address', 'protocol_port', + name='uq_member_pool_id_address_protocol_port'), + ) + + pool_id = sa.Column( + sa.String(36), + sa.ForeignKey("pool.id", name="fk_member_pool_id"), + nullable=False) + subnet_id = sa.Column(sa.String(36), nullable=True) + ip_address = sa.Column('ip_address', sa.String(64), nullable=False) + protocol_port = sa.Column(sa.Integer, nullable=False) + weight = sa.Column(sa.Integer, nullable=True) + backup = sa.Column(sa.Boolean(), nullable=False) + monitor_address = sa.Column(sa.String(64), nullable=True) + monitor_port = sa.Column(sa.Integer, nullable=True) + provisioning_status = sa.Column( + sa.String(16), + sa.ForeignKey("provisioning_status.name", + name="fk_member_provisioning_status_name"), + nullable=False) + operating_status = sa.Column( + sa.String(16), + sa.ForeignKey("operating_status.name", + name="fk_member_operating_status_name"), + nullable=False) + enabled = sa.Column(sa.Boolean(), nullable=False) + pool = orm.relationship("Pool", back_populates="members") + vnic_type = sa.Column(sa.String(64), nullable=True) + + _tags = orm.relationship( + 'Tags', + single_parent=True, + lazy='subquery', + cascade='all,delete-orphan', + primaryjoin='and_(foreign(Tags.resource_id)==Member.id)', + overlaps='_tags' + ) + + def __str__(self): + return (f"Member(id={self.id!r}, name={self.name!r}, " + f"project_id={self.project_id!r}, " + f"provisioning_status={self.provisioning_status!r}, " + f"ip_address={self.ip_address!r}, " + f"protocol_port={self.protocol_port!r}, " + f"operating_status={self.operating_status!r}, " + f"weight={self.weight!r}, vnic_type={self.vnic_type!r})") + + +class HealthMonitor(base_models.BASE, base_models.IdMixin, + base_models.ProjectMixin, models.TimestampMixin, + 
base_models.NameMixin, base_models.TagMixin): + + __data_model__ = data_models.HealthMonitor + + __tablename__ = "health_monitor" + + __v2_wsme__ = health_monitor.HealthMonitorResponse + + __table_args__ = ( + sa.UniqueConstraint('pool_id', + name='uq_health_monitor_pool'), + ) + + type = sa.Column( + sa.String(36), + sa.ForeignKey("health_monitor_type.name", + name="fk_health_monitor_health_monitor_type_name"), + nullable=False) + pool_id = sa.Column( + sa.String(36), + sa.ForeignKey("pool.id", name="fk_health_monitor_pool_id"), + nullable=False) + delay = sa.Column(sa.Integer, nullable=False) + timeout = sa.Column(sa.Integer, nullable=False) + fall_threshold = sa.Column(sa.Integer, nullable=False) + rise_threshold = sa.Column(sa.Integer, nullable=False) + http_method = sa.Column(sa.String(16), nullable=True) + url_path = sa.Column(sa.String(2048), nullable=True) + expected_codes = sa.Column(sa.String(64), nullable=True) + enabled = sa.Column(sa.Boolean, nullable=False) + pool = orm.relationship("Pool", uselist=False, + back_populates="health_monitor") + + provisioning_status = sa.Column( + sa.String(16), + sa.ForeignKey("provisioning_status.name", + name="fk_health_monitor_provisioning_status_name"), + nullable=False) + operating_status = sa.Column( + sa.String(16), + sa.ForeignKey("operating_status.name", + name="fk_health_monitor_operating_status_name"), + nullable=False) + _tags = orm.relationship( + 'Tags', + single_parent=True, + lazy='subquery', + cascade='all,delete-orphan', + primaryjoin='and_(foreign(Tags.resource_id)==HealthMonitor.id)', + overlaps='_tags' + ) + http_version = sa.Column(sa.Float, nullable=True) + domain_name = sa.Column(sa.String(255), nullable=True) + + def __str__(self): + return (f"HealthMonitor(id={self.id!r}, name={self.name!r}, " + f"project_id={self.project_id!r}, type={self.type!r}, " + f"enabled={self.enabled!r})") + + +class Pool(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, + models.TimestampMixin, base_models.NameMixin, base_models.TagMixin): + + __data_model__ = data_models.Pool + + __tablename__ = "pool" + + __v2_wsme__ = pool.PoolResponse + + description = sa.Column(sa.String(255), nullable=True) + protocol = sa.Column( + sa.String(16), + sa.ForeignKey("protocol.name", name="fk_pool_protocol_name"), + nullable=False) + lb_algorithm = sa.Column( + sa.String(255), + sa.ForeignKey("algorithm.name", name="fk_pool_algorithm_name"), + nullable=False) + provisioning_status = sa.Column( + sa.String(16), + sa.ForeignKey("provisioning_status.name", + name="fk_pool_provisioning_status_name"), + nullable=False) + operating_status = sa.Column( + sa.String(16), + sa.ForeignKey("operating_status.name", + name="fk_pool_operating_status_name"), + nullable=False) + enabled = sa.Column(sa.Boolean, nullable=False) + load_balancer_id = sa.Column( + sa.String(36), + sa.ForeignKey("load_balancer.id", name="fk_pool_load_balancer_id"), + nullable=True) + health_monitor = orm.relationship("HealthMonitor", uselist=False, + cascade="delete", back_populates="pool") + load_balancer = orm.relationship("LoadBalancer", uselist=False, + back_populates="pools") + members = orm.relationship("Member", uselist=True, cascade="delete", + back_populates="pool") + session_persistence = orm.relationship( + "SessionPersistence", uselist=False, cascade="delete", + back_populates="pool") + _default_listeners = orm.relationship("Listener", uselist=True, + back_populates="default_pool", + cascade_backrefs=False) + l7policies = orm.relationship("L7Policy", uselist=True, + 
back_populates="redirect_pool") + _tags = orm.relationship( + 'Tags', + single_parent=True, + lazy='subquery', + cascade='all,delete-orphan', + primaryjoin='and_(foreign(Tags.resource_id)==Pool.id)', + overlaps='_tags' + ) + tls_certificate_id = sa.Column(sa.String(255), nullable=True) + ca_tls_certificate_id = sa.Column(sa.String(255), nullable=True) + crl_container_id = sa.Column(sa.String(255), nullable=True) + tls_enabled = sa.Column(sa.Boolean, default=False, nullable=False) + tls_ciphers = sa.Column(sa.String(2048), nullable=True) + tls_versions = sa.Column(ScalarListType(), nullable=True) + alpn_protocols = sa.Column(ScalarListType(), nullable=True) + + # This property should be a unique list of any listeners that reference + # this pool as its default_pool and any listeners referenced by enabled + # L7Policies with at least one l7rule which also reference this pool. The + # intent is that pool.listeners should be a unique list of listeners + # *actually* using the pool. + @property + def listeners(self): + _listeners = self._default_listeners[:] + _l_ids = [li.id for li in _listeners] + l7_listeners = [p.listener for p in self.l7policies + if len(p.l7rules) > 0 and p.enabled is True] + for li in l7_listeners: + if li.id not in _l_ids: + _listeners.append(li) + _l_ids.append(li.id) + return _listeners + + def __str__(self): + return (f"Pool(id={self.id!r}, name={self.name!r}, " + f"project_id={self.project_id!r}, " + f"provisioning_status={self.provisioning_status!r}, " + f"protocol={self.protocol!r}, " + f"lb_algorithm={self.lb_algorithm!r}, " + f"enabled={self.enabled!r})") + + +class LoadBalancer(base_models.BASE, base_models.IdMixin, + base_models.ProjectMixin, models.TimestampMixin, + base_models.NameMixin, base_models.TagMixin): + + __data_model__ = data_models.LoadBalancer + + __tablename__ = "load_balancer" + + __v2_wsme__ = load_balancer.LoadBalancerResponse + + description = sa.Column(sa.String(255), nullable=True) + provisioning_status = sa.Column( + sa.String(16), + sa.ForeignKey("provisioning_status.name", + name="fk_load_balancer_provisioning_status_name"), + nullable=False) + operating_status = sa.Column( + sa.String(16), + sa.ForeignKey("operating_status.name", + name="fk_load_balancer_operating_status_name"), + nullable=False) + topology = sa.Column( + sa.String(36), + sa.ForeignKey("lb_topology.name", name="fk_lb_topology_name"), + nullable=True) + enabled = sa.Column(sa.Boolean, nullable=False) + amphorae = orm.relationship("Amphora", uselist=True, + back_populates="load_balancer") + server_group_id = sa.Column(sa.String(36), nullable=True) + provider = sa.Column(sa.String(64), nullable=True) + vip = orm.relationship('Vip', cascade='delete', uselist=False, + backref=orm.backref('load_balancer', uselist=False)) + additional_vips = orm.relationship( + 'AdditionalVip', cascade='delete', uselist=True, + backref=orm.backref('load_balancer', uselist=False)) + pools = orm.relationship('Pool', cascade='delete', uselist=True, + back_populates="load_balancer") + listeners = orm.relationship('Listener', cascade='delete', uselist=True, + back_populates='load_balancer') + _tags = orm.relationship( + 'Tags', + single_parent=True, + lazy='subquery', + cascade='all,delete-orphan', + primaryjoin='and_(foreign(Tags.resource_id)==LoadBalancer.id)', + overlaps='_tags' + ) + flavor_id = sa.Column( + sa.String(36), + sa.ForeignKey("flavor.id", name="fk_lb_flavor_id"), nullable=True) + availability_zone = sa.Column( + sa.String(255), + sa.ForeignKey("availability_zone.name", + 
name="fk_load_balancer_availability_zone_name"), + nullable=True) + flavor: Mapped["Flavor"] = orm.relationship("Flavor") + + def __str__(self): + return (f"LoadBalancer(id={self.id!r}, name={self.name!r}, " + f"project_id={self.project_id!r}, vip={self.vip!r}, " + f"provisioning_status={self.provisioning_status!r}, " + f"operating_status={self.operating_status!r}, " + f"provider={self.provider!r})") + + +class VRRPGroup(base_models.BASE): + + __data_model__ = data_models.VRRPGroup + + __tablename__ = "vrrp_group" + + load_balancer_id = sa.Column( + sa.String(36), + sa.ForeignKey("load_balancer.id", + name="fk_vrrp_group_load_balancer_id"), + nullable=False, primary_key=True) + + vrrp_group_name = sa.Column(sa.String(36), nullable=True) + vrrp_auth_type = sa.Column(sa.String(16), sa.ForeignKey( + "vrrp_auth_method.name", + name="fk_load_balancer_vrrp_auth_method_name")) + vrrp_auth_pass = sa.Column(sa.String(36), nullable=True) + advert_int = sa.Column(sa.Integer(), nullable=True) + load_balancer = orm.relationship("LoadBalancer", uselist=False, + backref=orm.backref("vrrp_group", + uselist=False, + cascade="delete")) + + +class Vip(base_models.BASE): + + __data_model__ = data_models.Vip + + __tablename__ = "vip" + + load_balancer_id = sa.Column( + sa.String(36), + sa.ForeignKey("load_balancer.id", + name="fk_vip_load_balancer_id"), + nullable=False, primary_key=True) + ip_address = sa.Column(sa.String(64), nullable=True) + port_id = sa.Column(sa.String(36), nullable=True) + subnet_id = sa.Column(sa.String(36), nullable=True) + network_id = sa.Column(sa.String(36), nullable=True) + qos_policy_id = sa.Column(sa.String(36), nullable=True) + octavia_owned = sa.Column(sa.Boolean(), nullable=True) + vnic_type = sa.Column(sa.String(64), nullable=True) + + sgs = orm.relationship( + "VipSecurityGroup", cascade="all,delete-orphan", + uselist=True, backref=orm.backref("vip", uselist=False)) + + @property + def sg_ids(self) -> list[str]: + return [sg.sg_id for sg in self.sgs] + + +class AdditionalVip(base_models.BASE): + + __data_model__ = data_models.AdditionalVip + + __tablename__ = "additional_vip" + + __table_args__ = ( + sa.PrimaryKeyConstraint('load_balancer_id', 'subnet_id', + name='pk_add_vip_load_balancer_subnet'), + ) + + load_balancer_id = sa.Column( + sa.String(36), + sa.ForeignKey("load_balancer.id", + name="fk_add_vip_load_balancer_id"), + nullable=False, index=True) + ip_address = sa.Column(sa.String(64), nullable=True) + port_id = sa.Column(sa.String(36), nullable=True) + subnet_id = sa.Column(sa.String(36), nullable=True) + network_id = sa.Column(sa.String(36), nullable=True) + + +class Listener(base_models.BASE, base_models.IdMixin, + base_models.ProjectMixin, models.TimestampMixin, + base_models.NameMixin, base_models.TagMixin): + + __data_model__ = data_models.Listener + + __tablename__ = "listener" + + __v2_wsme__ = listener.ListenerResponse + + __table_args__ = ( + sa.UniqueConstraint( + 'load_balancer_id', 'protocol', 'protocol_port', + name='uq_listener_load_balancer_id_protocol_port'), + ) + + description = sa.Column(sa.String(255), nullable=True) + protocol = sa.Column( + sa.String(16), + sa.ForeignKey("protocol.name", name="fk_listener_protocol_name"), + nullable=False) + protocol_port = sa.Column(sa.Integer(), nullable=False) + connection_limit = sa.Column(sa.Integer, nullable=True) + load_balancer_id = sa.Column( + sa.String(36), + sa.ForeignKey("load_balancer.id", name="fk_listener_load_balancer_id"), + nullable=True) + tls_certificate_id = sa.Column(sa.String(255), 
nullable=True) + default_pool_id = sa.Column( + sa.String(36), + sa.ForeignKey("pool.id", name="fk_listener_pool_id"), + nullable=True) + provisioning_status = sa.Column( + sa.String(16), + sa.ForeignKey("provisioning_status.name", + name="fk_listener_provisioning_status_name"), + nullable=False) + operating_status = sa.Column( + sa.String(16), + sa.ForeignKey("operating_status.name", + name="fk_listener_operating_status_name"), + nullable=False) + enabled = sa.Column(sa.Boolean(), nullable=False) + load_balancer = orm.relationship("LoadBalancer", uselist=False, + back_populates="listeners") + default_pool = orm.relationship("Pool", uselist=False, + back_populates="_default_listeners", + cascade_backrefs=False) + sni_containers = orm.relationship( + 'SNI', cascade='all,delete-orphan', + uselist=True, backref=orm.backref('listener', uselist=False)) + + l7policies = orm.relationship( + 'L7Policy', uselist=True, order_by='L7Policy.position', + collection_class=orderinglist.ordering_list('position', count_from=1), + cascade='delete', back_populates='listener') + + peer_port = sa.Column(sa.Integer(), nullable=True) + insert_headers = sa.Column(sa.PickleType()) + timeout_client_data = sa.Column(sa.Integer, nullable=True) + timeout_member_connect = sa.Column(sa.Integer, nullable=True) + timeout_member_data = sa.Column(sa.Integer, nullable=True) + timeout_tcp_inspect = sa.Column(sa.Integer, nullable=True) + client_ca_tls_certificate_id = sa.Column(sa.String(255), nullable=True) + client_authentication = sa.Column( + sa.String(10), + sa.ForeignKey("client_authentication_mode.name", + name="fk_listener_client_authentication_mode_name"), + nullable=False, default=constants.CLIENT_AUTH_NONE) + client_crl_container_id = sa.Column(sa.String(255), nullable=True) + tls_ciphers = sa.Column(sa.String(2048), nullable=True) + tls_versions = sa.Column(ScalarListType(), nullable=True) + alpn_protocols = sa.Column(ScalarListType(), nullable=True) + hsts_max_age = sa.Column(sa.Integer, nullable=True) + hsts_include_subdomains = sa.Column(sa.Boolean, nullable=True) + hsts_preload = sa.Column(sa.Boolean, nullable=True) + + _tags = orm.relationship( + 'Tags', + single_parent=True, + lazy='subquery', + cascade='all,delete-orphan', + primaryjoin='and_(foreign(Tags.resource_id)==Listener.id)', + overlaps='_tags' + ) + + # This property should be a unique list of the default_pool and anything + # referenced by enabled L7Policies with at least one rule that also + # reference this listener. The intent is that listener.pools should be a + # unique list of pools this listener is *actually* using. 
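+ # For example, a listener whose default_pool is A and which carries an + # enabled L7 policy (with at least one rule) redirecting to pool B will + # report pools == [A, B], with B appearing only once even if it is also + # the default pool.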
+ @property + def pools(self): + _pools = [] + _p_ids = [] + if self.default_pool: + _pools.append(self.default_pool) + _p_ids.append(self.default_pool.id) + l7_pools = [p.redirect_pool for p in self.l7policies + if p.redirect_pool is not None and len(p.l7rules) > 0 and + p.enabled is True] + for p in l7_pools: + if p.id not in _p_ids: + _pools.append(p) + _p_ids.append(p.id) + return _pools + + allowed_cidrs = orm.relationship( + 'ListenerCidr', cascade='all,delete-orphan', + uselist=True, backref=orm.backref('listener', uselist=False)) + + def __str__(self): + return (f"Listener(id={self.id!r}, " + f"default_pool={self.default_pool!r}, name={self.name!r}, " + f"project_id={self.project_id!r}, protocol={self.protocol!r}, " + f"protocol_port={self.protocol_port!r}, " + f"enabled={self.enabled!r})") + + +class SNI(base_models.BASE): + + __data_model__ = data_models.SNI + + __tablename__ = "sni" + __table_args__ = ( + sa.PrimaryKeyConstraint('listener_id', 'tls_container_id'), + ) + listener_id = sa.Column( + sa.String(36), + sa.ForeignKey("listener.id", name="fk_sni_listener_id"), + nullable=False) + tls_container_id = sa.Column(sa.String(128), nullable=False) + position = sa.Column(sa.Integer(), nullable=True) + + +class Amphora(base_models.BASE, base_models.IdMixin, models.TimestampMixin): + + __data_model__ = data_models.Amphora + + __tablename__ = "amphora" + + __v2_wsme__ = amphora.AmphoraResponse + + load_balancer_id = sa.Column( + sa.String(36), sa.ForeignKey("load_balancer.id", + name="fk_amphora_load_balancer_id"), + nullable=True) + compute_id = sa.Column(sa.String(36), nullable=True) + lb_network_ip = sa.Column(sa.String(64), nullable=True) + vrrp_ip = sa.Column(sa.String(64), nullable=True) + ha_ip = sa.Column(sa.String(64), nullable=True) + vrrp_port_id = sa.Column(sa.String(36), nullable=True) + ha_port_id = sa.Column(sa.String(36), nullable=True) + cert_expiration = sa.Column(sa.DateTime(timezone=True), default=None, + nullable=True) + cert_busy = sa.Column(sa.Boolean(), default=False, nullable=False) + + role = sa.Column( + sa.String(36), + sa.ForeignKey("amphora_roles.name", name="fk_amphora_roles_name"), + nullable=True) + status = sa.Column( + sa.String(36), + sa.ForeignKey("provisioning_status.name", + name="fk_container_provisioning_status_name")) + vrrp_interface = sa.Column(sa.String(16), nullable=True) + vrrp_id = sa.Column(sa.Integer(), nullable=True) + vrrp_priority = sa.Column(sa.Integer(), nullable=True) + cached_zone = sa.Column(sa.String(255), nullable=True) + image_id = sa.Column(sa.String(36), nullable=True) + load_balancer = orm.relationship("LoadBalancer", uselist=False, + back_populates='amphorae') + compute_flavor = sa.Column(sa.String(255), nullable=True) + + def __str__(self): + return (f"Amphora(id={self.id!r}, load_balancer_id=" + f"{self.load_balancer_id!r}, status={self.status!r}, " + f"role={self.role!r}, lb_network_ip={self.lb_network_ip!r}, " + f"vrrp_ip={self.vrrp_ip!r})") + + +class AmphoraHealth(base_models.BASE): + __data_model__ = data_models.AmphoraHealth + __tablename__ = "amphora_health" + + amphora_id = sa.Column( + sa.String(36), nullable=False, primary_key=True) + last_update = sa.Column(sa.DateTime, default=func.now(), + nullable=False) + + busy = sa.Column(sa.Boolean(), default=False, nullable=False) + + +class L7Rule(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, + models.TimestampMixin, base_models.TagMixin): + + __data_model__ = data_models.L7Rule + + __tablename__ = "l7rule" + + __v2_wsme__ = 
l7rule.L7RuleResponse + + l7policy_id = sa.Column( + sa.String(36), + sa.ForeignKey("l7policy.id", name="fk_l7rule_l7policy_id"), + nullable=False) + type = sa.Column( + sa.String(36), + sa.ForeignKey( + "l7rule_type.name", + name="fk_l7rule_l7rule_type_name"), + nullable=False) + compare_type = sa.Column( + sa.String(36), + sa.ForeignKey( + "l7rule_compare_type.name", + name="fk_l7rule_l7rule_compare_type_name"), + nullable=False) + key = sa.Column(sa.String(255), nullable=True) + value = sa.Column(sa.String(255), nullable=False) + invert = sa.Column(sa.Boolean(), default=False, nullable=False) + enabled = sa.Column(sa.Boolean(), nullable=False) + l7policy = orm.relationship("L7Policy", uselist=False, + back_populates="l7rules") + provisioning_status = sa.Column( + sa.String(16), + sa.ForeignKey("provisioning_status.name", + name="fk_l7rule_provisioning_status_name"), + nullable=False) + operating_status = sa.Column( + sa.String(16), + sa.ForeignKey("operating_status.name", + name="fk_l7rule_operating_status_name"), + nullable=False) + _tags = orm.relationship( + 'Tags', + single_parent=True, + lazy='subquery', + cascade='all,delete-orphan', + primaryjoin='and_(foreign(Tags.resource_id)==L7Rule.id)', + overlaps='_tags' + ) + + def __str__(self): + return (f"L7Rule(id={self.id!r}, project_id={self.project_id!r}, " + f"provisioning_status={self.provisioning_status!r}, " + f"type={self.type!r}, key={self.key!r}, value={self.value!r}, " + f"invert={self.invert!r}, enabled={self.enabled!r})") + + +class L7Policy(base_models.BASE, base_models.IdMixin, base_models.ProjectMixin, + models.TimestampMixin, base_models.NameMixin, + base_models.TagMixin): + + __data_model__ = data_models.L7Policy + + __tablename__ = "l7policy" + + __v2_wsme__ = l7policy.L7PolicyResponse + + description = sa.Column(sa.String(255), nullable=True) + listener_id = sa.Column( + sa.String(36), + sa.ForeignKey("listener.id", name="fk_l7policy_listener_id"), + nullable=False) + action = sa.Column( + sa.String(36), + sa.ForeignKey( + "l7policy_action.name", + name="fk_l7policy_l7policy_action_name"), + nullable=False) + redirect_pool_id = sa.Column( + sa.String(36), + sa.ForeignKey("pool.id", name="fk_l7policy_pool_id"), + nullable=True) + redirect_url = sa.Column( + sa.String(255), + nullable=True) + redirect_prefix = sa.Column( + sa.String(255), + nullable=True) + redirect_http_code = sa.Column(sa.Integer, nullable=True) + position = sa.Column(sa.Integer, nullable=False) + enabled = sa.Column(sa.Boolean(), nullable=False) + listener = orm.relationship("Listener", uselist=False, + back_populates="l7policies") + redirect_pool = orm.relationship("Pool", uselist=False, + back_populates="l7policies") + l7rules = orm.relationship("L7Rule", uselist=True, cascade="delete", + back_populates="l7policy") + provisioning_status = sa.Column( + sa.String(16), + sa.ForeignKey("provisioning_status.name", + name="fk_l7policy_provisioning_status_name"), + nullable=False) + operating_status = sa.Column( + sa.String(16), + sa.ForeignKey("operating_status.name", + name="fk_l7policy_operating_status_name"), + nullable=False) + _tags = orm.relationship( + 'Tags', + single_parent=True, + lazy='subquery', + cascade='all,delete-orphan', + primaryjoin='and_(foreign(Tags.resource_id)==L7Policy.id)', + overlaps='_tags' + ) + + def __str__(self): + return (f"L7Policy(id={self.id!r}, name={self.name!r}, " + f"project_id={self.project_id!r}, " + f"provisioning_status={self.provisioning_status!r}, " + f"action={self.action!r}, position={self.position!r}, " + 
f"enabled={self.enabled!r})") + + +class Quotas(base_models.BASE): + + __data_model__ = data_models.Quotas + + __tablename__ = "quotas" + + __v2_wsme__ = quotas.QuotaAllBase + + project_id = sa.Column(sa.String(36), primary_key=True) + health_monitor = sa.Column(sa.Integer(), nullable=True) + listener = sa.Column(sa.Integer(), nullable=True) + load_balancer = sa.Column(sa.Integer(), nullable=True) + member = sa.Column(sa.Integer(), nullable=True) + pool = sa.Column(sa.Integer(), nullable=True) + l7policy = sa.Column(sa.Integer(), nullable=True) + l7rule = sa.Column(sa.Integer(), nullable=True) + in_use_health_monitor = sa.Column(sa.Integer(), nullable=True) + in_use_listener = sa.Column(sa.Integer(), nullable=True) + in_use_load_balancer = sa.Column(sa.Integer(), nullable=True) + in_use_member = sa.Column(sa.Integer(), nullable=True) + in_use_pool = sa.Column(sa.Integer(), nullable=True) + in_use_l7policy = sa.Column(sa.Integer(), nullable=True) + in_use_l7rule = sa.Column(sa.Integer(), nullable=True) + + def __str__(self): + return (f"Quotas(project_id={self.project_id!r}, " + f"load_balancer={self.load_balancer!r}, " + f"listener={self.listener!r}, pool={self.pool!r}, " + f"health_monitor={self.health_monitor!r}, " + f"member={self.member!r}, l7policy={self.l7policy!r}, " + f"l7rule={self.l7rule!r})") + + +class FlavorProfile(base_models.BASE, base_models.IdMixin, + base_models.NameMixin): + + __data_model__ = data_models.FlavorProfile + + __tablename__ = "flavor_profile" + + __v2_wsme__ = flavor_profile.FlavorProfileResponse + + provider_name = sa.Column(sa.String(255), nullable=False) + flavor_data = sa.Column(sa.String(4096), nullable=False) + + +class Flavor(base_models.BASE, + base_models.IdMixin, + base_models.NameMixin): + + __data_model__ = data_models.Flavor + + __tablename__ = "flavor" + + __v2_wsme__ = flavors.FlavorResponse + + __table_args__ = ( + sa.UniqueConstraint('name', + name='uq_flavor_name'), + ) + + description = sa.Column(sa.String(255), nullable=True) + enabled = sa.Column(sa.Boolean(), nullable=False) + flavor_profile_id = sa.Column( + sa.String(36), + sa.ForeignKey("flavor_profile.id", + name="fk_flavor_flavor_profile_id"), + nullable=False) + flavor_profile: Mapped["FlavorProfile"] = orm.relationship("FlavorProfile") + + +class AvailabilityZoneProfile(base_models.BASE, base_models.IdMixin, + base_models.NameMixin): + + __data_model__ = data_models.AvailabilityZoneProfile + + __tablename__ = "availability_zone_profile" + + __v2_wsme__ = availability_zone_profile.AvailabilityZoneProfileResponse + + provider_name = sa.Column(sa.String(255), nullable=False) + availability_zone_data = sa.Column(sa.String(4096), nullable=False) + + +class AvailabilityZone(base_models.BASE, + base_models.NameMixin): + + __data_model__ = data_models.AvailabilityZone + + __tablename__ = "availability_zone" + + __v2_wsme__ = availability_zones.AvailabilityZoneResponse + + __table_args__ = ( + sa.PrimaryKeyConstraint('name'), + ) + + description = sa.Column(sa.String(255), nullable=True) + enabled = sa.Column(sa.Boolean(), nullable=False) + availability_zone_profile_id = sa.Column( + sa.String(36), + sa.ForeignKey("availability_zone_profile.id", + name="fk_az_az_profile_id"), + nullable=False) + availability_zone_profile: Mapped["AvailabilityZoneProfile"] = ( + orm.relationship("AvailabilityZoneProfile")) + + +class ClientAuthenticationMode(base_models.BASE): + + __tablename__ = "client_authentication_mode" + + name = sa.Column(sa.String(10), primary_key=True, nullable=False) + + +class 
ListenerCidr(base_models.BASE): + + __data_model__ = data_models.ListenerCidr + + __tablename__ = "listener_cidr" + __table_args__ = ( + sa.PrimaryKeyConstraint('listener_id', 'cidr'), + ) + + listener_id = sa.Column( + sa.String(36), + sa.ForeignKey("listener.id", name="fk_listener_cidr_listener_id"), + nullable=False) + cidr = sa.Column(sa.String(64), nullable=False) + + +class VipSecurityGroup(base_models.BASE): + + __data_model__ = data_models.VipSecurityGroup + + __tablename__ = "vip_security_group" + __table_args__ = ( + sa.PrimaryKeyConstraint('load_balancer_id', 'sg_id'), + ) + + load_balancer_id = sa.Column( + sa.String(36), + sa.ForeignKey("vip.load_balancer_id", name="fk_vip_sg_vip_lb_id"), + nullable=False) + sg_id = sa.Column(sa.String(64), nullable=False) + + +class AmphoraMemberPort(base_models.BASE, models.TimestampMixin): + + __data_model__ = data_models.AmphoraMemberPort + + __tablename__ = "amphora_member_port" + + port_id = sa.Column( + sa.String(36), + primary_key=True) + amphora_id = sa.Column( + sa.String(36), + sa.ForeignKey("amphora.id", name="fk_member_port_amphora_id"), + nullable=False) + network_id = sa.Column( + sa.String(36)) diff --git a/octavia/db/prepare.py b/octavia/db/prepare.py new file mode 100644 index 0000000000..04ad165517 --- /dev/null +++ b/octavia/db/prepare.py @@ -0,0 +1,194 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_utils import uuidutils + +from octavia.api.v2.types import l7rule +from octavia.common import constants +from octavia.common import exceptions +from octavia.common import validate + +CONF = cfg.CONF + + +def create_load_balancer(lb_dict): + if not lb_dict.get('id'): + lb_dict['id'] = uuidutils.generate_uuid() + if lb_dict.get('vip'): + lb_dict['vip']['load_balancer_id'] = lb_dict.get('id') + lb_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE + lb_dict[constants.OPERATING_STATUS] = constants.OFFLINE + + # Set defaults here; they may be overridden by flavors later + lb_dict['topology'] = CONF.controller_worker.loadbalancer_topology + + return lb_dict + + +def create_listener(listener_dict, lb_id): + if not listener_dict.get('id'): + listener_dict['id'] = uuidutils.generate_uuid() + if 'loadbalancer_id' in listener_dict: + listener_dict['load_balancer_id'] = listener_dict.pop( + 'loadbalancer_id') + else: + listener_dict['load_balancer_id'] = lb_id + + listener_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE + listener_dict[constants.OPERATING_STATUS] = constants.OFFLINE + # NOTE(blogan): Throwing away because we should not store secure data + # in the database nor should we send it to a handler.
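The strip-before-store rule in the note above, as a self-contained sketch (the scrub helper and field names are illustrative, not this module's API):

SECRET_KEYS = ('tls_termination',)

def scrub(resource_dict):
    # Return a copy with secret-bearing keys removed before persistence.
    return {k: v for k, v in resource_dict.items() if k not in SECRET_KEYS}

listener = {'name': 'l1', 'tls_termination': {'private_key': 'REDACTED'}}
assert 'tls_termination' not in scrub(listener)

The patch code below does the same in place with del, then normalizes the SNI container references into row dicts for the sni table.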
+ if 'tls_termination' in listener_dict: + del listener_dict['tls_termination'] + + if 'sni_containers' in listener_dict: + sni_container_ids = listener_dict.pop('sni_containers') or [] + elif 'sni_container_refs' in listener_dict: + sni_container_ids = listener_dict.pop('sni_container_refs') or [] + else: + sni_container_ids = [] + sni_containers = [{'listener_id': listener_dict.get('id'), + 'tls_container_id': sni_container_id} + for sni_container_id in sni_container_ids] + listener_dict['sni_containers'] = sni_containers + + if 'client_authentication' not in listener_dict: + listener_dict['client_authentication'] = constants.CLIENT_AUTH_NONE + + if listener_dict['protocol'] == constants.PROTOCOL_TERMINATED_HTTPS: + if ('tls_ciphers' not in listener_dict or + listener_dict['tls_ciphers'] is None): + listener_dict['tls_ciphers'] = ( + CONF.api_settings.default_listener_ciphers) + if ('tls_versions' not in listener_dict or + listener_dict['tls_versions'] is None): + listener_dict['tls_versions'] = ( + CONF.api_settings.default_listener_tls_versions) + if ('alpn_protocols' not in listener_dict or + listener_dict['alpn_protocols'] is None): + listener_dict['alpn_protocols'] = ( + CONF.api_settings.default_listener_alpn_protocols) + + if listener_dict.get('timeout_client_data') is None: + listener_dict['timeout_client_data'] = ( + CONF.haproxy_amphora.timeout_client_data) + if listener_dict.get('timeout_member_connect') is None: + listener_dict['timeout_member_connect'] = ( + CONF.haproxy_amphora.timeout_member_connect) + if listener_dict.get('timeout_member_data') is None: + listener_dict['timeout_member_data'] = ( + CONF.haproxy_amphora.timeout_member_data) + if listener_dict.get('timeout_tcp_inspect') is None: + listener_dict['timeout_tcp_inspect'] = ( + CONF.haproxy_amphora.timeout_tcp_inspect) + + return listener_dict + + +def create_l7policy(l7policy_dict, lb_id, listener_id): + l7policy_dict = validate.sanitize_l7policy_api_args(l7policy_dict, + create=True) + l7policy_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE + l7policy_dict[constants.OPERATING_STATUS] = constants.OFFLINE + if not l7policy_dict.get('id'): + l7policy_dict['id'] = uuidutils.generate_uuid() + l7policy_dict['listener_id'] = listener_id + if l7policy_dict.get('redirect_pool'): + pool_dict = l7policy_dict.pop('redirect_pool') + prepped_pool = create_pool(pool_dict, lb_id) + l7policy_dict['redirect_pool'] = prepped_pool + l7policy_dict['redirect_pool_id'] = prepped_pool['id'] + rules = l7policy_dict.pop('rules', None) + if rules: + l7policy_dict['l7rules'] = rules + if l7policy_dict.get('l7rules'): + if (len(l7policy_dict.get('l7rules')) > + constants.MAX_L7RULES_PER_L7POLICY): + raise exceptions.TooManyL7RulesOnL7Policy(id=l7policy_dict['id']) + prepped_l7rules = [] + for l7rule_dict in l7policy_dict.get('l7rules'): + try: + validate.l7rule_data(l7rule.L7RulePOST(**l7rule_dict)) + except Exception as e: + raise exceptions.L7RuleValidation(error=e) + prepped_l7rule = create_l7rule(l7rule_dict, l7policy_dict['id']) + prepped_l7rules.append(prepped_l7rule) + return l7policy_dict + + +def create_l7rule(l7rule_dict, l7policy_id): + l7rule_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE + l7rule_dict[constants.OPERATING_STATUS] = constants.OFFLINE + if not l7rule_dict.get('id'): + l7rule_dict['id'] = uuidutils.generate_uuid() + l7rule_dict['l7policy_id'] = l7policy_id + if 'enabled' not in l7rule_dict: + l7rule_dict['enabled'] = True + return l7rule_dict + + +def create_pool(pool_dict, 
lb_id=None): + if not pool_dict.get('id'): + pool_dict['id'] = uuidutils.generate_uuid() + if 'loadbalancer_id' in pool_dict: + pool_dict['load_balancer_id'] = pool_dict.pop('loadbalancer_id') + else: + pool_dict['load_balancer_id'] = lb_id + if pool_dict.get('session_persistence'): + pool_dict['session_persistence']['pool_id'] = pool_dict.get('id') + if 'members' in pool_dict and not pool_dict.get('members'): + del pool_dict['members'] + elif pool_dict.get('members'): + prepped_members = [] + for member_dict in pool_dict.get('members'): + prepped_members.append(create_member(member_dict, pool_dict['id'])) + if pool_dict['tls_enabled'] is True: + if pool_dict['tls_ciphers'] is None: + pool_dict['tls_ciphers'] = CONF.api_settings.default_pool_ciphers + if pool_dict['tls_versions'] is None: + pool_dict['tls_versions'] = ( + CONF.api_settings.default_pool_tls_versions) + if pool_dict['alpn_protocols'] is None: + pool_dict['alpn_protocols'] = ( + CONF.api_settings.default_pool_alpn_protocols) + pool_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE + pool_dict[constants.OPERATING_STATUS] = constants.OFFLINE + return pool_dict + + +def create_member(member_dict, pool_id, has_health_monitor=False): + if not member_dict.get('id'): + member_dict['id'] = uuidutils.generate_uuid() + member_dict['pool_id'] = pool_id + member_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE + if has_health_monitor: + member_dict[constants.OPERATING_STATUS] = constants.OFFLINE + else: + member_dict[constants.OPERATING_STATUS] = constants.NO_MONITOR + if 'backup' not in member_dict: + member_dict['backup'] = False + return member_dict + + +def create_health_monitor(hm_dict, pool_id=None): + hm_dict[constants.PROVISIONING_STATUS] = constants.PENDING_CREATE + hm_dict[constants.OPERATING_STATUS] = constants.OFFLINE + if pool_id: + hm_dict['id'] = pool_id + hm_dict['pool_id'] = pool_id + else: + if not hm_dict.get('id'): + hm_dict['id'] = uuidutils.generate_uuid() + return hm_dict diff --git a/octavia/db/repositories.py b/octavia/db/repositories.py new file mode 100644 index 0000000000..fa28dc5c0e --- /dev/null +++ b/octavia/db/repositories.py @@ -0,0 +1,2163 @@ +# Copyright 2014 Rackspace +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
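The module that follows uses a session-in, data-model-out repository convention: callers hand in a SQLAlchemy session and receive plain data-model objects rather than ORM rows. A minimal runnable sketch of that shape (toy classes, not Octavia's):

from dataclasses import dataclass

@dataclass
class WidgetDM:
    # The plain "data model" handed back to callers.
    id: str

class WidgetRepository:
    def get(self, session, widget_id):
        # 'session' is duck-typed here; the real code issues ORM queries.
        row = session.rows.get(widget_id)
        return WidgetDM(id=row['id']) if row else None

class FakeSession:
    rows = {'w1': {'id': 'w1'}}

assert WidgetRepository().get(FakeSession(), 'w1').id == 'w1'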
+ +""" +Defines interface for DB access that Resource or Octavia Controllers may +reference +""" + +import datetime +from typing import Optional + +from oslo_config import cfg +from oslo_db import api as oslo_db_api +from oslo_db import exception as db_exception +from oslo_log import log as logging +from oslo_serialization import jsonutils +from oslo_utils import timeutils +from oslo_utils import uuidutils +from sqlalchemy.orm import noload +from sqlalchemy.orm import Session +from sqlalchemy.orm import subqueryload +from sqlalchemy import select +from sqlalchemy.sql.expression import false +from sqlalchemy.sql import func +from sqlalchemy import text +from sqlalchemy import update + +from octavia.common import constants as consts +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import utils +from octavia.common import validate +from octavia.db import api as db_api +from octavia.db import models + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + + +class BaseRepository: + model_class = None + + def count(self, session, **filters): + """Retrieves a count of entities from the database. + + :param session: A Sql Alchemy database session. + :param filters: Filters to decide which entities should be retrieved. + :returns: int + """ + deleted = filters.pop('show_deleted', True) + model = session.query(self.model_class).filter_by(**filters) + + if not deleted: + if hasattr(self.model_class, 'status'): + model = model.filter( + self.model_class.status != consts.DELETED) + else: + model = model.filter( + self.model_class.provisioning_status != consts.DELETED) + + return model.count() + + def create(self, session, **model_kwargs): + """Base create method for a database entity. + + :param session: A Sql Alchemy database session. + :param model_kwargs: Attributes of the model to insert. + :returns: octavia.common.data_model + """ + model = self.model_class(**model_kwargs) + session.add(model) + return model.to_data_model() + + def delete(self, session, **filters): + """Deletes an entity from the database. + + :param session: A Sql Alchemy database session. + :param filters: Filters to decide which entity should be deleted. + :returns: None + :raises: sqlalchemy.orm.exc.NoResultFound + """ + model = session.query(self.model_class).filter_by(**filters).one() + session.delete(model) + session.flush() + + def delete_batch(self, session, ids=None): + """Batch deletes by entity ids.""" + ids = ids or [] + for id in ids: + self.delete(session, id=id) + + def update(self, session, id, **model_kwargs): + """Updates an entity in the database. + + :param session: A Sql Alchemy database session. + :param model_kwargs: Entity attributes that should be updates. + :returns: octavia.common.data_model + """ + tags = model_kwargs.pop('tags', None) + if tags is not None: + resource = session.get(self.model_class, id) + resource.tags = tags + session.query(self.model_class).filter_by( + id=id).update(model_kwargs) + + def get(self, session, limited_graph=False, **filters): + """Retrieves an entity from the database. + + :param session: A Sql Alchemy database session. + :param limited_graph: Option controls number of processed nodes + in the graph. Default (with False) behaviour + is recursion iteration through all nodes + in the graph via to_data_model. With True value + recursion will stop at the first child node. + It means, that only limited number of nodes be + converted. 
This logic can be used for specific + cases where information about the full graph + is unnecessary. + :param filters: Filters to decide which entity should be retrieved. + :returns: octavia.common.data_model + """ + deleted = filters.pop('show_deleted', True) + model = session.query(self.model_class).filter_by(**filters) + + if not deleted: + if hasattr(self.model_class, 'status'): + model = model.filter( + self.model_class.status != consts.DELETED) + else: + model = model.filter( + self.model_class.provisioning_status != consts.DELETED) + + model = model.first() + + if not model: + return None + + recursion_depth = 0 if limited_graph else None + return model.to_data_model(recursion_depth=recursion_depth) + + def get_all(self, session, pagination_helper=None, + query_options=None, limited_graph=False, **filters): + + """Retrieves a list of entities from the database. + + :param session: A Sql Alchemy database session. + :param pagination_helper: Helper to apply pagination and sorting. + :param query_options: Optional query options to apply. + :param limited_graph: Controls how much of the object graph is + processed. By default (False), to_data_model + recurses through all nodes in the graph. + When True, recursion stops at the first child + node, so only a limited number of nodes are + converted. This logic can be used for specific + cases where information about the full graph + is unnecessary. + :param filters: Filters to decide which entities should be retrieved. + :returns: [octavia.common.data_model] + """ + deleted = filters.pop('show_deleted', True) + query = session.query(self.model_class).filter_by(**filters) + if query_options: + query = query.options(query_options) + + if not deleted: + if hasattr(self.model_class, 'status'): + query = query.filter( + self.model_class.status != consts.DELETED) + else: + query = query.filter( + self.model_class.provisioning_status != consts.DELETED) + + if pagination_helper: + model_list, links = pagination_helper.apply( + query, self.model_class) + else: + links = None + model_list = query.all() + recursion_depth = 1 if limited_graph else None + data_model_list = [ + model.to_data_model(recursion_depth=recursion_depth) + for model in model_list + ] + return data_model_list, links + + def exists(self, session, id): + """Determines whether an entity exists in the database by its id. + + :param session: A Sql Alchemy database session. + :param id: id of entity to check for existence. + :returns: bool + """ + return bool(session.query(self.model_class).filter_by(id=id).first()) + + def get_all_deleted_expiring(self, session, exp_age): + """Get all previously deleted resources that are now expiring. + + :param session: A Sql Alchemy database session.
+ :param exp_age: A standard datetime delta used to determine how + long a resource can live without updates before + it is considered expired + :returns: A list of resource IDs + """ + + expiry_time = timeutils.utcnow() - exp_age + + query = session.query(self.model_class).filter( + self.model_class.updated_at < expiry_time) + if hasattr(self.model_class, 'status'): + query = query.filter_by(status=consts.DELETED) + else: + query = query.filter_by(provisioning_status=consts.DELETED) + # Do not load any relationships + query = query.options(noload('*')) + model_list = query.all() + + id_list = [model.id for model in model_list] + return id_list + + +class Repositories: + def __init__(self): + self.load_balancer = LoadBalancerRepository() + self.vip = VipRepository() + self.additional_vip = AdditionalVipRepository() + self.health_monitor = HealthMonitorRepository() + self.session_persistence = SessionPersistenceRepository() + self.pool = PoolRepository() + self.member = MemberRepository() + self.listener = ListenerRepository() + self.listener_cidr = ListenerCidrRepository() + self.listener_stats = ListenerStatisticsRepository() + self.amphora = AmphoraRepository() + self.sni = SNIRepository() + self.amphorahealth = AmphoraHealthRepository() + self.vrrpgroup = VRRPGroupRepository() + self.l7rule = L7RuleRepository() + self.l7policy = L7PolicyRepository() + self.amp_build_slots = AmphoraBuildSlotsRepository() + self.amp_build_req = AmphoraBuildReqRepository() + self.quotas = QuotasRepository() + self.flavor = FlavorRepository() + self.flavor_profile = FlavorProfileRepository() + self.availability_zone = AvailabilityZoneRepository() + self.availability_zone_profile = AvailabilityZoneProfileRepository() + self.amphora_member_port = AmphoraMemberPortRepository() + + def create_load_balancer_and_vip(self, session, lb_dict, vip_dict, + additional_vip_dicts=None): + """Inserts load balancer and vip entities into the database. + + Inserts load balancer and vip entities into the database in one + transaction and returns the data model of the load balancer. + + :param session: A Sql Alchemy database session. + :param lb_dict: Dictionary representation of a load balancer + :param vip_dict: Dictionary representation of a vip + :param additional_vip_dicts: Dict representations of additional vips + :returns: octavia.common.data_models.LoadBalancer + """ + additional_vip_dicts = additional_vip_dicts or [] + if not lb_dict.get('id'): + lb_dict['id'] = uuidutils.generate_uuid() + lb = models.LoadBalancer(**lb_dict) + session.add(lb) + vip_sg_ids = vip_dict.pop(consts.SG_IDS, []) + vip_dict['load_balancer_id'] = lb_dict['id'] + vip = models.Vip(**vip_dict) + session.add(vip) + if vip_sg_ids: + vip_dict[consts.SG_IDS] = vip_sg_ids + for vip_sg_id in vip_sg_ids: + vip_sg = models.VipSecurityGroup( + load_balancer_id=lb_dict['id'], + sg_id=vip_sg_id) + session.add(vip_sg) + for add_vip_dict in additional_vip_dicts: + add_vip_dict['load_balancer_id'] = lb_dict['id'] + add_vip_dict['network_id'] = vip_dict.get('network_id') + add_vip_dict['port_id'] = vip_dict.get('port_id') + add_vip = models.AdditionalVip(**add_vip_dict) + session.add(add_vip) + + return self.load_balancer.get(session, id=lb.id) + + def create_pool_on_load_balancer(self, session, pool_dict, + listener_id=None): + """Inserts a pool and session persistence entity into the database. + + :param session: A Sql Alchemy database session.
+ :param pool_dict: Dictionary representation of a pool + :param listener_id: Optional listener id that will + reference this pool as its default_pool_id + :returns: octavia.common.data_models.Pool + """ + if not pool_dict.get('id'): + pool_dict['id'] = uuidutils.generate_uuid() + sp_dict = pool_dict.pop('session_persistence', None) + db_pool = self.pool.create(session, **pool_dict) + if sp_dict is not None and sp_dict != {}: + sp_dict['pool_id'] = pool_dict['id'] + self.session_persistence.create(session, **sp_dict) + if listener_id: + self.listener.update(session, listener_id, + default_pool_id=pool_dict['id']) + + # Immediate refresh, as we have found that sqlalchemy will sometimes + # cache the above query and the pool object may miss the listener_id + # information + if listener_id: + pool = session.query(models.Pool).filter_by(id=db_pool.id).first() + session.refresh(pool) + return self.pool.get(session, id=db_pool.id) + + def update_pool_and_sp(self, session, pool_id, pool_dict): + """Updates a pool and session persistence entity in the database. + + :param session: A Sql Alchemy database session. + :param pool_id: ID of the pool to update + :param pool_dict: Dictionary representation of a pool + :returns: octavia.common.data_models.Pool + """ + if 'session_persistence' in pool_dict.keys(): + sp_dict = pool_dict.pop('session_persistence') + if sp_dict is None or sp_dict == {}: + if self.session_persistence.exists(session, pool_id): + self.session_persistence.delete(session, + pool_id=pool_id) + elif self.session_persistence.exists(session, pool_id): + self.session_persistence.update(session, pool_id, + **sp_dict) + else: + sp_dict['pool_id'] = pool_id + self.session_persistence.create(session, **sp_dict) + # If only the session_persistence is being updated, this will be + # empty + if pool_dict: + self.pool.update(session, pool_id, **pool_dict) + session.flush() + return self.pool.get(session, id=pool_id) + + def test_and_set_lb_and_listeners_prov_status(self, session, lb_id, + lb_prov_status, + listener_prov_status, + listener_ids=None, + pool_id=None, + l7policy_id=None): + """Tests and sets a load balancer and listener provisioning status. + + Puts a lock on the load balancer table to check the status of a + load balancer. If the status is ACTIVE then the status of the load + balancer and listener is updated and the method returns True. If the + status is not ACTIVE, then nothing is done and False is returned. + + :param session: A Sql Alchemy database session. + :param lb_id: ID of the Load Balancer to check and lock + :param lb_prov_status: Status to set Load Balancer and Listener if + check passes. + :param listener_prov_status: Status to set Listeners if check passes + :param listener_ids: List of IDs of listeners to check and lock + (only use this when relevant to the operation) + :param pool_id: ID of the Pool to check and lock (only use this when + relevant to the operation) + :param l7policy_id: ID of the L7Policy to check and lock (only use this + when relevant to the operation) + :returns: bool + """ + listener_ids = listener_ids or [] + # Always set the status requested, regardless of whether we have + # listeners; sometimes pools will be disassociated from a listener + # and we still need the LB locked when Pools or subordinate objects + # are changed.
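The call below is a test-and-set: under a row lock, the status only advances when the current status permits the transition. A runnable sketch of the idea, with a plain dict standing in for the locked row and a simplified status set:

MUTABLE_STATUSES = {'ACTIVE'}

def test_and_set(row, new_status):
    # Refuse the transition unless the current status is mutable.
    if row['provisioning_status'] not in MUTABLE_STATUSES:
        return False
    row['provisioning_status'] = new_status
    return True

lb = {'provisioning_status': 'ACTIVE'}
assert test_and_set(lb, 'PENDING_UPDATE')      # ACTIVE -> PENDING_UPDATE
assert not test_and_set(lb, 'PENDING_DELETE')  # already busy; caller backs off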
+ success = self.load_balancer.test_and_set_provisioning_status( + session, lb_id, lb_prov_status) + if not success: + return success + for listener_id in listener_ids: + self.listener.update(session, listener_id, + provisioning_status=listener_prov_status) + if pool_id: + self.pool.update(session, pool_id, + provisioning_status=lb_prov_status) + if l7policy_id: + self.l7policy.update(session, l7policy_id, + provisioning_status=lb_prov_status) + return success + + def check_quota_met(self, session: Session, _class, project_id, count=1): + """Checks and updates object quotas. + + This method makes sure the project has available quota + for the resource and updates the quota to reflect the + new usage. + + :param session: Context database session + :param _class: Data model object requesting quota + :param project_id: Project ID requesting quota + :param count: Number of objects we're going to create (default=1) + :returns: True if the quota is already met (no room left), False if + quota was available and the in-use count was updated + """ + LOG.debug('Checking quota for project: %(proj)s object: %(obj)s', + {'proj': project_id, 'obj': _class}) + + # Under noauth everything is admin, so no quota + if CONF.api_settings.auth_strategy == consts.NOAUTH: + LOG.debug('Auth strategy is NOAUTH, skipping quota check.') + return False + + if not project_id: + raise exceptions.MissingProjectID() + + self.quotas.ensure_project_exists(project_id) + + # Lock the project record in the database to block other quota checks + # + # Note: You cannot just use the current count as the in-use + # value as we don't want to lock the whole resource table + try: + quotas = (session.query(models.Quotas) + .filter_by(project_id=project_id) + .populate_existing() + .with_for_update() + .first()) + if _class == data_models.LoadBalancer: + # Decide which quota to use + if quotas.load_balancer is None: + lb_quota = CONF.quotas.default_load_balancer_quota + else: + lb_quota = quotas.load_balancer + # Get the current in use count + if not quotas.in_use_load_balancer: + # This is to handle the upgrade case + lb_count = session.query(models.LoadBalancer).filter( + models.LoadBalancer.project_id == project_id, + models.LoadBalancer.provisioning_status != + consts.DELETED).count() + count + else: + lb_count = quotas.in_use_load_balancer + count + # Decide if the quota is met + if lb_count <= lb_quota or lb_quota == consts.QUOTA_UNLIMITED: + quotas.in_use_load_balancer = lb_count + return False + return True + if _class == data_models.Listener: + # Decide which quota to use + if quotas.listener is None: + listener_quota = CONF.quotas.default_listener_quota + else: + listener_quota = quotas.listener + # Get the current in use count + if not quotas.in_use_listener: + # This is to handle the upgrade case + listener_count = session.query(models.Listener).filter( + models.Listener.project_id == project_id, + models.Listener.provisioning_status != + consts.DELETED).count() + count + else: + listener_count = quotas.in_use_listener + count + # Decide if the quota is met + if (listener_count <= listener_quota or + listener_quota == consts.QUOTA_UNLIMITED): + quotas.in_use_listener = listener_count + return False + return True + if _class == data_models.Pool: + # Decide which quota to use + if quotas.pool is None: + pool_quota = CONF.quotas.default_pool_quota + else: + pool_quota = quotas.pool + # Get the current in use count + if not quotas.in_use_pool: + # This is to handle the upgrade case + pool_count = session.query(models.Pool).filter( + models.Pool.project_id == project_id, + 
models.Pool.provisioning_status != + consts.DELETED).count() + count + else: + pool_count = quotas.in_use_pool + count + # Decide if the quota is met + if (pool_count <= pool_quota or + pool_quota == consts.QUOTA_UNLIMITED): + quotas.in_use_pool = pool_count + return False + return True + if _class == data_models.HealthMonitor: + # Decide which quota to use + if quotas.health_monitor is None: + hm_quota = CONF.quotas.default_health_monitor_quota + else: + hm_quota = quotas.health_monitor + # Get the current in use count + if not quotas.in_use_health_monitor: + # This is to handle the upgrade case + hm_count = session.query(models.HealthMonitor).filter( + models.HealthMonitor.project_id == project_id, + models.HealthMonitor.provisioning_status != + consts.DELETED).count() + count + else: + hm_count = quotas.in_use_health_monitor + count + # Decide if the quota is met + if (hm_count <= hm_quota or + hm_quota == consts.QUOTA_UNLIMITED): + quotas.in_use_health_monitor = hm_count + return False + return True + if _class == data_models.Member: + # Decide which quota to use + if quotas.member is None: + member_quota = CONF.quotas.default_member_quota + else: + member_quota = quotas.member + # Get the current in use count + if not quotas.in_use_member: + # This is to handle the upgrade case + member_count = session.query(models.Member).filter( + models.Member.project_id == project_id, + models.Member.provisioning_status != + consts.DELETED).count() + count + else: + member_count = quotas.in_use_member + count + # Decide if the quota is met + if (member_count <= member_quota or + member_quota == consts.QUOTA_UNLIMITED): + quotas.in_use_member = member_count + return False + return True + if _class == data_models.L7Policy: + # Decide which quota to use + if quotas.l7policy is None: + l7policy_quota = CONF.quotas.default_l7policy_quota + else: + l7policy_quota = quotas.l7policy + # Get the current in use count + if not quotas.in_use_l7policy: + # This is to handle the upgrade case + l7policy_count = session.query(models.L7Policy).filter( + models.L7Policy.project_id == project_id, + models.L7Policy.provisioning_status != + consts.DELETED).count() + count + else: + l7policy_count = quotas.in_use_l7policy + count + # Decide if the quota is met + if (l7policy_count <= l7policy_quota or + l7policy_quota == consts.QUOTA_UNLIMITED): + quotas.in_use_l7policy = l7policy_count + return False + return True + if _class == data_models.L7Rule: + # Decide which quota to use + if quotas.l7rule is None: + l7rule_quota = CONF.quotas.default_l7rule_quota + else: + l7rule_quota = quotas.l7rule + # Get the current in use count + if not quotas.in_use_l7rule: + # This is to handle the upgrade case + l7rule_count = session.query(models.L7Rule).filter( + models.L7Rule.project_id == project_id, + models.L7Rule.provisioning_status != + consts.DELETED).count() + count + else: + l7rule_count = quotas.in_use_l7rule + count + # Decide if the quota is met + if (l7rule_count <= l7rule_quota or + l7rule_quota == consts.QUOTA_UNLIMITED): + quotas.in_use_l7rule = l7rule_count + return False + return True + except db_exception.DBDeadlock as e: + LOG.warning('Quota project lock timed out for project: %(proj)s', + {'proj': project_id}) + raise exceptions.ProjectBusyException() from e + return False + + def decrement_quota(self, lock_session, _class, project_id, quantity=1): + """Decrements the object quota for a project + + :param lock_session: Locking database session (autocommit=False) + :param _class: Data model object to decrement 
quota + :param project_id: Project ID to decrement quota on + :param quantity: Quantity of quota to decrement + :returns: None + """ + LOG.debug('Decrementing quota by: %(quant)s for project: %(proj)s ' + 'object: %(obj)s', + {'quant': quantity, 'proj': project_id, 'obj': _class}) + + # Lock the project record in the database to block other quota checks + try: + quotas = (lock_session.query(models.Quotas) + .filter_by(project_id=project_id) + .populate_existing() + .with_for_update() + .first()) + if not quotas: + if not CONF.api_settings.auth_strategy == consts.NOAUTH: + LOG.error('Quota decrement on %(clss)s called on ' + 'project: %(proj)s with no quota record in ' + 'the database.', + {'clss': _class, 'proj': project_id}) + return + if _class == data_models.LoadBalancer: + if (quotas.in_use_load_balancer is not None and + quotas.in_use_load_balancer > 0): + quotas.in_use_load_balancer = ( + quotas.in_use_load_balancer - quantity) + else: + if not CONF.api_settings.auth_strategy == consts.NOAUTH: + LOG.warning('Quota decrement on %(clss)s called on ' + 'project: %(proj)s that would cause a ' + 'negative quota.', + {'clss': _class, 'proj': project_id}) + if _class == data_models.Listener: + if (quotas.in_use_listener is not None and + quotas.in_use_listener > 0): + quotas.in_use_listener = ( + quotas.in_use_listener - quantity) + else: + if not CONF.api_settings.auth_strategy == consts.NOAUTH: + LOG.warning('Quota decrement on %(clss)s called on ' + 'project: %(proj)s that would cause a ' + 'negative quota.', + {'clss': _class, 'proj': project_id}) + if _class == data_models.Pool: + if (quotas.in_use_pool is not None and + quotas.in_use_pool > 0): + quotas.in_use_pool = ( + quotas.in_use_pool - quantity) + else: + if not CONF.api_settings.auth_strategy == consts.NOAUTH: + LOG.warning('Quota decrement on %(clss)s called on ' + 'project: %(proj)s that would cause a ' + 'negative quota.', + {'clss': _class, 'proj': project_id}) + if _class == data_models.HealthMonitor: + if (quotas.in_use_health_monitor is not None and + quotas.in_use_health_monitor > 0): + quotas.in_use_health_monitor = ( + quotas.in_use_health_monitor - quantity) + else: + if not CONF.api_settings.auth_strategy == consts.NOAUTH: + LOG.warning('Quota decrement on %(clss)s called on ' + 'project: %(proj)s that would cause a ' + 'negative quota.', + {'clss': _class, 'proj': project_id}) + if _class == data_models.Member: + if (quotas.in_use_member is not None and + quotas.in_use_member > 0): + quotas.in_use_member = ( + quotas.in_use_member - quantity) + else: + if not CONF.api_settings.auth_strategy == consts.NOAUTH: + LOG.warning('Quota decrement on %(clss)s called on ' + 'project: %(proj)s that would cause a ' + 'negative quota.', + {'clss': _class, 'proj': project_id}) + if _class == data_models.L7Policy: + if (quotas.in_use_l7policy is not None and + quotas.in_use_l7policy > 0): + quotas.in_use_l7policy = ( + quotas.in_use_l7policy - quantity) + else: + if not CONF.api_settings.auth_strategy == consts.NOAUTH: + LOG.warning('Quota decrement on %(clss)s called on ' + 'project: %(proj)s that would cause a ' + 'negative quota.', + {'clss': _class, 'proj': project_id}) + if _class == data_models.L7Rule: + if (quotas.in_use_l7rule is not None and + quotas.in_use_l7rule > 0): + quotas.in_use_l7rule = ( + quotas.in_use_l7rule - quantity) + else: + if not CONF.api_settings.auth_strategy == consts.NOAUTH: + LOG.warning('Quota decrement on %(clss)s called on ' + 'project: %(proj)s that would cause a ' + 'negative quota.', + 
{'clss': _class, 'proj': project_id}) + except db_exception.DBDeadlock as e: + LOG.warning('Quota project lock timed out for project: %(proj)s', + {'proj': project_id}) + raise exceptions.ProjectBusyException() from e + + def get_amphora_stats(self, session, amp_id): + """Gets the statistics for all listeners on an amphora. + + :param session: A Sql Alchemy database session. + :param amp_id: The amphora ID to query. + :returns: An amphora stats dictionary + """ + columns = (list(models.ListenerStatistics.__table__.columns) + + [models.Amphora.load_balancer_id]) + amp_records = ( + session.query(*columns) + .filter(models.ListenerStatistics.amphora_id == amp_id) + .filter(models.ListenerStatistics.amphora_id == + models.Amphora.id).all()) + amp_stats = [] + for amp in amp_records: + amp_stat = {consts.LOADBALANCER_ID: amp.load_balancer_id, + consts.LISTENER_ID: amp.listener_id, + 'id': amp.amphora_id, + consts.ACTIVE_CONNECTIONS: amp.active_connections, + consts.BYTES_IN: amp.bytes_in, + consts.BYTES_OUT: amp.bytes_out, + consts.REQUEST_ERRORS: amp.request_errors, + consts.TOTAL_CONNECTIONS: amp.total_connections} + amp_stats.append(amp_stat) + return amp_stats + + +class LoadBalancerRepository(BaseRepository): + model_class = models.LoadBalancer + + def get_all_API_list(self, session, pagination_helper=None, **filters): + """Get a list of load balancers for the API list call. + + This get_all returns a data set that is only one level deep + in the data graph. This is an optimized query for the API load + balancer list method. + + :param session: A Sql Alchemy database session. + :param pagination_helper: Helper to apply pagination and sorting. + :param filters: Filters to decide which entities should be retrieved. + :returns: [octavia.common.data_model] + """ + + # sub-query load the tables we need + # no-load (blank) the tables we don't need + query_options = ( + subqueryload(models.LoadBalancer.vip), + subqueryload(models.LoadBalancer.additional_vips), + (subqueryload(models.LoadBalancer.vip). + subqueryload(models.Vip.sgs)), + subqueryload(models.LoadBalancer.amphorae), + subqueryload(models.LoadBalancer.pools), + subqueryload(models.LoadBalancer.listeners), + subqueryload(models.LoadBalancer._tags), + noload('*')) + + return super().get_all( + session, pagination_helper=pagination_helper, + query_options=query_options, **filters) + + def test_and_set_provisioning_status(self, session, id, status, + raise_exception=False): + """Tests and sets a load balancer's provisioning status. + + Puts a lock on the load balancer table to check the status of a + load balancer. If the status is ACTIVE then the status of the load + balancer is updated and the method returns True. If the + status is not ACTIVE, then nothing is done and False is returned. + + :param session: A Sql Alchemy database session. + :param id: id of Load Balancer + :param status: Status to set Load Balancer if check passes.
+ :param raise_exception: If True, raise ImmutableObject on failure + :returns: bool + """ + lb = (session.query(self.model_class) + .populate_existing() + .with_for_update() + .filter_by(id=id).one()) + is_delete = status == consts.PENDING_DELETE + acceptable_statuses = ( + consts.DELETABLE_STATUSES + if is_delete else consts.MUTABLE_STATUSES + ) + if lb.provisioning_status not in acceptable_statuses: + if raise_exception: + raise exceptions.ImmutableObject( + resource='Load Balancer', id=id) + return False + lb.provisioning_status = status + session.add(lb) + return True + + def set_status_for_failover(self, session, id, status, + raise_exception=False): + """Tests and sets a load balancer provisioning status. + + Puts a lock on the load balancer table to check the status of a + load balancer. If the status is ACTIVE or ERROR then the status of + the load balancer is updated and the method returns True. If the + status is neither ACTIVE nor ERROR, then nothing is done and False + is returned. + + :param session: A Sql Alchemy database session. + :param id: id of Load Balancer + :param status: Status to set Load Balancer if check passes. + :param raise_exception: If True, raise ImmutableObject on failure + :returns: bool + """ + lb = (session.query(self.model_class) + .populate_existing() + .with_for_update() + .filter_by(id=id).one()) + if lb.provisioning_status not in consts.FAILOVERABLE_STATUSES: + if raise_exception: + raise exceptions.ImmutableObject( + resource='Load Balancer', id=id) + return False + lb.provisioning_status = status + session.add(lb) + return True + + +class VipRepository(BaseRepository): + model_class = models.Vip + + def update(self, session, load_balancer_id, **model_kwargs): + """Updates a vip entity in the database by load_balancer_id.""" + sg_ids = model_kwargs.pop(consts.SG_IDS, None) + + vip = session.query(self.model_class).filter_by( + load_balancer_id=load_balancer_id) + if model_kwargs: + vip.update(model_kwargs) + + # NOTE(gthiemonge) the vip must be updated when sg_ids is [] + # (removal of current sg_ids) + if sg_ids is not None: + vip = vip.first() + vip.sgs = [ + models.VipSecurityGroup( + load_balancer_id=load_balancer_id, + sg_id=sg_id) + for sg_id in sg_ids] + + session.flush() + + +class AdditionalVipRepository(BaseRepository): + model_class = models.AdditionalVip + + def update(self, session, load_balancer_id, subnet_id, + **model_kwargs): + """Updates an additional vip entity in the database. + + Uses load_balancer_id + subnet_id. + """ + session.query(self.model_class).filter_by( + load_balancer_id=load_balancer_id, + subnet_id=subnet_id).update(model_kwargs) + + +class HealthMonitorRepository(BaseRepository): + model_class = models.HealthMonitor + + def get_all_API_list(self, session, pagination_helper=None, **filters): + """Get a list of health monitors for the API list call. + + This get_all returns a data set that is only one level deep + in the data graph. This is an optimized query for the API health + monitor list method. + + :param session: A Sql Alchemy database session. + :param pagination_helper: Helper to apply pagination and sorting. + :param filters: Filters to decide which entities should be retrieved.
+ :returns: [octavia.common.data_model] + """ + + # sub-query load the tables we need + # no-load (blank) the tables we don't need + query_options = ( + subqueryload(models.HealthMonitor.pool), + subqueryload(models.HealthMonitor._tags), + noload('*')) + + return super().get_all( + session, pagination_helper=pagination_helper, + query_options=query_options, **filters) + + +class SessionPersistenceRepository(BaseRepository): + model_class = models.SessionPersistence + + def update(self, session, pool_id, **model_kwargs): + """Updates a session persistence entity in the database by pool_id.""" + session.query(self.model_class).filter_by( + pool_id=pool_id).update(model_kwargs) + + def exists(self, session, pool_id): + """Checks if session persistence exists on a pool.""" + return bool(session.query(self.model_class).filter_by( + pool_id=pool_id).first()) + + +class ListenerCidrRepository(BaseRepository): + model_class = models.ListenerCidr + + def create(self, session, listener_id, allowed_cidrs): + if allowed_cidrs: + for cidr in set(allowed_cidrs): + cidr_dict = {'listener_id': listener_id, 'cidr': cidr} + model = self.model_class(**cidr_dict) + session.add(model) + + def update(self, session, listener_id, allowed_cidrs): + """Updates allowed CIDRs in the database by listener_id.""" + session.query(self.model_class).filter_by( + listener_id=listener_id).delete() + self.create(session, listener_id, allowed_cidrs) + + +class PoolRepository(BaseRepository): + model_class = models.Pool + + def get_all_API_list(self, session, pagination_helper=None, **filters): + """Get a list of pools for the API list call. + + This get_all returns a data set that is only one level deep + in the data graph. This is an optimized query for the API pool + list method. + + :param session: A Sql Alchemy database session. + :param pagination_helper: Helper to apply pagination and sorting. + :param filters: Filters to decide which entities should be retrieved. + :returns: [octavia.common.data_model] + """ + + # sub-query load the tables we need + # no-load (blank) the tables we don't need + query_options = ( + subqueryload(models.Pool._default_listeners), + subqueryload(models.Pool.health_monitor), + subqueryload(models.Pool.l7policies), + (subqueryload(models.Pool.l7policies). + subqueryload(models.L7Policy.l7rules)), + (subqueryload(models.Pool.l7policies). + subqueryload(models.L7Policy.listener)), + subqueryload(models.Pool.load_balancer), + subqueryload(models.Pool.members), + subqueryload(models.Pool.session_persistence), + subqueryload(models.Pool._tags), + noload('*')) + + return super().get_all( + session, pagination_helper=pagination_helper, + query_options=query_options, **filters) + + def get_children_count(self, session, pool_id): + hm_count = session.query(models.HealthMonitor).filter( + models.HealthMonitor.pool_id == pool_id, + models.HealthMonitor.provisioning_status != consts.DELETED).count() + member_count = session.query(models.Member).filter( + models.Member.pool_id == pool_id, + models.Member.provisioning_status != consts.DELETED).count() + + return (hm_count, member_count) + + +class MemberRepository(BaseRepository): + model_class = models.Member + + def get_all_API_list(self, session, pagination_helper=None, + limited_graph=False, **filters): + """Get a list of members for the API list call. + + This get_all returns a data set that is only one level deep + in the data graph. This is an optimized query for the API member + list method. + + :param session: A Sql Alchemy database session. 
+ :param pagination_helper: Helper to apply pagination and sorting. + :param limited_graph: Option to avoid recursion iteration through all + nodes in the graph via to_data_model + :param filters: Filters to decide which entities should be retrieved. + :returns: [octavia.common.data_model] + """ + + # sub-query load the tables we need + # no-load (blank) the tables we don't need + query_options = ( + subqueryload(models.Member.pool), + subqueryload(models.Member._tags), + noload('*')) + + return super().get_all( + session, pagination_helper=pagination_helper, + query_options=query_options, limited_graph=limited_graph, + **filters) + + def delete_members(self, session, member_ids): + """Batch deletes members from a pool.""" + self.delete_batch(session, member_ids) + + def update_pool_members(self, session, pool_id, **model_kwargs): + """Updates all of the members of a pool. + + :param session: A Sql Alchemy database session. + :param pool_id: ID of the pool to update members on. + :param model_kwargs: Entity attributes that should be updated. + :returns: None + """ + session.query(self.model_class).filter_by( + pool_id=pool_id).update(model_kwargs) + + +class ListenerRepository(BaseRepository): + model_class = models.Listener + + def get_all_API_list(self, session, pagination_helper=None, **filters): + """Get a list of listeners for the API list call. + + This get_all returns a data set that is only one level deep + in the data graph. This is an optimized query for the API listener + list method. + + :param session: A Sql Alchemy database session. + :param pagination_helper: Helper to apply pagination and sorting. + :param filters: Filters to decide which entities should be retrieved. + :returns: [octavia.common.data_model] + """ + + # sub-query load the tables we need + # no-load (blank) the tables we don't need + query_options = ( + subqueryload(models.Listener.l7policies), + subqueryload(models.Listener.load_balancer), + subqueryload(models.Listener.sni_containers), + subqueryload(models.Listener._tags), + subqueryload(models.Listener.allowed_cidrs), + noload('*')) + + return super().get_all( + session, pagination_helper=pagination_helper, + query_options=query_options, **filters) + + def _find_next_peer_port(self, session, lb_id): + """Finds the next available peer port on the load balancer.""" + max_peer_port = 0 + load_balancer = session.query(models.LoadBalancer).filter_by( + id=lb_id).first() + for listener in load_balancer.listeners: + if (listener.peer_port is not None and + listener.peer_port > max_peer_port): + max_peer_port = listener.peer_port + if max_peer_port == 0: + return consts.HAPROXY_BASE_PEER_PORT + return max_peer_port + 1 + + def _pool_check(self, session, pool_id, listener_id=None, + lb_id=None): + """Sanity checks for default_pool_id if specified.""" + # Pool must exist on same loadbalancer as listener + pool_db = None + if listener_id: + lb_subquery = (session.query(self.model_class.load_balancer_id). + filter_by(id=listener_id).subquery()) + pool_db = (session.query(models.Pool). + filter_by(id=pool_id). + filter(models.LoadBalancer.id.in_(lb_subquery)).first()) + elif lb_id: + pool_db = (session.query(models.Pool). + filter_by(id=pool_id).
+ filter_by(load_balancer_id=lb_id).first()) + if not pool_db: + raise exceptions.NotFound( + resource=data_models.Pool._name(), id=pool_id) + return pool_db + + def has_default_pool(self, session, id): + """Checks if a listener has a default pool.""" + listener = self.get(session, id=id) + return bool(listener.default_pool) + + def update(self, session, id, **model_kwargs): + listener_db = session.query(self.model_class).filter_by( + id=id).first() + if not listener_db: + raise exceptions.NotFound( + resource=data_models.Listener._name(), id=id) + tags = model_kwargs.pop('tags', None) + if tags is not None: + resource = session.get(self.model_class, id) + resource.tags = tags + # Verify any newly specified default_pool_id exists + default_pool_id = model_kwargs.get('default_pool_id') + if default_pool_id: + self._pool_check(session, default_pool_id, listener_id=id) + if 'sni_containers' in model_kwargs: + # sni_container_refs is being updated. It is either being set + # or unset/cleared. We need to update it on the DB side. + containers = model_kwargs.pop('sni_containers', []) or [] + listener_db.sni_containers = [] + if containers: + listener_db.sni_containers = [ + models.SNI(listener_id=id, + tls_container_id=container_ref) + for container_ref in containers] + if 'allowed_cidrs' in model_kwargs: + # allowed_cidrs is being updated. It is either being set or + # unset/cleared. We need to update it on the DB side. + allowed_cidrs = model_kwargs.pop('allowed_cidrs', []) or [] + listener_db.allowed_cidrs = [] + if allowed_cidrs: + listener_db.allowed_cidrs = [ + models.ListenerCidr(listener_id=id, cidr=cidr) + for cidr in allowed_cidrs] + listener_db.update(model_kwargs) + + def create(self, session, **model_kwargs): + """Creates a new Listener with some validation.""" + listener_id = model_kwargs.get('id') + allowed_cidrs = set(model_kwargs.pop('allowed_cidrs', []) or []) + model_kwargs['allowed_cidrs'] = [ + models.ListenerCidr(listener_id=listener_id, cidr=cidr) + for cidr in allowed_cidrs] + model = self.model_class(**model_kwargs) + if model.default_pool_id: + model.default_pool = self._pool_check( + session, model.default_pool_id, + lb_id=model.load_balancer_id) + if model.peer_port is None: + model.peer_port = self._find_next_peer_port( + session, lb_id=model.load_balancer_id) + session.add(model) + return model.to_data_model() + + def prov_status_active_if_not_error(self, session, listener_id): + """Update provisioning_status to ACTIVE if not already in ERROR.""" + (session.query(self.model_class).filter_by(id=listener_id). + # Don't mark ERROR or already ACTIVE as ACTIVE + filter(~self.model_class.provisioning_status.in_( + [consts.ERROR, consts.ACTIVE])).
+ update({self.model_class.provisioning_status: consts.ACTIVE}, + synchronize_session='fetch')) + + def get_port_protocol_cidr_for_lb(self, session, loadbalancer_id): + # readability variables + Listener = self.model_class + ListenerCidr = models.ListenerCidr + + stmt = (select(Listener.protocol, + ListenerCidr.cidr, + Listener.protocol_port.label(consts.PORT)) + .select_from(Listener) + .join(models.ListenerCidr, + Listener.id == ListenerCidr.listener_id, isouter=True) + .where(Listener.load_balancer_id == loadbalancer_id)) + rows = session.execute(stmt) + + return [utils.map_protocol_to_nftable_protocol(u._asdict()) for u + in rows.all()] + + +class ListenerStatisticsRepository(BaseRepository): + model_class = models.ListenerStatistics + + def replace(self, session, stats_obj): + """Create or override a listener's statistics (insert/update) + + :param session: A Sql Alchemy database session + :param stats_obj: Listener statistics object to store + :type stats_obj: octavia.common.data_models.ListenerStatistics + """ + if not stats_obj.amphora_id: + # amphora_id can't be null, so clone the listener_id + stats_obj.amphora_id = stats_obj.listener_id + + # TODO(johnsom): This can be simplified/optimized using an "upsert" + count = session.query(self.model_class).filter_by( + listener_id=stats_obj.listener_id, + amphora_id=stats_obj.amphora_id).count() + if count: + session.query(self.model_class).filter_by( + listener_id=stats_obj.listener_id, + amphora_id=stats_obj.amphora_id).update( + stats_obj.get_stats(), + synchronize_session=False) + else: + self.create(session, **stats_obj.db_fields()) + + def increment(self, session, delta_stats): + """Updates a listener's statistics, incrementing by the passed deltas. + + :param session: A Sql Alchemy database session + :param delta_stats: Listener statistics deltas to add + :type delta_stats: octavia.common.data_models.ListenerStatistics + """ + if not delta_stats.amphora_id: + # amphora_id can't be null, so clone the listener_id + delta_stats.amphora_id = delta_stats.listener_id + + # TODO(johnsom): This can be simplified/optimized using an "upsert" + count = session.query(self.model_class).filter_by( + listener_id=delta_stats.listener_id, + amphora_id=delta_stats.amphora_id).count() + if count: + existing_stats = ( + session.query(self.model_class) + .populate_existing() + .with_for_update() + .filter_by( + listener_id=delta_stats.listener_id, + amphora_id=delta_stats.amphora_id).one()) + existing_stats += delta_stats + existing_stats.active_connections = ( + delta_stats.active_connections) + else: + self.create(session, **delta_stats.db_fields()) + + def update(self, session, listener_id, **model_kwargs): + """Updates a listener's statistics, overriding with the passed values. + + :param session: A Sql Alchemy database session + :param listener_id: The UUID of the listener to update + :type listener_id: str + :param model_kwargs: Entity attributes that should be updated + + """ + session.query(self.model_class).filter_by( + listener_id=listener_id).update(model_kwargs) + + +class AmphoraRepository(BaseRepository): + model_class = models.Amphora + + def get_all_API_list(self, session, pagination_helper=None, **filters): + """Get a list of amphorae for the API list call. + + This get_all returns a data set that is only one level deep + in the data graph. This is an optimized query for the API amphora + list method. + + :param session: A Sql Alchemy database session. + :param pagination_helper: Helper to apply pagination and sorting. 
+ :param filters: Filters to decide which entities should be retrieved. + :returns: [octavia.common.data_model] + """ + + # sub-query load the tables we need + # no-load (blank) the tables we don't need + query_options = ( + subqueryload(models.Amphora.load_balancer), + noload('*')) + + return super().get_all( + session, pagination_helper=pagination_helper, + query_options=query_options, **filters) + + def associate(self, session, load_balancer_id, amphora_id): + """Associates an amphora with a load balancer. + + :param session: A Sql Alchemy database session. + :param load_balancer_id: The load balancer id to associate + :param amphora_id: The amphora id to associate + """ + load_balancer = session.query(models.LoadBalancer).filter_by( + id=load_balancer_id).first() + amphora = session.query(self.model_class).filter_by( + id=amphora_id).first() + load_balancer.amphorae.append(amphora) + + @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) + def allocate_and_associate(self, session, load_balancer_id, + availability_zone=None): + """Allocate an amphora for a load balancer. + + For v0.5 this is simple: find a free amp and + associate the lb. In the future this needs to be + enhanced. + + :param session: A Sql Alchemy database session. + :param load_balancer_id: The load balancer id to associate + :returns: The amphora ID for the load balancer or None + """ + filters = { + 'status': 'READY', + 'load_balancer_id': None + } + if availability_zone: + LOG.debug("Filtering amps by zone: %s", availability_zone) + filters['cached_zone'] = availability_zone + + amp = (session.query(self.model_class) + .populate_existing() + .with_for_update() + .filter_by(**filters).first()) + + if amp is None: + return None + + if availability_zone: + LOG.debug("Found amp: %s in %s", amp.id, amp.cached_zone) + amp.status = 'ALLOCATED' + amp.load_balancer_id = load_balancer_id + + return amp.to_data_model() + + @staticmethod + def get_lb_for_amphora(session, amphora_id): + """Get all of the load balancers on an amphora. + + :param session: A Sql Alchemy database session. + :param amphora_id: The amphora id to list the load balancers from + :returns: [octavia.common.data_model] + """ + db_lb = ( + # Get LB records + session.query(models.LoadBalancer) + # Joined to amphora records + .filter(models.LoadBalancer.id == + models.Amphora.load_balancer_id) + # For just this amphora + .filter(models.Amphora.id == amphora_id) + # Where the amphora is not DELETED + .filter(models.Amphora.status != consts.DELETED) + # And the LB is also not DELETED + .filter(models.LoadBalancer.provisioning_status != + consts.DELETED)).first() + if db_lb: + return db_lb.to_data_model() + return None + + def get_cert_expiring_amphora(self, session): + """Retrieves an amphora whose cert is close to expiring. + + :param session: A Sql Alchemy database session.
+ :returns: one amphora with expiring certificate + """ + # get amphorae with certs that will expire within the + # configured buffer period, so we can rotate their certs ahead of time + expired_seconds = CONF.house_keeping.cert_expiry_buffer + expired_date = timeutils.utcnow() + datetime.timedelta( + seconds=expired_seconds) + + amp = (session.query(self.model_class) + .populate_existing() + .with_for_update() + .filter( + self.model_class.status.notin_( + [consts.DELETED, consts.PENDING_DELETE]), + self.model_class.cert_busy == false(), + self.model_class.cert_expiration < expired_date) + .first()) + + if amp is None: + return None + + amp.cert_busy = True + + return amp.to_data_model() + + def get_lb_for_health_update(self, session, amphora_id): + """This method is for the health manager status update process. + + This is a time sensitive query that occurs often. + It is an explicit query as the ORM produces a poorly + optimized query. + + Use extreme caution making any changes to this query + as it can impact the scalability of the health manager. + All changes should be analyzed using SQL "EXPLAIN" to + make sure only indexes are being used. + Changes should also be evaluated using the stressHM tool. + + Note: The returned object is flat and not a graph representation + of the load balancer as it is not needed. This is on + purpose to optimize the processing time. This is not in + the normal data model objects. + + :param session: A Sql Alchemy database session. + :param amphora_id: The amphora ID to lookup the load balancer for. + :returns: A dictionary containing the required load balancer details. + """ + rows = session.execute(text( + "SELECT load_balancer.id, load_balancer.enabled, " + "load_balancer.provisioning_status AS lb_prov_status, " + "load_balancer.operating_status AS lb_op_status, " + "listener.id AS list_id, " + "listener.operating_status AS list_op_status, " + "listener.enabled AS list_enabled, " + "listener.protocol AS list_protocol, " + "pool.id AS pool_id, " + "pool.operating_status AS pool_op_status, " + "member.id AS member_id, " + "member.operating_status AS mem_op_status from " + "amphora JOIN load_balancer ON " + "amphora.load_balancer_id = load_balancer.id LEFT JOIN " + "listener ON load_balancer.id = listener.load_balancer_id " + "LEFT JOIN pool ON load_balancer.id = pool.load_balancer_id " + "LEFT JOIN member ON pool.id = member.pool_id WHERE " + "amphora.id = :amp_id AND amphora.status != :deleted AND " + "load_balancer.provisioning_status != :deleted;").bindparams( + amp_id=amphora_id, deleted=consts.DELETED)) + + lb = {} + listeners = {} + pools = {} + for row in rows.mappings(): + if not lb: + lb['id'] = row['id'] + lb['enabled'] = row['enabled'] == 1 + lb['provisioning_status'] = row['lb_prov_status'] + lb['operating_status'] = row['lb_op_status'] + if row['list_id'] and row['list_id'] not in listeners: + listener = {'operating_status': row['list_op_status'], + 'protocol': row['list_protocol'], + 'enabled': row['list_enabled']} + listeners[row['list_id']] = listener + if row['pool_id']: + if row['pool_id'] in pools and row['member_id']: + member = {'operating_status': row['mem_op_status']} + pools[row['pool_id']]['members'][row['member_id']] = member + else: + pool = {'operating_status': row['pool_op_status'], + 'members': {}} + if row['member_id']: + member = {'operating_status': row['mem_op_status']} + pool['members'][row['member_id']] = member + pools[row['pool_id']] = pool + + if listeners: + lb['listeners'] = listeners + if pools: + lb['pools'] = 
pools + + return lb + + def test_and_set_status_for_delete(self, lock_session, id): + """Tests and sets an amphora status. + + Puts a lock on the amphora table to check the status of the + amphora. The status must be ERROR to successfully update the + amphora status. + + :param lock_session: A Sql Alchemy database session. + :param id: id of the amphora + :raises ImmutableObject: The amphora is not in a state that can be + deleted. + :raises NoResultFound: The amphora was not found or already deleted. + :returns: None + """ + amp = (lock_session.query(self.model_class) + .populate_existing() + .with_for_update() + .filter_by(id=id) + .filter(self.model_class.status != consts.DELETED).one()) + if amp.status != consts.ERROR: + raise exceptions.ImmutableObject(resource=consts.AMPHORA, id=id) + amp.status = consts.PENDING_DELETE + lock_session.flush() + + def get_amphorae_ids_on_lb(self, session, lb_id): + """Returns a list of amphora IDs associated with the load balancer. + + :param session: A Sql Alchemy database session. + :param lb_id: A load balancer ID. + :returns: A list of amphora IDs + """ + return session.scalars( + select( + self.model_class.id + ).where( + self.model_class.load_balancer_id == lb_id + )).all() + + +class AmphoraBuildReqRepository(BaseRepository): + model_class = models.AmphoraBuildRequest + + def add_to_build_queue(self, session, amphora_id=None, priority=None): + """Adds the build request to the table.""" + model = self.model_class(amphora_id=amphora_id, priority=priority) + session.add(model) + + def update_req_status(self, session, amphora_id=None): + """Updates the request status.""" + (session.query(self.model_class) + .filter_by(amphora_id=amphora_id) + .update({self.model_class.status: 'BUILDING'})) + + def get_highest_priority_build_req(self, session): + """Fetches build request with highest priority and least created_time. + + priority 20 = failover (highest) + priority 40 = create_loadbalancer (lowest) + :param session: A Sql Alchemy database session. + :returns: amphora_id corresponding to highest priority and least created + time in 'WAITING' status. + """ + return (session.query(self.model_class.amphora_id) + .order_by(self.model_class.status.desc()) + .order_by(self.model_class.priority.asc()) + .order_by(self.model_class.created_time.asc()) + .first())[0] + + def delete_all(self, session): + "Deletes all the build requests." + session.query(self.model_class).delete() + + +class AmphoraBuildSlotsRepository(BaseRepository): + model_class = models.AmphoraBuildSlots + + def get_used_build_slots_count(self, session): + """Gets the number of build slots in use. + + :returns: Number of current build slots.
+ """ + count = session.query(self.model_class.slots_used).one() + return count[0] + + def update_count(self, session, action='http://github.com/increment'): + """Increments/Decrements/Resets the number of build_slots used.""" + if action == 'increment': + session.query(self.model_class).filter_by(id=1).update( + {self.model_class.slots_used: + self.get_used_build_slots_count(session) + 1}) + elif action == 'decrement': + session.query(self.model_class).filter_by(id=1).update( + {self.model_class.slots_used: + self.get_used_build_slots_count(session) - 1}) + elif action == 'reset': + session.query(self.model_class).filter_by(id=1).update( + {self.model_class.slots_used: 0}) + + +class SNIRepository(BaseRepository): + model_class = models.SNI + + def update(self, session, listener_id=None, tls_container_id=None, + **model_kwargs): + """Updates an SNI entity in the database.""" + if not listener_id and tls_container_id: + raise exceptions.MissingArguments + if listener_id: + session.query(self.model_class).filter_by( + listener_id=listener_id).update(model_kwargs) + elif tls_container_id: + session.query(self.model_class).filter_by( + tls_container_id=tls_container_id).update(model_kwargs) + + +class AmphoraHealthRepository(BaseRepository): + model_class = models.AmphoraHealth + + def update(self, session, amphora_id, **model_kwargs): + """Updates a healthmanager entity in the database by amphora_id.""" + session.query(self.model_class).filter_by( + amphora_id=amphora_id).update(model_kwargs) + + def replace(self, session, amphora_id, **model_kwargs): + """replace or insert amphora into database.""" + count = session.query(self.model_class).filter_by( + amphora_id=amphora_id).count() + if count: + session.query(self.model_class).filter_by( + amphora_id=amphora_id).update(model_kwargs, + synchronize_session=False) + else: + model_kwargs['amphora_id'] = amphora_id + self.create(session, **model_kwargs) + + def check_amphora_health_expired(self, session, amphora_id, exp_age=None): + """check if a specific amphora is expired in the amphora_health table + + :param session: A Sql Alchemy database session. + :param amphora_id: id of an amphora object + :param exp_age: A standard datetime delta which is used to see for how + long can an amphora live without updates before it is + considered expired (default: + CONF.house_keeping.amphora_expiry_age) + :returns: boolean + """ + if not exp_age: + exp_age = datetime.timedelta( + seconds=CONF.house_keeping.amphora_expiry_age) + + expiry_time = timeutils.utcnow() - exp_age + + amphora_model = ( + session.query(models.AmphoraHealth) + .filter_by(amphora_id=amphora_id) + .filter(models.AmphoraHealth.last_update > expiry_time) + ).first() + # This will return a value if: + # * there is an entry in the table for this amphora_id + # AND + # * the entry was last updated more recently than our expiry_time + # Receiving any value means that the amp is unexpired. + + # In contrast, we receive no value if: + # * there is no entry for this amphora_id + # OR + # * the entry was last updated before our expiry_time + # In this case, the amphora is expired. + return amphora_model is None + + def get_stale_amphora(self, + lock_session: Session) -> Optional[models.Amphora]: + """Retrieves a stale amphora from the health manager database. + + :param lock_session: A Sql Alchemy database autocommit session. 
+ :returns: [octavia.common.data_model] + """ + timeout = CONF.health_manager.heartbeat_timeout + expired_time = timeutils.utcnow() - datetime.timedelta( + seconds=timeout) + + # Update any amphorae that were previously FAILOVER_STOPPED + # but are no longer expired. + self.update_failover_stopped(lock_session, expired_time) + + # Handle expired amphora + expired_ids_query = select(self.model_class.amphora_id).where( + self.model_class.busy == false()).where( + self.model_class.last_update < expired_time).subquery() + + expired_count = lock_session.scalar( + select(func.count()).select_from(expired_ids_query)) + + threshold = CONF.health_manager.failover_threshold + if threshold is not None and expired_count >= threshold: + LOG.error('Stale amphora count reached the threshold ' + '(%(th)s). %(count)s amphorae were set into ' + 'FAILOVER_STOPPED status.', + {'th': threshold, 'count': expired_count}) + lock_session.execute( + update( + models.Amphora + ).where( + models.Amphora.status.notin_( + [consts.DELETED, consts.PENDING_DELETE]) + ).where( + models.Amphora.id.in_(expired_ids_query) + ).values( + status=consts.AMPHORA_FAILOVER_STOPPED + ).execution_options(synchronize_session="fetch")) + return None + + # We don't want to attempt to failover amphorae that are not + # currently in the ALLOCATED or FAILOVER_STOPPED state. + # i.e. Not DELETED, PENDING_*, etc. + allocated_amp_ids_subquery = ( + select(models.Amphora.id).where( + models.Amphora.status.in_( + [consts.AMPHORA_ALLOCATED, + consts.AMPHORA_FAILOVER_STOPPED]))) + + # Pick one expired amphora for automatic failover + amp_health = lock_session.query( + self.model_class + ).populate_existing( + ).with_for_update( + ).filter( + self.model_class.amphora_id.in_(expired_ids_query) + ).filter( + self.model_class.amphora_id.in_(allocated_amp_ids_subquery) + ).order_by( + func.random() + ).limit(1).first() + + if amp_health is None: + return None + + amp_health.busy = True + + return amp_health.to_data_model() + + def update_failover_stopped(self, lock_session: Session, + expired_time: datetime) -> None: + """Updates the status of amps that are FAILOVER_STOPPED.""" + # Update any FAILOVER_STOPPED amphorae that are no longer stale + # back to ALLOCATED. + # Note: This uses sqlalchemy 2.0 syntax + not_expired_ids_subquery = ( + select(self.model_class.amphora_id).where( + self.model_class.busy == false() + ).where( + self.model_class.last_update >= expired_time + )) + + # Note: mysql and sqlite do not support RETURNING, so we cannot + # get back the affected amphora IDs. (09/2022) + lock_session.execute( + update(models.Amphora).where( + models.Amphora.status == consts.AMPHORA_FAILOVER_STOPPED + ).where( + models.Amphora.id.in_(not_expired_ids_subquery) + ).values( + status=consts.AMPHORA_ALLOCATED + ).execution_options(synchronize_session="fetch")) + + +class VRRPGroupRepository(BaseRepository): + model_class = models.VRRPGroup + + def update(self, session, load_balancer_id, **model_kwargs): + """Updates a VRRPGroup entry by load_balancer_id.""" + session.query(self.model_class).filter_by( + load_balancer_id=load_balancer_id).update(model_kwargs) + + +class L7RuleRepository(BaseRepository): + model_class = models.L7Rule + + def get_all_API_list(self, session, pagination_helper=None, **filters): + """Get a list of L7 Rules for the API list call. + + This get_all returns a data set that is only one level deep + in the data graph. This is an optimized query for the API L7 Rule + list method. + + :param session: A Sql Alchemy database session.
+ :param pagination_helper: Helper to apply pagination and sorting. + :param filters: Filters to decide which entities should be retrieved. + :returns: [octavia.common.data_model] + """ + + # sub-query load the tables we need + # no-load (blank) the tables we don't need + query_options = ( + subqueryload(models.L7Rule.l7policy), + subqueryload(models.L7Rule._tags), + noload('*')) + + return super().get_all( + session, pagination_helper=pagination_helper, + query_options=query_options, **filters) + + def update(self, session, id, **model_kwargs): + l7rule_db = session.query(self.model_class).filter_by( + id=id).first() + if not l7rule_db: + raise exceptions.NotFound( + resource=data_models.L7Rule._name(), id=id) + + l7rule_dict = l7rule_db.to_data_model().to_dict() + # Ignore values that are None + for k, v in model_kwargs.items(): + if v is not None: + l7rule_dict.update({k: v}) + # Clear out the 'key' attribute for rule types that don't use it. + if ('type' in l7rule_dict.keys() and + l7rule_dict['type'] in (consts.L7RULE_TYPE_HOST_NAME, + consts.L7RULE_TYPE_PATH, + consts.L7RULE_TYPE_FILE_TYPE)): + l7rule_dict['key'] = None + model_kwargs.update({'key': None}) + validate.l7rule_data(self.model_class(**l7rule_dict)) + l7rule_db.update(model_kwargs) + + l7rule_db = self.get(session, id=id) + return l7rule_db + + def create(self, session, **model_kwargs): + if not model_kwargs.get('id'): + model_kwargs.update(id=uuidutils.generate_uuid()) + if model_kwargs.get('l7policy_id'): + l7policy_db = session.query(models.L7Policy).filter_by( + id=model_kwargs.get('l7policy_id')).first() + model_kwargs.update(l7policy=l7policy_db) + l7rule = self.model_class(**model_kwargs) + validate.l7rule_data(l7rule) + session.add(l7rule) + + l7rule_db = self.get(session, id=l7rule.id) + return l7rule_db + + +class L7PolicyRepository(BaseRepository): + model_class = models.L7Policy + + def _pool_check(self, session, pool_id, lb_id, project_id): + """Sanity checks for the redirect_pool if specified.""" + pool_db = (session.query(models.Pool). + filter_by(id=pool_id). + filter_by(project_id=project_id). + filter_by(load_balancer_id=lb_id).first()) + if not pool_db: + raise exceptions.NotFound( + resource=data_models.Pool._name(), id=pool_id) + + def _validate_l7policy_pool_data(self, session, l7policy): + """Does validations on a given L7 policy.""" + if l7policy.action == consts.L7POLICY_ACTION_REDIRECT_TO_POOL: + session.expire(session.query(models.Listener).filter_by( + id=l7policy.listener_id).first()) + listener = (session.query(models.Listener). 
+ filter_by(id=l7policy.listener_id).first()) + self._pool_check(session, l7policy.redirect_pool_id, + listener.load_balancer_id, listener.project_id) + + def get_all(self, session, pagination_helper=None, **filters): + deleted = filters.pop('show_deleted', True) + query = session.query(self.model_class).filter_by( + **filters) + + if not deleted: + query = query.filter( + self.model_class.provisioning_status != consts.DELETED) + + if pagination_helper: + model_list, links = pagination_helper.apply( + query, self.model_class) + else: + links = None + model_list = query.order_by(self.model_class.position).all() + + data_model_list = [model.to_data_model() for model in model_list] + return data_model_list, links + + def get_all_API_list(self, session, pagination_helper=None, **filters): + deleted = filters.pop('show_deleted', True) + query = session.query(self.model_class).filter_by( + **filters) + + query = query.options( + subqueryload(models.L7Policy.l7rules), + subqueryload(models.L7Policy.listener), + subqueryload(models.L7Policy.redirect_pool), + subqueryload(models.L7Policy._tags), + noload('*')) + + if not deleted: + query = query.filter( + self.model_class.provisioning_status != consts.DELETED) + + if pagination_helper: + model_list, links = pagination_helper.apply( + query, self.model_class) + else: + links = None + model_list = query.order_by(self.model_class.position).all() + + data_model_list = [model.to_data_model() for model in model_list] + return data_model_list, links + + def update(self, session, id, **model_kwargs): + l7policy_db = session.query(self.model_class).filter_by( + id=id).first() + if not l7policy_db: + raise exceptions.NotFound( + resource=data_models.L7Policy._name(), id=id) + + # Necessary to work around unexpected / idiotic behavior of + # the SQLAlchemy Orderinglist extension if the position changes. + position = model_kwargs.pop('position', None) + if position == l7policy_db.position: + position = None + + model_kwargs.update(listener_id=l7policy_db.listener_id) + l7policy = self.model_class( + **validate.sanitize_l7policy_api_args(model_kwargs)) + self._validate_l7policy_pool_data(session, l7policy) + + if l7policy.action: + model_kwargs.update(action=l7policy.action) + if l7policy.action == consts.L7POLICY_ACTION_REJECT: + model_kwargs.update(redirect_url=None) + model_kwargs.update(redirect_pool_id=None) + model_kwargs.update(redirect_prefix=None) + model_kwargs.update(redirect_http_code=None) + elif (l7policy.action == + consts.L7POLICY_ACTION_REDIRECT_TO_URL): + model_kwargs.update(redirect_pool_id=None) + model_kwargs.update(redirect_prefix=None) + elif (l7policy.action == + consts.L7POLICY_ACTION_REDIRECT_TO_POOL): + model_kwargs.update(redirect_url=None) + model_kwargs.update(redirect_prefix=None) + model_kwargs.update(redirect_http_code=None) + elif (l7policy.action == + consts.L7POLICY_ACTION_REDIRECT_PREFIX): + model_kwargs.update(redirect_url=None) + model_kwargs.update(redirect_pool_id=None) + + l7policy_db.update(model_kwargs) + + # Position manipulation must happen outside the other alterations + # in the previous transaction + if position is not None: + listener = (session.query(models.Listener). 
+ filter_by(id=l7policy_db.listener_id).first()) + # Immediate refresh, as we have found that sqlalchemy will + # sometimes cache the above query + session.refresh(listener) + l7policy_db = listener.l7policies.pop(l7policy_db.position - 1) + listener.l7policies.insert(position - 1, l7policy_db) + listener.l7policies.reorder() + session.flush() + + return self.get(session, id=id) + + def create(self, session, **model_kwargs): + # We must append the new policy to the end of the collection. We + # later re-insert it wherever it was requested to appear in order. + # This is to work around unexpected / idiotic behavior of the + # SQLAlchemy orderinglist extension. + position = model_kwargs.pop('position', None) + model_kwargs.update(position=consts.MAX_POLICY_POSITION) + if not model_kwargs.get('id'): + model_kwargs.update(id=uuidutils.generate_uuid()) + if model_kwargs.get('redirect_pool_id'): + pool_db = session.query(models.Pool).filter_by( + id=model_kwargs.get('redirect_pool_id')).first() + model_kwargs.update(redirect_pool=pool_db) + if model_kwargs.get('listener_id'): + listener_db = session.query(models.Listener).filter_by( + id=model_kwargs.get('listener_id')).first() + model_kwargs.update(listener=listener_db) + l7policy = self.model_class( + **validate.sanitize_l7policy_api_args(model_kwargs, + create=True)) + self._validate_l7policy_pool_data(session, l7policy) + session.add(l7policy) + session.flush() + + # Must be done outside the transaction which creates the L7Policy + listener = (session.query(models.Listener). + filter_by(id=l7policy.listener_id).first()) + # Immediate refresh, as we have found that sqlalchemy will sometimes + # cache the above query + session.refresh(listener) + session.refresh(l7policy) + + if position is not None and position < len(listener.l7policies) + 1: + # New L7Policy will always be at the end of the list + l7policy_db = listener.l7policies.pop() + listener.l7policies.insert(position - 1, l7policy_db) + + listener.l7policies.reorder() + session.flush() + l7policy.updated_at = None + return self.get(session, id=l7policy.id) + + def delete(self, session, id, **filters): + l7policy_db = session.query(self.model_class).filter_by( + id=id).first() + if not l7policy_db: + raise exceptions.NotFound( + resource=data_models.L7Policy._name(), id=id) + listener_id = l7policy_db.listener_id + session.delete(l7policy_db) + session.flush() + + # Must do reorder outside of the delete transaction. + listener = (session.query(models.Listener). + filter_by(id=listener_id).first()) + # Immediate refresh, as we have found that sqlalchemy will + # sometimes cache the above query + session.refresh(listener) + listener.l7policies.reorder() + session.flush() + + +class QuotasRepository(BaseRepository): + model_class = models.Quotas + + def update(self, session, project_id, **model_kwargs): + kwargs_quota = model_kwargs['quota'] + quotas = ( + session.query(self.model_class) + .filter_by(project_id=project_id) + .populate_existing() + .with_for_update().first()) + if not quotas: + quotas = models.Quotas(project_id=project_id) + + for key, val in kwargs_quota.items(): + setattr(quotas, key, val) + session.add(quotas) + session.flush() + return self.get(session, project_id=project_id) + + # Since this is for the initial quota record creation it locks the table + # which can lead to recoverable deadlocks. Thus we use the deadlock + # retry wrapper here. This may not be appropriate for other sessions + # and or queries. Use with caution. 
+ @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) + def ensure_project_exists(self, project_id): + with db_api.session().begin() as session: + quotas = self.get(session, project_id=project_id) + if not quotas: + # Make sure we have a record to lock + self.update(session, project_id, quota={}) + session.commit() + + def delete(self, session, project_id): + quotas = ( + session.query(self.model_class) + .filter_by(project_id=project_id) + .populate_existing() + .with_for_update().first()) + if not quotas: + raise exceptions.NotFound( + resource=data_models.Quotas._name(), id=project_id) + quotas.health_monitor = None + quotas.load_balancer = None + quotas.listener = None + quotas.member = None + quotas.pool = None + quotas.l7policy = None + quotas.l7rule = None + session.flush() + + +class _GetALLExceptDELETEDIdMixin: + + def get_all(self, session, pagination_helper=None, + query_options=None, **filters): + + """Retrieves a list of entities from the database. + + This filters the "DELETED" placeholder from the list. + + :param session: A Sql Alchemy database session. + :param pagination_helper: Helper to apply pagination and sorting. + :param query_options: Optional query options to apply. + :param filters: Filters to decide which entities should be retrieved. + :returns: [octavia.common.data_model] + """ + query = session.query(self.model_class).filter_by(**filters) + if query_options: + query = query.options(query_options) + + if hasattr(self.model_class, 'id'): + query = query.filter(self.model_class.id != consts.NIL_UUID) + else: + query = query.filter(self.model_class.name != consts.NIL_UUID) + + if pagination_helper: + model_list, links = pagination_helper.apply( + query, self.model_class) + else: + links = None + model_list = query.all() + + data_model_list = [model.to_data_model() for model in model_list] + return data_model_list, links + + +class FlavorRepository(_GetALLExceptDELETEDIdMixin, BaseRepository): + model_class = models.Flavor + + def get_flavor_metadata_dict(self, session, flavor_id): + flavor_metadata_json = ( + session.query(models.FlavorProfile.flavor_data) + .filter(models.Flavor.id == flavor_id) + .filter( + models.Flavor.flavor_profile_id == models.FlavorProfile.id) + .one()[0]) + result_dict = ({} if flavor_metadata_json is None + else jsonutils.loads(flavor_metadata_json)) + return result_dict + + def get_flavor_provider(self, session, flavor_id): + return (session.query(models.FlavorProfile.provider_name) + .filter(models.Flavor.id == flavor_id) + .filter(models.Flavor.flavor_profile_id == + models.FlavorProfile.id).one()[0]) + + def delete(self, serial_session, **filters): + """Sets DELETED LBs flavor_id to NIL_UUID, then removes the flavor + + :param serial_session: A Sql Alchemy database transaction session. + :param filters: Filters to decide which entity should be deleted. + :returns: None + :raises: odb_exceptions.DBReferenceError + :raises: sqlalchemy.orm.exc.NoResultFound + """ + (serial_session.query(models.LoadBalancer). + filter(models.LoadBalancer.flavor_id == filters['id']). + filter(models.LoadBalancer.provisioning_status == consts.DELETED). + update({models.LoadBalancer.flavor_id: consts.NIL_UUID}, + synchronize_session=False)) + flavor = (serial_session.query(self.model_class). 
+ filter_by(**filters).one()) + serial_session.delete(flavor) + + +class FlavorProfileRepository(_GetALLExceptDELETEDIdMixin, BaseRepository): + model_class = models.FlavorProfile + + +class AvailabilityZoneRepository(_GetALLExceptDELETEDIdMixin, BaseRepository): + model_class = models.AvailabilityZone + + def get_availability_zone_metadata_dict(self, session, + availability_zone_name): + availability_zone_metadata_json = ( + session.query( + models.AvailabilityZoneProfile.availability_zone_data) + .filter(models.AvailabilityZone.name == availability_zone_name) + .filter(models.AvailabilityZone.availability_zone_profile_id == + models.AvailabilityZoneProfile.id) + .one()[0]) + result_dict = ( + {} if availability_zone_metadata_json is None + else jsonutils.loads(availability_zone_metadata_json)) + return result_dict + + def get_availability_zone_provider(self, session, availability_zone_name): + return (session.query(models.AvailabilityZoneProfile.provider_name) + .filter( + models.AvailabilityZone.name == availability_zone_name) + .filter( + models.AvailabilityZone.availability_zone_profile_id == + models.AvailabilityZoneProfile.id).one()[0]) + + def update(self, session, name, **model_kwargs): + """Updates an entity in the database. + + :param session: A Sql Alchemy database session. + :param model_kwargs: Entity attributes that should be updated. + :returns: octavia.common.data_model + """ + session.query(self.model_class).filter_by( + name=name).update(model_kwargs) + + def delete(self, serial_session, **filters): + """Special delete method for availability_zone. + + Sets DELETED LBs availability_zone to NIL_UUID, then removes the + availability_zone. + + :param serial_session: A Sql Alchemy database transaction session. + :param filters: Filters to decide which entity should be deleted. + :returns: None + :raises: odb_exceptions.DBReferenceError + :raises: sqlalchemy.orm.exc.NoResultFound + """ + (serial_session.query(models.LoadBalancer). + filter(models.LoadBalancer.availability_zone == filters[consts.NAME]). + filter(models.LoadBalancer.provisioning_status == consts.DELETED). + update({models.LoadBalancer.availability_zone: consts.NIL_UUID}, + synchronize_session=False)) + availability_zone = ( + serial_session.query(self.model_class).filter_by(**filters).one()) + serial_session.delete(availability_zone) + + +class AvailabilityZoneProfileRepository(_GetALLExceptDELETEDIdMixin, + BaseRepository): + model_class = models.AvailabilityZoneProfile + + +class AmphoraMemberPortRepository(BaseRepository): + model_class = models.AmphoraMemberPort + + def get_port_ids(self, session, amphora_id): + return session.scalars( + select( + self.model_class.port_id + ).where( + self.model_class.amphora_id == amphora_id + )).all() diff --git a/octavia/distributor/__init__.py b/octavia/distributor/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/distributor/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
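The repository classes above all share one convention worth noting: the caller owns the SQLAlchemy session and its transaction and passes it into every call; apart from ensure_project_exists, no repository method commits on its own. Below is a minimal usage sketch of that pattern, illustrative only: it assumes these classes live in octavia.db.repositories and that octavia.db.api exposes the session() factory used by ensure_project_exists above, and the load balancer UUID is a hypothetical placeholder.

# Hedged usage sketch -- not part of this change; the UUID is hypothetical.
from octavia.db import api as db_api
from octavia.db import repositories as repo

amp_repo = repo.AmphoraRepository()

# The caller opens and owns the transaction; the repository only locks rows
# (SELECT ... FOR UPDATE) and mutates them within that transaction.
with db_api.session().begin() as session:
    amp = amp_repo.allocate_and_associate(
        session, load_balancer_id='hypothetical-lb-uuid')
    if amp is None:
        print('no READY amphora was available to allocate')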
diff --git a/octavia/distributor/drivers/__init__.py b/octavia/distributor/drivers/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/distributor/drivers/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/distributor/drivers/driver_base.py b/octavia/distributor/drivers/driver_base.py new file mode 100644 index 0000000000..7e16d7de12 --- /dev/null +++ b/octavia/distributor/drivers/driver_base.py @@ -0,0 +1,134 @@ +# Copyright 2016 IBM Corp. +# Copyright 2017 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +# This class describes the abstraction of a distributor interface. +# Distributor implementations may be: a noop, a single hardware device, +# a single amphora, or multiple amphora among other options. + + +class DistributorDriver(metaclass=abc.ABCMeta): + @abc.abstractmethod + def get_create_distributor_subflow(self): + """Get a subflow to create a distributor + + :requires: **load_balancer** (object) - Load balancer object + associated with this distributor + :provides: **distributor_id** (string) - The created distributor ID + :returns: A TaskFlow Flow that will create the distributor + + This method will set up the TaskFlow Flow required to set up the + database fields and create a distributor should the driver need to + instantiate one. + The flow must store the generated distributor ID in the flow. + """ + + @abc.abstractmethod + def get_delete_distributor_subflow(self): + """Get a subflow that deletes a distributor + + :requires: **distributor_id** (string) - The ID of the distributor + to delete + :returns: A TaskFlow Flow that will delete the distributor + + This method will return a TaskFlow Flow that deletes the distributor + (if applicable for the driver) and cleans up any associated database + records. + """ + + @abc.abstractmethod + def get_add_vip_subflow(self): + """Get a subflow that adds a VIP to a distributor + + :requires: **distributor_id** (string) - The ID of the distributor + to create the VIP on. + :requires: **vip** (object) - The VIP object to create on the + distributor. + :requires: **vip_alg** (string) - The optional algorithm to use for + this VIP. + :requires: **vip_persistence** (string) - The persistence type for + this VIP.
+ :returns: A TaskFlow Flow that will add a VIP to the distributor + + This method will return a TaskFlow Flow that adds a VIP to the + distributor by performing the necessary steps to plug the VIP and + configure the distributor to start receiving requests on this VIP. + """ + + @abc.abstractmethod + def get_remove_vip_subflow(self): + """Get a subflow that removes a VIP from a distributor + + :requires: **distributor_id** (string) - The ID of the distributor + to remove the VIP from. + :requires: **vip** (object) - The VIP object to remove from the + distributor. + :returns: A TaskFlow Flow that will remove a VIP from the distributor + + This method will return a TaskFlow Flow that removes the VIP from the + distributor by reconfiguring the distributor and unplugging the + associated port. + """ + + @abc.abstractmethod + def get_register_amphorae_subflow(self): + """Get a subflow that registers amphorae with the distributor + + :requires: **distributor_id** (string) - The ID of the distributor + to register the amphora on + :requires: **amphorae** (tuple) - Tuple of amphora objects to + register with the distributor. + :returns: A TaskFlow Flow that will register amphorae with the + distributor + + This method will return a TaskFlow Flow that registers amphorae with + the distributor so they can begin to receive requests from the + distributor. Amphorae should be ready to receive requests prior to + this call being made. + """ + + @abc.abstractmethod + def get_drain_amphorae_subflow(self): + """Get a subflow that drains connections from amphorae + + :requires: **distributor_id** (string) - The ID of the distributor + to drain amphorae from + :requires: **amphorae** (tuple) - Tuple of amphora objects to drain + from distributor. + :returns: A TaskFlow Flow that will drain the listed amphorae on the + distributor + + This method will return a TaskFlow Flow that configures the + distributor to stop sending new connections to the amphorae in the + list. Existing connections will continue to pass traffic to the + amphorae in this list. + """ + + @abc.abstractmethod + def get_unregister_amphorae_subflow(self): + """Get a subflow that unregisters amphorae from a distributor + + :requires: **distributor_id** (string) - The ID of the distributor + to unregister amphorae from + :requires: **amphorae** (tuple) - Tuple of amphora objects to + unregister from distributor. + :returns: A TaskFlow Flow that will unregister amphorae from the + distributor + + This method will return a TaskFlow Flow that unregisters amphorae + from the distributor. Amphorae in this list will immediately stop + receiving traffic. + """ diff --git a/octavia/distributor/drivers/noop_driver/__init__.py b/octavia/distributor/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/distributor/drivers/noop_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
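Because every DistributorDriver method returns a TaskFlow subflow rather than acting immediately, a controller can splice distributor steps into a larger provisioning flow and let the engine wire up the **requires**/**provides** dependencies. Below is a hedged sketch of that composition, using the no-op driver added just below; the flow name and store values are illustrative, not part of this change.

# Illustrative composition sketch -- the flow name is hypothetical.
from taskflow.patterns import linear_flow

from octavia.distributor.drivers.noop_driver import driver as noop_driver

distributor = noop_driver.NoopDistributorDriver()

# 'load_balancer' and 'amphorae' would come from the parent flow's store;
# the create subflow provides 'distributor_id' for the subflows after it.
lb_create = linear_flow.Flow('hypothetical-lb-create')
lb_create.add(distributor.get_create_distributor_subflow())
lb_create.add(distributor.get_register_amphorae_subflow())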
diff --git a/octavia/distributor/drivers/noop_driver/driver.py b/octavia/distributor/drivers/noop_driver/driver.py new file mode 100644 index 0000000000..5681cbfe5f --- /dev/null +++ b/octavia/distributor/drivers/noop_driver/driver.py @@ -0,0 +1,124 @@ +# Copyright 2016 IBM Corp. +# Copyright 2017 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from taskflow.patterns import linear_flow +from taskflow import task + +from oslo_log import log as logging +from oslo_utils import uuidutils + +from octavia.distributor.drivers import driver_base + +LOG = logging.getLogger(__name__) + + +class NoopProvidesRequiresTask(task.Task): + def __init__(self, name, provides_dicts=None, requires=None): + if provides_dicts is None: + provides_dicts = {} + super().__init__( + name=name, + provides=list(provides_dicts), + requires=requires) + self.provides_dict = provides_dicts + + def execute(self, *args, **kwargs): + return self.provides_dict.values() + + +class NoopManager: + def __init__(self): + super().__init__() + + def get_create_distributor_subflow(self): + LOG.debug('Distributor %s create_distributor', self.__class__.__name__) + create_distributor_flow = linear_flow.Flow('create-distributor') + create_distributor_flow.add(NoopProvidesRequiresTask( + 'create-distributor-task', + requires=('load_balancer'), + provides_dicts={'distributor_id': uuidutils.generate_uuid()})) + return create_distributor_flow + + def get_delete_distributor_subflow(self): + LOG.debug('Distributor %s delete_distributor', self.__class__.__name__) + delete_distributor_flow = linear_flow.Flow('delete-distributor') + delete_distributor_flow.add(NoopProvidesRequiresTask( + 'delete-distributor-task', requires='distributor_id')) + return delete_distributor_flow + + def get_add_vip_subflow(self): + LOG.debug('Distributor %s add_vip', self.__class__.__name__) + add_vip_flow = linear_flow.Flow('add-vip') + add_vip_flow.add(NoopProvidesRequiresTask( + 'add-vip-task', requires=('distributor_id', 'vip', + 'vip_alg', 'vip_persistence'))) + return add_vip_flow + + def get_remove_vip_subflow(self): + LOG.debug('Distributor %s remove_vip', self.__class__.__name__) + remove_vip_flow = linear_flow.Flow('remove-vip') + remove_vip_flow.add(NoopProvidesRequiresTask('remove-vip-task', + requires=('distributor_id', 'vip'))) + return remove_vip_flow + + def get_register_amphorae_subflow(self): + LOG.debug('Distributor %s register_amphorae', self.__class__.__name__) + register_amphorae_flow = linear_flow.Flow('register_amphorae') + register_amphorae_flow.add(NoopProvidesRequiresTask( + 'register_amphorae_task', requires=('distributor_id', 'amphorae'))) + return register_amphorae_flow + + def get_drain_amphorae_subflow(self): + LOG.debug('Distributor %s drain_amphorae', self.__class__.__name__) + drain_amphorae_flow = linear_flow.Flow('drain-amphorae') + drain_amphorae_flow.add(NoopProvidesRequiresTask( + 'drain_amphorae_task', requires=('distributor_id', 'amphorae'))) + return drain_amphorae_flow + + def 
get_unregister_amphorae_subflow(self): + LOG.debug('Distributor %s unregister_amphorae', + self.__class__.__name__) + unregister_amphorae_flow = linear_flow.Flow('unregister_amphora') + unregister_amphorae_flow.add(NoopProvidesRequiresTask( + 'unregister_amphorae_task', requires=('distributor_id', + 'amphorae'))) + return unregister_amphorae_flow + + +class NoopDistributorDriver(driver_base.DistributorDriver): + def __init__(self): + super().__init__() + self.driver = NoopManager() + + def get_create_distributor_subflow(self): + return self.driver.get_create_distributor_subflow() + + def get_delete_distributor_subflow(self): + return self.driver.get_delete_distributor_subflow() + + def get_add_vip_subflow(self): + return self.driver.get_add_vip_subflow() + + def get_remove_vip_subflow(self): + return self.driver.get_remove_vip_subflow() + + def get_register_amphorae_subflow(self): + return self.driver.get_register_amphorae_subflow() + + def get_drain_amphorae_subflow(self): + return self.driver.get_drain_amphorae_subflow() + + def get_unregister_amphorae_subflow(self): + return self.driver.get_unregister_amphorae_subflow() diff --git a/octavia/hacking/__init__.py b/octavia/hacking/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/hacking/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/hacking/checks.py b/octavia/hacking/checks.py new file mode 100644 index 0000000000..0b3bfc230c --- /dev/null +++ b/octavia/hacking/checks.py @@ -0,0 +1,202 @@ +# Copyright (c) 2014 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +""" +Guidelines for writing new hacking checks + + - Use only for Octavia-specific tests. OpenStack general tests + should be submitted to the common 'hacking' module. + - Pick numbers in the range O3xx. Find the current test with + the highest allocated number and then pick the next value. + - Keep the test method code in the source file ordered based + on the O3xx value.
+ - List the new rule in the top level HACKING.rst file + - Add test cases for each new rule to + octavia/tests/unit/test_hacking.py + +""" + +import re + +from hacking import core + + +_all_log_levels = {'critical', 'error', 'exception', 'info', 'warning'} +_all_hints = {'_LC', '_LE', '_LI', '_', '_LW'} + +_log_translation_hint = re.compile( + r".*LOG\.({levels})\(\s*({hints})\(".format( + levels='|'.join(_all_log_levels), + hints='|'.join(_all_hints), + )) + +assert_equal_with_true_re = re.compile( + r"assertEqual\(True,") +assert_equal_with_false_re = re.compile( + r"assertEqual\(False,") +mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") +revert_must_have_kwargs_re = re.compile( + r'[ ]*def revert\(.+,[ ](?!\*\*kwargs)\w+\):') +untranslated_exception_re = re.compile(r"raise (?:\w*)\((.*)\)") +no_eventlet_re = re.compile(r'(import|from)\s+[(]?eventlet') +no_line_continuation_backslash_re = re.compile(r'.*(\\)\n') +no_logging_re = re.compile(r'(import|from)\s+[(]?logging') + + +def _translation_checks_not_enforced(filename): + # Do not do these validations on tests + return any(pat in filename for pat in ["/tests/", "rally-jobs/plugins/"]) + + +@core.flake8ext +def assert_equal_true_or_false(logical_line): + """Check for assertEqual(True, A) or assertEqual(False, A) sentences + + O323 + """ + res = (assert_equal_with_true_re.search(logical_line) or + assert_equal_with_false_re.search(logical_line)) + if res: + yield (0, "O323: assertEqual(True, A) or assertEqual(False, A) " + "sentences not allowed") + + +@core.flake8ext +def no_mutable_default_args(logical_line): + msg = "O324: Method's default argument shouldn't be mutable!" + if mutable_default_args.match(logical_line): + yield (0, msg) + + +@core.flake8ext +def no_log_warn(logical_line): + """Disallow 'LOG.warn(' + + O339 + """ + if logical_line.startswith('LOG.warn('): + yield (0, "O339: Use LOG.warning() rather than LOG.warn()") + + +@core.flake8ext +def no_translate_logs(logical_line, filename): + """O341 - Don't translate logs. + + Check for 'LOG.*(_(' and 'LOG.*(_Lx(' + + Translators don't provide translations for log messages, and operators + asked not to translate them. + + * This check assumes that 'LOG' is a logger. + + :param logical_line: The logical line to check. + :param filename: The file name where the logical line exists. + :returns: None if the logical line passes the check, otherwise a tuple + is yielded that contains the offending index in logical line + and a message describing the check validation failure. + """ + if _translation_checks_not_enforced(filename): + return + + msg = "O341: Log messages should not be translated!" + match = _log_translation_hint.match(logical_line) + if match: + yield (logical_line.index(match.group()), msg) + + +@core.flake8ext +def check_raised_localized_exceptions(logical_line, filename): + """O342 - Untranslated exception message. + + :param logical_line: The logical line to check. + :param filename: The file name where the logical line exists. + :returns: None if the logical line passes the check, otherwise a tuple + is yielded that contains the offending index in logical line + and a message describing the check validation failure.
+ """ + if _translation_checks_not_enforced(filename): + return + + logical_line = logical_line.strip() + raised_search = untranslated_exception_re.match(logical_line) + if raised_search: + exception_msg = raised_search.groups()[0] + if exception_msg.startswith("\"") or exception_msg.startswith("\'"): + msg = "O342: Untranslated exception message." + yield (logical_line.index(exception_msg), msg) + + +@core.flake8ext +def check_no_eventlet_imports(logical_line): + """O345 - Usage of Python eventlet module not allowed. + + :param logical_line: The logical line to check. + :returns: None if the logical line passes the check, otherwise a tuple + is yielded that contains the offending index in logical line + and a message describe the check validation failure. + """ + if no_eventlet_re.match(logical_line): + msg = 'O345 Usage of Python eventlet module not allowed' + yield logical_line.index('eventlet'), msg + + +@core.flake8ext +def check_line_continuation_no_backslash(logical_line, tokens): + """O346 - Don't use backslashes for line continuation. + + :param logical_line: The logical line to check. Not actually used. + :param tokens: List of tokens to check. + :returns: None if the tokens don't contain any issues, otherwise a tuple + is yielded that contains the offending index in the logical + line and a message describe the check validation failure. + """ + backslash = None + for token_type, text, start, end, orig_line in tokens: + m = no_line_continuation_backslash_re.match(orig_line) + if m: + backslash = (start[0], m.start(1)) + break + + if backslash is not None: + msg = 'O346 Backslash line continuations not allowed' + yield backslash, msg + + +@core.flake8ext +def revert_must_have_kwargs(logical_line): + """O347 - Taskflow revert methods must have \\*\\*kwargs. + + :param logical_line: The logical line to check. + :returns: None if the logical line passes the check, otherwise a tuple + is yielded that contains the offending index in logical line + and a message describe the check validation failure. + """ + if revert_must_have_kwargs_re.match(logical_line): + msg = 'O347 Taskflow revert methods must have **kwargs' + yield 0, msg + + +@core.flake8ext +def check_no_logging_imports(logical_line): + """O348 - Usage of Python logging module not allowed. + + :param logical_line: The logical line to check. + :returns: None if the logical line passes the check, otherwise a tuple + is yielded that contains the offending index in logical line + and a message describe the check validation failure. + """ + if no_logging_re.match(logical_line): + msg = 'O348 Usage of Python logging module not allowed, use oslo_log' + yield logical_line.index('logging'), msg diff --git a/octavia/i18n.py b/octavia/i18n.py new file mode 100644 index 0000000000..54e80c3342 --- /dev/null +++ b/octavia/i18n.py @@ -0,0 +1,20 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import oslo_i18n as i18n + +_translators = i18n.TranslatorFactory(domain='octavia') + +# The primary translation function using the well-known name "_" +_ = _translators.primary diff --git a/octavia/image/__init__.py b/octavia/image/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/image/drivers/__init__.py b/octavia/image/drivers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/image/drivers/glance_driver.py b/octavia/image/drivers/glance_driver.py new file mode 100644 index 0000000000..2b77ce30dc --- /dev/null +++ b/octavia/image/drivers/glance_driver.py @@ -0,0 +1,69 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log as logging + +from octavia.common import clients +from octavia.common import constants +from octavia.common import exceptions +from octavia.image import image_base + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + + +class ImageManager(image_base.ImageBase): + '''Image implementation of virtual machines via Glance.''' + + def __init__(self): + super().__init__() + # Must initialize glance api + self._glance_client = clients.GlanceAuth.get_glance_client( + service_name=CONF.glance.service_name, + endpoint=CONF.glance.endpoint, + region=CONF.glance.region_name, + endpoint_type=CONF.glance.endpoint_type, + insecure=CONF.glance.insecure, + cacert=CONF.glance.ca_certificates_file + ) + self.manager = self._glance_client.images + + def get_image_id_by_tag(self, image_tag, image_owner=None): + """Get image ID by image tag and owner + + :param image_tag: image tag + :param image_owner: optional image owner + :raises: ImageGetException if no images found with given tag + :return: image id + """ + filters = {'tag': [image_tag], + 'status': constants.GLANCE_IMAGE_ACTIVE} + if image_owner: + filters.update({'owner': image_owner}) + + images = list(self.manager.list( + filters=filters, sort='created_at:desc', limit=2)) + + if not images: + raise exceptions.ImageGetException(tag=image_tag) + image_id = images[0]['id'] + num_images = len(images) + if num_images > 1: + LOG.warning("A single Glance image should be tagged with %(tag)s " + "tag, but at least two were found. Using " + "%(image_id)s.", + {'tag': image_tag, 'image_id': image_id}) + return image_id diff --git a/octavia/image/drivers/noop_driver/__init__.py b/octavia/image/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/image/drivers/noop_driver/driver.py b/octavia/image/drivers/noop_driver/driver.py new file mode 100644 index 0000000000..84ba69a6eb --- /dev/null +++ b/octavia/image/drivers/noop_driver/driver.py @@ -0,0 +1,43 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from octavia.image import image_base as driver_base + +LOG = logging.getLogger(__name__) + + +class NoopManager: + def __init__(self): + super().__init__() + self.imageconfig = {} + + def get_image_id_by_tag(self, image_tag, image_owner=None): + LOG.debug("Image %s no-op, get_image_id_by_tag image tag %s, " + "image owner %s", + self.__class__.__name__, image_tag, image_owner) + self.imageconfig[image_tag, image_owner] = ( + image_tag, image_owner, 'get_image_id_by_tag') + return 1 + + +class NoopImageDriver(driver_base.ImageBase): + def __init__(self): + super().__init__() + self.driver = NoopManager() + + def get_image_id_by_tag(self, image_tag, image_owner=None): + image_id = self.driver.get_image_id_by_tag(image_tag, image_owner) + return image_id diff --git a/octavia/image/image_base.py b/octavia/image/image_base.py new file mode 100644 index 0000000000..eca84632ef --- /dev/null +++ b/octavia/image/image_base.py @@ -0,0 +1,28 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + + +class ImageBase(metaclass=abc.ABCMeta): + + @abc.abstractmethod + def get_image_id_by_tag(self, image_tag, image_owner=None): + """Get image ID by image tag and owner. + + :param image_tag: image tag + :param image_owner: optional image owner + :raises: ImageGetException if no images found with given tag + :return: image id + """ diff --git a/octavia/network/__init__.py b/octavia/network/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/network/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/network/base.py b/octavia/network/base.py new file mode 100644 index 0000000000..f2bc778776 --- /dev/null +++ b/octavia/network/base.py @@ -0,0 +1,411 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import typing + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions + +if typing.TYPE_CHECKING: + from octavia.common import context + import octavia.network.data_models as n_data_models + + +class NetworkException(exceptions.OctaviaException): + pass + + +class PlugVIPException(NetworkException): + pass + + +class UnplugVIPException(NetworkException): + pass + + +class AllocateVIPException(NetworkException): + pass + + +class DeallocateVIPException(NetworkException): + pass + + +class PlugNetworkException(NetworkException): + pass + + +class UnplugNetworkException(NetworkException): + pass + + +class VIPInUseException(NetworkException): + pass + + +class PortNotFound(NetworkException): + pass + + +class NetworkNotFound(NetworkException): + pass + + +class SubnetNotFound(NetworkException): + pass + + +class AmphoraNotFound(NetworkException): + pass + + +class PluggedVIPNotFound(NetworkException): + pass + + +class TimeoutException(NetworkException): + pass + + +class QosPolicyNotFound(NetworkException): + pass + + +class SecurityGroupNotFound(NetworkException): + pass + + +class CreatePortException(NetworkException): + pass + + +class AbstractNetworkDriver(metaclass=abc.ABCMeta): + """This class defines the methods for a fully functional network driver. + + Implementations of this interface can expect a rollback to occur if any of + the non-nullipotent methods raise an exception. + """ + + @abc.abstractmethod + def allocate_vip(self, load_balancer: data_models.LoadBalancer) -> ( + tuple[data_models.Vip, list[data_models.AdditionalVip]]): + """Allocates a virtual ip. + + Reserves it for later use as the frontend connection of a load + balancer. + + :param load_balancer: octavia.common.data_models.LoadBalancer instance + :return: octavia.common.data_models.Vip, + list(octavia.common.data_models.AdditionalVip) + :raises: AllocateVIPException, PortNotFound, SubnetNotFound + """ + + @abc.abstractmethod + def create_port(self, network_id, name=None, fixed_ips=(), + secondary_ips=(), security_group_ids=(), + admin_state_up=True, qos_policy_id=None, + vnic_type=constants.VNIC_TYPE_NORMAL): + """Creates a network port. + + fixed_ips = [{'subnet_id': <subnet_id>, 'ip_address': <ip_address>},] + ip_address is optional in the fixed_ips dictionary. + + :param network_id: The network the port should be created on. + :param name: The name to apply to the port. + :param fixed_ips: A list of fixed IP dicts. + :param secondary_ips: A list of secondary IPs to add to the port. + :param security_group_ids: A list of security group IDs for the port. + :param admin_state_up: The administrative state to apply to the port. + :param qos_policy_id: The QoS policy ID to apply to the port. + :param vnic_type: The vNIC type this port should attach to. + :returns port: A port data model object. + """ + + @abc.abstractmethod + def deallocate_vip(self, vip): + """Removes any resources that reserved this virtual ip. + + :param vip: octavia.common.data_models.VIP instance + :return: None + :raises: DeallocateVIPException, VIPInUseException, + VIPConfigurationNotFound + """ + + @abc.abstractmethod + def delete_port(self, port_id): + """Delete a network port. 
+ + :param port_id: The port ID to delete. + :returns: None + """ + + @abc.abstractmethod + def unplug_vip(self, load_balancer, vip): + """Unplugs a virtual ip as the frontend connection of a load balancer. + + Removes the routing of traffic from the vip to the load balancer + and its amphorae. + + :param load_balancer: octavia.common.data_models.LoadBalancer instance + :param vip: octavia.common.data_models.VIP instance + :return: octavia.common.data_models.VIP instance + :raises: UnplugVIPException, PluggedVIPNotFound + """ + + @abc.abstractmethod + def unplug_network(self, compute_id, network_id): + """Disconnects an existing amphora from an existing network. + + All the interfaces plugged on network_id will be unplugged. + + :param compute_id: id of an amphora in the compute service + :param network_id: id of a network + :return: None + :raises: UnplugNetworkException, AmphoraNotFound, NetworkNotFound, + NetworkException + """ + + @abc.abstractmethod + def plug_fixed_ip(self, port_id, subnet_id, ip_address=None): + """Plug a fixed ip to an existing port. + + If ip_address is not specified, one will be auto-assigned. + + :param port_id: id of a port to add a fixed ip + :param subnet_id: id of a subnet + :param ip_address: specific ip_address to add + :return: octavia.network.data_models.Port + :raises: NetworkException, PortNotFound + """ + + @abc.abstractmethod + def unplug_fixed_ip(self, port_id, subnet_id): + """Unplug a fixed ip from an existing port. + + :param port_id: id of a port to remove the fixed ip from + :param subnet_id: id of a subnet + :return: octavia.network.data_models.Port + :raises: NetworkException, PortNotFound + """ + + @abc.abstractmethod + def get_plugged_networks(self, compute_id): + """Retrieves the current plugged networking configuration. + + :param compute_id: id of an amphora in the compute service + :return: [octavia.network.data_models.Interface] + """ + + def update_vip(self, load_balancer, for_delete): + """Hook for the driver to update the VIP information. + + This method will be called upon the change of a load_balancer + configuration. It is an optional method to be implemented by drivers. + It allows the driver to update any VIP information based on the + state of the passed in load_balancer. + + :param load_balancer: octavia.common.data_models.LoadBalancer instance + :param for_delete: Boolean indicating if this update is for a delete + :raises: MissingVIPSecurityGroup + :return: None + """ + + @abc.abstractmethod + def get_network(self, network_id, context=None): + """Retrieves network from network id. + + :param network_id: id of a network to retrieve + :param context: A request context + :return: octavia.network.data_models.Network + :raises: NetworkException, NetworkNotFound + """ + + @abc.abstractmethod + def get_subnet(self, subnet_id, context=None): + """Retrieves subnet from subnet id. + + :param subnet_id: id of a subnet to retrieve + :param context: A request context + :return: octavia.network.data_models.Subnet + :raises: NetworkException, SubnetNotFound + """ + + @abc.abstractmethod + def get_port(self, port_id, context=None): + """Retrieves port from port id. + + :param port_id: id of a port to retrieve + :param context: A request context + :return: octavia.network.data_models.Port + :raises: NetworkException, PortNotFound + """ + + @abc.abstractmethod + def get_network_by_name(self, network_name): + """Retrieves network from network name. 
+ + :param network_name: name of a network to retrieve + :return: octavia.network.data_models.Network + :raises: NetworkException, NetworkNotFound + """ + + @abc.abstractmethod + def get_subnet_by_name(self, subnet_name): + """Retrieves subnet from subnet name. + + :param subnet_name: name of a subnet to retrieve + :return: octavia.network.data_models.Subnet + :raises: NetworkException, SubnetNotFound + """ + + @abc.abstractmethod + def get_port_by_name(self, port_name): + """Retrieves port from port name. + + :param port_name: name of a port to retrieve + :return: octavia.network.data_models.Port + :raises: NetworkException, PortNotFound + """ + + @abc.abstractmethod + def get_port_by_net_id_device_id(self, network_id, device_id): + """Retrieves port from network id and device id. + + :param network_id: id of a network to filter by + :param device_id: id of a network device to filter by + :return: octavia.network.data_models.Port + :raises: NetworkException, PortNotFound + """ + + @abc.abstractmethod + def get_security_group(self, sg_name): + """Retrieves the security group by its name. + + :param sg_name: The security group name. + :return: octavia.network.data_models.SecurityGroup, None if not enabled + :raises: NetworkException, SecurityGroupNotFound + """ + + @abc.abstractmethod + def get_security_group_by_id(self, sg_id: str, + context: 'context.RequestContext' = None) -> ( + 'n_data_models.SecurityGroup'): + """Retrieves the security group by its id. + + :param sg_id: The security group ID. + :param context: A request context + :return: octavia.network.data_models.SecurityGroup, None if not enabled + :raises: NetworkException, SecurityGroupNotFound + """ + + @abc.abstractmethod + def failover_preparation(self, amphora): + """Prepare an amphora for failover. + + :param amphora: amphora object to failover + :return: None + :raises: PortNotFound + """ + + @abc.abstractmethod + def plug_port(self, amphora, port): + """Plug a neutron port into a compute instance + + :param amphora: amphora object to plug the port into + :param port: port to plug into the compute instance + :return: None + :raises: PlugNetworkException, AmphoraNotFound, NetworkNotFound + """ + + @abc.abstractmethod + def get_network_configs(self, load_balancer, amphora=None): + """Retrieve network configurations + + This method returns a dictionary of AmphoraNetworkConfigs keyed + off of the related amphora id. + The configs contain data pertaining to each amphora that is later + used for finalization of the entire load balancer configuration. + The data provided to these configs is left up to the driver; this + means the driver is responsible for providing data that is appropriate + for the amphora network configurations. + + Example return: {<amphora_id>: <AmphoraNetworkConfig>} + + :param load_balancer: The load_balancer configuration + :param amphora: Optional amphora to limit the query to. + :return: dict of octavia.network.data_models.AmphoraNetworkConfig + keyed off of the amphora id the config is associated with. 
+ :raises: NotFound, NetworkNotFound, SubnetNotFound, PortNotFound + """ + + @abc.abstractmethod + def update_vip_sg(self, load_balancer, vip): + """Updates the security group for a VIP + + :param load_balancer: Load Balancer to prepare the VIP for + :param vip: The VIP to plug + """ + + @abc.abstractmethod + def update_aap_port_sg(self, load_balancer: data_models.LoadBalancer, + amphora: data_models.Amphora, + vip: data_models.Vip): + """Updates the security group of the AAP port of an amphora + + :param load_balancer: Load Balancer the amphora belongs to + :param amphora: The amphora whose AAP port is updated + :param vip: The VIP whose security groups should be applied + """ + + @abc.abstractmethod + def plug_aap_port(self, load_balancer, vip, amphora, subnet): + """Plugs the AAP port to the amp + + :param load_balancer: Load Balancer to prepare the VIP for + :param vip: The VIP to plug + :param amphora: The amphora to plug the VIP into + :param subnet: The subnet to plug the aap into + """ + + @abc.abstractmethod + def unplug_aap_port(self, vip, amphora, subnet): + """Unplugs the AAP port from the amp + + :param vip: The VIP to unplug + :param amphora: The amphora to unplug the VIP from + :param subnet: The subnet to unplug the aap from + """ + + @abc.abstractmethod + def qos_enabled(self): + """Whether QoS is enabled + + :return: Boolean + """ + + @abc.abstractmethod + def get_network_ip_availability(self, network): + """Retrieves network IP availability. + + :param network: octavia.network.data_models.Network + :return: octavia.network.data_models.Network_IP_Availability + :raises: NetworkException, NetworkNotFound + """ + + @abc.abstractmethod + def set_port_admin_state_up(self, port_id, state): + """Set the admin state of a port. True is up, False is down. + + :param port_id: The port ID to update. + :param state: True for up, False for down. + :returns: None + """ diff --git a/octavia/network/data_models.py b/octavia/network/data_models.py new file mode 100644 index 0000000000..2646879652 --- /dev/null +++ b/octavia/network/data_models.py @@ -0,0 +1,176 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
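+ +# A minimal usage sketch of the models below (illustrative values only, +# not part of the module): a Port carries FixedIP entries, and +# Port.get_subnet_id() maps a fixed IP address back to its subnet: +# +# port = Port(id='port-1', network_id='net-1', +# fixed_ips=[FixedIP(subnet_id='subnet-1', +# ip_address='192.0.2.10')]) +# port.get_subnet_id('192.0.2.10') # -> 'subnet-1' +# port.get_subnet_id('192.0.2.99') # -> None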
+ +from octavia.common import constants +from octavia.common import data_models + + +class Interface(data_models.BaseDataModel): + + def __init__(self, id=None, compute_id=None, network_id=None, + fixed_ips=None, port_id=None, vnic_type=None): + self.id = id + self.compute_id = compute_id + self.network_id = network_id + self.port_id = port_id + self.fixed_ips = fixed_ips + self.vnic_type = vnic_type + + +class Delta(data_models.BaseDataModel): + + def __init__(self, amphora_id=None, compute_id=None, + add_nics=None, delete_nics=None, + add_subnets=None, delete_subnets=None): + self.compute_id = compute_id + self.amphora_id = amphora_id + self.add_nics = add_nics + self.delete_nics = delete_nics + self.add_subnets = add_subnets + self.delete_subnets = delete_subnets + + +class Network(data_models.BaseDataModel): + + def __init__(self, id=None, name=None, subnets=None, + project_id=None, admin_state_up=None, mtu=None, + provider_network_type=None, + provider_physical_network=None, + provider_segmentation_id=None, + router_external=None, + port_security_enabled=None): + self.id = id + self.name = name + self.subnets = subnets + self.project_id = project_id + self.admin_state_up = admin_state_up + self.provider_network_type = provider_network_type + self.provider_physical_network = provider_physical_network + self.provider_segmentation_id = provider_segmentation_id + self.router_external = router_external + self.mtu = mtu + self.port_security_enabled = port_security_enabled + + +class Subnet(data_models.BaseDataModel): + + def __init__(self, id=None, name=None, network_id=None, project_id=None, + gateway_ip=None, cidr=None, ip_version=None, + host_routes=None): + self.id = id + self.name = name + self.network_id = network_id + self.project_id = project_id + self.gateway_ip = gateway_ip + self.cidr = cidr + self.ip_version = ip_version + self.host_routes = host_routes + + +class Port(data_models.BaseDataModel): + + def __init__(self, id=None, name=None, device_id=None, device_owner=None, + mac_address=None, network_id=None, status=None, + project_id=None, admin_state_up=None, fixed_ips=None, + network=None, qos_policy_id=None, security_group_ids=None, + vnic_type=constants.VNIC_TYPE_NORMAL): + self.id = id + self.name = name + self.device_id = device_id + self.device_owner = device_owner + self.mac_address = mac_address + self.network_id = network_id + self.status = status + self.project_id = project_id + self.admin_state_up = admin_state_up + self.fixed_ips = fixed_ips or [] + self.network = network + self.qos_policy_id = qos_policy_id + self.security_group_ids = security_group_ids or [] + self.vnic_type = vnic_type + + def get_subnet_id(self, fixed_ip_address): + for fixed_ip in self.fixed_ips: + if fixed_ip.ip_address == fixed_ip_address: + return fixed_ip.subnet_id + return None + + +class FixedIP(data_models.BaseDataModel): + + def __init__(self, subnet_id=None, ip_address=None, subnet=None): + self.subnet_id = subnet_id + self.ip_address = ip_address + self.subnet = subnet + + +class AmphoraNetworkConfig(data_models.BaseDataModel): + + def __init__(self, amphora=None, vip_subnet=None, vip_port=None, + vrrp_subnet=None, vrrp_port=None, ha_subnet=None, + ha_port=None, additional_vip_data=None): + self.amphora = amphora + self.vip_subnet = vip_subnet + self.vip_port = vip_port + self.vrrp_subnet = vrrp_subnet + self.vrrp_port = vrrp_port + self.ha_subnet = ha_subnet + self.ha_port = ha_port + self.additional_vip_data = additional_vip_data or [] + + +class 
AdditionalVipData(data_models.BaseDataModel): + + def __init__(self, ip_address=None, subnet=None): + self.ip_address = ip_address + self.subnet = subnet + + +class HostRoute(data_models.BaseDataModel): + + def __init__(self, nexthop=None, destination=None): + self.nexthop = nexthop + self.destination = destination + + +class QosPolicy(data_models.BaseDataModel): + def __init__(self, id): + self.id = id + + +class Network_IP_Availability(data_models.BaseDataModel): + + def __init__(self, network_id=None, tenant_id=None, project_id=None, + network_name=None, total_ips=None, used_ips=None, + subnet_ip_availability=None): + self.network_id = network_id + self.tenant_id = tenant_id + self.project_id = project_id + self.network_name = network_name + self.total_ips = total_ips + self.used_ips = used_ips + self.subnet_ip_availability = subnet_ip_availability + + +class SecurityGroup(data_models.BaseDataModel): + + def __init__(self, id=None, project_id=None, name=None, description=None, + security_group_rule_ids=None, tags=None, stateful=None): + self.id = id + self.project_id = project_id + self.name = name + self.description = description + self.security_group_rule_ids = security_group_rule_ids or [] + self.tags = tags or [] + self.stateful = stateful diff --git a/octavia/network/drivers/__init__.py b/octavia/network/drivers/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/network/drivers/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/network/drivers/neutron/__init__.py b/octavia/network/drivers/neutron/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/network/drivers/neutron/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/network/drivers/neutron/allowed_address_pairs.py b/octavia/network/drivers/neutron/allowed_address_pairs.py new file mode 100644 index 0000000000..155a57b33f --- /dev/null +++ b/octavia/network/drivers/neutron/allowed_address_pairs.py @@ -0,0 +1,921 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import ipaddress +import time + +from novaclient import exceptions as nova_client_exceptions +from octavia_lib.common import constants as lib_consts +import openstack.exceptions as os_exceptions +from oslo_config import cfg +from oslo_log import log as logging +from stevedore import driver as stevedore_driver + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import utils as common_utils +from octavia.i18n import _ +from octavia.network import base +from octavia.network import data_models as n_data_models +from octavia.network.drivers.neutron import base as neutron_base +from octavia.network.drivers.neutron import utils + +LOG = logging.getLogger(__name__) +AAP_EXT_ALIAS = 'allowed-address-pairs' +PROJECT_ID_ALIAS = 'project-id' + +CONF = cfg.CONF + + +class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver): + + def __init__(self): + super().__init__() + self._check_aap_loaded() + self.compute = stevedore_driver.DriverManager( + namespace='octavia.compute.drivers', + name=CONF.controller_worker.compute_driver, + invoke_on_load=True + ).driver + + def _check_aap_loaded(self): + if not self._check_extension_enabled(AAP_EXT_ALIAS): + raise base.NetworkException( + 'The {alias} extension is not enabled in neutron. This ' + 'driver cannot be used with the {alias} extension ' + 'disabled.'.format(alias=AAP_EXT_ALIAS)) + + def _get_interfaces_to_unplug(self, interfaces, network_id, + ip_address=None): + ret = [] + for interface in interfaces: + if interface.network_id == network_id: + if ip_address: + for fixed_ip in interface.fixed_ips: + if ip_address == fixed_ip.ip_address: + ret.append(interface) + else: + ret.append(interface) + return ret + + def _get_plugged_interface(self, compute_id, network_id, lb_network_ip): + interfaces = self.get_plugged_networks(compute_id) + for interface in interfaces: + is_correct_interface = interface.network_id == network_id + for ip in interface.fixed_ips: + if ip.ip_address == lb_network_ip: + is_correct_interface = False + if is_correct_interface: + return interface + return None + + def _plug_amphora_vip(self, amphora, subnet, vip: data_models.Vip): + # We need a vip port owned by Octavia for Act/Stby and failover + try: + port = { + constants.NAME: 'octavia-lb-vrrp-' + amphora.id, + constants.NETWORK_ID: subnet.network_id, + constants.FIXED_IPS: [{'subnet_id': subnet.id}], + constants.ADMIN_STATE_UP: True, + constants.DEVICE_OWNER: constants.OCTAVIA_OWNER, + constants.SECURITY_GROUP_IDS: vip.sg_ids + } + + new_port = self.network_proxy.create_port(**port) + new_port = utils.convert_port_to_model(new_port) + + LOG.debug('Created vip port: %(port_id)s for amphora: %(amp)s', + {'port_id': new_port.id, 'amp': amphora.id}) + + except Exception as e: + message = _('Error creating the base (VRRP) port for the VIP with ' + 'port details: {}').format(port) + LOG.exception(message) + raise base.PlugVIPException(message) from e + + try: + interface = self.plug_port(amphora, new_port) + except Exception as e: + message = _('Error plugging amphora (compute_id: 
{compute_id}) ' + 'into vip network {network_id}.').format( + compute_id=amphora.compute_id, + network_id=subnet.network_id) + LOG.exception(message) + try: + if new_port: + self.network_proxy.delete_port(new_port.id) + LOG.debug('Deleted base (VRRP) port %s due to plug_port ' + 'failure.', new_port.id) + except Exception: + LOG.exception('Failed to delete base (VRRP) port %s after ' + 'plug_port failed. This resource is being ' + 'abandoned and should be manually deleted when ' + 'neutron is functional.', new_port.id) + raise base.PlugVIPException(message) from e + return interface + + def _add_vip_address_pairs(self, port_id, vip_address_list): + try: + self._add_allowed_address_pairs_to_port(port_id, vip_address_list) + except os_exceptions.ResourceNotFound as e: + raise base.PortNotFound(str(e)) + except Exception as e: + message = _('Error adding allowed address pair(s) {ips} ' + 'to port {port_id}.').format(ips=vip_address_list, + port_id=port_id) + LOG.exception(message) + raise base.PlugVIPException(message) from e + + def _get_lb_security_group(self, load_balancer_id): + sec_grp_name = common_utils.get_vip_security_group_name( + load_balancer_id) + sec_grp = self.network_proxy.find_security_group(sec_grp_name) + return sec_grp + + def _get_ethertype_for_ip(self, ip): + address = ipaddress.ip_address(ip) + return 'IPv6' if address.version == 6 else 'IPv4' + + def _get_ethertype_for_cidr(self, cidr): + net = ipaddress.ip_network(cidr) + return 'IPv6' if net.version == 6 else 'IPv4' + + def _update_security_group_rules(self, + load_balancer: data_models.LoadBalancer, + sec_grp_id): + # Skip adding listener rules if custom VIP security groups + # (sg_ids) are set + skip_listener_rules = load_balancer.vip.sg_ids + + rules = tuple(self.network_proxy.security_group_rules( + security_group_id=sec_grp_id)) + + updated_ports = [] + listener_peer_ports = [] + for listener in load_balancer.listeners: + if (listener.provisioning_status in [constants.PENDING_DELETE, + constants.DELETED]): + continue + + if not skip_listener_rules: + protocol = constants.PROTOCOL_TCP.lower() + if listener.protocol == constants.PROTOCOL_UDP: + protocol = constants.PROTOCOL_UDP.lower() + elif listener.protocol == lib_consts.PROTOCOL_SCTP: + protocol = lib_consts.PROTOCOL_SCTP.lower() + + if listener.allowed_cidrs: + for ac in listener.allowed_cidrs: + port = (listener.protocol_port, protocol, ac.cidr) + updated_ports.append(port) + else: + port = (listener.protocol_port, protocol, None) + updated_ports.append(port) + + listener_peer_ports.append(listener.peer_port) + + # The peer port holds the TCP connection used for keepalived and + # haproxy session synchronization, so its security group rule only + # needs to cover the TCP protocol. To avoid adding duplicate rules, + # a peer_port entry is only added when updated_ports does not + # already contain it with allowed_cidr 0.0.0.0/0 + tcp_lower = constants.PROTOCOL_TCP.lower() + for peer_port in listener_peer_ports: + if (peer_port, tcp_lower, "0.0.0.0/0") not in updated_ports: + updated_ports.append((peer_port, tcp_lower, None)) + + # Only port_range_max is compared because port_range_min and max + # are always equal in the rules this driver creates + old_ports = [] + for rule in rules: + # Don't remove egress rules, and don't mistake rules for other + # protocols (which may carry None ports) for egress rules. 
VRRP uses protocol 51 and 112 + if (rule.get('direction') == 'egress' or + rule.get('protocol') is None or + rule['protocol'].upper() not in + [constants.PROTOCOL_TCP, constants.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP]): + continue + old_ports.append((rule.get('port_range_max'), + rule['protocol'].lower(), + rule.get('remote_ip_prefix'))) + + add_ports = set(updated_ports) - set(old_ports) + del_ports = set(old_ports) - set(updated_ports) + for rule in rules: + if (rule.get('protocol', '') and + rule.get('protocol', '').upper() in + [constants.PROTOCOL_TCP, constants.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP] and + (rule.get('port_range_max'), rule.get('protocol'), + rule.get('remote_ip_prefix')) in del_ports): + rule_id = rule.get(constants.ID) + try: + self.network_proxy.delete_security_group_rule(rule_id) + except os_exceptions.ResourceNotFound: + LOG.info("Security group rule %s not found, will assume " + "it is already deleted.", rule_id) + + ethertypes = set() + primary_ethertype = self._get_ethertype_for_ip( + load_balancer.vip.ip_address) + ethertypes.add(primary_ethertype) + for add_vip in load_balancer.additional_vips: + ethertypes.add(self._get_ethertype_for_ip(add_vip.ip_address)) + for port_protocol in add_ports: + for ethertype in ethertypes: + cidr = port_protocol[2] + if not cidr or self._get_ethertype_for_cidr(cidr) == ethertype: + self._create_security_group_rule( + sec_grp_id, port_protocol[1], + port_min=port_protocol[0], + port_max=port_protocol[0], + ethertype=ethertype, + cidr=cidr, + ) + + # Currently we are using the VIP network for VRRP + # so we need to open up the protocols for it + if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: + try: + self._create_security_group_rule( + sec_grp_id, + constants.VRRP_PROTOCOL_NUM, + direction='ingress', + ethertype=primary_ethertype) + except os_exceptions.ConflictException: + # It's ok if this rule already exists + pass + except Exception as e: + raise base.PlugVIPException(str(e)) + + try: + self._create_security_group_rule( + sec_grp_id, constants.AUTH_HEADER_PROTOCOL_NUMBER, + direction='ingress', ethertype=primary_ethertype) + except os_exceptions.ConflictException: + # It's ok if this rule already exists + pass + except Exception as e: + raise base.PlugVIPException(str(e)) + + def _add_vip_security_group_to_port(self, load_balancer_id, port_id, + sec_grp_id: str = None, + vip_sg_ids: list[str] = None): + sec_grp_ids = [sec_grp_id or + self._get_lb_security_group(load_balancer_id).get( + constants.ID)] + if vip_sg_ids: + sec_grp_ids += vip_sg_ids + try: + self._update_security_groups(sec_grp_ids, port_id) + except base.PortNotFound: + raise + except base.NetworkException as e: + raise base.PlugVIPException(str(e)) + + def _delete_vip_security_group(self, sec_grp): + """Deletes a security group in neutron. + + Retries upon an exception because removing a security group from + a neutron port does not happen immediately. 
+ """ + attempts = 0 + while attempts <= CONF.networking.max_retries: + try: + self.network_proxy.delete_security_group(sec_grp) + LOG.info("Deleted security group %s", sec_grp) + return + except os_exceptions.ResourceNotFound: + LOG.info("Security group %s not found, will assume it is " + "already deleted", sec_grp) + return + except Exception: + LOG.warning("Attempt %(attempt)s to remove security group " + "%(sg)s failed.", + {'attempt': attempts + 1, 'sg': sec_grp}) + attempts += 1 + time.sleep(CONF.networking.retry_interval) + message = _("All attempts to remove security group {0} have " + "failed.").format(sec_grp) + LOG.exception(message) + raise base.DeallocateVIPException(message) + + def _delete_security_group(self, vip, port): + if self.sec_grp_enabled: + try: + lb_id = vip.load_balancer.id + except AttributeError: + sec_grp = None + else: + sec_grp = self._get_lb_security_group(lb_id) + if sec_grp: + sec_grp_id = sec_grp.id + LOG.info( + "Removing security group %(sg)s from port %(port)s", + {'sg': sec_grp_id, constants.PORT: vip.port_id}) + raw_port = None + try: + if port: + raw_port = self.network_proxy.get_port(port.id) + except Exception: + LOG.warning('Unable to get port information for port ' + '%s. Continuing to delete the security ' + 'group.', port.id) + if raw_port: + sec_grps = raw_port.security_group_ids + if sec_grps and sec_grp_id in sec_grps: + sec_grps.remove(sec_grp_id) + try: + self.network_proxy.update_port( + port.id, security_group_ids=sec_grps) + except os_exceptions.ResourceNotFound: + LOG.warning('Unable to update port information ' + 'for port %s. Continuing to delete ' + 'the security group since port not ' + 'found', port.id) + + try: + self._delete_vip_security_group(sec_grp_id) + except base.DeallocateVIPException: + # Try to delete any leftover ports on this security group. + # Because this security group is created and managed by us, + # it *should* only return ports that we own / can delete. + LOG.warning('Failed to delete security group on first ' + 'pass: %s', sec_grp_id) + extra_ports = self._get_ports_by_security_group(sec_grp_id) + for extra_port in extra_ports: + port_id = extra_port.get(constants.ID) + try: + LOG.warning('Deleting extra port %s on security ' + 'group %s...', port_id, sec_grp_id) + self.network_proxy.delete_port(port_id) + except Exception: + LOG.warning('Failed to delete extra port %s on ' + 'security group %s.', + port_id, sec_grp_id) + # Now try it again + self._delete_vip_security_group(sec_grp_id) + + def deallocate_vip(self, vip): + """Delete the vrrp_port (instance port) in case nova didn't + + This can happen if a failover has occurred. + """ + try: + for amphora in vip.load_balancer.amphorae: + if amphora.vrrp_port_id: + try: + self.network_proxy.delete_port(amphora.vrrp_port_id) + except os_exceptions.ResourceNotFound: + LOG.debug( + 'VIP instance port %s already deleted. Skipping.', + amphora.vrrp_port_id) + except AttributeError as ex: + LOG.warning(f"Cannot delete port from amphorae. Object does not " + f"exist ({ex!r})") + + try: + port = self.get_port(vip.port_id) + except base.PortNotFound: + LOG.warning("Can't deallocate VIP because the vip port %s " + "cannot be found in neutron. " + "Continuing cleanup.", vip.port_id) + port = None + + self._delete_security_group(vip, port) + + if port and port.device_owner == constants.OCTAVIA_OWNER: + try: + self.network_proxy.delete_port(vip.port_id) + except os_exceptions.ResourceNotFound: + LOG.debug('VIP port %s already deleted. 
Skipping.', + vip.port_id) + except Exception as e: + message = _('Error deleting VIP port_id {port_id} from ' + 'neutron').format(port_id=vip.port_id) + LOG.exception(message) + raise base.DeallocateVIPException(message) from e + elif port: + LOG.info("Port %s will not be deleted by Octavia as it was " + "not created by Octavia.", vip.port_id) + + def update_vip_sg(self, load_balancer, vip): + if self.sec_grp_enabled: + sec_grp = self._get_lb_security_group(load_balancer.id) + if not sec_grp: + sec_grp_name = common_utils.get_vip_security_group_name( + load_balancer.id) + sec_grp = self._create_security_group(sec_grp_name) + self._update_security_group_rules(load_balancer, + sec_grp.get(constants.ID)) + self._add_vip_security_group_to_port(load_balancer.id, vip.port_id, + sec_grp.get(constants.ID), + vip_sg_ids=vip.sg_ids) + return sec_grp.get(constants.ID) + return None + + def update_aap_port_sg(self, + load_balancer: data_models.LoadBalancer, + amphora: data_models.Amphora, + vip: data_models.Vip): + if self.sec_grp_enabled: + sec_grp = self._get_lb_security_group(load_balancer.id) + if sec_grp: + self._add_vip_security_group_to_port(load_balancer.id, + amphora.vrrp_port_id, + sec_grp.get(constants.ID), + vip_sg_ids=vip.sg_ids) + + def plug_aap_port(self, load_balancer, vip, amphora, subnet): + interface = self._get_plugged_interface( + amphora.compute_id, subnet.network_id, amphora.lb_network_ip) + if not interface: + interface = self._plug_amphora_vip(amphora, subnet, vip) + + aap_address_list = [vip.ip_address] + for add_vip in load_balancer.additional_vips: + aap_address_list.append(add_vip.ip_address) + self._add_vip_address_pairs(interface.port_id, aap_address_list) + + if self.sec_grp_enabled: + self._add_vip_security_group_to_port(load_balancer.id, + interface.port_id, + vip_sg_ids=vip.sg_ids) + vrrp_ip = None + for fixed_ip in interface.fixed_ips: + is_correct_subnet = fixed_ip.subnet_id == subnet.id + is_management_ip = fixed_ip.ip_address == amphora.lb_network_ip + if is_correct_subnet and not is_management_ip: + vrrp_ip = fixed_ip.ip_address + break + return data_models.Amphora( + id=amphora.id, + compute_id=amphora.compute_id, + vrrp_ip=vrrp_ip, + ha_ip=vip.ip_address, + vrrp_port_id=interface.port_id, + ha_port_id=vip.port_id) + + def _validate_fixed_ip(self, fixed_ips, subnet_id, ip_address): + """Validate an IP address exists in a fixed_ips list + + :param fixed_ips: A port fixed_ips list + :param subnet_id: The subnet that should contain the IP + :param ip_address: The IP address to validate + :returns: True if the ip address is in the list, False if not + """ + for fixed_ip in fixed_ips: + normalized_fixed_ip = ipaddress.ip_address( + fixed_ip.ip_address).compressed + normalized_ip = ipaddress.ip_address(ip_address).compressed + if (fixed_ip.subnet_id == subnet_id and + normalized_fixed_ip == normalized_ip): + return True + return False + + @staticmethod + def _fixed_ips_to_list_of_dicts(fixed_ips): + list_of_dicts = [] + for fixed_ip in fixed_ips: + list_of_dicts.append(fixed_ip.to_dict()) + return list_of_dicts + + def allocate_vip(self, load_balancer: data_models.LoadBalancer): + """Allocates a virtual ip. + + Reserves the IP for later use as the frontend connection of a load + balancer. 
+ + :param load_balancer: octavia.common.data_models.LoadBalancer instance + :return: octavia.common.data_models.Vip, + list(octavia.common.data_models.AdditionalVip) + :raises AllocateVIPException: generic error allocating the VIP + :raises PortNotFound: port was not found + :raises SubnetNotFound: subnet was not found + """ + if load_balancer.vip.port_id: + try: + port = self.get_port(load_balancer.vip.port_id) + fixed_ip_found = self._validate_fixed_ip( + port.fixed_ips, load_balancer.vip.subnet_id, + load_balancer.vip.ip_address) + if (port.network_id == load_balancer.vip.network_id and + fixed_ip_found): + LOG.info('Port %s already exists. Nothing to be done.', + load_balancer.vip.port_id) + return self._port_to_vip(port, load_balancer) + LOG.error('Neutron VIP mismatch. Expected ip %s on ' + 'subnet %s in network %s. Neutron has fixed_ips %s ' + 'in network %s. Deleting and recreating the VIP ' + 'port.', load_balancer.vip.ip_address, + load_balancer.vip.subnet_id, + load_balancer.vip.network_id, + self._fixed_ips_to_list_of_dicts(port.fixed_ips), + port.network_id) + if load_balancer.vip.octavia_owned: + self.delete_port(load_balancer.vip.port_id) + else: + raise base.AllocateVIPException( + 'VIP port {} is broken, but is owned by project {} ' + 'so will not be recreated. Aborting VIP allocation.' + .format(port.id, port.project_id)) + except base.AllocateVIPException as e: + # Catch this explicitly because otherwise we blame Neutron + LOG.error(getattr(e, constants.MESSAGE, None)) + raise + except base.PortNotFound: + LOG.warning('VIP port %s is missing from neutron. Rebuilding.', + load_balancer.vip.port_id) + except Exception as e: + message = _('Neutron is failing to service requests due to: ' + '{}. Aborting.').format(str(e)) + LOG.error(message) + raise base.AllocateVIPException( + message, + orig_msg=getattr(e, constants.MESSAGE, None), + orig_code=getattr(e, constants.STATUS_CODE, None),) + + fixed_ip = {} + if load_balancer.vip.subnet_id: + fixed_ip[constants.SUBNET_ID] = load_balancer.vip.subnet_id + if load_balancer.vip.ip_address: + fixed_ip[constants.IP_ADDRESS] = load_balancer.vip.ip_address + + fixed_ips = [] + if fixed_ip: + fixed_ips.append(fixed_ip) + + for add_vip in load_balancer.additional_vips: + add_ip = {} + if add_vip.subnet_id: + add_ip['subnet_id'] = add_vip.subnet_id + if add_vip.ip_address: + add_ip['ip_address'] = add_vip.ip_address + if add_ip: + fixed_ips.append(add_ip) + else: + LOG.warning('Additional VIP contains neither subnet_id nor ' + 'ip_address, ignoring.') + + # Make sure we are backward compatible with older neutron + if self._check_extension_enabled(PROJECT_ID_ALIAS): + project_id_key = 'project_id' + else: + project_id_key = 'tenant_id' + + # It can be assumed that network_id exists + port = { + constants.NAME: 'octavia-lb-' + load_balancer.id, + constants.NETWORK_ID: load_balancer.vip.network_id, + constants.ADMIN_STATE_UP: False, + 'device_id': f'lb-{load_balancer.id}', + constants.DEVICE_OWNER: constants.OCTAVIA_OWNER, + project_id_key: load_balancer.project_id} + + if load_balancer.vip.sg_ids: + port[constants.SECURITY_GROUP_IDS] = load_balancer.vip.sg_ids + + if fixed_ips: + port[constants.FIXED_IPS] = fixed_ips + try: + new_port = self.network_proxy.create_port(**port) + except os_exceptions.ConflictException as e: + message = _('Error creating neutron port on network ' + '{network_id} due to {e}.').format( + network_id=load_balancer.vip.network_id, e=repr(e)) + raise base.VIPInUseException( + message, + orig_msg=getattr(e, 
'details', None), + orig_code=getattr(e, constants.STATUS_CODE, None), + ) + except Exception as e: + message = _('Error creating neutron port on network ' + '{network_id} due to {e}.').format( + network_id=load_balancer.vip.network_id, e=repr(e)) + LOG.exception(message) + raise base.AllocateVIPException( + message, + orig_msg=getattr(e, constants.MESSAGE, None), + orig_code=getattr(e, constants.STATUS_CODE, None), + ) + new_port = utils.convert_port_to_model(new_port) + return self._port_to_vip(new_port, load_balancer, octavia_owned=True) + + def unplug_aap_port(self, vip, amphora, subnet): + interface = self._get_plugged_interface( + amphora.compute_id, subnet.network_id, amphora.lb_network_ip) + if not interface: + # Thought about raising PluggedVIPNotFound exception but + # then that wouldn't evaluate all amphorae, so just continue + LOG.debug('Cannot get amphora %s interface, skipped', + amphora.compute_id) + return + try: + self.unplug_network(amphora.compute_id, subnet.network_id) + except Exception: + pass + try: + aap_update = { + constants.ALLOWED_ADDRESS_PAIRS: [] + } + self.network_proxy.update_port(interface.port_id, + **aap_update) + except Exception as e: + message = _('Error unplugging VIP. Could not clear ' + 'allowed address pairs from port ' + '{port_id}.').format(port_id=vip.port_id) + LOG.exception(message) + raise base.UnplugVIPException(message) from e + + # Delete the VRRP port if we created it + try: + port = self.get_port(amphora.vrrp_port_id) + if port.name.startswith('octavia-lb-vrrp-'): + self.network_proxy.delete_port(amphora.vrrp_port_id) + except base.PortNotFound: + pass + except Exception as e: + LOG.error('Failed to delete port. Resources may still be in ' + 'use for port: %(port)s due to error: %(except)s', + {constants.PORT: amphora.vrrp_port_id, 'except': str(e)}) + + def unplug_vip(self, load_balancer, vip): + try: + subnet = self.get_subnet(vip.subnet_id) + except base.SubnetNotFound as e: + msg = (f"Can't unplug vip because vip subnet {vip.subnet_id} " + f"was not found") + LOG.exception(msg) + raise base.PluggedVIPNotFound(msg) from e + for amphora in filter( + lambda amp: amp.status == constants.AMPHORA_ALLOCATED, + load_balancer.amphorae): + self.unplug_aap_port(vip, amphora, subnet) + + def unplug_network(self, compute_id, network_id): + interfaces = self.get_plugged_networks(compute_id) + if not interfaces: + msg = (f'Amphora with compute id {compute_id} does not have any ' + f'plugged networks') + raise base.NetworkNotFound(msg) + + unpluggers = self._get_interfaces_to_unplug(interfaces, network_id) + removed_port_ids = set() + for index, unplugger in enumerate(unpluggers): + self.compute.detach_port( + compute_id=compute_id, port_id=unplugger.port_id) + removed_port_ids.add(unplugger.port_id) + + port_detach_timeout = CONF.networking.port_detach_timeout + + start = time.time() + while time.time() - start < port_detach_timeout: + interfaces = self.get_plugged_networks(compute_id) + plugged_port_ids = {i.port_id for i in interfaces} + if not plugged_port_ids & removed_port_ids: + break + time.sleep(CONF.networking.retry_interval) + else: + LOG.warning("Ports (%s) still attached to compute %s after " + "%s seconds.", + ", ".join(removed_port_ids), + compute_id, port_detach_timeout) + + def update_vip(self, load_balancer, for_delete=False): + sec_grp = self._get_lb_security_group(load_balancer.id) + if sec_grp: + self._update_security_group_rules(load_balancer, + sec_grp.get(constants.ID)) + elif not for_delete: + raise 
exceptions.MissingVIPSecurityGroup(lb_id=load_balancer.id) + else: + LOG.warning('VIP security group missing when updating the VIP for ' + 'delete on load balancer: %s. Skipping update ' + 'because this is for delete.', load_balancer.id) + + def failover_preparation(self, amphora): + if self.dns_integration_enabled: + self._failover_preparation(amphora) + + def _failover_preparation(self, amphora): + interfaces = self.get_plugged_networks(compute_id=amphora.compute_id) + + ports = [] + for interface_ in interfaces: + port = self.get_port(port_id=interface_.port_id) + ips = port.fixed_ips + lb_network = False + for ip in ips: + if ip.ip_address == amphora.lb_network_ip: + lb_network = True + if not lb_network: + ports.append(port) + + for port in ports: + try: + self.network_proxy.update_port( + port.id, dns_name='') + + except os_exceptions.ResourceNotFound as e: + raise base.PortNotFound() from e + + def plug_port(self, amphora, port): + try: + interface = self.compute.attach_network_or_port( + compute_id=amphora.compute_id, network_id=None, + ip_address=None, port_id=port.id) + plugged_interface = self._nova_interface_to_octavia_interface( + amphora.compute_id, interface) + except exceptions.NotFound as e: + if 'Instance' in str(e): + raise base.AmphoraNotFound(str(e)) + if 'Network' in str(e): + raise base.NetworkNotFound(str(e)) + raise base.PlugNetworkException(str(e)) + except nova_client_exceptions.Conflict: + LOG.info('Port %(portid)s is already plugged, ' + 'skipping', {'portid': port.id}) + plugged_interface = n_data_models.Interface( + compute_id=amphora.compute_id, + network_id=port.network_id, + port_id=port.id, + fixed_ips=port.fixed_ips) + except Exception as e: + message = _('Error plugging amphora (compute_id: ' + '{compute_id}) into port ' + '{port_id}.').format( + compute_id=amphora.compute_id, + port_id=port.id) + LOG.exception(message) + raise base.PlugNetworkException(message) from e + + return plugged_interface + + def _get_amp_net_configs(self, amp, amp_configs, vip_subnet, vip_port, + additional_vips): + if amp.status != constants.DELETED: + LOG.debug("Retrieving network details for amphora %s", amp.id) + vrrp_port = self.get_port(amp.vrrp_port_id) + vrrp_subnet = self.get_subnet( + vrrp_port.get_subnet_id(amp.vrrp_ip)) + vrrp_port.network = self.get_network(vrrp_port.network_id) + ha_port = self.get_port(amp.ha_port_id) + ha_subnet = self.get_subnet( + ha_port.get_subnet_id(amp.ha_ip)) + + additional_vip_data = [] + for add_vip in additional_vips: + add_vip_subnet = self.get_subnet(add_vip.subnet_id) + add_vip_data = n_data_models.AdditionalVipData( + ip_address=add_vip.ip_address, + subnet=add_vip_subnet + ) + additional_vip_data.append(add_vip_data) + + amp_configs[amp.id] = n_data_models.AmphoraNetworkConfig( + amphora=amp, + vip_subnet=vip_subnet, + vip_port=vip_port, + vrrp_subnet=vrrp_subnet, + vrrp_port=vrrp_port, + ha_subnet=ha_subnet, + ha_port=ha_port, + additional_vip_data=additional_vip_data + ) + + def get_network_configs(self, loadbalancer, amphora=None): + vip_subnet = self.get_subnet(loadbalancer.vip.subnet_id) + vip_port = self.get_port(loadbalancer.vip.port_id) + amp_configs = {} + if amphora: + self._get_amp_net_configs(amphora, amp_configs, + vip_subnet, vip_port, + loadbalancer.additional_vips) + else: + for amp in loadbalancer.amphorae: + try: + self._get_amp_net_configs(amp, amp_configs, + vip_subnet, vip_port, + loadbalancer.additional_vips) + except Exception as e: + LOG.warning('Getting network configurations for amphora ' + '%(amp)s 
failed due to %(err)s.', + {'amp': amp.id, 'err': str(e)}) + return amp_configs + + def delete_port(self, port_id): + """Delete a neutron port. + + :param port_id: The port ID to delete. + :returns: None + """ + try: + self.network_proxy.delete_port(port_id) + except os_exceptions.ResourceNotFound: + LOG.debug('VIP instance port %s already deleted. Skipping.', + port_id) + except Exception as e: + raise exceptions.NetworkServiceError(net_error=str(e)) + + def set_port_admin_state_up(self, port_id, state): + """Set the admin state of a port. True is up, False is down. + + :param port_id: The port ID to update. + :param state: True for up, False for down. + :returns: None + """ + try: + self.network_proxy.update_port( + port_id, admin_state_up=state) + except os_exceptions.ResourceNotFound as e: + raise base.PortNotFound(str(e)) + except Exception as e: + raise exceptions.NetworkServiceError(net_error=str(e)) + + def create_port(self, network_id, name=None, fixed_ips=(), + secondary_ips=(), security_group_ids=(), + admin_state_up=True, qos_policy_id=None, + vnic_type=constants.VNIC_TYPE_NORMAL): + """Creates a network port. + + fixed_ips = [{'subnet_id': <subnet_id>, 'ip_address': <ip_address>},] + ip_address is optional in the fixed_ips dictionary. + + :param network_id: The network the port should be created on. + :param name: The name to apply to the port. + :param fixed_ips: A list of fixed IP dicts. + :param secondary_ips: A list of secondary IPs to add to the port. + :param security_group_ids: A list of security group IDs for the port. + :param admin_state_up: The administrative state to apply to the port. + :param qos_policy_id: The QoS policy ID to apply to the port. + :param vnic_type: The vNIC type this port should attach to. + :returns port: A port data model object. + """ + try: + aap_list = [] + for ip in secondary_ips: + aap_list.append({constants.IP_ADDRESS: ip}) + port = {constants.NETWORK_ID: network_id, + constants.ADMIN_STATE_UP: admin_state_up, + constants.DEVICE_OWNER: constants.OCTAVIA_OWNER, + constants.BINDING_VNIC_TYPE: vnic_type} + if aap_list: + port[constants.ALLOWED_ADDRESS_PAIRS] = aap_list + if fixed_ips: + port[constants.FIXED_IPS] = fixed_ips + if name: + port[constants.NAME] = name + if qos_policy_id: + port[constants.QOS_POLICY_ID] = qos_policy_id + if security_group_ids: + port[constants.SECURITY_GROUPS] = security_group_ids + + new_port = self.network_proxy.create_port(**port) + + LOG.debug('Created port: %(port)s', {constants.PORT: new_port}) + + return utils.convert_port_to_model(new_port) + except Exception as e: + message = _('Error creating a port on network ' + '{network_id} due to {error}.').format( + network_id=network_id, error=str(e)) + LOG.exception(message) + raise base.CreatePortException(message) + + def get_security_group(self, sg_name): + """Retrieves the security group by its name. + + :param sg_name: The security group name. 
+ :return: octavia.network.data_models.SecurityGroup, None if not enabled + :raises: NetworkException, SecurityGroupNotFound + """ + try: + if self.sec_grp_enabled and sg_name: + sec_grps = self.network_proxy.security_groups(name=sg_name) + try: + sg = next(sec_grps) + return utils.convert_security_group_to_model(sg) + except StopIteration: + # pylint: disable=raise-missing-from + message = _('Security group {name} not found.').format( + name=sg_name) + raise base.SecurityGroupNotFound(message) + return None + except base.SecurityGroupNotFound: + raise + except Exception as e: + message = _('Error when getting security group {name} due to ' + '{error}').format(name=sg_name, error=str(e)) + LOG.exception(message) + raise base.NetworkException(message) diff --git a/octavia/network/drivers/neutron/base.py b/octavia/network/drivers/neutron/base.py new file mode 100644 index 0000000000..063fca0225 --- /dev/null +++ b/octavia/network/drivers/neutron/base.py @@ -0,0 +1,323 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing + +from openstack.connection import Connection +import openstack.exceptions as os_exceptions +from openstack.network.v2._proxy import Proxy +from oslo_config import cfg +from oslo_log import log as logging + +from octavia.common import clients +from octavia.common import data_models +from octavia.i18n import _ +from octavia.network import base +from octavia.network import data_models as network_models +from octavia.network.drivers.neutron import utils + +if typing.TYPE_CHECKING: + from octavia.common import context + +LOG = logging.getLogger(__name__) +DNS_INT_EXT_ALIAS = 'dns-integration' +SEC_GRP_EXT_ALIAS = 'security-group' +QOS_EXT_ALIAS = 'qos' +CONF_GROUP = 'neutron' + +CONF = cfg.CONF + + +class BaseNeutronDriver(base.AbstractNetworkDriver): + def __init__(self): + self.network_proxy: Proxy = self.os_connection.network + self._check_extension_cache = {} + self.sec_grp_enabled = self._check_extension_enabled(SEC_GRP_EXT_ALIAS) + self.dns_integration_enabled = self._check_extension_enabled( + DNS_INT_EXT_ALIAS) + self._qos_enabled = self._check_extension_enabled(QOS_EXT_ALIAS) + self.project_id = self.os_connection.current_project_id + + @property + def os_connection(self) -> Connection: + return clients.NeutronAuth.get_neutron_client() + + def _check_extension_enabled(self, extension_alias): + if extension_alias in self._check_extension_cache: + status = self._check_extension_cache[extension_alias] + LOG.debug('Neutron extension %(ext)s cached as %(status)s', + { + 'ext': extension_alias, + 'status': 'enabled' if status else 'disabled' + }) + else: + if self.network_proxy.find_extension(extension_alias): + LOG.debug('Neutron extension %(ext)s found enabled', + {'ext': extension_alias}) + self._check_extension_cache[extension_alias] = True + else: + LOG.debug('Neutron extension %(ext)s is not enabled', + {'ext': extension_alias}) + self._check_extension_cache[extension_alias] = False + return 
self._check_extension_cache[extension_alias] + + def _port_to_vip(self, port, load_balancer, octavia_owned=False): + fixed_ip = None + additional_ips = [] + for port_fixed_ip in port.fixed_ips: + if (not fixed_ip and + port_fixed_ip.subnet_id == load_balancer.vip.subnet_id): + fixed_ip = port_fixed_ip + else: + additional_ips.append(port_fixed_ip) + kwargs = { + 'ip_address': None, + 'subnet_id': None + } + if fixed_ip: + kwargs['ip_address'] = fixed_ip.ip_address + kwargs['subnet_id'] = fixed_ip.subnet_id + + primary_vip = data_models.Vip( + network_id=port.network_id, + port_id=port.id, + load_balancer=load_balancer, + load_balancer_id=load_balancer.id, + octavia_owned=octavia_owned, + sg_ids=load_balancer.vip.sg_ids, + **kwargs) + additional_vips = [ + data_models.AdditionalVip( + ip_address=add_fixed_ip.ip_address, + subnet_id=add_fixed_ip.subnet_id, + network_id=port.network_id, + port_id=port.id, + load_balancer=load_balancer, + load_balancer_id=load_balancer.id) + for add_fixed_ip in additional_ips] + return primary_vip, additional_vips + + def _nova_interface_to_octavia_interface(self, compute_id, nova_interface): + fixed_ips = [utils.convert_fixed_ip_dict_to_model(fixed_ip) + for fixed_ip in nova_interface.fixed_ips] + return network_models.Interface(compute_id=compute_id, + network_id=nova_interface.net_id, + port_id=nova_interface.port_id, + fixed_ips=fixed_ips) + + def _port_to_octavia_interface(self, compute_id, port): + fixed_ips = [utils.convert_fixed_ip_dict_to_model(fixed_ip) + for fixed_ip in port.get('fixed_ips', [])] + return network_models.Interface(compute_id=compute_id, + network_id=port['network_id'], + port_id=port['id'], + fixed_ips=fixed_ips) + + def _add_allowed_address_pairs_to_port(self, port_id, ip_address_list): + aap = [{'ip_address': ip} for ip in ip_address_list] + self.network_proxy.update_port(port_id, + allowed_address_pairs=aap) + + def _update_security_groups(self, sec_grp_ids: list[str], + port_id: str): + # Note: Neutron accepts the SG even if it already exists + try: + self.network_proxy.update_port( + port_id, security_groups=sec_grp_ids) + except os_exceptions.NotFoundException as e: + raise base.PortNotFound(str(e)) + except Exception as e: + raise base.NetworkException(str(e)) + + def _get_ports_by_security_group(self, sec_grp_id): + all_ports = self.network_proxy.ports(project_id=self.project_id) + filtered_ports = [ + p for p in all_ports if (p.security_group_ids and + sec_grp_id in p.security_group_ids)] + return filtered_ports + + def _create_security_group(self, name): + sec_grp = self.network_proxy.create_security_group(name=name) + return sec_grp + + def _create_security_group_rule(self, sec_grp_id, protocol, + direction='ingress', port_min=None, + port_max=None, ethertype='IPv6', + cidr=None): + rule = { + 'security_group_id': sec_grp_id, + 'direction': direction, + 'protocol': protocol, + 'port_range_min': port_min, + 'port_range_max': port_max, + 'ethertype': ethertype, + 'remote_ip_prefix': cidr, + } + + self.network_proxy.create_security_group_rule(**rule) + + def apply_qos_on_port(self, qos_id, port_id): + try: + self.network_proxy.update_port(port_id, qos_policy_id=qos_id) + except os_exceptions.ResourceNotFound as e: + raise base.PortNotFound(str(e)) + except Exception as e: + raise base.NetworkException(str(e)) + + def get_plugged_networks(self, compute_id): + # List neutron ports associated with the Amphora + try: + ports = self.network_proxy.ports(device_id=compute_id) + except Exception: + LOG.debug('Error retrieving plugged 
networks for compute ' + 'device %s.', compute_id) + ports = tuple() + return [self._port_to_octavia_interface(compute_id, port) for port in + ports] + + def _get_resource(self, resource_type, resource_id, context=None): + network = self.network_proxy + if context and not CONF.networking.allow_invisible_resource_usage: + network = clients.NeutronAuth.get_user_neutron_client( + context) + + try: + resource = getattr( + network, f"get_{resource_type}")(resource_id) + return getattr( + utils, f'convert_{resource_type}_to_model')(resource) + except os_exceptions.ResourceNotFound as e: + message = _('{resource_type} not found ' + '({resource_type} id: {resource_id}).').format( + resource_type=resource_type, resource_id=resource_id) + raise getattr(base, '%sNotFound' % ''.join( + [w.capitalize() for w in resource_type.split('_')] + ))(message) from e + except Exception as e: + message = _('Error retrieving {resource_type} ' + '({resource_type} id: {resource_id}.').format( + resource_type=resource_type, resource_id=resource_id) + LOG.exception(message) + raise base.NetworkException(message) from e + + def _get_resources_by_filters(self, resource_type, unique_item=False, + **filters): + """Retrieves item(s) from filters. By default, a list is returned. + + If unique_item set to True, only the first resource is returned. + """ + try: + resources = getattr( + self.network_proxy, f"{resource_type}s")(**filters) + conversion_function = getattr( + utils, + f'convert_{resource_type}_to_model') + try: + # get first item to see if there is at least one resource + res_list = [conversion_function(next(resources))] + except StopIteration: + # pylint: disable=raise-missing-from + raise os_exceptions.NotFoundException( + f'No resource of type {resource_type} found that matches ' + f'given filter criteria: {filters}.') + + if unique_item: + return res_list[0] + return res_list + [conversion_function(r) for r in resources] + + except os_exceptions.NotFoundException as e: + message = _('{resource_type} not found ' + '({resource_type} Filters: {filters}.').format( + resource_type=resource_type, filters=filters) + raise getattr(base, '%sNotFound' % ''.join( + [w.capitalize() for w in resource_type.split('_')] + ))(message) from e + except Exception as e: + message = _('Error retrieving {resource_type} ' + '({resource_type} Filters: {filters}.').format( + resource_type=resource_type, filters=filters) + LOG.exception(message) + raise base.NetworkException(message) from e + + def get_network(self, network_id, context=None): + return self._get_resource('network', network_id, context=context) + + def get_subnet(self, subnet_id, context=None): + return self._get_resource('subnet', subnet_id, context=context) + + def get_port(self, port_id, context=None): + return self._get_resource('port', port_id, context=context) + + def get_security_group_by_id(self, sg_id: str, + context: 'context.RequestContext' = None) -> ( + 'network_models.SecurityGroup'): + return self._get_resource('security_group', sg_id, context=context) + + def get_network_by_name(self, network_name): + return self._get_resources_by_filters( + 'network', unique_item=True, name=network_name) + + def get_subnet_by_name(self, subnet_name): + return self._get_resources_by_filters( + 'subnet', unique_item=True, name=subnet_name) + + def get_port_by_name(self, port_name): + return self._get_resources_by_filters( + 'port', unique_item=True, name=port_name) + + def get_port_by_net_id_device_id(self, network_id, device_id): + return self._get_resources_by_filters( + 
'port', unique_item=True, + network_id=network_id, device_id=device_id) + + def get_qos_policy(self, qos_policy_id): + return self._get_resource('qos_policy', qos_policy_id) + + def qos_enabled(self): + return self._qos_enabled + + def get_network_ip_availability(self, network): + return self._get_resource('network_ip_availability', network.id) + + def plug_fixed_ip(self, port_id, subnet_id, ip_address=None): + port = self.get_port(port_id).to_dict(recurse=True) + fixed_ips = port['fixed_ips'] + + new_fixed_ip_dict = {'subnet_id': subnet_id} + if ip_address: + new_fixed_ip_dict['ip_address'] = ip_address + + fixed_ips.append(new_fixed_ip_dict) + + try: + updated_port = self.network_proxy.update_port( + port_id, fixed_ips=fixed_ips) + return utils.convert_port_to_model(updated_port) + except Exception as e: + raise base.NetworkException(str(e)) + + def unplug_fixed_ip(self, port_id, subnet_id): + port = self.get_port(port_id) + fixed_ips = [ + fixed_ip.to_dict() + for fixed_ip in port.fixed_ips + if fixed_ip.subnet_id != subnet_id + ] + + try: + updated_port = self.network_proxy.update_port( + port_id, fixed_ips=fixed_ips) + return utils.convert_port_to_model(updated_port) + except Exception as e: + raise base.NetworkException(str(e)) diff --git a/octavia/network/drivers/neutron/utils.py b/octavia/network/drivers/neutron/utils.py new file mode 100644 index 0000000000..1f8fb26df2 --- /dev/null +++ b/octavia/network/drivers/neutron/utils.py @@ -0,0 +1,109 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
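+ +# These helpers translate openstacksdk resource objects into the +# Octavia network data models. Minimal usage sketch (the 'conn' name +# is hypothetical, not part of this module): +# +# subnet = conn.network.get_subnet(subnet_id) +# model = convert_subnet_to_model(subnet) +# # model is an octavia.network.data_models.Subnet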
+ +from openstack.network.v2.network_ip_availability import NetworkIPAvailability + +from octavia.network import data_models as network_models + + +def convert_subnet_to_model(subnet): + host_routes = [network_models.HostRoute(nexthop=hr.get('nexthop'), + destination=hr.get('destination')) + for hr in subnet.host_routes] if subnet.host_routes else [] + return network_models.Subnet( + id=subnet.id, + name=subnet.name, + network_id=subnet.network_id, + project_id=subnet.project_id, + gateway_ip=subnet.gateway_ip, + cidr=subnet.cidr, + ip_version=subnet.ip_version, + host_routes=host_routes, + ) + + +def convert_port_to_model(port): + if port.get('fixed_ips'): + fixed_ips = [convert_fixed_ip_dict_to_model(fixed_ip) + for fixed_ip in port.fixed_ips] + else: + fixed_ips = [] + return network_models.Port( + id=port.id, + name=port.name, + device_id=port.device_id, + device_owner=port.device_owner, + mac_address=port.mac_address, + network_id=port.network_id, + status=port.status, + project_id=port.project_id, + admin_state_up=port.is_admin_state_up, + fixed_ips=fixed_ips, + qos_policy_id=port.qos_policy_id, + security_group_ids=port.security_group_ids, + vnic_type=port.binding_vnic_type + ) + + +def convert_network_to_model(nw): + return network_models.Network( + id=nw.id, + name=nw.name, + subnets=nw.subnet_ids, + project_id=nw.project_id, + admin_state_up=nw.is_admin_state_up, + mtu=nw.mtu, + provider_network_type=nw.provider_network_type, + provider_physical_network=nw.provider_physical_network, + provider_segmentation_id=nw.provider_segmentation_id, + router_external=nw.is_router_external, + port_security_enabled=nw.is_port_security_enabled, + ) + + +def convert_fixed_ip_dict_to_model(fixed_ip: dict): + return network_models.FixedIP(subnet_id=fixed_ip.get('subnet_id'), + ip_address=fixed_ip.get('ip_address')) + + +def convert_qos_policy_to_model(qos_policy): + return network_models.QosPolicy(id=qos_policy.id) + + +def convert_network_ip_availability_to_model( + nw_ip_avail: NetworkIPAvailability): + ip_avail = network_models.Network_IP_Availability( + network_id=nw_ip_avail.network_id, + tenant_id=nw_ip_avail.tenant_id, + project_id=nw_ip_avail.project_id, + network_name=nw_ip_avail.network_name, total_ips=nw_ip_avail.total_ips, + used_ips=nw_ip_avail.used_ips, + subnet_ip_availability=nw_ip_avail.subnet_ip_availability) + return ip_avail + + +def convert_security_group_to_model(security_group): + if security_group.security_group_rules: + sg_rule_ids = [rule['id'] for rule in + security_group.security_group_rules] + else: + sg_rule_ids = [] + return network_models.SecurityGroup( + id=security_group.id, + project_id=security_group.project_id, + name=security_group.name, + description=security_group.description, + security_group_rule_ids=sg_rule_ids, + tags=security_group.tags, + stateful=security_group.stateful) diff --git a/octavia/network/drivers/noop_driver/__init__.py b/octavia/network/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/network/drivers/noop_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/network/drivers/noop_driver/driver.py b/octavia/network/drivers/noop_driver/driver.py new file mode 100644 index 0000000000..e30e0656f2 --- /dev/null +++ b/octavia/network/drivers/noop_driver/driver.py @@ -0,0 +1,662 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing + +from oslo_log import log as logging +from oslo_utils import uuidutils +from sqlalchemy import Column +from sqlalchemy import create_engine +from sqlalchemy import delete +from sqlalchemy import event +from sqlalchemy import insert +from sqlalchemy import MetaData +from sqlalchemy import select +from sqlalchemy import String +from sqlalchemy import Table + +from octavia.common import constants +from octavia.common import data_models +from octavia.network import base as driver_base +from octavia.network import data_models as network_models + +if typing.TYPE_CHECKING: + from octavia.common import context + +LOG = logging.getLogger(__name__) + +_NOOP_MANAGER_VARS = { + 'networks': {}, + 'subnets': {}, + 'ports': {}, + 'interfaces': {}, + 'current_network': None +} + + +class NoopManager: + + def __init__(self): + super().__init__() + self.networkconfigconfig = {} + self._qos_extension_enabled = True + + # The controller worker can run with multiple processes, so we + # need to have a persistent and shared store for the no-op driver. + self.engine = create_engine('sqlite:////tmp/octavia-network-noop.db') + + # TODO(johnsom) work around pysqlite locking issues per: + # https://github.com/sqlalchemy/sqlalchemy/discussions/12330 + @event.listens_for(self.engine, "connect") + def do_connect(dbapi_connection, connection_record): + # disable pysqlite's emitting of the BEGIN statement entirely. + # also stops it from emitting COMMIT before any DDL. 
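+ # Setting isolation_level to None below puts the DBAPI connection + # into autocommit mode, so the explicit BEGIN EXCLUSIVE emitted in + # do_begin() is the only transaction start; writers from the + # multiple controller worker processes sharing this SQLite file are + # therefore serialized.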
+ dbapi_connection.isolation_level = None + + @event.listens_for(self.engine, "begin") + def do_begin(conn): + conn.exec_driver_sql("BEGIN EXCLUSIVE") + + metadata_obj = MetaData() + + self.interfaces_table = Table( + 'interfaces', + metadata_obj, + Column('port_id', String(36)), + Column('network_id', String(36)), + Column('compute_id', String(36)), + Column('vnic_type', String(6))) + self.fixed_ips_table = Table( + 'fixed_ips', + metadata_obj, + Column('port_id', String(36)), + Column('subnet_id', String(36)), + Column('ip_address', String(64))) + metadata_obj.create_all(self.engine) + + def allocate_vip(self, loadbalancer): + LOG.debug("Network %s no-op, allocate_vip loadbalancer %s", + self.__class__.__name__, loadbalancer) + self.networkconfigconfig[loadbalancer.id] = ( + loadbalancer, 'allocate_vip') + subnet_id = uuidutils.generate_uuid() + network_id = uuidutils.generate_uuid() + port_id = uuidutils.generate_uuid() + ip_address = '198.51.100.1' + if loadbalancer.vip: + subnet_id = loadbalancer.vip.subnet_id or subnet_id + network_id = loadbalancer.vip.network_id or network_id + port_id = loadbalancer.vip.port_id or port_id + ip_address = loadbalancer.vip.ip_address or ip_address + return_vip = data_models.Vip(ip_address=ip_address, + subnet_id=subnet_id, + network_id=network_id, + port_id=port_id, + load_balancer_id=loadbalancer.id) + additional_vips = [ + data_models.AdditionalVip( + ip_address=add_vip.ip_address, + subnet_id=add_vip.subnet_id, + network_id=network_id, + port_id=port_id, + load_balancer=loadbalancer, + load_balancer_id=loadbalancer.id) + for add_vip in loadbalancer.additional_vips] + return return_vip, additional_vips + + def deallocate_vip(self, vip): + LOG.debug("Network %s no-op, deallocate_vip vip %s", + self.__class__.__name__, vip.ip_address) + self.networkconfigconfig[vip.ip_address] = (vip, + 'deallocate_vip') + + def update_vip_sg(self, load_balancer, vip): + LOG.debug("Network %s no-op, update_vip_sg loadbalancer %s, vip %s", + self.__class__.__name__, + load_balancer.id, vip.ip_address) + self.networkconfigconfig[(load_balancer.id, + vip.ip_address)] = (load_balancer, vip, + 'update_vip_sg') + + def update_aap_port_sg(self, + load_balancer: data_models.LoadBalancer, + amphora: data_models.Amphora, + vip: data_models.Vip): + LOG.debug("Network %s no-op, update_aap_port_sg load_balancer %s, " + " vip %s, amphora %s", self.__class__.__name__, + load_balancer.id, vip.ip_address, amphora) + self.networkconfigconfig[(amphora.id, + vip.ip_address)] = (load_balancer, vip, + amphora, + 'update_aap_port_sg') + + def plug_aap_port(self, load_balancer, vip, amphora, subnet): + LOG.debug("Network %s no-op, plug_aap_port loadbalancer %s, vip %s," + " amphora %s, subnet %s", + self.__class__.__name__, + load_balancer.id, vip.ip_address, amphora, subnet) + self.networkconfigconfig[(amphora.id, + vip.ip_address)] = ( + load_balancer, vip, amphora, subnet, + 'plug_aap_port') + + fixed_ips = [network_models.FixedIP(subnet_id=subnet.id, + ip_address=vip.ip_address)] + + port_id = uuidutils.generate_uuid() + LOG.debug("Network %s no-op, plug_aap_port loadbalancer %s is using " + "base port ID %s", + self.__class__.__name__, + load_balancer.id, port_id) + + # Store the interface information in the no-op DB + with self.engine.connect() as connection: + connection.execute(insert(self.interfaces_table).values( + port_id=port_id, network_id=vip.network_id, + compute_id=amphora.compute_id, + vnic_type=constants.VNIC_TYPE_NORMAL)) + for fixed_ip in fixed_ips: + 
connection.execute(insert(self.fixed_ips_table).values( + port_id=port_id, subnet_id=fixed_ip.subnet_id, + ip_address=fixed_ip.ip_address)) + connection.commit() + + return data_models.Amphora( + id=amphora.id, + compute_id=amphora.compute_id, + vrrp_ip='198.51.100.1', + ha_ip='198.51.100.1', + vrrp_port_id=port_id, + ha_port_id=uuidutils.generate_uuid() + ) + + def unplug_vip(self, loadbalancer, vip): + LOG.debug("Network %s no-op, unplug_vip loadbalancer %s, vip %s", + self.__class__.__name__, + loadbalancer.id, vip.ip_address) + self.networkconfigconfig[(loadbalancer.id, + vip.ip_address)] = (loadbalancer, vip, + 'unplug_vip') + + def unplug_aap_port(self, vip, amphora, subnet): + LOG.debug("Network %s no-op, unplug_aap_port vip %s amp: %s " + "subnet: %s", + self.__class__.__name__, + vip.ip_address, amphora.id, subnet.id) + self.networkconfigconfig[(amphora.id, + vip.ip_address)] = (vip, amphora, subnet, + 'unplug_aap_port') + + def unplug_network(self, compute_id, network_id): + LOG.debug("Network %s no-op, unplug_network compute_id %s, " + "network_id %s", + self.__class__.__name__, compute_id, network_id) + self.networkconfigconfig[(compute_id, network_id)] = ( + compute_id, network_id, 'unplug_network') + + def get_plugged_networks(self, compute_id): + LOG.debug("Network %s no-op, get_plugged_networks compute_id %s", + self.__class__.__name__, compute_id) + self.networkconfigconfig[compute_id] = ( + compute_id, 'get_plugged_networks') + + # Retrieve the interfaces from the no-op DB for this compute_id + interfaces = [] + with self.engine.connect() as connection: + int_results = connection.execute( + select(self.interfaces_table) + .where(self.interfaces_table.c.compute_id == compute_id)) + # Walk through the matching interfaces + for interface in int_results: + fixed_ips = [] + # Get the fixed IPs on each interface + fixed_ip_results = connection.execute( + select(self.fixed_ips_table) + .where( + self.fixed_ips_table.c.port_id == interface.port_id)) + # Build the FixedIP objects for the interface + for fixed_ip in fixed_ip_results: + fixed_ips.append(network_models.FixedIP( + subnet_id=fixed_ip.subnet_id, + ip_address=fixed_ip.ip_address)) + # Add the interface object to the list + interfaces.append(network_models.Interface( + compute_id=interface.compute_id, + network_id=interface.network_id, + port_id=interface.port_id, fixed_ips=fixed_ips, + vnic_type=interface.vnic_type)) + return interfaces + + def update_vip(self, loadbalancer, for_delete=False): + LOG.debug("Network %s no-op, update_vip loadbalancer %s " + "with for delete %s", + self.__class__.__name__, loadbalancer, for_delete) + self.networkconfigconfig[loadbalancer.id] = ( + loadbalancer, for_delete, 'update_vip') + + def get_network(self, network_id): + LOG.debug("Network %s no-op, get_network network_id %s", + self.__class__.__name__, network_id) + self.networkconfigconfig[network_id] = (network_id, 'get_network') + if network_id in _NOOP_MANAGER_VARS['networks']: + return _NOOP_MANAGER_VARS['networks'][network_id] + + network = network_models.Network(id=network_id, + port_security_enabled=True) + + class ItIsInsideMe(list): + known_subnets = None + + def __init__(self, network, parent): + super().__init__() + self.network = network + self.parent = parent + self.known_subnets = {} + + def to_dict(self, **kwargs): + return [{}] + + def __contains__(self, item): + self.known_subnets[item] = self.parent.get_subnet(item) + self.known_subnets[item].network_id = self.network.id + return True + + def __len__(self): + return 
len(self.known_subnets) + 1 + + def __iter__(self): + yield from self.known_subnets + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=self.network.id) + self.known_subnets[subnet.id] = subnet + _NOOP_MANAGER_VARS['subnets'][subnet.id] = subnet + yield subnet.id + + network.subnets = ItIsInsideMe(network, self) + _NOOP_MANAGER_VARS['networks'][network_id] = network + _NOOP_MANAGER_VARS['current_network'] = network_id + return network + + def get_subnet(self, subnet_id): + LOG.debug("Subnet %s no-op, get_subnet subnet_id %s", + self.__class__.__name__, subnet_id) + self.networkconfigconfig[subnet_id] = (subnet_id, 'get_subnet') + if subnet_id in _NOOP_MANAGER_VARS['subnets']: + return _NOOP_MANAGER_VARS['subnets'][subnet_id] + + subnet = network_models.Subnet( + id=subnet_id, + network_id=_NOOP_MANAGER_VARS['current_network']) + _NOOP_MANAGER_VARS['subnets'][subnet_id] = subnet + return subnet + + def get_port(self, port_id): + LOG.debug("Port %s no-op, get_port port_id %s", + self.__class__.__name__, port_id) + self.networkconfigconfig[port_id] = (port_id, 'get_port') + if port_id in _NOOP_MANAGER_VARS['ports']: + return _NOOP_MANAGER_VARS['ports'][port_id] + + port = network_models.Port(id=port_id) + _NOOP_MANAGER_VARS['ports'][port_id] = port + return port + + def get_security_group_by_id(self, sg_id: str, + context: 'context.RequestContext' = None) -> ( + 'network_models.SecurityGroup'): + LOG.debug("Network %s no-op, get_security_group_by_id %s", + self.__class__.__name__, sg_id) + self.networkconfigconfig[sg_id] = (sg_id, 'get_security_group_by_id') + return network_models.SecurityGroup(id=sg_id) + + def get_network_by_name(self, network_name): + LOG.debug("Network %s no-op, get_network_by_name network_name %s", + self.__class__.__name__, network_name) + self.networkconfigconfig[network_name] = (network_name, + 'get_network_by_name') + by_name = {n.name: n for n in _NOOP_MANAGER_VARS['networks'].values()} + if network_name in by_name: + return by_name[network_name] + + network = network_models.Network(id=uuidutils.generate_uuid(), + port_security_enabled=True, + name=network_name) + _NOOP_MANAGER_VARS['networks'][network.id] = network + _NOOP_MANAGER_VARS['current_network'] = network.id + return network + + def get_subnet_by_name(self, subnet_name): + LOG.debug("Subnet %s no-op, get_subnet_by_name subnet_name %s", + self.__class__.__name__, subnet_name) + self.networkconfigconfig[subnet_name] = (subnet_name, + 'get_subnet_by_name') + by_name = {s.name: s for s in _NOOP_MANAGER_VARS['subnets'].values()} + if subnet_name in by_name: + return by_name[subnet_name] + + subnet = network_models.Subnet( + id=uuidutils.generate_uuid(), + name=subnet_name, + network_id=_NOOP_MANAGER_VARS['current_network']) + _NOOP_MANAGER_VARS['subnets'][subnet.id] = subnet + return subnet + + def get_port_by_name(self, port_name): + LOG.debug("Port %s no-op, get_port_by_name port_name %s", + self.__class__.__name__, port_name) + self.networkconfigconfig[port_name] = (port_name, 'get_port_by_name') + by_name = {p.name: p for p in _NOOP_MANAGER_VARS['ports'].values()} + if port_name in by_name: + return by_name[port_name] + + port = network_models.Port(id=uuidutils.generate_uuid(), + name=port_name) + _NOOP_MANAGER_VARS['ports'][port.id] = port + return port + + def get_port_by_net_id_device_id(self, network_id, device_id): + LOG.debug("Port %s no-op, get_port_by_net_id_device_id network_id %s" + " device_id %s", + self.__class__.__name__, network_id, device_id) + 
self.networkconfigconfig[(network_id, device_id)] = ( + network_id, device_id, 'get_port_by_net_id_device_id') + by_net_dev_id = {(p.network_id, p.device_id): p + for p in _NOOP_MANAGER_VARS['ports'].values()} + if (network_id, device_id) in by_net_dev_id: + return by_net_dev_id[(network_id, device_id)] + + port = network_models.Port(id=uuidutils.generate_uuid(), + network_id=network_id, + device_id=device_id) + _NOOP_MANAGER_VARS['ports'][port.id] = port + return port + + def get_security_group(self, sg_name): + LOG.debug("Network %s no-op, get_security_group name %s", + self.__class__.__name__, sg_name) + self.networkconfigconfig[(sg_name)] = (sg_name, 'get_security_group') + return network_models.SecurityGroup(id=uuidutils.generate_uuid()) + + def failover_preparation(self, amphora): + LOG.debug("failover %s no-op, failover_preparation, amphora id %s", + self.__class__.__name__, amphora.id) + + def plug_port(self, amphora, port): + LOG.debug("Network %s no-op, plug_port amphora.id %s, port_id " + "%s", self.__class__.__name__, amphora.id, port.id) + self.networkconfigconfig[(amphora.id, port.id)] = ( + amphora, port, 'plug_port') + + def _get_amp_net_configs(self, amp, amp_configs, vip_subnet, vip_port): + vrrp_port = self.get_port(amp.vrrp_port_id) + ha_port = self.get_port(amp.ha_port_id) + amp_configs[amp.id] = network_models.AmphoraNetworkConfig( + amphora=amp, + vrrp_subnet=self.get_subnet( + vrrp_port.get_subnet_id(amp.vrrp_ip)), + vrrp_port=vrrp_port, + ha_subnet=self.get_subnet( + ha_port.get_subnet_id(amp.ha_ip)), + ha_port=ha_port) + + def get_network_configs(self, loadbalancer, amphora=None): + amphora_id = amphora.id if amphora else None + LOG.debug("Network %s no-op, get_network_configs loadbalancer id " + "%s amphora id: %s", self.__class__.__name__, + loadbalancer.id, amphora_id) + self.networkconfigconfig[(loadbalancer.id)] = ( + loadbalancer, 'get_network_configs') + vip_subnet = self.get_subnet(loadbalancer.vip.subnet_id) + vip_port = self.get_port(loadbalancer.vip.port_id) + + amp_configs = {} + if amphora: + self._get_amp_net_configs(amphora, amp_configs, + vip_subnet, vip_port) + else: + for amp in loadbalancer.amphorae: + self._get_amp_net_configs(amp, amp_configs, + vip_subnet, vip_port) + + return amp_configs + + def get_qos_policy(self, qos_policy_id): + LOG.debug("Qos Policy %s no-op, get_qos_policy qos_policy_id %s", + self.__class__.__name__, qos_policy_id) + self.networkconfigconfig[qos_policy_id] = (qos_policy_id, + 'get_qos_policy') + return qos_policy_id + + def apply_qos_on_port(self, qos_id, port_id): + LOG.debug("Network %s no-op, apply_qos_on_port qos_id %s, port_id " + "%s", self.__class__.__name__, qos_id, port_id) + self.networkconfigconfig[(qos_id, port_id)] = ( + qos_id, port_id, 'apply_qos_on_port') + + def qos_enabled(self): + return self._qos_extension_enabled + + def get_network_ip_availability(self, network): + LOG.debug("Network %s no-op, network_ip_availability network_id %s", + self.__class__.__name__, network.id) + self.networkconfigconfig[(network.id, 'ip_availability')] = ( + network.id, 'get_network_ip_availability') + ip_avail = network_models.Network_IP_Availability( + network_id=network.id) + subnet_ip_availability = [] + for subnet_id in list(network.subnets): + subnet_ip_availability.append({'subnet_id': subnet_id, + 'used_ips': 0, 'total_ips': 254}) + ip_avail.subnet_ip_availability = subnet_ip_availability + return ip_avail + + def delete_port(self, port_id): + LOG.debug("Network %s no-op, delete_port port_id %s", + 
self.__class__.__name__, port_id) + self.networkconfigconfig[port_id] = (port_id, 'delete_port') + + # Remove the port from the no-op DB + with self.engine.connect() as connection: + connection.execute(delete(self.interfaces_table) + .where( + self.interfaces_table.c.port_id == port_id)) + connection.execute(delete(self.fixed_ips_table) + .where( + self.fixed_ips_table.c.port_id == port_id)) + connection.commit() + + def set_port_admin_state_up(self, port_id, state): + LOG.debug("Network %s no-op, set_port_admin_state_up port_id %s, " + "state %s", self.__class__.__name__, port_id, state) + self.networkconfigconfig[(port_id, state)] = (port_id, state, + 'admin_down_port') + + def create_port(self, network_id, name=None, fixed_ips=(), + secondary_ips=(), security_group_ids=(), + admin_state_up=True, qos_policy_id=None, + vnic_type=constants.VNIC_TYPE_NORMAL): + LOG.debug("Network %s no-op, create_port network_id %s", + self.__class__.__name__, network_id) + if not name: + name = 'no-op-port' + port_id = uuidutils.generate_uuid() + project_id = uuidutils.generate_uuid() + + fixed_ip_obj_list = [] + for fixed_ip in fixed_ips: + if fixed_ip and not fixed_ip.get('ip_address'): + fixed_ip_obj_list.append( + network_models.FixedIP(subnet_id=fixed_ip.get('subnet_id'), + ip_address='198.51.100.56')) + else: + fixed_ip_obj_list.append( + network_models.FixedIP( + subnet_id=fixed_ip.get('subnet_id'), + ip_address=fixed_ip.get('ip_address'))) + if not fixed_ip_obj_list: + fixed_ip_obj_list = [network_models.FixedIP( + subnet_id=uuidutils.generate_uuid(), + ip_address='198.51.100.56')] + + self.networkconfigconfig[(network_id, 'create_port')] = ( + network_id, name, fixed_ip_obj_list, secondary_ips, + security_group_ids, admin_state_up, qos_policy_id, vnic_type) + + # Store the interface information in the no-op DB + with self.engine.connect() as connection: + connection.execute(insert(self.interfaces_table).values( + port_id=port_id, network_id=network_id, vnic_type=vnic_type)) + for fixed_ip in fixed_ip_obj_list: + connection.execute(insert(self.fixed_ips_table).values( + port_id=port_id, subnet_id=fixed_ip.subnet_id, + ip_address=fixed_ip.ip_address)) + connection.commit() + + return network_models.Port( + id=port_id, name=name, device_id='no-op-device-id', + device_owner='Octavia', mac_address='00:00:5E:00:53:05', + network_id=network_id, status='UP', project_id=project_id, + admin_state_up=admin_state_up, fixed_ips=fixed_ip_obj_list, + qos_policy_id=qos_policy_id, security_group_ids=security_group_ids, + vnic_type=vnic_type) + + def plug_fixed_ip(self, port_id, subnet_id, ip_address=None): + LOG.debug("Network %s no-op, plug_fixed_ip port_id %s, subnet_id " + "%s, ip_address %s", self.__class__.__name__, port_id, + subnet_id, ip_address) + self.networkconfigconfig[(port_id, subnet_id)] = ( + port_id, subnet_id, ip_address, 'plug_fixed_ip') + + port = network_models.Port(id=port_id, + network_id=uuidutils.generate_uuid()) + _NOOP_MANAGER_VARS['ports'][port.id] = port + return port + + def unplug_fixed_ip(self, port_id, subnet_id): + LOG.debug("Network %s no-op, unplug_fixed_ip port_id %s, subnet_id " + "%s", self.__class__.__name__, port_id, + subnet_id) + self.networkconfigconfig[(port_id, subnet_id)] = ( + port_id, subnet_id, 'unplug_fixed_ip') + + return _NOOP_MANAGER_VARS['ports'].pop(port_id, None) + + +class NoopNetworkDriver(driver_base.AbstractNetworkDriver): + def __init__(self): + super().__init__() + self.driver = NoopManager() + + def allocate_vip(self, loadbalancer): + return 
self.driver.allocate_vip(loadbalancer) + + def deallocate_vip(self, vip): + self.driver.deallocate_vip(vip) + + def unplug_vip(self, loadbalancer, vip): + self.driver.unplug_vip(loadbalancer, vip) + + def unplug_network(self, compute_id, network_id): + self.driver.unplug_network(compute_id, network_id) + + def get_plugged_networks(self, compute_id): + return self.driver.get_plugged_networks(compute_id) + + def update_vip(self, loadbalancer, for_delete=False): + self.driver.update_vip(loadbalancer, for_delete) + + def get_network(self, network_id, context=None): + return self.driver.get_network(network_id) + + def get_subnet(self, subnet_id, context=None): + return self.driver.get_subnet(subnet_id) + + def get_port(self, port_id, context=None): + return self.driver.get_port(port_id) + + def get_security_group_by_id(self, sg_id: str, + context: 'context.RequestContext' = None) -> ( + 'network_models.SecurityGroup'): + return self.driver.get_security_group_by_id(sg_id, context=context) + + def get_qos_policy(self, qos_policy_id): + return self.driver.get_qos_policy(qos_policy_id) + + def get_network_by_name(self, network_name): + return self.driver.get_network_by_name(network_name) + + def get_subnet_by_name(self, subnet_name): + return self.driver.get_subnet_by_name(subnet_name) + + def get_port_by_name(self, port_name): + return self.driver.get_port_by_name(port_name) + + def get_port_by_net_id_device_id(self, network_id, device_id): + return self.driver.get_port_by_net_id_device_id(network_id, device_id) + + def get_security_group(self, sg_name): + return self.driver.get_security_group(sg_name) + + def failover_preparation(self, amphora): + self.driver.failover_preparation(amphora) + + def plug_port(self, amphora, port): + return self.driver.plug_port(amphora, port) + + def get_network_configs(self, loadbalancer, amphora=None): + return self.driver.get_network_configs(loadbalancer, amphora) + + def apply_qos_on_port(self, qos_id, port_id): + self.driver.apply_qos_on_port(qos_id, port_id) + + def update_vip_sg(self, load_balancer, vip): + self.driver.update_vip_sg(load_balancer, vip) + + def update_aap_port_sg(self, + load_balancer: data_models.LoadBalancer, + amphora: data_models.Amphora, + vip: data_models.Vip): + self.driver.update_aap_port_sg(load_balancer, amphora, vip) + + def plug_aap_port(self, load_balancer, vip, amphora, subnet): + return self.driver.plug_aap_port(load_balancer, vip, amphora, subnet) + + def unplug_aap_port(self, vip, amphora, subnet): + self.driver.unplug_aap_port(vip, amphora, subnet) + + def qos_enabled(self): + return self.driver.qos_enabled() + + def get_network_ip_availability(self, network): + return self.driver.get_network_ip_availability(network) + + def delete_port(self, port_id): + self.driver.delete_port(port_id) + + def set_port_admin_state_up(self, port_id, state): + self.driver.set_port_admin_state_up(port_id, state) + + def create_port(self, network_id, name=None, fixed_ips=(), + secondary_ips=(), security_group_ids=(), + admin_state_up=True, qos_policy_id=None, + vnic_type=constants.VNIC_TYPE_NORMAL): + return self.driver.create_port( + network_id, name, fixed_ips, secondary_ips, security_group_ids, + admin_state_up, qos_policy_id, vnic_type) + + def plug_fixed_ip(self, port_id, subnet_id, ip_address=None): + return self.driver.plug_fixed_ip(port_id, subnet_id, ip_address) + + def unplug_fixed_ip(self, port_id, subnet_id): + return self.driver.unplug_fixed_ip(port_id, subnet_id) diff --git a/octavia/opts.py b/octavia/opts.py new file mode 100644 
index 0000000000..70990331bf --- /dev/null +++ b/octavia/opts.py @@ -0,0 +1,63 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools + +from keystoneauth1 import loading as ks_loading + +import octavia.certificates.common.local +import octavia.common.config +from octavia.common import constants + + +def list_opts(): + return [ + ('DEFAULT', + itertools.chain(octavia.common.config.core_opts)), + ('api_settings', octavia.common.config.api_opts), + ('amphora_agent', octavia.common.config.amphora_agent_opts), + ('compute', octavia.common.config.compute_opts), + ('networking', octavia.common.config.networking_opts), + ('oslo_messaging', octavia.common.config.oslo_messaging_opts), + ('haproxy_amphora', octavia.common.config.haproxy_amphora_opts), + ('health_manager', octavia.common.config.health_manager_opts), + ('controller_worker', octavia.common.config.controller_worker_opts), + ('task_flow', octavia.common.config.task_flow_opts), + ('certificates', itertools.chain( + octavia.common.config.certificate_opts, + octavia.certificates.common.local.certgen_opts)), + ('house_keeping', octavia.common.config.house_keeping_opts), + ('keepalived_vrrp', octavia.common.config.keepalived_vrrp_opts), + ('nova', octavia.common.config.nova_opts), + ('cinder', octavia.common.config.cinder_opts), + ('glance', octavia.common.config.glance_opts), + ('neutron', itertools.chain( + octavia.common.config.neutron_opts, + get_ksa_opts(True))), + ('quotas', octavia.common.config.quota_opts), + ('audit', octavia.common.config.audit_opts), + ('driver_agent', octavia.common.config.driver_agent_opts), + (constants.SERVICE_AUTH, get_ksa_opts()), + ] + + +def get_ksa_opts(adapter=False): + opts = ( + ks_loading.get_session_conf_options() + + ks_loading.get_auth_common_conf_options() + + ks_loading.get_auth_plugin_conf_options('password') + + ks_loading.get_auth_plugin_conf_options('v2password') + + ks_loading.get_auth_plugin_conf_options('v3password') + ) + if adapter: + opts += ks_loading.get_adapter_conf_options(include_deprecated=False) + return opts diff --git a/octavia/policies/__init__.py b/octavia/policies/__init__.py new file mode 100644 index 0000000000..afa24ea35a --- /dev/null +++ b/octavia/policies/__init__.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
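+ +# This module only aggregates the per-resource rule lists so that +# oslo.policy can discover the project defaults in one place. A +# deployer can render them with the standard generator, e.g. +# (illustrative invocation, assuming the usual 'oslo.policy.policies' +# entry point is registered for the 'octavia' namespace): +# +# oslopolicy-sample-generator --namespace octavia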
+ + +import itertools + +from octavia.policies import advanced_rbac +from octavia.policies import amphora +from octavia.policies import availability_zone +from octavia.policies import availability_zone_profile +from octavia.policies import base +from octavia.policies import flavor +from octavia.policies import flavor_profile +from octavia.policies import healthmonitor +from octavia.policies import keystone_default_roles +from octavia.policies import l7policy +from octavia.policies import l7rule +from octavia.policies import listener +from octavia.policies import loadbalancer +from octavia.policies import member +from octavia.policies import pool +from octavia.policies import provider +from octavia.policies import provider_availability_zone +from octavia.policies import provider_flavor +from octavia.policies import quota + + +def list_rules(): + return itertools.chain( + base.list_rules(), + keystone_default_roles.list_rules(), + advanced_rbac.list_rules(), + flavor.list_rules(), + flavor_profile.list_rules(), + availability_zone.list_rules(), + availability_zone_profile.list_rules(), + healthmonitor.list_rules(), + l7policy.list_rules(), + l7rule.list_rules(), + listener.list_rules(), + loadbalancer.list_rules(), + member.list_rules(), + pool.list_rules(), + provider.list_rules(), + quota.list_rules(), + amphora.list_rules(), + provider_flavor.list_rules(), + provider_availability_zone.list_rules(), + ) diff --git a/octavia/policies/advanced_rbac.py b/octavia/policies/advanced_rbac.py new file mode 100644 index 0000000000..80c31f934c --- /dev/null +++ b/octavia/policies/advanced_rbac.py @@ -0,0 +1,95 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import versionutils +from oslo_policy import policy + +from octavia.common import constants + +# Octavia specific Advanced RBAC rules + +# The default is to not allow access unless the auth_strategy is 'noauth'. +# Users must be a member of one of the following roles to have access to +# the load-balancer API: +# +# role:load-balancer_observer +# User has access to load-balancer read-only APIs +# role:load-balancer_global_observer +# User has access to load-balancer read-only APIs including resources +# owned by others. +# role:load-balancer_member +# User has access to load-balancer read and write APIs +# role:load-balancer_admin +# User is considered an admin for all load-balancer APIs including +# resources owned by others. + +deprecated_context_is_admin = policy.DeprecatedRule( + name='context_is_admin', + check_str='role:admin or ' + 'role:load-balancer_admin', + deprecated_reason=constants.RBAC_ROLES_DEPRECATED_REASON, + deprecated_since=versionutils.deprecated.WALLABY, +) + +# Note: 'is_admin:True' is a policy rule that takes into account the +# auth_strategy == noauth configuration setting. 
+# It is equivalent to 'rule:context_is_admin or {auth_strategy == noauth}' + +deprecated_admin = policy.DeprecatedRule( + name='load-balancer:admin', + check_str='is_admin:True or ' + 'role:admin or ' + 'role:load-balancer_admin', + deprecated_reason=constants.RBAC_ROLES_DEPRECATED_REASON, + deprecated_since=versionutils.deprecated.WALLABY, +) + +deprecated_global_observer = policy.DeprecatedRule( + name='load-balancer:global_observer', + check_str='role:load-balancer_global_observer', + deprecated_reason=constants.RBAC_ROLES_DEPRECATED_REASON, + deprecated_since=versionutils.deprecated.WALLABY, +) + +deprecated_member_and_owner = policy.DeprecatedRule( + name='load-balancer:member_and_owner', + check_str='role:load-balancer_member and ' + 'rule:load-balancer:owner', + deprecated_reason=constants.RBAC_ROLES_DEPRECATED_REASON, + deprecated_since=versionutils.deprecated.WALLABY, +) + +deprecated_observer_and_owner = policy.DeprecatedRule( + name='load-balancer:observer_and_owner', + check_str='role:load-balancer_observer and ' + 'rule:load-balancer:owner', + deprecated_reason=constants.RBAC_ROLES_DEPRECATED_REASON, + deprecated_since=versionutils.deprecated.WALLABY, +) + +deprecated_quota_admin = policy.DeprecatedRule( + name='load-balancer:quota-admin', + check_str='role:load-balancer_quota_admin', + deprecated_reason=constants.RBAC_ROLES_DEPRECATED_REASON, + deprecated_since=versionutils.deprecated.WALLABY, +) + +rules = [ + policy.RuleDefault( + name='load-balancer:owner', + check_str='project_id:%(project_id)s', + scope_types=[constants.RBAC_SCOPE_PROJECT]), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/amphora.py b/octavia/policies/amphora.py new file mode 100644 index 0000000000..554ce96c42 --- /dev/null +++ b/octavia/policies/amphora.py @@ -0,0 +1,61 @@ +# Copyright 2017 Rackspace, US Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
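+ +# Policy rule names below are assembled from constants; for example, +# f'{constants.RBAC_AMPHORA}{constants.RBAC_GET_ALL}' is expected to +# yield a name of the form 'os_load-balancer_api:amphora:get_all' +# (the exact prefix is defined in octavia.common.constants).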
+ +from oslo_policy import policy + +from octavia.common import constants + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_AMPHORA}{constants.RBAC_GET_ALL}', + constants.RULE_API_ADMIN, + "List Amphorae", + [{'method': 'GET', 'path': '/v2/octavia/amphorae'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AMPHORA}{constants.RBAC_GET_ONE}', + constants.RULE_API_ADMIN, + "Show Amphora details", + [{'method': 'GET', 'path': '/v2/octavia/amphorae/{amphora_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AMPHORA}{constants.RBAC_DELETE}', + constants.RULE_API_ADMIN, + "Delete an Amphora", + [{'method': 'DELETE', 'path': '/v2/octavia/amphorae/{amphora_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AMPHORA}{constants.RBAC_PUT_CONFIG}', + constants.RULE_API_ADMIN, + "Update Amphora Agent Configuration", + [{'method': 'PUT', + 'path': '/v2/octavia/amphorae/{amphora_id}/config'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AMPHORA}{constants.RBAC_PUT_FAILOVER}', + constants.RULE_API_ADMIN, + "Failover Amphora", + [{'method': 'PUT', + 'path': '/v2/octavia/amphorae/{amphora_id}/failover'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AMPHORA}{constants.RBAC_GET_STATS}', + constants.RULE_API_ADMIN, + "Show Amphora statistics", + [{'method': 'GET', 'path': '/v2/octavia/amphorae/{amphora_id}/stats'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/availability_zone.py b/octavia/policies/availability_zone.py new file mode 100644 index 0000000000..e80e4b2084 --- /dev/null +++ b/octavia/policies/availability_zone.py @@ -0,0 +1,57 @@ +# Copyright 2019 Verizon Media +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_policy import policy + +from octavia.common import constants + + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_AVAILABILITY_ZONE}{constants.RBAC_GET_ALL}', + constants.RULE_API_READ, + "List Availability Zones", + [{'method': 'GET', 'path': '/v2.0/lbaas/availabilityzones'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AVAILABILITY_ZONE}{constants.RBAC_POST}', + constants.RULE_API_ADMIN, + "Create an Availability Zone", + [{'method': 'POST', 'path': '/v2.0/lbaas/availabilityzones'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AVAILABILITY_ZONE}{constants.RBAC_PUT}', + constants.RULE_API_ADMIN, + "Update an Availability Zone", + [{'method': 'PUT', + 'path': '/v2.0/lbaas/availabilityzones/{availability_zone_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AVAILABILITY_ZONE}{constants.RBAC_GET_ONE}', + constants.RULE_API_READ, + "Show Availability Zone details", + [{'method': 'GET', + 'path': '/v2.0/lbaas/availabilityzones/{availability_zone_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AVAILABILITY_ZONE}{constants.RBAC_DELETE}', + constants.RULE_API_ADMIN, + "Remove an Availability Zone", + [{'method': 'DELETE', + 'path': '/v2.0/lbaas/availabilityzones/{availability_zone_id}'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/availability_zone_profile.py b/octavia/policies/availability_zone_profile.py new file mode 100644 index 0000000000..99790bb1a3 --- /dev/null +++ b/octavia/policies/availability_zone_profile.py @@ -0,0 +1,60 @@ +# Copyright 2019 Verizon Media +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_policy import policy + +from octavia.common import constants + + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_AVAILABILITY_ZONE_PROFILE}{constants.RBAC_GET_ALL}', + constants.RULE_API_ADMIN, + "List Availability Zone Profiles", + [{'method': 'GET', 'path': '/v2.0/lbaas/availabilityzoneprofiles'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AVAILABILITY_ZONE_PROFILE}{constants.RBAC_POST}', + constants.RULE_API_ADMIN, + "Create an Availability Zone Profile", + [{'method': 'POST', 'path': '/v2.0/lbaas/availabilityzoneprofiles'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AVAILABILITY_ZONE_PROFILE}{constants.RBAC_PUT}', + constants.RULE_API_ADMIN, + "Update an Availability Zone Profile", + [{'method': 'PUT', + 'path': '/v2.0/lbaas/availabilityzoneprofiles/' + '{availability_zone_profile_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AVAILABILITY_ZONE_PROFILE}{constants.RBAC_GET_ONE}', + constants.RULE_API_ADMIN, + "Show Availability Zone Profile details", + [{'method': 'GET', + 'path': '/v2.0/lbaas/availabilityzoneprofiles/' + '{availability_zone_profile_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_AVAILABILITY_ZONE_PROFILE}{constants.RBAC_DELETE}', + constants.RULE_API_ADMIN, + "Remove an Availability Zone Profile", + [{'method': 'DELETE', + 'path': '/v2.0/lbaas/availabilityzoneprofiles/' + '{availability_zone_profile_id}'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/base.py b/octavia/policies/base.py new file mode 100644 index 0000000000..4bb82fc040 --- /dev/null +++ b/octavia/policies/base.py @@ -0,0 +1,69 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_policy import policy + +from octavia.common import constants + + +rules = [ + + # API access methods + # + # These are the only rules that should be applied to API endpoints.
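+ # + # For example (illustrative only): an API handler for a GET request + # would enforce 'load-balancer:read' (typically via the + # constants.RULE_API_READ alias used by the resource policy modules + # in this series), never the per-role rules defined elsewhere.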
+ + policy.RuleDefault( + name='load-balancer:read', + check_str='rule:load-balancer:observer_and_owner or ' + 'rule:load-balancer:global_observer or ' + 'rule:load-balancer:member_and_owner or ' + 'rule:load-balancer:admin', + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + policy.RuleDefault( + name='load-balancer:read-global', + check_str='rule:load-balancer:global_observer or ' + 'rule:load-balancer:admin', + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + policy.RuleDefault( + name='load-balancer:write', + check_str='rule:load-balancer:member_and_owner or ' + 'rule:load-balancer:admin', + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + policy.RuleDefault( + name='load-balancer:read-quota', + check_str='rule:load-balancer:observer_and_owner or ' + 'rule:load-balancer:global_observer or ' + 'rule:load-balancer:member_and_owner or ' + 'rule:load-balancer:quota-admin or ' + 'rule:load-balancer:admin', + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + policy.RuleDefault( + name='load-balancer:read-quota-global', + check_str='rule:load-balancer:global_observer or ' + 'rule:load-balancer:quota-admin or ' + 'rule:load-balancer:admin', + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + policy.RuleDefault( + name='load-balancer:write-quota', + check_str='rule:load-balancer:quota-admin or ' + 'rule:load-balancer:admin', + scope_types=[constants.RBAC_SCOPE_PROJECT]), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/flavor.py b/octavia/policies/flavor.py new file mode 100644 index 0000000000..1b47b2931b --- /dev/null +++ b/octavia/policies/flavor.py @@ -0,0 +1,56 @@ +# Copyright 2017 Walmart Stores Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +from oslo_policy import policy + +from octavia.common import constants + + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_FLAVOR}{constants.RBAC_GET_ALL}', + constants.RULE_API_READ, + "List Flavors", + [{'method': 'GET', 'path': '/v2.0/lbaas/flavors'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_FLAVOR}{constants.RBAC_POST}', + constants.RULE_API_ADMIN, + "Create a Flavor", + [{'method': 'POST', 'path': '/v2.0/lbaas/flavors'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_FLAVOR}{constants.RBAC_PUT}', + constants.RULE_API_ADMIN, + "Update a Flavor", + [{'method': 'PUT', 'path': '/v2.0/lbaas/flavors/{flavor_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_FLAVOR}{constants.RBAC_GET_ONE}', + constants.RULE_API_READ, + "Show Flavor details", + [{'method': 'GET', + 'path': '/v2.0/lbaas/flavors/{flavor_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_FLAVOR}{constants.RBAC_DELETE}', + constants.RULE_API_ADMIN, + "Remove a Flavor", + [{'method': 'DELETE', + 'path': '/v2.0/lbaas/flavors/{flavor_id}'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/flavor_profile.py b/octavia/policies/flavor_profile.py new file mode 100644 index 0000000000..1812a55808 --- /dev/null +++ b/octavia/policies/flavor_profile.py @@ -0,0 +1,57 @@ +# Copyright 2017 Walmart Stores Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_policy import policy + +from octavia.common import constants + + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_FLAVOR_PROFILE}{constants.RBAC_GET_ALL}', + constants.RULE_API_ADMIN, + "List Flavor Profiles", + [{'method': 'GET', 'path': '/v2.0/lbaas/flavorprofiles'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_FLAVOR_PROFILE}{constants.RBAC_POST}', + constants.RULE_API_ADMIN, + "Create a Flavor Profile", + [{'method': 'POST', 'path': '/v2.0/lbaas/flavorprofiles'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_FLAVOR_PROFILE}{constants.RBAC_PUT}', + constants.RULE_API_ADMIN, + "Update a Flavor Profile", + [{'method': 'PUT', + 'path': '/v2.0/lbaas/flavorprofiles/{flavor_profile_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_FLAVOR_PROFILE}{constants.RBAC_GET_ONE}', + constants.RULE_API_ADMIN, + "Show Flavor Profile details", + [{'method': 'GET', + 'path': '/v2.0/lbaas/flavorprofiles/{flavor_profile_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_FLAVOR_PROFILE}{constants.RBAC_DELETE}', + constants.RULE_API_ADMIN, + "Remove a Flavor Profile", + [{'method': 'DELETE', + 'path': '/v2.0/lbaas/flavorprofiles/{flavor_profile_id}'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/healthmonitor.py b/octavia/policies/healthmonitor.py new file mode 100644 index 0000000000..f40e9f2546 --- /dev/null +++ b/octavia/policies/healthmonitor.py @@ -0,0 +1,62 @@ +# Copyright 2017 Rackspace, US Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_policy import policy + +from octavia.common import constants + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_HEALTHMONITOR}{constants.RBAC_GET_ALL}', + constants.RULE_API_READ, + "List Health Monitors of a Pool", + [{'method': 'GET', 'path': '/v2/lbaas/healthmonitors'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_HEALTHMONITOR}{constants.RBAC_GET_ALL_GLOBAL}', + constants.RULE_API_READ_GLOBAL, + "List Health Monitors including resources owned by others", + [{'method': 'GET', 'path': '/v2/lbaas/healthmonitors'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_HEALTHMONITOR}{constants.RBAC_POST}', + constants.RULE_API_WRITE, + "Create a Health Monitor", + [{'method': 'POST', 'path': '/v2/lbaas/healthmonitors'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_HEALTHMONITOR}{constants.RBAC_GET_ONE}', + constants.RULE_API_READ, + "Show Health Monitor details", + [{'method': 'GET', + 'path': '/v2/lbaas/healthmonitors/{healthmonitor_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_HEALTHMONITOR}{constants.RBAC_PUT}', + constants.RULE_API_WRITE, + "Update a Health Monitor", + [{'method': 'PUT', + 'path': '/v2/lbaas/healthmonitors/{healthmonitor_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_HEALTHMONITOR}{constants.RBAC_DELETE}', + constants.RULE_API_WRITE, + "Remove a Health Monitor", + [{'method': 'DELETE', + 'path': '/v2/lbaas/healthmonitors/{healthmonitor_id}'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/keystone_default_roles.py b/octavia/policies/keystone_default_roles.py new file mode 100644 index 0000000000..b3f8c557a8 --- /dev/null +++ b/octavia/policies/keystone_default_roles.py @@ -0,0 +1,86 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
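+ +# This module maps the legacy Octavia advanced RBAC roles onto the +# standard keystone default roles (admin, member, reader). The old +# check strings are retained as DeprecatedRule aliases (defined in +# advanced_rbac.py), so existing policy overrides keep working during +# the transition.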
+ +from oslo_policy import policy + +from octavia.common import constants +from octavia.policies import advanced_rbac + +rules = [ + + # OpenStack keystone default roles + + # Project scoped Member + policy.RuleDefault( + name='project-member', + check_str='role:member and ' + 'project_id:%(project_id)s', + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + # Project scoped Reader + policy.RuleDefault( + name='project-reader', + check_str='role:reader and ' + 'project_id:%(project_id)s', + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + policy.RuleDefault( + name='context_is_admin', + check_str='role:admin', + deprecated_rule=advanced_rbac.deprecated_context_is_admin, + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + # API access roles + policy.RuleDefault( + name='load-balancer:admin', + check_str='is_admin:True or ' + 'role:admin', + deprecated_rule=advanced_rbac.deprecated_admin, + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + # Note: 'is_admin:True' is a policy rule that takes into account the + # auth_strategy == noauth configuration setting. + # It is equivalent to 'rule:context_is_admin or {auth_strategy == noauth}' + + policy.RuleDefault( + name='service', + check_str='role:service', + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + policy.RuleDefault( + name='load-balancer:global_observer', + check_str='role:admin', + deprecated_rule=advanced_rbac.deprecated_global_observer, + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + policy.RuleDefault( + name='load-balancer:member_and_owner', + check_str='rule:project-member', + deprecated_rule=advanced_rbac.deprecated_member_and_owner, + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + policy.RuleDefault( + name='load-balancer:observer_and_owner', + check_str='rule:project-reader', + deprecated_rule=advanced_rbac.deprecated_observer_and_owner, + scope_types=[constants.RBAC_SCOPE_PROJECT]), + + policy.RuleDefault( + name='load-balancer:quota-admin', + check_str='role:admin', + deprecated_rule=advanced_rbac.deprecated_quota_admin, + scope_types=[constants.RBAC_SCOPE_PROJECT]), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/l7policy.py b/octavia/policies/l7policy.py new file mode 100644 index 0000000000..b3c26f7311 --- /dev/null +++ b/octavia/policies/l7policy.py @@ -0,0 +1,62 @@ +# Copyright 2017 Rackspace, US Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_policy import policy + +from octavia.common import constants + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_L7POLICY}{constants.RBAC_GET_ALL}', + constants.RULE_API_READ, + "List L7 Policies", + [{'method': 'GET', 'path': '/v2/lbaas/l7policies'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_L7POLICY}{constants.RBAC_GET_ALL_GLOBAL}', + constants.RULE_API_READ_GLOBAL, + "List L7 Policies including resources owned by others", + [{'method': 'GET', 'path': '/v2/lbaas/l7policies'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_L7POLICY}{constants.RBAC_POST}', + constants.RULE_API_WRITE, + "Create an L7 Policy", + [{'method': 'POST', 'path': '/v2/lbaas/l7policies'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_L7POLICY}{constants.RBAC_GET_ONE}', + constants.RULE_API_READ, + "Show L7 Policy details", + [{'method': 'GET', + 'path': '/v2/lbaas/l7policies/{l7policy_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_L7POLICY}{constants.RBAC_PUT}', + constants.RULE_API_WRITE, + "Update an L7 Policy", + [{'method': 'PUT', + 'path': '/v2/lbaas/l7policies/{l7policy_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_L7POLICY}{constants.RBAC_DELETE}', + constants.RULE_API_WRITE, + "Remove an L7 Policy", + [{'method': 'DELETE', + 'path': '/v2/lbaas/l7policies/{l7policy_id}'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/l7rule.py b/octavia/policies/l7rule.py new file mode 100644 index 0000000000..797bf59103 --- /dev/null +++ b/octavia/policies/l7rule.py @@ -0,0 +1,58 @@ +# Copyright 2017 Rackspace, US Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from oslo_policy import policy
+
+from octavia.common import constants
+
+rules = [
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_L7RULE}{constants.RBAC_GET_ALL}',
+        constants.RULE_API_READ,
+        "List L7 Rules",
+        [{'method': 'GET',
+          'path': '/v2/lbaas/l7policies/{l7policy_id}/rules'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_L7RULE}{constants.RBAC_POST}',
+        constants.RULE_API_WRITE,
+        "Create an L7 Rule",
+        [{'method': 'POST',
+          'path': '/v2/lbaas/l7policies/{l7policy_id}/rules'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_L7RULE}{constants.RBAC_GET_ONE}',
+        constants.RULE_API_READ,
+        "Show L7 Rule details",
+        [{'method': 'GET',
+          'path': '/v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_L7RULE}{constants.RBAC_PUT}',
+        constants.RULE_API_WRITE,
+        "Update an L7 Rule",
+        [{'method': 'PUT',
+          'path': '/v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_L7RULE}{constants.RBAC_DELETE}',
+        constants.RULE_API_WRITE,
+        "Remove an L7 Rule",
+        [{'method': 'DELETE',
+          'path': '/v2/lbaas/l7policies/{l7policy_id}/rules/{l7rule_id}'}]
+    ),
+]
+
+
+def list_rules():
+    return rules
diff --git a/octavia/policies/listener.py b/octavia/policies/listener.py
new file mode 100644
index 0000000000..bdf2c0fb64
--- /dev/null
+++ b/octavia/policies/listener.py
@@ -0,0 +1,69 @@
+# Copyright 2017 Rackspace, US Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +from oslo_policy import policy + +from octavia.common import constants + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_LISTENER}{constants.RBAC_GET_ALL}', + constants.RULE_API_READ, + "List Listeners", + [{'method': 'GET', 'path': '/v2/lbaas/listeners'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LISTENER}{constants.RBAC_GET_ALL_GLOBAL}', + constants.RULE_API_READ_GLOBAL, + "List Listeners including resources owned by others", + [{'method': 'GET', 'path': '/v2/lbaas/listeners'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LISTENER}{constants.RBAC_POST}', + constants.RULE_API_WRITE, + "Create a Listener", + [{'method': 'POST', 'path': '/v2/lbaas/listeners'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LISTENER}{constants.RBAC_GET_ONE}', + constants.RULE_API_READ, + "Show Listener details", + [{'method': 'GET', + 'path': '/v2/lbaas/listeners/{listener_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LISTENER}{constants.RBAC_PUT}', + constants.RULE_API_WRITE, + "Update a Listener", + [{'method': 'PUT', + 'path': '/v2/lbaas/listeners/{listener_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LISTENER}{constants.RBAC_DELETE}', + constants.RULE_API_WRITE, + "Remove a Listener", + [{'method': 'DELETE', + 'path': '/v2/lbaas/listeners/{listener_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LISTENER}{constants.RBAC_GET_STATS}', + constants.RULE_API_READ, + "Show Listener statistics", + [{'method': 'GET', + 'path': '/v2/lbaas/listeners/{listener_id}/stats'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/loadbalancer.py b/octavia/policies/loadbalancer.py new file mode 100644 index 0000000000..10943aebd7 --- /dev/null +++ b/octavia/policies/loadbalancer.py @@ -0,0 +1,96 @@ +# Copyright 2017 Rackspace, US Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_policy import policy + +from octavia.common import constants + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_GET_ALL}', + constants.RULE_API_READ, + "List Load Balancers", + [{'method': 'GET', 'path': '/v2/lbaas/loadbalancers'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_GET_ALL_GLOBAL}', + constants.RULE_API_READ_GLOBAL, + "List Load Balancers including resources owned by others", + [{'method': 'GET', 'path': '/v2/lbaas/loadbalancers'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_POST}', + constants.RULE_API_WRITE, + "Create a Load Balancer", + [{'method': 'POST', 'path': '/v2/lbaas/loadbalancers'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_POST}:vip_sg_ids', + constants.RULE_API_WRITE, + "Create a Load Balancer with VIP Security Groups", + [{'method': 'POST', 'path': '/v2/lbaas/loadbalancers'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_GET_ONE}', + constants.RULE_API_READ, + "Show Load Balancer details", + [{'method': 'GET', + 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_PUT}', + constants.RULE_API_WRITE, + "Update a Load Balancer", + [{'method': 'PUT', + 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_PUT}:vip_sg_ids', + constants.RULE_API_WRITE, + "Update the VIP Security Groups of a Load Balancer", + [{'method': 'PUT', + 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_DELETE}', + constants.RULE_API_WRITE, + "Remove a Load Balancer", + [{'method': 'DELETE', + 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_GET_STATS}', + constants.RULE_API_READ, + "Show Load Balancer statistics", + [{'method': 'GET', + 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}/stats'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_GET_STATUS}', + constants.RULE_API_READ, + "Show Load Balancer status", + [{'method': 'GET', + 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}/status'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_LOADBALANCER}{constants.RBAC_PUT_FAILOVER}', + constants.RULE_API_ADMIN, + "Failover a Load Balancer", + [{'method': 'PUT', + 'path': '/v2/lbaas/loadbalancers/{loadbalancer_id}/failover'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/member.py b/octavia/policies/member.py new file mode 100644 index 0000000000..e6b98a467e --- /dev/null +++ b/octavia/policies/member.py @@ -0,0 +1,56 @@ +# Copyright 2017 Rackspace, US Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from oslo_policy import policy
+
+from octavia.common import constants
+
+rules = [
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_MEMBER}{constants.RBAC_GET_ALL}',
+        constants.RULE_API_READ,
+        "List Members of a Pool",
+        [{'method': 'GET', 'path': '/v2/lbaas/pools/{pool_id}/members'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_MEMBER}{constants.RBAC_POST}',
+        constants.RULE_API_WRITE,
+        "Create a Member",
+        [{'method': 'POST', 'path': '/v2/lbaas/pools/{pool_id}/members'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_MEMBER}{constants.RBAC_GET_ONE}',
+        constants.RULE_API_READ,
+        "Show Member details",
+        [{'method': 'GET',
+          'path': '/v2/lbaas/pools/{pool_id}/members/{member_id}'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_MEMBER}{constants.RBAC_PUT}',
+        constants.RULE_API_WRITE,
+        "Update a Member",
+        [{'method': 'PUT',
+          'path': '/v2/lbaas/pools/{pool_id}/members/{member_id}'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_MEMBER}{constants.RBAC_DELETE}',
+        constants.RULE_API_WRITE,
+        "Remove a Member",
+        [{'method': 'DELETE',
+          'path': '/v2/lbaas/pools/{pool_id}/members/{member_id}'}]
+    ),
+]
+
+
+def list_rules():
+    return rules
diff --git a/octavia/policies/pool.py b/octavia/policies/pool.py
new file mode 100644
index 0000000000..5bd1f8c12a
--- /dev/null
+++ b/octavia/policies/pool.py
@@ -0,0 +1,62 @@
+# Copyright 2017 Rackspace, US Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from octavia.common import constants
+
+rules = [
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_POOL}{constants.RBAC_GET_ALL}',
+        constants.RULE_API_READ,
+        "List Pools",
+        [{'method': 'GET', 'path': '/v2/lbaas/pools'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_POOL}{constants.RBAC_GET_ALL_GLOBAL}',
+        constants.RULE_API_READ_GLOBAL,
+        "List Pools including resources owned by others",
+        [{'method': 'GET', 'path': '/v2/lbaas/pools'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_POOL}{constants.RBAC_POST}',
+        constants.RULE_API_WRITE,
+        "Create a Pool",
+        [{'method': 'POST', 'path': '/v2/lbaas/pools'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_POOL}{constants.RBAC_GET_ONE}',
+        constants.RULE_API_READ,
+        "Show Pool details",
+        [{'method': 'GET',
+          'path': '/v2/lbaas/pools/{pool_id}'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_POOL}{constants.RBAC_PUT}',
+        constants.RULE_API_WRITE,
+        "Update a Pool",
+        [{'method': 'PUT',
+          'path': '/v2/lbaas/pools/{pool_id}'}]
+    ),
+    policy.DocumentedRuleDefault(
+        f'{constants.RBAC_POOL}{constants.RBAC_DELETE}',
+        constants.RULE_API_WRITE,
+        "Remove a Pool",
+        [{'method': 'DELETE',
+          'path': '/v2/lbaas/pools/{pool_id}'}]
+    ),
+]
+
+
+def list_rules():
+    return rules
diff --git a/octavia/policies/provider.py b/octavia/policies/provider.py
new file mode 100644
index 0000000000..8e03e49d4a
--- /dev/null
+++ b/octavia/policies/provider.py
@@ -0,0 +1,29 @@
+# Copyright 2018 Rackspace, US Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_policy import policy + +from octavia.common import constants + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_PROVIDER}{constants.RBAC_GET_ALL}', + constants.RULE_API_READ, + "List enabled providers", + [{'method': 'GET', 'path': '/v2/lbaas/providers'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/provider_availability_zone.py b/octavia/policies/provider_availability_zone.py new file mode 100644 index 0000000000..69b6611aca --- /dev/null +++ b/octavia/policies/provider_availability_zone.py @@ -0,0 +1,31 @@ +# Copyright 2018 Rackspace, US Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_policy import policy + +from octavia.common import constants + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_PROVIDER_AVAILABILITY_ZONE}{constants.RBAC_GET_ALL}', + constants.RULE_API_ADMIN, + "List the provider availability zone capabilities.", + [{'method': 'GET', + 'path': '/v2/lbaas/providers/{provider}/' + 'availability_zone_capabilities'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/provider_flavor.py b/octavia/policies/provider_flavor.py new file mode 100644 index 0000000000..68b14c89a1 --- /dev/null +++ b/octavia/policies/provider_flavor.py @@ -0,0 +1,30 @@ +# Copyright 2018 Rackspace, US Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_policy import policy + +from octavia.common import constants + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_PROVIDER_FLAVOR}{constants.RBAC_GET_ALL}', + constants.RULE_API_ADMIN, + "List the provider flavor capabilities.", + [{'method': 'GET', + 'path': '/v2/lbaas/providers/{provider}/flavor_capabilities'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/policies/quota.py b/octavia/policies/quota.py new file mode 100644 index 0000000000..a9cba0ff7e --- /dev/null +++ b/octavia/policies/quota.py @@ -0,0 +1,63 @@ +# Copyright 2017 Rackspace, US Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_policy import policy + +from octavia.common import constants + +rules = [ + policy.DocumentedRuleDefault( + f'{constants.RBAC_QUOTA}{constants.RBAC_GET_ALL}', + constants.RULE_API_READ_QUOTA, + "List Quotas", + [{'method': 'GET', 'path': '/v2/lbaas/quotas'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_QUOTA}{constants.RBAC_GET_ALL_GLOBAL}', + constants.RULE_API_READ_QUOTA_GLOBAL, + "List Quotas including resources owned by others", + [{'method': 'GET', 'path': '/v2/lbaas/quotas'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_QUOTA}{constants.RBAC_GET_ONE}', + constants.RULE_API_READ_QUOTA, + "Show Quota details", + [{'method': 'GET', + 'path': '/v2/lbaas/quotas/{project_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_QUOTA}{constants.RBAC_PUT}', + constants.RULE_API_WRITE_QUOTA, + "Update a Quota", + [{'method': 'PUT', + 'path': '/v2/lbaas/quotas/{project_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_QUOTA}{constants.RBAC_DELETE}', + constants.RULE_API_WRITE_QUOTA, + "Reset a Quota", + [{'method': 'DELETE', + 'path': '/v2/lbaas/quotas/{project_id}'}] + ), + policy.DocumentedRuleDefault( + f'{constants.RBAC_QUOTA}{constants.RBAC_GET_DEFAULTS}', + constants.RULE_API_READ_QUOTA, + "Show Default Quota for a Project", + [{'method': 'GET', + 'path': '/v2/lbaas/quotas/{project_id}/default'}] + ), +] + + +def list_rules(): + return rules diff --git a/octavia/statistics/__init__.py b/octavia/statistics/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/statistics/drivers/__init__.py b/octavia/statistics/drivers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/statistics/drivers/logger.py b/octavia/statistics/drivers/logger.py new file mode 100644 index 0000000000..37e1fd38f0 --- /dev/null +++ b/octavia/statistics/drivers/logger.py @@ -0,0 +1,29 @@ +# Copyright 2018 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
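+
+# NOTE: This driver simply writes the statistics it receives to the Octavia
+# log. For deltas the emitted line looks roughly like (illustrative values):
+#     Logging listener stats deltas for listener `<listener_id>` /
+#     amphora `<amphora_id>`: {'bytes_in': 10, ...}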
+
+from oslo_log import log as logging
+
+from octavia.statistics import stats_base
+
+LOG = logging.getLogger(__name__)
+
+
+class StatsLogger(stats_base.StatsDriverMixin):
+    def update_stats(self, listener_stats, deltas=False):
+        for stats_object in listener_stats:
+            LOG.info("Logging listener stats%s for listener `%s` / "
+                     "amphora `%s`: %s",
+                     ' deltas' if deltas else '',
+                     stats_object.listener_id, stats_object.amphora_id,
+                     stats_object.get_stats())
diff --git a/octavia/statistics/drivers/update_db.py b/octavia/statistics/drivers/update_db.py
new file mode 100644
index 0000000000..92959bb9a9
--- /dev/null
+++ b/octavia/statistics/drivers/update_db.py
@@ -0,0 +1,43 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from octavia.db import api as db_api
+from octavia.db import repositories as repo
+from octavia.statistics import stats_base
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class StatsUpdateDb(stats_base.StatsDriverMixin):
+
+    def __init__(self):
+        super().__init__()
+        self.listener_stats_repo = repo.ListenerStatisticsRepository()
+
+    def update_stats(self, listener_stats, deltas=False):
+        """Update the database with the given listener statistics."""
+        with db_api.session().begin() as session:
+            for stats_object in listener_stats:
+                LOG.debug("Updating listener stats in db for listener `%s` / "
+                          "amphora `%s`: %s",
+                          stats_object.listener_id, stats_object.amphora_id,
+                          stats_object.get_stats())
+                # Deltas are added to the existing row; absolute stats
+                # overwrite it.
+                if deltas:
+                    self.listener_stats_repo.increment(session, stats_object)
+                else:
+                    self.listener_stats_repo.replace(session, stats_object)
diff --git a/octavia/statistics/stats_base.py b/octavia/statistics/stats_base.py
new file mode 100644
index 0000000000..5a8db5697f
--- /dev/null
+++ b/octavia/statistics/stats_base.py
@@ -0,0 +1,60 @@
+# Copyright 2011-2014 OpenStack Foundation,author: Min Wang,German Eichberger
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
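+
+# NOTE: Stats drivers are loaded from the 'octavia.statistics.drivers'
+# stevedore namespace (see _get_stats_handlers() below), so deployers can
+# plug in their own backends. A minimal sketch of a custom driver
+# (illustration only; the 'stats_statsd' entry point name and module path
+# are made up):
+#
+#     class StatsStatsd(StatsDriverMixin):
+#         def update_stats(self, listener_stats, deltas=False):
+#             for stats_object in listener_stats:
+#                 # forward stats_object.get_stats() to a statsd client here
+#                 ...
+#
+# registered in the package's setup.cfg as:
+#
+#     octavia.statistics.drivers =
+#         stats_statsd = my_package.statsd_driver:StatsStatsd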
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from stevedore import named as stevedore_named
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+_STATS_HANDLERS = None
+
+
+def _get_stats_handlers():
+    global _STATS_HANDLERS
+    if _STATS_HANDLERS is None:
+        _STATS_HANDLERS = stevedore_named.NamedExtensionManager(
+            namespace='octavia.statistics.drivers',
+            names=CONF.controller_worker.statistics_drivers,
+            invoke_on_load=True,
+            propagate_map_exceptions=False
+        )
+    return _STATS_HANDLERS
+
+
+def update_stats_via_driver(listener_stats, deltas=False):
+    """Send listener stats to the enabled stats driver(s)
+
+    :param listener_stats: A list of ListenerStatistics objects
+    :type listener_stats: list
+    :param deltas: Indicates whether the stats are deltas (false==absolute)
+    :type deltas: bool
+    """
+    handlers = _get_stats_handlers()
+    handlers.map_method('update_stats', listener_stats, deltas=deltas)
+
+
+class StatsDriverMixin(metaclass=abc.ABCMeta):
+    @abc.abstractmethod
+    def update_stats(self, listener_stats, deltas=False):
+        """Push the given stats objects to this driver's backend
+
+        :param listener_stats: A list of data_model.ListenerStatistics objects
+        :type listener_stats: list
+        :param deltas: Indicates whether the stats are deltas (false==absolute)
+        :type deltas: bool
+        """
diff --git a/octavia/tests/__init__.py b/octavia/tests/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/tests/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/octavia/tests/common/__init__.py b/octavia/tests/common/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/tests/common/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/octavia/tests/common/constants.py b/octavia/tests/common/constants.py
new file mode 100644
index 0000000000..b8fd5c2491
--- /dev/null
+++ b/octavia/tests/common/constants.py
@@ -0,0 +1,285 @@
+# Copyright 2015 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from octavia_lib.common import constants as lib_constants
+from openstack.network.v2.network import Network
+from openstack.network.v2.network_ip_availability import NetworkIPAvailability
+from openstack.network.v2.port import Port
+from openstack.network.v2.security_group import SecurityGroup
+from openstack.network.v2.subnet import Subnet
+
+from octavia.common import constants
+
+
+class MockNovaInterface:
+    net_id = None
+    port_id = None
+    fixed_ips = []
+
+
+MOCK_NETWORK_ID = 'mock-network-1'
+MOCK_NETWORK_ID2 = 'mock-network-2'
+MOCK_NETWORK_NAME = 'TestNet1'
+MOCK_SUBNET_ID = 'mock-subnet-1'
+MOCK_SUBNET_ID2 = 'mock-subnet-2'
+MOCK_SUBNET_ID3 = 'mock-subnet-3'
+MOCK_SUBNET_NAME = 'TestSubnet1'
+MOCK_PORT_ID = 'mock-port-1'
+MOCK_PORT_ID2 = 'mock-port-2'
+MOCK_PORT_NAME = 'TestPort1'
+MOCK_PORT_NAME2 = 'TestPort2'
+MOCK_COMPUTE_ID = 'mock-compute-1'
+MOCK_IP_ADDRESS = '10.0.0.1'
+MOCK_IP_ADDRESS2 = '10.0.0.2'
+MOCK_GATEWAY_IP = '10.0.0.3'
+MOCK_IP_VERSION = 4
+MOCK_CIDR = '10.0.0.0/24'
+MOCK_MAC_ADDR = 'fe:16:3e:00:95:5c'
+MOCK_MAC_ADDR2 = 'fe:16:3e:00:95:5d'
+MOCK_PROJECT_ID = 'mock-project-1'
+MOCK_HOST_ROUTES = []
+MOCK_SUBNET = Subnet(**{'id': MOCK_SUBNET_ID,
+                        'network_id': MOCK_NETWORK_ID,
+                        'name': MOCK_SUBNET_NAME,
+                        'tenant_id': MOCK_PROJECT_ID,
+                        'gateway_ip': MOCK_GATEWAY_IP,
+                        'cidr': MOCK_CIDR,
+                        'ip_version': MOCK_IP_VERSION,
+                        'host_routes': MOCK_HOST_ROUTES})
+MOCK_SUBNET2 = Subnet(**{'id': MOCK_SUBNET_ID2,
+                         'network_id': MOCK_NETWORK_ID2})
+
+MOCK_NOVA_INTERFACE = MockNovaInterface()
+MOCK_NOVA_INTERFACE.net_id = MOCK_NETWORK_ID
+MOCK_NOVA_INTERFACE.port_id = MOCK_PORT_ID
+MOCK_NOVA_INTERFACE.fixed_ips = [{'ip_address': MOCK_IP_ADDRESS}]
+MOCK_NOVA_INTERFACE2 = MockNovaInterface()
+MOCK_NOVA_INTERFACE2.net_id = MOCK_NETWORK_ID2
+MOCK_NOVA_INTERFACE2.port_id = MOCK_PORT_ID2
+MOCK_NOVA_INTERFACE2.fixed_ips = [{'ip_address': MOCK_IP_ADDRESS2}]
+MOCK_DEVICE_OWNER = 'Moctavia'
+MOCK_DEVICE_ID = 'Moctavia123'
+MOCK_DEVICE_ID2 = 'Moctavia124'
+MOCK_SECURITY_GROUP_ID = 'security-group-1'
+MOCK_SECURITY_GROUP_NAME = 'SecurityGroup1'
+
+MOCK_SECURITY_GROUP = SecurityGroup(**{
+    "id": MOCK_SECURITY_GROUP_ID,
+    "name": MOCK_SECURITY_GROUP_NAME,
+    "tenant_id": MOCK_PROJECT_ID,
+    "description": "",
+    "security_group_rules": [{
+        "id": "85f1c72b-cdd4-484f-a9c8-b3205f4e6f53",
+        "tenant_id": MOCK_PROJECT_ID,
+        "security_group_id": MOCK_SECURITY_GROUP_ID,
+        "ethertype": "IPv4",
+        "direction": "ingress",
+        "protocol": "tcp",
+        "port_range_min": 80,
+        "port_range_max": 80,
+        "remote_ip_prefix": None,
+        "remote_group_id": None,
+        "description": "",
+        "tags": [],
+        "created_at": "2020-03-12T20:44:48Z",
+        "updated_at": "2020-03-12T20:44:48Z",
+        "revision_number": 0,
+        "project_id": MOCK_PROJECT_ID
+    }, {
+        "id": "aa16ae5f-eac2-40b5-994b-5169a06228a4",
+        "tenant_id": MOCK_PROJECT_ID,
+        "security_group_id": "6530d536-3083-4d5c-a4a9-272ac7b8f3de",
+        "ethertype": "IPv4",
+        "direction": "egress",
+        "protocol": None,
+        "port_range_min": None,
+        "port_range_max": None,
+        "remote_ip_prefix": None,
+        "remote_group_id": None,
+        "description": None,
+        "tags": [],
+        "created_at": "2020-03-12T20:43:31Z",
+        "updated_at": "2020-03-12T20:43:31Z",
+        "revision_number": 0,
+        "project_id": MOCK_PROJECT_ID,
+    }],
+    "tags": [],
+    "created_at": "2020-03-12T20:43:31Z",
+    "updated_at": "2020-03-12T20:44:48Z",
+    "revision_number": 3,
+    "project_id": MOCK_PROJECT_ID})
+
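+# NOTE: The objects above are real openstacksdk resources, so tests can read
+# attributes exactly the way production code does, e.g. (illustrative):
+#     MOCK_SUBNET.cidr == MOCK_CIDR
+#     MOCK_SECURITY_GROUP.security_group_rules[0]['protocol'] == 'tcp'
+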
+MOCK_ADMIN_STATE_UP = True
+MOCK_STATUS = 'ACTIVE'
+MOCK_MTU = 1500
+MOCK_NETWORK_TYPE = 'flat'
+MOCK_SEGMENTATION_ID = 1
+MOCK_ROUTER_EXTERNAL = False
+
+MOCK_NEUTRON_PORT = Port(**{'network_id': MOCK_NETWORK_ID,
+                            'device_id': MOCK_DEVICE_ID,
+                            'device_owner': MOCK_DEVICE_OWNER,
+                            'id': MOCK_PORT_ID,
+                            'name': MOCK_PORT_NAME,
+                            'tenant_id': MOCK_PROJECT_ID,
+                            'admin_state_up': MOCK_ADMIN_STATE_UP,
+                            'status': MOCK_STATUS,
+                            'mac_address': MOCK_MAC_ADDR,
+                            'fixed_ips': [{'ip_address': MOCK_IP_ADDRESS,
+                                           'subnet_id': MOCK_SUBNET_ID}],
+                            'security_groups': [MOCK_SECURITY_GROUP_ID],
+                            'binding_vnic_type': constants.VNIC_TYPE_NORMAL})
+MOCK_NEUTRON_QOS_POLICY_ID = 'mock-qos-id'
+MOCK_QOS_POLICY_ID1 = 'qos1-id'
+MOCK_QOS_POLICY_ID2 = 'qos2-id'
+
+MOCK_NEUTRON_PORT2 = Port(**{'network_id': MOCK_NETWORK_ID2,
+                             'device_id': MOCK_DEVICE_ID2,
+                             'device_owner': MOCK_DEVICE_OWNER,
+                             'id': MOCK_PORT_ID2,
+                             'name': MOCK_PORT_NAME2,
+                             'tenant_id': MOCK_PROJECT_ID,
+                             'admin_state_up': MOCK_ADMIN_STATE_UP,
+                             'status': MOCK_STATUS,
+                             'mac_address': MOCK_MAC_ADDR2,
+                             'fixed_ips': [{'ip_address': MOCK_IP_ADDRESS2,
+                                            'subnet_id': MOCK_SUBNET_ID2}]})
+
+MOCK_NETWORK = Network(**{'id': MOCK_NETWORK_ID,
+                          'name': MOCK_NETWORK_NAME,
+                          'project_id': MOCK_PROJECT_ID,
+                          'admin_state_up': MOCK_ADMIN_STATE_UP,
+                          'subnet_ids': [MOCK_SUBNET_ID],
+                          'mtu': MOCK_MTU,
+                          'provider_network_type': 'flat',
+                          'provider_physical_network': MOCK_NETWORK_NAME,
+                          'provider_segmentation_id': MOCK_SEGMENTATION_ID,
+                          'router_external': MOCK_ROUTER_EXTERNAL,
+                          'port_security_enabled': False})
+MOCK_FIXED_IP = {'subnet_id': MOCK_SUBNET_ID,
+                 'ip_address': MOCK_IP_ADDRESS}
+
+MOCK_AMP_ID1 = 'amp1-id'
+MOCK_AMP_ID2 = 'amp2-id'
+MOCK_AMP_ID3 = 'amp3-id'
+MOCK_AMP_COMPUTE_ID1 = 'amp1-compute-id'
+MOCK_AMP_COMPUTE_ID2 = 'amp2-compute-id'
+MOCK_AMP_COMPUTE_ID3 = 'amp3-compute-id'
+
+MOCK_MANAGEMENT_SUBNET_ID = 'mgmt-subnet-1'
+MOCK_MANAGEMENT_NET_ID = 'mgmt-net-1'
+MOCK_MANAGEMENT_PORT_ID1 = 'mgmt-port-1'
+MOCK_MANAGEMENT_PORT_ID2 = 'mgmt-port-2'
+# These IPs become lb_network_ip
+MOCK_MANAGEMENT_IP1 = '99.99.99.1'
+MOCK_MANAGEMENT_IP2 = '99.99.99.2'
+
+MOCK_MANAGEMENT_FIXED_IPS1 = [{'ip_address': MOCK_MANAGEMENT_IP1,
+                               'subnet_id': MOCK_MANAGEMENT_SUBNET_ID}]
+MOCK_MANAGEMENT_FIXED_IPS2 = [{'ip_address': MOCK_MANAGEMENT_IP2,
+                               'subnet_id': MOCK_MANAGEMENT_SUBNET_ID}]
+
+MOCK_MANAGEMENT_INTERFACE1 = MockNovaInterface()
+MOCK_MANAGEMENT_INTERFACE1.net_id = MOCK_MANAGEMENT_NET_ID
+MOCK_MANAGEMENT_INTERFACE1.port_id = MOCK_MANAGEMENT_PORT_ID1
+MOCK_MANAGEMENT_INTERFACE1.fixed_ips = MOCK_MANAGEMENT_FIXED_IPS1
+MOCK_MANAGEMENT_INTERFACE2 = MockNovaInterface()
+MOCK_MANAGEMENT_INTERFACE2.net_id = MOCK_MANAGEMENT_NET_ID
+MOCK_MANAGEMENT_INTERFACE2.port_id = MOCK_MANAGEMENT_PORT_ID2
+MOCK_MANAGEMENT_INTERFACE2.fixed_ips = MOCK_MANAGEMENT_FIXED_IPS2
+
+MOCK_MANAGEMENT_PORT1 = Port(**{'network_id': MOCK_MANAGEMENT_NET_ID,
+                                'device_id': MOCK_AMP_COMPUTE_ID1,
+                                'device_owner': MOCK_DEVICE_OWNER,
+                                'id': MOCK_MANAGEMENT_PORT_ID1,
+                                'fixed_ips': MOCK_MANAGEMENT_FIXED_IPS1})
+
+MOCK_MANAGEMENT_PORT2 = Port(**{'network_id': MOCK_MANAGEMENT_NET_ID,
+                                'device_id': MOCK_AMP_COMPUTE_ID2,
+                                'device_owner': MOCK_DEVICE_OWNER,
+                                'id': MOCK_MANAGEMENT_PORT_ID2,
+                                'fixed_ips': MOCK_MANAGEMENT_FIXED_IPS2})
+
+MOCK_VIP_SUBNET_ID = 'vip-subnet-1'
+MOCK_VIP_SUBNET_ID2 = 'vip-subnet-2'
+MOCK_VIP_NET_ID = 'vip-net-1'
+MOCK_VRRP_PORT_ID1 = 'vrrp-port-1'
+MOCK_VRRP_PORT_ID2 = 'vrrp-port-2'
+MOCK_VRRP_PORT_ID3 = 'vrrp-port-3'
+# These IPs become vrrp_ip
+MOCK_VRRP_IP1 = '55.55.55.1'
+MOCK_VRRP_IP2 = '55.55.55.2'
+MOCK_VRRP_IP3 = '55.55.55.3'
+
+MOCK_VRRP_FIXED_IPS1 = [{'ip_address': MOCK_VRRP_IP1,
+                         'subnet_id': MOCK_VIP_SUBNET_ID}]
+MOCK_VRRP_FIXED_IPS2 = [{'ip_address': MOCK_VRRP_IP2,
+                         'subnet_id': MOCK_VIP_SUBNET_ID}]
+
+MOCK_VRRP_INTERFACE1 = MockNovaInterface()
+MOCK_VRRP_INTERFACE1.net_id = MOCK_VIP_NET_ID
+MOCK_VRRP_INTERFACE1.port_id = MOCK_VRRP_PORT_ID1
+MOCK_VRRP_INTERFACE1.fixed_ips = MOCK_VRRP_FIXED_IPS1
+MOCK_VRRP_INTERFACE2 = MockNovaInterface()
+MOCK_VRRP_INTERFACE2.net_id = MOCK_VIP_NET_ID
+MOCK_VRRP_INTERFACE2.port_id = MOCK_VRRP_PORT_ID2
+MOCK_VRRP_INTERFACE2.fixed_ips = MOCK_VRRP_FIXED_IPS2
+
+MOCK_VRRP_PORT1 = Port(**{'network_id': MOCK_VIP_NET_ID,
+                          'device_id': MOCK_AMP_COMPUTE_ID1,
+                          'device_owner': MOCK_DEVICE_OWNER,
+                          'id': MOCK_VRRP_PORT_ID1,
+                          'fixed_ips': MOCK_VRRP_FIXED_IPS1})
+
+MOCK_VRRP_PORT2 = Port(**{'network_id': MOCK_VIP_NET_ID,
+                          'device_id': MOCK_AMP_COMPUTE_ID2,
+                          'device_owner': MOCK_DEVICE_OWNER,
+                          'id': MOCK_VRRP_PORT_ID2,
+                          'fixed_ips': MOCK_VRRP_FIXED_IPS2})
+
+MOCK_NETWORK_TOTAL_IPS = 254
+MOCK_NETWORK_USED_IPS = 0
+MOCK_SUBNET_TOTAL_IPS = 254
+MOCK_SUBNET_USED_IPS = 0
+MOCK_SUBNET_IP_AVAILABILITY = [{'used_ips': MOCK_SUBNET_USED_IPS,
+                                'subnet_id': MOCK_SUBNET_ID,
+                                'total_ips': MOCK_SUBNET_TOTAL_IPS}]
+
+MOCK_NETWORK_IP_AVAILABILITY = NetworkIPAvailability(
+    **{'network_id': MOCK_NETWORK_ID,
+       'tenant_id': MOCK_PROJECT_ID,
+       'network_name': MOCK_NETWORK_NAME,
+       'total_ips': MOCK_NETWORK_TOTAL_IPS,
+       'used_ips': MOCK_NETWORK_USED_IPS,
+       'subnet_ip_availability': MOCK_SUBNET_IP_AVAILABILITY})
+
+INVALID_LISTENER_POOL_PROTOCOL_MAP = {
+    constants.PROTOCOL_HTTP: [constants.PROTOCOL_HTTPS,
+                              constants.PROTOCOL_TCP,
+                              constants.PROTOCOL_TERMINATED_HTTPS,
+                              constants.PROTOCOL_UDP],
+    constants.PROTOCOL_HTTPS: [constants.PROTOCOL_HTTP,
+                               constants.PROTOCOL_TERMINATED_HTTPS,
+                               constants.PROTOCOL_UDP],
+    constants.PROTOCOL_TCP: [constants.PROTOCOL_TERMINATED_HTTPS,
+                             constants.PROTOCOL_UDP],
+    constants.PROTOCOL_TERMINATED_HTTPS: [constants.PROTOCOL_HTTPS,
+                                          constants.PROTOCOL_TCP,
+                                          constants.PROTOCOL_UDP],
+    constants.PROTOCOL_UDP: [constants.PROTOCOL_TCP,
+                             constants.PROTOCOL_HTTP,
+                             constants.PROTOCOL_HTTPS,
+                             constants.PROTOCOL_TERMINATED_HTTPS,
+                             constants.PROTOCOL_PROXY,
+                             lib_constants.PROTOCOL_PROXYV2]}
diff --git a/octavia/tests/common/data_model_helpers.py b/octavia/tests/common/data_model_helpers.py
new file mode 100644
index 0000000000..b7b8d23ac8
--- /dev/null
+++ b/octavia/tests/common/data_model_helpers.py
@@ -0,0 +1,95 @@
+# Copyright 2014 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
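+
+# NOTE: The generators below return deterministic, seed-numbered test
+# objects. Typical usage in a unit test (illustrative):
+#
+#     lb = generate_load_balancer_tree()
+#     assert lb.vip.load_balancer_id == lb.id
+#     assert len(lb.amphorae) == 2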
+ +from octavia.common import constants +from octavia.common import data_models +from octavia.tests.common import constants as ut_constants + + +def generate_load_balancer_tree(additional_vips=None): + vip = generate_vip() + amps = [generate_amphora(), generate_amphora()] + lb = generate_load_balancer(vip=vip, amphorae=amps, + additional_vips=additional_vips) + return lb + + +LB_SEED = 0 + + +def generate_load_balancer(vip=None, amphorae=None, + topology=constants.TOPOLOGY_SINGLE, + additional_vips=None): + amphorae = amphorae or [] + additional_vips = additional_vips or [] + global LB_SEED + LB_SEED += 1 + lb = data_models.LoadBalancer(id=f'lb{LB_SEED}-id', + project_id='2', + name=f'lb{LB_SEED}', + description=f'lb{LB_SEED}', + vip=vip, + topology=topology, + amphorae=amphorae) + for amp in lb.amphorae: + amp.load_balancer = lb + amp.load_balancer_id = lb.id + amp.status = constants.AMPHORA_ALLOCATED + if vip: + vip.load_balancer = lb + vip.load_balancer_id = lb.id + for add_vip in additional_vips: + add_vip_obj = data_models.AdditionalVip( + load_balancer_id=lb.id, + ip_address=add_vip.get('ip_address'), + subnet_id=add_vip.get('subnet_id'), + network_id=vip.network_id, + port_id=vip.port_id, + load_balancer=lb + ) + lb.additional_vips.append(add_vip_obj) + return lb + + +VIP_SEED = 0 + + +def generate_vip(load_balancer=None): + global VIP_SEED + VIP_SEED += 1 + vip = data_models.Vip(ip_address=f'10.0.0.{VIP_SEED}', + subnet_id=ut_constants.MOCK_VIP_SUBNET_ID, + port_id=f'vrrp-port-{VIP_SEED}', + load_balancer=load_balancer) + if load_balancer: + vip.load_balancer_id = load_balancer.id + return vip + + +AMP_SEED = 0 + + +def generate_amphora(load_balancer=None): + global AMP_SEED + AMP_SEED += 1 + amp = data_models.Amphora(id=f'amp{AMP_SEED}-id', + compute_id=f'amp{AMP_SEED}-compute-id', + status='ACTIVE', + lb_network_ip=f'99.99.99.{AMP_SEED}', + vrrp_ip=f'55.55.55.{AMP_SEED}', + vrrp_port_id=f'vrrp_port-{AMP_SEED}-id', + load_balancer=load_balancer) + if load_balancer: + amp.load_balancer_id = load_balancer.id + return amp diff --git a/octavia/tests/common/sample_certs.py b/octavia/tests/common/sample_certs.py new file mode 100644 index 0000000000..a22198effb --- /dev/null +++ b/octavia/tests/common/sample_certs.py @@ -0,0 +1,989 @@ +# Copyright 2016 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
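+
+# NOTE: X509_CERT_SHA1 below is the hex SHA1 fingerprint of X509_CERT. A
+# sketch of how it could be checked with the 'cryptography' package
+# (illustration only, not used by this module):
+#
+#     from cryptography import x509
+#     from cryptography.hazmat.primitives import hashes
+#
+#     cert = x509.load_pem_x509_certificate(X509_CERT)
+#     assert cert.fingerprint(hashes.SHA1()).hex() == X509_CERT_SHA1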
+# + +import base64 + +import pkg_resources + + +X509_CERT_CN = 'www.example.com' +X509_CERT_SHA1 = '9965834d856a7e24459522af0b91df69323947b3' + +X509_CERT = b"""-----BEGIN CERTIFICATE----- +MIIE8TCCAtmgAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwIzEhMB8GA1UEAwwYY2Et +aW50QHNiYWx1a29mZi5pYm0uY29tMB4XDTE2MDkyNzA4MjkzNFoXDTI2MDkyNTA4 +MjkzNFowGjEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA34asqEe1MexBKGmBcrco08LYYFfJjpmW8m1yKJsm +S2nmHNhJy4Fl+3cPDyHYOiVxnsaMIv1Q8ZMRpjYH2LhvzLt2doyMiiJrqA3ScdhZ +VlGKaURvASSj9dmbRBMqdXZBvTZnMH4aSkL4DalU7NiW+jbMb5Gmf+bozE4ZAOES +6eXsP5+yEhJvzgmT/RvD/2w7EtCtrRnnAlMwHJACqozRQYXuY8iLw7YJZtk35wyc +EJRilXIcKUCuwQfHG6akd6da8PIzEZ5bbsYLtpslIoh53vG3htXTp7eGDp+MXzlr +yB0+QqjXuOMR1ml1sNwVMpHO4oUFuXFGvuIYnT2QhYerdwIDAQABo4IBNjCCATIw +CQYDVR0TBAIwADARBglghkgBhvhCAQEEBAMCBkAwMwYJYIZIAYb4QgENBCYWJE9w +ZW5TU0wgR2VuZXJhdGVkIFNlcnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUVXOS +1PSqVuhOP1OKBMNfSHfhAsAwgZgGA1UdIwSBkDCBjYAUN1MP5SS5ZJyrWuPVSkEF +KK2SnXShcaRvMG0xCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAw +DgYDVQQHDAdTZWF0dGxlMQwwCgYDVQQKDANJQk0xKTAnBgNVBAMMIG1hc3Rlci1j +YS10ZXN0QHNiYWx1a29mZi5pYm0uY29tggIQADAOBgNVHQ8BAf8EBAMCBaAwEwYD +VR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQADggIBAFcxJtGRESeflY6+ +WNp9q3LGYP+uzyUvjimdFQzKRi+Sq0Mi2YI7agpvtE9IZHov+JwzPaBXM4yC6Cap +lI88cE0KNowkjkON4g99F8m9WvaXRChtlJ53BizRkGKgw4Zg/0PAbpjLN7IU/Zrm +hOwyhBxmewMX3WAk76xvgFXTVN7c9FnCRvuN/6xO+CUb/a8fcNASdD+8aw+iS7iq +gvV1WGeGY8n8F19NggWSiRyb/z4Y1VoqaeIPfD9kjFrGApEGpiZphbzl9jSX8cPQ +YbDbbxBsUyfxtMK1aVx258ow92NRsDsoLGELpzF1AekzfQDWtHOpqkaPNunV2l4f +UGRi5J5stDi80Zf1t5JiFkHRXLeWAPa16AifF4WhmAaw0+zxINUqYH1/kt7LQP62 +PT5g3TK1S7TLvqfouw69AQUZAezBUfEkfy1816WGpuntWEIe3x4sCviqVHdjDtE6 +Pntzq5bvIIQ6/em2y5gvG68yOXYNTWmxOVaXPJ60eilbPyCD8UrkSMbqX+ZlAfFJ +dsAnySgPfz47dhd9jHulx4/rBZfPx330DNiO/wQZxQMTbjhlTJViojfQuNRaBT4E +Vi/XwUwVUqRURyQtuP8QJdPh9KD7uX6xHjqBALdwzCYAFaqelPue7TJ7R/I5+02A +DV8BnY7U3zPtHtPf6i8vdYwgAOJG +-----END CERTIFICATE-----""" + +X509_CERT_KEY = b"""-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA34asqEe1MexBKGmBcrco08LYYFfJjpmW8m1yKJsmS2nmHNhJ +y4Fl+3cPDyHYOiVxnsaMIv1Q8ZMRpjYH2LhvzLt2doyMiiJrqA3ScdhZVlGKaURv +ASSj9dmbRBMqdXZBvTZnMH4aSkL4DalU7NiW+jbMb5Gmf+bozE4ZAOES6eXsP5+y +EhJvzgmT/RvD/2w7EtCtrRnnAlMwHJACqozRQYXuY8iLw7YJZtk35wycEJRilXIc +KUCuwQfHG6akd6da8PIzEZ5bbsYLtpslIoh53vG3htXTp7eGDp+MXzlryB0+QqjX +uOMR1ml1sNwVMpHO4oUFuXFGvuIYnT2QhYerdwIDAQABAoIBACVPOmSAS5tAnwOa +0LOQJO1ruWgjXw5BTrO6VvK2Kuctju5Dn9WrDJWzorzY3lmeRF/HLj7s32TjMm/2 +1spyxp56r+RLw22PHz8Wx4ifHxJMW/kEJi8fqYpwvvzW4iBnE8P8X671bXf1w6es +GvPJlzG+kdMRkaQJq9PmOUAvUVPe7+xLuouU+7q4DAiq4oXMoidVbhm0FC5k4LB2 +q+oMzcdMiQ6rQfQB1uh2s659zwW7wAtRMgx4LeY+lIpyf0Bh83Ibi4JybH+DQk8g +AkrEXDE9aslNx9ZXVdfdQiCRDklbg41HejZPRhsRntH0v4cnjOGCYrVDfTKEatka +ltWYyXECgYEA+LFxGZH9vn2nh2PtAs5wnsjc/79ZOKJwq6j4stuRr8MBazRtMhP1 +T1g6pEkBjEoUUomcpH/eG49PrB2G9oYIfhJqkRo07m5Fyom3r35/V9Q/biqcGLEM +EodujvziHbUQDFxO7jLigRjsVoG4Uo0TXT6V8KzKxHGgpdCvYKNP3A8CgYEA5hfv +829n55dkNUFU33VKhlyLD1+mAUdPkjRHYiOianv1m8C5z8rzjxs+Fa8Xl+Ujsr0m +JpRvOiNEwm/b6bF4NLKOhaBPK2IAYzGPwy2yhXELcxHuxNArJ4kVp+YdwvvRSWCa +767r/CBS7gCCM5bXlU3saMS03goZd+l4fo778hkCgYBxkVZ8vtaJbwhaI5/QcEWt +vTxu7groegXJ3lf0FaDqCrtTIZXcEJEtsrTU4SH71riBGKaX2GytWTyg9Lr1STAH +opFXwgf5+hGU9F8VnUa57QsqW/r8q50/uOkcEw+PUWgKvPyuej5FhgQnXQW3bQUy +x6nhRocyPlGGZ04va2TEsQKBgDlIpFh61+d0bWJEzZiEXvVsfMJrEa0nz8uacFsi +fAD+s3r/VENDR7fNFHvZh4otZeHN7X2VXsuelDPEHX/kywRzn7/s1Uj7sRUA9cWl +ztgh+LPBNyyQlu3U1ythwu8UOlqGTox1hBLVCVBvl/q4BxwItl6u+kh9QzHzUihP ++LGhAoGAGRjYSOy4aiFK1Ds/5doEcBj3eGsfNx0va85UYDMnoMxkw+qHlFOCzrG1 
+nUBaaqVibLaROn4V1QnlSOA2vjc2jMMDKMfnjawtqBC018tQDVcE75sun7UzyxtS +OWaQy6KhqrKpPy3tS1wt1vAYPWZw/EIo4dDXYBo55REI5mSBZrM= +-----END RSA PRIVATE KEY-----""" + +X509_CERT_KEY_ENCRYPTED = b"""-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-256-CBC,086BA545587FF5F6F4DD9AACC122603A + +mWGjuSlBRAU3QruM/CI7m2LCN7wTHp00V9XbSDHXQ9D2MqgRJTf09iRlSsH7GxKF +jbc0TWvrYkojb5BZBg3PePfRUxhiwGu7hYk+GLbRQsA0iL53wA2a7aPnwzBFuwky +u/d0bK39n8QoIj+vUgVe/7C4Xj6eRC9SGlGBO5syCQqx/KCmovv+cqKG+hti9KFm +e7KAsdd/7noEQNwo2Don+0gZwDc7lKzR29NlyqnASkbllKMzGaMXVfdDPGjC62AF +3rT0HllONHo7McmRfbCWs7nMEvKFgxKvHoP2/0ph5DD+DOKFCnLSfWdK22EgG9TT +UUcNiNCY/A88M2GnHdYjBMVokL/sQ4LAsf9Tz7aO1D6c2p50t9gBhDpOwKwgYGJu +sp2FLO3/HzHS30s8kfOg2ZDzRm5jOlFsK8XY175xUGrsCkSQmQPY11b7v8baBHp7 +KOA6xeJHD7+K1oKvxAqlU7Lwfmm0lbS9/JnIDiDel7oTHESk5mqSUZkkyWze+iNb +S/3J/8mtnHl72UpULoWkvSfE5xTu5W7uhXqCOayiOeiUpalKG+gwUZI1lgvDlXn/ +2LpFEFY/y21NWGIm9c1lxZdOzJpnfzvXw+27lGPjNhtjhro0wIFjQ7YCTyq7Ky36 +qPdJfU+206vkX7tzETyGPh0oO/1eP5b1QjJrtP7tMNS45yn4yzjICNhC5NXAXgbU +F5bUkWqhQDJ6UDa6hCrJ6bf63AdnqTtJ4layKyl6dz06qrVNpCyGTNNhJykdlSq/ +PkVes4X4yh6TA5pJowV2bVnM8nqN7H8TXbEetF9MP3ImYnVzDTnBWugWT1cVA45h +GyV/j4VHBqwPojGhRwFDM9reQ38tTrmss4l0hxC6B5ivIJtUvCqNa+E/cKecopmb +5fAdiROnS548tXuPzsz1EtcVor7k1i//SJJrSgqpaQb8E36uYw6r8yXQ6zhOyoUF +Pz4OVN9WR21G5R4sAjHV9U2l6ulgzwpE7O7Z5fSuTzZBttFX4U0OZZDfrDIF6jNB +jrd2RBacsjsm0PRGw2qrMZlPmhhHl0prfIPOrkRffre3wDk7POOoa2U/+CKcn86Y +780WrIGL6jMp31D8HDmLZbvsWtzKjTqMIsqo3gsFwCgtu9PKZ/z/sQGND6f9b8u9 +gpt/osBxSi5b7lHE34InizhzakEMtQ/bshO4WAayGY3Kaf0dG89mwQEOOzUw54Xk +x9F+hzYGb42IaTHO+h+mMznB4sh0iLyekt7eybwYGX/1/Oz8WQ/EfDHYu16XG681 +Zb5ev/6rojAWe6yib3MEWVjVcsoNUUA+51+hEO4UKEliNX0FvOe3q0aflqPVzi/0 +VVB3erVNQ/5uunGdZVzjgef0EbhFlHANjIcSD8N80NEaG2JmhVBd6kc7Ev1uoCK5 +r3kHNhyy/fipKamS84mhjTq3rgSeUCndf/TI+HSvJwQaA3sm1Bu5UuErjf9Qpq5P +osar1zVgWl2jEUejqwnt4626J8g0MG92amHHsHG1htzjAzaTqtMlORdUmWgppYVs +dlGLDA9eMkmOBo1WdQYZDDnCcNVdT6MoeKmDsqmM6+ma4vpHuelYmDJ5l0a3hGbF +-----END RSA PRIVATE KEY-----""" + +X509_CERT_KEY_PASSPHRASE = """asdf""" + +X509_CERT_CN_2 = 'www2.example.com' + +X509_CERT_2 = b"""-----BEGIN CERTIFICATE----- +MIIEbjCCAlagAwIBAgICEAIwDQYJKoZIhvcNAQELBQAwIzEhMB8GA1UEAwwYY2Et +aW50QHNiYWx1a29mZi5pYm0uY29tMB4XDTE2MDkyOTIzNDk0MFoXDTI2MDkyNzIz +NDk0MFowGzEZMBcGA1UEAwwQd3d3Mi5leGFtcGxlLmNvbTCBnzANBgkqhkiG9w0B +AQEFAAOBjQAwgYkCgYEAp8q9ybIlTP+Aka1jaLE22gE784t3rQ0KC83ODSY0283R +QX6BfHrAVTj1ctyvz0D6hxXiYXwi9mXXHvBzzxScPxImQ7jbvYyP0CtagQ4QGj7w ++XVWY94bY7X5cF5NlGHl0EIHBO2G0wc455Mgzlakkfoa7k9YJM37hfwlBV6IX9UC +AwEAAaOCATYwggEyMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCG +SAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUw +HQYDVR0OBBYEFLc+lXNhKO+47kWgrMirpmSU2FMWMIGYBgNVHSMEgZAwgY2AFDdT +D+UkuWScq1rj1UpBBSitkp10oXGkbzBtMQswCQYDVQQGEwJVUzETMBEGA1UECAwK +V2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEMMAoGA1UECgwDSUJNMSkwJwYD +VQQDDCBtYXN0ZXItY2EtdGVzdEBzYmFsdWtvZmYuaWJtLmNvbYICEAAwDgYDVR0P +AQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBCwUAA4IC +AQCpLBSdj9eeXWZNFXZzhtrI34f+oPcyZeERo04gBcNik6nSL+iwgv7TqqEnH+X2 +lSCXAjxQEsXHSf6Ox/YWlXhr20jCoTpZBQIB0kbu3wBfowxNFIwKKyIbpt4pbJzC +Hnx2EsOVkxywAzE9aos7JyXk4ya2U30v/m54shC0Jxxpp8KqNAUQ535NCM+563eN +8GXAV8uS6UwTnDwepU79xixDmk1XIsMlJP0e6ROsNFBSdZ1QwCjOwbA7clAdlpch +f7eF0mJTXKkrFUBVqZh2iGFQ4lasoXeTM6yR3be/tO12NdM1tGT9HT88WeRpRin5 +73pTSETUMy9+80T57DxpGNOVkBLI1AhRWkqQ7kgyNmm9jajZVyZTuSPhXpQAunxs +XxS9gPqe7LuBoRXsxLEGrXJ4h5+D3OBr4KGMHcFbI3o71ZzgDMWQ8Hyik7j6BE3J +zXmoSZjbvJBiz3asU74/a3dH2XkNOdzErN8RkMRzL8Z1TdgL+SRndXMpSM8cI44v 
+jpyx6T1AdxgMrstDuPX6U0EMl2WoEvkwtePUc3hBYCkm376yVbtbJcAqndFW2lAY +HULxFHp3QLrnbQEvPIcD0EWppJ1GMqb/Gv8jORzOks56UtOIfavrzGrcvRSKoC4Q +lDApYKCiRvvBSVfgpoiVungh2NWSmNW5bn2uOkPt+vTjcA== +-----END CERTIFICATE-----""" + +X509_CERT_KEY_2 = b"""-----BEGIN RSA PRIVATE KEY----- +MIICXAIBAAKBgQCnyr3JsiVM/4CRrWNosTbaATvzi3etDQoLzc4NJjTbzdFBfoF8 +esBVOPVy3K/PQPqHFeJhfCL2Zdce8HPPFJw/EiZDuNu9jI/QK1qBDhAaPvD5dVZj +3htjtflwXk2UYeXQQgcE7YbTBzjnkyDOVqSR+hruT1gkzfuF/CUFXohf1QIDAQAB +AoGBAJevcq8ZuxrGiAYqBwon2nxuTTI4TLJGbWSKYtIEThi/EYLxyEEt/x2L5mg2 +FUF5boIcSJD0Ve8F1dmEak00RqJO96V7riNe3a0SGhYjak6okEXB1W75LfFQ7Jik +I2Wkdg+M2gdcHNKXmVWrO83aR+zWFXv0yHINANQLaUhunW4BAkEA1TKfKbdSkTkn +T98j6rGM73nd56yCIK45CZmHg33ICyKjH/fUiNpHmZtBxCgrYTeFOJtLEW4QENy8 +vusxB1zbQQJBAMl6eOliCfr6y85pCIKGFHL/Mwzij41xRPv50wWkFpdNOPW8lqlK +SGZHdMn6fWi8e02tkcIQqeRrg2cU6WsrA5UCQCMBqeLGqDcSRGM4B4S83KBhyU8G +I2PMV68812R+3b7+U/ymy+4gsUsGlDjqQ5016ZkO3reg8+Bg7lkG80j7NUECQHJr +DPNs68IOX2OPHngRcNeFuhYdK+zlYInAbGiNsQ6nmitjuCPXvZnoBpkVmda7A0Mv +yNDu6ayAqhUGOTDVMqkCQG9Vk7xjpe8iLkI4h7PaxaqiSwY+pyY3QoErlumALffM +t3c9Zw9YGbij+605loxv5jREFeSQMYgp2GK7rO7DTbI= +-----END RSA PRIVATE KEY-----""" + +X509_CERT_KEY_ENCRYPTED_2 = b"""-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-256-CBC,3CAEB474D1526248CA20B5E4F84A6BB7 + +t6D+JPlgkUbxQzP4dSifigWPrY64diq85Kl3R+XSjh4az3hsvi6dZCEna7f+G+UH +Bgoak3/EPVcTe5g09ewxfnkBvjej78ktc4fUDWqcPwl6xwSbqVkz+ejEe8MAOR4d +VN5bG559HXD1AYbhr3XyONpUrNlyQrdtaxNjPtt2U77aPfEo96/sEaYA3KXKq6pd +NEXU0K/4MSRP2sErybUubyJBz6XJLZ3LwILXRONV41GvFmnDGJ20I1X+IzlV/YDo +HpFKspuTrDzXttlMFMcQVdCWX450Zs988FWa4vwN0Ma1sgl8VwjcbWDAgx5tM1Ml ++t0PT1yL2kIGIPbnsVoPphIet+qjZZmmOFCRwfvXiYSTf9FZ8eawnqQrmoSN5iNt +T63Aidf1dV0nHk+IZxkdgzm3C7ffeIPG4yMx6px8NnJzp7lCMx76FudeeqUx0ezC +Del0Thfh8/N7RX7mUP7HdybXIrR9Gp+p9WUelag6DpMgCcGWNvTtk8NUK+3TXAax +Ud+eZLP6k5LXiqhwSuWb0/r6I7OSgseOBsSvAw8PVfDsg6LwyhLqLmOLgxVas1Ay +EXJVqD0QviMl9aXBK/kpsg6rdhJCBJ6WQlytS73Iyx0plD38SwAS84d6B4ASLHye +wXyd6UrKQ3c6hQV8c9jzHvllaEafF3WUjacwuwmNOlBuWh7887JsFeYqbEIlY82u +pVM7cDTfJhEggpKK+q3muntMeLTVaIKcqvYoITbVoRJG8F4Zc29cibZjz19zshBM +OEUKHsL+I+kFr0SBLY8UnAOjIt9AjJLgo3uVC13fj6omO4EeXQjY82GKo70RRszs +-----END RSA PRIVATE KEY-----""" + +X509_CERT_KEY_PASSPHRASE_2 = """asdf""" + +# Wildcard cert for testing +X509_CERT_CN_3 = '*.www3.example.com' + +X509_CERT_3 = b"""-----BEGIN CERTIFICATE----- +MIIFJTCCAw2gAwIBAgICEAUwDQYJKoZIhvcNAQELBQAwIzEhMB8GA1UEAwwYY2Et +aW50QHNiYWx1a29mZi5pYm0uY29tMB4XDTE2MDkzMDE3MDkyNloXDTI2MDkyODE3 +MDkyNlowHTEbMBkGA1UEAwwSKi53d3czLmV4YW1wbGUuY29tMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6v/973etopk2Vz95DUcx8X6hLfJ5m8s+scn7 +nMZ37fSqAGPF0veGpqyqxorwh+GYLjlrvZkhVi7IZJAsLU2ztG4+MEoYzbyhgJer +FmepBC7xPIJEjh8FKhtpvxVOMFcXJ1CZT89Ww0rVPnaoE09DS0DRo5s+lW0dD6Ta +QW0S/6RCZ5RpD1q5MP86JvTspkWhhKY29eEMFZQYDwc9HEPE+C2scapGM6reP+Ix +c/Q8806BUXNkLoXvGo+LqmeONquCUGCXL9HLP70Osp2jfqgTT3RfOFx3k5OaASeZ +MhHRqntdReYXN16PhMU/eDvKr42QxCwNAVLDSrkJGG8eChOgVwIDAQABo4IBZzCC +AWMwCQYDVR0TBAIwADARBglghkgBhvhCAQEEBAMCBkAwMwYJYIZIAYb4QgENBCYW +JE9wZW5TU0wgR2VuZXJhdGVkIFNlcnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU +eOw2E5rYzvCuhuAR11GtoyT/qgswgZgGA1UdIwSBkDCBjYAUN1MP5SS5ZJyrWuPV +SkEFKK2SnXShcaRvMG0xCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9u +MRAwDgYDVQQHDAdTZWF0dGxlMQwwCgYDVQQKDANJQk0xKTAnBgNVBAMMIG1hc3Rl +ci1jYS10ZXN0QHNiYWx1a29mZi5pYm0uY29tggIQADAOBgNVHQ8BAf8EBAMCBaAw +EwYDVR0lBAwwCgYIKwYBBQUHAwEwLwYDVR0RBCgwJoIQd3d3My5leGFtcGxlLmNv +bYISKi53d3czLmV4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4ICAQBvDBAbwipb +h1bgfOIo8Wv17QdyFDVvzj23eO+BbYbUNF+JD7HAcq/Z53RSj9Kv76NS48OLqWUk 
+RM81TPiHimolFvF6AZLXeYKVpl48nCQRQixHSkmW0I8BlpQ5/Cl1WUqFRcDAl3i8 +lMLpAUhMb8dt4d2hviktK+L5CiBLdmKCnlz0LOK/4GuF4Z586jrrWyjw/GBYvmXX +0ujjli4J6WMJXVZ1IIwIM438N0eG6wKRNBbJQl5tJjKVX56hSkVdgQPz0gjhNGlJ +VvImaAtLORgBUqXXs2PhcZ5HHeSd/dF2pJeOYC6P4qjb2BqhDHwDKjsSDD2sPoMF +fvI6pQ0zPCpx7waCxpk+UxshJk3CG1XoWdlWZmDBLMl2KjDH0nWM7nI6oWPXK8K1 +R+iBL4IUp8p/ZvGGcGeP2dUpm6AKcz45kYEWPm5OtB10eUaCQPkeUvWRmY35f0e5 +/7LlFF1VDlRlxJPkroxrDDm5IIWS1VPTnelXzvBKenqTFFbQUzS1mmEEY/pPEKvS +Z8NAha3g0/jex5sT6KwB0JI8fvyCzfCS8U9/n4N87IrFcKThw+KMWkR3qjZD0iz1 +LwW88v99ZsWWIkE6O22+MmJGs4kxPXBFhlDUCC9zPBn2UBK8dXSYL0+F3O7cjWQ7 +UUddoYPP4r24JRrqzBEldSDzWeNSORpUkg== +-----END CERTIFICATE-----""" + +X509_CERT_KEY_3 = b"""-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA6v/973etopk2Vz95DUcx8X6hLfJ5m8s+scn7nMZ37fSqAGPF +0veGpqyqxorwh+GYLjlrvZkhVi7IZJAsLU2ztG4+MEoYzbyhgJerFmepBC7xPIJE +jh8FKhtpvxVOMFcXJ1CZT89Ww0rVPnaoE09DS0DRo5s+lW0dD6TaQW0S/6RCZ5Rp +D1q5MP86JvTspkWhhKY29eEMFZQYDwc9HEPE+C2scapGM6reP+Ixc/Q8806BUXNk +LoXvGo+LqmeONquCUGCXL9HLP70Osp2jfqgTT3RfOFx3k5OaASeZMhHRqntdReYX +N16PhMU/eDvKr42QxCwNAVLDSrkJGG8eChOgVwIDAQABAoIBAQCbp336bKn9BkCh +H7C9o8en7rkx5ua307KuLlxTpn3vhlxIL83rg/qTUdE3+vWA+2NCUtRTXCdhn1Eq +kvg/9bSvMUpNz/aH54aN12gCSh0AYVt2Oc7Q2Ckij8/GOoV0rWrvpoo1+967Mkj2 +u79uMtUe9ksldAHLFd/m6cmLBoVL/6rxByO9JsQjb+qFcNcLmNwTsGWttAT1a/Sa +Cy6JESzJzL6jMB1hNr/UI4nh8CkD2Ox+G6efs6FyMtayOP/AVwr8jSywVWZ+9tiX +kidCNS5xzazt1aMeJcu1h3yzYt2PvNHVE17T5imQGDUKuhmH/PZdySldnAU2srm5 +b6tGNAJpAoGBAPcjPNJHnUSYh5GooeOPCG8QtnhwEDbuwcOzoeFvJBNlSs2XF25O +cXPjUx5HVpJqeBTiOX2zvWr6jK6AggC8K+eF7ElecEeCEFf4feO6iv9n97bzntmi +lPlfKBkQOYfUA/Syva6CiLuz+dZS8zYIDiB6C5/hhIFi+O5fG+hny8ILAoGBAPNt +VBxjz8bl/oaj6wm5oVk7HMBABBbAbPcH31YelF/cEFe+sTMk07Dpqebq3BQcEQAZ +YgERoZeqA7Ix47N2LUfvUWba8Kg83JvwSYV2WRLxbGBMsubUHBX3J7+2d7mMbaUb +NycvS3K+M5HYDOdGuXwObJod54pl0D+8Kk6QHXZlAoGAOPfLdmGBtCVA4uevYag/ +9nIwzbRvWGpTCgynXTLkawAnbRrOEOROLLuTFmC1aQzX32CChiJfoIBe237N+ONn +b3gkjokRcrpdkBm80zjV/6f0pxyjWmGq24z+zkA6MsBBpS9qoAaBBFupVKlMXQEg +WIYpldJDXBv3a+NKqJj8lB8CgYA0rjlgt30U11tg+gJ4pVK0w+Ux+eion9Y1E+AS +fCWyJSboRl2E6vhhNgBN+A/OzlAjjFq4Tn+BGgsYulFD2lRsV+9u6fzg++LmYHcY +ygb24YaJxK+G4up9GnLgu3Vnk2t7Ksuh0EtstprkejQ4rQahQWHhbI1aVzRdRrSF +Mg0ePQKBgFn2yh/gKf0InDtR6IlIG9HVI+lMKxyU5iRH/9MQ7GS+sSjiAXdOtGJJ +1QT9hTtPzR9SBXqu6yWJiIvYAfnrmea6kMb9XH5L/XIciZA86DapUl+TWicpI6jH +KX8jFiCL+HcZX+pqAaUuifgwnqd88EX7MPoU6Yjq02To9ZAPA+SA +-----END RSA PRIVATE KEY-----""" + +X509_CERT_KEY_ENCRYPTED_3 = b"""-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-256-CBC,088E3835B2238C332FC7AED391C9CF8D + +qKPZwlDrkX/0J8Un29cmvUuVi4FzSWRRQe+m63/ezh1vzv8IOtmw87lDffPhbJ/C +mLAj09l9Z5I2yLWb8tIJoYOzFjxU/lKI/M85EPEpuAggzpqCf4uwor7MUx+++fKv +aJZuwgbmfb4//A9iNfSriWfrrS7dzVm2RrlNtj0ityiigzTSfY8oiL7fGTfocdx3 +F0mXGEumySEMge3RF/I6/bAa8c7T4JRUc4yN1kzMftxH1H36QMjV+2thyo8UrB4Y +cBIIUncFbTvnhvjqszQrCrh08Tes+AtxUXevCLRiaLqOlnF1LIyic/PRob+yqXKw +jqUyjaj48MvJwxP1l4NtzzPz1QM4aaXZAq8O79DVHxaE9d6Qe9fw8cASFGrTzU2l +8jbHo+loXcG8LWp6Cqdvv0VMuK44G5TIOuFmuZ1YHgawCtihocoqUOyXJ3JryMUN +0RiaF3ybkplMEVtZBtTYkhx6vYnq7KX3CghpigcOajvzZ5jj6JMXkcenFSU87VaW +tfzbZRk5LYuJOLu9MN3joftvD4m/mnFVXM4SF5ypTUW5PRSZGEa0im4LPWq9LH3s +lrgh44jVxqfyAxtyVC8Mf3X7tOmm2dlHWLB8kBcqHfcJjRaZQeD25V2DbmCAp7NN +UsUKT0ftRfSKGTmsSfPv62mFo4RrdI+/Xws1iOY8V1LekGvKc6zpSwYfQnrwIUi2 +7PkslX0UyXaN7j020anNE9LV2NnccAWX/lkGCoUn8EPPrAum5wopLxm02caNKUlK +RM+Te+LJeexLadkFStDentCmH3m9GoehDvWBLHGbdb/5sXqvxuemBxkyhjqXvOau ++cyDRmfUtLf8ik9PvzP/dQqBn93fMkWRlJ6zRjn5q4lG+qKbw43UDWuYMmSBQd5Z +ZUuTaT7bymQyPLUFmjkQlQm+WOFgCg26WuaXn8sXvQCtK3Ha9v0C21gJWQ7PnhKh 
+hXFwuD0Jfu7G2Pie5ToBhsxC5PNYyVYZQOCJ3ZcvH1Hv8RCvIDPHMFdZohJVGwdA +8X90Z433Nv1ke2jAjMX9+Ph4txxRYwcV3IpfdyAFk6cjukdkBrcPPFARZiOSeNwO +XskiNT6E0KUAc1KNyhsBRTSxmNkkzfqe4hzEkLukWBsyJ11/jmgKJqApyKZfePGR +/kDGbJVbSlMvftmBNCkT9owMDjKmwHvo5iiV+rkhWEq3jaISu3+qtTj11S4+bRS8 +vlh3G+BjSvpA2SBbXKWM0UrSnxtLow41kIZTZJ+5QnuQ9LYER1CAuMxlqManBWq9 +JwHGmLHqcLVPxDXo2fTsDHAZlw6TD3pC53WDYbAZC7SsePyNvbPk9P8YG47F2IZP +ioxamytTKal/abrfrU8izw1HM87LNVQ4yAGCIlbj+0utN+aZfFDXgm+/FafraANr +Ti580sCEkDrRrzAp0lG3AcSGTM83Jxz5Sz1o6xdWDBdshfcPIJgv9g6NlzPWzy3/ +39Xhe11dMDqKOdiY+KtdDCT4R3rp49Zctc8KopEX9yjzmPm8aekgyzIG8ak4ka6I +V1OqZUUKNVGYtDAMDqqDEKNp3Y1mmeD8637oWVTQvbVJpatVIVoKb+MtKrGkVf0d +-----END RSA PRIVATE KEY-----""" + +X509_CERT_KEY_PASSPHRASE_3 = """asdf""" + +# The following intermediates were used to sign all of the above +# certificates and keys. Listing the same information various +# ways so that we can test the different ways users may load +# intermediate certificate chains into barbican. + +X509_IMDS_LIST = [ + b"""-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwbTELMAkGA1UEBhMCVVMx +EzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDDAKBgNVBAoM +A0lCTTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2ZmLmlibS5jb20w +HhcNMTYwOTI3MDgxODMzWhcNMjYwOTI1MDgxODMzWjAjMSEwHwYDVQQDDBhjYS1p +bnRAc2JhbHVrb2ZmLmlibS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQC8KiZi1t9vX8IBtiOgOXHwyTo4ljYOqz92BqyIj4r/YJNHxeJ7JHNiM29e +u6K/LdJleOkDXV+m0LDchn6pMf1/VJ5/Zpr7eWTqyKSaP5zTVbIOI1It60MQpsSi +GbcfVPJ7TrwzNKtyBXREHwC+mEvIuEwDRHd8DljJG5J2CpS3Re/CgxR8DrKXnT6Z +XHikog3yYJ7vULtxz8ktgOjM7uFh27YmHU4o1WyeAmMwpesVkqY7E7frbIYYbQo5 +B1D1eWqM3KldqOQqvq34kPkf0vdfXxPurysNJrEaOXzDRdI6GiXaCKtnSEF0urCR +denIDWMeqq2w5H1ekNgpK3XeFFxQYkhvXN85HiRmPO9wrd4qdEs1nvTIVVEDpB5F +Fe4aFQmsAe57Ll1DTKZcja30VD1uJ5PbeoysHhN270+IbFeXK3x/icS5F1QdfE/p +YIA0L3JRSY88IXw4giHnlLnYb55NwLac3EXmr2Qks/T87/gbk5gk9G+0XK3FSRxF ++MDdmRiLAKLSb4Ej3wX1dXnSgelx5cBZ0n+NBY/865oauui27/OIaL7ZaDCDZU/t +jIJDy/uQkuAjH4UVF4m5PqRaykqrjbyRJeADbL2E7CxamOsgyAfzhgIt04hpKkvZ +oCCTRREeNp9rRITQiGMsfCUuxackDajsW/pnFD7m1ZKej8fcdQIDAQABo2YwZDAd +BgNVHQ4EFgQUN1MP5SS5ZJyrWuPVSkEFKK2SnXQwHwYDVR0jBBgwFoAUhmmo2HE3 +7bnky9h7IQ5phCFGU18wEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMC +AYYwDQYJKoZIhvcNAQELBQADggIBABGdyLUs9Pm9SXpSQLSY4BWnYdZvUoS5rhJ7 +coVrkTm3bBmI6haSWyoSy/l0Qno+7rXz1YruKYqKaYTAakuDL8C/J2VuL/p29nyC +lH0rDKNuz+6jU8xLVmcZwBtsKK1QxNgAdQ3DWygtmXzZ/tigfIVVIyaTkOvOjovu +IxTWAbmJu/xbkq4zZEaYJ0xobK8LGq3ZDauCsPNykqH/jwLwYWHQbKPmppTkNff7 +unXrxQ+eSH/a1aCCldZI/NZywjZpNUdylEcnZhWshiWChD6j+CgrirdO0JeH9sGR +0La71VqujFWvVJUYYSbb7l4KFBLFw8Od5Z5rpYXm/qTHd6OvyS3qajse8ardqN0g +2Hunu0AtJ99JBHxzTP6blAcuTTrwS2XjB83/7k5YfN0jGbqQOYCJMTZ3pk3JkrZi +pxhjY1ZX1N8Opb7IwgjIXwzNy/joL7smUNBQlTPDN1IfM5b83NGNSDKaS1pWiqaL +XO6erkwabZxCVfGgvIk9hE4x6+Cu+jdOLTpAwq1mcQroAp1+CInHrZeHdnhz0zR8 +4CUmddOos2WYTF+OvRfel32rBCaKlH6Ssij0JGxSYT24WXygsCdpDXfimg3O4Fk2 +sJlV015O7iIu22bowsDcF9RfvkdHNULrClWI12sRspXF9VmRjbDyG4eASBiulJQV +bk9D26vP +-----END CERTIFICATE-----""", + b"""-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgIJAJLWg/Z3x5xpMA0GCSqGSIb3DQEBCwUAMG0xCzAJBgNV +BAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxlMQww +CgYDVQQKDANJQk0xKTAnBgNVBAMMIG1hc3Rlci1jYS10ZXN0QHNiYWx1a29mZi5p +Ym0uY29tMB4XDTE2MDkyNzA4MDU1M1oXDTI2MDkyNTA4MDU1M1owbTELMAkGA1UE +BhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDDAK +BgNVBAoMA0lCTTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2ZmLmli +bS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDdYtZguUzRpohJ +8GI2/KCXZxd6FzmZtqrKz1JhZxSV56WhYnYljzQgRsPX8lxUWC/nSm13cjitfKG/ 
+TvDNTs6bb9t7VkYM0k0ewvArcNRSSR/YHO0r7fWv7XkwTvt3yFupWkeNBaqDChaZ +vlblcQxNUgXI3r/dOJDhOlfzhF0LML5FIIHgkgQCHAUQ62OfLkmXqNPYAKa9K/TE +4UGtG9LYT0dy3AwKUpvXfnKJSEgrRd8Nul7Rp6BgYWoJg6pZD4GLFiqT2bxphJJa +AYulgtF1jDHeZgyunm7WrwZvxPC8AIcFcksRMxB5XOEo8PBXaGHxbIjl+PCw6WpF +5g7ZO95keYonpQ8nK9Vcn7BgWcQUY5SuZCaMTk79Hs/kD1upc22IHg//t1qy+0i2 +SNTxj7n7mkynBHoKSrlVviUkyZHQYniuAGciYYKTfRy0F1LaM3QOUF3XA9j+2g1j +CWolMPWpzWFTOkBwoCmCs0MX7FaYvsAeLx4rDVLRQWzvKZKGTubDBWS8wBsAq0hD +v4b3r4k6cIz9a4PYNFARsnShkKHwln9lM5HjPHUNSZ6oaaIdi4wEf0xwipMiEi+x +h3Ukztq6pBGlNbdxdlBP3PVap0AI81alswLWqCL5yBHzv0NQp+x7/EODJDcvE6sK +PRmBVTzO9Y20KMlHrcdlPiNbBDhJ+QIDAQABo2MwYTAdBgNVHQ4EFgQUhmmo2HE3 +7bnky9h7IQ5phCFGU18wHwYDVR0jBBgwFoAUhmmo2HE37bnky9h7IQ5phCFGU18w +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggIBAAbqGW0/XCmvptoh/6oHH8OvMUbn4m8Y9DqEnlWkfadhYkYaOh5SAZ51mD7O +uwi+he8gV0evMgmW9liDw/i/2IWfHLT56yHSQqj8kRYRu8ZEdfC8IQg7Pj8mCJru +JKtWWfCHWnoqxPmCkYfWrb2wb5K3Me33zMdvtdMg3Hzxg9UoRFvqyRJtYj7coK8A +3uWiX6vjDZTG+x7SF03zB40n6pR2mq+i7gqXeZBxV6VrIuYMQFVUk2VICe9hlsLs +MFzq5Y3/P9evKMAI8JoxLLVlmI29pLY6A6VCiAFfyjiflXGtFRGNfHyo6FTMPzoL +fGb0R/jAli47CVhvI7JyNqGMb6Oa4jqoVw5+RMmrgkaI5RhOplcTnqnxuEBqvxpk +utnLNFTZ4LLRjYyaGYiYybZF9NG/OkCbTzT4fwLxqHqa4HCzijnbdAZbLtGC2aL/ +SXMqHf1EHZmii9NZ/ndseom0l2+eVMaR8auZsSrpSbgzBB+UssVcBTD79Qb8LBQy +C6WXGJPCEOfOYsxdZMDbD7q9CqgT5P4kI8VfryB5iqaLfDtUwjT8GPoTybFiWHMk +0DiS1quLYFZK2QhyFY2D1VLweyTQl8Hb/yYbxmd9QZDpDGCaIRkDt5H+rX17+MG2 +n3yPHeLbGBLg9jphH7MMmsn57Z9fYjJADOOLFKG+W6txAQV3 +-----END CERTIFICATE-----"""] + +X509_IMDS = b'\n'.join(X509_IMDS_LIST) + +PKCS7_PEM = b"""This line of spam should be ignored, as should the next line. + +-----BEGIN PKCS7----- +MIILZwYJKoZIhvcNAQcCoIILWDCCC1QCAQExADALBgkqhkiG9w0BBwGgggs6MIIF +cjCCA1qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwbTELMAkGA1UEBhMCVVMxEzAR +BgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDDAKBgNVBAoMA0lC +TTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2ZmLmlibS5jb20wHhcN +MTYwOTI3MDgxODMzWhcNMjYwOTI1MDgxODMzWjAjMSEwHwYDVQQDDBhjYS1pbnRA +c2JhbHVrb2ZmLmlibS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQC8KiZi1t9vX8IBtiOgOXHwyTo4ljYOqz92BqyIj4r/YJNHxeJ7JHNiM29eu6K/ +LdJleOkDXV+m0LDchn6pMf1/VJ5/Zpr7eWTqyKSaP5zTVbIOI1It60MQpsSiGbcf +VPJ7TrwzNKtyBXREHwC+mEvIuEwDRHd8DljJG5J2CpS3Re/CgxR8DrKXnT6ZXHik +og3yYJ7vULtxz8ktgOjM7uFh27YmHU4o1WyeAmMwpesVkqY7E7frbIYYbQo5B1D1 +eWqM3KldqOQqvq34kPkf0vdfXxPurysNJrEaOXzDRdI6GiXaCKtnSEF0urCRdenI +DWMeqq2w5H1ekNgpK3XeFFxQYkhvXN85HiRmPO9wrd4qdEs1nvTIVVEDpB5FFe4a +FQmsAe57Ll1DTKZcja30VD1uJ5PbeoysHhN270+IbFeXK3x/icS5F1QdfE/pYIA0 +L3JRSY88IXw4giHnlLnYb55NwLac3EXmr2Qks/T87/gbk5gk9G+0XK3FSRxF+MDd +mRiLAKLSb4Ej3wX1dXnSgelx5cBZ0n+NBY/865oauui27/OIaL7ZaDCDZU/tjIJD +y/uQkuAjH4UVF4m5PqRaykqrjbyRJeADbL2E7CxamOsgyAfzhgIt04hpKkvZoCCT +RREeNp9rRITQiGMsfCUuxackDajsW/pnFD7m1ZKej8fcdQIDAQABo2YwZDAdBgNV +HQ4EFgQUN1MP5SS5ZJyrWuPVSkEFKK2SnXQwHwYDVR0jBBgwFoAUhmmo2HE37bnk +y9h7IQ5phCFGU18wEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBABGdyLUs9Pm9SXpSQLSY4BWnYdZvUoS5rhJ7coVr +kTm3bBmI6haSWyoSy/l0Qno+7rXz1YruKYqKaYTAakuDL8C/J2VuL/p29nyClH0r +DKNuz+6jU8xLVmcZwBtsKK1QxNgAdQ3DWygtmXzZ/tigfIVVIyaTkOvOjovuIxTW +AbmJu/xbkq4zZEaYJ0xobK8LGq3ZDauCsPNykqH/jwLwYWHQbKPmppTkNff7unXr +xQ+eSH/a1aCCldZI/NZywjZpNUdylEcnZhWshiWChD6j+CgrirdO0JeH9sGR0La7 +1VqujFWvVJUYYSbb7l4KFBLFw8Od5Z5rpYXm/qTHd6OvyS3qajse8ardqN0g2Hun +u0AtJ99JBHxzTP6blAcuTTrwS2XjB83/7k5YfN0jGbqQOYCJMTZ3pk3JkrZipxhj +Y1ZX1N8Opb7IwgjIXwzNy/joL7smUNBQlTPDN1IfM5b83NGNSDKaS1pWiqaLXO6e +rkwabZxCVfGgvIk9hE4x6+Cu+jdOLTpAwq1mcQroAp1+CInHrZeHdnhz0zR84CUm 
+ddOos2WYTF+OvRfel32rBCaKlH6Ssij0JGxSYT24WXygsCdpDXfimg3O4Fk2sJlV +015O7iIu22bowsDcF9RfvkdHNULrClWI12sRspXF9VmRjbDyG4eASBiulJQVbk9D +26vPMIIFwDCCA6igAwIBAgIJAJLWg/Z3x5xpMA0GCSqGSIb3DQEBCwUAMG0xCzAJ +BgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxl +MQwwCgYDVQQKDANJQk0xKTAnBgNVBAMMIG1hc3Rlci1jYS10ZXN0QHNiYWx1a29m +Zi5pYm0uY29tMB4XDTE2MDkyNzA4MDU1M1oXDTI2MDkyNTA4MDU1M1owbTELMAkG +A1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUx +DDAKBgNVBAoMA0lCTTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2Zm +LmlibS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDdYtZguUzR +pohJ8GI2/KCXZxd6FzmZtqrKz1JhZxSV56WhYnYljzQgRsPX8lxUWC/nSm13cjit +fKG/TvDNTs6bb9t7VkYM0k0ewvArcNRSSR/YHO0r7fWv7XkwTvt3yFupWkeNBaqD +ChaZvlblcQxNUgXI3r/dOJDhOlfzhF0LML5FIIHgkgQCHAUQ62OfLkmXqNPYAKa9 +K/TE4UGtG9LYT0dy3AwKUpvXfnKJSEgrRd8Nul7Rp6BgYWoJg6pZD4GLFiqT2bxp +hJJaAYulgtF1jDHeZgyunm7WrwZvxPC8AIcFcksRMxB5XOEo8PBXaGHxbIjl+PCw +6WpF5g7ZO95keYonpQ8nK9Vcn7BgWcQUY5SuZCaMTk79Hs/kD1upc22IHg//t1qy ++0i2SNTxj7n7mkynBHoKSrlVviUkyZHQYniuAGciYYKTfRy0F1LaM3QOUF3XA9j+ +2g1jCWolMPWpzWFTOkBwoCmCs0MX7FaYvsAeLx4rDVLRQWzvKZKGTubDBWS8wBsA +q0hDv4b3r4k6cIz9a4PYNFARsnShkKHwln9lM5HjPHUNSZ6oaaIdi4wEf0xwipMi +Ei+xh3Ukztq6pBGlNbdxdlBP3PVap0AI81alswLWqCL5yBHzv0NQp+x7/EODJDcv +E6sKPRmBVTzO9Y20KMlHrcdlPiNbBDhJ+QIDAQABo2MwYTAdBgNVHQ4EFgQUhmmo +2HE37bnky9h7IQ5phCFGU18wHwYDVR0jBBgwFoAUhmmo2HE37bnky9h7IQ5phCFG +U18wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQEL +BQADggIBAAbqGW0/XCmvptoh/6oHH8OvMUbn4m8Y9DqEnlWkfadhYkYaOh5SAZ51 +mD7Ouwi+he8gV0evMgmW9liDw/i/2IWfHLT56yHSQqj8kRYRu8ZEdfC8IQg7Pj8m +CJruJKtWWfCHWnoqxPmCkYfWrb2wb5K3Me33zMdvtdMg3Hzxg9UoRFvqyRJtYj7c +oK8A3uWiX6vjDZTG+x7SF03zB40n6pR2mq+i7gqXeZBxV6VrIuYMQFVUk2VICe9h +lsLsMFzq5Y3/P9evKMAI8JoxLLVlmI29pLY6A6VCiAFfyjiflXGtFRGNfHyo6FTM +PzoLfGb0R/jAli47CVhvI7JyNqGMb6Oa4jqoVw5+RMmrgkaI5RhOplcTnqnxuEBq +vxpkutnLNFTZ4LLRjYyaGYiYybZF9NG/OkCbTzT4fwLxqHqa4HCzijnbdAZbLtGC +2aL/SXMqHf1EHZmii9NZ/ndseom0l2+eVMaR8auZsSrpSbgzBB+UssVcBTD79Qb8 +LBQyC6WXGJPCEOfOYsxdZMDbD7q9CqgT5P4kI8VfryB5iqaLfDtUwjT8GPoTybFi +WHMk0DiS1quLYFZK2QhyFY2D1VLweyTQl8Hb/yYbxmd9QZDpDGCaIRkDt5H+rX17 ++MG2n3yPHeLbGBLg9jphH7MMmsn57Z9fYjJADOOLFKG+W6txAQV3oQAxAA== +-----END PKCS7----- +More spam here, too. 
Should be ignored.""" + + +# Needed because we want PKCS7_DER to be raw bytes, not base64 encoded +def b64decode(thing): + return base64.decodebytes(bytes(thing, encoding='UTF-8')) + + +PKCS7_DER = b64decode( + 'MIILZwYJKoZIhvcNAQcCoIILWDCCC1QCAQExADALBgkqhkiG9w0BBwGgggs6MIIF' + + 'cjCCA1qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwbTELMAkGA1UEBhMCVVMxEzAR' + + 'BgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDDAKBgNVBAoMA0lC' + + 'TTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2ZmLmlibS5jb20wHhcN' + + 'MTYwOTI3MDgxODMzWhcNMjYwOTI1MDgxODMzWjAjMSEwHwYDVQQDDBhjYS1pbnRA' + + 'c2JhbHVrb2ZmLmlibS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC' + + 'AQC8KiZi1t9vX8IBtiOgOXHwyTo4ljYOqz92BqyIj4r/YJNHxeJ7JHNiM29eu6K/' + + 'LdJleOkDXV+m0LDchn6pMf1/VJ5/Zpr7eWTqyKSaP5zTVbIOI1It60MQpsSiGbcf' + + 'VPJ7TrwzNKtyBXREHwC+mEvIuEwDRHd8DljJG5J2CpS3Re/CgxR8DrKXnT6ZXHik' + + 'og3yYJ7vULtxz8ktgOjM7uFh27YmHU4o1WyeAmMwpesVkqY7E7frbIYYbQo5B1D1' + + 'eWqM3KldqOQqvq34kPkf0vdfXxPurysNJrEaOXzDRdI6GiXaCKtnSEF0urCRdenI' + + 'DWMeqq2w5H1ekNgpK3XeFFxQYkhvXN85HiRmPO9wrd4qdEs1nvTIVVEDpB5FFe4a' + + 'FQmsAe57Ll1DTKZcja30VD1uJ5PbeoysHhN270+IbFeXK3x/icS5F1QdfE/pYIA0' + + 'L3JRSY88IXw4giHnlLnYb55NwLac3EXmr2Qks/T87/gbk5gk9G+0XK3FSRxF+MDd' + + 'mRiLAKLSb4Ej3wX1dXnSgelx5cBZ0n+NBY/865oauui27/OIaL7ZaDCDZU/tjIJD' + + 'y/uQkuAjH4UVF4m5PqRaykqrjbyRJeADbL2E7CxamOsgyAfzhgIt04hpKkvZoCCT' + + 'RREeNp9rRITQiGMsfCUuxackDajsW/pnFD7m1ZKej8fcdQIDAQABo2YwZDAdBgNV' + + 'HQ4EFgQUN1MP5SS5ZJyrWuPVSkEFKK2SnXQwHwYDVR0jBBgwFoAUhmmo2HE37bnk' + + 'y9h7IQ5phCFGU18wEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYw' + + 'DQYJKoZIhvcNAQELBQADggIBABGdyLUs9Pm9SXpSQLSY4BWnYdZvUoS5rhJ7coVr' + + 'kTm3bBmI6haSWyoSy/l0Qno+7rXz1YruKYqKaYTAakuDL8C/J2VuL/p29nyClH0r' + + 'DKNuz+6jU8xLVmcZwBtsKK1QxNgAdQ3DWygtmXzZ/tigfIVVIyaTkOvOjovuIxTW' + + 'AbmJu/xbkq4zZEaYJ0xobK8LGq3ZDauCsPNykqH/jwLwYWHQbKPmppTkNff7unXr' + + 'xQ+eSH/a1aCCldZI/NZywjZpNUdylEcnZhWshiWChD6j+CgrirdO0JeH9sGR0La7' + + '1VqujFWvVJUYYSbb7l4KFBLFw8Od5Z5rpYXm/qTHd6OvyS3qajse8ardqN0g2Hun' + + 'u0AtJ99JBHxzTP6blAcuTTrwS2XjB83/7k5YfN0jGbqQOYCJMTZ3pk3JkrZipxhj' + + 'Y1ZX1N8Opb7IwgjIXwzNy/joL7smUNBQlTPDN1IfM5b83NGNSDKaS1pWiqaLXO6e' + + 'rkwabZxCVfGgvIk9hE4x6+Cu+jdOLTpAwq1mcQroAp1+CInHrZeHdnhz0zR84CUm' + + 'ddOos2WYTF+OvRfel32rBCaKlH6Ssij0JGxSYT24WXygsCdpDXfimg3O4Fk2sJlV' + + '015O7iIu22bowsDcF9RfvkdHNULrClWI12sRspXF9VmRjbDyG4eASBiulJQVbk9D' + + '26vPMIIFwDCCA6igAwIBAgIJAJLWg/Z3x5xpMA0GCSqGSIb3DQEBCwUAMG0xCzAJ' + + 'BgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxl' + + 'MQwwCgYDVQQKDANJQk0xKTAnBgNVBAMMIG1hc3Rlci1jYS10ZXN0QHNiYWx1a29m' + + 'Zi5pYm0uY29tMB4XDTE2MDkyNzA4MDU1M1oXDTI2MDkyNTA4MDU1M1owbTELMAkG' + + 'A1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUx' + + 'DDAKBgNVBAoMA0lCTTEpMCcGA1UEAwwgbWFzdGVyLWNhLXRlc3RAc2JhbHVrb2Zm' + + 'LmlibS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDdYtZguUzR' + + 'pohJ8GI2/KCXZxd6FzmZtqrKz1JhZxSV56WhYnYljzQgRsPX8lxUWC/nSm13cjit' + + 'fKG/TvDNTs6bb9t7VkYM0k0ewvArcNRSSR/YHO0r7fWv7XkwTvt3yFupWkeNBaqD' + + 'ChaZvlblcQxNUgXI3r/dOJDhOlfzhF0LML5FIIHgkgQCHAUQ62OfLkmXqNPYAKa9' + + 'K/TE4UGtG9LYT0dy3AwKUpvXfnKJSEgrRd8Nul7Rp6BgYWoJg6pZD4GLFiqT2bxp' + + 'hJJaAYulgtF1jDHeZgyunm7WrwZvxPC8AIcFcksRMxB5XOEo8PBXaGHxbIjl+PCw' + + '6WpF5g7ZO95keYonpQ8nK9Vcn7BgWcQUY5SuZCaMTk79Hs/kD1upc22IHg//t1qy' + + '+0i2SNTxj7n7mkynBHoKSrlVviUkyZHQYniuAGciYYKTfRy0F1LaM3QOUF3XA9j+' + + '2g1jCWolMPWpzWFTOkBwoCmCs0MX7FaYvsAeLx4rDVLRQWzvKZKGTubDBWS8wBsA' + + 'q0hDv4b3r4k6cIz9a4PYNFARsnShkKHwln9lM5HjPHUNSZ6oaaIdi4wEf0xwipMi' + + 'Ei+xh3Ukztq6pBGlNbdxdlBP3PVap0AI81alswLWqCL5yBHzv0NQp+x7/EODJDcv' + + 
'E6sKPRmBVTzO9Y20KMlHrcdlPiNbBDhJ+QIDAQABo2MwYTAdBgNVHQ4EFgQUhmmo' + + '2HE37bnky9h7IQ5phCFGU18wHwYDVR0jBBgwFoAUhmmo2HE37bnky9h7IQ5phCFG' + + 'U18wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQEL' + + 'BQADggIBAAbqGW0/XCmvptoh/6oHH8OvMUbn4m8Y9DqEnlWkfadhYkYaOh5SAZ51' + + 'mD7Ouwi+he8gV0evMgmW9liDw/i/2IWfHLT56yHSQqj8kRYRu8ZEdfC8IQg7Pj8m' + + 'CJruJKtWWfCHWnoqxPmCkYfWrb2wb5K3Me33zMdvtdMg3Hzxg9UoRFvqyRJtYj7c' + + 'oK8A3uWiX6vjDZTG+x7SF03zB40n6pR2mq+i7gqXeZBxV6VrIuYMQFVUk2VICe9h' + + 'lsLsMFzq5Y3/P9evKMAI8JoxLLVlmI29pLY6A6VCiAFfyjiflXGtFRGNfHyo6FTM' + + 'PzoLfGb0R/jAli47CVhvI7JyNqGMb6Oa4jqoVw5+RMmrgkaI5RhOplcTnqnxuEBq' + + 'vxpkutnLNFTZ4LLRjYyaGYiYybZF9NG/OkCbTzT4fwLxqHqa4HCzijnbdAZbLtGC' + + '2aL/SXMqHf1EHZmii9NZ/ndseom0l2+eVMaR8auZsSrpSbgzBB+UssVcBTD79Qb8' + + 'LBQyC6WXGJPCEOfOYsxdZMDbD7q9CqgT5P4kI8VfryB5iqaLfDtUwjT8GPoTybFi' + + 'WHMk0DiS1quLYFZK2QhyFY2D1VLweyTQl8Hb/yYbxmd9QZDpDGCaIRkDt5H+rX17' + + '+MG2n3yPHeLbGBLg9jphH7MMmsn57Z9fYjJADOOLFKG+W6txAQV3oQAxAA==') + +# Keys for the above CA certs, logged here to make it simple to sign other +# certs for testing purposes in the future. + +INTERMEDIATE_KEY = b"""-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEAvComYtbfb1/CAbYjoDlx8Mk6OJY2Dqs/dgasiI+K/2CTR8Xi +eyRzYjNvXruivy3SZXjpA11fptCw3IZ+qTH9f1Sef2aa+3lk6sikmj+c01WyDiNS +LetDEKbEohm3H1Tye068MzSrcgV0RB8AvphLyLhMA0R3fA5YyRuSdgqUt0XvwoMU +fA6yl50+mVx4pKIN8mCe71C7cc/JLYDozO7hYdu2Jh1OKNVsngJjMKXrFZKmOxO3 +62yGGG0KOQdQ9XlqjNypXajkKr6t+JD5H9L3X18T7q8rDSaxGjl8w0XSOhol2gir +Z0hBdLqwkXXpyA1jHqqtsOR9XpDYKSt13hRcUGJIb1zfOR4kZjzvcK3eKnRLNZ70 +yFVRA6QeRRXuGhUJrAHuey5dQ0ymXI2t9FQ9bieT23qMrB4Tdu9PiGxXlyt8f4nE +uRdUHXxP6WCANC9yUUmPPCF8OIIh55S52G+eTcC2nNxF5q9kJLP0/O/4G5OYJPRv +tFytxUkcRfjA3ZkYiwCi0m+BI98F9XV50oHpceXAWdJ/jQWP/OuaGrrotu/ziGi+ +2Wgwg2VP7YyCQ8v7kJLgIx+FFReJuT6kWspKq428kSXgA2y9hOwsWpjrIMgH84YC +LdOIaSpL2aAgk0URHjafa0SE0IhjLHwlLsWnJA2o7Fv6ZxQ+5tWSno/H3HUCAwEA +AQKCAgA2onJ03nkP6Jj3UEB15FgeFv+NsKfPiI+roHJ2UF+GmS8Kdv20zbem+tJK +imbN5esiRYI61ODSGeRQk8ixLe+yCgrfaRZ1ECFqPon0s6XAgzBpBH53EMlvS0zq +2FaghVTG0uy4XYGuYMEKion3zVar2D9R745V+gBznErhdV8K/AaKzu6ius3GUgT8 +GKp6+wbbqoxbZnCWskNyr+xonK/abyYrWPT5zEZ2drEAThy9LdCQdMBBXkhtTTPb +rTEnpXJ3phaTOFfPxX/UHZwIToQ/L+cktb3lWqevuqNsO5i4ACGfdkb2fTdsQkzE +X51a1fBC1kIKi72POLVa9uCJdBX9TafN7vObGdVtrO/rzqS6PhaD85JcQ/6ns4Cx +8+zERCrNlSJ4sGkmSVXF5nFXwgZ5WgZVAbf7vyCBdBT4GqV0H5Yq0kxu2OPd5qvD +ZXesU2bkRhNpWG0LkjhM5mNE2lcBlBM+e93ZUSvP+KA83paLv6lNMmILG3DUbIpG ++juDZQgmTKAR2emsr4JBvJpp5XrczbFvxdr6Kn7UqVGFkqNFyMBBAeE0tdp1biLO +XCEptvvc0gh273csaaMHfyaDjOnvHQ0MJ+p0Z1WRNnvuoDd2rCclZ3suL0XYMZ2z +0je5yhJrnlbduFv7pDugG6mbLgmcTMFvBlKYQdjhnkRPtIDfAQKCAQEA3BQg/nLB +BupvYnoDzX0hgF5IYCm65DDg9zgXlyKrvbE+jAYpCgSA9aIkQVhL8DcxXLGj2AMV +LMtKtYn2vMz5uP5ooWxXsmlJphl6OLiWqpWOq0mW3J+jLCsjShUUWfHiwMkSvDw0 +CQvTRkXkJVeGduv63wH7jDcsB7NalpcYFQOk8s3L4tv+Yqm72bU26wB1BXGm6BSx +FeA03u4MvFI/rebyNEiVqFo9r0kBTpvHELuNpZTCotYdGZiJ3qgauJNzv1ga1KH8 +jjeXaR6YoP6xiD7TQvV02ZZ28VBSFmYmFKP5nlwHqCf4K5nq0rbaJ1OIJMx+J7Nj +hW5Li6OqRlWDwQKCAQEA2uCDEXABrwLd5JFDaAGqyIPzhtgTzOrddPEwkVY88S5n +Pv2zRnrCYx+e7erIgwtRVCny+CdH/AcQrd3jzxTjvUnDwsfWfG/vjnVCbxt/nJPL +cab1wumUQYfDYYZwdxOCs/y77V5sXskzmM/cr2ftPaVAWliKQoiMBq1qe2GX+6+v +pwuLd31bf2o2h5Ki1CbvjNPPwVycqOVuNRU4Kv+p74pdDdys8FHjtdXkkwnNyOI+ +4CWZ00ep4rGMw6jbs/REnSNmY6o2eCUjceYmb0B25U1c7VvU4rKaO5gGKP4i2YsG +zJ3LITduk9HEiy2+fHDg5+jS5A+2sa7+jr9KRLr1tQKCAQBAEnwomDAqW0ZiXTOO +N8fxH4HivUNz++cDEomLrsRhTaDwEzsPd5Az8yiq/ZHeNbNhUTZqRTt89xKhF7GF +9gceHLNJi8lcX9rZSMGUkekLcM9AfwQ05yUSTtzPTKPIK+vgRKn3s29S0uaHpwFs +/bZgGQPIuUMK52UiOM0+2m5gb9ct+UxKl9HP16qhD2kVseRf2V7GYn/L5qJ95MBA 
+c5Gmh34sSpWHlf4vcgm3YRLrhC8Q7eZckgmW0hoRgM+GvScNphDppt9oaDbkm8vD +02LMT74h2GRUlMG8L642Zzbe+35I5HI2Oqy9xOngvut0V5VjYUW5OTjYN+w2k0eX +gn4BAoIBAEYrB5nFgKSslJo7/5IORsP1mVK3XtGo0co1sE5HiU4AhFTrXFfR7xN8 +BaVcSV/Jdw82S5BGd4tScIRsyCWRGtmKFloRWq+V6wSOjvezpxt5PhV3Yv5Th5Xi +gj53rQJfnN06vryAMtnIQuRQbv1EogfLPHA6RkjCIbHaUnKvfNvRHMy+pL1v0K9u +S4D2/4Bn4xAQr1/b4tU6iDQ4U0NlpwMGJoLVJhP9DLU0lwyUbgZikammJERZixsD +tI7dSWHNg1mlCaQV41RtA4n2MIgl8Hfeb1YgxITQoSVNvVvS7TU0nr9mLsK9VJPL +Aelkhta6EUAHoeQ/LWCVK0J0DMkv7qkCggEAfYXt3IxEcAWGDse2GccNZ5t2LRxR +hIm6yPHpzmyJklEEoqCKltjFpr5Ltl6GWn/BuE/iHjBUL/PYpvQ2Mjz3VYR5cYe7 +W6Q8E45GTKX5A3YgAklRRKpd3BmS4pA3D6L0zt/CxWRZ/qIssGkOhV1E0/v7TgZx +mOk14aL/0t9PWKYjlqn9TJlmO8ZrTcMSpZ3fRFznIAgk1avexggrhShtrgjy+7uh +qH3e8e1WlIfA7FAqE1Dtae97oV/5wM9qp1rnijwq5jlZX+AqYq7GQ8J5x2ypGhZX ++N7I5RuaLjkJJs3i/EzCDwp8F3ZXZRiILaWSaGZlrZ8jgVtlNhNfVYVFuQ== +-----END RSA PRIVATE KEY-----""" + +CA_KEY = b"""-----BEGIN RSA PRIVATE KEY----- +-----BEGIN RSA PRIVATE KEY----- +MIIJKwIBAAKCAgEA3WLWYLlM0aaISfBiNvygl2cXehc5mbaqys9SYWcUleeloWJ2 +JY80IEbD1/JcVFgv50ptd3I4rXyhv07wzU7Om2/be1ZGDNJNHsLwK3DUUkkf2Bzt +K+31r+15ME77d8hbqVpHjQWqgwoWmb5W5XEMTVIFyN6/3TiQ4TpX84RdCzC+RSCB +4JIEAhwFEOtjny5Jl6jT2ACmvSv0xOFBrRvS2E9HctwMClKb135yiUhIK0XfDbpe +0aegYGFqCYOqWQ+BixYqk9m8aYSSWgGLpYLRdYwx3mYMrp5u1q8Gb8TwvACHBXJL +ETMQeVzhKPDwV2hh8WyI5fjwsOlqReYO2TveZHmKJ6UPJyvVXJ+wYFnEFGOUrmQm +jE5O/R7P5A9bqXNtiB4P/7dasvtItkjU8Y+5+5pMpwR6Ckq5Vb4lJMmR0GJ4rgBn +ImGCk30ctBdS2jN0DlBd1wPY/toNYwlqJTD1qc1hUzpAcKApgrNDF+xWmL7AHi8e +Kw1S0UFs7ymShk7mwwVkvMAbAKtIQ7+G96+JOnCM/WuD2DRQEbJ0oZCh8JZ/ZTOR +4zx1DUmeqGmiHYuMBH9McIqTIhIvsYd1JM7auqQRpTW3cXZQT9z1WqdACPNWpbMC +1qgi+cgR879DUKfse/xDgyQ3LxOrCj0ZgVU8zvWNtCjJR63HZT4jWwQ4SfkCAwEA +AQKCAgEA2q4m1KQ1HWJCfcbVPTuN5gAPUKpgW1X0nyDrXwtTaj/HfAKmcbNi6f78 +tPLSAP6bUvxR5QsOsU/K9g4kDqkprKBxTQOLbl7NjvVAB6kMEbvpmK/6FsqXRZBt +hSp/e3KOGFr1EnfmVkpAyN0bOMjSPg4naKOfIgYeFlxrREAbKFKdn+rcX9fb3bmP +x4a8gSBX0VcS6uq5yWMCBPf8x+IUA1dMXEjAG/I9vj9JJBIiN5xtGEJgJvhNkuam +t383ZYHLlHfw1trdId2yMvYT2wm9nT8+g1CKdnJJSgbZdM40fYCH3vlm7TZjr33v +a2GUBsM0/CUZlRCxsA7gyurVAAADS6UtfOF2lcLIxeC8FDdL/p4ytF3sYND4f0kp ++gQn5+vTnfijfEqWbHWnkn9V8MSZd3ihVn74d2544SOLJ5j+i1vYfieBj4gXhOiA +TMudpGh7wKOy/ajLRtSxtM1uYGtycA1e1jaBX8eXwfPyJemYlurK7DEyH2BlVbJY +EUCGYvR96VNpDLpvBwB0+G4E1LJOpt+h4si03mQIfnX3um6hBmUGzGwyr14i7Qh6 +mPT2i/xdZtUFD1Hp2cFCwVvkGzhorgM+ICgLOFF2FOuzBrC+zrQNj6Aom1bWakdw +x/lNKSYmzypsCQC20lCme4SRyRfn/Tz/ylN95uvZvU5gr7Lhf4ECggEBAPwKNCGI +45B16n7CqnTziO3nZHO2xeKEyzLXbKK2f/AvYiQnZxRG8aY+DqoqnnBbXCXnd7LW +nDjoOYI3l75n/F33+2IiyJUNSF2noLEu1T1gQVaFuU6p8bwCJ5rShuxXMVAGkw3z +/bcTuaZIJU4KTNCP4A9wgyB40uwRrYiEQMaYXjeph71PTOEA7XseuiOhHnaiCaeg +KVivOD9vR532Ai2mCmi/6oBtT/AjnbWLXXNJRp0OfPZ2nZ/Z8j0zmCMmbhMtpQe0 +Utk5LaABCqRh6ZRp4bvoqgR7yrOAH1NUPPJhdrQywAl0UiXgnjhNixDp4kP/TLvE +70Z2i+u3ayssEnMCggEBAODdVUYSw3F+CQ8T+IGaTAU8lzE5fgxGZcxsnzrnzbCQ +PLSuzx9FJJ8+gYCkD3FUg8vduN8uyy3YY13LFjOiJ6+TczglgmGgZM+2qxQ5mFyT +9FVijPUNtUuhJm3SBvHpJlzmcR/uNiIws55M+RbGSKB7kotf5FchZ2YBhZzpr7lG +jn6x15ts6iSlxHnR5QAPvqgCOhUJnk8CiDaypx12MXRP/A/KZX8XAeRFIMmKSC6f +O7kRY/xpSKxuyvACDybxhXbGP86t07ZXpXU8PmgU6yjnsGxQOg4iLlReI3jiaa7m +TTeiNjW3Ra2pOBd5BWn3ecVvf4UHJsJs59euYWha2uMCggEBAMbLlYrN2hBbsXYC +PUi5vktHs+fBRhwA+fVEDZ/Zqtfbx+EUYy2PN5MUZ6S4sPeWV/xdsgARXm9UW+fl +yX6Zrmi/7Dvfi65lJ6sXSJv4gKFEhsSj/SGa0dylJm/rlhhcPb0NMnhS9s+sc0ZA +qYwAe84VbXlAGW1HX7ZryyBekGkUTVxCD5q2LcFbZfUyq0bnEowoCs14eqREsGz4 +bNie7eDrklJE7cYWcnLK5N4I6tC//z5p6w7LSFCJK5QyWdF/wlrGKeEFzkMf4mjN +6YL257H0QeRhA5k9uwgSCqNDUj8ruOExFl9erFzL6oAmSYYxtBJGEFQaZVCCuKJX +reQDgxkCggEBANjfn6b94dJMJKCOOleFYVyLm6a2NIPSQcxv1wMRHqjz9BivoMQg 
+A7oypuRTVzGOua6eIfffQcbttKh5fug9tNj59V5pjt5pu9E59LaE9hYT/Mt9SUXv ++rL+sfmpX1lh7MYc225YaY2AOzyqMHNuug1OIYCa87e1V+xh+2PjXr/q9PPswm39 +FbZSyrRTY/IzPUb9Hte7dxvs7UMT+2nG3Nu5aPox0sJIhmKK6Zx36jZNDWTpCO4g +/R6RnNjuo36D4p0zh8bmkBKFZec0O1xXEJdbHiTZG6UWAmkMglnMxPES3daSdIZK +RMHBO4AoELirHp71cp/yzccnElRKs1faiNECggEBAJg1b53r259txjDUxY922imF +JXySNhRHlrQ6BYyfHJWMMEVNasd86wrc8VKgIqQcxtdfIL1dGxOu31xzmxsSmfjR +0aG51uHi/erTKeV0C3/bdZ8TgeTKhxXAVZXLuJ4i6HvdF1lAZmE0stXd7vA0bedJ +7RYKKnuW16qo05oNx/ADdjfCaOHA0cCfyPv294CQn0z4hyEVAbBykU/j6v0WbzS5 +r187A8Q9L5pB57JnuY9nO7MvrINJVNbLPYjanqrkqvwDjiPkzETVm50mVtFYLWgw +8m7OLXEaFVJ4XA3C8e78bzDhSMvQTc8QVYmwj24gQ/uolftqdM4lEKpUucw/ECs= +-----END RSA PRIVATE KEY-----""" + +# An expired self-signed cert for testing. +X509_EXPIRED_CN = 'www.example.com' + +X509_EXPIRED = b"""-----BEGIN CERTIFICATE----- +MIIDfzCCAmegAwIBAgIJAKByYqy5fAfLMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAlVTMQ0wCwYDVQQIDAREZWFkMRAwDgYDVQQHDAdUb3RhbGx5MQwwCgYDVQQK +DANJQk0xGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbTAeFw0xNjA5MjQxODAxNTRa +Fw0xNjA5MjUxODAxNTRaMFYxCzAJBgNVBAYTAlVTMQ0wCwYDVQQIDAREZWFkMRAw +DgYDVQQHDAdUb3RhbGx5MQwwCgYDVQQKDANJQk0xGDAWBgNVBAMMD3d3dy5leGFt +cGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKZXgoeI7tzM +KLUs0Fp9qEnILZTH2Xj5Q/j0KTkLD5A3VLROJof1lMb8voAkF16jnCC+A4RuphEO +QtEUAUwlbvYv0rrSEWKYtkGKpEAg7mH05/BiiLSuveIQido6u4659FJ3bgYNE/P0 +xb8vMuxy4M7JH1OF8XReI05UfLqGr5isjri/IS4ofZy97aMciDdqeAs+yDg6lCpk +e0UcPLmJw5tIMg30Pl0AsxkD9U5JejAHEOvYgNgCyk9lo8uf/S41pzmU4Wc9TmL0 +WDunicpqngmajV+V45VN6t4NDHo093kyZ/4gJcqRfsNQ2DQRyFzd8Yjllz36dO9B +HT2NhI9yKhECAwEAAaNQME4wHQYDVR0OBBYEFBRND67rjYxqeUFH3p9+vSoQS1Qe +MB8GA1UdIwQYMBaAFBRND67rjYxqeUFH3p9+vSoQS1QeMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQELBQADggEBAFOcwM8mlTsP+sw4yhxcPD72qiIn4DRI++17Yoeu +eVJWO5ZlelOaBVdMFV573/7LR0j4y22RNPiFRCj+oG+w/kQLVRBnxj/LhZj3T+sQ +DIlahXIWCroPqVXEbRejxOLugNLS7SoWp9pKqWXPawkyHIS0Ht7LyZQYm9Pt7PKc +uerOX3Qzt+W2nmgxA3mHhL76tCRqDATdn5guLH1F0g29WB614oI43kSt4WW0i4JT +S+aDmoFsO3i/E+x+qm5H0swjU9dLCvdMjo0VUpk5f1aJJ10xpeKTUYOB55haalJI +j+/EXRZyEna+vPrS8mCl0GMvlFm0ZWFdWaWPR7l3J/J4is0= +-----END CERTIFICATE-----""" + +X509_EXPIRED_KEY = b"""-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApleCh4ju3MwotSzQWn2oScgtlMfZePlD+PQpOQsPkDdUtE4m +h/WUxvy+gCQXXqOcIL4DhG6mEQ5C0RQBTCVu9i/SutIRYpi2QYqkQCDuYfTn8GKI +tK694hCJ2jq7jrn0UnduBg0T8/TFvy8y7HLgzskfU4XxdF4jTlR8uoavmKyOuL8h +Lih9nL3toxyIN2p4Cz7IODqUKmR7RRw8uYnDm0gyDfQ+XQCzGQP1Tkl6MAcQ69iA +2ALKT2Wjy5/9LjWnOZThZz1OYvRYO6eJymqeCZqNX5XjlU3q3g0MejT3eTJn/iAl +ypF+w1DYNBHIXN3xiOWXPfp070EdPY2Ej3IqEQIDAQABAoIBAD4c/0jNASTBt5Gv +oj2oHmcusJaV6ccajR8xTRNX5f/cKW0KoaizM1L6ncgLsg5M2cgALCAPkUNdJ+Ya +qkFc2Qpk4TORrZw7mhLvSlYH9fvuD43bvWB6v7zioBc1R0QMfAcvQY5Q49p81DqH +zWQtoXSV9XSi1360iEp/kfO0x20ip9rP7qDOKuN5gdvRa8sXKD+jnmp17e1rx+fS +U0UoReBUbn4iLbOdEVyH9HSqTB+p5nPq63KJBioJZMGhLNntKMAff8uMiVhhb7Io +vIIHgoIfFce9YwC4fn+0UDrBCAx+SAyw2cmmMyXIqhd3c2Ca7zFmezSuC3H5Y4si +535VO2ECgYEA2/7I8QOkrRx+Bd2sxe6n+jeA6yRVqBb+bE6rZUUQUlSAFqoM8RKJ +K8cRjePmtkd9UkGrfDN6XTyqKD5Vt1Cd7FNl5Q08C/WP5VUOaKgdq3MkeOoJT8xf +c0LWAoRw5InP7n6TRASExekagQEIMMOHZFtwSjz+HauLqohrk6CaBRcCgYEAwZDK +J0mYspt8Wwlwrv0ouQG6tCy0NkWCdNs4EbT12qH6FKsdUuvJku+Zr1amCq/8apTn +pdn2YlRDp5+jqsKf0dui5M2zC088XJov3VF1Ujm4BtSVwRRhi7BxM9BCv1txUs20 +e2XPKV7RKexOL6iWPWDIcB6ZFhJdxQI5mOF9ExcCgYEAmLHPZvnQYxdToV6rfPaZ +QOMlaBBgI7tR/HreG/xDx+E+xnxhXzIuY2RYmtOEXyBfq6hJDnvsgqqIsEYT2Jjs +BAwevUziUKqwpczTo3CMp2PT/Nj0fZ6s4aOSR00FzpqY6ECSlrNMNNIGw2Oj+7S7 +VLziw6Rx/MYEuujVQjJGtSECgYAXlwC8BwEgC0j9g0oDWYEEAwzw9l3EG3aJrUnJ +BqfLzF/A8xWwzGGVkbPGJaY4uTfm+Vy93rFjTGeuXwtAPVXi6oSQo+0FHNP7aSMa 
+Mto8eiJOWswhas10i71QFjp8PbWy5LTxMPgtT4voMw9YSZB9zHTBDUmU4gohf2Lr +mdd3YwKBgHu4IlMxt40w+Bn5xasvACB5iaO5EBKO7rp0ba0Po3t9SG9iPSr8Yruq +Qv1cDRGlM5jHboqSM2ju2/b/Wc2ezdjoktrwgG+ElQuptwwNIsFrooHMLMY3B53k +Je8uvLnAPRLL95ZhclaSw2vAxmaiGIsm7WGhjnRQ2Vntgd6fNgY9 +-----END RSA PRIVATE KEY-----""" + +# Other certificates and keys used in tests. +ALT_EXT_CRT = b"""-----BEGIN CERTIFICATE----- +MIIGqjCCBZKgAwIBAgIJAIApBg8slSSiMA0GCSqGSIb3DQEBBQUAMIGLMQswCQYD +VQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxFDASBgNVBAcMC1NhbiBBbnRvbmlvMR4w +HAYDVQQKDBVPcGVuU3RhY2sgRXhwZXJpbWVudHMxFjAUBgNVBAsMDU5ldXRyb24g +TGJhYXMxHjAcBgNVBAMMFXd3dy5DTkZyb21TdWJqZWN0Lm9yZzAeFw0xNTA1MjEy +MDMzMjNaFw0yNTA1MTgyMDMzMjNaMIGLMQswCQYDVQQGEwJVUzEOMAwGA1UECAwF +VGV4YXMxFDASBgNVBAcMC1NhbiBBbnRvbmlvMR4wHAYDVQQKDBVPcGVuU3RhY2sg +RXhwZXJpbWVudHMxFjAUBgNVBAsMDU5ldXRyb24gTGJhYXMxHjAcBgNVBAMMFXd3 +dy5DTkZyb21TdWJqZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBALL1nmbDPUDps84i1sM3rhHrc+Dlu0N/wKQWKZFeiWUtF/pot19V3o0yXDps +g7W5RkLMTFkZEcnQpyGdpAGjTjzmNXMZw99EzxsmrR3l6hUEISifVbvEuftYZT6j +PxM5ML6WAjFNaBEZPWtZi8CgX5xdjdrDNndwyHob49n7Nc/h1kVqqBqMILabTqC6 +yEcxS/B+DugVuuYbEdYYYElQUMfM+mUdULrSqIVl2n5AvvSFjWzWzfgPyp4QKn+f +7HVRT62bh/XjQ88n1tMYNAEqixRZTPgqY1LFl9VJVgRp9fdL6ttMurOR3C0STJ5q +CdKBL7LrpbY4u8dEragRC6YAyI8CAwEAAaOCAw0wggMJMAkGA1UdEwQCMAAwCwYD +VR0PBAQDAgXgMIIC7QYDVR0RBIIC5DCCAuCCGHd3dy5ob3N0RnJvbUROU05hbWUx +LmNvbYIYd3d3Lmhvc3RGcm9tRE5TTmFtZTIuY29tghh3d3cuaG9zdEZyb21ETlNO +YW1lMy5jb22CGHd3dy5ob3N0RnJvbUROU05hbWU0LmNvbYcECgECA4cQASNFZ4mr +ze/3s9WR5qLEgIYWaHR0cDovL3d3dy5leGFtcGxlLmNvbaSBjzCBjDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRQwEgYDVQQHDAtTYW4gQW50b25pbzEeMBwG +A1UECgwVT3BlblN0YWNrIEV4cGVyaW1lbnRzMRYwFAYDVQQLDA1OZXV0cm9uIExi +YWFzMR8wHQYDVQQDDBZ3d3cuY25Gcm9tQWx0TmFtZTEub3JnpIGPMIGMMQswCQYD +VQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxFDASBgNVBAcMC1NhbiBBbnRvbmlvMR4w +HAYDVQQKDBVPcGVuU3RhY2sgRXhwZXJpbWVudHMxFjAUBgNVBAsMDU5ldXRyb24g +TGJhYXMxHzAdBgNVBAMMFnd3dy5jbkZyb21BbHROYW1lMi5vcmekgY8wgYwxCzAJ +BgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEUMBIGA1UEBwwLU2FuIEFudG9uaW8x +HjAcBgNVBAoMFU9wZW5TdGFjayBFeHBlcmltZW50czEWMBQGA1UECwwNTmV1dHJv +biBMYmFhczEfMB0GA1UEAwwWd3d3LmNuRnJvbUFsdE5hbWUzLm9yZ6SBjzCBjDEL +MAkGA1UEBhMCVVMxDjAMBgNVBAgMBVRleGFzMRQwEgYDVQQHDAtTYW4gQW50b25p +bzEeMBwGA1UECgwVT3BlblN0YWNrIEV4cGVyaW1lbnRzMRYwFAYDVQQLDA1OZXV0 +cm9uIExiYWFzMR8wHQYDVQQDDBZ3d3cuY25Gcm9tQWx0TmFtZTQub3JnMA0GCSqG +SIb3DQEBBQUAA4IBAQCS6iDn6R3C+qJLZibaqrBSkM9yu5kwRsQ6lQ+DODvVYGWq +eGkkh5o2c6WbJlH44yF280+HvnJcuISD7epPHJN0vUM9+WMtXfEli9avFHgu2JxP +3P0ixK2kaJnqKQkSEdnA/v/eWP1Cd2v6rbKCIo9d2gSP0cnpdtlX9Zk3SzEh0V7s +RjSdfZoAvz0aAnpDHlTerLcz5T2aiRae2wSt/RLA3qDO1Ji05tWvQBmKuepxS6A1 +tL4Drm+OCXJwTrE7ClTMCwcrZnLl4tI+Z+X3DV92WQB8ldST/QFjz1hgs/4zrADA +elu2c/X7MR4ObOjhDfaVGQ8kMhYf5hx69qyNDsGi +-----END CERTIFICATE-----""" + +ALT_EXT_CRT_KEY = b""" +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAsvWeZsM9QOmzziLWwzeuEetz4OW7Q3/ApBYpkV6JZS0X+mi3 +X1XejTJcOmyDtblGQsxMWRkRydCnIZ2kAaNOPOY1cxnD30TPGyatHeXqFQQhKJ9V +u8S5+1hlPqM/EzkwvpYCMU1oERk9a1mLwKBfnF2N2sM2d3DIehvj2fs1z+HWRWqo +GowgtptOoLrIRzFL8H4O6BW65hsR1hhgSVBQx8z6ZR1QutKohWXafkC+9IWNbNbN ++A/KnhAqf5/sdVFPrZuH9eNDzyfW0xg0ASqLFFlM+CpjUsWX1UlWBGn190vq20y6 +s5HcLRJMnmoJ0oEvsuultji7x0StqBELpgDIjwIDAQABAoIBAC3DX6FZtfU+jgtd +n1vGhk3wzu4o8S0+ow2S2UhiS3JDCMmxM4s+ky26Phl2nGvBGDWGttNl9MWOBN80 +x7bfgudR20M2yH70wp1n04c8vxJmvu/7ZtogYYrjvOg6qKuKyWtDQwZGjCErOiiU +eodku25qAhd6Khh7D9kh/q9EbSteYFXsqJiNrY4ul1+cROMZpHx63xY6AzPmkvSU +garkgY4rw9E71t7it2laWkRKVsd+kEjayritdEEliNMVFFtrGEgplYkmLxGf0HLi +ROFVMCLRW/P12JpXllFPrBb8rlPL4w1c/s+yStohT0K+o4FLXhsf/inxmfc9XnZX 
+dJm0k/ECgYEA47FpV1caMk+TNPfu318VCGRmjwpXdmkNaUiX2Uvs3xIKQ6KJmpo3 +sj0YjQEmQVz8s6geStvU1LdPxgsWZfbDt31M6SNwylh82ABQF1bZyrcMRxM8bHhe +bhDITM1dAn6aROkS1cBpfR9NJOFD850lmJvBGR9ORVBGyucTKH5uXxkCgYEAyTU0 +zQKW2aU3J7mTCC9cp+eSD3fubJpa3ML5XfQ8YNID4PsxWglNKPcOTC4yaSfxVmyk +S0WIQUazCstszQsvwy9YyHtpkMq+0lyCPvrYnmRV0zx5zT155V2zcEh/oj64eoee +W5kvJSs/x6vT+lEN0TDEJ2gKEaJuBt6JG6P04ecCgYBSNw1CbEEZSYJt7dhi74I4 +tYgSvjk2mFgvW/b4j2HIaksqgNYO7QCPa2AiCfg2Qc09UcceYKJI7Kfxaq97wc6J +wsSyqglgBvONSw+gXcvmVpIoV9nJkO0H8SdiFAUxkWVC3KXgaMmuVE8WsgBHRsb8 +g8EFwTgR7xqgyS8xv/U6gQKBgQCdUr/dSJgAx6EPq5degAHXu0ZGWAUR38MJ+F2Y +6/5FyhCEWoRlHP66+CmywTBjbnrSk5IG1PBL8ebOmu6QiJ2o5R1rbKvHLe/0dabV +bbfwaQ1+ZDvskZP9Fr3WHqnFh3shO2dDwcvOKTnuetj9UWEXXyUQltXAohubvWbB +OPqhowKBgB3t2oUSFJI8fSNQnQNkcespJTddr0oLEwgsIl4Q7rdFHLr+/c46svjJ +kPMtpfxDQvkgK2aWpS4OP0E2vSU/IfMEDmlypfKe2SaTtFehZSUwR4R1/ZhSL3iS +iMwJYgm98P27s4TEMdhlPNVJrj1FrD+4VrgpOsoM20EkZnTvel9s +-----END RSA PRIVATE KEY-----""" + +ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE = 'test_passphrase' + +ENCRYPTED_PKCS8_CRT_KEY = b"""-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIE6TAbBgkqhkiG9w0BBQMwDgQIT04zko6pmJICAggABIIEyL/79sqzTQ7BsEjY +ao2Uhh3//mpNJfCDhjSZOmWL7s4+161cEqpxrfxo4bHH8fkZ60VZUQP8CjwwQUhP +4iwpv2bYbQwzlttZwTC6s28wh7FRtgVoVPTwvXJa6fl2zAjLtsjwLZ/556ez9xIJ +67hxkIK2EzGQaeEKI1+vVF5EKsgKiPEmgspOBxRPoVWTx49NooiakGnwaBoDyTob +8FMr8mF1EheNQ4kl1bPrl+csD7PPnfbWUdNVvMljEhS3cYamQDPEWyAzvaIr0rHh +/6h80L/G2+0fensrTspWJcjX+XDBwQPk+YMic0TJ3KvkC7p2iNJhjNrjhQ+APZWq +xYrjfcmdK0RaaoqN+1zeE1P2kWIJx9CQZVMeGhVzzcmPwJPDnJFpkU+8cgTWnUr/ +Fh8YtDoDzLiAUcmV1Kk7LYtYPHuU8epuz5PYm49TbWzdS7PX5wqFAFmrVt5jysm4 +D/Ox0r4KV1t7D/1gc1WRIu8oUXkIglCHWNpTyMK0kFPctAf/ua+DUFRE4eSx3rsX +ZKIymdF9v/WF1Ud0tsNeudQbVeXWS6UCR8m/rqe81W4npQm/uqUNla+6yaYUmHlk +tvw/m6pt+jKhn0XIRkMwHrTpIaMVvInMg0xpkRuc7Xj5A7vNnkypZRNZJHgy7WWC +6GpOCWJOltYaNy7tmAkSUHJ6kNjXK5a4fi30HknEaqKjFTQNGvcybulJ3MXUzds0 +MJoTpvQfLzYQbMYZ/XRGND4lgeEbs29nWLPae8D5XlDeZQMin8EukPko8u8+YGbU +eWGOvDc+4/xrWrsq1i6R0uWq+Cyoql8oh0PNBlM04S7GAbu1pOD/tPcq/GNYcv/Q +vJcIz9KA3BNepq7tC8D88ggEvFjTsHKeW/OnuCxKducSna4Mq+GebU52tKjkLjFC +eLG4Vx0BY5xPH3gd7iyuAf7S+08BbinNZWjHLpdmR3vKK5YbLPiGSfcYQdClr6BK +9vNWH4TXmZMV+rWtfSeM/cbhCHwxT5Jx6N0OFAxOblQClWnUD79nGkEgn/GoY/Aj +FPNj8u2U/mJHgFHH3ClidYL9jJUvhGpTixB8nGgMjJ0wvFcp+5OysG3TsjqYkwR6 +RRNBmM+iLEUFTrMZYb+edHvGJsMEMZ0qvjmZDsfDz6ax5M9zH/ORFcGplgIec8kj +I106+dqAVVrv1CrBf2N/pxV0OXVhgl6ECe/Ee1xYC2e2CiEgUnQtedu8ekgPgp73 +tHcAiWMamLPTwXuL7jFtvWaQfkYBmrBdEx54+eZOfH/NgV3o8gbaWNHSxbfbwlXN +MvyJidZGkXU0DJtUUnO5i2S7ftKCdOzrrSA8HDTvxFUhxretYpF3NzPYpYkM7WJX +GM7bTMn37AWYqLZmdYYdjh1ZOH/wsM/3uxGBpyEyy4Urrr1ux7X1P0cL0O2P/72h +GRd499JLrRMrmmtQ4KrN7GCHdctvujhDP8zvmnaEyGVzg88XmDg50ZF3+8DmOOgX +EMZEYHO2Wi2uyFotFtZCuqoOJmGPPeGV8QrsRs82hnL1bcd6REUTWk0KsTt13lvF +WwMJugHFk5NQuse3P4Hh9smQrRrv1dvnpt7s4yKStKolXUaFWcXJvXVaDfR5266Y +p7cuYY1cAyI7gFfl5A== +-----END ENCRYPTED PRIVATE KEY-----""" + +UNENCRYPTED_PKCS8_CRT_KEY = b"""-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCy9Z5mwz1A6bPO +ItbDN64R63Pg5btDf8CkFimRXollLRf6aLdfVd6NMlw6bIO1uUZCzExZGRHJ0Kch +naQBo0485jVzGcPfRM8bJq0d5eoVBCEon1W7xLn7WGU+oz8TOTC+lgIxTWgRGT1r +WYvAoF+cXY3awzZ3cMh6G+PZ+zXP4dZFaqgajCC2m06gushHMUvwfg7oFbrmGxHW +GGBJUFDHzPplHVC60qiFZdp+QL70hY1s1s34D8qeECp/n+x1UU+tm4f140PPJ9bT +GDQBKosUWUz4KmNSxZfVSVYEafX3S+rbTLqzkdwtEkyeagnSgS+y66W2OLvHRK2o +EQumAMiPAgMBAAECggEALcNfoVm19T6OC12fW8aGTfDO7ijxLT6jDZLZSGJLckMI +ybEziz6TLbo+GXaca8EYNYa202X0xY4E3zTHtt+C51HbQzbIfvTCnWfThzy/Ema+ +7/tm2iBhiuO86Dqoq4rJa0NDBkaMISs6KJR6h2S7bmoCF3oqGHsP2SH+r0RtK15g 
+VeyomI2tji6XX5xE4xmkfHrfFjoDM+aS9JSBquSBjivD0TvW3uK3aVpaREpWx36Q +SNrKuK10QSWI0xUUW2sYSCmViSYvEZ/QcuJE4VUwItFb8/XYmleWUU+sFvyuU8vj +DVz+z7JK2iFPQr6jgUteGx/+KfGZ9z1edld0mbST8QKBgQDjsWlXVxoyT5M09+7f +XxUIZGaPCld2aQ1pSJfZS+zfEgpDoomamjeyPRiNASZBXPyzqB5K29TUt0/GCxZl +9sO3fUzpI3DKWHzYAFAXVtnKtwxHEzxseF5uEMhMzV0CfppE6RLVwGl9H00k4UPz +nSWYm8EZH05FUEbK5xMofm5fGQKBgQDJNTTNApbZpTcnuZMIL1yn55IPd+5smlrc +wvld9Dxg0gPg+zFaCU0o9w5MLjJpJ/FWbKRLRYhBRrMKy2zNCy/DL1jIe2mQyr7S +XII++tieZFXTPHnNPXnlXbNwSH+iPrh6h55bmS8lKz/Hq9P6UQ3RMMQnaAoRom4G +3okbo/Th5wKBgFI3DUJsQRlJgm3t2GLvgji1iBK+OTaYWC9b9viPYchqSyqA1g7t +AI9rYCIJ+DZBzT1Rxx5gokjsp/Fqr3vBzonCxLKqCWAG841LD6Bdy+ZWkihX2cmQ +7QfxJ2IUBTGRZULcpeBoya5UTxayAEdGxvyDwQXBOBHvGqDJLzG/9TqBAoGBAJ1S +v91ImADHoQ+rl16AAde7RkZYBRHfwwn4XZjr/kXKEIRahGUc/rr4KbLBMGNuetKT +kgbU8Evx5s6a7pCInajlHWtsq8ct7/R1ptVtt/BpDX5kO+yRk/0WvdYeqcWHeyE7 +Z0PBy84pOe562P1RYRdfJRCW1cCiG5u9ZsE4+qGjAoGAHe3ahRIUkjx9I1CdA2Rx +6yklN12vSgsTCCwiXhDut0Ucuv79zjqy+MmQ8y2l/ENC+SArZpalLg4/QTa9JT8h +8wQOaXKl8p7ZJpO0V6FlJTBHhHX9mFIveJKIzAliCb3w/buzhMQx2GU81UmuPUWs +P7hWuCk6ygzbQSRmdO96X2w= +-----END PRIVATE KEY-----""" + +EXPECTED_IMD_TEST_SUBJS = ["IMD3", "IMD2", "IMD1"] + +TEST_X509_IMDS = b"""Junk +-----BEGIN CERTIFICATE----- +MIIBhDCCAS6gAwIBAgIGAUo7hO/eMA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMT +BElNRDIwHhcNMTQxMjExMjI0MjU1WhcNMjUxMTIzMjI0MjU1WjAPMQ0wCwYDVQQD +EwRJTUQzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKHIPXo2pfD5dpnpVDVz4n43 +zn3VYsjz/mgOZU0WIWjPA97mvulb7mwb4/LB4ijOMzHj9XfwP75GiOFxYFs8O80C +AwEAAaNwMG4wDwYDVR0TAQH/BAUwAwEB/zA8BgNVHSMENTAzgBS6rfnABCO3oHEz +NUUtov2hfXzfVaETpBEwDzENMAsGA1UEAxMESU1EMYIGAUo7hO/DMB0GA1UdDgQW +BBRiLW10LVJiFO/JOLsQFev0ToAcpzANBgkqhkiG9w0BAQsFAANBABtdF+89WuDi +TC0FqCocb7PWdTucaItD9Zn55G8KMd93eXrOE/FQDf1ScC+7j0jIHXjhnyu6k3NV +8el/x5gUHlc= +-----END CERTIFICATE----- +Junk should be ignored by x509 splitter +-----BEGIN CERTIFICATE----- +MIIBhDCCAS6gAwIBAgIGAUo7hO/DMA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMT +BElNRDEwHhcNMTQxMjExMjI0MjU1WhcNMjUxMTIzMjI0MjU1WjAPMQ0wCwYDVQQD +EwRJTUQyMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAJYHqnsisVKTlwVaCSa2wdrv +CeJJzqpEVV0RVgAAF6FXjX2Tioii+HkXMR9zFgpE1w4yD7iu9JDb8yTdNh+NxysC +AwEAAaNwMG4wDwYDVR0TAQH/BAUwAwEB/zA8BgNVHSMENTAzgBQt3KvN8ncGj4/s +if1+wdvIMCoiE6ETpBEwDzENMAsGA1UEAxMEcm9vdIIGAUo7hO+mMB0GA1UdDgQW +BBS6rfnABCO3oHEzNUUtov2hfXzfVTANBgkqhkiG9w0BAQsFAANBAIlJODvtmpok +eoRPOb81MFwPTTGaIqafebVWfBlR0lmW8IwLhsOUdsQqSzoeypS3SJUBpYT1Uu2v +zEDOmgdMsBY= +-----END CERTIFICATE----- +Junk should be thrown out like junk +-----BEGIN CERTIFICATE----- +MIIBfzCCASmgAwIBAgIGAUo7hO+mMA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMT +BHJvb3QwHhcNMTQxMjExMjI0MjU1WhcNMjUxMTIzMjI0MjU1WjAPMQ0wCwYDVQQD +EwRJTUQxMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAI+tSJxr60ogwXFmgqbLMW7K +3fkQnh9sZBi7Qo6AzUnfe/AhXoisib651fOxKXCbp57IgzLTv7O9ygq3I+5fQqsC +AwEAAaNrMGkwDwYDVR0TAQH/BAUwAwEB/zA3BgNVHSMEMDAugBR73ZKSpjbsz9tZ +URkvFwpIO7gB4KETpBEwDzENMAsGA1UEAxMEcm9vdIIBATAdBgNVHQ4EFgQULdyr +zfJ3Bo+P7In9fsHbyDAqIhMwDQYJKoZIhvcNAQELBQADQQBenkZ2k7RgZqgj+dxA +D7BF8MN1oUAOpyYqAjkGddSEuMyNmwtHKZI1dyQ0gBIQdiU9yAG2oTbUIK4msbBV +uJIQ +-----END CERTIFICATE-----""" + +PKCS12_BUNDLE = pkg_resources.resource_string( + 'octavia.tests.unit.common.sample_configs', 'sample_pkcs12.p12') + +X509_CA_CERT_CN = 'ca.example.org' + +X509_CA_CERT_SHA1 = '3d52837151662dbe7c01a97fad0aab5f61f78280' + +X509_CA_CERT = b"""-----BEGIN CERTIFICATE----- +MIIFoDCCA4igAwIBAgIJAPBfmRtfTNF2MA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV +BAYTAlVTMQ8wDQYDVQQIDAZPcmVnb24xEjAQBgNVBAoMCU9wZW5TdGFjazEQMA4G +A1UECwwHT2N0YXZpYTEXMBUGA1UEAwwOY2EuZXhhbXBsZS5vcmcwHhcNMTkwMjE0 
+MDQ1MjQwWhcNMjkwMjExMDQ1MjQwWjBdMQswCQYDVQQGEwJVUzEPMA0GA1UECAwG +T3JlZ29uMRIwEAYDVQQKDAlPcGVuU3RhY2sxEDAOBgNVBAsMB09jdGF2aWExFzAV +BgNVBAMMDmNhLmV4YW1wbGUub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAshn5CRt949+edmECCpaQtrCnjiA8KFsNCb9Dv70LkK9XbHtFkJuUgJR1 +VE1OhGK057k/z1gEYUIFxw8s9wKMaAxta7CwxkpJR8oMa60nx4hbNLF1Q5xO0P40 +YW/fSxuBmztI8EtYGUCGDLpktUTrewWu68nnWV2Wyx5B69Z14qrDGk7b6VH2atWD +qJwDGrPkekNSUiE2Z/cCcTDH2t1jqtlGsiS8tDDH4h35ywm6fY3V/11hHT76dxDz +LhrLa2aVXeVtqGMTOHkXOFEwcQNfh78z7qBOZy9O8bCCepCmJ56ff9E3kXd1jam2 +6TiZikOVWhDOv668IosYzCU2gllKYG++7PITb+12VaVqJwWf8G9rFQ0xptZuXmHE +BTFCzxWxK8vSs85aBYWFd8eLmWrEZyEk1JfD7jU4OZm9BK3qoRvfwDwzPnmZIpCt +YPhYVi5F1W/w3Iw1mTqxkEMuy6mlMn14nKmA2seSAkPSJ+b5C92dqhwN1cvgUVhL +bIl3Yurj3ayvT+vRCYadQZJif+e/dxUrcRZ7oPpV23QxVgEZ+Yd+++3XA09LSdhQ +lLl/3/I+MNvCxHEKx4imCGLAmMOFL7u9Af/delFRVKDXferYb/HIxkiJGJco96J5 +RvYsXGr2wTCQcCRZjv1+LlAlKUAgJMeVkszKQ56pCI7tvyB2gG8CAwEAAaNjMGEw +HQYDVR0OBBYEFN/4bLQKWNMwoLzQ2du9NT33x7+DMB8GA1UdIwQYMBaAFN/4bLQK +WNMwoLzQ2du9NT33x7+DMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGG +MA0GCSqGSIb3DQEBCwUAA4ICAQB2nU0y43nVDKgL1PPIdVDnYa2vjH+DBkSAVaTv +73OKdimh4Kzy0YYlrKzeNiE2k4Q/nUjTbAN13DvQbjRFQZx17L2Gckv+cMFyB7yb +vlsBeySarJKhYeKhlLrd20Qn7GiyHGkXUshnSVQm9/HFlegoMMjQyExGsA1PYU6W +mycNYv5yWTLgbaFNfIYjL6AcIVtxMMZoD4XgpVpETwNIoble+B3sYQ05dTYxMyT0 +aHjafUPedasqXFoo5TJCJ7Wcq92dBwUXpgkHsf3PPKy8VVukWUaCP9ECAxHLmEPj +0tyElkvy55lauzVing7F/uRF6DIlRz6fH0y92qFJ5/t46L9C3V23+zIF80CJeZ21 +/goal0NlAyjhI4zfpwwAUeqnAElncNhFcmTWHLyTGQyA4rYHDl5fZIhk6MFYdLwi +ml96m+T1z8iPqmrTtd6P3SVmEkRvSt8L7ItL82VcDELUCXJoSKEm5im84yEiPdUs +emQtJbioTM4+Vze32U6MSznelKiK3dkNPnNiKA6xsjxNC+Hp2LzcANg3/SUUC9ea +pDEMmP7TJMJ3dG63RtAzQiGfRO18BIVOrRUfQpR32FkrYd9wCE02cnv0QZzY9NYt +6hAlAa6Motve8UFewoO4pNknj3MBEN+64wDzHaP6VPysNJwrAlgaHfGDU6xJffAd +uCWDmw== +-----END CERTIFICATE-----""" + +X509_CA_CRL = b"""-----BEGIN X509 CRL----- +MIIC7zCB2AIBATANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJVUzEPMA0GA1UE +CAwGT3JlZ29uMRIwEAYDVQQKDAlPcGVuU3RhY2sxEDAOBgNVBAsMB09jdGF2aWEx +FzAVBgNVBAMMDmNhLmV4YW1wbGUub3JnFw0xOTAyMTkwMjAxNTlaFw0xOTAzMjEw +MjAxNTlaMBUwEwICEAAXDTE5MDIxOTAyMDAyMlqgMDAuMB8GA1UdIwQYMBaAFN/4 +bLQKWNMwoLzQ2du9NT33x7+DMAsGA1UdFAQEAgIQADANBgkqhkiG9w0BAQsFAAOC +AgEAcPtYSLEkJwvqaAfMGXwI2uTTKWURqtwfcBMYdVF1u2xsBsrKR6ogpBjzc1sX +A5WN9Tz5TXPVd38DTEGlCGLQ7wZ8wwYAR2sArHjw/zcsOJcFVTWtpX+2UAbpqis9 +rBq7K6TF2m1fYb0RJg0AUbja/wfpghoEjfFx8FjIa8WAqqazyWR9vslm7kSoEgr+ +MDV7agVK+h1n68hdLA9osUyPaAobus5FcVlXePPp5Ab8/vx1b2/Y+VXHaJXTZCin +FLQaxaH0PsMCKN/T52GPMRKa2Cc6IEaDFgE1ZlA8nP5t2tA7MFORI8dix6jIzBJD +W2CRf1Oxkrd3iqs1IljtlKHKMUTS67lfA9EwKlt8dR+KwH/WT23LSIoC9NnS3DP+ +aT3t52soCpjXbfl8fgs62bome1/88BoNIa2T1Mj6F0aPvepLsFB/UrXWhADFj+DX +7WclP62BNBCTlUNvMF0eC9o7r5xeazo53KH1KI62qlFrz5MbRCG8g0JtTFqsMJld +phYuPfZekoNbsOIPDTiPFniuP2saOF4TSRCW4KnpgblRkds6c8X+1ExdlSo5GjNa +PftOKlYtE7T7Kw4CI9+O2H38IUOYjDt/c2twy954K4pKe4x9Ud8mImpS/oEzOsoz +/Mn++bjO55LdaAUKQ3wa8LZ5WFB+Gs6b2kmBfzGarWEiX64= +-----END X509 CRL-----""" + +# An invalid certificate due to no subject and no subjectAltName +NOCN_NOSUBALT_CRT = b"""-----BEGIN CERTIFICATE----- +MIIE4zCCAsugAwIBAgIUTo7POpWDLecy0B7fY2OAbLztmswwDQYJKoZIhvcNAQEL +BQAwADAgFw0yMzExMjIyMjE4MzBaGA8yMTIzMTAyOTIyMTgzMFowADCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAPClqkTqRyjlp+LXE4oElYGvg7y710yZ +pR96TNqgugXxNLmIgzx2A3wWJ77z6qn3XoTFEXNnT6f4WrVr1Eh5/Zd1ioyj1r0G +hIuEWMkm42UsTv+bId6BkXrr4wTgXgU+ss82dmRsYArV1b+c+89oYlEjQorhQ6eT +2aWnt1XJbtpgRYCy5DsBKg1Iq63QRXp5svEr4iX+jAiDCQnBBLhrkfMUf8zuMCev +Ij5119OGY5ihLuopIZi6OurA0fyN9e2MFlnYmWcxSZu49+6yBnXGmhmev3qzWj1+ 
+9DA50Pqu+NS9rVpYBNhhKuBTBxaTeZPDAl67DC2Mc8TFI1OfpiOwb+w/ewRYznry +ZceASFovPFsAlUddwu/94sxgUSCmSE81Op+VlXS0LRgg8o/OZHp/eFsG2NM0OGAH +v2uJly4OTPTd/kT50zViX3wJlRYIH+4szSjpbNXE0aF+cqQ56PBrGEe6j+SaGZEV +6k4N9WMHNipffkq10N2d6fkRQjAD9B7gHOB6AAQ1mxoZtgchCKL7E8FuA803Yx8B +a7h9J65SJq9nbr0z4eTscFZPulW8wMZT/ZeooQJJWqvA+g2FZf0dExk46gqU3F2F +IRMvfGzSbIQF7bp/Yj4fLMUwLVaYv6NNdzhI+/eC0wVDWwbQ2rZkkvcvysSteGT4 +IDuFKuIWt4UnAgMBAAGjUzBRMB0GA1UdDgQWBBSEDhho9+R5JhsAZlQ0wU4Rjbqn +OjAfBgNVHSMEGDAWgBSEDhho9+R5JhsAZlQ0wU4RjbqnOjAPBgNVHRMBAf8EBTAD +AQH/MA0GCSqGSIb3DQEBCwUAA4ICAQAZ8E7l2H56z08yJiAa5DFmT8jmBHUCoJlM +HiZSn04mtzZEfho/21Zdnb2Pa2SDrRkVXmrO+DebO5sK1Kn/EFC9P3SAOeZ3LB+m +bJUX4WGEJ+7fv9uVRwSRfF21Lxo9QFgSVfQlQAhmXcKCE/8VtKB34oOZRhR8tAxH +I4VvHUPyCT8ZwNhofP2TYHEjRi/4fsXueBH4kBHDy0/pyHMy1b5crWQAjlOhFXhW ++qauSXkbIXNXd+wX23UF2uQ8YH819V7cHAidx9ikwn6HC5hxXjzMjViDwI451V6Q +eAgrVuKTgx6cdnd2mgra8k7Bd2S+uTxwcrzVVzNfF+D2Al43xgeFF02M8Wp6ZDsh +3/mJ7NOJGTJbXLRP+u73PEh1mGGU8H2QoGvaRO7R599sbmU4LedWX/VJc2GXojzF +ibPWaMkKtX31QiOeNiLTMSkUWiyDTvzFW2ErqyzARv/yYFcEixEFl1GV8Bqb+ujj +cxO5/y9cK6aM+qPb/FrXivXQsNArrpE3T1C54RvhUWOi+kyCiV/mDIG+oOp7sfZ5 +tBPenwWB2/LGS4rS67jZdwyIC5UbVySaVxtqJrdQXTRNjGfj2m963CHbiaQLSoSF +2Zh2e8W4ixo6k6mhih2YjZVtpHrXyzNEtHT9HpPHDeElVcWteIceZMI2Ah0C6Ggj +uTbEBYW85Q== +-----END CERTIFICATE-----""" + +# A certificate with no subject but with Subject Alternative Name +NOCN_SUBALT_CRT = b"""-----BEGIN CERTIFICATE----- +MIIFAjCCAuqgAwIBAgIUNjJqSdaJ9FsivfRHbXpdmcZgJR4wDQYJKoZIhvcNAQEL +BQAwADAgFw0yMzExMzAyMTQyNTVaGA8yMTIzMTEwNjIxNDI1NVowADCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAKA8+0iJzx51kTufmIpxGCM/KUFWdJ0U +MmOPN1NmySNaj6nGI/Ix6m13A5SaezhbRlJvEwN7Hqg+tl+fqu0RgtQOXfBDMiJm ++kAl0CQiOH7XU41P6fyk/QL8WF3VVGBtawTWn3x9Jw7Itd/zFr+aepQOj5LIwcx1 +ncHXreWdMLqDa7PpW1Ru6BW0FKVxX6WYQr2PI08nEIxu6DzLcaLHktRyNYg7r9X9 +a0tLZcp5MCBG3h3EtVgUkL9qw8q6acJpDGBF7ssRTNDf3QUSg0jrfzkD9WJCi631 +tefdAkDNIZXGZggbWsDGPseX4JG9p7WGzPx5QY2DkMqDJqi6FoS35tT+WNcY0n9V +oBQXtXFV/AqOC070NwrhxsNA3cBbpRqEQYJsIDaXq0cmFR4aoDWk4OXqs7I+dpyi +MFeRHEU7h4DpwzaOmOyaSmzsZqEMG2lsdJZmC+fIFkyKtP0BQv/movWY25oJSpF5 +4Q/PdwKn6PFO2bRVSLStlrhpuqXw2+CzlQT6YCAz+ajqDnn/w8NIrT6y+DiFd+kt +WCed/o4ZBzsxOexRph+t0bdkTmR8PNpnHwcxzVN33gCSc6Q5DW1/M2V8VGYqnPd/ +taEaMlHm/wQ3y2/aH/tkyq85PM5tqCbUscD4TUZ7R6kb0k83Ak2iZOM5RHb4zc4p +mreNKLPfgrQ7AgMBAAGjcjBwMB0GA1UdDgQWBBT6/yXwr+5BhORB3cUkrrSgnreq +NTAfBgNVHSMEGDAWgBT6/yXwr+5BhORB3cUkrrSgnreqNTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdEQEB/wQTMBGCD3d3dy5leGFtcGxlLmNvbTANBgkqhkiG9w0BAQsF +AAOCAgEAjxrBZ3v6wK7oZWvrzFV+aCs+KkoUkK0Y61TM4SCbWIT8oinN68nweha5 +p48Jp+hSBHEsj9h0opHezihduKh5IVM7KtbcXn1GSeN2hmyAAPm/MbxyD+l+UEfB +G/behQcsYdVXXog7nwD2NXINvra8KGPqA7n/BnQ7RsxBXHVa9+IHF2L4LpbcvG7G +Ci/jmLSBk7Gi/75TsFphHAhfomovfnnNykfJ0u99ew14MxVmRWbZ+rbpMsUL/AhV +h8VujkfUs1hFbdxePTVyHwplqH65yjzzQ18q8CX7kMGi9sz2k8xJS04Nz0x1l7xQ +JDuhFMDDrcyb7vAqG7BHQ9zXWJ3IkTg9WrbfkOyTqQsJeInToWQybmr/7lY3PmC2 +e/X0zNABF+ypX29RrKzWL+KfpbslysZIEPLEW28qAh3KOyml1du+lbDSNtcHxQcT +bnvz2rQlAYE70Ds3znLLuMXbq8GtS+h8EYH1jxcjZD9DAPhxi37v8QSY/ABIBGE2 +lfbhbzZ5OWQLMA0L1tbTg7bG5JGoi/GmPl4oA+Dbz3+8Yd/v8XJUzQgI221tx+T+ +isog5o96m62pW6hd1R+eZjVAOVMT/OxecJ9eIVva8EiZwu1Ja9arBkuhIBVK2htm +PVi6J1iFUrPZG+QrK/ZePo4xE06Lm31dr8pxdZ7Y860owwIuHfA= +-----END CERTIFICATE-----""" + +NOCN_SUBALT_KEY = b"""-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCgPPtIic8edZE7 +n5iKcRgjPylBVnSdFDJjjzdTZskjWo+pxiPyMeptdwOUmns4W0ZSbxMDex6oPrZf +n6rtEYLUDl3wQzIiZvpAJdAkIjh+11ONT+n8pP0C/Fhd1VRgbWsE1p98fScOyLXf +8xa/mnqUDo+SyMHMdZ3B163lnTC6g2uz6VtUbugVtBSlcV+lmEK9jyNPJxCMbug8 
+y3Gix5LUcjWIO6/V/WtLS2XKeTAgRt4dxLVYFJC/asPKumnCaQxgRe7LEUzQ390F +EoNI6385A/ViQout9bXn3QJAzSGVxmYIG1rAxj7Hl+CRvae1hsz8eUGNg5DKgyao +uhaEt+bU/ljXGNJ/VaAUF7VxVfwKjgtO9DcK4cbDQN3AW6UahEGCbCA2l6tHJhUe +GqA1pODl6rOyPnacojBXkRxFO4eA6cM2jpjsmkps7GahDBtpbHSWZgvnyBZMirT9 +AUL/5qL1mNuaCUqReeEPz3cCp+jxTtm0VUi0rZa4abql8Nvgs5UE+mAgM/mo6g55 +/8PDSK0+svg4hXfpLVgnnf6OGQc7MTnsUaYfrdG3ZE5kfDzaZx8HMc1Td94AknOk +OQ1tfzNlfFRmKpz3f7WhGjJR5v8EN8tv2h/7ZMqvOTzObagm1LHA+E1Ge0epG9JP +NwJNomTjOUR2+M3OKZq3jSiz34K0OwIDAQABAoICABC+7r/g7w1O2hOyFR36vbwJ +QMV8RImZ774p3G1R45lXQIZMl7sa7lXsRyqDjncQSuQYiZMmjcilbSfHJvTJjLOe +oMCYNSgVPPfxO7RbAy52UFwHSvvFPk/OkWmU/tFo/fMuftJive80mJVD8U+q1D6e +2vBLHL3CWO9GG/1QFSSY0Wum6o2DXavO+w1jMMy8gdUPnXALNBaJDKo11LVfR//9 +w4xuOG0To9/ljEjBq37kCRhxU0ZWN95ZSQbpvl273rg89rywHSgDDTUXfzLisZQC +zuUq8TAH6q/FkBO3nFfruQQF39EfprXzMFvqxxkYclm8TlZ8tmgDlsmxUOMj2PKl +H9kWDC5YkynfkxltKgiEJ9Kc3pZnfaScABnz0GySsZN71bUbr7fBqwH0LhbZiQqa +b9pWcbyKuGFJ56gVsokVHcpKnKmKHedtmL33oJzI3iWYZls/mPejmkwIWt1i3F7c +ZnhDJJp3gWgzZzSyV5OjZ05SIrM9er9r+WqS75ns7vKEzhgzpHdZuUR2jNNVu/EA +rCnsebUtemr0tDYxhI5BcPgj3fzq02u7plJUFIwlPrpMxZ8VBJgoSwT7Di5qpHnt +LmiGoqRM+vVXiWshops1I7q7zLCgvP+Difi4KNjap/lBsj7hiB7alZTrMVVAXiBr +Ia++3L38ga5DJ+SHDzjBAoIBAQDNUG4URQD/j0E3pS4zn4wezSp0wOTKKIw2Z6oU +02reZq9uFLIt+/74DVy3NZm3tBgeSakYUZeDB8zpog3mGpkPAHpwObB/fPbMYmst +cCnXYDf9Uvb7k287a0GIbCOXwkHSrgRwznAZ4EQp6E0nZSoLbyZiC+uhYEVZgQQo +JswsjKCSaL7o/4XXQOi6Mdsd4BX7aVVKjYrQZ8TkkCsMYFdQMSL1fB8DW4Q+Ixco +6BGXPoaav/2XOb0HGBmrXX/yqllA8rw0U7RNLgsE7gZIlltGeTsQMeo/+w5+LJKt +HOhhEUHITJkRZ7P/S8OdXXoVCNiUzCxGy/LrHW/AWu0t1WWbAoIBAQDHy9Allaod +WDxdbe5G5ke03WFcPoVAxOWu0mloaFdbd7Ec39y4vr1hxRZz+SEUdouCie1nVB3P +sj2lPJ44qKS8triqNCuEalpMHaTBdIyjItqh1l66fLA1/FYxAM7cxcz5rBVK2zvf +KrT3LNmzVpbltl3nPQhvAKEV8zEdSVze6Z0K6QbZP8WfPtCiQYMAjeNu48AIp/+t +pxJbkcmWLIYixfiJbHfe0LUu/P3rk0WDCHnheVzOTSE8XzGqnIxyv6w4rYOl9IeT +SnYublICJHOTp6gKuiIieGD7TC14DB8vYbSc0+opIvYYItcS//laLOD+eLUgZx5K +Wb4ubbosnyXhAoIBAFGzQsqgFuCbQemBupviTmDnZZCmPaTQc9Mmd0DoTGuJ0x9r +7udrkq9kqdNh6fR3Hu3WhApgVXlXvkvuJ7e8N9IHb7F+02Q39wGn3FxteMjyyfTt +ccj0h1vOt3oxBgzayVSr2KqHC4bQfm9quGEH2a5JIa38blx+MbqHI39SyQalQzRf +qDCRldHtS27kbfw6cqTj6oPLRUTfNjN5xxeassP/eZjUNocggMQ1NH8bsfxMbkXg +RmpKGJVdGsHdaA/Jh9DXhtsPv/zCaLIiga+a3WFy1nUAV+Xz4nWFCS0IBtSxiErL +aFHLwY3CuWnCi9UY+w5jHO9jMxwqT5Ds3drSQycCggEBALoewFEy4d0iRGGYtb6w +aJ4xGLBwwXt7sKcx9eXARZi8oG5QkHI9pXg9vFPfAZTpdb7uNAzszDSeS1TxakdH +uubdpJtRrDRXSrTbbI6Wvyh9oIPgijBZVWGFJtnRceMyFGeFifRI1LZpN1mHG2o4 +QKvPPhzau0+Em4syGE+69tvlblkqiSm6gaN+RabRNnM+ul6jpVGrBsBDAhPxdIQE +CBS+rW9/bw9PB2m1XemlML0HGVsUzoKUUWDHISJZYXDH42yNHzVq3R014XARby31 +vQEQzrbnfEL2NwoChdzuFeLytujddKZLnksPsaFOeYAqjJIh6kE8Lnh+r27a4vMM +cqECggEAAx1DVI43AMBfSbAs5C41vjRdjMrZtxfKIpFjj1whGj/JzLKdMdqqH+Ai ++R6NI7IB88pGHlCOmdEpfbr4Cq1ZnizA3yLV9sluMz1bpHlIDsCIp+1VkQYKfsEv +upZy82MtfGtG3BSLn+GCTzLJcTN6KINg98Xivp/WsRAEvwT/w1o4iJMgzKmTET2I +UGJfZcF0WeSVo34FNArfXyfXPvPV7mi08Z6fQuUnFvH9tGZs5Y9mUUSgXXEDSjKY +ZHliqmDNGub7rMy6/0wDOWiS4pi/w8FeCyBvbx23rj6i+FLO6GK+5B7TaCxjOVbk +SYVTfCHpvJIgjRkRMP2yZCk3g6T4XA== +-----END PRIVATE KEY-----""" diff --git a/octavia/tests/common/sample_data_models.py b/octavia/tests/common/sample_data_models.py new file mode 100644 index 0000000000..6e7407465a --- /dev/null +++ b/octavia/tests/common/sample_data_models.py @@ -0,0 +1,696 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import datetime + +from octavia_lib.api.drivers import data_models as driver_dm +from octavia_lib.common import constants as lib_consts +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.common import data_models + + +class SampleDriverDataModels: + + def __init__(self): + self.project_id = uuidutils.generate_uuid() + self.lb_id = uuidutils.generate_uuid() + self.ip_address = '192.0.2.30' + self.ip_address2 = '192.0.2.31' + self.port_id = uuidutils.generate_uuid() + self.network_id = uuidutils.generate_uuid() + self.subnet_id = uuidutils.generate_uuid() + self.qos_policy_id = uuidutils.generate_uuid() + self.lb_name = uuidutils.generate_uuid() + self.lb_description = uuidutils.generate_uuid() + self.flavor_id = uuidutils.generate_uuid() + self.flavor_profile_id = uuidutils.generate_uuid() + + self.listener1_id = uuidutils.generate_uuid() + self.listener2_id = uuidutils.generate_uuid() + self.default_tls_container_ref = uuidutils.generate_uuid() + self.sni_container_ref_1 = uuidutils.generate_uuid() + self.sni_container_ref_2 = uuidutils.generate_uuid() + self.client_ca_tls_certificate_ref = uuidutils.generate_uuid() + self.client_crl_container_ref = uuidutils.generate_uuid() + self.pool_sni_container_ref = uuidutils.generate_uuid() + self.pool_ca_container_ref = uuidutils.generate_uuid() + self.pool_crl_container_ref = uuidutils.generate_uuid() + + self.vip_sg_ids = [uuidutils.generate_uuid(), + uuidutils.generate_uuid()] + + self.pool1_id = uuidutils.generate_uuid() + self.pool2_id = uuidutils.generate_uuid() + + self.hm1_id = uuidutils.generate_uuid() + self.hm2_id = uuidutils.generate_uuid() + + self.member1_id = uuidutils.generate_uuid() + self.member2_id = uuidutils.generate_uuid() + self.member3_id = uuidutils.generate_uuid() + self.member4_id = uuidutils.generate_uuid() + + self.l7policy1_id = uuidutils.generate_uuid() + self.l7policy2_id = uuidutils.generate_uuid() + + self.l7rule1_id = uuidutils.generate_uuid() + self.l7rule2_id = uuidutils.generate_uuid() + + self.created_at = datetime.datetime.now() + self.updated_at = (datetime.datetime.now() + + datetime.timedelta(minutes=1)) + + self._common_test_dict = { + lib_consts.PROVISIONING_STATUS: constants.ACTIVE, + lib_consts.OPERATING_STATUS: constants.ONLINE, + lib_consts.PROJECT_ID: self.project_id, + constants.CREATED_AT: self.created_at, + constants.UPDATED_AT: self.updated_at, + constants.ENABLED: True} + + # Setup Health Monitors + self.test_hm1_dict = { + lib_consts.ID: self.hm1_id, + lib_consts.TYPE: constants.HEALTH_MONITOR_PING, + lib_consts.DELAY: 1, lib_consts.TIMEOUT: 3, + lib_consts.FALL_THRESHOLD: 1, lib_consts.RISE_THRESHOLD: 2, + lib_consts.HTTP_METHOD: lib_consts.HEALTH_MONITOR_HTTP_METHOD_GET, + lib_consts.URL_PATH: '/', lib_consts.EXPECTED_CODES: '200', + lib_consts.NAME: 'hm1', lib_consts.POOL_ID: self.pool1_id, + lib_consts.HTTP_VERSION: 1.0, lib_consts.DOMAIN_NAME: None, + lib_consts.PROJECT_ID: self.project_id} + + self.test_hm1_dict.update(self._common_test_dict) + + self.test_hm2_dict = copy.deepcopy(self.test_hm1_dict) + 
self.test_hm2_dict[lib_consts.ID] = self.hm2_id + self.test_hm2_dict[lib_consts.NAME] = 'hm2' + self.test_hm2_dict.update( + {lib_consts.HTTP_VERSION: 1.1, + lib_consts.DOMAIN_NAME: 'testdomainname.com'}) + + self.db_hm1 = data_models.HealthMonitor(**self.test_hm1_dict) + self.db_hm2 = data_models.HealthMonitor(**self.test_hm2_dict) + + self.provider_hm1_dict = { + lib_consts.ADMIN_STATE_UP: True, + lib_consts.DELAY: 1, lib_consts.EXPECTED_CODES: '200', + lib_consts.HEALTHMONITOR_ID: self.hm1_id, + lib_consts.HTTP_METHOD: lib_consts.HEALTH_MONITOR_HTTP_METHOD_GET, + lib_consts.MAX_RETRIES: 2, + lib_consts.MAX_RETRIES_DOWN: 1, + lib_consts.NAME: 'hm1', + lib_consts.POOL_ID: self.pool1_id, + lib_consts.PROJECT_ID: self.project_id, + lib_consts.TIMEOUT: 3, + lib_consts.TYPE: constants.HEALTH_MONITOR_PING, + lib_consts.URL_PATH: '/', + lib_consts.HTTP_VERSION: 1.0, + lib_consts.DOMAIN_NAME: None} + + self.provider_hm2_dict = copy.deepcopy(self.provider_hm1_dict) + self.provider_hm2_dict[lib_consts.HEALTHMONITOR_ID] = self.hm2_id + self.provider_hm2_dict[lib_consts.NAME] = 'hm2' + self.provider_hm2_dict.update( + {lib_consts.HTTP_VERSION: 1.1, + lib_consts.DOMAIN_NAME: 'testdomainname.com'}) + + self.provider_hm1 = driver_dm.HealthMonitor(**self.provider_hm1_dict) + self.provider_hm2 = driver_dm.HealthMonitor(**self.provider_hm2_dict) + + # Setup Members + self.test_member1_dict = { + lib_consts.ID: self.member1_id, + lib_consts.POOL_ID: self.pool1_id, + constants.IP_ADDRESS: '192.0.2.16', + lib_consts.PROTOCOL_PORT: 80, lib_consts.WEIGHT: 0, + lib_consts.BACKUP: False, + lib_consts.SUBNET_ID: self.subnet_id, + lib_consts.PROJECT_ID: self.project_id, + lib_consts.NAME: 'member1', + lib_consts.OPERATING_STATUS: lib_consts.ONLINE, + lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, + constants.ENABLED: True, + constants.CREATED_AT: self.created_at, + constants.UPDATED_AT: self.updated_at, + lib_consts.MONITOR_ADDRESS: '192.0.2.26', + lib_consts.MONITOR_PORT: 81, + lib_consts.VNIC_TYPE: lib_consts.VNIC_TYPE_NORMAL} + + self.test_member1_dict.update(self._common_test_dict) + + self.test_member2_dict = copy.deepcopy(self.test_member1_dict) + self.test_member2_dict[lib_consts.ID] = self.member2_id + self.test_member2_dict[constants.IP_ADDRESS] = '192.0.2.17' + self.test_member2_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.27' + self.test_member2_dict[lib_consts.NAME] = 'member2' + self.test_member2_dict[ + lib_consts.VNIC_TYPE] = lib_consts.VNIC_TYPE_DIRECT + + self.test_member3_dict = copy.deepcopy(self.test_member1_dict) + self.test_member3_dict[lib_consts.ID] = self.member3_id + self.test_member3_dict[constants.IP_ADDRESS] = '192.0.2.18' + self.test_member3_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.28' + self.test_member3_dict[lib_consts.NAME] = 'member3' + self.test_member3_dict[lib_consts.POOL_ID] = self.pool2_id + + self.test_member4_dict = copy.deepcopy(self.test_member1_dict) + self.test_member4_dict[lib_consts.ID] = self.member4_id + self.test_member4_dict[constants.IP_ADDRESS] = '192.0.2.19' + self.test_member4_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.29' + self.test_member4_dict[lib_consts.NAME] = 'member4' + self.test_member4_dict[lib_consts.POOL_ID] = self.pool2_id + + self.test_pool1_members_dict = [self.test_member1_dict, + self.test_member2_dict] + self.test_pool2_members_dict = [self.test_member3_dict, + self.test_member4_dict] + + self.db_member1 = data_models.Member(**self.test_member1_dict) + self.db_member2 = data_models.Member(**self.test_member2_dict) + self.db_member3 = 
data_models.Member(**self.test_member3_dict) + self.db_member4 = data_models.Member(**self.test_member4_dict) + + self.db_pool1_members = [self.db_member1, self.db_member2] + self.db_pool2_members = [self.db_member3, self.db_member4] + + self.provider_member1_dict = { + lib_consts.ADDRESS: '192.0.2.16', + lib_consts.ADMIN_STATE_UP: True, + lib_consts.MEMBER_ID: self.member1_id, + lib_consts.MONITOR_ADDRESS: '192.0.2.26', + lib_consts.MONITOR_PORT: 81, + lib_consts.NAME: 'member1', + lib_consts.POOL_ID: self.pool1_id, + lib_consts.PROJECT_ID: self.project_id, + lib_consts.PROTOCOL_PORT: 80, + lib_consts.SUBNET_ID: self.subnet_id, + lib_consts.WEIGHT: 0, + lib_consts.BACKUP: False, + lib_consts.VNIC_TYPE: lib_consts.VNIC_TYPE_NORMAL} + + self.provider_member2_dict = copy.deepcopy(self.provider_member1_dict) + self.provider_member2_dict[lib_consts.MEMBER_ID] = self.member2_id + self.provider_member2_dict[lib_consts.ADDRESS] = '192.0.2.17' + self.provider_member2_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.27' + self.provider_member2_dict[lib_consts.NAME] = 'member2' + self.provider_member2_dict[ + lib_consts.VNIC_TYPE] = lib_consts.VNIC_TYPE_DIRECT + + self.provider_member3_dict = copy.deepcopy(self.provider_member1_dict) + self.provider_member3_dict[lib_consts.MEMBER_ID] = self.member3_id + self.provider_member3_dict[lib_consts.ADDRESS] = '192.0.2.18' + self.provider_member3_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.28' + self.provider_member3_dict[lib_consts.NAME] = 'member3' + self.provider_member3_dict[lib_consts.POOL_ID] = self.pool2_id + + self.provider_member4_dict = copy.deepcopy(self.provider_member1_dict) + self.provider_member4_dict[lib_consts.MEMBER_ID] = self.member4_id + self.provider_member4_dict[lib_consts.ADDRESS] = '192.0.2.19' + self.provider_member4_dict[lib_consts.MONITOR_ADDRESS] = '192.0.2.29' + self.provider_member4_dict[lib_consts.NAME] = 'member4' + self.provider_member4_dict[lib_consts.POOL_ID] = self.pool2_id + + self.provider_pool1_members_dict = [self.provider_member1_dict, + self.provider_member2_dict] + + self.provider_pool2_members_dict = [self.provider_member3_dict, + self.provider_member4_dict] + + self.provider_member1 = driver_dm.Member(**self.provider_member1_dict) + self.provider_member2 = driver_dm.Member(**self.provider_member2_dict) + self.provider_member3 = driver_dm.Member(**self.provider_member3_dict) + self.provider_member4 = driver_dm.Member(**self.provider_member4_dict) + + self.provider_pool1_members = [self.provider_member1, + self.provider_member2] + self.provider_pool2_members = [self.provider_member3, + self.provider_member4] + + # Setup test pools + self.test_pool1_dict = { + lib_consts.ID: self.pool1_id, + lib_consts.NAME: 'pool1', lib_consts.DESCRIPTION: 'Pool 1', + constants.LOAD_BALANCER_ID: self.lb_id, + lib_consts.PROJECT_ID: self.project_id, + lib_consts.PROTOCOL: lib_consts.PROTOCOL_TCP, + lib_consts.LB_ALGORITHM: lib_consts.LB_ALGORITHM_ROUND_ROBIN, + lib_consts.MEMBERS: self.test_pool1_members_dict, + constants.HEALTH_MONITOR: self.test_hm1_dict, + lib_consts.SESSION_PERSISTENCE: { + lib_consts.TYPE: lib_consts.LB_ALGORITHM_SOURCE_IP}, + lib_consts.LISTENERS: [], + lib_consts.L7POLICIES: [], + constants.TLS_CERTIFICATE_ID: self.pool_sni_container_ref, + constants.CA_TLS_CERTIFICATE_ID: self.pool_ca_container_ref, + constants.CRL_CONTAINER_ID: self.pool_crl_container_ref, + lib_consts.TLS_ENABLED: True, + lib_consts.TLS_CIPHERS: None, + lib_consts.TLS_VERSIONS: None, + lib_consts.ALPN_PROTOCOLS: + 
constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS + } + + self.test_pool1_dict.update(self._common_test_dict) + + self.test_pool2_dict = copy.deepcopy(self.test_pool1_dict) + self.test_pool2_dict[lib_consts.ID] = self.pool2_id + self.test_pool2_dict[lib_consts.NAME] = 'pool2' + self.test_pool2_dict[lib_consts.DESCRIPTION] = 'Pool 2' + self.test_pool2_dict[ + lib_consts.MEMBERS] = self.test_pool2_members_dict + del self.test_pool2_dict[constants.TLS_CERTIFICATE_ID] + del self.test_pool2_dict[constants.CA_TLS_CERTIFICATE_ID] + del self.test_pool2_dict[constants.CRL_CONTAINER_ID] + + self.test_pools = [self.test_pool1_dict, self.test_pool2_dict] + + self.db_pool1 = data_models.Pool(**self.test_pool1_dict) + self.db_pool1.health_monitor = self.db_hm1 + self.db_pool1.members = self.db_pool1_members + self.db_pool2 = data_models.Pool(**self.test_pool2_dict) + self.db_pool2.health_monitor = self.db_hm2 + self.db_pool2.members = self.db_pool2_members + + self.test_db_pools = [self.db_pool1, self.db_pool2] + pool_cert = data_models.TLSContainer(certificate='pool cert') + pool_ca_file_content = 'X509 POOL CA CERT FILE' + pool_crl_file_content = 'X509 POOL CRL FILE' + + self.provider_pool1_dict = { + lib_consts.ADMIN_STATE_UP: True, + lib_consts.DESCRIPTION: 'Pool 1', + lib_consts.HEALTHMONITOR: self.provider_hm1_dict, + lib_consts.LB_ALGORITHM: lib_consts.LB_ALGORITHM_ROUND_ROBIN, + lib_consts.LOADBALANCER_ID: self.lb_id, + lib_consts.MEMBERS: self.provider_pool1_members_dict, + lib_consts.NAME: 'pool1', + lib_consts.POOL_ID: self.pool1_id, + lib_consts.PROJECT_ID: self.project_id, + lib_consts.PROTOCOL: lib_consts.PROTOCOL_TCP, + lib_consts.SESSION_PERSISTENCE: { + lib_consts.TYPE: lib_consts.LB_ALGORITHM_SOURCE_IP}, + lib_consts.TLS_CONTAINER_REF: self.pool_sni_container_ref, + lib_consts.TLS_CONTAINER_DATA: pool_cert.to_dict(), + lib_consts.CA_TLS_CONTAINER_REF: self.pool_ca_container_ref, + lib_consts.CA_TLS_CONTAINER_DATA: pool_ca_file_content, + lib_consts.CRL_CONTAINER_REF: self.pool_crl_container_ref, + lib_consts.CRL_CONTAINER_DATA: pool_crl_file_content, + lib_consts.TLS_ENABLED: True, + lib_consts.TLS_CIPHERS: None, + lib_consts.TLS_VERSIONS: None, + lib_consts.ALPN_PROTOCOLS: + constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS + } + + self.provider_pool2_dict = copy.deepcopy(self.provider_pool1_dict) + self.provider_pool2_dict[lib_consts.POOL_ID] = self.pool2_id + self.provider_pool2_dict[lib_consts.NAME] = 'pool2' + self.provider_pool2_dict[lib_consts.DESCRIPTION] = 'Pool 2' + self.provider_pool2_dict[ + lib_consts.MEMBERS] = self.provider_pool2_members_dict + self.provider_pool2_dict[ + lib_consts.HEALTHMONITOR] = self.provider_hm2_dict + self.provider_pool2_dict[lib_consts.TLS_CONTAINER_REF] = None + del self.provider_pool2_dict[lib_consts.TLS_CONTAINER_DATA] + self.provider_pool2_dict[lib_consts.CA_TLS_CONTAINER_REF] = None + del self.provider_pool2_dict[lib_consts.CA_TLS_CONTAINER_DATA] + self.provider_pool2_dict[lib_consts.CRL_CONTAINER_REF] = None + del self.provider_pool2_dict[lib_consts.CRL_CONTAINER_DATA] + + self.provider_pool1 = driver_dm.Pool(**self.provider_pool1_dict) + self.provider_pool1.members = self.provider_pool1_members + self.provider_pool1.healthmonitor = self.provider_hm1 + self.provider_pool2 = driver_dm.Pool(**self.provider_pool2_dict) + self.provider_pool2.members = self.provider_pool2_members + self.provider_pool2.healthmonitor = self.provider_hm2 + + self.provider_pools = [self.provider_pool1, self.provider_pool2] + + # Setup L7Rules + self.test_l7rule1_dict = { + 
lib_consts.ID: self.l7rule1_id,
+            lib_consts.L7POLICY_ID: self.l7policy1_id,
+            lib_consts.TYPE: lib_consts.L7RULE_TYPE_PATH,
+            lib_consts.COMPARE_TYPE: lib_consts.L7RULE_COMPARE_TYPE_EQUAL_TO,
+            lib_consts.KEY: 'fake_key',
+            lib_consts.VALUE: 'fake_value',
+            lib_consts.PROJECT_ID: self.project_id,
+            constants.L7POLICY: None,
+            lib_consts.INVERT: False}
+
+        self.test_l7rule1_dict.update(self._common_test_dict)
+
+        self.test_l7rule2_dict = copy.deepcopy(self.test_l7rule1_dict)
+        self.test_l7rule2_dict[lib_consts.ID] = self.l7rule2_id
+
+        self.test_l7rules = [self.test_l7rule1_dict, self.test_l7rule2_dict]
+
+        self.db_l7Rule1 = data_models.L7Rule(**self.test_l7rule1_dict)
+        self.db_l7Rule2 = data_models.L7Rule(**self.test_l7rule2_dict)
+
+        self.db_l7Rules = [self.db_l7Rule1, self.db_l7Rule2]
+
+        self.provider_l7rule1_dict = {
+            lib_consts.ADMIN_STATE_UP: True,
+            lib_consts.COMPARE_TYPE: lib_consts.L7RULE_COMPARE_TYPE_EQUAL_TO,
+            lib_consts.INVERT: False,
+            lib_consts.KEY: 'fake_key',
+            lib_consts.L7POLICY_ID: self.l7policy1_id,
+            lib_consts.L7RULE_ID: self.l7rule1_id,
+            lib_consts.TYPE: lib_consts.L7RULE_TYPE_PATH,
+            lib_consts.PROJECT_ID: self.project_id,
+            lib_consts.VALUE: 'fake_value'}
+
+        self.provider_l7rule2_dict = copy.deepcopy(self.provider_l7rule1_dict)
+        self.provider_l7rule2_dict[lib_consts.L7RULE_ID] = self.l7rule2_id
+        self.provider_l7rules_dicts = [self.provider_l7rule1_dict,
+                                       self.provider_l7rule2_dict]
+
+        self.provider_l7rule1 = driver_dm.L7Rule(**self.provider_l7rule1_dict)
+        self.provider_l7rule2 = driver_dm.L7Rule(**self.provider_l7rule2_dict)
+
+        self.provider_rules = [self.provider_l7rule1, self.provider_l7rule2]
+
+        # Setup L7Policies
+        self.test_l7policy1_dict = {
+            lib_consts.ID: self.l7policy1_id,
+            lib_consts.NAME: 'l7policy_1',
+            lib_consts.DESCRIPTION: 'L7policy 1',
+            lib_consts.LISTENER_ID: self.listener1_id,
+            lib_consts.ACTION: lib_consts.L7POLICY_ACTION_REDIRECT_TO_URL,
+            lib_consts.REDIRECT_POOL_ID: None,
+            lib_consts.REDIRECT_URL: 'http://example.com/index.html',
+            lib_consts.REDIRECT_PREFIX: None,
+            lib_consts.PROJECT_ID: self.project_id,
+            lib_consts.POSITION: 1,
+            constants.LISTENER: None,
+            constants.REDIRECT_POOL: None,
+            lib_consts.L7RULES: self.test_l7rules,
+            lib_consts.REDIRECT_HTTP_CODE: 302}
+
+        self.test_l7policy1_dict.update(self._common_test_dict)
+
+        self.test_l7policy2_dict = copy.deepcopy(self.test_l7policy1_dict)
+        self.test_l7policy2_dict[lib_consts.ID] = self.l7policy2_id
+        self.test_l7policy2_dict[lib_consts.NAME] = 'l7policy_2'
+        self.test_l7policy2_dict[lib_consts.DESCRIPTION] = 'L7policy 2'
+
+        self.test_l7policies = [self.test_l7policy1_dict,
+                                self.test_l7policy2_dict]
+
+        self.db_l7policy1 = data_models.L7Policy(**self.test_l7policy1_dict)
+        self.db_l7policy2 = data_models.L7Policy(**self.test_l7policy2_dict)
+        self.db_l7policy1.l7rules = self.db_l7Rules
+        self.db_l7policy2.l7rules = self.db_l7Rules
+
+        self.db_l7policies = [self.db_l7policy1, self.db_l7policy2]
+
+        self.provider_l7policy1_dict = {
+            lib_consts.ACTION: lib_consts.L7POLICY_ACTION_REDIRECT_TO_URL,
+            lib_consts.ADMIN_STATE_UP: True,
+            lib_consts.DESCRIPTION: 'L7policy 1',
+            lib_consts.L7POLICY_ID: self.l7policy1_id,
+            lib_consts.LISTENER_ID: self.listener1_id,
+            lib_consts.NAME: 'l7policy_1',
+            lib_consts.POSITION: 1,
+            lib_consts.PROJECT_ID: self.project_id,
+            lib_consts.REDIRECT_POOL_ID: None,
+            lib_consts.REDIRECT_URL: 'http://example.com/index.html',
+            lib_consts.REDIRECT_PREFIX: None,
+            lib_consts.RULES: self.provider_l7rules_dicts,
+            
lib_consts.REDIRECT_HTTP_CODE: 302
+        }
+
+        self.provider_l7policy2_dict = copy.deepcopy(
+            self.provider_l7policy1_dict)
+        self.provider_l7policy2_dict[
+            lib_consts.L7POLICY_ID] = self.l7policy2_id
+        self.provider_l7policy2_dict[lib_consts.NAME] = 'l7policy_2'
+        self.provider_l7policy2_dict[lib_consts.DESCRIPTION] = 'L7policy 2'
+
+        self.provider_l7policies_dict = [self.provider_l7policy1_dict,
+                                         self.provider_l7policy2_dict]
+
+        self.provider_l7policy1 = driver_dm.L7Policy(
+            **self.provider_l7policy1_dict)
+        self.provider_l7policy1.rules = self.provider_rules
+        self.provider_l7policy2 = driver_dm.L7Policy(
+            **self.provider_l7policy2_dict)
+        self.provider_l7policy2.rules = self.provider_rules
+
+        self.provider_l7policies = [self.provider_l7policy1,
+                                    self.provider_l7policy2]
+
+        # Setup Listeners
+        self.test_listener1_dict = {
+            lib_consts.ID: self.listener1_id,
+            lib_consts.NAME: 'listener_1',
+            lib_consts.DESCRIPTION: 'Listener 1',
+            lib_consts.DEFAULT_POOL_ID: self.pool1_id,
+            constants.LOAD_BALANCER_ID: self.lb_id,
+            lib_consts.PROJECT_ID: self.project_id,
+            lib_consts.PROTOCOL: lib_consts.PROTOCOL_TCP,
+            lib_consts.PROTOCOL_PORT: 90,
+            lib_consts.CONNECTION_LIMIT: 10000,
+            constants.TLS_CERTIFICATE_ID: self.default_tls_container_ref,
+            lib_consts.DEFAULT_POOL: self.test_pool1_dict,
+            constants.SNI_CONTAINERS: [self.sni_container_ref_1,
+                                       self.sni_container_ref_2],
+            constants.PEER_PORT: 55,
+            lib_consts.L7POLICIES: self.test_l7policies,
+            lib_consts.INSERT_HEADERS: {},
+            lib_consts.TIMEOUT_CLIENT_DATA: 1000,
+            lib_consts.TIMEOUT_MEMBER_CONNECT: 2000,
+            lib_consts.TIMEOUT_MEMBER_DATA: 3000,
+            lib_consts.TIMEOUT_TCP_INSPECT: 4000,
+            constants.CLIENT_CA_TLS_CERTIFICATE_ID:
+                self.client_ca_tls_certificate_ref,
+            lib_consts.CLIENT_AUTHENTICATION: constants.CLIENT_AUTH_NONE,
+            constants.CLIENT_CRL_CONTAINER_ID: self.client_crl_container_ref,
+            lib_consts.ALLOWED_CIDRS: ['192.0.2.0/24', '198.51.100.0/24'],
+            lib_consts.TLS_CIPHERS: constants.CIPHERS_OWASP_SUITE_B,
+            lib_consts.TLS_VERSIONS: constants.TLS_VERSIONS_OWASP_SUITE_B,
+            lib_consts.ALPN_PROTOCOLS:
+                constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS,
+            lib_consts.HSTS_INCLUDE_SUBDOMAINS: False,
+            lib_consts.HSTS_MAX_AGE: None,
+            lib_consts.HSTS_PRELOAD: False,
+        }
+
+        self.test_listener1_dict.update(self._common_test_dict)
+
+        self.test_listener2_dict = copy.deepcopy(self.test_listener1_dict)
+        self.test_listener2_dict[lib_consts.ID] = self.listener2_id
+        self.test_listener2_dict[lib_consts.NAME] = 'listener_2'
+        self.test_listener2_dict[lib_consts.DESCRIPTION] = 'Listener 2'
+        self.test_listener2_dict[lib_consts.DEFAULT_POOL_ID] = self.pool2_id
+        self.test_listener2_dict[
+            lib_consts.DEFAULT_POOL] = self.test_pool2_dict
+        self.test_listener2_dict[lib_consts.HSTS_INCLUDE_SUBDOMAINS] = True
+        self.test_listener2_dict[lib_consts.HSTS_MAX_AGE] = 10
+        self.test_listener2_dict[lib_consts.HSTS_PRELOAD] = False
+        del self.test_listener2_dict[lib_consts.L7POLICIES]
+        del self.test_listener2_dict[constants.SNI_CONTAINERS]
+        del self.test_listener2_dict[constants.CLIENT_CA_TLS_CERTIFICATE_ID]
+        del self.test_listener2_dict[constants.CLIENT_CRL_CONTAINER_ID]
+
+        self.test_listeners = [self.test_listener1_dict,
+                               self.test_listener2_dict]
+
+        self.db_listener1 = data_models.Listener(**self.test_listener1_dict)
+        self.db_listener2 = data_models.Listener(**self.test_listener2_dict)
+        self.db_listener1.default_pool = self.db_pool1
+        self.db_listener2.default_pool = self.db_pool2
+        self.db_listener1.l7policies = self.db_l7policies
+        
self.db_listener1.sni_containers = [ + data_models.SNI(tls_container_id='2'), + data_models.SNI(tls_container_id='3')] + + self.test_db_listeners = [self.db_listener1, self.db_listener2] + + cert1 = data_models.TLSContainer(certificate='cert 1') + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + ca_cert = 'ca cert' + crl_file_content = 'X509 CRL FILE' + + self.provider_listener1_dict = { + lib_consts.ADMIN_STATE_UP: True, + lib_consts.ALLOWED_CIDRS: ['192.0.2.0/24', '198.51.100.0/24'], + lib_consts.CONNECTION_LIMIT: 10000, + lib_consts.DEFAULT_POOL: self.provider_pool1_dict, + lib_consts.DEFAULT_POOL_ID: self.pool1_id, + lib_consts.DEFAULT_TLS_CONTAINER_DATA: cert1.to_dict(), + lib_consts.DEFAULT_TLS_CONTAINER_REF: + self.default_tls_container_ref, + lib_consts.DESCRIPTION: 'Listener 1', + lib_consts.HSTS_INCLUDE_SUBDOMAINS: False, + lib_consts.HSTS_MAX_AGE: None, + lib_consts.HSTS_PRELOAD: False, + lib_consts.INSERT_HEADERS: {}, + lib_consts.L7POLICIES: self.provider_l7policies_dict, + lib_consts.LISTENER_ID: self.listener1_id, + lib_consts.LOADBALANCER_ID: self.lb_id, + lib_consts.NAME: 'listener_1', + lib_consts.PROJECT_ID: self.project_id, + lib_consts.PROTOCOL: lib_consts.PROTOCOL_TCP, + lib_consts.PROTOCOL_PORT: 90, + lib_consts.SNI_CONTAINER_DATA: [cert2.to_dict(), cert3.to_dict()], + lib_consts.SNI_CONTAINER_REFS: [self.sni_container_ref_1, + self.sni_container_ref_2], + lib_consts.TIMEOUT_CLIENT_DATA: 1000, + lib_consts.TIMEOUT_MEMBER_CONNECT: 2000, + lib_consts.TIMEOUT_MEMBER_DATA: 3000, + lib_consts.TIMEOUT_TCP_INSPECT: 4000, + lib_consts.CLIENT_CA_TLS_CONTAINER_REF: + self.client_ca_tls_certificate_ref, + lib_consts.CLIENT_CA_TLS_CONTAINER_DATA: ca_cert, + lib_consts.CLIENT_AUTHENTICATION: constants.CLIENT_AUTH_NONE, + lib_consts.CLIENT_CRL_CONTAINER_REF: self.client_crl_container_ref, + lib_consts.CLIENT_CRL_CONTAINER_DATA: crl_file_content, + lib_consts.TLS_CIPHERS: constants.CIPHERS_OWASP_SUITE_B, + lib_consts.TLS_VERSIONS: constants.TLS_VERSIONS_OWASP_SUITE_B, + lib_consts.ALPN_PROTOCOLS: + constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS + } + + self.provider_listener2_dict = copy.deepcopy( + self.provider_listener1_dict) + self.provider_listener2_dict[ + lib_consts.LISTENER_ID] = self.listener2_id + self.provider_listener2_dict[lib_consts.NAME] = 'listener_2' + self.provider_listener2_dict[lib_consts.DESCRIPTION] = 'Listener 2' + self.provider_listener2_dict[ + lib_consts.DEFAULT_POOL_ID] = self.pool2_id + self.provider_listener2_dict[ + lib_consts.DEFAULT_POOL] = self.provider_pool2_dict + del self.provider_listener2_dict[lib_consts.L7POLICIES] + self.provider_listener2_dict[ + lib_consts.CLIENT_CA_TLS_CONTAINER_REF] = None + del self.provider_listener2_dict[ + lib_consts.CLIENT_CA_TLS_CONTAINER_DATA] + self.provider_listener2_dict[lib_consts.CLIENT_AUTHENTICATION] = ( + constants.CLIENT_AUTH_NONE) + self.provider_listener2_dict[ + lib_consts.CLIENT_CRL_CONTAINER_REF] = None + del self.provider_listener2_dict[lib_consts.CLIENT_CRL_CONTAINER_DATA] + self.provider_listener2_dict[lib_consts.HSTS_INCLUDE_SUBDOMAINS] = True + self.provider_listener2_dict[lib_consts.HSTS_MAX_AGE] = 10 + self.provider_listener2_dict[lib_consts.HSTS_PRELOAD] = False + + self.provider_listener1 = driver_dm.Listener( + **self.provider_listener1_dict) + self.provider_listener2 = driver_dm.Listener( + **self.provider_listener2_dict) + self.provider_listener1.default_pool = 
self.provider_pool1 + self.provider_listener2.default_pool = self.provider_pool2 + self.provider_listener1.l7policies = self.provider_l7policies + + self.provider_listeners = [self.provider_listener1, + self.provider_listener2] + + self.test_vip_dict = {constants.IP_ADDRESS: self.ip_address, + constants.NETWORK_ID: self.network_id, + constants.PORT_ID: self.port_id, + lib_consts.SUBNET_ID: self.subnet_id, + constants.QOS_POLICY_ID: self.qos_policy_id, + constants.SG_IDS: self.vip_sg_ids, + constants.OCTAVIA_OWNED: None} + + self.provider_vip_dict = { + lib_consts.VIP_ADDRESS: self.ip_address, + lib_consts.VIP_NETWORK_ID: self.network_id, + lib_consts.VIP_PORT_ID: self.port_id, + lib_consts.VIP_SUBNET_ID: self.subnet_id, + lib_consts.VIP_QOS_POLICY_ID: self.qos_policy_id, + lib_consts.VIP_SG_IDS: self.vip_sg_ids, + constants.OCTAVIA_OWNED: None} + + self.test_additional_vip_dict = { + constants.IP_ADDRESS: self.ip_address2, + constants.NETWORK_ID: self.network_id, + constants.PORT_ID: self.port_id, + lib_consts.SUBNET_ID: self.subnet_id} + + self.provider_additional_vip_dict = { + constants.IP_ADDRESS: self.ip_address2, + constants.NETWORK_ID: self.network_id, + constants.PORT_ID: self.port_id, + lib_consts.SUBNET_ID: self.subnet_id} + + self.db_vip = data_models.Vip( + ip_address=self.ip_address, + network_id=self.network_id, + port_id=self.port_id, + subnet_id=self.subnet_id, + qos_policy_id=self.qos_policy_id, + sg_ids=self.vip_sg_ids) + + self.db_additional_vip = data_models.AdditionalVip( + ip_address=self.ip_address2, + network_id=self.network_id, + port_id=self.port_id, + subnet_id=self.subnet_id) + + self.test_loadbalancer1_dict = { + lib_consts.NAME: self.lb_name, + lib_consts.DESCRIPTION: self.lb_description, + constants.ENABLED: True, + lib_consts.PROVISIONING_STATUS: lib_consts.PENDING_UPDATE, + lib_consts.OPERATING_STATUS: lib_consts.OFFLINE, + constants.TOPOLOGY: constants.TOPOLOGY_ACTIVE_STANDBY, + constants.VRRP_GROUP: None, + constants.PROVIDER: constants.AMPHORA, + constants.SERVER_GROUP_ID: uuidutils.generate_uuid(), + lib_consts.PROJECT_ID: self.project_id, + lib_consts.ID: self.lb_id, constants.FLAVOR_ID: self.flavor_id, + constants.TAGS: ['test_tag']} + + self.provider_loadbalancer_dict = { + lib_consts.ADDITIONAL_VIPS: None, + lib_consts.ADMIN_STATE_UP: True, + lib_consts.DESCRIPTION: self.lb_description, + lib_consts.FLAVOR: {"something": "else"}, + lib_consts.LISTENERS: None, + lib_consts.LOADBALANCER_ID: self.lb_id, + lib_consts.NAME: self.lb_name, + lib_consts.POOLS: None, + lib_consts.PROJECT_ID: self.project_id, + lib_consts.VIP_ADDRESS: self.ip_address, + lib_consts.VIP_NETWORK_ID: self.network_id, + lib_consts.VIP_PORT_ID: self.port_id, + lib_consts.VIP_QOS_POLICY_ID: self.qos_policy_id, + lib_consts.VIP_SUBNET_ID: self.subnet_id} + + self.provider_loadbalancer_tree_dict = { + lib_consts.ADDITIONAL_VIPS: [], + lib_consts.ADMIN_STATE_UP: True, + lib_consts.AVAILABILITY_ZONE: None, + lib_consts.DESCRIPTION: self.lb_description, + lib_consts.FLAVOR: {"something": "else"}, + lib_consts.LISTENERS: None, + lib_consts.LOADBALANCER_ID: self.lb_id, + lib_consts.NAME: self.lb_name, + lib_consts.POOLS: None, + lib_consts.PROJECT_ID: self.project_id, + lib_consts.VIP_ADDRESS: self.ip_address, + lib_consts.VIP_NETWORK_ID: self.network_id, + lib_consts.VIP_PORT_ID: self.port_id, + lib_consts.VIP_QOS_POLICY_ID: self.qos_policy_id, + lib_consts.VIP_SUBNET_ID: self.subnet_id} diff --git a/octavia/tests/common/sample_haproxy_prometheus 
b/octavia/tests/common/sample_haproxy_prometheus new file mode 100644 index 0000000000..a8c7f8b545 --- /dev/null +++ b/octavia/tests/common/sample_haproxy_prometheus @@ -0,0 +1,886 @@ +# HELP haproxy_process_nbthread Configured number of threads. +# TYPE haproxy_process_nbthread gauge +haproxy_process_nbthread 1 +# HELP haproxy_process_nbproc Configured number of processes. +# TYPE haproxy_process_nbproc gauge +haproxy_process_nbproc 1 +# HELP haproxy_process_relative_process_id Relative process id, starting at 1. +# TYPE haproxy_process_relative_process_id gauge +haproxy_process_relative_process_id 1 +# HELP haproxy_process_start_time_seconds Start time in seconds. +# TYPE haproxy_process_start_time_seconds gauge +haproxy_process_start_time_seconds 1644875239 +# HELP haproxy_process_max_memory_bytes Per-process memory limit (in bytes); 0=unset. +# TYPE haproxy_process_max_memory_bytes gauge +haproxy_process_max_memory_bytes 0 +# HELP haproxy_process_pool_allocated_bytes Total amount of memory allocated in pools (in bytes). +# TYPE haproxy_process_pool_allocated_bytes gauge +haproxy_process_pool_allocated_bytes 111616 +# HELP haproxy_process_pool_used_bytes Total amount of memory used in pools (in bytes). +# TYPE haproxy_process_pool_used_bytes gauge +haproxy_process_pool_used_bytes 78848 +# HELP haproxy_process_pool_failures_total Total number of failed pool allocations. +# TYPE haproxy_process_pool_failures_total counter +haproxy_process_pool_failures_total 0 +# HELP haproxy_process_max_fds Maximum number of open file descriptors; 0=unset. +# TYPE haproxy_process_max_fds gauge +haproxy_process_max_fds 375033 +# HELP haproxy_process_max_sockets Maximum number of open sockets. +# TYPE haproxy_process_max_sockets gauge +haproxy_process_max_sockets 375033 +# HELP haproxy_process_max_connections Maximum number of concurrent connections. +# TYPE haproxy_process_max_connections gauge +haproxy_process_max_connections 150000 +# HELP haproxy_process_hard_max_connections Initial Maximum number of concurrent connections. +# TYPE haproxy_process_hard_max_connections gauge +haproxy_process_hard_max_connections 150000 +# HELP haproxy_process_current_connections Number of active sessions. +# TYPE haproxy_process_current_connections gauge +haproxy_process_current_connections 1 +# HELP haproxy_process_connections_total Total number of created sessions. +# TYPE haproxy_process_connections_total counter +haproxy_process_connections_total 680 +# HELP haproxy_process_requests_total Total number of requests (TCP or HTTP). +# TYPE haproxy_process_requests_total counter +haproxy_process_requests_total 680 +# HELP haproxy_process_max_ssl_connections Configured maximum number of concurrent SSL connections. +# TYPE haproxy_process_max_ssl_connections gauge +haproxy_process_max_ssl_connections 0 +# HELP haproxy_process_current_ssl_connections Number of opened SSL connections. +# TYPE haproxy_process_current_ssl_connections gauge +haproxy_process_current_ssl_connections 0 +# HELP haproxy_process_ssl_connections_total Total number of opened SSL connections. +# TYPE haproxy_process_ssl_connections_total counter +haproxy_process_ssl_connections_total 0 +# HELP haproxy_process_max_pipes Configured maximum number of pipes. +# TYPE haproxy_process_max_pipes gauge +haproxy_process_max_pipes 37500 +# HELP haproxy_process_pipes_used_total Number of pipes in used. +# TYPE haproxy_process_pipes_used_total counter +haproxy_process_pipes_used_total 0 +# HELP haproxy_process_pipes_free_total Number of pipes unused. 
+# TYPE haproxy_process_pipes_free_total counter +haproxy_process_pipes_free_total 0 +# HELP haproxy_process_current_connection_rate Current number of connections per second over last elapsed second. +# TYPE haproxy_process_current_connection_rate gauge +haproxy_process_current_connection_rate 1 +# HELP haproxy_process_limit_connection_rate Configured maximum number of connections per second. +# TYPE haproxy_process_limit_connection_rate gauge +haproxy_process_limit_connection_rate 0 +# HELP haproxy_process_max_connection_rate Maximum observed number of connections per second. +# TYPE haproxy_process_max_connection_rate gauge +haproxy_process_max_connection_rate 2 +# HELP haproxy_process_current_session_rate Current number of sessions per second over last elapsed second. +# TYPE haproxy_process_current_session_rate gauge +haproxy_process_current_session_rate 1 +# HELP haproxy_process_limit_session_rate Configured maximum number of sessions per second. +# TYPE haproxy_process_limit_session_rate gauge +haproxy_process_limit_session_rate 0 +# HELP haproxy_process_max_session_rate Maximum observed number of sessions per second. +# TYPE haproxy_process_max_session_rate gauge +haproxy_process_max_session_rate 2 +# HELP haproxy_process_current_ssl_rate Current number of SSL sessions per second over last elapsed second. +# TYPE haproxy_process_current_ssl_rate gauge +haproxy_process_current_ssl_rate 0 +# HELP haproxy_process_limit_ssl_rate Configured maximum number of SSL sessions per second. +# TYPE haproxy_process_limit_ssl_rate gauge +haproxy_process_limit_ssl_rate 0 +# HELP haproxy_process_max_ssl_rate Maximum observed number of SSL sessions per second. +# TYPE haproxy_process_max_ssl_rate gauge +haproxy_process_max_ssl_rate 0 +# HELP haproxy_process_current_frontend_ssl_key_rate Current frontend SSL Key computation per second over last elapsed second. +# TYPE haproxy_process_current_frontend_ssl_key_rate gauge +haproxy_process_current_frontend_ssl_key_rate 0 +# HELP haproxy_process_max_frontend_ssl_key_rate Maximum observed frontend SSL Key computation per second. +# TYPE haproxy_process_max_frontend_ssl_key_rate gauge +haproxy_process_max_frontend_ssl_key_rate 0 +# HELP haproxy_process_frontend_ssl_reuse SSL session reuse ratio (percent). +# TYPE haproxy_process_frontend_ssl_reuse gauge +haproxy_process_frontend_ssl_reuse 0 +# HELP haproxy_process_current_backend_ssl_key_rate Current backend SSL Key computation per second over last elapsed second. +# TYPE haproxy_process_current_backend_ssl_key_rate gauge +haproxy_process_current_backend_ssl_key_rate 0 +# HELP haproxy_process_max_backend_ssl_key_rate Maximum observed backend SSL Key computation per second. +# TYPE haproxy_process_max_backend_ssl_key_rate gauge +haproxy_process_max_backend_ssl_key_rate 0 +# HELP haproxy_process_ssl_cache_lookups_total Total number of SSL session cache lookups. +# TYPE haproxy_process_ssl_cache_lookups_total counter +haproxy_process_ssl_cache_lookups_total 0 +# HELP haproxy_process_ssl_cache_misses_total Total number of SSL session cache misses. +# TYPE haproxy_process_ssl_cache_misses_total counter +haproxy_process_ssl_cache_misses_total 0 +# HELP haproxy_process_http_comp_bytes_in_total Number of bytes per second over last elapsed second, before http compression. +# TYPE haproxy_process_http_comp_bytes_in_total counter +haproxy_process_http_comp_bytes_in_total 0 +# HELP haproxy_process_http_comp_bytes_out_total Number of bytes per second over last elapsed second, after http compression. 
+# TYPE haproxy_process_http_comp_bytes_out_total counter +haproxy_process_http_comp_bytes_out_total 0 +# HELP haproxy_process_limit_http_comp Configured maximum input compression rate in bytes. +# TYPE haproxy_process_limit_http_comp gauge +haproxy_process_limit_http_comp 0 +# HELP haproxy_process_current_zlib_memory Current memory used for zlib in bytes. +# TYPE haproxy_process_current_zlib_memory gauge +haproxy_process_current_zlib_memory 0 +# HELP haproxy_process_max_zlib_memory Configured maximum amount of memory for zlib in bytes. +# TYPE haproxy_process_max_zlib_memory gauge +haproxy_process_max_zlib_memory 0 +# HELP haproxy_process_current_tasks Current number of tasks. +# TYPE haproxy_process_current_tasks gauge +haproxy_process_current_tasks 20 +# HELP haproxy_process_current_run_queue Current number of tasks in the run-queue. +# TYPE haproxy_process_current_run_queue gauge +haproxy_process_current_run_queue 1 +# HELP haproxy_process_idle_time_percent Idle to total ratio over last sample (percent). +# TYPE haproxy_process_idle_time_percent gauge +haproxy_process_idle_time_percent 100 +# HELP haproxy_process_stopping Non zero means stopping in progress. +# TYPE haproxy_process_stopping gauge +haproxy_process_stopping 0 +# HELP haproxy_process_jobs Current number of active jobs (listeners, sessions, open devices). +# TYPE haproxy_process_jobs gauge +haproxy_process_jobs 7 +# HELP haproxy_process_unstoppable_jobs Current number of active jobs that can''t be stopped during a soft stop. +# TYPE haproxy_process_unstoppable_jobs gauge +haproxy_process_unstoppable_jobs 0 +# HELP haproxy_process_listeners Current number of active listeners. +# TYPE haproxy_process_listeners gauge +haproxy_process_listeners 6 +# HELP haproxy_process_active_peers Current number of active peers. +# TYPE haproxy_process_active_peers gauge +haproxy_process_active_peers 0 +# HELP haproxy_process_connected_peers Current number of connected peers. +# TYPE haproxy_process_connected_peers gauge +haproxy_process_connected_peers 0 +# HELP haproxy_process_dropped_logs_total Total number of dropped logs. +# TYPE haproxy_process_dropped_logs_total counter +haproxy_process_dropped_logs_total 0 +# HELP haproxy_process_busy_polling_enabled Non zero if the busy polling is enabled. +# TYPE haproxy_process_busy_polling_enabled gauge +haproxy_process_busy_polling_enabled 0 +# HELP haproxy_frontend_status Current status of the service (frontend: 0=STOP, 1=UP, 2=FULL - backend: 0=DOWN, 1=UP - server: 0=DOWN, 1=UP, 2=MAINT, 3=DRAIN, 4=NOLB). +# TYPE haproxy_frontend_status gauge +haproxy_frontend_status{proxy="prometheus-exporter-internal-endpoint"} 1 +haproxy_frontend_status{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 1 +haproxy_frontend_status{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 1 +haproxy_frontend_status{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 1 +# HELP haproxy_frontend_current_sessions Current number of active sessions. +# TYPE haproxy_frontend_current_sessions gauge +haproxy_frontend_current_sessions{proxy="prometheus-exporter-internal-endpoint"} 1 +haproxy_frontend_current_sessions{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_current_sessions{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_current_sessions{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_max_sessions Maximum observed number of active sessions. 
+# TYPE haproxy_frontend_max_sessions gauge +haproxy_frontend_max_sessions{proxy="prometheus-exporter-internal-endpoint"} 1 +haproxy_frontend_max_sessions{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_max_sessions{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 1 +haproxy_frontend_max_sessions{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_limit_sessions Configured session limit. +# TYPE haproxy_frontend_limit_sessions gauge +haproxy_frontend_limit_sessions{proxy="prometheus-exporter-internal-endpoint"} 150000 +haproxy_frontend_limit_sessions{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 50000 +haproxy_frontend_limit_sessions{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 50000 +haproxy_frontend_limit_sessions{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 50000 +# HELP haproxy_frontend_sessions_total Total number of sessions. +# TYPE haproxy_frontend_sessions_total counter +haproxy_frontend_sessions_total{proxy="prometheus-exporter-internal-endpoint"} 4 +haproxy_frontend_sessions_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_sessions_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 2 +haproxy_frontend_sessions_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_limit_session_rate Configured limit on new sessions per second. +# TYPE haproxy_frontend_limit_session_rate gauge +haproxy_frontend_limit_session_rate{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_limit_session_rate{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_limit_session_rate{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_limit_session_rate{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_max_session_rate Maximum observed number of sessions per second. +# TYPE haproxy_frontend_max_session_rate gauge +haproxy_frontend_max_session_rate{proxy="prometheus-exporter-internal-endpoint"} 1 +haproxy_frontend_max_session_rate{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_max_session_rate{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 1 +haproxy_frontend_max_session_rate{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_connections_rate_max Maximum observed number of connections per second. +# TYPE haproxy_frontend_connections_rate_max gauge +haproxy_frontend_connections_rate_max{proxy="prometheus-exporter-internal-endpoint"} 1 +haproxy_frontend_connections_rate_max{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_connections_rate_max{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 1 +haproxy_frontend_connections_rate_max{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_connections_total Total number of connections. +# TYPE haproxy_frontend_connections_total counter +haproxy_frontend_connections_total{proxy="prometheus-exporter-internal-endpoint"} 4 +haproxy_frontend_connections_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_connections_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 2 +haproxy_frontend_connections_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_bytes_in_total Current total of incoming bytes. 
+# TYPE haproxy_frontend_bytes_in_total counter +haproxy_frontend_bytes_in_total{proxy="prometheus-exporter-internal-endpoint"} 312 +haproxy_frontend_bytes_in_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_bytes_in_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 176 +haproxy_frontend_bytes_in_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_bytes_out_total Current total of outgoing bytes. +# TYPE haproxy_frontend_bytes_out_total counter +haproxy_frontend_bytes_out_total{proxy="prometheus-exporter-internal-endpoint"} 177027 +haproxy_frontend_bytes_out_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_bytes_out_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 145918 +haproxy_frontend_bytes_out_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_requests_denied_total Total number of denied requests. +# TYPE haproxy_frontend_requests_denied_total counter +haproxy_frontend_requests_denied_total{proxy="prometheus-exporter-internal-endpoint"} 1 +haproxy_frontend_requests_denied_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_requests_denied_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_requests_denied_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_responses_denied_total Total number of denied responses. +# TYPE haproxy_frontend_responses_denied_total counter +haproxy_frontend_responses_denied_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_responses_denied_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_responses_denied_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_responses_denied_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_request_errors_total Total number of request errors. +# TYPE haproxy_frontend_request_errors_total counter +haproxy_frontend_request_errors_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_request_errors_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_request_errors_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_request_errors_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_denied_connections_total Total number of requests denied by "tcp-request connection" rules. +# TYPE haproxy_frontend_denied_connections_total counter +haproxy_frontend_denied_connections_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_denied_connections_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_denied_connections_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_denied_connections_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_denied_sessions_total Total number of requests denied by "tcp-request session" rules. +# TYPE haproxy_frontend_denied_sessions_total counter +haproxy_frontend_denied_sessions_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_denied_sessions_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_denied_sessions_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_denied_sessions_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_failed_header_rewriting_total Total number of failed header rewriting warnings. 
+# TYPE haproxy_frontend_failed_header_rewriting_total counter +haproxy_frontend_failed_header_rewriting_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_failed_header_rewriting_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_failed_header_rewriting_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_failed_header_rewriting_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_http_requests_rate_max Maximum observed number of HTTP requests per second. +# TYPE haproxy_frontend_http_requests_rate_max gauge +haproxy_frontend_http_requests_rate_max{proxy="prometheus-exporter-internal-endpoint"} 1 +haproxy_frontend_http_requests_rate_max{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_http_requests_rate_max{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 1 +haproxy_frontend_http_requests_rate_max{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_http_requests_total Total number of HTTP requests received. +# TYPE haproxy_frontend_http_requests_total counter +haproxy_frontend_http_requests_total{proxy="prometheus-exporter-internal-endpoint"} 4 +haproxy_frontend_http_requests_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_http_requests_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 2 +haproxy_frontend_http_requests_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_http_responses_total Total number of HTTP responses. +# TYPE haproxy_frontend_http_responses_total counter +haproxy_frontend_http_responses_total{proxy="prometheus-exporter-internal-endpoint",code="1xx"} 0 +haproxy_frontend_http_responses_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="1xx"} 0 +haproxy_frontend_http_responses_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="1xx"} 0 +haproxy_frontend_http_responses_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db",code="1xx"} 0 +haproxy_frontend_http_responses_total{proxy="prometheus-exporter-internal-endpoint",code="2xx"} 2 +haproxy_frontend_http_responses_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="2xx"} 0 +haproxy_frontend_http_responses_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="2xx"} 2 +haproxy_frontend_http_responses_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db",code="2xx"} 0 +haproxy_frontend_http_responses_total{proxy="prometheus-exporter-internal-endpoint",code="3xx"} 0 +haproxy_frontend_http_responses_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="3xx"} 0 +haproxy_frontend_http_responses_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="3xx"} 0 +haproxy_frontend_http_responses_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db",code="3xx"} 0 +haproxy_frontend_http_responses_total{proxy="prometheus-exporter-internal-endpoint",code="4xx"} 0 +haproxy_frontend_http_responses_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="4xx"} 0 +haproxy_frontend_http_responses_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="4xx"} 0 +haproxy_frontend_http_responses_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db",code="4xx"} 0 +haproxy_frontend_http_responses_total{proxy="prometheus-exporter-internal-endpoint",code="5xx"} 0 +haproxy_frontend_http_responses_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="5xx"} 0 +haproxy_frontend_http_responses_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="5xx"} 0 +haproxy_frontend_http_responses_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db",code="5xx"} 0 
+haproxy_frontend_http_responses_total{proxy="prometheus-exporter-internal-endpoint",code="other"} 1 +haproxy_frontend_http_responses_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="other"} 0 +haproxy_frontend_http_responses_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="other"} 0 +haproxy_frontend_http_responses_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db",code="other"} 0 +# HELP haproxy_frontend_intercepted_requests_total Total number of intercepted HTTP requests. +# TYPE haproxy_frontend_intercepted_requests_total counter +haproxy_frontend_intercepted_requests_total{proxy="prometheus-exporter-internal-endpoint"} 3 +haproxy_frontend_intercepted_requests_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_intercepted_requests_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_intercepted_requests_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_http_cache_lookups_total Total number of HTTP cache lookups. +# TYPE haproxy_frontend_http_cache_lookups_total counter +haproxy_frontend_http_cache_lookups_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_http_cache_lookups_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_http_cache_lookups_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_http_cache_lookups_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_http_cache_hits_total Total number of HTTP cache hits. +# TYPE haproxy_frontend_http_cache_hits_total counter +haproxy_frontend_http_cache_hits_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_http_cache_hits_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_http_cache_hits_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_http_cache_hits_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_http_comp_bytes_in_total Total number of HTTP response bytes fed to the compressor. +# TYPE haproxy_frontend_http_comp_bytes_in_total counter +haproxy_frontend_http_comp_bytes_in_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_http_comp_bytes_in_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_http_comp_bytes_in_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_http_comp_bytes_in_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_http_comp_bytes_out_total Total number of HTTP response bytes emitted by the compressor. +# TYPE haproxy_frontend_http_comp_bytes_out_total counter +haproxy_frontend_http_comp_bytes_out_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_http_comp_bytes_out_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_http_comp_bytes_out_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_http_comp_bytes_out_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_http_comp_bytes_bypassed_total Total number of bytes that bypassed the HTTP compressor (CPU/BW limit). 
+# TYPE haproxy_frontend_http_comp_bytes_bypassed_total counter +haproxy_frontend_http_comp_bytes_bypassed_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_http_comp_bytes_bypassed_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_http_comp_bytes_bypassed_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_http_comp_bytes_bypassed_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_frontend_http_comp_responses_total Total number of HTTP responses that were compressed. +# TYPE haproxy_frontend_http_comp_responses_total counter +haproxy_frontend_http_comp_responses_total{proxy="prometheus-exporter-internal-endpoint"} 0 +haproxy_frontend_http_comp_responses_total{proxy="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_frontend_http_comp_responses_total{proxy="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +haproxy_frontend_http_comp_responses_total{proxy="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_status Current status of the service (frontend: 0=STOP, 1=UP, 2=FULL - backend: 0=DOWN, 1=UP - server: 0=DOWN, 1=UP, 2=MAINT, 3=DRAIN, 4=NOLB). +# TYPE haproxy_backend_status gauge +haproxy_backend_status{proxy="prometheus-exporter-internal"} 1 +haproxy_backend_status{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_status{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_current_sessions Current number of active sessions. +# TYPE haproxy_backend_current_sessions gauge +haproxy_backend_current_sessions{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_current_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_current_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_max_sessions Maximum observed number of active sessions. +# TYPE haproxy_backend_max_sessions gauge +haproxy_backend_max_sessions{proxy="prometheus-exporter-internal"} 1 +haproxy_backend_max_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_max_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_limit_sessions Configured session limit. +# TYPE haproxy_backend_limit_sessions gauge +haproxy_backend_limit_sessions{proxy="prometheus-exporter-internal"} 5000 +haproxy_backend_limit_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 50000 +haproxy_backend_limit_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 50000 +# HELP haproxy_backend_sessions_total Total number of sessions. +# TYPE haproxy_backend_sessions_total counter +haproxy_backend_sessions_total{proxy="prometheus-exporter-internal"} 2 +haproxy_backend_sessions_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_sessions_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_max_session_rate Maximum observed number of sessions per second. 
+# TYPE haproxy_backend_max_session_rate gauge +haproxy_backend_max_session_rate{proxy="prometheus-exporter-internal"} 1 +haproxy_backend_max_session_rate{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_max_session_rate{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_last_session_seconds Number of seconds since last session assigned to server/backend. +# TYPE haproxy_backend_last_session_seconds gauge +haproxy_backend_last_session_seconds{proxy="prometheus-exporter-internal"} 1829 +haproxy_backend_last_session_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} -1 +haproxy_backend_last_session_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} -1 +# HELP haproxy_backend_current_queue Current number of queued requests. +# TYPE haproxy_backend_current_queue gauge +haproxy_backend_current_queue{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_current_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_current_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_max_queue Maximum observed number of queued requests. +# TYPE haproxy_backend_max_queue gauge +haproxy_backend_max_queue{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_max_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_max_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_connection_attempts_total Total number of connection establishment attempts. +# TYPE haproxy_backend_connection_attempts_total counter +haproxy_backend_connection_attempts_total{proxy="prometheus-exporter-internal"} 2 +haproxy_backend_connection_attempts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_connection_attempts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_connection_reuses_total Total number of connection reuses. +# TYPE haproxy_backend_connection_reuses_total counter +haproxy_backend_connection_reuses_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_connection_reuses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_connection_reuses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_bytes_in_total Current total of incoming bytes. +# TYPE haproxy_backend_bytes_in_total counter +haproxy_backend_bytes_in_total{proxy="prometheus-exporter-internal"} 176 +haproxy_backend_bytes_in_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_bytes_in_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_bytes_out_total Current total of outgoing bytes. 
+# TYPE haproxy_backend_bytes_out_total counter +haproxy_backend_bytes_out_total{proxy="prometheus-exporter-internal"} 145918 +haproxy_backend_bytes_out_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_bytes_out_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_queue_time_average_seconds Avg. queue time for last 1024 successful connections. +# TYPE haproxy_backend_queue_time_average_seconds gauge +haproxy_backend_queue_time_average_seconds{proxy="prometheus-exporter-internal"} 0.000000 +haproxy_backend_queue_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +haproxy_backend_queue_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP haproxy_backend_connect_time_average_seconds Avg. connect time for last 1024 successful connections. +# TYPE haproxy_backend_connect_time_average_seconds gauge +haproxy_backend_connect_time_average_seconds{proxy="prometheus-exporter-internal"} 0.000000 +haproxy_backend_connect_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +haproxy_backend_connect_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP haproxy_backend_response_time_average_seconds Avg. response time for last 1024 successful connections. +# TYPE haproxy_backend_response_time_average_seconds gauge +haproxy_backend_response_time_average_seconds{proxy="prometheus-exporter-internal"} 0.001000 +haproxy_backend_response_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +haproxy_backend_response_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP haproxy_backend_total_time_average_seconds Avg. total time for last 1024 successful connections. 
+# TYPE haproxy_backend_total_time_average_seconds gauge +haproxy_backend_total_time_average_seconds{proxy="prometheus-exporter-internal"} 0.001000 +haproxy_backend_total_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +haproxy_backend_total_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP haproxy_backend_max_queue_time_seconds Maximum observed time spent in the queue +# TYPE haproxy_backend_max_queue_time_seconds gauge +haproxy_backend_max_queue_time_seconds{proxy="prometheus-exporter-internal"} 0.000000 +haproxy_backend_max_queue_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +haproxy_backend_max_queue_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP haproxy_backend_max_connect_time_seconds Maximum observed time spent waiting for a connection to complete +# TYPE haproxy_backend_max_connect_time_seconds gauge +haproxy_backend_max_connect_time_seconds{proxy="prometheus-exporter-internal"} 0.001000 +haproxy_backend_max_connect_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +haproxy_backend_max_connect_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP haproxy_backend_max_response_time_seconds Maximum observed time spent waiting for a server response +# TYPE haproxy_backend_max_response_time_seconds gauge +haproxy_backend_max_response_time_seconds{proxy="prometheus-exporter-internal"} 0.038000 +haproxy_backend_max_response_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +haproxy_backend_max_response_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP haproxy_backend_max_total_time_seconds Maximum observed total request+response time (request+queue+connect+response+processing) +# TYPE haproxy_backend_max_total_time_seconds gauge +haproxy_backend_max_total_time_seconds{proxy="prometheus-exporter-internal"} 0.038000 +haproxy_backend_max_total_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +haproxy_backend_max_total_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP haproxy_backend_requests_denied_total Total number of denied requests. +# TYPE haproxy_backend_requests_denied_total counter +haproxy_backend_requests_denied_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_requests_denied_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_requests_denied_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_responses_denied_total Total number of denied responses. +# TYPE haproxy_backend_responses_denied_total counter +haproxy_backend_responses_denied_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_responses_denied_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_responses_denied_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_connection_errors_total Total number of connection errors. 
+# TYPE haproxy_backend_connection_errors_total counter +haproxy_backend_connection_errors_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_connection_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_connection_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_response_errors_total Total number of response errors. +# TYPE haproxy_backend_response_errors_total counter +haproxy_backend_response_errors_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_response_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_response_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_retry_warnings_total Total number of retry warnings. +# TYPE haproxy_backend_retry_warnings_total counter +haproxy_backend_retry_warnings_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_retry_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_retry_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_redispatch_warnings_total Total number of redispatch warnings. +# TYPE haproxy_backend_redispatch_warnings_total counter +haproxy_backend_redispatch_warnings_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_redispatch_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_redispatch_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_failed_header_rewriting_total Total number of failed header rewriting warnings. +# TYPE haproxy_backend_failed_header_rewriting_total counter +haproxy_backend_failed_header_rewriting_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_failed_header_rewriting_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_failed_header_rewriting_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_client_aborts_total Total number of data transfers aborted by the client. +# TYPE haproxy_backend_client_aborts_total counter +haproxy_backend_client_aborts_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_client_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_client_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_server_aborts_total Total number of data transfers aborted by the server. +# TYPE haproxy_backend_server_aborts_total counter +haproxy_backend_server_aborts_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_server_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_server_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_weight Service weight. 
+# TYPE haproxy_backend_weight gauge +haproxy_backend_weight{proxy="prometheus-exporter-internal"} 1 +haproxy_backend_weight{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_weight{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_active_servers Current number of active servers. +# TYPE haproxy_backend_active_servers gauge +haproxy_backend_active_servers{proxy="prometheus-exporter-internal"} 1 +haproxy_backend_active_servers{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_active_servers{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_backup_servers Current number of backup servers. +# TYPE haproxy_backend_backup_servers gauge +haproxy_backend_backup_servers{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_backup_servers{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_backup_servers{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_check_up_down_total Total number of UP->DOWN transitions. +# TYPE haproxy_backend_check_up_down_total counter +haproxy_backend_check_up_down_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_check_up_down_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 1 +haproxy_backend_check_up_down_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 1 +# HELP haproxy_backend_check_last_change_seconds Number of seconds since the last UP<->DOWN transition. +# TYPE haproxy_backend_check_last_change_seconds gauge +haproxy_backend_check_last_change_seconds{proxy="prometheus-exporter-internal"} 3378 +haproxy_backend_check_last_change_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 3352 +haproxy_backend_check_last_change_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 3349 +# HELP haproxy_backend_downtime_seconds_total Total downtime (in seconds) for the service. +# TYPE haproxy_backend_downtime_seconds_total counter +haproxy_backend_downtime_seconds_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_downtime_seconds_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 3352 +haproxy_backend_downtime_seconds_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 3349 +# HELP haproxy_backend_loadbalanced_total Total number of times a service was selected, either for new sessions, or when redispatching. +# TYPE haproxy_backend_loadbalanced_total counter +haproxy_backend_loadbalanced_total{proxy="prometheus-exporter-internal"} 2 +haproxy_backend_loadbalanced_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_loadbalanced_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_http_requests_total Total number of HTTP requests received. 
+# TYPE haproxy_backend_http_requests_total counter +haproxy_backend_http_requests_total{proxy="prometheus-exporter-internal"} 2 +haproxy_backend_http_requests_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_http_requests_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_http_responses_total Total number of HTTP responses. +# TYPE haproxy_backend_http_responses_total counter +haproxy_backend_http_responses_total{proxy="prometheus-exporter-internal",code="1xx"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="1xx"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="1xx"} 0 +haproxy_backend_http_responses_total{proxy="prometheus-exporter-internal",code="2xx"} 2 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="2xx"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="2xx"} 0 +haproxy_backend_http_responses_total{proxy="prometheus-exporter-internal",code="3xx"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="3xx"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="3xx"} 0 +haproxy_backend_http_responses_total{proxy="prometheus-exporter-internal",code="4xx"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="4xx"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="4xx"} 0 +haproxy_backend_http_responses_total{proxy="prometheus-exporter-internal",code="5xx"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="5xx"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="5xx"} 0 +haproxy_backend_http_responses_total{proxy="prometheus-exporter-internal",code="other"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="other"} 0 +haproxy_backend_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="other"} 0 +# HELP haproxy_backend_http_cache_lookups_total Total number of HTTP cache lookups. +# TYPE haproxy_backend_http_cache_lookups_total counter +haproxy_backend_http_cache_lookups_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_http_cache_lookups_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_http_cache_lookups_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_http_cache_hits_total Total number of HTTP cache hits. 
+# TYPE haproxy_backend_http_cache_hits_total counter +haproxy_backend_http_cache_hits_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_http_cache_hits_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_http_cache_hits_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_http_comp_bytes_in_total Total number of HTTP response bytes fed to the compressor. +# TYPE haproxy_backend_http_comp_bytes_in_total counter +haproxy_backend_http_comp_bytes_in_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_http_comp_bytes_in_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_http_comp_bytes_in_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_http_comp_bytes_out_total Total number of HTTP response bytes emitted by the compressor. +# TYPE haproxy_backend_http_comp_bytes_out_total counter +haproxy_backend_http_comp_bytes_out_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_http_comp_bytes_out_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_http_comp_bytes_out_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_http_comp_bytes_bypassed_total Total number of bytes that bypassed the HTTP compressor (CPU/BW limit). +# TYPE haproxy_backend_http_comp_bytes_bypassed_total counter +haproxy_backend_http_comp_bytes_bypassed_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_http_comp_bytes_bypassed_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_http_comp_bytes_bypassed_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_backend_http_comp_responses_total Total number of HTTP responses that were compressed. +# TYPE haproxy_backend_http_comp_responses_total counter +haproxy_backend_http_comp_responses_total{proxy="prometheus-exporter-internal"} 0 +haproxy_backend_http_comp_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +haproxy_backend_http_comp_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP haproxy_server_status Current status of the service (frontend: 0=STOP, 1=UP, 2=FULL - backend: 0=DOWN, 1=UP - server: 0=DOWN, 1=UP, 2=MAINT, 3=DRAIN, 4=NOLB). +# TYPE haproxy_server_status gauge +haproxy_server_status{proxy="prometheus-exporter-internal",server="prometheus-internal"} 1 +haproxy_server_status{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_status{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_status{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_status{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_current_sessions Current number of active sessions. 
+# TYPE haproxy_server_current_sessions gauge +haproxy_server_current_sessions{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_current_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_current_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_current_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_current_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_max_sessions Maximum observed number of active sessions. +# TYPE haproxy_server_max_sessions gauge +haproxy_server_max_sessions{proxy="prometheus-exporter-internal",server="prometheus-internal"} 1 +haproxy_server_max_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_max_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_max_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_max_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_limit_sessions Configured session limit. +# TYPE haproxy_server_limit_sessions gauge +haproxy_server_limit_sessions{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_limit_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_limit_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_limit_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_limit_sessions{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_sessions_total Total number of sessions. +# TYPE haproxy_server_sessions_total counter +haproxy_server_sessions_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 2 +haproxy_server_sessions_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_sessions_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_sessions_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_sessions_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_max_session_rate Maximum observed number of sessions per second. 
+# TYPE haproxy_server_max_session_rate gauge +haproxy_server_max_session_rate{proxy="prometheus-exporter-internal",server="prometheus-internal"} 1 +haproxy_server_max_session_rate{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_max_session_rate{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_max_session_rate{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_max_session_rate{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_last_session_seconds Number of seconds since last session assigned to server/backend. +# TYPE haproxy_server_last_session_seconds gauge +haproxy_server_last_session_seconds{proxy="prometheus-exporter-internal",server="prometheus-internal"} 1829 +haproxy_server_last_session_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} -1 +haproxy_server_last_session_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} -1 +haproxy_server_last_session_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} -1 +haproxy_server_last_session_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} -1 +# HELP haproxy_server_current_queue Current number of queued requests. +# TYPE haproxy_server_current_queue gauge +haproxy_server_current_queue{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_current_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_current_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_current_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_current_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_max_queue Maximum observed number of queued requests. +# TYPE haproxy_server_max_queue gauge +haproxy_server_max_queue{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_max_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_max_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_max_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_max_queue{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_queue_limit Configured maxqueue for the server (0 meaning no limit). 
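+# NOTE: haproxy reports -1 for last_session_seconds (above) when no session
+# has ever been assigned to the server or backend.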
+# TYPE haproxy_server_queue_limit gauge +haproxy_server_queue_limit{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_queue_limit{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_queue_limit{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_queue_limit{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_queue_limit{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_bytes_in_total Current total of incoming bytes. +# TYPE haproxy_server_bytes_in_total counter +haproxy_server_bytes_in_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 176 +haproxy_server_bytes_in_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_bytes_in_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_bytes_in_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_bytes_in_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_bytes_out_total Current total of outgoing bytes. +# TYPE haproxy_server_bytes_out_total counter +haproxy_server_bytes_out_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 145918 +haproxy_server_bytes_out_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_bytes_out_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_bytes_out_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_bytes_out_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_queue_time_average_seconds Avg. queue time for last 1024 successful connections. 
+# TYPE haproxy_server_queue_time_average_seconds gauge +haproxy_server_queue_time_average_seconds{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0.000000 +haproxy_server_queue_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_queue_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +haproxy_server_queue_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_queue_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP haproxy_server_connect_time_average_seconds Avg. connect time for last 1024 successful connections. +# TYPE haproxy_server_connect_time_average_seconds gauge +haproxy_server_connect_time_average_seconds{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0.000000 +haproxy_server_connect_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_connect_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +haproxy_server_connect_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_connect_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP haproxy_server_response_time_average_seconds Avg. response time for last 1024 successful connections. +# TYPE haproxy_server_response_time_average_seconds gauge +haproxy_server_response_time_average_seconds{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0.001000 +haproxy_server_response_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_response_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +haproxy_server_response_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_response_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP haproxy_server_total_time_average_seconds Avg. total time for last 1024 successful connections. 
+# TYPE haproxy_server_total_time_average_seconds gauge +haproxy_server_total_time_average_seconds{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0.001000 +haproxy_server_total_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_total_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +haproxy_server_total_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_total_time_average_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP haproxy_server_max_queue_time_seconds Maximum observed time spent in the queue +# TYPE haproxy_server_max_queue_time_seconds gauge +haproxy_server_max_queue_time_seconds{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0.000000 +haproxy_server_max_queue_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_max_queue_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +haproxy_server_max_queue_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_max_queue_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP haproxy_server_max_connect_time_seconds Maximum observed time spent waiting for a connection to complete +# TYPE haproxy_server_max_connect_time_seconds gauge +haproxy_server_max_connect_time_seconds{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0.001000 +haproxy_server_max_connect_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_max_connect_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +haproxy_server_max_connect_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_max_connect_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP haproxy_server_max_response_time_seconds Maximum observed time spent waiting for a server response +# TYPE haproxy_server_max_response_time_seconds gauge +haproxy_server_max_response_time_seconds{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0.038000 +haproxy_server_max_response_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_max_response_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 
+haproxy_server_max_response_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_max_response_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP haproxy_server_max_total_time_seconds Maximum observed total request+response time (request+queue+connect+response+processing) +# TYPE haproxy_server_max_total_time_seconds gauge +haproxy_server_max_total_time_seconds{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0.038000 +haproxy_server_max_total_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_max_total_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +haproxy_server_max_total_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +haproxy_server_max_total_time_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP haproxy_server_connection_attempts_total Total number of connection establishment attempts. +# TYPE haproxy_server_connection_attempts_total counter +haproxy_server_connection_attempts_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 2 +haproxy_server_connection_attempts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_connection_attempts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_connection_attempts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_connection_attempts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_connection_reuses_total Total number of connection reuses. +# TYPE haproxy_server_connection_reuses_total counter +haproxy_server_connection_reuses_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_connection_reuses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_connection_reuses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_connection_reuses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_connection_reuses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_responses_denied_total Total number of denied responses. 
+# TYPE haproxy_server_responses_denied_total counter +haproxy_server_responses_denied_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_responses_denied_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_responses_denied_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_responses_denied_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_responses_denied_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_connection_errors_total Total number of connection errors. +# TYPE haproxy_server_connection_errors_total counter +haproxy_server_connection_errors_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_connection_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_connection_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_connection_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_connection_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_response_errors_total Total number of response errors. +# TYPE haproxy_server_response_errors_total counter +haproxy_server_response_errors_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_response_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_response_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_response_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_response_errors_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_retry_warnings_total Total number of retry warnings. 
+# TYPE haproxy_server_retry_warnings_total counter +haproxy_server_retry_warnings_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_retry_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_retry_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_retry_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_retry_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_redispatch_warnings_total Total number of redispatch warnings. +# TYPE haproxy_server_redispatch_warnings_total counter +haproxy_server_redispatch_warnings_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_redispatch_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_redispatch_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_redispatch_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_redispatch_warnings_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_failed_header_rewriting_total Total number of failed header rewriting warnings. +# TYPE haproxy_server_failed_header_rewriting_total counter +haproxy_server_failed_header_rewriting_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_failed_header_rewriting_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_failed_header_rewriting_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_failed_header_rewriting_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_failed_header_rewriting_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_client_aborts_total Total number of data transfers aborted by the client. 
+# TYPE haproxy_server_client_aborts_total counter +haproxy_server_client_aborts_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_client_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_client_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_client_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_client_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_server_aborts_total Total number of data transfers aborted by the server. +# TYPE haproxy_server_server_aborts_total counter +haproxy_server_server_aborts_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_server_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_server_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_server_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_server_aborts_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_weight Service weight. +# TYPE haproxy_server_weight gauge +haproxy_server_weight{proxy="prometheus-exporter-internal",server="prometheus-internal"} 1 +haproxy_server_weight{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 1 +haproxy_server_weight{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 1 +haproxy_server_weight{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 1 +haproxy_server_weight{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 1 +# HELP haproxy_server_check_failures_total Total number of failed check (Only counts checks failed when the server is up). 
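+# NOTE: the nonzero check-failure and downtime counters below are
+# consistent with both members being DOWN (haproxy_server_status 0) when
+# this sample was captured.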
+# TYPE haproxy_server_check_failures_total counter +haproxy_server_check_failures_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_check_failures_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3 +haproxy_server_check_failures_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 3 +haproxy_server_check_failures_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3 +haproxy_server_check_failures_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 3 +# HELP haproxy_server_check_up_down_total Total number of UP->DOWN transitions. +# TYPE haproxy_server_check_up_down_total counter +haproxy_server_check_up_down_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_check_up_down_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 1 +haproxy_server_check_up_down_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 1 +haproxy_server_check_up_down_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 1 +haproxy_server_check_up_down_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 1 +# HELP haproxy_server_downtime_seconds_total Total downtime (in seconds) for the service. +# TYPE haproxy_server_downtime_seconds_total counter +haproxy_server_downtime_seconds_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_downtime_seconds_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3353 +haproxy_server_downtime_seconds_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 3352 +haproxy_server_downtime_seconds_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3350 +haproxy_server_downtime_seconds_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 3349 +# HELP haproxy_server_check_last_change_seconds Number of seconds since the last UP<->DOWN transition. 
+# TYPE haproxy_server_check_last_change_seconds gauge +haproxy_server_check_last_change_seconds{proxy="prometheus-exporter-internal",server="prometheus-internal"} 3378 +haproxy_server_check_last_change_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3353 +haproxy_server_check_last_change_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 3352 +haproxy_server_check_last_change_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3350 +haproxy_server_check_last_change_seconds{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 3349 +# HELP haproxy_server_current_throttle Current throttle percentage for the server, when slowstart is active, or no value if not in slowstart. +# TYPE haproxy_server_current_throttle gauge +haproxy_server_current_throttle{proxy="prometheus-exporter-internal",server="prometheus-internal"} 100 +haproxy_server_current_throttle{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 100 +haproxy_server_current_throttle{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 100 +haproxy_server_current_throttle{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 100 +haproxy_server_current_throttle{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 100 +# HELP haproxy_server_loadbalanced_total Total number of times a service was selected, either for new sessions, or when redispatching. +# TYPE haproxy_server_loadbalanced_total counter +haproxy_server_loadbalanced_total{proxy="prometheus-exporter-internal",server="prometheus-internal"} 2 +haproxy_server_loadbalanced_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_loadbalanced_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_loadbalanced_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_loadbalanced_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_http_responses_total Total number of HTTP responses. 
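+# NOTE: HTTP responses below are bucketed by status-code class through the
+# code label ("1xx" through "5xx", plus "other").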
+# TYPE haproxy_server_http_responses_total counter +haproxy_server_http_responses_total{proxy="prometheus-exporter-internal",server="prometheus-internal",code="1xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="1xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f",code="1xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="1xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f",code="1xx"} 0 +haproxy_server_http_responses_total{proxy="prometheus-exporter-internal",server="prometheus-internal",code="2xx"} 2 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="2xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f",code="2xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="2xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f",code="2xx"} 0 +haproxy_server_http_responses_total{proxy="prometheus-exporter-internal",server="prometheus-internal",code="3xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="3xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f",code="3xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="3xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f",code="3xx"} 0 +haproxy_server_http_responses_total{proxy="prometheus-exporter-internal",server="prometheus-internal",code="4xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="4xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f",code="4xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="4xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f",code="4xx"} 0 +haproxy_server_http_responses_total{proxy="prometheus-exporter-internal",server="prometheus-internal",code="5xx"} 0 
+haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="5xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f",code="5xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="5xx"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f",code="5xx"} 0 +haproxy_server_http_responses_total{proxy="prometheus-exporter-internal",server="prometheus-internal",code="other"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="other"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f",code="other"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="other"} 0 +haproxy_server_http_responses_total{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f",code="other"} 0 +# HELP haproxy_server_idle_connections_current Current number of idle connections available for reuse +# TYPE haproxy_server_idle_connections_current gauge +haproxy_server_idle_connections_current{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_idle_connections_current{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_idle_connections_current{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_idle_connections_current{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_idle_connections_current{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP haproxy_server_idle_connections_limit Limit on the number of available idle connections +# TYPE haproxy_server_idle_connections_limit gauge +haproxy_server_idle_connections_limit{proxy="prometheus-exporter-internal",server="prometheus-internal"} 0 +haproxy_server_idle_connections_limit{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_idle_connections_limit{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",server="88ee5795-d594-478b-8591-455acf30164f"} 0 +haproxy_server_idle_connections_limit{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +haproxy_server_idle_connections_limit{proxy="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",server="88ee5795-d594-478b-8591-455acf30164f"} 0 diff --git a/octavia/tests/common/sample_network_data.py b/octavia/tests/common/sample_network_data.py 
new file mode 100644 index 0000000000..891492fc53 --- /dev/null +++ b/octavia/tests/common/sample_network_data.py @@ -0,0 +1,198 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import collections + + +def create_iproute_ipv4_address(ip_address, broadcast_address, interface_name): + """Returns a netlink/iproute (pyroute2) IPv4 address.""" + Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay')) + return ( + {'family': 2, 'prefixlen': 24, 'flags': 0, 'scope': 0, 'index': 2, + 'attrs': [('IFA_ADDRESS', ip_address), ('IFA_LOCAL', ip_address), + ('IFA_BROADCAST', broadcast_address), + ('IFA_LABEL', interface_name), ('IFA_FLAGS', 0), + ('IFA_CACHEINFO', {'ifa_preferred': 49256, + 'ifa_valid': 49256, 'cstamp': 1961, + 'tstamp': 73441020})], + 'header': {'length': 88, 'type': 20, 'flags': 2, + 'sequence_number': 258, 'pid': 7590, 'error': None, + 'stats': Stats(qsize=0, delta=0, delay=0)}, + 'event': 'RTM_NEWADDR'},) + + +def create_iproute_ipv6_address(ip_address, interface_name): + """Returns a netlink/iproute (pyroute2) IPv6 address.""" + Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay')) + return ( + {'family': 10, 'prefixlen': 64, 'flags': 0, 'scope': 0, 'index': 2, + 'attrs': [('IFA_CACHEINFO', {'ifa_preferred': 604503, + 'ifa_valid': 2591703, 'cstamp': 2038, + 'tstamp': 77073215}), + ('IFA_ADDRESS', '2001:db8:ffff:ffff:ffff:ffff:ffff:ffff'), + ('IFA_FLAGS', 768)], + 'header': {'length': 72, 'type': 20, 'flags': 2, + 'sequence_number': 257, 'pid': 7590, 'error': None, + 'stats': Stats(qsize=0, delta=0, delay=0)}, + 'event': 'RTM_NEWADDR'}, + {'family': 10, 'prefixlen': 64, 'flags': 0, 'scope': 0, 'index': 2, + 'attrs': [('IFA_CACHEINFO', {'ifa_preferred': 604503, + 'ifa_valid': 2591703, 'cstamp': 2038, + 'tstamp': 77073215}), + ('IFA_ADDRESS', ip_address), ('IFA_FLAGS', 768)], + 'header': {'length': 72, 'type': 20, 'flags': 2, + 'sequence_number': 257, 'pid': 7590, 'error': None, + 'stats': Stats(qsize=0, delta=0, delay=0)}, + 'event': 'RTM_NEWADDR'},) + + +def create_iproute_interface(interface_name): + """Returns a netlink/iproute (pyroute2) interface.""" + Stats = collections.namedtuple('Stats', ('qsize', 'delta', 'delay')) + return [{ + 'family': 0, '__align': (), 'ifi_type': 1, 'index': 2, 'flags': 69699, + 'change': 0, + 'attrs': [('IFLA_TXQLEN', 1000), ('IFLA_IFNAME', interface_name), + ('IFLA_OPERSTATE', 'UP'), ('IFLA_LINKMODE', 0), + ('IFLA_MTU', 1500), ('IFLA_GROUP', 0), + ('IFLA_PROMISCUITY', 0), ('IFLA_NUM_TX_QUEUES', 1), + ('IFLA_GSO_MAX_SEGS', 65535), + ('IFLA_GSO_MAX_SIZE', 65536), ('IFLA_NUM_RX_QUEUES', 1), + ('IFLA_CARRIER', 1), ('IFLA_QDISC', 'fq_codel'), + ('IFLA_CARRIER_CHANGES', 2), ('IFLA_PROTO_DOWN', 0), + ('IFLA_CARRIER_UP_COUNT', 1), + ('IFLA_CARRIER_DOWN_COUNT', 1), + ('IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, + 'irq': 0, 'dma': 0, 'port': 0}), + ('IFLA_ADDRESS', '52:54:00:cf:37:9e'), + ('IFLA_BROADCAST', 'ff:ff:ff:ff:ff:ff'), + ('IFLA_STATS64', { + 'rx_packets': 
756091, 'tx_packets': 780292, + 'rx_bytes': 234846748, 'tx_bytes': 208583687, + 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, + 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, + 'rx_length_errors': 0, 'rx_over_errors': 0, + 'rx_crc_errors': 0, 'rx_frame_errors': 0, + 'rx_fifo_errors': 0, 'rx_missed_errors': 0, + 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, + 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, + 'tx_window_errors': 0, 'rx_compressed': 0, + 'tx_compressed': 0}), + ('IFLA_STATS', { + 'rx_packets': 756091, 'tx_packets': 780292, + 'rx_bytes': 234846748, 'tx_bytes': 208583687, + 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, + 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, + 'rx_length_errors': 0, 'rx_over_errors': 0, + 'rx_crc_errors': 0, 'rx_frame_errors': 0, + 'rx_fifo_errors': 0, 'rx_missed_errors': 0, + 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, + 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, + 'tx_window_errors': 0, 'rx_compressed': 0, + 'tx_compressed': 0}), + ('IFLA_XDP', '05:00:02:00:00:00:00:00'), + ('IFLA_AF_SPEC', { + 'attrs': [ + ('AF_INET', { + 'dummy': 65664, 'forwarding': 1, + 'mc_forwarding': 0, 'proxy_arp': 0, + 'accept_redirects': 1, + 'secure_redirects': 1, + 'send_redirects': 1, 'shared_media': 1, + 'rp_filter': 1, 'accept_source_route': 1, + 'bootp_relay': 0, 'log_martians': 0, + 'tag': 0, 'arpfilter': 0, 'medium_id': 0, + 'noxfrm': 0, 'nopolicy': 0, + 'force_igmp_version': 0, 'arp_announce': 0, + 'arp_ignore': 0, 'promote_secondaries': 0, + 'arp_accept': 0, 'arp_notify': 0, + 'accept_local': 0, 'src_vmark': 0, + 'proxy_arp_pvlan': 0, 'route_localnet': 0, + 'igmpv2_unsolicited_report_interval': 10000, + 'igmpv3_unsolicited_report_interval': 1000}), + ('AF_INET6', { + 'attrs': [('IFLA_INET6_FLAGS', 2147483648), + ('IFLA_INET6_CACHEINFO', { + 'max_reasm_len': 65535, + 'tstamp': 1859, + 'reachable_time': 30708, + 'retrans_time': 1000}), + ('IFLA_INET6_CONF', { + 'forwarding': 1, 'hop_limit': 64, + 'mtu': 1500, 'accept_ra': 2, + 'accept_redirects': 1, + 'autoconf': 1, + 'dad_transmits': 1, + 'router_solicitations': 4294967295, + 'router_solicitation_interval': + 4000, + 'router_solicitation_delay': 1000, + 'use_tempaddr': 0, + 'temp_valid_lft': 604800, + 'temp_preferred_lft': 86400, + 'regen_max_retry': 3, + 'max_desync_factor': 600, + 'max_addresses': 16, + 'force_mld_version': 0, + 'accept_ra_defrtr': 1, + 'accept_ra_pinfo': 1, + 'accept_ra_rtr_pref': 1, + 'router_probe_interval': 60000, + 'accept_ra_rt_info_max_plen': 0, + 'proxy_ndp': 0, + 'optimistic_dad': 0, + 'accept_source_route': 0, + 'mc_forwarding': 0, + 'disable_ipv6': 0, + 'accept_dad': 1, 'force_tllao': 0, + 'ndisc_notify': 0}), + ('IFLA_INET6_STATS', { + 'num': 37, 'inpkts': 57817, + 'inoctets': 144065857, + 'indelivers': 36758, + 'outforwdatagrams': 0, + 'outpkts': 35062, + 'outoctets': 4796485, + 'inhdrerrors': 0, + 'intoobigerrors': 0, + 'innoroutes': 0, 'inaddrerrors': 0, + 'inunknownprotos': 0, + 'intruncatedpkts': 0, + 'indiscards': 0, + 'outdiscards': 0, 'outnoroutes': 0, + 'reasmtimeout': 0, 'reasmreqds': 0, + 'reasmoks': 0, 'reasmfails': 0, + 'fragoks': 0, 'fragfails': 0, + 'fragcreates': 0, + 'inmcastpkts': 23214, + 'outmcastpkts': 6546, + 'inbcastpkts': 0, + 'outbcastpkts': 0, + 'inmcastoctets': 2255059, + 'outmcastoctets': 589090, + 'inbcastoctets': 0, + 'outbcastoctets': 0, + 'csumerrors': 0, + 'noectpkts': 57860, + 'ect1pkts': 0, 'ect0pkts': 0, + 'cepkts': 0}), + ('IFLA_INET6_ICMP6STATS', { + 'num': 6, 'inmsgs': 2337, + 'inerrors': 0, 'outmsgs': 176, + 'outerrors': 0, 
'csumerrors': 0}), + ('IFLA_INET6_TOKEN', '::'), + ('IFLA_INET6_ADDR_GEN_MODE', 0)]})]})], + 'header': {'length': 1304, 'type': 16, 'flags': 0, + 'sequence_number': 261, 'pid': 7590, 'error': None, + 'stats': Stats(qsize=0, delta=0, delay=0)}, + 'state': 'up', 'event': 'RTM_NEWLINK'}] diff --git a/octavia/tests/common/sample_octavia_prometheus b/octavia/tests/common/sample_octavia_prometheus new file mode 100644 index 0000000000..a7ddb7d536 --- /dev/null +++ b/octavia/tests/common/sample_octavia_prometheus @@ -0,0 +1,695 @@ +# HELP octavia_loadbalancer_cpu Load balancer CPU utilization (percentage). +# TYPE octavia_loadbalancer_cpu gauge +octavia_loadbalancer_cpu 50.0 +# HELP octavia_loadbalancer_memory Load balancer memory utilization (percentage). +# TYPE octavia_loadbalancer_memory gauge +octavia_loadbalancer_memory 23.5 +# HELP octavia_memory_pool_allocated_bytes Total amount of memory allocated in the memory pools (in bytes). +# TYPE octavia_memory_pool_allocated_bytes gauge +octavia_memory_pool_allocated_bytes 111616 +# HELP octavia_memory_pool_used_bytes Total amount of memory used in the memory pools (in bytes). +# TYPE octavia_memory_pool_used_bytes gauge +octavia_memory_pool_used_bytes 78848 +# HELP octavia_memory_pool_failures_total Total number of failed memory pool allocations. +# TYPE octavia_memory_pool_failures_total counter +octavia_memory_pool_failures_total 0 +# HELP octavia_loadbalancer_max_connections Maximum number of concurrent connections. +# TYPE octavia_loadbalancer_max_connections gauge +octavia_loadbalancer_max_connections 150000 +# HELP octavia_loadbalancer_current_connections Number of active sessions. +# TYPE octavia_loadbalancer_current_connections gauge +octavia_loadbalancer_current_connections 1 +# HELP octavia_loadbalancer_connections_total Total number of created sessions. +# TYPE octavia_loadbalancer_connections_total counter +octavia_loadbalancer_connections_total 680 +# HELP octavia_loadbalancer_requests_total Total number of requests (TCP or HTTP). +# TYPE octavia_loadbalancer_requests_total counter +octavia_loadbalancer_requests_total 680 +# HELP octavia_loadbalancer_max_ssl_connections Configured maximum number of concurrent SSL connections. +# TYPE octavia_loadbalancer_max_ssl_connections gauge +octavia_loadbalancer_max_ssl_connections 0 +# HELP octavia_loadbalancer_current_ssl_connections Number of active SSL connections. +# TYPE octavia_loadbalancer_current_ssl_connections gauge +octavia_loadbalancer_current_ssl_connections 0 +# HELP octavia_loadbalancer_ssl_connections_total Total number of opened SSL connections. +# TYPE octavia_loadbalancer_ssl_connections_total counter +octavia_loadbalancer_ssl_connections_total 0 +# HELP octavia_loadbalancer_current_connection_rate Current number of connections per second over last elapsed second. +# TYPE octavia_loadbalancer_current_connection_rate gauge +octavia_loadbalancer_current_connection_rate 1 +# HELP octavia_loadbalancer_limit_connection_rate Configured maximum number of connections per second. +# TYPE octavia_loadbalancer_limit_connection_rate gauge +octavia_loadbalancer_limit_connection_rate 0 +# HELP octavia_loadbalancer_max_connection_rate Maximum observed number of connections per second. +# TYPE octavia_loadbalancer_max_connection_rate gauge +octavia_loadbalancer_max_connection_rate 2 +# HELP octavia_loadbalancer_current_session_rate Current number of sessions per second over last elapsed second. 
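+# NOTE: the octavia_* metrics in this sample appear to be the haproxy
+# process and frontend statistics re-exposed under Octavia names, keyed by
+# listener and pool UUIDs rather than haproxy proxy names; the HELP strings
+# match their haproxy counterparts.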
+# TYPE octavia_loadbalancer_current_session_rate gauge +octavia_loadbalancer_current_session_rate 1 +# HELP octavia_loadbalancer_limit_session_rate Configured maximum number of sessions per second. +# TYPE octavia_loadbalancer_limit_session_rate gauge +octavia_loadbalancer_limit_session_rate 0 +# HELP octavia_loadbalancer_max_session_rate Maximum observed number of sessions per second. +# TYPE octavia_loadbalancer_max_session_rate gauge +octavia_loadbalancer_max_session_rate 2 +# HELP octavia_loadbalancer_current_ssl_rate Current number of SSL sessions per second over last elapsed second. +# TYPE octavia_loadbalancer_current_ssl_rate gauge +octavia_loadbalancer_current_ssl_rate 0 +# HELP octavia_loadbalancer_limit_ssl_rate Configured maximum number of SSL sessions per second. +# TYPE octavia_loadbalancer_limit_ssl_rate gauge +octavia_loadbalancer_limit_ssl_rate 0 +# HELP octavia_loadbalancer_max_ssl_rate Maximum observed number of SSL sessions per second. +# TYPE octavia_loadbalancer_max_ssl_rate gauge +octavia_loadbalancer_max_ssl_rate 0 +# HELP octavia_loadbalancer_current_frontend_ssl_key_rate Current frontend SSL Key computation per second over last elapsed second. +# TYPE octavia_loadbalancer_current_frontend_ssl_key_rate gauge +octavia_loadbalancer_current_frontend_ssl_key_rate 0 +# HELP octavia_loadbalancer_max_frontend_ssl_key_rate Maximum observed frontend SSL Key computation per second. +# TYPE octavia_loadbalancer_max_frontend_ssl_key_rate gauge +octavia_loadbalancer_max_frontend_ssl_key_rate 0 +# HELP octavia_loadbalancer_frontend_ssl_reuse SSL session reuse ratio (percent). +# TYPE octavia_loadbalancer_frontend_ssl_reuse gauge +octavia_loadbalancer_frontend_ssl_reuse 0 +# HELP octavia_loadbalancer_current_backend_ssl_key_rate Current backend SSL Key computation per second over last elapsed second. +# TYPE octavia_loadbalancer_current_backend_ssl_key_rate gauge +octavia_loadbalancer_current_backend_ssl_key_rate 0 +# HELP octavia_loadbalancer_max_backend_ssl_key_rate Maximum observed backend SSL Key computation per second. +# TYPE octavia_loadbalancer_max_backend_ssl_key_rate gauge +octavia_loadbalancer_max_backend_ssl_key_rate 0 +# HELP octavia_loadbalancer_ssl_cache_lookups_total Total number of SSL session cache lookups. +# TYPE octavia_loadbalancer_ssl_cache_lookups_total counter +octavia_loadbalancer_ssl_cache_lookups_total 0 +# HELP octavia_loadbalancer_ssl_cache_misses_total Total number of SSL session cache misses. +# TYPE octavia_loadbalancer_ssl_cache_misses_total counter +octavia_loadbalancer_ssl_cache_misses_total 0 +# HELP octavia_loadbalancer_http_comp_bytes_in_total Number of bytes per second over last elapsed second, before http compression. +# TYPE octavia_loadbalancer_http_comp_bytes_in_total counter +octavia_loadbalancer_http_comp_bytes_in_total 0 +# HELP octavia_loadbalancer_http_comp_bytes_out_total Number of bytes per second over last elapsed second, after http compression. +# TYPE octavia_loadbalancer_http_comp_bytes_out_total counter +octavia_loadbalancer_http_comp_bytes_out_total 0 +# HELP octavia_loadbalancer_limit_http_comp Configured maximum input compression rate in bytes. +# TYPE octavia_loadbalancer_limit_http_comp gauge +octavia_loadbalancer_limit_http_comp 0 +# HELP octavia_loadbalancer_listeners Current number of active listeners. +# TYPE octavia_loadbalancer_listeners gauge +octavia_loadbalancer_listeners 6 +# HELP octavia_loadbalancer_dropped_logs_total Total number of dropped logs. 
+# TYPE octavia_loadbalancer_dropped_logs_total counter +octavia_loadbalancer_dropped_logs_total 0 +# HELP octavia_listener_status Current status of the listener. +# TYPE octavia_listener_status gauge +octavia_listener_status{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 1 +octavia_listener_status{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 1 +octavia_listener_status{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 1 +# HELP octavia_listener_current_sessions Current number of active sessions. +# TYPE octavia_listener_current_sessions gauge +octavia_listener_current_sessions{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_current_sessions{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_current_sessions{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_max_sessions Maximum observed number of active sessions. +# TYPE octavia_listener_max_sessions gauge +octavia_listener_max_sessions{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_max_sessions{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 1 +octavia_listener_max_sessions{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_limit_sessions Configured session limit. +# TYPE octavia_listener_limit_sessions gauge +octavia_listener_limit_sessions{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 50000 +octavia_listener_limit_sessions{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 50000 +octavia_listener_limit_sessions{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 50000 +# HELP octavia_listener_sessions_total Total number of sessions. +# TYPE octavia_listener_sessions_total counter +octavia_listener_sessions_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_sessions_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 2 +octavia_listener_sessions_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_limit_session_rate Configured limit on new sessions per second. +# TYPE octavia_listener_limit_session_rate gauge +octavia_listener_limit_session_rate{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_limit_session_rate{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_limit_session_rate{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_max_session_rate Maximum observed number of sessions per second. +# TYPE octavia_listener_max_session_rate gauge +octavia_listener_max_session_rate{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_max_session_rate{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 1 +octavia_listener_max_session_rate{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_connections_rate_max Maximum observed number of connections per second. +# TYPE octavia_listener_connections_rate_max gauge +octavia_listener_connections_rate_max{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_connections_rate_max{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 1 +octavia_listener_connections_rate_max{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_connections_total Total number of connections. 
+# TYPE octavia_listener_connections_total counter +octavia_listener_connections_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_connections_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 2 +octavia_listener_connections_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_bytes_in_total Current total of incoming bytes. +# TYPE octavia_listener_bytes_in_total counter +octavia_listener_bytes_in_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_bytes_in_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 176 +octavia_listener_bytes_in_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_bytes_out_total Current total of outgoing bytes. +# TYPE octavia_listener_bytes_out_total counter +octavia_listener_bytes_out_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_bytes_out_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 145918 +octavia_listener_bytes_out_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_requests_denied_total Total number of denied requests. +# TYPE octavia_listener_requests_denied_total counter +octavia_listener_requests_denied_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_requests_denied_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_requests_denied_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_responses_denied_total Total number of denied responses. +# TYPE octavia_listener_responses_denied_total counter +octavia_listener_responses_denied_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_responses_denied_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_responses_denied_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_request_errors_total Total number of request errors. +# TYPE octavia_listener_request_errors_total counter +octavia_listener_request_errors_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_request_errors_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_request_errors_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_denied_connections_total Total number of requests denied by connection rules. +# TYPE octavia_listener_denied_connections_total counter +octavia_listener_denied_connections_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_denied_connections_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_denied_connections_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_denied_sessions_total Total number of requests denied by session rules. +# TYPE octavia_listener_denied_sessions_total counter +octavia_listener_denied_sessions_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_denied_sessions_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_denied_sessions_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_failed_header_rewriting_total Total number of failed header rewriting rules. 
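+# NOTE: listener 4919eeb2-b3ed-40c5-8200-057400b83bb7 is the only one with
+# traffic above (2 sessions, 176 bytes in / 145918 bytes out, matching the
+# prometheus-exporter-internal backend in the haproxy sample), which
+# suggests it is the Prometheus endpoint being scraped.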
+# TYPE octavia_listener_failed_header_rewriting_total counter +octavia_listener_failed_header_rewriting_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_failed_header_rewriting_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_failed_header_rewriting_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_http_requests_rate_max Maximum observed number of HTTP requests per second. +# TYPE octavia_listener_http_requests_rate_max gauge +octavia_listener_http_requests_rate_max{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_http_requests_rate_max{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 1 +octavia_listener_http_requests_rate_max{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_http_requests_total Total number of HTTP requests received. +# TYPE octavia_listener_http_requests_total counter +octavia_listener_http_requests_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_http_requests_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 2 +octavia_listener_http_requests_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_http_responses_total Total number of HTTP responses. +# TYPE octavia_listener_http_responses_total counter +octavia_listener_http_responses_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="1xx"} 0 +octavia_listener_http_responses_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="1xx"} 0 +octavia_listener_http_responses_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db",code="1xx"} 0 +octavia_listener_http_responses_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="2xx"} 0 +octavia_listener_http_responses_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="2xx"} 2 +octavia_listener_http_responses_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db",code="2xx"} 0 +octavia_listener_http_responses_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="3xx"} 0 +octavia_listener_http_responses_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="3xx"} 0 +octavia_listener_http_responses_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db",code="3xx"} 0 +octavia_listener_http_responses_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="4xx"} 0 +octavia_listener_http_responses_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="4xx"} 0 +octavia_listener_http_responses_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db",code="4xx"} 0 +octavia_listener_http_responses_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="5xx"} 0 +octavia_listener_http_responses_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="5xx"} 0 +octavia_listener_http_responses_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db",code="5xx"} 0 +octavia_listener_http_responses_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="other"} 0 +octavia_listener_http_responses_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7",code="other"} 0 +octavia_listener_http_responses_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db",code="other"} 0 +# HELP octavia_listener_intercepted_requests_total Total number of intercepted HTTP requests. 
+# TYPE octavia_listener_intercepted_requests_total counter +octavia_listener_intercepted_requests_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_intercepted_requests_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_intercepted_requests_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_http_cache_lookups_total Total number of HTTP cache lookups. +# TYPE octavia_listener_http_cache_lookups_total counter +octavia_listener_http_cache_lookups_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_http_cache_lookups_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_http_cache_lookups_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_http_cache_hits_total Total number of HTTP cache hits. +# TYPE octavia_listener_http_cache_hits_total counter +octavia_listener_http_cache_hits_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_http_cache_hits_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_http_cache_hits_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_http_comp_bytes_in_total Total number of HTTP response bytes fed to the compressor. +# TYPE octavia_listener_http_comp_bytes_in_total counter +octavia_listener_http_comp_bytes_in_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_http_comp_bytes_in_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_http_comp_bytes_in_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_http_comp_bytes_out_total Total number of HTTP response bytes emitted by the compressor. +# TYPE octavia_listener_http_comp_bytes_out_total counter +octavia_listener_http_comp_bytes_out_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_http_comp_bytes_out_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_http_comp_bytes_out_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_http_comp_bytes_bypassed_total Total number of bytes that bypassed the HTTP compressor (CPU/BW limit). +# TYPE octavia_listener_http_comp_bytes_bypassed_total counter +octavia_listener_http_comp_bytes_bypassed_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_http_comp_bytes_bypassed_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_http_comp_bytes_bypassed_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_listener_http_comp_responses_total Total number of HTTP responses that were compressed. +# TYPE octavia_listener_http_comp_responses_total counter +octavia_listener_http_comp_responses_total{listener="1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_listener_http_comp_responses_total{listener="4919eeb2-b3ed-40c5-8200-057400b83bb7"} 0 +octavia_listener_http_comp_responses_total{listener="0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_status Current status of the pool. +# TYPE octavia_pool_status gauge +octavia_pool_status{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_status{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_current_sessions Current number of active sessions. 
+# TYPE octavia_pool_current_sessions gauge +octavia_pool_current_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_current_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_max_sessions Maximum observed number of active sessions. +# TYPE octavia_pool_max_sessions gauge +octavia_pool_max_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_max_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_limit_sessions Configured session limit. +# TYPE octavia_pool_limit_sessions gauge +octavia_pool_limit_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 50000 +octavia_pool_limit_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 50000 +# HELP octavia_pool_sessions_total Total number of sessions. +# TYPE octavia_pool_sessions_total counter +octavia_pool_sessions_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_sessions_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_max_session_rate Maximum observed number of sessions per second. +# TYPE octavia_pool_max_session_rate gauge +octavia_pool_max_session_rate{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_max_session_rate{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_last_session_seconds Number of seconds since last session assigned to a member. +# TYPE octavia_pool_last_session_seconds gauge +octavia_pool_last_session_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} -1 +octavia_pool_last_session_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} -1 +# HELP octavia_pool_current_queue Current number of queued requests. +# TYPE octavia_pool_current_queue gauge +octavia_pool_current_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_current_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_max_queue Maximum observed number of queued requests. +# TYPE octavia_pool_max_queue gauge +octavia_pool_max_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_max_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_connection_attempts_total Total number of connection establishment attempts. +# TYPE octavia_pool_connection_attempts_total counter +octavia_pool_connection_attempts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_connection_attempts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_connection_reuses_total Total number of connection reuses. +# TYPE octavia_pool_connection_reuses_total counter +octavia_pool_connection_reuses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_connection_reuses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_bytes_in_total Current total of incoming bytes. 
+# TYPE octavia_pool_bytes_in_total counter +octavia_pool_bytes_in_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_bytes_in_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_bytes_out_total Current total of outgoing bytes. +# TYPE octavia_pool_bytes_out_total counter +octavia_pool_bytes_out_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_bytes_out_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_queue_time_average_seconds Avg. queue time for last 1024 successful connections. +# TYPE octavia_pool_queue_time_average_seconds gauge +octavia_pool_queue_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +octavia_pool_queue_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP octavia_pool_connect_time_average_seconds Avg. connect time for last 1024 successful connections. +# TYPE octavia_pool_connect_time_average_seconds gauge +octavia_pool_connect_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +octavia_pool_connect_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP octavia_pool_response_time_average_seconds Avg. response time for last 1024 successful connections. +# TYPE octavia_pool_response_time_average_seconds gauge +octavia_pool_response_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +octavia_pool_response_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP octavia_pool_total_time_average_seconds Avg. total time for last 1024 successful connections. +# TYPE octavia_pool_total_time_average_seconds gauge +octavia_pool_total_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +octavia_pool_total_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP octavia_pool_max_queue_time_seconds Maximum observed time spent in the queue +# TYPE octavia_pool_max_queue_time_seconds gauge +octavia_pool_max_queue_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +octavia_pool_max_queue_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP octavia_pool_max_connect_time_seconds Maximum observed time spent waiting for a connection to complete +# TYPE octavia_pool_max_connect_time_seconds gauge +octavia_pool_max_connect_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +octavia_pool_max_connect_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP octavia_pool_max_response_time_seconds Maximum observed time spent waiting for a member response. 
+# TYPE octavia_pool_max_response_time_seconds gauge +octavia_pool_max_response_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +octavia_pool_max_response_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP octavia_pool_max_total_time_seconds Maximum observed total request+response time (request+queue+connect+response+processing) +# TYPE octavia_pool_max_total_time_seconds gauge +octavia_pool_max_total_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0.000000 +octavia_pool_max_total_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0.000000 +# HELP octavia_pool_requests_denied_total Total number of denied requests. +# TYPE octavia_pool_requests_denied_total counter +octavia_pool_requests_denied_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_requests_denied_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_responses_denied_total Total number of denied responses. +# TYPE octavia_pool_responses_denied_total counter +octavia_pool_responses_denied_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_responses_denied_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_connection_errors_total Total number of connection errors. +# TYPE octavia_pool_connection_errors_total counter +octavia_pool_connection_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_connection_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_response_errors_total Total number of response errors. +# TYPE octavia_pool_response_errors_total counter +octavia_pool_response_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_response_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_retry_warnings_total Total number of retry warnings. +# TYPE octavia_pool_retry_warnings_total counter +octavia_pool_retry_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_retry_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_redispatch_warnings_total Total number of redispatch warnings. +# TYPE octavia_pool_redispatch_warnings_total counter +octavia_pool_redispatch_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_redispatch_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_failed_header_rewriting_total Total number of failed header rewriting warnings. +# TYPE octavia_pool_failed_header_rewriting_total counter +octavia_pool_failed_header_rewriting_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_failed_header_rewriting_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_client_aborts_total Total number of data transfers aborted by the client. 
+# TYPE octavia_pool_client_aborts_total counter +octavia_pool_client_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_client_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_member_aborts_total Total number of data transfers aborted by the server. +# TYPE octavia_pool_member_aborts_total counter +octavia_pool_member_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_member_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_active_members Current number of active members. +# TYPE octavia_pool_active_members gauge +octavia_pool_active_members{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_active_members{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_backup_members Current number of backup members. +# TYPE octavia_pool_backup_members gauge +octavia_pool_backup_members{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_backup_members{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_check_up_down_total Total number of UP->DOWN transitions. +# TYPE octavia_pool_check_up_down_total counter +octavia_pool_check_up_down_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 1 +octavia_pool_check_up_down_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 1 +# HELP octavia_pool_check_last_change_seconds Number of seconds since the last UP<->DOWN transition. +# TYPE octavia_pool_check_last_change_seconds gauge +octavia_pool_check_last_change_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 3352 +octavia_pool_check_last_change_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 3349 +# HELP octavia_pool_downtime_seconds_total Total downtime (in seconds) for the pool. +# TYPE octavia_pool_downtime_seconds_total counter +octavia_pool_downtime_seconds_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 3352 +octavia_pool_downtime_seconds_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 3349 +# HELP octavia_pool_loadbalanced_total Total number of times a pool was selected, either for new sessions, or when redispatching. +# TYPE octavia_pool_loadbalanced_total counter +octavia_pool_loadbalanced_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_loadbalanced_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_http_requests_total Total number of HTTP requests received. +# TYPE octavia_pool_http_requests_total counter +octavia_pool_http_requests_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_http_requests_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_http_responses_total Total number of HTTP responses. 
+# TYPE octavia_pool_http_responses_total counter +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="1xx"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="1xx"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="2xx"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="2xx"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="3xx"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="3xx"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="4xx"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="4xx"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="5xx"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="5xx"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",code="other"} 0 +octavia_pool_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",code="other"} 0 +# HELP octavia_pool_http_cache_lookups_total Total number of HTTP cache lookups. +# TYPE octavia_pool_http_cache_lookups_total counter +octavia_pool_http_cache_lookups_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_http_cache_lookups_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_http_cache_hits_total Total number of HTTP cache hits. +# TYPE octavia_pool_http_cache_hits_total counter +octavia_pool_http_cache_hits_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_http_cache_hits_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_http_comp_bytes_in_total Total number of HTTP response bytes fed to the compressor. +# TYPE octavia_pool_http_comp_bytes_in_total counter +octavia_pool_http_comp_bytes_in_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_http_comp_bytes_in_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_http_comp_bytes_out_total Total number of HTTP response bytes emitted by the compressor. +# TYPE octavia_pool_http_comp_bytes_out_total counter +octavia_pool_http_comp_bytes_out_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_http_comp_bytes_out_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_http_comp_bytes_bypassed_total Total number of bytes that bypassed the HTTP compressor (CPU/BW limit). 
+# TYPE octavia_pool_http_comp_bytes_bypassed_total counter +octavia_pool_http_comp_bytes_bypassed_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_http_comp_bytes_bypassed_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_pool_http_comp_responses_total Total number of HTTP responses that were compressed. +# TYPE octavia_pool_http_comp_responses_total counter +octavia_pool_http_comp_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74"} 0 +octavia_pool_http_comp_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db"} 0 +# HELP octavia_member_status Current status of the member. +# TYPE octavia_member_status gauge +octavia_member_status{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_status{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_status{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_status{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_current_sessions Current number of active sessions. +# TYPE octavia_member_current_sessions gauge +octavia_member_current_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_current_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_current_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_current_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_max_sessions Maximum observed number of active sessions. +# TYPE octavia_member_max_sessions gauge +octavia_member_max_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_max_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_max_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_max_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_limit_sessions Configured session limit. 
+# TYPE octavia_member_limit_sessions gauge +octavia_member_limit_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_limit_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_limit_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_limit_sessions{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_sessions_total Total number of sessions. +# TYPE octavia_member_sessions_total counter +octavia_member_sessions_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_sessions_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_sessions_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_sessions_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_max_session_rate Maximum observed number of sessions per second. +# TYPE octavia_member_max_session_rate gauge +octavia_member_max_session_rate{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_max_session_rate{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_max_session_rate{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_max_session_rate{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_last_session_seconds Number of seconds since last session assigned to the member. +# TYPE octavia_member_last_session_seconds gauge +octavia_member_last_session_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} -1 +octavia_member_last_session_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} -1 +octavia_member_last_session_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} -1 +octavia_member_last_session_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} -1 +# HELP octavia_member_current_queue Current number of queued requests. 
+# TYPE octavia_member_current_queue gauge +octavia_member_current_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_current_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_current_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_current_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_max_queue Maximum observed number of queued requests. +# TYPE octavia_member_max_queue gauge +octavia_member_max_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_max_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_max_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_max_queue{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_queue_limit Configured maxqueue for the member (0 meaning no limit). +# TYPE octavia_member_queue_limit gauge +octavia_member_queue_limit{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_queue_limit{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_queue_limit{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_queue_limit{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_bytes_in_total Current total of incoming bytes. +# TYPE octavia_member_bytes_in_total counter +octavia_member_bytes_in_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_bytes_in_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_bytes_in_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_bytes_in_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_bytes_out_total Current total of outgoing bytes. 
+# TYPE octavia_member_bytes_out_total counter +octavia_member_bytes_out_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_bytes_out_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_bytes_out_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_bytes_out_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_queue_time_average_seconds Avg. queue time for last 1024 successful connections. +# TYPE octavia_member_queue_time_average_seconds gauge +octavia_member_queue_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_queue_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +octavia_member_queue_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_queue_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP octavia_member_connect_time_average_seconds Avg. connect time for last 1024 successful connections. +# TYPE octavia_member_connect_time_average_seconds gauge +octavia_member_connect_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_connect_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +octavia_member_connect_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_connect_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP octavia_member_response_time_average_seconds Avg. response time for last 1024 successful connections. +# TYPE octavia_member_response_time_average_seconds gauge +octavia_member_response_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_response_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +octavia_member_response_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_response_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP octavia_member_total_time_average_seconds Avg. total time for last 1024 successful connections. 
+# TYPE octavia_member_total_time_average_seconds gauge +octavia_member_total_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_total_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +octavia_member_total_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_total_time_average_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP octavia_member_max_queue_time_seconds Maximum observed time spent in the queue +# TYPE octavia_member_max_queue_time_seconds gauge +octavia_member_max_queue_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_max_queue_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +octavia_member_max_queue_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_max_queue_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP octavia_member_max_connect_time_seconds Maximum observed time spent waiting for a connection to complete +# TYPE octavia_member_max_connect_time_seconds gauge +octavia_member_max_connect_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_max_connect_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +octavia_member_max_connect_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_max_connect_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP octavia_member_max_response_time_seconds Maximum observed time spent waiting for a member response. 
+# TYPE octavia_member_max_response_time_seconds gauge +octavia_member_max_response_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_max_response_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +octavia_member_max_response_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_max_response_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP octavia_member_max_total_time_seconds Maximum observed total request+response time (request+queue+connect+response+processing) +# TYPE octavia_member_max_total_time_seconds gauge +octavia_member_max_total_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_max_total_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +octavia_member_max_total_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0.000000 +octavia_member_max_total_time_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0.000000 +# HELP octavia_member_connection_attempts_total Total number of connection establishment attempts. +# TYPE octavia_member_connection_attempts_total counter +octavia_member_connection_attempts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_connection_attempts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_connection_attempts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_connection_attempts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_connection_reuses_total Total number of connection reuses. +# TYPE octavia_member_connection_reuses_total counter +octavia_member_connection_reuses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_connection_reuses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_connection_reuses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_connection_reuses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_responses_denied_total Total number of denied responses. 
+# TYPE octavia_member_responses_denied_total counter +octavia_member_responses_denied_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_responses_denied_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_responses_denied_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_responses_denied_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_connection_errors_total Total number of connection errors. +# TYPE octavia_member_connection_errors_total counter +octavia_member_connection_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_connection_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_connection_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_connection_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_response_errors_total Total number of response errors. +# TYPE octavia_member_response_errors_total counter +octavia_member_response_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_response_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_response_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_response_errors_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_retry_warnings_total Total number of retry warnings. +# TYPE octavia_member_retry_warnings_total counter +octavia_member_retry_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_retry_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_retry_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_retry_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_redispatch_warnings_total Total number of redispatch warnings. 
+# TYPE octavia_member_redispatch_warnings_total counter +octavia_member_redispatch_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_redispatch_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_redispatch_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_redispatch_warnings_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_failed_header_rewriting_total Total number of failed header rewriting warnings. +# TYPE octavia_member_failed_header_rewriting_total counter +octavia_member_failed_header_rewriting_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_failed_header_rewriting_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_failed_header_rewriting_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_failed_header_rewriting_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_client_aborts_total Total number of data transfers aborted by the client. +# TYPE octavia_member_client_aborts_total counter +octavia_member_client_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_client_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_client_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_client_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_server_aborts_total Total number of data transfers aborted by the server. +# TYPE octavia_member_server_aborts_total counter +octavia_member_server_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_server_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_server_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_server_aborts_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_weight Member weight. 
+# TYPE octavia_member_weight gauge
+octavia_member_weight{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 1
+octavia_member_weight{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 1
+octavia_member_weight{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 1
+octavia_member_weight{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 1
+# HELP octavia_member_check_failures_total Total number of failed checks (Only counts checks failed when the member is up).
+# TYPE octavia_member_check_failures_total counter
+octavia_member_check_failures_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3
+octavia_member_check_failures_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 3
+octavia_member_check_failures_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3
+octavia_member_check_failures_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 3
+# HELP octavia_member_check_up_down_total Total number of UP->DOWN transitions.
+# TYPE octavia_member_check_up_down_total counter
+octavia_member_check_up_down_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 1
+octavia_member_check_up_down_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 1
+octavia_member_check_up_down_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 1
+octavia_member_check_up_down_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 1
+# HELP octavia_member_downtime_seconds_total Total downtime (in seconds) for the member.
+# TYPE octavia_member_downtime_seconds_total counter
+octavia_member_downtime_seconds_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3353
+octavia_member_downtime_seconds_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 3352
+octavia_member_downtime_seconds_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3350
+octavia_member_downtime_seconds_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 3349
+# HELP octavia_member_check_last_change_seconds Number of seconds since the last UP<->DOWN transition.
+# TYPE octavia_member_check_last_change_seconds gauge +octavia_member_check_last_change_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3353 +octavia_member_check_last_change_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 3352 +octavia_member_check_last_change_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 3350 +octavia_member_check_last_change_seconds{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 3349 +# HELP octavia_member_current_throttle Current throttle percentage for the member, when slowstart is active, or no value if not in slowstart. +# TYPE octavia_member_current_throttle gauge +octavia_member_current_throttle{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 100 +octavia_member_current_throttle{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 100 +octavia_member_current_throttle{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 100 +octavia_member_current_throttle{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 100 +# HELP octavia_member_loadbalanced_total Total number of times a member was selected, either for new sessions, or when redispatching. +# TYPE octavia_member_loadbalanced_total counter +octavia_member_loadbalanced_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_loadbalanced_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_loadbalanced_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_loadbalanced_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_http_responses_total Total number of HTTP responses. 
+# TYPE octavia_member_http_responses_total counter +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="1xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f",code="1xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="1xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f",code="1xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="2xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f",code="2xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="2xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f",code="2xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="3xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f",code="3xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="3xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f",code="3xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="4xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f",code="4xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="4xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f",code="4xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="5xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f",code="5xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="5xx"} 0 
+octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f",code="5xx"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="other"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f",code="other"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77",code="other"} 0 +octavia_member_http_responses_total{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f",code="other"} 0 +# HELP octavia_member_idle_connections_current Current number of idle connections available for reuse +# TYPE octavia_member_idle_connections_current gauge +octavia_member_idle_connections_current{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_idle_connections_current{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_idle_connections_current{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_idle_connections_current{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +# HELP octavia_member_idle_connections_limit Limit on the number of available idle connections +# TYPE octavia_member_idle_connections_limit gauge +octavia_member_idle_connections_limit{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_idle_connections_limit{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:1a0c13c0-8e69-46c7-a68a-ca4937996d74",member="88ee5795-d594-478b-8591-455acf30164f"} 0 +octavia_member_idle_connections_limit{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="edf6ca81-b5d5-4d94-a656-f6028d8bbb77"} 0 +octavia_member_idle_connections_limit{pool="6034884d-ed47-49d5-ade7-8fa2c4501a74:0b248d39-eb90-4180-91aa-5363be7df4db",member="88ee5795-d594-478b-8591-455acf30164f"} 0 diff --git a/octavia/tests/common/utils.py b/octavia/tests/common/utils.py new file mode 100644 index 0000000000..d09fd1e557 --- /dev/null +++ b/octavia/tests/common/utils.py @@ -0,0 +1,112 @@ +# Copyright (c) 2016 Hewlett Packard Enterprise Development Company LP +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
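The metric samples that close just above are standard Prometheus exposition format, so any Prometheus-compatible scraper can consume them. As a minimal illustration (not part of this patch), the sketch below aggregates 5xx responses per member from a scraped payload; it assumes the third-party `prometheus_client` library is available, and `scraped_text` is a stand-in for whatever body the metrics endpoint returned.

```python
# Illustrative only: aggregate the exposition-format samples shown above.
# Assumes the third-party ``prometheus_client`` library; ``scraped_text``
# stands in for the scraped metrics payload.
from collections import defaultdict

from prometheus_client.parser import text_string_to_metric_families


def http_5xx_by_member(scraped_text):
    """Sum octavia_member_http_responses_total{code="5xx"} per member."""
    totals = defaultdict(float)
    for family in text_string_to_metric_families(scraped_text):
        for sample in family.samples:
            if (sample.name == 'octavia_member_http_responses_total'
                    and sample.labels.get('code') == '5xx'):
                totals[sample.labels['member']] += sample.value
    return dict(totals)
```

Matching on the sample name rather than the family name sidesteps differences in how parser versions normalize the `_total` suffix on counters.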
+import ipaddress +from unittest import mock + +import fixtures + +from octavia.common import constants as consts + + +# Borrowed from neutron +# https://review.opendev.org/#/c/232716/ +class OpenFixture(fixtures.Fixture): + """Mock access to a specific file while preserving open for others.""" + + def __init__(self, filepath, contents=''): + self.path = filepath + self.contents = contents + + def _setUp(self): + self.mock_open = mock.mock_open(read_data=self.contents) + # work around for https://bugs.python.org/issue21258 + self.mock_open.return_value.__iter__ = ( + lambda self: iter(self.readline, '')) + self._orig_open = open + + def replacement_open(name, *args, **kwargs): + if name == self.path: + return self.mock_open(name, *args, **kwargs) + return self._orig_open(name, *args, **kwargs) + + self._patch = mock.patch('builtins.open', new=replacement_open) + self._patch.start() + self.addCleanup(self._patch.stop) + + +def assert_address_lists_equal(obj, l1, l2): + obj.assertEqual(len(l1), len(l2), + f"Address lists don't match: {l1} vs {l2}") + for a1, a2 in zip(l1, l2): + if consts.ADDRESS in a1 and consts.ADDRESS in a2: + obj.assertEqual( + ipaddress.ip_address(a1[consts.ADDRESS]), + ipaddress.ip_address(a2[consts.ADDRESS])) + obj.assertEqual(a1[consts.PREFIXLEN], + a2[consts.PREFIXLEN]) + else: + obj.assertEqual(a1, a2) + + +def assert_route_lists_equal(obj, l1, l2): + obj.assertEqual(len(l1), len(l2), + f"Routes don't match: {l1} vs {l2}") + for r1, r2 in zip(l1, l2): + obj.assertEqual( + ipaddress.ip_network(r1[consts.DST]), + ipaddress.ip_network(r2[consts.DST])) + if consts.GATEWAY in r1 and consts.GATEWAY in r2: + obj.assertEqual( + ipaddress.ip_address(r1[consts.GATEWAY]), + ipaddress.ip_address(r2[consts.GATEWAY])) + if consts.PREFSRC in r1 and consts.PREFSRC in r2: + obj.assertEqual( + ipaddress.ip_address(r1[consts.PREFSRC]), + ipaddress.ip_address(r2[consts.PREFSRC])) + for attr in (consts.ONLINK, consts.TABLE, consts.SCOPE): + obj.assertEqual(r1.get(attr), r2.get(attr)) + obj.assertEqual(r1.get(consts.OCTAVIA_OWNED, True), + r2.get(consts.OCTAVIA_OWNED, True)) + + +def assert_rule_lists_equal(obj, l1, l2): + obj.assertEqual(len(l1), len(l2)) + for r1, r2 in zip(l1, l2): + obj.assertEqual( + ipaddress.ip_address(r1[consts.SRC]), + ipaddress.ip_address(r2[consts.SRC])) + obj.assertEqual(r1[consts.SRC_LEN], r2[consts.SRC_LEN]) + obj.assertEqual(r1[consts.TABLE], r2[consts.TABLE]) + + +def assert_script_lists_equal(obj, l1, l2): + obj.assertEqual(l1, l2) + + +def assert_interface_files_equal(obj, i1, i2): + obj.assertEqual(i1[consts.NAME], i2[consts.NAME]) + obj.assertEqual(i1.get(consts.MTU), i2.get(consts.MTU)) + assert_address_lists_equal(obj, + i1[consts.ADDRESSES], + i2[consts.ADDRESSES]) + assert_route_lists_equal(obj, + i1[consts.ROUTES], + i2[consts.ROUTES]) + assert_rule_lists_equal(obj, + i1[consts.RULES], + i2[consts.RULES]) + assert_script_lists_equal(obj, + i1[consts.SCRIPTS], + i2[consts.SCRIPTS]) diff --git a/octavia/tests/fixtures.py b/octavia/tests/fixtures.py new file mode 100644 index 0000000000..4417116eca --- /dev/null +++ b/octavia/tests/fixtures.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import warnings + +import fixtures +from sqlalchemy import exc as sqla_exc + + +class WarningsFixture(fixtures.Fixture): + """Filters out warnings during test runs.""" + + def setUp(self): + super().setUp() + # Make deprecation warnings only happen once to avoid spamming + warnings.simplefilter('once', DeprecationWarning) + + # Enable deprecation warnings to capture upcoming SQLAlchemy changes + warnings.filterwarnings( + 'error', + category=sqla_exc.SADeprecationWarning) + + self.addCleanup(warnings.resetwarnings) diff --git a/octavia/tests/functional/__init__.py b/octavia/tests/functional/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/functional/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/functional/amphorae/__init__.py b/octavia/tests/functional/amphorae/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/functional/amphorae/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/functional/amphorae/backend/__init__.py b/octavia/tests/functional/amphorae/backend/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/functional/amphorae/backend/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
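For orientation, here is a minimal sketch (not part of this patch) of how a test case consumes the two fixtures added above. It assumes `octavia.tests.unit.base.TestCase`, the base class used by the test modules below; the file path and contents are invented for illustration.

```python
# Illustrative only -- how the fixtures above are meant to be consumed.
# The path '/etc/octavia/example.conf' and its contents are made up.
from octavia.tests import fixtures as octavia_fixtures
from octavia.tests.common import utils as test_utils
from octavia.tests.unit import base


class ExampleTestCase(base.TestCase):
    def test_reads_mocked_file(self):
        # Apply the project-wide warning filters for this test.
        self.useFixture(octavia_fixtures.WarningsFixture())

        # Only this one path is mocked; every other open() still hits
        # the real filesystem, which is the point of OpenFixture.
        self.useFixture(test_utils.OpenFixture(
            '/etc/octavia/example.conf', contents='key=value\n'))

        with open('/etc/octavia/example.conf') as f:
            self.assertEqual('key=value\n', f.read())
```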
diff --git a/octavia/tests/functional/amphorae/backend/agent/__init__.py b/octavia/tests/functional/amphorae/backend/agent/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/functional/amphorae/backend/agent/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/functional/amphorae/backend/agent/api_server/__init__.py b/octavia/tests/functional/amphorae/backend/agent/api_server/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/functional/amphorae/backend/agent/api_server/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/functional/amphorae/backend/agent/api_server/test_keepalivedlvs.py b/octavia/tests/functional/amphorae/backend/agent/api_server/test_keepalivedlvs.py new file mode 100644 index 0000000000..b9b3c38cf5 --- /dev/null +++ b/octavia/tests/functional/amphorae/backend/agent/api_server/test_keepalivedlvs.py @@ -0,0 +1,394 @@ +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import stat +import subprocess +from unittest import mock + +import flask + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.amphorae.backends.agent.api_server import keepalivedlvs +from octavia.amphorae.backends.agent.api_server import server +from octavia.amphorae.backends.agent.api_server import util +from octavia.common import constants as consts +from octavia.tests.common import utils as test_utils +from octavia.tests.unit import base + + +class KeepalivedLvsTestCase(base.TestCase): + FAKE_ID = uuidutils.generate_uuid() + LISTENER_ID = 'listener-1111-1111-1111-listenerid00' + POOL_ID = 'poolpool-1111-1111-1111-poolid000000' + MEMBER_ID1 = 'memberid-1111-1111-1111-memberid1111' + MEMBER_ID2 = 'memberid-2222-2222-2222-memberid2222' + HEALTHMONITOR_ID = 'hmidhmid-1111-1111-1111-healthmonito' + NORMAL_CFG_CONTENT = ( + "# Configuration for Listener %(listener_id)s\n\n" + "net_namespace haproxy-amphora\n\n" + "virtual_server 10.0.0.2 80 {\n" + " lb_algo rr\n" + " lb_kind NAT\n" + " protocol udp\n" + " delay_loop 30\n" + " delay_before_retry 31\n" + " retry 3\n\n\n" + " # Configuration for Pool %(pool_id)s\n" + " # Configuration for HealthMonitor %(hm_id)s\n" + " # Configuration for Member %(member1_id)s\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " inhibit_on_failure\n" + " uthreshold 98\n" + " persistence_timeout 33\n" + " persistence_granularity 255.255.0.0\n" + " delay_before_retry 31\n" + " retry 3\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 10.0.0.99 82\"\n" + " misc_timeout 30\n" + " misc_dynamic\n" + " }\n" + " }\n\n" + " # Configuration for Member %(member2_id)s\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " inhibit_on_failure\n" + " uthreshold 98\n" + " persistence_timeout 33\n" + " persistence_granularity 255.255.0.0\n" + " delay_before_retry 31\n" + " retry 3\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 10.0.0.98 82\"\n" + " misc_timeout 30\n" + " misc_dynamic\n" + " }\n" + " }\n\n" + "}\n\n") % {'listener_id': LISTENER_ID, 'pool_id': POOL_ID, + 'hm_id': HEALTHMONITOR_ID, 'member1_id': MEMBER_ID1, + 'member2_id': MEMBER_ID2} + PROC_CONTENT = ( + "IP Virtual Server version 1.2.1 (size=4096)\n" + "Prot LocalAddress:Port Scheduler Flags\n" + " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n" + "UDP 0A000002:0050 sh\n" + " -> 0A000063:0052 Masq 13 1 0\n" + " -> 0A000062:0052 Masq 13 1 0\n" + ) + NORMAL_PID_CONTENT = "1988" + TEST_URL = server.PATH_PREFIX + '/listeners/%s/%s/udp_listener' + + def setUp(self): + super().setUp() + self.app = flask.Flask(__name__) + self.client = self.app.test_client() + self._ctx = self.app.test_request_context() + self._ctx.push() + self.test_keepalivedlvs = keepalivedlvs.KeepalivedLvs() + self.app.add_url_rule( + rule=self.TEST_URL % ('', ''), + view_func=(lambda amphora_id, listener_id: + self.test_keepalivedlvs.upload_lvs_listener_config( + listener_id)), + methods=['PUT']) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'run_systemctl_command') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
+ 'install_netns_systemd_service') + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('shutil.copy2') + @mock.patch('os.chmod') + @mock.patch('os.path.exists') + @mock.patch('os.makedirs') + @mock.patch('os.remove') + @mock.patch('subprocess.check_output') + def test_upload_lvs_listener_config_no_vrrp_check_dir( + self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod, + m_copy2, mock_netns, mock_install_netns, mock_systemctl): + m_exists.side_effect = [False, False, True, True, False, False] + cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID) + m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open + + with mock.patch('os.open') as m_open, mock.patch.object(os, + 'fdopen', + m) as m_fdopen: + m_open.side_effect = ['TEST-WRITE-CFG', + 'TEST-WRITE-SYSINIT'] + + res = self.client.put(self.TEST_URL % ('123', self.FAKE_ID), + data=self.NORMAL_CFG_CONTENT) + + mock_install_netns.assert_called_once() + systemctl_calls = [ + mock.call(consts.ENABLE, + consts.AMP_NETNS_SVC_PREFIX, False), + mock.call( + consts.ENABLE, + 'octavia-keepalivedlvs-%s.service' % self.FAKE_ID), + ] + mock_systemctl.assert_has_calls(systemctl_calls) + os_mkdir_calls = [ + mock.call(util.keepalived_lvs_dir()), + mock.call(util.keepalived_backend_check_script_dir()) + ] + m_os_mkdir.assert_has_calls(os_mkdir_calls) + + m_os_chmod.assert_called_with( + util.keepalived_backend_check_script_path(), stat.S_IEXEC) + + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + systemd_cfg_path = util.keepalived_lvs_init_path(self.FAKE_ID) + + m_open_calls = [ + mock.call(cfg_path, flags, mode), + mock.call(systemd_cfg_path, flags, mode) + ] + m_open.assert_has_calls(m_open_calls) + m_fdopen.assert_any_call('TEST-WRITE-CFG', 'wb') + m_fdopen.assert_any_call('TEST-WRITE-SYSINIT', 'w') + self.assertEqual(200, res.status_code) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_lvs_listeners') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_loadbalancers') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'run_systemctl_command') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
+ 'install_netns_systemd_service') + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('shutil.copy2') + @mock.patch('os.chmod') + @mock.patch('os.path.exists') + @mock.patch('os.makedirs') + @mock.patch('os.remove') + @mock.patch('subprocess.check_output') + def test_upload_lvs_listener_config_with_vrrp_check_dir( + self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod, + m_copy2, mock_netns, mock_install_netns, mock_systemctl, + mock_get_lbs, mock_get_lvs_listeners): + m_exists.side_effect = [False, False, True, True, False, False, False] + mock_get_lbs.return_value = [] + mock_get_lvs_listeners.return_value = [self.FAKE_ID] + cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID) + m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open + + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='controller_worker', + loadbalancer_topology=consts.TOPOLOGY_ACTIVE_STANDBY) + + with mock.patch('os.open') as m_open, mock.patch.object(os, + 'fdopen', + m) as m_fdopen: + m_open.side_effect = ['TEST-WRITE-CFG', + 'TEST-WRITE-SYSINIT', + 'TEST-WRITE-UDP-VRRP-CHECK'] + res = self.client.put(self.TEST_URL % ('123', self.FAKE_ID), + data=self.NORMAL_CFG_CONTENT) + os_mkdir_calls = [ + mock.call(util.keepalived_lvs_dir()), + mock.call(util.keepalived_backend_check_script_dir()) + ] + m_os_mkdir.assert_has_calls(os_mkdir_calls) + + mock_install_netns.assert_called_once() + systemctl_calls = [ + mock.call(consts.ENABLE, + consts.AMP_NETNS_SVC_PREFIX, False), + mock.call( + consts.ENABLE, + 'octavia-keepalivedlvs-%s.service' % self.FAKE_ID) + ] + mock_systemctl.assert_has_calls(systemctl_calls) + + m_os_chmod.assert_called_with( + util.keepalived_backend_check_script_path(), stat.S_IEXEC) + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + systemd_cfg_path = util.keepalived_lvs_init_path(self.FAKE_ID) + script_path = os.path.join( + util.keepalived_check_scripts_dir(), + keepalivedlvs.KEEPALIVED_CHECK_SCRIPT_NAME) + m_open_calls = [ + mock.call(cfg_path, flags, mode), + mock.call(systemd_cfg_path, flags, mode), + mock.call(script_path, flags, stat.S_IEXEC) + ] + m_open.assert_has_calls(m_open_calls) + m_fdopen.assert_any_call('TEST-WRITE-CFG', 'wb') + m_fdopen.assert_any_call('TEST-WRITE-SYSINIT', 'w') + m_fdopen.assert_any_call('TEST-WRITE-UDP-VRRP-CHECK', 'w') + + m_os_rm.assert_called_once_with(util.haproxy_check_script_path()) + self.assertEqual(200, res.status_code) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'run_systemctl_command') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
+ 'install_netns_systemd_service') + @mock.patch('shutil.copy2') + @mock.patch('os.chmod') + @mock.patch('os.path.exists') + @mock.patch('os.makedirs') + @mock.patch('os.remove') + @mock.patch('subprocess.check_output') + def test_upload_lvs_listener_config_start_service_failure( + self, m_check_output, m_os_rm, m_os_mkdir, m_exists, m_os_chmod, + m_copy2, mock_install_netns, mock_systemctl): + m_exists.side_effect = [False, False, True, True, False] + cfg_path = util.keepalived_lvs_cfg_path(self.FAKE_ID) + m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open + mock_systemctl.side_effect = [mock.DEFAULT, Exception('boom')] + + with mock.patch('os.open') as m_open, mock.patch.object(os, + 'fdopen', + m) as m_fdopen: + m_open.side_effect = ['TEST-WRITE-CFG', + 'TEST-WRITE-SYSINIT'] + res = self.client.put(self.TEST_URL % ('123', self.FAKE_ID), + data=self.NORMAL_CFG_CONTENT) + os_mkdir_calls = [ + mock.call(util.keepalived_lvs_dir()), + mock.call(util.keepalived_backend_check_script_dir()) + ] + m_os_mkdir.assert_has_calls(os_mkdir_calls) + + mock_install_netns.assert_called_once() + systemctl_calls = [ + mock.call(consts.ENABLE, + consts.AMP_NETNS_SVC_PREFIX, False), + mock.call( + consts.ENABLE, + 'octavia-keepalivedlvs-%s.service' % self.FAKE_ID) + ] + mock_systemctl.assert_has_calls(systemctl_calls) + + m_os_chmod.assert_called_with( + util.keepalived_backend_check_script_path(), stat.S_IEXEC) + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + systemd_cfg_path = util.keepalived_lvs_init_path(self.FAKE_ID) + m_open_calls = [ + mock.call(cfg_path, flags, mode), + mock.call(systemd_cfg_path, flags, mode) + ] + m_open.assert_has_calls(m_open_calls) + m_fdopen.assert_any_call('TEST-WRITE-CFG', 'wb') + m_fdopen.assert_any_call('TEST-WRITE-SYSINIT', 'w') + self.assertEqual(500, res.status_code) + + @mock.patch('subprocess.check_output') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'keepalivedlvs.KeepalivedLvs.' + '_check_lvs_listener_exists') + def test_manage_lvs_listener(self, mock_lvs_exist, mock_check_output): + res = self.test_keepalivedlvs.manage_lvs_listener(self.FAKE_ID, + 'start') + cmd = f"systemctl start octavia-keepalivedlvs-{self.FAKE_ID}.service" + mock_check_output.assert_called_once_with(cmd.split(), + stderr=subprocess.STDOUT, + encoding='utf-8') + self.assertEqual(202, res.status_code) + + res = self.test_keepalivedlvs.manage_lvs_listener(self.FAKE_ID, + 'restart') + self.assertEqual(400, res.status_code) + + mock_check_output.side_effect = subprocess.CalledProcessError(1, + 'blah!') + + res = self.test_keepalivedlvs.manage_lvs_listener(self.FAKE_ID, + 'start') + self.assertEqual(500, res.status_code) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_lvs_listeners', return_value=[LISTENER_ID]) + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
+ 'get_keepalivedlvs_pid', return_value="12345") + @mock.patch('subprocess.check_output') + @mock.patch('os.remove') + @mock.patch('os.path.exists') + def test_delete_lvs_listener(self, m_exist, m_remove, m_check_output, + mget_pid, mget_lvs_listeners): + m_exist.return_value = True + res = self.test_keepalivedlvs.delete_lvs_listener(self.FAKE_ID) + + cmd1 = f"systemctl stop octavia-keepalivedlvs-{self.FAKE_ID}.service" + cmd2 = ("systemctl disable " + f"octavia-keepalivedlvs-{self.FAKE_ID}.service") + calls = [ + mock.call(cmd1.split(), stderr=subprocess.STDOUT, + encoding='utf-8'), + mock.call(cmd2.split(), stderr=subprocess.STDOUT, + encoding='utf-8') + ] + m_check_output.assert_has_calls(calls) + self.assertEqual(200, res.status_code) + + @mock.patch.object(keepalivedlvs, "webob") + @mock.patch('os.path.exists') + def test_delete_lvs_listener_not_exist(self, m_exist, m_webob): + m_exist.return_value = False + self.test_keepalivedlvs.delete_lvs_listener(self.FAKE_ID) + calls = [ + mock.call( + json=dict( + message='UDP Listener Not Found', + details=f"No UDP listener with UUID: {self.FAKE_ID}" + ), status=404), + mock.call(json={'message': 'OK'}) + ] + m_webob.Response.assert_has_calls(calls) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_keepalivedlvs_pid', return_value="12345") + @mock.patch('subprocess.check_output') + @mock.patch('os.path.exists') + def test_delete_lvs_listener_stop_service_fail(self, m_exist, + m_check_output, mget_pid): + m_exist.return_value = True + m_check_output.side_effect = subprocess.CalledProcessError(1, + 'Woops!') + res = self.test_keepalivedlvs.delete_lvs_listener(self.FAKE_ID) + self.assertEqual(500, res.status_code) + self.assertEqual({'message': 'Error stopping keepalivedlvs', + 'details': None}, res.json) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_keepalivedlvs_pid', return_value="12345") + @mock.patch('subprocess.check_output') + @mock.patch('os.remove') + @mock.patch('os.path.exists') + def test_delete_lvs_listener_disable_service_fail(self, m_exist, m_remove, + m_check_output, + mget_pid): + m_exist.return_value = True + m_check_output.side_effect = [True, + subprocess.CalledProcessError( + 1, 'Woops!')] + res = self.test_keepalivedlvs.delete_lvs_listener(self.FAKE_ID) + self.assertEqual(500, res.status_code) + self.assertEqual({ + 'message': f'Error disabling octavia-keepalivedlvs-{self.FAKE_ID} ' + f'service', + 'details': None}, res.json) diff --git a/octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py b/octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py new file mode 100644 index 0000000000..f2d8f0c515 --- /dev/null +++ b/octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py @@ -0,0 +1,2936 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
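One pattern worth calling out before the next file: both the module above and `test_server.py` below assert that the agent shells out to per-object systemd units named after the listener or load balancer UUID (`octavia-keepalivedlvs-<uuid>.service`, `haproxy-<uuid>.service`). The sketch below only spells that convention out; the `systemctl()` helper is hypothetical, written for illustration, and is not code from the agent itself.

```python
# Hypothetical helper that spells out the command shape the assertions in
# these tests encode; not the agent's actual implementation.
import subprocess


def systemctl(action, unit_name):
    """Run ``systemctl <action> <unit>.service`` the way the tests expect."""
    cmd = f"systemctl {action} {unit_name}.service"
    return subprocess.check_output(
        cmd.split(), stderr=subprocess.STDOUT, encoding='utf-8')


# For example, the units asserted above and below:
#   systemctl('start', f'octavia-keepalivedlvs-{listener_id}')
#   systemctl('stop', f'haproxy-{lb_id}')
```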
+ +import hashlib +import os +import random +import socket +import stat +import subprocess +from unittest import mock + +import fixtures +from oslo_config import fixture as oslo_fixture +from oslo_serialization import jsonutils +from oslo_utils import uuidutils +import webob + +from octavia.amphorae.backends.agent import api_server +from octavia.amphorae.backends.agent.api_server import certificate_update +from octavia.amphorae.backends.agent.api_server import server +from octavia.amphorae.backends.agent.api_server import util +from octavia.common import config +from octavia.common import constants as consts +from octavia.common import utils as octavia_utils +from octavia.tests.common import utils as test_utils +import octavia.tests.unit.base as base + + +AMP_AGENT_CONF_PATH = '/etc/octavia/amphora-agent.conf' +RANDOM_ERROR = 'random error' +OK = dict(message='OK') +FAKE_INTERFACE = 'eth33' + + +class TestServerTestCase(base.TestCase): + app = None + + def setUp(self): + super().setUp() + self.conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) + self.conf.config(group="haproxy_amphora", base_path='/var/lib/octavia') + self.conf.config(group="controller_worker", + loadbalancer_topology=consts.TOPOLOGY_SINGLE) + self.conf.load_raw_values(project='fake_project') + self.conf.load_raw_values(prog='fake_prog') + self.useFixture(fixtures.MockPatch( + 'oslo_config.cfg.find_config_files', + return_value=[AMP_AGENT_CONF_PATH])) + hm_queue = mock.MagicMock() + with mock.patch('distro.id', return_value='ubuntu'), mock.patch( + 'octavia.amphorae.backends.agent.api_server.plug.' + 'Plug.plug_lo'): + self.ubuntu_test_server = server.Server(hm_queue) + self.ubuntu_app = self.ubuntu_test_server.app.test_client() + + with mock.patch('distro.id', return_value='centos'), mock.patch( + 'octavia.amphorae.backends.agent.api_server.plug.' + 'Plug.plug_lo'): + self.centos_test_server = server.Server(hm_queue) + self.centos_app = self.centos_test_server.app.test_client() + + def test_ubuntu_haproxy(self): + self._test_haproxy(consts.UBUNTU) + + def test_centos_haproxy(self): + self._test_haproxy(consts.CENTOS) + + @mock.patch('octavia.amphorae.backends.agent.api_server.' 
+ 'haproxy_compatibility.get_haproxy_versions') + @mock.patch('os.path.exists') + @mock.patch('os.makedirs') + @mock.patch('os.rename') + @mock.patch('subprocess.check_output') + def _test_haproxy(self, distro, + mock_subprocess, mock_rename, + mock_makedirs, mock_exists, mock_get_version): + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + + mock_get_version.return_value = [1, 6] + + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + mock_exists.return_value = True + file_name = '/var/lib/octavia/123/haproxy.cfg.new' + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + + # happy case upstart file exists + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'distro.id') as mock_distro_id: + mock_open.return_value = 123 + mock_distro_id.return_value = distro + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/amp_123/123/haproxy', + data='test') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/amp_123/123/haproxy', + data='test') + mode = stat.S_IRUSR | stat.S_IWUSR + mock_open.assert_called_with(file_name, flags, mode) + mock_fdopen.assert_called_with(123, 'w') + self.assertEqual(202, rv.status_code) + m().write.assert_called_once_with('test') + mock_subprocess.assert_any_call( + "haproxy -c -L {peer} -f {config_file} -f {haproxy_ug}".format( + config_file=file_name, + haproxy_ug=consts.HAPROXY_USER_GROUP_CFG, + peer=(octavia_utils. + base64_sha1_string('amp_123').rstrip('='))).split(), + stderr=subprocess.STDOUT, encoding='utf-8') + mock_rename.assert_called_with( + '/var/lib/octavia/123/haproxy.cfg.new', + '/var/lib/octavia/123/haproxy.cfg') + + mock_subprocess.assert_any_call( + ['systemctl', 'enable', 'haproxy-123.service'], + stderr=subprocess.STDOUT, + encoding='utf-8') + + # exception writing + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + m.side_effect = IOError() # open crashes + with mock.patch('os.open'), mock.patch.object( + os, 'fdopen', m), mock.patch('distro.id') as mock_distro_id: + mock_distro_id.return_value = distro + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/amp_123/123/haproxy', + data='test') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/amp_123/123/haproxy', + data='test') + self.assertEqual(500, rv.status_code) + + # check if files get created + mock_exists.return_value = False + init_path = consts.SYSTEMD_DIR + '/haproxy-123.service' + + m = self.useFixture(test_utils.OpenFixture(init_path)).mock_open + # happy case upstart file exists + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'distro.id') as mock_distro_id: + mock_open.return_value = 123 + mock_distro_id.return_value = distro + + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/amp_123/123/haproxy', + data='test') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/amp_123/123/haproxy', + data='test') + + self.assertEqual(202, rv.status_code) + mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | + stat.S_IROTH) + mock_open.assert_called_with(init_path, flags, mode) + mock_fdopen.assert_called_with(123, 'w') + handle = mock_fdopen() + handle.write.assert_any_call('test') + # skip the template stuff + 
mock_makedirs.assert_called_with('/var/lib/octavia/123') + + # unhappy case haproxy check fails + mock_exists.return_value = True + mock_subprocess.side_effect = [subprocess.CalledProcessError( + 7, 'test', RANDOM_ERROR)] + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'distro.id') as mock_distro_id: + mock_open.return_value = 123 + mock_distro_id.return_value = distro + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/amp_123/123/haproxy', + data='test') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/amp_123/123/haproxy', + data='test') + self.assertEqual(400, rv.status_code) + self.assertEqual( + {'message': 'Invalid request', 'details': 'random error'}, + jsonutils.loads(rv.data.decode('utf-8'))) + mode = stat.S_IRUSR | stat.S_IWUSR + mock_open.assert_called_with(file_name, flags, mode) + mock_fdopen.assert_called_with(123, 'w') + handle = mock_fdopen() + handle.write.assert_called_with('test') + mock_subprocess.assert_called_with( + "haproxy -c -L {peer} -f {config_file} -f {haproxy_ug}".format( + config_file=file_name, + haproxy_ug=consts.HAPROXY_USER_GROUP_CFG, + peer=(octavia_utils. + base64_sha1_string('amp_123').rstrip('='))).split(), + stderr=subprocess.STDOUT, encoding='utf-8') + mock_rename.assert_called_with( + '/var/lib/octavia/123/haproxy.cfg.new', + '/var/lib/octavia/123/haproxy.cfg.new-failed') + + def test_ubuntu_start(self): + self._test_start(consts.UBUNTU) + + def test_centos_start(self): + self._test_start(consts.CENTOS) + + @mock.patch('os.listdir') + @mock.patch('os.path.exists') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'vrrp_check_script_update') + @mock.patch('subprocess.check_output') + def _test_start(self, distro, mock_subprocess, mock_vrrp, mock_exists, + mock_listdir): + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/123/error') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/123/error') + self.assertEqual(400, rv.status_code) + self.assertEqual( + {'message': 'Invalid Request', + 'details': 'Unknown action: error', }, + jsonutils.loads(rv.data.decode('utf-8'))) + + mock_exists.reset_mock() + mock_exists.return_value = False + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/123/start') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/123/start') + self.assertEqual(404, rv.status_code) + self.assertEqual( + {'message': 'Loadbalancer Not Found', + 'details': 'No loadbalancer with UUID: 123'}, + jsonutils.loads(rv.data.decode('utf-8'))) + mock_exists.assert_called_with('/var/lib/octavia') + + mock_exists.return_value = True + mock_listdir.return_value = ['123'] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/123/start') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/123/start') + self.assertEqual(202, rv.status_code) + self.assertEqual( + {'message': 'OK', + 'details': 'Configuration file is valid\nhaproxy daemon for' + ' 123 started'}, + jsonutils.loads(rv.data.decode('utf-8'))) + mock_subprocess.assert_called_with( + ['systemctl', 'start', 'haproxy-123.service'], + stderr=subprocess.STDOUT, + 
encoding='utf-8') + + mock_exists.return_value = True + mock_subprocess.side_effect = subprocess.CalledProcessError( + 7, 'test', RANDOM_ERROR) + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/123/start') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/123/start') + self.assertEqual(500, rv.status_code) + self.assertEqual( + { + 'message': 'Error starting haproxy', + 'details': RANDOM_ERROR, + }, jsonutils.loads(rv.data.decode('utf-8'))) + mock_subprocess.assert_called_with( + ['systemctl', 'start', 'haproxy-123.service'], + stderr=subprocess.STDOUT, encoding='utf-8') + + def test_ubuntu_reload(self): + self._test_reload(consts.UBUNTU) + + def test_centos_reload(self): + self._test_reload(consts.CENTOS) + + @mock.patch('os.listdir') + @mock.patch('os.path.exists') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'vrrp_check_script_update') + @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' + 'Loadbalancer._check_haproxy_status') + @mock.patch('subprocess.check_output') + @mock.patch('octavia.amphorae.backends.utils.haproxy_query.HAProxyQuery') + def _test_reload(self, distro, mock_haproxy_query, mock_subprocess, + mock_haproxy_status, mock_vrrp, mock_exists, + mock_listdir): + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + + # Process running so reload + mock_exists.return_value = True + mock_listdir.return_value = ['123'] + mock_haproxy_status.return_value = consts.ACTIVE + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/123/reload') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/123/reload') + self.assertEqual(202, rv.status_code) + self.assertEqual( + {'message': 'OK', + 'details': 'Listener 123 reloaded'}, + jsonutils.loads(rv.data.decode('utf-8'))) + mock_subprocess.assert_called_with( + ['systemctl', 'reload', 'haproxy-123.service'], + stderr=subprocess.STDOUT, encoding='utf-8') + + # Process not running so start + mock_exists.return_value = True + mock_haproxy_status.return_value = consts.OFFLINE + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/123/reload') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/123/reload') + self.assertEqual(202, rv.status_code) + self.assertEqual( + {'message': 'OK', + 'details': 'Configuration file is valid\nhaproxy daemon for' + ' 123 started'}, + jsonutils.loads(rv.data.decode('utf-8'))) + mock_subprocess.assert_called_with( + ['systemctl', 'start', 'haproxy-123.service'], + stderr=subprocess.STDOUT, encoding='utf-8') + + def test_ubuntu_info(self): + self._test_info(consts.UBUNTU) + + def test_centos_info(self): + self._test_info(consts.CENTOS) + + @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' 
+ 'AmphoraInfo._get_extend_body_from_lvs_driver', + return_value={}) + @mock.patch('socket.gethostname') + @mock.patch('subprocess.check_output') + def _test_info(self, distro, mock_subbprocess, mock_hostname, + mock_get_extend_body): + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + mock_hostname.side_effect = ['test-host'] + mock_subbprocess.side_effect = ['9.9.99-9'] + + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + '/info') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + '/info') + + self.assertEqual(200, rv.status_code) + self.assertEqual(dict( + api_version='1.0', + haproxy_version='9.9.99-9', + hostname='test-host'), + jsonutils.loads(rv.data.decode('utf-8'))) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_backend_for_lb_object', return_value='HAPROXY') + def test_delete_ubuntu_listener(self, mock_get_proto): + self._test_delete_listener(consts.UBUNTU) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_backend_for_lb_object', return_value='HAPROXY') + def test_delete_centos_listener(self, mock_get_proto): + self._test_delete_listener(consts.CENTOS) + + @mock.patch('os.listdir') + @mock.patch('os.path.exists') + @mock.patch('subprocess.check_output') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'vrrp_check_script_update') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + + 'get_haproxy_pid') + @mock.patch('shutil.rmtree') + @mock.patch('os.remove') + def _test_delete_listener(self, distro, + mock_remove, mock_rmtree, mock_pid, mock_vrrp, + mock_check_output, mock_exists, mock_listdir): + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + # no listener + mock_exists.return_value = False + mock_listdir.return_value = ['123'] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.delete('/' + api_server.VERSION + + '/listeners/123') + elif distro == consts.CENTOS: + rv = self.centos_app.delete('/' + api_server.VERSION + + '/listeners/123') + self.assertEqual(200, rv.status_code) + self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8'))) + mock_exists.assert_called_once_with('/var/lib/octavia') + + # service is stopped + no upstart script + no vrrp + mock_exists.side_effect = [True, True, False, False, False] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.delete('/' + api_server.VERSION + + '/listeners/123') + elif distro == consts.CENTOS: + rv = self.centos_app.delete('/' + api_server.VERSION + + '/listeners/123') + self.assertEqual(200, rv.status_code) + self.assertEqual({'message': 'OK'}, + jsonutils.loads(rv.data.decode('utf-8'))) + mock_rmtree.assert_called_with('/var/lib/octavia/123') + + mock_exists.assert_called_with(consts.SYSTEMD_DIR + + '/haproxy-123.service') + + mock_exists.assert_any_call('/var/lib/octavia/123/123.pid') + + # service is stopped + no upstart script + vrrp + mock_exists.side_effect = [True, True, False, True, False] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.delete('/' + api_server.VERSION + + '/listeners/123') + elif distro == consts.CENTOS: + rv = self.centos_app.delete('/' + api_server.VERSION + + '/listeners/123') + self.assertEqual(200, rv.status_code) + self.assertEqual({'message': 'OK'}, + jsonutils.loads(rv.data.decode('utf-8'))) + mock_rmtree.assert_called_with('/var/lib/octavia/123') + + mock_exists.assert_called_with(consts.SYSTEMD_DIR + + '/haproxy-123.service') + + mock_exists.assert_any_call('/var/lib/octavia/123/123.pid') + + # service is stopped + 
upstart script + no vrrp + mock_exists.side_effect = [True, True, False, False, True] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.delete('/' + api_server.VERSION + + '/listeners/123') + elif distro == consts.CENTOS: + rv = self.centos_app.delete('/' + api_server.VERSION + + '/listeners/123') + self.assertEqual(200, rv.status_code) + self.assertEqual({'message': 'OK'}, + jsonutils.loads(rv.data.decode('utf-8'))) + + mock_remove.assert_called_with(consts.SYSTEMD_DIR + + '/haproxy-123.service') + + # service is stopped + upstart script + vrrp + mock_exists.side_effect = [True, True, False, True, True] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.delete('/' + api_server.VERSION + + '/listeners/123') + elif distro == consts.CENTOS: + rv = self.centos_app.delete('/' + api_server.VERSION + + '/listeners/123') + self.assertEqual(200, rv.status_code) + self.assertEqual({'message': 'OK'}, + jsonutils.loads(rv.data.decode('utf-8'))) + + mock_remove.assert_called_with(consts.SYSTEMD_DIR + + '/haproxy-123.service') + + # service is running + upstart script + no vrrp + mock_exists.side_effect = [True, True, True, True, False, True] + mock_pid.return_value = '456' + if distro == consts.UBUNTU: + rv = self.ubuntu_app.delete('/' + api_server.VERSION + + '/listeners/123') + elif distro == consts.CENTOS: + rv = self.centos_app.delete('/' + api_server.VERSION + + '/listeners/123') + self.assertEqual(200, rv.status_code) + self.assertEqual({'message': 'OK'}, + jsonutils.loads(rv.data.decode('utf-8'))) + mock_pid.assert_called_once_with('123') + mock_check_output.assert_any_call( + ['systemctl', 'stop', 'haproxy-123.service'], + stderr=subprocess.STDOUT, encoding='utf-8') + + mock_check_output.assert_any_call( + ['systemctl', 'disable', 'haproxy-123.service'], + stderr=subprocess.STDOUT, encoding='utf-8') + + # service is running + upstart script + vrrp + mock_exists.side_effect = [True, True, True, True, True, True] + mock_pid.return_value = '456' + if distro == consts.UBUNTU: + rv = self.ubuntu_app.delete('/' + api_server.VERSION + + '/listeners/123') + elif distro == consts.CENTOS: + rv = self.centos_app.delete('/' + api_server.VERSION + + '/listeners/123') + self.assertEqual(200, rv.status_code) + self.assertEqual({'message': 'OK'}, + jsonutils.loads(rv.data.decode('utf-8'))) + mock_pid.assert_called_with('123') + mock_check_output.assert_any_call( + ['systemctl', 'stop', 'haproxy-123.service'], + stderr=subprocess.STDOUT, encoding='utf-8') + + mock_check_output.assert_any_call( + ['systemctl', 'disable', 'haproxy-123.service'], + stderr=subprocess.STDOUT, encoding='utf-8') + + # service is running + stopping fails + mock_exists.side_effect = [True, True, True, True] + mock_check_output.side_effect = subprocess.CalledProcessError( + 7, 'test', RANDOM_ERROR) + if distro == consts.UBUNTU: + rv = self.ubuntu_app.delete('/' + api_server.VERSION + + '/listeners/123') + elif distro == consts.CENTOS: + rv = self.centos_app.delete('/' + api_server.VERSION + + '/listeners/123') + self.assertEqual(500, rv.status_code) + self.assertEqual( + {'details': 'random error', 'message': 'Error stopping haproxy'}, + jsonutils.loads(rv.data.decode('utf-8'))) + # that's the last call before exception + mock_exists.assert_called_with('/proc/456') + + def test_ubuntu_get_haproxy(self): + self._test_get_haproxy(consts.UBUNTU) + + def test_centos_get_haproxy(self): + self._test_get_haproxy(consts.CENTOS) + + @mock.patch('os.listdir') + @mock.patch('os.path.exists') + def _test_get_haproxy(self, distro, mock_exists, 
mock_listdir): + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + + CONTENT = "bibble\nbibble" + mock_exists.side_effect = [False] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + + '/loadbalancer/123/haproxy') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + + '/loadbalancer/123/haproxy') + self.assertEqual(404, rv.status_code) + + mock_exists.side_effect = [True, True] + + path = util.config_path('123') + self.useFixture(test_utils.OpenFixture(path, CONTENT)) + + mock_listdir.return_value = ['123'] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + + '/loadbalancer/123/haproxy') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + + '/loadbalancer/123/haproxy') + self.assertEqual(200, rv.status_code) + self.assertEqual(octavia_utils.b(CONTENT), rv.data) + self.assertEqual('text/plain; charset=utf-8', + rv.headers['Content-Type'].lower()) + + def test_ubuntu_get_all_listeners(self): + self._test_get_all_listeners(consts.UBUNTU) + + def test_get_all_listeners(self): + self._test_get_all_listeners(consts.CENTOS) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_loadbalancers') + @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' + 'Loadbalancer._check_haproxy_status') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'parse_haproxy_file') + def _test_get_all_listeners(self, distro, mock_parse, mock_status, + mock_lbs): + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + + # no listeners + mock_lbs.side_effect = [[]] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + '/listeners') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + '/listeners') + + self.assertEqual(200, rv.status_code) + self.assertFalse(jsonutils.loads(rv.data.decode('utf-8'))) + + # one listener ACTIVE + mock_lbs.side_effect = [['123']] + mock_parse.side_effect = [['fake_socket', {'123': {'mode': 'test'}}]] + mock_status.side_effect = [consts.ACTIVE] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + '/listeners') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + '/listeners') + + self.assertEqual(200, rv.status_code) + self.assertEqual( + [{'status': consts.ACTIVE, 'type': 'test', 'uuid': '123'}], + jsonutils.loads(rv.data.decode('utf-8'))) + + # two listeners, two modes + mock_lbs.side_effect = [['123', '456']] + mock_parse.side_effect = [['fake_socket', {'123': {'mode': 'test'}}], + ['fake_socket', {'456': {'mode': 'http'}}]] + mock_status.return_value = consts.ACTIVE + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + '/listeners') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + '/listeners') + + self.assertEqual(200, rv.status_code) + self.assertEqual( + [{'status': consts.ACTIVE, 'type': 'test', 'uuid': '123'}, + {'status': consts.ACTIVE, 'type': 'http', 'uuid': '456'}], + jsonutils.loads(rv.data.decode('utf-8'))) + + def test_ubuntu_delete_cert(self): + self._test_delete_cert(consts.UBUNTU) + + def test_centos_delete_cert(self): + self._test_delete_cert(consts.CENTOS) + + @mock.patch('os.path.exists') + @mock.patch('os.remove') + def _test_delete_cert(self, distro, mock_remove, mock_exists): + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + mock_exists.side_effect = [False] + if distro == 
consts.UBUNTU: + rv = self.ubuntu_app.delete( + '/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.pem') + elif distro == consts.CENTOS: + rv = self.centos_app.delete( + '/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.pem') + self.assertEqual(200, rv.status_code) + self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8'))) + mock_exists.assert_called_once_with( + '/var/lib/octavia/certs/123/test.pem') + + # wrong file name + mock_exists.side_effect = [True] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.delete( + '/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.bla') + elif distro == consts.CENTOS: + rv = self.centos_app.delete( + '/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.bla') + self.assertEqual(400, rv.status_code) + + mock_exists.side_effect = [True] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.delete( + '/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.pem') + elif distro == consts.CENTOS: + rv = self.centos_app.delete( + '/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.pem') + self.assertEqual(200, rv.status_code) + self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8'))) + mock_remove.assert_called_once_with( + '/var/lib/octavia/certs/123/test.pem') + + def test_ubuntu_get_certificate_md5(self): + self._test_get_certificate_md5(consts.UBUNTU) + + def test_centos_get_certificate_md5(self): + self._test_get_certificate_md5(consts.CENTOS) + + @mock.patch('os.path.exists') + def _test_get_certificate_md5(self, distro, mock_exists): + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + CONTENT = "TestTest" + + mock_exists.side_effect = [False] + + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.pem') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.pem') + self.assertEqual(404, rv.status_code) + self.assertEqual(dict( + details='No certificate with filename: test.pem', + message='Certificate Not Found'), + jsonutils.loads(rv.data.decode('utf-8'))) + mock_exists.assert_called_with('/var/lib/octavia/certs/123/test.pem') + + # wrong file name + mock_exists.side_effect = [True] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.bla', + data='TestTest') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.bla', + data='TestTest') + self.assertEqual(400, rv.status_code) + + mock_exists.return_value = True + mock_exists.side_effect = None + if distro == consts.UBUNTU: + path = self.ubuntu_test_server._loadbalancer._cert_file_path( + '123', 'test.pem') + elif distro == consts.CENTOS: + path = self.centos_test_server._loadbalancer._cert_file_path( + '123', 'test.pem') + self.useFixture(test_utils.OpenFixture(path, CONTENT)) + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.pem') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.pem') + self.assertEqual(200, rv.status_code) + self.assertEqual( + dict(md5sum=hashlib.md5(octavia_utils.b(CONTENT), + usedforsecurity=False).hexdigest()), + jsonutils.loads(rv.data.decode('utf-8'))) + + def test_ubuntu_upload_certificate_md5(self): + self._test_upload_certificate_md5(consts.UBUNTU) + + 
def test_centos_upload_certificate_md5(self): + self._test_upload_certificate_md5(consts.CENTOS) + + @mock.patch('os.path.exists') + @mock.patch('os.makedirs') + def _test_upload_certificate_md5(self, distro, mock_makedir, mock_exists): + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + # wrong file name + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.bla', + data='TestTest') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/123/certificates/test.bla', + data='TestTest') + self.assertEqual(400, rv.status_code) + + mock_exists.return_value = True + if distro == consts.UBUNTU: + path = self.ubuntu_test_server._loadbalancer._cert_file_path( + '123', 'test.pem') + elif distro == consts.CENTOS: + path = self.centos_test_server._loadbalancer._cert_file_path( + '123', 'test.pem') + + m = self.useFixture(test_utils.OpenFixture(path)).mock_open + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/123/certificates/' + 'test.pem', data='TestTest') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/123/certificates/' + 'test.pem', data='TestTest') + self.assertEqual(200, rv.status_code) + self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8'))) + handle = m() + handle.write.assert_called_once_with(octavia_utils.b('TestTest')) + + mock_exists.return_value = False + m = self.useFixture(test_utils.OpenFixture(path)).mock_open + + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/loadbalancer/123/certificates/' + 'test.pem', data='TestTest') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/loadbalancer/123/certificates/' + 'test.pem', data='TestTest') + self.assertEqual(200, rv.status_code) + self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8'))) + handle = m() + handle.write.assert_called_once_with(octavia_utils.b('TestTest')) + mock_makedir.assert_called_once_with('/var/lib/octavia/certs/123') + + def test_ubuntu_upload_server_certificate(self): + self._test_upload_server_certificate(consts.UBUNTU) + + def test_centos_upload_server_certificate(self): + self._test_upload_server_certificate(consts.CENTOS) + + def _test_upload_server_certificate(self, distro): + certificate_update.BUFFER = 5 # test the while loop + path = '/etc/octavia/certs/server.pem' + m = self.useFixture(test_utils.OpenFixture(path)).mock_open + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/certificate', data='TestTest') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/certificate', data='TestTest') + self.assertEqual(202, rv.status_code) + self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8'))) + handle = m() + handle.write.assert_any_call(octavia_utils.b('TestT')) + handle.write.assert_any_call(octavia_utils.b('est')) + + def _check_centos_files(self, handle): + handle.write.assert_any_call( + '\n# Generated by Octavia agent\n' + '#!/bin/bash\n' + 'if [[ "$1" != "lo" ]]\n' + ' then\n' + ' /usr/local/bin/lvs-masquerade.sh add ipv4 $1\n' + ' /usr/local/bin/lvs-masquerade.sh add ipv6 $1\n' + 'fi') + handle.write.assert_any_call( + 
'\n# Generated by Octavia agent\n' + '#!/bin/bash\n' + 'if [[ "$1" != "lo" ]]\n' + ' then\n' + ' /usr/local/bin/lvs-masquerade.sh delete ipv4 $1\n' + ' /usr/local/bin/lvs-masquerade.sh delete ipv6 $1\n' + 'fi') + + def test_ubuntu_plug_network(self): + self._test_plug_network(consts.UBUNTU) + + def test_centos_plug_network(self): + self._test_plug_network(consts.CENTOS) + + @mock.patch('os.chmod') + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'plug.Plug._netns_interface_exists') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'plug.Plug._netns_interface_by_mac') + @mock.patch('os.path.isfile') + def _test_plug_network(self, distro, mock_isfile, mock_int_by_mac, + mock_int_exists, mock_check_output, mock_netns, + mock_pyroute2, mock_os_chmod): + mock_ipr = mock.MagicMock() + mock_ipr_instance = mock.MagicMock() + mock_ipr_instance.link_lookup.side_effect = [ + [], [], [33], [33], [33], [33], [33], [33], [33], [33]] + mock_ipr_instance.get_links.return_value = ({ + 'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},) + mock_ipr.__enter__.return_value = mock_ipr_instance + mock_pyroute2.return_value = mock_ipr + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + port_info = {'mac_address': '123'} + test_int_num = random.randint(0, 9999) + + mock_int_exists.return_value = False + netns_handle = mock_netns.return_value.__enter__.return_value + netns_handle.get_links.return_value = [ + {'attrs': [['IFLA_IFNAME', f'eth{idx}']]} + for idx in range(test_int_num)] + mock_isfile.return_value = True + + mock_check_output.return_value = "1\n2\n3\n" + + test_int_num = str(test_int_num) + + # No interface at all + file_name = '/sys/bus/pci/rescan' + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + mock_open.assert_called_with(file_name, os.O_WRONLY) + mock_fdopen.assert_called_with(123, 'w') + m().write.assert_called_once_with('1') + self.assertEqual(404, rv.status_code) + self.assertEqual(dict(details="No suitable network interface found"), + jsonutils.loads(rv.data.decode('utf-8'))) + + # No interface down + m().reset_mock() + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' 
+ 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + mock_open.assert_called_with(file_name, os.O_WRONLY) + mock_fdopen.assert_called_with(123, 'w') + m().write.assert_called_once_with('1') + self.assertEqual(404, rv.status_code) + self.assertEqual(dict(details="No suitable network interface found"), + jsonutils.loads(rv.data.decode('utf-8'))) + + # One Interface down, Happy Path + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + file_name = f'/etc/octavia/interfaces/eth{test_int_num}.json' + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' + 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + self.assertEqual(202, rv.status_code) + + mock_open.assert_any_call(file_name, flags, mode) + mock_fdopen.assert_any_call(123, 'w') + + plug_inf_file = '/var/lib/octavia/plugged_interfaces' + flags = os.O_RDWR | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_any_call(plug_inf_file, flags, mode) + mock_fdopen.assert_any_call(123, 'r+') + + expected_dict = { + consts.NAME: f"eth{test_int_num}", + consts.ADDRESSES: [ + { + consts.DHCP: True, + consts.IPV6AUTO: True + } + ], + consts.ROUTES: [ + ], + consts.RULES: [ + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv4 " + "eth{}".format(test_int_num)) + }, { + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv6 " + "eth{}".format(test_int_num)) + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv4 " + "eth{}".format(test_int_num)) + }, { + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv6 " + "eth{}".format(test_int_num)) + }] + } + } + + mock_dump.assert_called_once() + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, args[0], expected_dict) + + mock_check_output.assert_called_with( + ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, + 'amphora-interface', 'up', 'eth' + test_int_num], + stderr=subprocess.STDOUT, encoding='utf-8') + + # fixed IPs happy path + port_info = {'mac_address': '123', 'mtu': 1450, 'fixed_ips': [ + {'ip_address': '10.0.0.5', 'subnet_cidr': '10.0.0.0/24'}]} + + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + file_name = f'/etc/octavia/interfaces/eth{test_int_num}.json' + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 
'octavia.amphorae.backends.utils.interface_file.' + 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + self.assertEqual(202, rv.status_code) + + mock_open.assert_any_call(file_name, flags, mode) + mock_fdopen.assert_any_call(123, 'w') + + plug_inf_file = '/var/lib/octavia/plugged_interfaces' + flags = os.O_RDWR | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_any_call(plug_inf_file, flags, mode) + mock_fdopen.assert_any_call(123, 'r+') + + expected_dict = { + consts.NAME: f"eth{test_int_num}", + consts.MTU: 1450, + consts.ADDRESSES: [ + {consts.ADDRESS: '10.0.0.5', consts.PREFIXLEN: 24} + ], + consts.ROUTES: [], + consts.RULES: [], + consts.SCRIPTS: { + consts.IFACE_UP: [ + {consts.COMMAND: + '/usr/local/bin/lvs-masquerade.sh add ipv4 ' + 'eth{}'.format(test_int_num)}], + consts.IFACE_DOWN: [ + {consts.COMMAND: + '/usr/local/bin/lvs-masquerade.sh delete ipv4 ' + 'eth{}'.format(test_int_num)}] + } + } + + mock_dump.assert_called_once() + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, args[0], expected_dict) + + mock_check_output.assert_called_with( + ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, + 'amphora-interface', 'up', 'eth' + test_int_num], + stderr=subprocess.STDOUT, encoding='utf-8') + + # fixed IPs happy path IPv6 + port_info = {'mac_address': '123', 'mtu': 1450, 'fixed_ips': [ + {'ip_address': '2001:db8::2', 'subnet_cidr': '2001:db8::/32'}]} + + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + file_name = f'/etc/octavia/interfaces/eth{test_int_num}.json' + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' 
+ 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + self.assertEqual(202, rv.status_code) + + mock_open.assert_any_call(file_name, flags, mode) + mock_fdopen.assert_any_call(123, 'w') + + plug_inf_file = '/var/lib/octavia/plugged_interfaces' + flags = os.O_RDWR | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_any_call(plug_inf_file, flags, mode) + mock_fdopen.assert_any_call(123, 'r+') + + expected_dict = { + consts.NAME: f"eth{test_int_num}", + consts.MTU: 1450, + consts.ADDRESSES: [ + {consts.ADDRESS: '2001:0db8::2', + consts.PREFIXLEN: 32}], + consts.ROUTES: [], + consts.RULES: [], + consts.SCRIPTS: { + consts.IFACE_UP: [ + {consts.COMMAND: + '/usr/local/bin/lvs-masquerade.sh add ipv6 ' + 'eth{}'.format(test_int_num)}], + consts.IFACE_DOWN: [ + {consts.COMMAND: + '/usr/local/bin/lvs-masquerade.sh delete ipv6 ' + 'eth{}'.format(test_int_num)}] + } + } + + mock_dump.assert_called_once() + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, args[0], expected_dict) + + mock_check_output.assert_called_with( + ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, + 'amphora-interface', 'up', 'eth' + test_int_num], + stderr=subprocess.STDOUT, encoding='utf-8') + + # fixed IPs, bogus IP + port_info = {'mac_address': '123', 'fixed_ips': [ + {'ip_address': '10005', 'subnet_cidr': '10.0.0.0/24'}]} + + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + file_name = f'/etc/octavia/interfaces/eth{test_int_num}.json' + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + self.assertEqual(400, rv.status_code) + + # same as above but ifup fails + port_info = {'mac_address': '123', 'fixed_ips': [ + {'ip_address': '10.0.0.5', 'subnet_cidr': '10.0.0.0/24'}]} + mock_check_output.side_effect = [ + subprocess.CalledProcessError(7, 'test', RANDOM_ERROR), + subprocess.CalledProcessError(7, 'test', RANDOM_ERROR) + ] + + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + self.assertEqual(500, rv.status_code) + self.assertEqual( + {'details': RANDOM_ERROR, + 'message': 'Error plugging network'}, + jsonutils.loads(rv.data.decode('utf-8'))) + + # Bad port_info tests + port_info = 'Bad data' + if distro == 
consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + self.assertEqual(400, rv.status_code) + + port_info = {'fixed_ips': [{'ip_address': '10.0.0.5', + 'subnet_cidr': '10.0.0.0/24'}]} + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + self.assertEqual(400, rv.status_code) + + def test_ubuntu_plug_network_host_routes(self): + self._test_plug_network_host_routes(consts.UBUNTU) + + def test_centos_plug_network_host_routes(self): + self._test_plug_network_host_routes(consts.CENTOS) + + @mock.patch('os.chmod') + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + def _test_plug_network_host_routes(self, distro, mock_check_output, + mock_netns, mock_pyroute2, + mock_os_chmod): + mock_ipr = mock.MagicMock() + mock_ipr_instance = mock.MagicMock() + mock_ipr_instance.link_lookup.return_value = [33] + mock_ipr_instance.get_links.return_value = ({ + 'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},) + mock_ipr.__enter__.return_value = mock_ipr_instance + mock_pyroute2.return_value = mock_ipr + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + + SUBNET_CIDR = '192.0.2.0/24' + PREFIXLEN = 24 + IP = '192.0.1.5' + MAC = '123' + DEST1 = '198.51.100.0/24' + DEST2 = '203.0.113.1/32' + NEXTHOP = '192.0.2.1' + + netns_handle = mock_netns.return_value.__enter__.return_value + netns_handle.get_links.return_value = [{ + 'attrs': [['IFLA_IFNAME', 'eth2']]}] + + port_info = {'mac_address': MAC, 'mtu': 1450, 'fixed_ips': [ + {'ip_address': IP, 'subnet_cidr': SUBNET_CIDR, + 'host_routes': [{'destination': DEST1, 'nexthop': NEXTHOP}, + {'destination': DEST2, 'nexthop': NEXTHOP}]}]} + + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + file_name = '/etc/octavia/interfaces/eth3.json' + + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' 
+ 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/network", + content_type='application/json', + data=jsonutils.dumps(port_info)) + self.assertEqual(202, rv.status_code) + + mock_open.assert_any_call(file_name, flags, mode) + mock_fdopen.assert_any_call(123, 'w') + + plug_inf_file = '/var/lib/octavia/plugged_interfaces' + flags = os.O_RDWR | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_any_call(plug_inf_file, flags, mode) + mock_fdopen.assert_any_call(123, 'r+') + + expected_dict = { + consts.NAME: 'eth3', + consts.MTU: 1450, + consts.ADDRESSES: [ + { + consts.ADDRESS: IP, + consts.PREFIXLEN: PREFIXLEN + } + ], + consts.ROUTES: [ + { + consts.DST: DEST1, + consts.GATEWAY: NEXTHOP + }, { + consts.DST: DEST2, + consts.GATEWAY: NEXTHOP + } + ], + consts.RULES: [ + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv4 eth3") + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv4 " + "eth3") + }] + } + } + + mock_dump.assert_called_once() + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, args[0], expected_dict) + + mock_check_output.assert_called_with( + ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, + 'amphora-interface', 'up', 'eth3'], stderr=subprocess.STDOUT, + encoding='utf-8') + + def test_ubuntu_plug_VIP4(self): + self._test_plug_VIP4(consts.UBUNTU) + + def test_centos_plug_VIP4(self): + self._test_plug_VIP4(consts.CENTOS) + + @mock.patch('os.chmod') + @mock.patch('shutil.copy2') + @mock.patch('pyroute2.NSPopen', create=True) + @mock.patch('octavia.amphorae.backends.agent.api_server.' 
+ 'plug.Plug._netns_interface_exists') + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + @mock.patch('shutil.copytree') + @mock.patch('os.makedirs') + @mock.patch('os.path.isfile') + def _test_plug_VIP4(self, distro, mock_isfile, mock_makedirs, + mock_copytree, mock_check_output, mock_netns, + mock_pyroute2, mock_int_exists, + mock_nspopen, mock_copy2, mock_os_chmod): + mock_ipr = mock.MagicMock() + mock_ipr_instance = mock.MagicMock() + mock_ipr_instance.link_lookup.side_effect = [[], [], [33], [33], [33], + [33], [33], [33]] + mock_ipr_instance.get_links.return_value = ({ + 'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},) + mock_ipr.__enter__.return_value = mock_ipr_instance + mock_pyroute2.return_value = mock_ipr + + mock_isfile.return_value = True + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + subnet_info = { + 'subnet_cidr': '203.0.113.0/24', + 'gateway': '203.0.113.1', + 'mac_address': '123' + } + + # malformed ip + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + '/plug/vip/error', + data=jsonutils.dumps(subnet_info), + content_type='application/json') + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + '/plug/vip/error', + data=jsonutils.dumps(subnet_info), + content_type='application/json') + self.assertEqual(400, rv.status_code) + + # No subnet info + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + '/plug/vip/error') + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + '/plug/vip/error') + + self.assertEqual(400, rv.status_code) + + # Interface already plugged + mock_int_exists.return_value = True + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + self.assertEqual(409, rv.status_code) + self.assertEqual(dict(message="Interface already exists"), + jsonutils.loads(rv.data.decode('utf-8'))) + mock_int_exists.return_value = False + + # No interface at all + file_name = '/sys/bus/pci/rescan' + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen: + mock_open.return_value = 123 + + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + mock_open.assert_called_with(file_name, os.O_WRONLY) + mock_fdopen.assert_called_with(123, 'w') + m().write.assert_called_once_with('1') + self.assertEqual(404, rv.status_code) + self.assertEqual(dict(details="No suitable network interface found"), + jsonutils.loads(rv.data.decode('utf-8'))) + + # Two interfaces down + m().reset_mock() + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen: + mock_open.return_value = 123 + + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + 
content_type='application/json', + data=jsonutils.dumps(subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + mock_open.assert_called_with(file_name, os.O_WRONLY) + mock_fdopen.assert_called_with(123, 'w') + m().write.assert_called_once_with('1') + self.assertEqual(404, rv.status_code) + self.assertEqual(dict(details="No suitable network interface found"), + jsonutils.loads(rv.data.decode('utf-8'))) + + # Happy Path IPv4, with VRRP_IP and host route + full_subnet_info = { + 'subnet_cidr': '203.0.113.0/24', + 'gateway': '203.0.113.1', + 'mac_address': '123', + 'vrrp_ip': '203.0.113.4', + 'mtu': 1450, + 'host_routes': [{'destination': '203.0.114.0/24', + 'nexthop': '203.0.113.5'}, + {'destination': '203.0.115.1/32', + 'nexthop': '203.0.113.5'}] + } + + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + file_name = ('/etc/octavia/interfaces/{netns_int}.json'.format( + netns_int=consts.NETNS_PRIMARY_INTERFACE)) + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' + 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps( + full_subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps( + full_subnet_info)) + self.assertEqual(202, rv.status_code) + mock_open.assert_any_call(file_name, flags, mode) + mock_fdopen.assert_any_call(123, 'w') + + plug_inf_file = '/var/lib/octavia/plugged_interfaces' + flags = os.O_RDWR | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_any_call(plug_inf_file, flags, mode) + mock_fdopen.assert_any_call(123, 'r+') + + expected_dict = { + consts.NAME: consts.NETNS_PRIMARY_INTERFACE, + consts.MTU: 1450, + consts.ADDRESSES: [ + { + consts.ADDRESS: "203.0.113.4", + consts.PREFIXLEN: 24 + }, { + consts.ADDRESS: "203.0.113.2", + consts.PREFIXLEN: 32 + } + ], + consts.ROUTES: [ + { + consts.DST: '0.0.0.0/0', + consts.GATEWAY: '203.0.113.1', + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '0.0.0.0/0', + consts.GATEWAY: '203.0.113.1', + consts.TABLE: 1, + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '203.0.113.0/24', + consts.SCOPE: 'link' + }, { + consts.DST: '203.0.113.0/24', + consts.PREFSRC: '203.0.113.2', + consts.SCOPE: 'link', + consts.TABLE: 1 + }, { + consts.DST: '203.0.114.0/24', + consts.GATEWAY: '203.0.113.5' + }, { + consts.DST: '203.0.115.1/32', + consts.GATEWAY: '203.0.113.5' + }, { + consts.DST: '203.0.114.0/24', + consts.GATEWAY: '203.0.113.5', + consts.TABLE: 1 + }, { + consts.DST: '203.0.115.1/32', + consts.GATEWAY: '203.0.113.5', + consts.TABLE: 1 + } + ], + consts.RULES: [ + { + consts.SRC: '203.0.113.2', + consts.SRC_LEN: 32, + consts.TABLE: 1 + } + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv4 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv4 " + 
"{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }] + } + } + + mock_dump.assert_called_once() + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, args[0], expected_dict) + + mock_check_output.assert_called_with( + ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, + 'amphora-interface', 'up', + consts.NETNS_PRIMARY_INTERFACE], stderr=subprocess.STDOUT, + encoding='utf-8') + + # One Interface down, Happy Path IPv4 + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + file_name = ('/etc/octavia/interfaces/{}.json'.format( + consts.NETNS_PRIMARY_INTERFACE)) + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' + 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + self.assertEqual(202, rv.status_code) + mock_open.assert_any_call(file_name, flags, mode) + mock_fdopen.assert_any_call(123, 'w') + + plug_inf_file = '/var/lib/octavia/plugged_interfaces' + flags = os.O_RDWR | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_any_call(plug_inf_file, flags, mode) + mock_fdopen.assert_any_call(123, 'r+') + + expected_dict = { + consts.NAME: consts.NETNS_PRIMARY_INTERFACE, + consts.ADDRESSES: [ + { + consts.DHCP: True + }, { + consts.ADDRESS: "203.0.113.2", + consts.PREFIXLEN: 32 + } + ], + consts.ROUTES: [ + { + consts.DST: '0.0.0.0/0', + consts.GATEWAY: '203.0.113.1', + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '0.0.0.0/0', + consts.GATEWAY: '203.0.113.1', + consts.FLAGS: [consts.ONLINK], + consts.TABLE: 1 + }, { + consts.DST: '203.0.113.0/24', + consts.SCOPE: 'link' + }, { + consts.DST: '203.0.113.0/24', + consts.PREFSRC: '203.0.113.2', + consts.SCOPE: 'link', + consts.TABLE: 1 + } + ], + consts.RULES: [ + { + consts.SRC: '203.0.113.2', + consts.SRC_LEN: 32, + consts.TABLE: 1 + } + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv4 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv4 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }] + } + } + + mock_dump.assert_called_once() + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, args[0], expected_dict) + + mock_check_output.assert_called_with( + ['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, + 'amphora-interface', 'up', + consts.NETNS_PRIMARY_INTERFACE], stderr=subprocess.STDOUT, + encoding='utf-8') + + mock_check_output.side_effect = [ + subprocess.CalledProcessError(7, 'test', RANDOM_ERROR), + subprocess.CalledProcessError(7, 'test', RANDOM_ERROR) + ] + + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + 
data=jsonutils.dumps(subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + self.assertEqual(500, rv.status_code) + self.assertEqual( + {'details': RANDOM_ERROR, + 'message': 'Error plugging VIP'}, + jsonutils.loads(rv.data.decode('utf-8'))) + + def test_ubuntu_plug_VIP6(self): + self._test_plug_vip6(consts.UBUNTU) + + def test_centos_plug_VIP6(self): + self._test_plug_vip6(consts.CENTOS) + + @mock.patch('os.chmod') + @mock.patch('shutil.copy2') + @mock.patch('pyroute2.NSPopen', create=True) + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + @mock.patch('shutil.copytree') + @mock.patch('os.makedirs') + @mock.patch('os.path.isfile') + def _test_plug_vip6(self, distro, mock_isfile, mock_makedirs, + mock_copytree, mock_check_output, mock_netns, + mock_pyroute2, mock_nspopen, + mock_copy2, mock_os_chmod): + mock_ipr = mock.MagicMock() + mock_ipr_instance = mock.MagicMock() + mock_ipr_instance.link_lookup.side_effect = [[], [], [33], [33], [33], + [33], [33], [33]] + mock_ipr_instance.get_links.return_value = ({ + 'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},) + mock_ipr.__enter__.return_value = mock_ipr_instance + mock_pyroute2.return_value = mock_ipr + + mock_isfile.return_value = True + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + subnet_info = { + 'subnet_cidr': '2001:db8::/32', + 'gateway': '2001:db8::1', + 'mac_address': '123' + } + + # malformed ip + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + '/plug/vip/error', + data=jsonutils.dumps( + subnet_info), + content_type='application/json') + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + '/plug/vip/error', + data=jsonutils.dumps( + subnet_info), + content_type='application/json') + self.assertEqual(400, rv.status_code) + + # No subnet info + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + '/plug/vip/error') + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + '/plug/vip/error') + self.assertEqual(400, rv.status_code) + + # No interface at all + file_name = '/sys/bus/pci/rescan' + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + mock_open.assert_called_with(file_name, os.O_WRONLY) + mock_fdopen.assert_called_with(123, 'w') + m().write.assert_called_once_with('1') + self.assertEqual(404, rv.status_code) + self.assertEqual(dict(details="No suitable network interface found"), + jsonutils.loads(rv.data.decode('utf-8'))) + + # Two interfaces down + m().reset_mock() + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen: + mock_open.return_value = 123 + if distro == 
consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + mock_open.assert_called_with(file_name, os.O_WRONLY) + mock_fdopen.assert_called_with(123, 'w') + m().write.assert_called_once_with('1') + self.assertEqual(404, rv.status_code) + self.assertEqual(dict(details="No suitable network interface found"), + jsonutils.loads(rv.data.decode('utf-8'))) + + # Happy Path IPv6, with VRRP_IP and host route + full_subnet_info = { + 'subnet_cidr': '2001:db8::/32', + 'gateway': '2001:db8::1', + 'mac_address': '123', + 'vrrp_ip': '2001:db8::4', + 'mtu': 1450, + 'host_routes': [{'destination': '2001:db9::/32', + 'nexthop': '2001:db8::5'}, + {'destination': '2001:db9::1/128', + 'nexthop': '2001:db8::5'}] + } + + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + file_name = (f'/etc/octavia/interfaces/' + f'{consts.NETNS_PRIMARY_INTERFACE}.json') + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' + 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps( + full_subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps( + full_subnet_info)) + self.assertEqual(202, rv.status_code) + mock_open.assert_any_call(file_name, flags, mode) + mock_fdopen.assert_any_call(123, 'w') + + plug_inf_file = '/var/lib/octavia/plugged_interfaces' + flags = os.O_RDWR | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_any_call(plug_inf_file, flags, mode) + mock_fdopen.assert_any_call(123, 'r+') + expected_dict = { + consts.NAME: consts.NETNS_PRIMARY_INTERFACE, + consts.MTU: 1450, + consts.ADDRESSES: [ + { + consts.ADDRESS: '2001:db8::4', + consts.PREFIXLEN: 32 + }, { + consts.ADDRESS: '2001:0db8::2', + consts.PREFIXLEN: 128 + } + ], + consts.ROUTES: [ + { + consts.DST: '::/0', + consts.GATEWAY: '2001:db8::1', + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '::/0', + consts.GATEWAY: '2001:db8::1', + consts.FLAGS: [consts.ONLINK], + consts.TABLE: 1 + }, { + consts.DST: '2001:0db8::/32', + consts.SCOPE: 'link' + }, { + consts.DST: '2001:0db8::/32', + consts.PREFSRC: '2001:0db8::2', + consts.SCOPE: 'link', + consts.TABLE: 1 + }, { + consts.DST: '2001:db9::/32', + consts.GATEWAY: '2001:db8::5' + }, { + consts.DST: '2001:db9::1/128', + consts.GATEWAY: '2001:db8::5' + }, { + consts.DST: '2001:db9::/32', + consts.GATEWAY: '2001:db8::5', + consts.TABLE: 1 + }, { + consts.DST: '2001:db9::1/128', + consts.GATEWAY: '2001:db8::5', + consts.TABLE: 1 + } + ], + consts.RULES: [ + { + consts.SRC: '2001:0db8::2', + consts.SRC_LEN: 128, + consts.TABLE: 1 + } + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv6 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }], + consts.IFACE_DOWN: [{ + 
consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv6 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }] + } + } + + mock_dump.assert_called_once() + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, args[0], expected_dict) + + mock_check_output.assert_called_with( + [ + 'ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, + 'amphora-interface', 'up', '{netns_int}'.format( + netns_int=consts.NETNS_PRIMARY_INTERFACE + ) + ], + stderr=subprocess.STDOUT, + encoding='utf-8') + + # One Interface down, Happy Path IPv6 + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + file_name = (f'/etc/octavia/interfaces/' + f'{consts.NETNS_PRIMARY_INTERFACE}.json') + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' + 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + self.assertEqual(202, rv.status_code) + mock_open.assert_any_call(file_name, flags, mode) + mock_fdopen.assert_any_call(123, 'w') + + plug_inf_file = '/var/lib/octavia/plugged_interfaces' + flags = os.O_RDWR | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_any_call(plug_inf_file, flags, mode) + mock_fdopen.assert_any_call(123, 'r+') + + expected_dict = { + consts.NAME: consts.NETNS_PRIMARY_INTERFACE, + consts.MTU: None, + consts.ADDRESSES: [ + { + consts.IPV6AUTO: True + }, + { + consts.ADDRESS: '2001:db8::2', + consts.PREFIXLEN: 128 + } + ], + consts.ROUTES: [ + { + consts.DST: '::/0', + consts.GATEWAY: '2001:db8::1', + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '::/0', + consts.GATEWAY: '2001:db8::1', + consts.FLAGS: [consts.ONLINK], + consts.TABLE: 1 + }, { + consts.DST: '2001:db8::/32', + consts.SCOPE: 'link' + }, { + consts.DST: '2001:db8::/32', + consts.PREFSRC: '2001:db8::2', + consts.SCOPE: 'link', + consts.TABLE: 1 + } + ], + consts.RULES: [ + { + consts.SRC: '2001:db8::2', + consts.SRC_LEN: 128, + consts.TABLE: 1 + } + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv6 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv6 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }] + } + } + + mock_dump.assert_called_once() + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, args[0], expected_dict) + + mock_check_output.assert_called_with([ + 'ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE, + 'amphora-interface', 'up', consts.NETNS_PRIMARY_INTERFACE + ], stderr=subprocess.STDOUT, encoding='utf-8') + mock_check_output.side_effect = [ + subprocess.CalledProcessError(7, 'test', RANDOM_ERROR), + subprocess.CalledProcessError(7, 'test', RANDOM_ERROR) + ] + + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + if distro == consts.UBUNTU: + rv = 
self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps(subnet_info)) + self.assertEqual(500, rv.status_code) + self.assertEqual( + {'details': RANDOM_ERROR, + 'message': 'Error plugging VIP'}, + jsonutils.loads(rv.data.decode('utf-8'))) + + def test_ubuntu_plug_VIP_with_additional_VIP6(self): + self._test_plug_VIP_with_additional_VIP6(consts.UBUNTU) + + def test_centos_plug_VIP_with_additional_VIP6(self): + self._test_plug_VIP_with_additional_VIP6(consts.CENTOS) + + @mock.patch('os.chmod') + @mock.patch('shutil.copy2') + @mock.patch('pyroute2.NSPopen', create=True) + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'plug.Plug._netns_interface_exists') + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + @mock.patch('shutil.copytree') + @mock.patch('os.makedirs') + @mock.patch('os.path.isfile') + def _test_plug_VIP_with_additional_VIP6(self, distro, mock_isfile, + mock_makedirs, mock_copytree, + mock_check_output, mock_netns, + mock_pyroute2, + mock_int_exists, mock_nspopen, + mock_copy2, mock_os_chmod): + mock_ipr = mock.MagicMock() + mock_ipr_instance = mock.MagicMock() + mock_ipr_instance.link_lookup.return_value = [33] + mock_ipr_instance.get_links.return_value = ({ + 'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},) + mock_ipr.__enter__.return_value = mock_ipr_instance + mock_pyroute2.return_value = mock_ipr + + mock_isfile.return_value = True + mock_int_exists.return_value = False + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + + # Happy Path IPv4 with IPv6 additional VIP, with VRRP_IP and host + # route + full_subnet_info = { + 'subnet_cidr': '203.0.113.0/24', + 'gateway': '203.0.113.1', + 'mac_address': '123', + 'vrrp_ip': '203.0.113.4', + 'mtu': 1450, + 'host_routes': [{'destination': '203.0.114.0/24', + 'nexthop': '203.0.113.5'}, + {'destination': '203.0.115.1/32', + 'nexthop': '203.0.113.5'}], + 'additional_vips': [ + {'subnet_cidr': '2001:db8::/32', + 'gateway': '2001:db8::1', + 'ip_address': '2001:db8::4', + 'host_routes': [{'destination': '2001:db9::/32', + 'nexthop': '2001:db8::5'}, + {'destination': '2001:db9::1/128', + 'nexthop': '2001:db8::5'}] + }, + ], + } + + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + file_name = (f'/etc/octavia/interfaces/' + f'{consts.NETNS_PRIMARY_INTERFACE}.json') + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' 
+ 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps( + full_subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/203.0.113.2", + content_type='application/json', + data=jsonutils.dumps( + full_subnet_info)) + self.assertEqual(202, rv.status_code) + mock_open.assert_any_call(file_name, flags, mode) + mock_fdopen.assert_any_call(123, 'w') + + plug_inf_file = '/var/lib/octavia/plugged_interfaces' + flags = os.O_RDWR | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_any_call(plug_inf_file, flags, mode) + mock_fdopen.assert_any_call(123, 'r+') + + expected_dict = { + consts.NAME: consts.NETNS_PRIMARY_INTERFACE, + consts.MTU: 1450, + consts.ADDRESSES: [ + { + consts.ADDRESS: '203.0.113.4', + consts.PREFIXLEN: 24 + }, { + consts.ADDRESS: '203.0.113.2', + consts.PREFIXLEN: 32 + }, { + consts.ADDRESS: '2001:db8::4', + consts.PREFIXLEN: 128 + } + ], + consts.ROUTES: [ + { + consts.DST: '0.0.0.0/0', + consts.GATEWAY: '203.0.113.1', + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '0.0.0.0/0', + consts.GATEWAY: '203.0.113.1', + consts.TABLE: 1, + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '203.0.113.0/24', + consts.SCOPE: 'link', + }, { + consts.DST: '203.0.113.0/24', + consts.PREFSRC: '203.0.113.2', + consts.SCOPE: 'link', + consts.TABLE: 1 + }, { + consts.DST: '203.0.114.0/24', + consts.GATEWAY: '203.0.113.5' + }, { + consts.DST: '203.0.115.1/32', + consts.GATEWAY: '203.0.113.5' + }, { + consts.DST: '203.0.114.0/24', + consts.GATEWAY: '203.0.113.5', + consts.TABLE: 1 + }, { + consts.DST: '203.0.115.1/32', + consts.GATEWAY: '203.0.113.5', + consts.TABLE: 1 + }, { + consts.DST: '::/0', + consts.GATEWAY: '2001:db8::1', + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '::/0', + consts.GATEWAY: '2001:db8::1', + consts.FLAGS: [consts.ONLINK], + consts.TABLE: 1 + }, { + consts.DST: '2001:db8::/32', + consts.SCOPE: 'link' + }, { + consts.DST: '2001:db8::/32', + consts.PREFSRC: '2001:db8::4', + consts.SCOPE: 'link', + consts.TABLE: 1 + }, { + consts.DST: '2001:db9::/32', + consts.GATEWAY: '2001:db8::5' + }, { + consts.DST: '2001:db9::1/128', + consts.GATEWAY: '2001:db8::5' + }, { + consts.DST: '2001:db9::/32', + consts.GATEWAY: '2001:db8::5', + consts.TABLE: 1 + }, { + consts.DST: '2001:db9::1/128', + consts.GATEWAY: '2001:db8::5', + consts.TABLE: 1 + } + ], + consts.RULES: [ + { + consts.SRC: '203.0.113.2', + consts.SRC_LEN: 32, + consts.TABLE: 1 + }, + { + consts.SRC: '2001:db8::4', + consts.SRC_LEN: 128, + consts.TABLE: 1 + } + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv4 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }, { + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv6 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv4 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }, { + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv6 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }] + } + } + + mock_dump.assert_called_once() + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, args[0], expected_dict) + + mock_check_output.assert_called_with([ + 'ip', 'netns', 'exec', 
consts.AMPHORA_NAMESPACE, + 'amphora-interface', 'up', + consts.NETNS_PRIMARY_INTERFACE + ], stderr=subprocess.STDOUT, encoding='utf-8') + + def test_ubuntu_plug_VIP6_with_additional_VIP(self): + self._test_plug_VIP6_with_additional_VIP(consts.UBUNTU) + + def test_centos_plug_VIP6_with_additional_VIP(self): + self._test_plug_VIP6_with_additional_VIP(consts.CENTOS) + + @mock.patch('os.chmod') + @mock.patch('shutil.copy2') + @mock.patch('pyroute2.NSPopen', create=True) + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'plug.Plug._netns_interface_exists') + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + @mock.patch('shutil.copytree') + @mock.patch('os.makedirs') + @mock.patch('os.path.isfile') + def _test_plug_VIP6_with_additional_VIP(self, distro, mock_isfile, + mock_makedirs, mock_copytree, + mock_check_output, mock_netns, + mock_pyroute2, + mock_int_exists, mock_nspopen, + mock_copy2, mock_os_chmod): + mock_ipr = mock.MagicMock() + mock_ipr_instance = mock.MagicMock() + mock_ipr_instance.link_lookup.return_value = [33] + mock_ipr_instance.get_links.return_value = ({ + 'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},) + mock_ipr.__enter__.return_value = mock_ipr_instance + mock_pyroute2.return_value = mock_ipr + + mock_isfile.return_value = True + mock_int_exists.return_value = False + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + + # Happy Path IPv6 with IPv4 additional VIP, with VRRP_IP and host + # route + full_subnet_info = { + 'subnet_cidr': '2001:db8::/32', + 'gateway': '2001:db8::1', + 'vrrp_ip': '2001:db8::4', + 'host_routes': [{'destination': '2001:db9::/32', + 'nexthop': '2001:db8::5'}, + {'destination': '2001:db9::1/128', + 'nexthop': '2001:db8::5'}], + 'mac_address': '123', + 'mtu': 1450, + 'additional_vips': [ + {'subnet_cidr': '203.0.113.0/24', + 'gateway': '203.0.113.1', + 'ip_address': '203.0.113.4', + 'host_routes': [{'destination': '203.0.114.0/24', + 'nexthop': '203.0.113.5'}, + {'destination': '203.0.115.1/32', + 'nexthop': '203.0.113.5'}], + }, + ], + } + + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + file_name = (f'/etc/octavia/interfaces/' + f'{consts.NETNS_PRIMARY_INTERFACE}.json') + m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open + + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen, mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' 
+ 'InterfaceFile.dump') as mock_dump: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps( + full_subnet_info)) + elif distro == consts.CENTOS: + rv = self.centos_app.post('/' + api_server.VERSION + + "/plug/vip/2001:db8::2", + content_type='application/json', + data=jsonutils.dumps( + full_subnet_info)) + self.assertEqual(202, rv.status_code) + mock_open.assert_any_call(file_name, flags, mode) + mock_fdopen.assert_any_call(123, 'w') + + plug_inf_file = '/var/lib/octavia/plugged_interfaces' + flags = os.O_RDWR | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_any_call(plug_inf_file, flags, mode) + mock_fdopen.assert_any_call(123, 'r+') + + expected_dict = { + consts.NAME: consts.NETNS_PRIMARY_INTERFACE, + consts.MTU: 1450, + consts.ADDRESSES: [ + { + consts.ADDRESS: '2001:db8::4', + consts.PREFIXLEN: 32 + }, { + consts.ADDRESS: '2001:db8::2', + consts.PREFIXLEN: 128 + }, { + consts.ADDRESS: '203.0.113.4', + consts.PREFIXLEN: 32 + } + ], + consts.ROUTES: [ + { + consts.DST: '::/0', + consts.GATEWAY: '2001:db8::1', + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '::/0', + consts.GATEWAY: '2001:db8::1', + consts.FLAGS: [consts.ONLINK], + consts.TABLE: 1 + }, { + consts.DST: '2001:db8::/32', + consts.SCOPE: 'link' + }, { + consts.DST: '2001:db8::/32', + consts.PREFSRC: '2001:db8::2', + consts.SCOPE: 'link', + consts.TABLE: 1 + }, { + consts.DST: '2001:db9::/32', + consts.GATEWAY: '2001:db8::5' + }, { + consts.DST: '2001:db9::1/128', + consts.GATEWAY: '2001:db8::5' + }, { + consts.DST: '2001:db9::/32', + consts.GATEWAY: '2001:db8::5', + consts.TABLE: 1 + }, { + consts.DST: '2001:db9::1/128', + consts.GATEWAY: '2001:db8::5', + consts.TABLE: 1 + }, { + consts.DST: '0.0.0.0/0', + consts.GATEWAY: '203.0.113.1', + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '0.0.0.0/0', + consts.GATEWAY: '203.0.113.1', + consts.TABLE: 1, + consts.FLAGS: [consts.ONLINK] + }, { + consts.DST: '203.0.113.0/24', + consts.SCOPE: 'link' + }, { + consts.DST: '203.0.113.0/24', + consts.PREFSRC: '203.0.113.4', + consts.SCOPE: 'link', + consts.TABLE: 1 + }, { + consts.DST: '203.0.114.0/24', + consts.GATEWAY: '203.0.113.5' + }, { + consts.DST: '203.0.115.1/32', + consts.GATEWAY: '203.0.113.5' + }, { + consts.DST: '203.0.114.0/24', + consts.GATEWAY: '203.0.113.5', + consts.TABLE: 1 + }, { + consts.DST: '203.0.115.1/32', + consts.GATEWAY: '203.0.113.5', + consts.TABLE: 1 + } + ], + consts.RULES: [ + { + consts.SRC: '2001:db8::2', + consts.SRC_LEN: 128, + consts.TABLE: 1 + }, { + consts.SRC: '203.0.113.4', + consts.SRC_LEN: 32, + consts.TABLE: 1 + }, + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv4 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }, { + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh add ipv6 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv4 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }, { + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv6 " + "{}".format(consts.NETNS_PRIMARY_INTERFACE)) + }] + } + } + + mock_dump.assert_called_once() + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, args[0], expected_dict) + + mock_check_output.assert_called_with([ + 'ip', 'netns', 'exec', 
consts.AMPHORA_NAMESPACE, + 'amphora-interface', 'up', consts.NETNS_PRIMARY_INTERFACE + ], stderr=subprocess.STDOUT, encoding='utf-8') + + def test_ubuntu_get_interface(self): + self._test_get_interface(consts.UBUNTU) + + def test_centos_get_interface(self): + self._test_get_interface(consts.CENTOS) + + @mock.patch('pyroute2.NetNS', create=True) + def _test_get_interface(self, distro, mock_netns): + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + + netns_handle = mock_netns.return_value.__enter__.return_value + + interface_res = {'interface': 'eth0'} + + # Happy path + netns_handle.get_addr.return_value = [{ + 'index': 3, 'family': socket.AF_INET, + 'attrs': [['IFA_ADDRESS', '203.0.113.2']]}] + netns_handle.get_links.return_value = [{ + 'attrs': [['IFLA_IFNAME', 'eth0']]}] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + + '/interface/203.0.113.2', + data=jsonutils.dumps(interface_res), + content_type='application/json') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + + '/interface/203.0.113.2', + data=jsonutils.dumps(interface_res), + content_type='application/json') + self.assertEqual(200, rv.status_code) + + # Happy path with IPv6 address normalization + netns_handle.get_addr.return_value = [{ + 'index': 3, 'family': socket.AF_INET6, + 'attrs': [['IFA_ADDRESS', + '0000:0000:0000:0000:0000:0000:0000:0001']]}] + netns_handle.get_links.return_value = [{ + 'attrs': [['IFLA_IFNAME', 'eth0']]}] + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + + '/interface/::1', + data=jsonutils.dumps(interface_res), + content_type='application/json') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + + '/interface/::1', + data=jsonutils.dumps(interface_res), + content_type='application/json') + self.assertEqual(200, rv.status_code) + + # Nonexistent interface + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + + '/interface/10.0.0.1', + data=jsonutils.dumps(interface_res), + content_type='application/json') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + + '/interface/10.0.0.1', + data=jsonutils.dumps(interface_res), + content_type='application/json') + self.assertEqual(404, rv.status_code) + + # Invalid IP address + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + + '/interface/00:00:00:00:00:00', + data=jsonutils.dumps(interface_res), + content_type='application/json') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + + '/interface/00:00:00:00:00:00', + data=jsonutils.dumps(interface_res), + content_type='application/json') + self.assertEqual(400, rv.status_code) + + def test_ubuntu_upload_keepalived_config(self): + with mock.patch('distro.id', return_value='ubuntu'): + self._test_upload_keepalived_config(consts.UBUNTU) + + def test_centos_upload_keepalived_config(self): + with mock.patch('distro.id', return_value='centos'): + self._test_upload_keepalived_config(consts.CENTOS) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
+ 'vrrp_check_script_update') + @mock.patch('os.path.exists') + @mock.patch('os.makedirs') + @mock.patch('os.rename') + @mock.patch('subprocess.check_output') + @mock.patch('os.remove') + def _test_upload_keepalived_config(self, distro, mock_remove, + mock_subprocess, mock_rename, + mock_makedirs, mock_exists, + mock_vrrp_check): + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC + + mock_exists.return_value = True + cfg_path = util.keepalived_cfg_path() + m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open + + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/vrrp/upload', data='test') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/vrrp/upload', data='test') + + mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + mock_open.assert_called_with(cfg_path, flags, mode) + mock_fdopen.assert_called_with(123, 'wb') + self.assertEqual(200, rv.status_code) + mock_vrrp_check.assert_called_once_with(None, + consts.AMP_ACTION_START) + + mock_exists.return_value = False + mock_vrrp_check.reset_mock() + script_path = util.keepalived_check_script_path() + m = self.useFixture(test_utils.OpenFixture(script_path)).mock_open + + with mock.patch('os.open') as mock_open, mock.patch.object( + os, 'fdopen', m) as mock_fdopen: + mock_open.return_value = 123 + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/vrrp/upload', data='test') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/vrrp/upload', data='test') + mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | + stat.S_IROTH | stat.S_IXOTH) + mock_open.assert_called_with(script_path, flags, mode) + mock_fdopen.assert_called_with(123, 'w') + self.assertEqual(200, rv.status_code) + mock_vrrp_check.assert_called_once_with(None, + consts.AMP_ACTION_START) + + def test_ubuntu_manage_service_vrrp(self): + self._test_manage_service_vrrp(consts.UBUNTU) + + def test_centos_manage_service_vrrp(self): + self._test_manage_service_vrrp(consts.CENTOS) + + @mock.patch('subprocess.check_output') + def _test_manage_service_vrrp(self, distro, mock_check_output): + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/start') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/start') + + self.assertEqual(202, rv.status_code) + + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/vrrp/restart') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/vrrp/restart') + self.assertEqual(400, rv.status_code) + + mock_check_output.side_effect = subprocess.CalledProcessError(1, + 'blah!') + + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/start') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/start') + self.assertEqual(500, rv.status_code) + + def test_ubuntu_details(self): + self._test_details(consts.UBUNTU) + + def test_centos_details(self): + self._test_details(consts.CENTOS) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
+ 'get_lvs_listeners', + return_value=[]) + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo.' + '_get_extend_body_from_lvs_driver', + return_value={ + "keepalived_version": '1.1.11-1', + "ipvsadm_version": '2.2.22-2' + }) + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo.' + '_count_lvs_listener_processes', return_value=0) + @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' + 'AmphoraInfo._count_haproxy_processes') + @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' + 'AmphoraInfo._get_networks') + @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' + 'AmphoraInfo._load') + @mock.patch('os.statvfs') + @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' + 'AmphoraInfo._cpu') + @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' + 'AmphoraInfo._get_meminfo') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'util.get_listeners') + @mock.patch('socket.gethostname') + @mock.patch('subprocess.check_output') + def _test_details(self, distro, mock_subprocess, mock_hostname, + mock_get_listeners, mock_get_mem, mock_cpu, + mock_statvfs, mock_load, mock_get_nets, + mock_count_haproxy, mock_count_lvs_listeners, + mock_get_ext_from_lvs_driver, mock_get_lvs_listeners): + + self.assertIn(distro, [consts.UBUNTU, consts.CENTOS]) + + listener_id = uuidutils.generate_uuid() + mock_get_listeners.return_value = [listener_id] + + mock_hostname.side_effect = ['test-host'] + + mock_subprocess.side_effect = ['9.9.99-9'] + + MemTotal = random.randrange(0, 1000) + MemFree = random.randrange(0, 1000) + Buffers = random.randrange(0, 1000) + Cached = random.randrange(0, 1000) + SwapCached = random.randrange(0, 1000) + Shmem = random.randrange(0, 1000) + Slab = random.randrange(0, 1000) + + memory_dict = {'CmaFree': 0, 'Mapped': 38244, 'CommitLimit': 508048, + 'MemFree': MemFree, 'AnonPages': 92384, + 'DirectMap2M': 997376, 'SwapTotal': 0, + 'NFS_Unstable': 0, 'SReclaimable': 34168, + 'Writeback': 0, 'PageTables': 3760, 'Shmem': Shmem, + 'Hugepagesize': 2048, 'MemAvailable': 738356, + 'HardwareCorrupted': 0, 'SwapCached': SwapCached, + 'Dirty': 80, 'Active': 237060, 'VmallocUsed': 0, + 'Inactive(anon)': 2752, 'Slab': Slab, 'Cached': Cached, + 'Inactive(file)': 149588, 'SUnreclaim': 17796, + 'Mlocked': 3656, 'AnonHugePages': 6144, 'SwapFree': 0, + 'Active(file)': 145512, 'CmaTotal': 0, + 'Unevictable': 3656, 'KernelStack': 2368, + 'Inactive': 152340, 'MemTotal': MemTotal, 'Bounce': 0, + 'Committed_AS': 401884, 'Active(anon)': 91548, + 'VmallocTotal': 34359738367, 'VmallocChunk': 0, + 'DirectMap4k': 51072, 'WritebackTmp': 0, + 'Buffers': Buffers} + mock_get_mem.return_value = memory_dict + + cpu_total = random.randrange(0, 1000) + cpu_user = random.randrange(0, 1000) + cpu_system = random.randrange(0, 1000) + cpu_softirq = random.randrange(0, 1000) + + cpu_dict = {'idle': '7168848', 'system': cpu_system, + 'total': cpu_total, 'softirq': cpu_softirq, 'nice': '31', + 'iowait': '902', 'user': cpu_user, 'irq': '0'} + + mock_cpu.return_value = cpu_dict + + f_blocks = random.randrange(0, 1000) + f_bfree = random.randrange(0, 1000) + f_frsize = random.randrange(0, 1000) + f_bavail = random.randrange(0, 1000) + + stats = mock.MagicMock() + stats.f_blocks = f_blocks + stats.f_bfree = f_bfree + stats.f_frsize = f_frsize + stats.f_bavail = f_bavail + disk_used = (f_blocks - f_bfree) * f_frsize + disk_available = f_bavail * f_frsize + +
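+ # statvfs reports sizes in f_frsize-byte units: 'used' counts the + # non-free blocks (f_blocks - f_bfree), while 'available' uses + # f_bavail, the blocks available to unprivileged processes.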
mock_statvfs.return_value = stats + + load_1min = random.randrange(0, 10) + load_5min = random.randrange(0, 10) + load_15min = random.randrange(0, 10) + + mock_load.return_value = [load_1min, load_5min, load_15min] + + eth1_rx = random.randrange(0, 1000) + eth1_tx = random.randrange(0, 1000) + eth2_rx = random.randrange(0, 1000) + eth2_tx = random.randrange(0, 1000) + eth3_rx = random.randrange(0, 1000) + eth3_tx = random.randrange(0, 1000) + + net_dict = {'eth2': {'network_rx': eth2_rx, 'network_tx': eth2_tx}, + 'eth1': {'network_rx': eth1_rx, 'network_tx': eth1_tx}, + 'eth3': {'network_rx': eth3_rx, 'network_tx': eth3_tx}} + + mock_get_nets.return_value = net_dict + + haproxy_count = random.randrange(0, 100) + mock_count_haproxy.return_value = haproxy_count + tuned_profiles = "virtual-guest optimize-serial-console amphora" + + expected_dict = {'active': True, + 'active_tuned_profiles': tuned_profiles, + 'api_version': '1.0', + 'cpu': {'soft_irq': cpu_softirq, 'system': cpu_system, + 'total': cpu_total, 'user': cpu_user}, + 'cpu_count': os.cpu_count(), + 'disk': {'available': disk_available, + 'used': disk_used}, + 'haproxy_count': haproxy_count, + 'haproxy_version': '9.9.99-9', + 'hostname': 'test-host', + 'ipvsadm_version': '2.2.22-2', + 'keepalived_version': '1.1.11-1', + 'listeners': [listener_id], + 'load': [load_1min, load_5min, load_15min], + 'memory': {'buffers': Buffers, + 'cached': Cached, + 'free': MemFree, + 'shared': Shmem, + 'slab': Slab, + 'swap_used': SwapCached, + 'total': MemTotal}, + 'networks': {'eth1': {'network_rx': eth1_rx, + 'network_tx': eth1_tx}, + 'eth2': {'network_rx': eth2_rx, + 'network_tx': eth2_tx}, + 'eth3': {'network_rx': eth3_rx, + 'network_tx': eth3_tx}}, + 'packages': {}, + 'topology': consts.TOPOLOGY_SINGLE, + 'topology_status': consts.TOPOLOGY_STATUS_OK, + 'lvs_listener_process_count': 0} + + with mock.patch("octavia.amphorae.backends.agent.api_server" + ".amphora_info.open", + mock.mock_open(read_data=tuned_profiles)): + if distro == consts.UBUNTU: + rv = self.ubuntu_app.get('/' + api_server.VERSION + '/details') + elif distro == consts.CENTOS: + rv = self.centos_app.get('/' + api_server.VERSION + '/details') + + self.assertEqual(200, rv.status_code) + self.assertEqual(expected_dict, + jsonutils.loads(rv.data.decode('utf-8'))) + + def test_ubuntu_upload_config(self): + self._test_upload_config(consts.UBUNTU) + + def test_centos_upload_config(self): + self._test_upload_config(consts.CENTOS) + + @mock.patch('oslo_config.cfg.CONF.mutate_config_files') + def _test_upload_config(self, distro, mock_mutate): + server.BUFFER = 5 # test the while loop + m = self.useFixture( + test_utils.OpenFixture(AMP_AGENT_CONF_PATH)).mock_open + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/config', data='TestTest') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + api_server.VERSION + + '/config', data='TestTest') + self.assertEqual(202, rv.status_code) + self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8'))) + handle = m() + handle.write.assert_any_call(octavia_utils.b('TestT')) + handle.write.assert_any_call(octavia_utils.b('est')) + mock_mutate.assert_called_once_with() + + # Test the exception handling + mock_mutate.side_effect = Exception('boom') + if distro == consts.UBUNTU: + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/config', data='TestTest') + elif distro == consts.CENTOS: + rv = self.centos_app.put('/' + 
api_server.VERSION + + '/config', data='TestTest') + self.assertEqual(500, rv.status_code) + + def test_version_discovery(self): + hm_queue = mock.MagicMock() + with mock.patch('distro.id', return_value='ubuntu'), mock.patch( + 'octavia.amphorae.backends.agent.api_server.plug.' + 'Plug.plug_lo'): + self.test_client = server.Server(hm_queue).app.test_client() + expected_dict = {'api_version': api_server.VERSION} + rv = self.test_client.get('/') + self.assertEqual(200, rv.status_code) + self.assertEqual(expected_dict, + jsonutils.loads(rv.data.decode('utf-8'))) + + @mock.patch('octavia.amphorae.backends.utils.nftable_utils.' + 'load_nftables_file') + @mock.patch('octavia.amphorae.backends.utils.nftable_utils.' + 'write_nftable_rules_file') + @mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.' + 'AmphoraInfo.get_interface') + def test_set_interface_rules(self, mock_get_int, mock_write_rules, + mock_load_rules): + mock_get_int.side_effect = [ + webob.Response(status=400), + webob.Response(status=200, json={'interface': 'fake1'}), + webob.Response(status=200, json={'interface': 'fake1'})] + + # Test can't find interface + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/interface/192.0.2.10/rules', data='fake') + self.assertEqual(400, rv.status_code) + mock_write_rules.assert_not_called() + + # Test schema validation failure + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/interface/192.0.2.10/rules', data='fake') + self.assertEqual('400 Bad Request', rv.status) + + # Test successful path + rules_json = ('[{"protocol":"TCP","cidr":"192.0.2.0/24","port":8080},' + '{"protocol":"UDP","cidr":null,"port":80}]') + rv = self.ubuntu_app.put('/' + api_server.VERSION + + '/interface/192.0.2.10/rules', + data=rules_json, + content_type='application/json') + self.assertEqual('200 OK', rv.status) + mock_write_rules.assert_called_once_with('fake1', + jsonutils.loads(rules_json)) + mock_load_rules.assert_called_once() diff --git a/octavia/tests/functional/api/__init__.py b/octavia/tests/functional/api/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/functional/api/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/functional/api/drivers/__init__.py b/octavia/tests/functional/api/drivers/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/functional/api/drivers/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/octavia/tests/functional/api/drivers/driver_agent/__init__.py b/octavia/tests/functional/api/drivers/driver_agent/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/functional/api/drivers/driver_agent/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py b/octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py new file mode 100644 index 0000000000..9dc62fcdae --- /dev/null +++ b/octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py @@ -0,0 +1,634 @@ +# Copyright 2019 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import multiprocessing +from unittest import mock + +from octavia_lib.api.drivers import driver_lib as octavia_driver_lib +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +from stevedore import driver as stevedore_driver + +from octavia.api.drivers.driver_agent import driver_listener +from octavia.common import config +from octavia.common import constants +from octavia.db import repositories +from octavia.tests.common import sample_certs +from octavia.tests.common import sample_data_models +from octavia.tests.functional.db import base + +from oslo_log import log as logging +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + + +class DriverAgentTest(base.OctaviaDBTestBase): + + def _process_cleanup(self): + self.exit_event.set() + self.status_listener_proc.join(5) + self.stats_listener_proc.join(5) + self.get_listener_proc.join(5) + + def setUp(self): + status_socket_file = (f'/tmp/octavia-{uuidutils.generate_uuid()}' + f'.status.sock') + stats_socket_file = (f'/tmp/octavia-{uuidutils.generate_uuid()}' + f'.stats.sock') + get_socket_file = f'/tmp/octavia-{uuidutils.generate_uuid()}.get.sock' + sqlite_db_file = f'/tmp/octavia-{uuidutils.generate_uuid()}.sqlite.db' + sqlite_db_connection = f'sqlite:///{sqlite_db_file}' + + # Note that because the driver agent is a multi-process + # agent we must use a sqlite file rather than an + # in-memory instance. 
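+ # (An in-memory SQLite database is private to the connection that + # created it, so schema and rows written by this test process would + # be invisible to the listener processes started below.)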
+ super().setUp( + connection_string=sqlite_db_connection) + + conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) + conf.config(group="driver_agent", + status_socket_path=status_socket_file) + conf.config(group="driver_agent", + stats_socket_path=stats_socket_file) + conf.config(group="driver_agent", status_request_timeout=1) + conf.config(group="driver_agent", get_socket_path=get_socket_file) + conf.config(group="certificates", cert_manager='local_cert_manager') + conf.config(group="certificates", storage_path='/tmp') + + # Set up the certificate + cert_manager = stevedore_driver.DriverManager( + namespace='octavia.cert_manager', + name=CONF.certificates.cert_manager, + invoke_on_load=True, + ).driver + self.cert_ref = cert_manager.store_cert( + None, + sample_certs.X509_CERT, + sample_certs.X509_CERT_KEY_ENCRYPTED, + sample_certs.X509_IMDS, + private_key_passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE) + self.addCleanup(cert_manager.delete_cert, None, self.cert_ref) + + self.exit_event = multiprocessing.Event() + + self.status_listener_proc = multiprocessing.Process( + name='status_listener', target=driver_listener.status_listener, + args=(self.exit_event,)) + # TODO(johnsom) Remove once https://bugs.python.org/issue6721 + # is resolved. + self.status_listener_proc.daemon = True + + self.status_listener_proc.start() + + self.stats_listener_proc = multiprocessing.Process( + name='stats_listener', target=driver_listener.stats_listener, + args=(self.exit_event,)) + # TODO(johnsom) Remove once https://bugs.python.org/issue6721 + # is resolved. + self.stats_listener_proc.daemon = True + + self.stats_listener_proc.start() + + self.get_listener_proc = multiprocessing.Process( + name='get_listener', target=driver_listener.get_listener, + args=(self.exit_event,)) + # TODO(johnsom) Remove once https://bugs.python.org/issue6721 + # is resolved. 
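+ # (Python issue 6721: a process forked while another thread holds a + # lock can deadlock in the child; the daemon flag keeps a hung + # listener from blocking interpreter exit during test teardown.)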
+ self.get_listener_proc.daemon = True + + self.get_listener_proc.start() + + self.addCleanup(self._process_cleanup) + + self.driver_lib = octavia_driver_lib.DriverLibrary( + status_socket=status_socket_file, + stats_socket=stats_socket_file, + get_socket=get_socket_file) + + self.sample_data = sample_data_models.SampleDriverDataModels() + self.repos = repositories.Repositories() + + # Create the full load balancer in the database + self.tls_container_dict = { + lib_consts.CERTIFICATE: sample_certs.X509_CERT.decode('utf-8'), + lib_consts.ID: sample_certs.X509_CERT_SHA1, + lib_consts.INTERMEDIATES: [ + i.decode('utf-8') for i in sample_certs.X509_IMDS_LIST], + lib_consts.PASSPHRASE: None, + lib_consts.PRIMARY_CN: sample_certs.X509_CERT_CN, + lib_consts.PRIVATE_KEY: sample_certs.X509_CERT_KEY.decode('utf-8')} + + # ### Create load balancer + self.repos.flavor_profile.create( + self.session, id=self.sample_data.flavor_profile_id, + provider_name=constants.AMPHORA, + flavor_data='{"something": "else"}') + self.repos.flavor.create( + self.session, id=self.sample_data.flavor_id, + enabled=True, flavor_profile_id=self.sample_data.flavor_profile_id) + self.repos.create_load_balancer_and_vip( + self.session, self.sample_data.test_loadbalancer1_dict, + self.sample_data.test_vip_dict) + + # ### Create Pool + pool_dict = copy.deepcopy(self.sample_data.test_pool1_dict) + + pool_dict[constants.LOAD_BALANCER_ID] = self.sample_data.lb_id + + # Use a live certificate + pool_dict[constants.TLS_CERTIFICATE_ID] = self.cert_ref + pool_dict[constants.CA_TLS_CERTIFICATE_ID] = self.cert_ref + pool_dict[constants.CRL_CONTAINER_ID] = self.cert_ref + + # Remove items that are linked in the DB + del pool_dict[lib_consts.MEMBERS] + del pool_dict[constants.HEALTH_MONITOR] + del pool_dict[lib_consts.SESSION_PERSISTENCE] + del pool_dict[lib_consts.LISTENERS] + del pool_dict[lib_consts.L7POLICIES] + + self.repos.pool.create(self.session, **pool_dict) + + self.repos.session_persistence.create( + self.session, pool_id=self.sample_data.pool1_id, + type=lib_consts.SESSION_PERSISTENCE_SOURCE_IP) + + self.provider_pool_dict = copy.deepcopy( + self.sample_data.provider_pool1_dict) + self.provider_pool_dict[ + constants.LISTENER_ID] = self.sample_data.listener1_id + + # Fix for render_unsets = True + self.provider_pool_dict[ + lib_consts.SESSION_PERSISTENCE][lib_consts.COOKIE_NAME] = None + self.provider_pool_dict[lib_consts.SESSION_PERSISTENCE][ + lib_consts.PERSISTENCE_GRANULARITY] = None + self.provider_pool_dict[lib_consts.SESSION_PERSISTENCE][ + lib_consts.PERSISTENCE_TIMEOUT] = None + + # Use a live certificate + self.provider_pool_dict[ + lib_consts.TLS_CONTAINER_DATA] = self.tls_container_dict + self.provider_pool_dict[lib_consts.TLS_CONTAINER_REF] = self.cert_ref + self.provider_pool_dict[ + lib_consts.CA_TLS_CONTAINER_DATA] = ( + sample_certs.X509_CERT.decode('utf-8')) + self.provider_pool_dict[ + lib_consts.CA_TLS_CONTAINER_REF] = self.cert_ref + self.provider_pool_dict[ + lib_consts.CRL_CONTAINER_DATA] = ( + sample_certs.X509_CERT.decode('utf-8')) + self.provider_pool_dict[lib_consts.CRL_CONTAINER_REF] = self.cert_ref + + # ### Create Member + member_dict = copy.deepcopy(self.sample_data.test_member1_dict) + self.repos.member.create(self.session, **member_dict) + self.provider_pool_dict[lib_consts.MEMBERS] = [ + self.sample_data.provider_member1_dict] + + # ### Create Health Monitor + hm_dict = copy.deepcopy(self.sample_data.test_hm1_dict) + self.repos.health_monitor.create(self.session, **hm_dict) + 
self.provider_pool_dict[ + lib_consts.HEALTHMONITOR] = self.sample_data.provider_hm1_dict + + # ### Create Listener + listener_dict = copy.deepcopy(self.sample_data.test_listener1_dict) + listener_dict[lib_consts.DEFAULT_POOL_ID] = self.sample_data.pool1_id + + # Remove items that are linked in the DB + del listener_dict[lib_consts.L7POLICIES] + del listener_dict[lib_consts.DEFAULT_POOL] + del listener_dict[constants.SNI_CONTAINERS] + + # Use a live certificate + listener_dict[constants.TLS_CERTIFICATE_ID] = self.cert_ref + listener_dict[constants.CLIENT_CA_TLS_CERTIFICATE_ID] = self.cert_ref + listener_dict[constants.CLIENT_CRL_CONTAINER_ID] = self.cert_ref + + self.repos.listener.create(self.session, + **listener_dict) + self.repos.sni.create(self.session, + listener_id=self.sample_data.listener1_id, + tls_container_id=self.cert_ref, position=1) + + # Add our live certs in that differ from the fake certs in sample_data + self.provider_listener_dict = copy.deepcopy( + self.sample_data.provider_listener1_dict) + self.provider_listener_dict[ + lib_consts.DEFAULT_TLS_CONTAINER_REF] = self.cert_ref + self.provider_listener_dict[ + lib_consts.DEFAULT_TLS_CONTAINER_DATA] = self.tls_container_dict + self.provider_listener_dict[ + lib_consts.CLIENT_CA_TLS_CONTAINER_REF] = self.cert_ref + self.provider_listener_dict[ + lib_consts.CLIENT_CA_TLS_CONTAINER_DATA] = ( + sample_certs.X509_CERT.decode('utf-8')) + self.provider_listener_dict[ + lib_consts.CLIENT_CRL_CONTAINER_REF] = self.cert_ref + self.provider_listener_dict[ + lib_consts.CLIENT_CRL_CONTAINER_DATA] = ( + sample_certs.X509_CERT.decode('utf-8')) + self.provider_listener_dict[ + lib_consts.SNI_CONTAINER_DATA] = [self.tls_container_dict] + self.provider_listener_dict[ + lib_consts.SNI_CONTAINER_REFS] = [self.cert_ref] + + self.provider_listener_dict[ + lib_consts.DEFAULT_POOL] = self.provider_pool_dict + self.provider_listener_dict[ + lib_consts.DEFAULT_POOL_ID] = self.sample_data.pool1_id + + self.provider_listener_dict[lib_consts.L7POLICIES] = [ + self.sample_data.provider_l7policy1_dict] + + # ### Create L7 Policy + l7policy_dict = copy.deepcopy(self.sample_data.test_l7policy1_dict) + del l7policy_dict[lib_consts.L7RULES] + self.repos.l7policy.create(self.session, **l7policy_dict) + + # ### Create L7 Rules + l7rule_dict = copy.deepcopy(self.sample_data.test_l7rule1_dict) + self.repos.l7rule.create(self.session, **l7rule_dict) + l7rule2_dict = copy.deepcopy(self.sample_data.test_l7rule2_dict) + self.repos.l7rule.create(self.session, **l7rule2_dict) + + self.session.commit() + + self.provider_lb_dict = copy.deepcopy( + self.sample_data.provider_loadbalancer_tree_dict) + self.provider_lb_dict[lib_consts.POOLS] = [self.provider_pool_dict] + self.provider_lb_dict[ + lib_consts.LISTENERS] = [self.provider_listener_dict] + + def _compare_load_balancer_dicts(self, + provider_lb_dict, + result_dict): + for key in (lib_consts.LOADBALANCER_ID, + lib_consts.NAME, + lib_consts.DESCRIPTION, + lib_consts.PROJECT_ID, + lib_consts.ADMIN_STATE_UP, + lib_consts.VIP_ADDRESS, + lib_consts.VIP_NETWORK_ID, + lib_consts.VIP_SUBNET_ID, + lib_consts.VIP_PORT_ID, + lib_consts.VIP_QOS_POLICY_ID, + lib_consts.FLAVOR, + lib_consts.AVAILABILITY_ZONE, + lib_consts.ADDITIONAL_VIPS): + self.assertEqual(provider_lb_dict.get(key), + result_dict.get(key)) + + provider_listener_dicts = provider_lb_dict[lib_consts.LISTENERS] + result_listener_dicts = result_dict[lib_consts.LISTENERS] + + self.assertEqual(len(provider_listener_dicts), + len(result_listener_dicts)) + + for 
listener_dicts in zip(provider_listener_dicts, + result_listener_dicts): + provider_listener_dict = listener_dicts[0] + result_listener_dict = listener_dicts[1] + self._compare_listener_dicts(provider_listener_dict, + result_listener_dict) + + self.assertEqual(len(provider_lb_dict[lib_consts.POOLS]), + len(result_dict[lib_consts.POOLS])) + + for pool_dicts in zip(provider_lb_dict[lib_consts.POOLS], + result_dict[lib_consts.POOLS]): + provider_pool_dict = pool_dicts[0] + result_pool_dict = pool_dicts[1] + self._compare_pool_dicts(provider_pool_dict, + result_pool_dict) + + def _compare_listener_dicts(self, + provider_listener_dict, + result_listener_dict): + for key in (lib_consts.LISTENER_ID, + lib_consts.LOADBALANCER_ID, + lib_consts.NAME, + lib_consts.DESCRIPTION, + lib_consts.PROJECT_ID, + lib_consts.ADMIN_STATE_UP, + lib_consts.PROTOCOL, + lib_consts.PROTOCOL_PORT, + lib_consts.CONNECTION_LIMIT, + lib_consts.DEFAULT_POOL_ID, + lib_consts.TIMEOUT_CLIENT_DATA, + lib_consts.TIMEOUT_MEMBER_CONNECT, + lib_consts.TIMEOUT_MEMBER_DATA, + lib_consts.TIMEOUT_TCP_INSPECT, + lib_consts.INSERT_HEADERS, + lib_consts.ALLOWED_CIDRS, + lib_consts.DEFAULT_TLS_CONTAINER_REF, + lib_consts.DEFAULT_TLS_CONTAINER_DATA, + lib_consts.SNI_CONTAINER_REFS, + lib_consts.SNI_CONTAINER_DATA, + lib_consts.CLIENT_CA_TLS_CONTAINER_REF, + lib_consts.CLIENT_CA_TLS_CONTAINER_DATA, + lib_consts.CLIENT_AUTHENTICATION, + lib_consts.CLIENT_CRL_CONTAINER_REF, + lib_consts.CLIENT_CRL_CONTAINER_DATA, + lib_consts.TLS_CIPHERS, + lib_consts.TLS_VERSIONS): + self.assertEqual(provider_listener_dict.get(key), + result_listener_dict.get(key)) + + provider_l7policy_dicts = provider_listener_dict.get( + lib_consts.L7POLICIES) + result_l7policy_dicts = result_listener_dict.get( + lib_consts.L7POLICIES) + + self.assertEqual(len(provider_l7policy_dicts), + len(result_l7policy_dicts)) + + for l7policy_dicts in zip(provider_l7policy_dicts, + result_l7policy_dicts): + provider_l7policy_dict = l7policy_dicts[0] + result_l7policy_dict = l7policy_dicts[1] + self._compare_l7policy_dicts(provider_l7policy_dict, + result_l7policy_dict) + + def _compare_l7policy_dicts(self, + provider_l7policy_dict, + result_l7policy_dict): + for key in (lib_consts.L7POLICY_ID, + lib_consts.LISTENER_ID, + lib_consts.NAME, + lib_consts.DESCRIPTION, + lib_consts.PROJECT_ID, + lib_consts.ADMIN_STATE_UP, + lib_consts.ACTION, + lib_consts.POSITION, + lib_consts.REDIRECT_POOL_ID, + lib_consts.REDIRECT_URL, + lib_consts.REDIRECT_PREFIX, + lib_consts.REDIRECT_HTTP_CODE): + self.assertEqual(provider_l7policy_dict.get(key), + result_l7policy_dict.get(key)) + + provider_l7rule_dicts = provider_l7policy_dict.get(lib_consts.L7RULES) + result_l7rule_dicts = result_l7policy_dict.get(lib_consts.L7RULES) + + if provider_l7rule_dicts or result_l7rule_dicts: + self.assertIsNotNone(provider_l7rule_dicts) + self.assertIsNotNone(result_l7rule_dicts) + + self.assertEqual(len(provider_l7rule_dicts), + len(result_l7rule_dicts)) + + for l7rule_dicts in zip(provider_l7rule_dicts, + result_l7rule_dicts): + provider_l7rule_dict = l7rule_dicts[0] + result_l7rule_dict = l7rule_dicts[1] + self._compare_l7rule_dicts(provider_l7rule_dict, + result_l7rule_dict) + + def _compare_l7rule_dicts(self, + provider_l7rule_dict, + result_l7rule_dict): + for key in (lib_consts.L7RULE_ID, + lib_consts.L7POLICY_ID, + lib_consts.LISTENER_ID, + lib_consts.NAME, + lib_consts.DESCRIPTION, + lib_consts.PROJECT_ID, + lib_consts.ADMIN_STATE_UP, + lib_consts.TYPE, + lib_consts.COMPARE_TYPE, + lib_consts.KEY, + 
lib_consts.VALUE, + lib_consts.INVERT): + self.assertEqual(provider_l7rule_dict.get(key), + result_l7rule_dict.get(key)) + + def _compare_pool_dicts(self, + provider_pool_dict, + result_pool_dict): + for key in (lib_consts.POOL_ID, + lib_consts.NAME, + lib_consts.DESCRIPTION, + lib_consts.PROJECT_ID, + lib_consts.ADMIN_STATE_UP, + lib_consts.LB_ALGORITHM, + lib_consts.LOADBALANCER_ID, + lib_consts.PROTOCOL, + lib_consts.SESSION_PERSISTENCE, + lib_consts.TLS_ENABLED, + lib_consts.TLS_CONTAINER_REF, + lib_consts.TLS_CONTAINER_DATA, + lib_consts.CA_TLS_CONTAINER_REF, + lib_consts.CA_TLS_CONTAINER_DATA, + lib_consts.CRL_CONTAINER_REF, + lib_consts.CRL_CONTAINER_DATA, + lib_consts.TLS_CIPHERS, + lib_consts.TLS_VERSIONS, + lib_consts.ALPN_PROTOCOLS): + self.assertEqual(provider_pool_dict.get(key), + result_pool_dict.get(key)) + + provider_hm_dict = provider_pool_dict.get( + lib_consts.HEALTHMONITOR) + result_hm_dict = result_pool_dict.get( + lib_consts.HEALTHMONITOR) + if provider_hm_dict or result_hm_dict: + self._compare_hm_dicts(provider_hm_dict, + result_hm_dict) + + provider_member_dicts = provider_pool_dict.get( + lib_consts.MEMBERS) + result_member_dicts = result_pool_dict.get( + lib_consts.MEMBERS) + self.assertEqual(len(provider_member_dicts), + len(result_member_dicts)) + + for member_dicts in zip(provider_member_dicts, + result_member_dicts): + provider_member_dict = member_dicts[0] + result_member_dict = member_dicts[1] + self._compare_member_dicts(provider_member_dict, + result_member_dict) + + def _compare_hm_dicts(self, + provider_hm_dict, + result_hm_dict): + for key in (lib_consts.HEALTHMONITOR_ID, + lib_consts.POOL_ID, + lib_consts.NAME, + lib_consts.DESCRIPTION, + lib_consts.PROJECT_ID, + lib_consts.ADMIN_STATE_UP, + lib_consts.TYPE, + lib_consts.DELAY, + lib_consts.TIMEOUT, + lib_consts.MAX_RETRIES, + lib_consts.MAX_RETRIES_DOWN, + lib_consts.DOMAIN_NAME, + lib_consts.EXPECTED_CODES, + lib_consts.HTTP_METHOD, + lib_consts.HTTP_VERSION, + lib_consts.URL_PATH): + self.assertEqual(provider_hm_dict.get(key), + result_hm_dict.get(key)) + + def _compare_member_dicts(self, + provider_member_dict, + result_member_dict): + for key in (lib_consts.MEMBER_ID, + lib_consts.POOL_ID, + lib_consts.NAME, + lib_consts.DESCRIPTION, + lib_consts.PROJECT_ID, + lib_consts.ADMIN_STATE_UP, + lib_consts.ADDRESS, + lib_consts.PROTOCOL_PORT, + lib_consts.MONITOR_ADDRESS, + lib_consts.MONITOR_PORT, + lib_consts.SUBNET_ID, + lib_consts.WEIGHT, + lib_consts.BACKUP, + lib_consts.VNIC_TYPE): + self.assertEqual(provider_member_dict.get(key), + result_member_dict.get(key)) + + @mock.patch('octavia_lib.api.drivers.driver_lib.SOCKET_TIMEOUT', 30) + def test_get_loadbalancer(self): + result = self.driver_lib.get_loadbalancer(self.sample_data.lb_id) + result_dict = result.to_dict(render_unsets=True, recurse=True) + + self._compare_load_balancer_dicts(self.provider_lb_dict, + result_dict) + + # Test non-existent load balancer + result = self.driver_lib.get_loadbalancer('bogus') + self.assertIsNone(result) + + @mock.patch('octavia_lib.api.drivers.driver_lib.SOCKET_TIMEOUT', 30) + def test_get_listener(self): + result = self.driver_lib.get_listener(self.sample_data.listener1_id) + result_dict = result.to_dict(render_unsets=True, recurse=True) + + self._compare_listener_dicts(self.provider_listener_dict, + result_dict) + + # Test non-existent listener + result = self.driver_lib.get_listener('bogus') + self.assertIsNone(result) + + @mock.patch('octavia_lib.api.drivers.driver_lib.SOCKET_TIMEOUT', 30) + def 
test_get_pool(self): + result = self.driver_lib.get_pool(self.sample_data.pool1_id) + result_dict = result.to_dict(render_unsets=True, recurse=True) + + self._compare_pool_dicts(self.provider_pool_dict, + result_dict) + + # Test non-existent pool + result = self.driver_lib.get_pool('bogus') + self.assertIsNone(result) + + @mock.patch('octavia_lib.api.drivers.driver_lib.SOCKET_TIMEOUT', 30) + def test_get_member(self): + result = self.driver_lib.get_member(self.sample_data.member1_id) + result_dict = result.to_dict(render_unsets=True) + + self._compare_member_dicts(self.sample_data.provider_member1_dict, + result_dict) + + # Test non-existent member + result = self.driver_lib.get_member('bogus') + self.assertIsNone(result) + + @mock.patch('octavia_lib.api.drivers.driver_lib.SOCKET_TIMEOUT', 30) + def test_get_healthmonitor(self): + result = self.driver_lib.get_healthmonitor(self.sample_data.hm1_id) + result_dict = result.to_dict(render_unsets=True) + + self._compare_hm_dicts(self.sample_data.provider_hm1_dict, + result_dict) + + # Test non-existent health monitor + result = self.driver_lib.get_healthmonitor('bogus') + self.assertIsNone(result) + + @mock.patch('octavia_lib.api.drivers.driver_lib.SOCKET_TIMEOUT', 30) + def test_get_l7policy(self): + result = self.driver_lib.get_l7policy(self.sample_data.l7policy1_id) + result_dict = result.to_dict(render_unsets=True, recurse=True) + + self._compare_l7policy_dicts( + self.sample_data.provider_l7policy1_dict, + result_dict) + + # Test non-existent L7 policy + result = self.driver_lib.get_l7policy('bogus') + self.assertIsNone(result) + + @mock.patch('octavia_lib.api.drivers.driver_lib.SOCKET_TIMEOUT', 30) + def test_get_l7rule(self): + result = self.driver_lib.get_l7rule(self.sample_data.l7rule1_id) + result_dict = result.to_dict(render_unsets=True) + + self._compare_l7rule_dicts( + self.sample_data.provider_l7rule1_dict, + result_dict) + + # Test non-existent L7 rule + result = self.driver_lib.get_l7rule('bogus') + self.assertIsNone(result) + + def test_update_load_balancer_status(self): + # Add a new member + member_dict = copy.deepcopy(self.sample_data.test_member2_dict) + self.repos.member.create(self.session, **member_dict) + self.session.commit() + + result = self.driver_lib.get_member(member_dict[lib_consts.ID]) + self._compare_member_dicts( + self.sample_data.provider_member2_dict, + result.to_dict(render_unsets=True)) + + # Test deleting a member + status = { + "loadbalancers": [ + {"id": self.sample_data.lb_id, + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE"} + ], + "healthmonitors": [], + "l7policies": [], + "l7rules": [], + "listeners": [], + "members": [ + {"id": member_dict[lib_consts.ID], + "provisioning_status": "DELETED"} + ], + "pools": [] + } + + self.driver_lib.update_loadbalancer_status(status) + result = self.driver_lib.get_member(member_dict[lib_consts.ID]) + self.assertIsNone(result) + + # Test deleting an already deleted member + # It should be silently ignored + self.driver_lib.update_loadbalancer_status(status) + result = self.driver_lib.get_member(member_dict[lib_consts.ID]) + self.assertIsNone(result) diff --git a/octavia/tests/functional/api/test_content_types.py b/octavia/tests/functional/api/test_content_types.py new file mode 100644 index 0000000000..e1bddfb058 --- /dev/null +++ b/octavia/tests/functional/api/test_content_types.py @@ -0,0 +1,149 @@ +# Copyright 2022 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance 
with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +import pecan + +from octavia.api import config as pconfig +from octavia.common import constants +from octavia.tests.functional.db import base as base_db_test + + +class TestContentTypes(base_db_test.OctaviaDBTestBase): + + def setUp(self): + super().setUp() + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + # Mock log_opt_values, it prevents the dump of the configuration + # with LOG.info for each test. It saves a lot of time when running + # the functional tests. + self.conf.conf.log_opt_values = mock.MagicMock() + + # Note: we need to set argv=() to stop the wsgi setup_app from + # pulling in the testing tool sys.argv + self.app = pecan.testing.load_test_app({'app': pconfig.app, + 'wsme': pconfig.wsme}, + argv=()) + + def reset_pecan(): + pecan.set_config({}, overwrite=True) + + self.addCleanup(reset_pecan) + + self.test_url = '/' + + def test_no_accept_header(self): + response = self.app.get(self.test_url, status=200, expect_errors=False) + self.assertEqual(200, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + + # TODO(johnsom) Testing for an empty string is a workaround for an + # openstacksdk bug present up to the initial + # antelope release of openstacksdk. This means the + # octavia dashboard would also be impacted. + # This test should change to a 406 error once the workaround + # is removed. 
+ # See: https://review.opendev.org/c/openstack/openstacksdk/+/876669 + def test_empty_accept_header(self): + response = self.app.get( + self.test_url, status=200, expect_errors=False, + headers={constants.ACCEPT: ''}) + self.assertEqual(200, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + + # Note: webob will treat invalid content types as no accept header provided + def test_bogus_accept_header(self): + response = self.app.get( + self.test_url, status=200, expect_errors=False, + headers={constants.ACCEPT: 'bogus'}) + self.assertEqual(200, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + + def test_valid_accept_header(self): + response = self.app.get( + self.test_url, status=200, expect_errors=False, + headers={constants.ACCEPT: constants.APPLICATION_JSON}) + self.assertEqual(200, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + + def test_valid_mixed_accept_header(self): + response = self.app.get( + self.test_url, status=200, expect_errors=False, + headers={constants.ACCEPT: + 'text/html,' + constants.APPLICATION_JSON}) + self.assertEqual(200, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + + def test_wildcard_accept_header(self): + response = self.app.get( + self.test_url, status=200, expect_errors=False, + headers={constants.ACCEPT: '*/*'}) + self.assertEqual(200, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + + def test_json_wildcard_accept_header(self): + response = self.app.get( + self.test_url, status=200, expect_errors=False, + headers={constants.ACCEPT: constants.APPLICATION_JSON + ', */*'}) + self.assertEqual(200, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + + def test_json_plain_wildcard_accept_header(self): + response = self.app.get( + self.test_url, status=200, expect_errors=False, + headers={constants.ACCEPT: constants.APPLICATION_JSON + + ', text/plain, */*'}) + self.assertEqual(200, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + + def test_wildcard_mixed_accept_header(self): + response = self.app.get( + self.test_url, status=200, expect_errors=False, + headers={constants.ACCEPT: + 'text/html,*/*'}) + self.assertEqual(200, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + + def test_valid_mixed_weighted_accept_header(self): + response = self.app.get( + self.test_url, status=200, expect_errors=False, + headers={constants.ACCEPT: + 'text/html,' + constants.APPLICATION_JSON + ';q=0.8'}) + self.assertEqual(200, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + + def test_invalid_accept_header(self): + response = self.app.get( + self.test_url, status=406, expect_errors=False, + headers={constants.ACCEPT: 'application/xml'}) + self.assertEqual(406, response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + self.assertEqual(406, response.json[constants.CODE]) + self.assertEqual('Not Acceptable', response.json[constants.TITLE]) + self.assertEqual('Only content type application/json is accepted.', + response.json[constants.DESCRIPTION]) + + def test_invalid_mixed_accept_header(self): + response = self.app.get( + self.test_url, status=406, expect_errors=False, + headers={constants.ACCEPT: 'application/xml,text/html'}) + self.assertEqual(406, 
response.status_code) + self.assertEqual(constants.APPLICATION_JSON, response.content_type) + self.assertEqual(406, response.json[constants.CODE]) + self.assertEqual('Not Acceptable', response.json[constants.TITLE]) + self.assertEqual('Only content type application/json is accepted.', + response.json[constants.DESCRIPTION]) diff --git a/octavia/tests/functional/api/test_healthcheck.py b/octavia/tests/functional/api/test_healthcheck.py new file mode 100644 index 0000000000..b6cae5ac88 --- /dev/null +++ b/octavia/tests/functional/api/test_healthcheck.py @@ -0,0 +1,288 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +import pecan + +from octavia.api import config as pconfig +from octavia.api.healthcheck import healthcheck_plugins +from octavia.tests.functional.db import base as base_db_test + + +class TestHealthCheck(base_db_test.OctaviaDBTestBase): + + def setUp(self): + super().setUp() + + # We need to define these early as they are late loaded in oslo + # middleware and our configuration overrides would not apply. + # Note: These must match exactly the option definitions in + # oslo.middleware healthcheck! If not you will get duplicate option + # errors. + healthcheck_opts = [ + cfg.BoolOpt( + 'detailed', default=False, + help='Show more detailed information as part of the response. ' + 'Security note: Enabling this option may expose ' + 'sensitive details about the service being monitored. ' + 'Be sure to verify that it will not violate your ' + 'security policies.'), + cfg.ListOpt( + 'backends', default=[], + help='Additional backends that can perform health checks and ' + 'report that information back as part of a request.'), + ] + cfg.CONF.register_opts(healthcheck_opts, group='healthcheck') + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + # Mock log_opt_values, it prevents the dump of the configuration + # with LOG.info for each test. It saves a lot of time when running + # the functional tests. + self.conf.conf.log_opt_values = mock.MagicMock() + self.conf.config(group='healthcheck', backends=['octavia_db_check']) + self.conf.config(group='api_settings', healthcheck_refresh_interval=5) + self.UNAVAILABLE = (healthcheck_plugins.OctaviaDBHealthcheck. 
+ UNAVAILABLE_REASON) + + def reset_pecan(): + pecan.set_config({}, overwrite=True) + + self.addCleanup(reset_pecan) + + def _make_app(self): + # Note: we need to set argv=() to stop the wsgi setup_app from + # pulling in the testing tool sys.argv + return pecan.testing.load_test_app({'app': pconfig.app, + 'wsme': pconfig.wsme}, argv=()) + + def _get_enabled_app(self): + self.conf.config(group='api_settings', healthcheck_enabled=True) + return self._make_app() + + def _get_disabled_app(self): + self.conf.config(group='api_settings', healthcheck_enabled=False) + return self._make_app() + + def _get(self, app, path, params=None, headers=None, status=200, + expect_errors=False): + response = app.get(path, params=params, headers=headers, status=status, + expect_errors=expect_errors) + return response + + def _head(self, app, path, headers=None, status=204, expect_errors=False): + response = app.head(path, headers=headers, status=status, + expect_errors=expect_errors) + return response + + def _post(self, app, path, body, headers=None, status=201, + expect_errors=False): + response = app.post_json(path, params=body, headers=headers, + status=status, expect_errors=expect_errors) + return response + + def _put(self, app, path, body, headers=None, status=200, + expect_errors=False): + response = app.put_json(path, params=body, headers=headers, + status=status, expect_errors=expect_errors) + return response + + def _delete(self, app, path, params=None, headers=None, status=204, + expect_errors=False): + response = app.delete(path, headers=headers, status=status, + expect_errors=expect_errors) + return response + + def test_healthcheck_get_text(self): + self.conf.config(group='healthcheck', detailed=False) + response = self._get(self._get_enabled_app(), '/healthcheck') + self.assertEqual(200, response.status_code) + self.assertEqual('OK', response.text) + + # Note: For whatever reason, detailed=True text has no additional info + def test_healthcheck_get_text_detailed(self): + self.conf.config(group='healthcheck', detailed=True) + response = self._get(self._get_enabled_app(), '/healthcheck') + self.assertEqual(200, response.status_code) + self.assertEqual('OK', response.text) + + def test_healthcheck_get_text_plain(self): + self.conf.config(group='healthcheck', detailed=False) + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'text/plain'}) + self.assertEqual(200, response.status_code) + self.assertEqual('OK', response.text) + + def test_healthcheck_get_text_plain_detailed(self): + self.conf.config(group='healthcheck', detailed=True) + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'text/plain'}) + self.assertEqual(200, response.status_code) + self.assertEqual('OK', response.text) + + def test_healthcheck_get_json(self): + self.conf.config(group='healthcheck', detailed=False) + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'application/json'}) + self.assertEqual(200, response.status_code) + self.assertFalse(response.json['detailed']) + self.assertEqual(['OK'], response.json['reasons']) + + def test_healthcheck_get_json_detailed(self): + self.conf.config(group='healthcheck', detailed=True) + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'application/json'}) + self.assertEqual(200, response.status_code) + self.assertTrue(response.json['detailed']) + self.assertEqual('OK', response.json['reasons'][0]['reason']) + self.assertTrue(response.json['gc']) + + def 
test_healthcheck_get_html(self): + self.conf.config(group='healthcheck', detailed=False) + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'text/html'}) + self.assertEqual(200, response.status_code) + self.assertIn('OK', response.text) + + def test_healthcheck_get_html_detailed(self): + self.conf.config(group='healthcheck', detailed=True) + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'text/html'}) + self.assertEqual(200, response.status_code) + self.assertIn('OK', response.text) + self.assertIn('Garbage collector', response.text) + + def test_healthcheck_get_text_cached(self): + self.conf.config(group='healthcheck', detailed=False) + app = self._get_enabled_app() + for i in range(10): + response = self._get(app, '/healthcheck') + self.assertEqual(200, response.status_code) + self.assertEqual('OK', response.text) + + def test_healthcheck_disabled_get(self): + self._get(self._get_disabled_app(), '/healthcheck', status=404) + + def test_healthcheck_head(self): + response = self._head(self._get_enabled_app(), '/healthcheck') + self.assertEqual(204, response.status_code) + + def test_healthcheck_disabled_head(self): + self._head(self._get_disabled_app(), '/healthcheck', status=404) + + # These should be denied by the API + def test_healthcheck_post(self): + self._post(self._get_enabled_app(), '/healthcheck', + {'foo': 'bar'}, status=405) + + def test_healthcheck_put(self): + self._put(self._get_enabled_app(), '/healthcheck', + {'foo': 'bar'}, status=405) + + def test_healthcheck_delete(self): + self._delete(self._get_enabled_app(), '/healthcheck', + status=405) + + @mock.patch('octavia.db.api.get_session') + def test_healthcheck_get_failed(self, mock_get_session): + mock_session = mock.MagicMock() + mock_session.execute.side_effect = [Exception('boom')] + mock_get_session.return_value = mock_session + response = self._get(self._get_enabled_app(), '/healthcheck', + status=503) + self.assertEqual(503, response.status_code) + self.assertEqual(self.UNAVAILABLE, response.text) + + @mock.patch('octavia.db.api.get_session') + def test_healthcheck_head_failed(self, mock_get_session): + mock_session = mock.MagicMock() + mock_session.execute.side_effect = [Exception('boom')] + mock_get_session.return_value = mock_session + response = self._head(self._get_enabled_app(), '/healthcheck', + status=503) + self.assertEqual(503, response.status_code) + + @mock.patch('octavia.db.healthcheck.check_database_connection', + side_effect=Exception('boom')) + def test_healthcheck_get_failed_check(self, mock_db_check): + response = self._get(self._get_enabled_app(), '/healthcheck', + status=503) + self.assertEqual(503, response.status_code) + self.assertEqual(self.UNAVAILABLE, response.text) + + @mock.patch('octavia.db.api.get_session') + def test_healthcheck_get_json_failed(self, mock_get_session): + self.conf.config(group='healthcheck', detailed=False) + mock_session = mock.MagicMock() + mock_session.execute.side_effect = [Exception('boom')] + mock_get_session.return_value = mock_session + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'application/json'}, + status=503) + self.assertEqual(503, response.status_code) + self.assertFalse(response.json['detailed']) + self.assertEqual([self.UNAVAILABLE], + response.json['reasons']) + + @mock.patch('octavia.db.api.get_session') + def test_healthcheck_get_json_detailed_failed(self, mock_get_session): + self.conf.config(group='healthcheck', detailed=True) + 
mock_session = mock.MagicMock() + mock_session.execute.side_effect = [Exception('boom')] + mock_get_session.return_value = mock_session + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'application/json'}, + status=503) + self.assertEqual(503, response.status_code) + self.assertTrue(response.json['detailed']) + self.assertEqual(self.UNAVAILABLE, + response.json['reasons'][0]['reason']) + self.assertIn('boom', response.json['reasons'][0]['details']) + + @mock.patch('octavia.db.api.get_session') + def test_healthcheck_get_html_failed(self, mock_get_session): + self.conf.config(group='healthcheck', detailed=False) + mock_session = mock.MagicMock() + mock_session.execute.side_effect = [Exception('boom')] + mock_get_session.return_value = mock_session + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'text/html'}, status=503) + self.assertEqual(503, response.status_code) + self.assertIn(self.UNAVAILABLE, response.text) + + @mock.patch('octavia.db.api.get_session') + def test_healthcheck_get_html_detailed_failed(self, mock_get_session): + self.conf.config(group='healthcheck', detailed=True) + mock_session = mock.MagicMock() + mock_session.execute.side_effect = [Exception('boom')] + mock_get_session.return_value = mock_session + response = self._get(self._get_enabled_app(), '/healthcheck', + headers={'Accept': 'text/html'}, status=503) + self.assertEqual(503, response.status_code) + self.assertIn(self.UNAVAILABLE, response.text) + self.assertIn('boom', response.text) + self.assertIn('Garbage collector', response.text) + + # Note: For whatever reason, detailed=True text has no additional info + @mock.patch('octavia.db.api.get_session') + def test_healthcheck_get_text_detailed_failed(self, mock_get_session): + self.conf.config(group='healthcheck', detailed=True) + mock_session = mock.MagicMock() + mock_session.execute.side_effect = [Exception('boom')] + mock_get_session.return_value = mock_session + response = self._get(self._get_enabled_app(), '/healthcheck', + status=503) + self.assertEqual(503, response.status_code) + self.assertEqual(self.UNAVAILABLE, response.text) diff --git a/octavia/tests/functional/api/test_root_controller.py b/octavia/tests/functional/api/test_root_controller.py new file mode 100644 index 0000000000..a65e757fd0 --- /dev/null +++ b/octavia/tests/functional/api/test_root_controller.py @@ -0,0 +1,60 @@ +# Copyright 2017 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
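+# Functional tests for the bare '/' root controller: API version +# discovery and the per-version 'self' links.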
+ +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +import pecan.testing + +from octavia.api import config as pconfig +from octavia.common import constants +from octavia.tests.functional.db import base as base_db_test + + +class TestRootController(base_db_test.OctaviaDBTestBase): + + def get(self, app, path, params=None, headers=None, status=200, + expect_errors=False): + response = app.get( + path, params=params, headers=headers, status=status, + expect_errors=expect_errors) + return response + + def setUp(self): + super().setUp() + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + + def _get_versions_with_config(self): + # Note: we need to set argv=() to stop the wsgi setup_app from + # pulling in the testing tool sys.argv + app = pecan.testing.load_test_app({'app': pconfig.app, + 'wsme': pconfig.wsme}, argv=()) + return self.get(app=app, path='/').json.get('versions', None) + + def test_api_versions(self): + versions = self._get_versions_with_config() + version_ids = tuple(v.get('id') for v in versions) + expected_versions = (f"v2.{i}" for i in range(28)) + for version in expected_versions: + self.assertIn(version, version_ids) + + # Each version should have a 'self' 'href' to the API version URL + # [{u'rel': u'self', u'href': u'http://localhost/v2'}] + # Validate that the URL exists in the response + version_url = 'http://localhost/v2' + for version in versions: + links = version['links'] + # Note, there may be other links present, this test is for 'self' + version_link = [link for link in links if link['rel'] == 'self'] + self.assertEqual(version_url, version_link[0]['href']) diff --git a/octavia/tests/functional/api/v2/__init__.py b/octavia/tests/functional/api/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/functional/api/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/functional/api/v2/base.py b/octavia/tests/functional/api/v2/base.py new file mode 100644 index 0000000000..5b9e2da4f4 --- /dev/null +++ b/octavia/tests/functional/api/v2/base.py @@ -0,0 +1,597 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
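+# Shared base class for the v2 API functional tests: configures noop +# network/provider drivers, instantiates the repositories and defines +# the resource URL paths and request helpers used by the test modules.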
+ +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +import pecan +import pecan.testing + +from octavia.api import config as pconfig +from octavia.common import constants +from octavia.common import exceptions +from octavia.db import api as db_api +from octavia.db import repositories +from octavia.tests.functional.db import base as base_db_test + + +class BaseAPITest(base_db_test.OctaviaDBTestBase): + + BASE_PATH = '/v2' + BASE_PATH_v2_0 = '/v2.0' + + # /lbaas/flavors + FLAVORS_PATH = '/flavors' + FLAVOR_PATH = FLAVORS_PATH + '/{flavor_id}' + + # /lbaas/flavorprofiles + FPS_PATH = '/flavorprofiles' + FP_PATH = FPS_PATH + '/{fp_id}' + + # /lbaas/availabilityzones + AZS_PATH = '/availabilityzones' + AZ_PATH = AZS_PATH + '/{az_name}' + + # /lbaas/availabilityzoneprofiles + AZPS_PATH = '/availabilityzoneprofiles' + AZP_PATH = AZPS_PATH + '/{azp_id}' + + # /lbaas/loadbalancers + LBS_PATH = '/lbaas/loadbalancers' + LB_PATH = LBS_PATH + '/{lb_id}' + LB_STATUS_PATH = LB_PATH + '/statuses' + LB_STATS_PATH = LB_PATH + '/stats' + + # /lbaas/listeners/ + LISTENERS_PATH = '/lbaas/listeners' + LISTENER_PATH = LISTENERS_PATH + '/{listener_id}' + LISTENER_STATS_PATH = LISTENER_PATH + '/stats' + + # /lbaas/pools + POOLS_PATH = '/lbaas/pools' + POOL_PATH = POOLS_PATH + '/{pool_id}' + + # /lbaas/pools/{pool_id}/members + MEMBERS_PATH = POOL_PATH + '/members' + MEMBER_PATH = MEMBERS_PATH + '/{member_id}' + + # /lbaas/healthmonitors + HMS_PATH = '/lbaas/healthmonitors' + HM_PATH = HMS_PATH + '/{healthmonitor_id}' + + # /lbaas/l7policies + L7POLICIES_PATH = '/lbaas/l7policies' + L7POLICY_PATH = L7POLICIES_PATH + '/{l7policy_id}' + L7RULES_PATH = L7POLICY_PATH + '/rules' + L7RULE_PATH = L7RULES_PATH + '/{l7rule_id}' + + QUOTAS_PATH = '/lbaas/quotas' + QUOTA_PATH = QUOTAS_PATH + '/{project_id}' + QUOTA_DEFAULT_PATH = QUOTAS_PATH + '/{project_id}/default' + + AMPHORAE_PATH = '/octavia/amphorae' + AMPHORA_PATH = AMPHORAE_PATH + '/{amphora_id}' + AMPHORA_FAILOVER_PATH = AMPHORA_PATH + '/failover' + AMPHORA_STATS_PATH = AMPHORA_PATH + '/stats' + AMPHORA_CONFIG_PATH = AMPHORA_PATH + '/config' + + PROVIDERS_PATH = '/lbaas/providers' + FLAVOR_CAPABILITIES_PATH = ( + PROVIDERS_PATH + '/{provider}/flavor_capabilities') + AVAILABILITY_ZONE_CAPABILITIES_PATH = ( + PROVIDERS_PATH + '/{provider}/availability_zone_capabilities') + + NOT_AUTHORIZED_BODY = { + 'debuginfo': None, 'faultcode': 'Client', + 'faultstring': 'Policy does not allow this request to be performed.'} + + def setUp(self): + super().setUp() + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + # Mock log_opt_values, it prevents the dump of the configuration + # with LOG.info for each test. It saves a lot of time when running + # the functional tests. 
+        self.conf.conf.log_opt_values = mock.MagicMock()
+        self.conf.config(group="controller_worker",
+                         network_driver='network_noop_driver')
+        self.conf.config(group='api_settings', auth_strategy=constants.NOAUTH)
+        self.conf.config(group='api_settings',
+                         default_provider_driver='noop_driver')
+        # We still need to test with the "octavia" alias
+        self.conf.config(group='api_settings',
+                         enabled_provider_drivers={
+                             'amphora': 'Amp driver.',
+                             'noop_driver': 'NoOp driver.',
+                             'noop_driver-alt': 'NoOp driver alt alias.',
+                             'octavia': 'Octavia driver.'})
+        self.lb_repo = repositories.LoadBalancerRepository()
+        self.listener_repo = repositories.ListenerRepository()
+        self.listener_stats_repo = repositories.ListenerStatisticsRepository()
+        self.pool_repo = repositories.PoolRepository()
+        self.member_repo = repositories.MemberRepository()
+        self.l7policy_repo = repositories.L7PolicyRepository()
+        self.l7rule_repo = repositories.L7RuleRepository()
+        self.health_monitor_repo = repositories.HealthMonitorRepository()
+        self.amphora_repo = repositories.AmphoraRepository()
+        self.flavor_repo = repositories.FlavorRepository()
+        self.flavor_profile_repo = repositories.FlavorProfileRepository()
+        patcher2 = mock.patch('octavia.certificates.manager.barbican.'
+                              'BarbicanCertManager')
+        self.cert_manager_mock = patcher2.start()
+        self.app = self._make_app()
+        self.project_id = uuidutils.generate_uuid()
+
+        def reset_pecan():
+            pecan.set_config({}, overwrite=True)
+
+        self.addCleanup(reset_pecan)
+
+    def start_quota_mock(self, object_type):
+        def mock_quota(session, _class, project_id, count=1):
+            return _class == object_type
+        check_quota_met_true_mock = mock.patch(
+            'octavia.db.repositories.Repositories.check_quota_met',
+            side_effect=mock_quota)
+        check_quota_met_true_mock.start()
+        self.addCleanup(check_quota_met_true_mock.stop)
+
+    def _make_app(self):
+        # Note: we need to set argv=() to stop the wsgi setup_app from
+        # pulling in the testing tool sys.argv
+        return pecan.testing.load_test_app({'app': pconfig.app,
+                                            'wsme': pconfig.wsme}, argv=())
+
+    def _get_full_path(self, path):
+        return ''.join([self.BASE_PATH, path])
+
+    def _get_full_path_v2_0(self, path):
+        return ''.join([self.BASE_PATH_v2_0, path])
+
+    def _build_body(self, json):
+        return {self.root_tag: json}
+
+    def delete(self, path, headers=None, params=None, status=204,
+               expect_errors=False):
+        headers = headers or {}
+        params = params or {}
+        full_path = self._get_full_path(path)
+        param_string = ""
+        for k, v in params.items():
+            param_string += f"{k}={v}&"
+        if param_string:
+            full_path = f"{full_path}?{param_string.rstrip('&')}"
+        response = self.app.delete(full_path,
+                                   headers=headers,
+                                   status=status,
+                                   expect_errors=expect_errors)
+        return response
+
+    def post(self, path, body, headers=None, status=201, expect_errors=False,
+             use_v2_0=False):
+        headers = headers or {}
+        if use_v2_0:
+            full_path = self._get_full_path_v2_0(path)
+        else:
+            full_path = self._get_full_path(path)
+        response = self.app.post_json(full_path,
+                                      params=body,
+                                      headers=headers,
+                                      status=status,
+                                      expect_errors=expect_errors)
+        return response
+
+    def put(self, path, body, headers=None, status=200, expect_errors=False):
+        headers = headers or {}
+        full_path = self._get_full_path(path)
+        response = self.app.put_json(full_path,
+                                     params=body,
+                                     headers=headers,
+                                     status=status,
+                                     expect_errors=expect_errors)
+        return response
+
+    def get(self, path, params=None, headers=None, status=200,
+            expect_errors=False):
+        full_path = self._get_full_path(path)
+        response
= self.app.get(full_path, + params=params, + headers=headers, + status=status, + expect_errors=expect_errors) + return response + + def create_flavor(self, name, description, flavor_profile_id, enabled): + req_dict = {'name': name, 'description': description, + 'flavor_profile_id': flavor_profile_id, + 'enabled': enabled} + body = {'flavor': req_dict} + response = self.post(self.FLAVORS_PATH, body) + return response.json.get('flavor') + + def create_flavor_profile(self, name, provider_name, flavor_data): + req_dict = {'name': name, 'provider_name': provider_name, + constants.FLAVOR_DATA: flavor_data} + body = {'flavorprofile': req_dict} + response = self.post(self.FPS_PATH, body) + return response.json.get('flavorprofile') + + def create_availability_zone(self, name, description, + availability_zone_profile_id, enabled): + req_dict = { + 'name': name, 'description': description, + 'availability_zone_profile_id': availability_zone_profile_id, + 'enabled': enabled} + body = {'availability_zone': req_dict} + response = self.post(self.AZS_PATH, body) + return response.json.get('availability_zone') + + def create_availability_zone_profile(self, name, provider_name, + availability_zone_data): + req_dict = {'name': name, 'provider_name': provider_name, + constants.AVAILABILITY_ZONE_DATA: availability_zone_data} + body = {'availability_zone_profile': req_dict} + response = self.post(self.AZPS_PATH, body) + return response.json.get('availability_zone_profile') + + def create_load_balancer(self, vip_subnet_id, + **optionals): + req_dict = {'vip_subnet_id': vip_subnet_id, + 'project_id': self.project_id} + req_dict.update(optionals) + body = {'loadbalancer': req_dict} + response = self.post(self.LBS_PATH, body) + return response.json + + def create_listener(self, protocol, protocol_port, lb_id, + status=None, **optionals): + req_dict = {'protocol': protocol, 'protocol_port': protocol_port, + 'loadbalancer_id': lb_id} + req_dict.update(optionals) + path = self.LISTENERS_PATH + body = {'listener': req_dict} + status = {'status': status} if status else {} + response = self.post(path, body, **status) + return response.json + + def create_listener_stats(self, listener_id, amphora_id): + db_ls = self.listener_stats_repo.create( + self.session, listener_id=listener_id, + amphora_id=amphora_id, bytes_in=0, + bytes_out=0, active_connections=0, total_connections=0, + request_errors=0) + return db_ls.to_dict() + + def create_listener_stats_dynamic(self, listener_id, amphora_id, + bytes_in=0, bytes_out=0, + active_connections=0, + total_connections=0, request_errors=0): + db_ls = self.listener_stats_repo.create( + self.session, listener_id=listener_id, + amphora_id=amphora_id, bytes_in=bytes_in, + bytes_out=bytes_out, active_connections=active_connections, + total_connections=total_connections, request_errors=request_errors) + return db_ls.to_dict() + + def create_amphora(self, amphora_id, loadbalancer_id, **optionals): + # We need to default these values in the request. 
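+        # Callers may still override either value through **optionals.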
+        opts = {'compute_id': uuidutils.generate_uuid(),
+                'status': constants.ACTIVE}
+        opts.update(optionals)
+        amphora = self.amphora_repo.create(
+            self.session, id=amphora_id,
+            load_balancer_id=loadbalancer_id,
+            **opts)
+        return amphora
+
+    def get_listener(self, listener_id):
+        path = self.LISTENER_PATH.format(listener_id=listener_id)
+        response = self.get(path)
+        return response.json
+
+    def create_pool_with_listener(self, lb_id, listener_id, protocol,
+                                  lb_algorithm, **optionals):
+        req_dict = {'loadbalancer_id': lb_id, 'listener_id': listener_id,
+                    'protocol': protocol, 'lb_algorithm': lb_algorithm}
+        req_dict.update(optionals)
+        body = {'pool': req_dict}
+        path = self.POOLS_PATH
+        response = self.post(path, body)
+        return response.json
+
+    def create_pool(self, lb_id, protocol, lb_algorithm,
+                    status=None, **optionals):
+        req_dict = {'loadbalancer_id': lb_id, 'protocol': protocol,
+                    'lb_algorithm': lb_algorithm}
+        req_dict.update(optionals)
+        body = {'pool': req_dict}
+        path = self.POOLS_PATH
+        status = {'status': status} if status else {}
+        response = self.post(path, body, **status)
+        return response.json
+
+    def create_member(self, pool_id, address, protocol_port,
+                      status=None, **optionals):
+        req_dict = {'address': address, 'protocol_port': protocol_port}
+        req_dict.update(optionals)
+        body = {'member': req_dict}
+        path = self.MEMBERS_PATH.format(pool_id=pool_id)
+        status = {'status': status} if status else {}
+        response = self.post(path, body, **status)
+        return response.json
+
+    def create_health_monitor(self, pool_id, type, delay, timeout,
+                              max_retries_down, max_retries,
+                              status=None, **optionals):
+        req_dict = {'pool_id': pool_id,
+                    'type': type,
+                    'delay': delay,
+                    'timeout': timeout,
+                    'max_retries_down': max_retries_down,
+                    'max_retries': max_retries}
+        req_dict.update(optionals)
+        body = {'healthmonitor': req_dict}
+        path = self.HMS_PATH
+        status = {'status': status} if status else {}
+        response = self.post(path, body, **status)
+        return response.json
+
+    def create_l7policy(self, listener_id, action, status=None, **optionals):
+        req_dict = {'listener_id': listener_id, 'action': action}
+        req_dict.update(optionals)
+        body = {'l7policy': req_dict}
+        path = self.L7POLICIES_PATH
+        status = {'status': status} if status else {}
+        response = self.post(path, body, **status)
+        return response.json
+
+    def create_l7rule(self, l7policy_id, type, compare_type,
+                      value, status=None, **optionals):
+        req_dict = {'type': type, 'compare_type': compare_type,
+                    'value': value}
+        req_dict.update(optionals)
+        body = {'rule': req_dict}
+        path = self.L7RULES_PATH.format(l7policy_id=l7policy_id)
+        status = {'status': status} if status else {}
+        response = self.post(path, body, **status)
+        return response.json
+
+    def create_quota(self, project_id=-1, lb_quota=None, listener_quota=None,
+                     pool_quota=None, hm_quota=None, member_quota=None):
+        if project_id == -1:
+            project_id = self.project_id
+        req_dict = {'load_balancer': lb_quota,
+                    'listener': listener_quota,
+                    'pool': pool_quota,
+                    'health_monitor': hm_quota,
+                    'member': member_quota}
+        req_dict = {k: v for k, v in req_dict.items() if v is not None}
+        body = {'quota': req_dict}
+        path = self.QUOTA_PATH.format(project_id=project_id)
+        response = self.put(path, body, status=202)
+        return response.json
+
+    # NOTE: This method should be used cautiously. On load balancers with a
+    # significant number of child resources, it updates the status of each
+    # and every resource and can thus take a lot of DB time.
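+    # When autodetect is True, any child already in PENDING_DELETE is set
+    # to DELETED instead of inheriting the requested provisioning status.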
+    def _set_lb_and_children_statuses(self, lb_id, prov_status, op_status,
+                                      autodetect=True):
+        self.set_object_status(self.lb_repo, lb_id,
+                               provisioning_status=prov_status,
+                               operating_status=op_status)
+        lb_listeners, _ = self.listener_repo.get_all(
+            db_api.get_session(), load_balancer_id=lb_id)
+        for listener in lb_listeners:
+            if autodetect and (listener.provisioning_status ==
+                               constants.PENDING_DELETE):
+                listener_prov = constants.DELETED
+            else:
+                listener_prov = prov_status
+            self.set_object_status(self.listener_repo, listener.id,
+                                   provisioning_status=listener_prov,
+                                   operating_status=op_status)
+            lb_l7policies, _ = self.l7policy_repo.get_all(
+                db_api.get_session(), listener_id=listener.id)
+            for l7policy in lb_l7policies:
+                if autodetect and (l7policy.provisioning_status ==
+                                   constants.PENDING_DELETE):
+                    l7policy_prov = constants.DELETED
+                else:
+                    l7policy_prov = prov_status
+                self.set_object_status(self.l7policy_repo, l7policy.id,
+                                       provisioning_status=l7policy_prov,
+                                       operating_status=op_status)
+                l7rules, _ = self.l7rule_repo.get_all(
+                    db_api.get_session(), l7policy_id=l7policy.id)
+                for l7rule in l7rules:
+                    if autodetect and (l7rule.provisioning_status ==
+                                       constants.PENDING_DELETE):
+                        l7rule_prov = constants.DELETED
+                    else:
+                        l7rule_prov = prov_status
+                    self.set_object_status(self.l7rule_repo, l7rule.id,
+                                           provisioning_status=l7rule_prov,
+                                           operating_status=op_status)
+        lb_pools, _ = self.pool_repo.get_all(db_api.get_session(),
+                                             load_balancer_id=lb_id)
+        for pool in lb_pools:
+            if autodetect and (pool.provisioning_status ==
+                               constants.PENDING_DELETE):
+                pool_prov = constants.DELETED
+            else:
+                pool_prov = prov_status
+            self.set_object_status(self.pool_repo, pool.id,
+                                   provisioning_status=pool_prov,
+                                   operating_status=op_status)
+            for member in pool.members:
+                if autodetect and (member.provisioning_status ==
+                                   constants.PENDING_DELETE):
+                    member_prov = constants.DELETED
+                else:
+                    member_prov = prov_status
+                self.set_object_status(self.member_repo, member.id,
+                                       provisioning_status=member_prov,
+                                       operating_status=op_status)
+            if pool.health_monitor:
+                if autodetect and (pool.health_monitor.provisioning_status ==
+                                   constants.PENDING_DELETE):
+                    hm_prov = constants.DELETED
+                else:
+                    hm_prov = prov_status
+                self.set_object_status(self.health_monitor_repo,
+                                       pool.health_monitor.id,
+                                       provisioning_status=hm_prov,
+                                       operating_status=op_status)
+
+    # NOTE: This method should be used cautiously. On load balancers with a
+    # significant number of child resources, it updates the status of each
+    # and every resource and can thus take a lot of DB time.
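+    # If no status is passed the load balancer is set to ACTIVE; DELETED
+    # maps to an OFFLINE operating status, ACTIVE to ONLINE, and any other
+    # explicit status keeps the current operating status from the DB.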
+    def set_lb_status(self, lb_id, status=None):
+        explicit_status = status is not None
+        if not explicit_status:
+            status = constants.ACTIVE
+
+        if status == constants.DELETED:
+            op_status = constants.OFFLINE
+        elif status == constants.ACTIVE:
+            op_status = constants.ONLINE
+        else:
+            db_lb = self.lb_repo.get(db_api.get_session(), id=lb_id)
+            op_status = db_lb.operating_status
+        self._set_lb_and_children_statuses(lb_id, status, op_status,
+                                           autodetect=not explicit_status)
+        if status != constants.DELETED:
+            return self.get(self.LB_PATH.format(lb_id=lb_id)).json
+
+    @staticmethod
+    def set_object_status(repo, id_, provisioning_status=constants.ACTIVE,
+                          operating_status=constants.ONLINE):
+        session = db_api.get_session()
+        repo.update(session, id_,
+                    provisioning_status=provisioning_status,
+                    operating_status=operating_status)
+        session.commit()
+
+    def assert_final_listener_statuses(self, lb_id, listener_id,
+                                       delete=False):
+        expected_prov_status = constants.ACTIVE
+        expected_op_status = constants.ONLINE
+        self.set_lb_status(lb_id, status=expected_prov_status)
+        try:
+            self.assert_correct_listener_status(expected_prov_status,
+                                                expected_op_status,
+                                                listener_id)
+        except exceptions.NotFound:
+            if not delete:
+                raise
+
+    def assert_correct_lb_status(self, lb_id,
+                                 operating_status, provisioning_status):
+        api_lb = self.get(
+            self.LB_PATH.format(lb_id=lb_id)).json.get('loadbalancer')
+        self.assertEqual(provisioning_status,
+                         api_lb.get('provisioning_status'))
+        self.assertEqual(operating_status,
+                         api_lb.get('operating_status'))
+
+    def assert_correct_listener_status(self, provisioning_status,
+                                       operating_status, listener_id):
+        api_listener = self.get(self.LISTENER_PATH.format(
+            listener_id=listener_id)).json.get('listener')
+        self.assertEqual(provisioning_status,
+                         api_listener.get('provisioning_status'))
+        self.assertEqual(operating_status,
+                         api_listener.get('operating_status'))
+
+    def assert_correct_pool_status(self, provisioning_status,
+                                   operating_status, pool_id):
+        api_pool = self.get(self.POOL_PATH.format(
+            pool_id=pool_id)).json.get('pool')
+        self.assertEqual(provisioning_status,
+                         api_pool.get('provisioning_status'))
+        self.assertEqual(operating_status,
+                         api_pool.get('operating_status'))
+
+    def assert_correct_member_status(self, provisioning_status,
+                                     operating_status, pool_id, member_id):
+        api_member = self.get(self.MEMBER_PATH.format(
+            pool_id=pool_id, member_id=member_id)).json.get('member')
+        self.assertEqual(provisioning_status,
+                         api_member.get('provisioning_status'))
+        self.assertEqual(operating_status,
+                         api_member.get('operating_status'))
+
+    def assert_correct_l7policy_status(self, provisioning_status,
+                                       operating_status, l7policy_id):
+        api_l7policy = self.get(self.L7POLICY_PATH.format(
+            l7policy_id=l7policy_id)).json.get('l7policy')
+        self.assertEqual(provisioning_status,
+                         api_l7policy.get('provisioning_status'))
+        self.assertEqual(operating_status,
+                         api_l7policy.get('operating_status'))
+
+    def assert_correct_l7rule_status(self, provisioning_status,
+                                     operating_status, l7policy_id,
+                                     l7rule_id):
+        api_l7rule = self.get(self.L7RULE_PATH.format(
+            l7policy_id=l7policy_id, l7rule_id=l7rule_id)).json.get('rule')
+        self.assertEqual(provisioning_status,
+                         api_l7rule.get('provisioning_status'))
+        self.assertEqual(operating_status,
+                         api_l7rule.get('operating_status'))
+
+    def assert_correct_hm_status(self, provisioning_status,
+                                 operating_status, hm_id):
+        api_hm = self.get(self.HM_PATH.format(
+            healthmonitor_id=hm_id)).json.get('healthmonitor')
+
self.assertEqual(provisioning_status, + api_hm.get('provisioning_status')) + self.assertEqual(operating_status, + api_hm.get('operating_status')) + + def assert_correct_status(self, lb_id=None, listener_id=None, pool_id=None, + member_id=None, l7policy_id=None, l7rule_id=None, + hm_id=None, + lb_prov_status=constants.ACTIVE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.ACTIVE, + member_prov_status=constants.ACTIVE, + l7policy_prov_status=constants.ACTIVE, + l7rule_prov_status=constants.ACTIVE, + hm_prov_status=constants.ACTIVE, + lb_op_status=constants.ONLINE, + listener_op_status=constants.ONLINE, + pool_op_status=constants.ONLINE, + member_op_status=constants.ONLINE, + l7policy_op_status=constants.ONLINE, + l7rule_op_status=constants.ONLINE, + hm_op_status=constants.ONLINE): + if lb_id: + self.assert_correct_lb_status(lb_id, lb_op_status, lb_prov_status) + if listener_id: + self.assert_correct_listener_status( + listener_prov_status, listener_op_status, listener_id) + if pool_id: + self.assert_correct_pool_status( + pool_prov_status, pool_op_status, pool_id) + if member_id: + self.assert_correct_member_status( + member_prov_status, member_op_status, pool_id, member_id) + if l7policy_id: + self.assert_correct_l7policy_status( + l7policy_prov_status, l7policy_op_status, l7policy_id) + if l7rule_id: + self.assert_correct_l7rule_status( + l7rule_prov_status, l7rule_op_status, l7policy_id, l7rule_id) + if hm_id: + self.assert_correct_hm_status( + hm_prov_status, hm_op_status, hm_id) diff --git a/octavia/tests/functional/api/v2/test_amphora.py b/octavia/tests/functional/api/v2/test_amphora.py new file mode 100644 index 0000000000..e384496e3f --- /dev/null +++ b/octavia/tests/functional/api/v2/test_amphora.py @@ -0,0 +1,673 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
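+
+# Functional tests for the v2 amphora API: show, list, delete, failover,
+# stats and agent configuration endpoints, each exercised with authorized
+# and not-authorized RBAC variants.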
+ +import datetime +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +import octavia.common.context +from octavia.common import exceptions +from octavia.tests.functional.api.v2 import base + + +class TestAmphora(base.BaseAPITest): + + root_tag = 'amphora' + root_tag_list = 'amphorae' + root_tag_links = 'amphorae_links' + root_tag_stats = 'amphora_stats' + + def setUp(self): + super().setUp() + self.lb = self.create_load_balancer( + uuidutils.generate_uuid()).get('loadbalancer') + self.lb_id = self.lb.get('id') + self.project_id = self.lb.get('project_id') + self.set_lb_status(self.lb_id) + self.amp_args = { + 'id': uuidutils.generate_uuid(), + 'load_balancer_id': self.lb_id, + 'compute_id': uuidutils.generate_uuid(), + 'lb_network_ip': '192.168.1.2', + 'vrrp_ip': '192.168.1.5', + 'ha_ip': '192.168.1.10', + 'vrrp_port_id': uuidutils.generate_uuid(), + 'ha_port_id': uuidutils.generate_uuid(), + 'cert_expiration': datetime.datetime.now(), + 'cert_busy': False, + 'role': constants.ROLE_STANDALONE, + 'status': constants.AMPHORA_ALLOCATED, + 'vrrp_interface': 'eth1', + 'vrrp_id': 1, + 'vrrp_priority': 100, + 'cached_zone': None, + 'created_at': datetime.datetime.now(), + 'updated_at': datetime.datetime.now(), + 'image_id': uuidutils.generate_uuid(), + 'compute_flavor': uuidutils.generate_uuid(), + } + self.amp = self.amphora_repo.create(self.session, **self.amp_args) + self.amp_id = self.amp.id + self.amp_args['id'] = self.amp_id + self.listener1_id = uuidutils.generate_uuid() + self.create_listener_stats_dynamic(self.listener1_id, self.amp_id, + bytes_in=1, bytes_out=2, + active_connections=3, + total_connections=4, + request_errors=5) + self.listener2_id = uuidutils.generate_uuid() + self.create_listener_stats_dynamic(self.listener2_id, self.amp_id, + bytes_in=6, bytes_out=7, + active_connections=8, + total_connections=9, + request_errors=10) + self.listener1_amp_stats = {'active_connections': 3, + 'bytes_in': 1, 'bytes_out': 2, + 'id': self.amp_id, + 'listener_id': self.listener1_id, + 'loadbalancer_id': self.lb_id, + 'request_errors': 5, + 'total_connections': 4} + self.listener2_amp_stats = {'active_connections': 8, + 'bytes_in': 6, 'bytes_out': 7, + 'id': self.amp_id, + 'listener_id': self.listener2_id, + 'loadbalancer_id': self.lb_id, + 'request_errors': 10, + 'total_connections': 9} + self.ref_amp_stats = [self.listener1_amp_stats, + self.listener2_amp_stats] + self.session.commit() + + def _create_additional_amp(self): + amp_args = { + 'id': uuidutils.generate_uuid(), + 'load_balancer_id': None, + 'compute_id': uuidutils.generate_uuid(), + 'lb_network_ip': '192.168.1.2', + 'vrrp_ip': '192.168.1.5', + 'ha_ip': '192.168.1.10', + 'vrrp_port_id': uuidutils.generate_uuid(), + 'ha_port_id': uuidutils.generate_uuid(), + 'cert_expiration': None, + 'cert_busy': False, + 'role': constants.ROLE_MASTER, + 'status': constants.AMPHORA_ALLOCATED, + 'vrrp_interface': 'eth1', + 'vrrp_id': 1, + 'vrrp_priority': 100, + } + with self.session.begin(): + return self.amphora_repo.create(self.session, **amp_args) + + def _assert_amp_equal(self, source, response): + self.assertEqual(source.pop('load_balancer_id'), + response.pop('loadbalancer_id')) + self.assertEqual(source.pop('cert_expiration').isoformat(), + response.pop('cert_expiration')) + self.assertEqual(source.pop('created_at').isoformat(), + response.pop('created_at')) + 
self.assertEqual(source.pop('updated_at').isoformat(), + response.pop('updated_at')) + self.assertEqual(source, response) + + def test_get(self): + response = self.get(self.AMPHORA_PATH.format( + amphora_id=self.amp_id)).json.get(self.root_tag) + self._assert_amp_equal(self.amp_args, response) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_delete(self, mock_cast): + self.amp_args = { + 'id': uuidutils.generate_uuid(), + 'status': constants.ERROR, + } + with self.session.begin(): + amp = self.amphora_repo.create(self.session, **self.amp_args) + + self.delete(self.AMPHORA_PATH.format( + amphora_id=amp.id), status=204) + + response = self.get(self.AMPHORA_PATH.format( + amphora_id=amp.id)).json.get(self.root_tag) + + self.assertEqual(constants.PENDING_DELETE, response[constants.STATUS]) + + payload = {constants.AMPHORA_ID: amp.id} + mock_cast.assert_called_with({}, 'delete_amphora', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_delete_not_found(self, mock_cast): + self.delete(self.AMPHORA_PATH.format(amphora_id='bogus-id'), + status=404) + mock_cast.assert_not_called() + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_delete_immutable(self, mock_cast): + self.amp_args = { + 'id': uuidutils.generate_uuid(), + 'status': constants.AMPHORA_ALLOCATED, + } + with self.session.begin(): + amp = self.amphora_repo.create(self.session, **self.amp_args) + + self.delete(self.AMPHORA_PATH.format( + amphora_id=amp.id), status=409) + + mock_cast.assert_not_called() + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_delete_authorized(self, mock_cast): + self.amp_args = { + 'id': uuidutils.generate_uuid(), + 'status': constants.ERROR, + } + with self.session.begin(): + amp = self.amphora_repo.create(self.session, **self.amp_args) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.delete(self.AMPHORA_PATH.format(amphora_id=amp.id), + status=204) + # Reset api auth setting + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + response = self.get(self.AMPHORA_PATH.format( + amphora_id=amp.id)).json.get(self.root_tag) + + self.assertEqual(constants.PENDING_DELETE, response[constants.STATUS]) + + payload = {constants.AMPHORA_ID: amp.id} + mock_cast.assert_called_with({}, 'delete_amphora', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_delete_not_authorized(self, mock_cast): + self.amp_args = { + 'id': uuidutils.generate_uuid(), + 'status': constants.ERROR, + } + with self.session.begin(): + amp = self.amphora_repo.create(self.session, **self.amp_args) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with 
mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + self.delete(self.AMPHORA_PATH.format(amphora_id=amp.id), + status=403) + # Reset api auth setting + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + mock_cast.assert_not_called() + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_failover(self, mock_cast): + self.put(self.AMPHORA_FAILOVER_PATH.format( + amphora_id=self.amp_id), body={}, status=202) + payload = {constants.AMPHORA_ID: self.amp_id} + mock_cast.assert_called_with({}, 'failover_amphora', **payload) + + def test_failover_deleted(self): + new_amp = self._create_additional_amp() + self.amphora_repo.update(self.session, new_amp.id, + status=constants.DELETED) + self.put(self.AMPHORA_FAILOVER_PATH.format( + amphora_id=new_amp.id), body={}, status=404) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_failover_bad_amp_id(self, mock_cast): + self.put(self.AMPHORA_FAILOVER_PATH.format( + amphora_id='asdf'), body={}, status=404) + self.assertFalse(mock_cast.called) + + def test_get_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.AMPHORA_PATH.format( + amphora_id=self.amp_id)).json.get(self.root_tag) + # Reset api auth setting + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self._assert_amp_equal(self.amp_args, response) + + def test_get_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.get(self.AMPHORA_PATH.format( + amphora_id=self.amp_id), status=403) + # Reset api auth setting + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_failover_authorized(self, mock_cast): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.put(self.AMPHORA_FAILOVER_PATH.format( + 
amphora_id=self.amp_id), body={}, status=202) + + # Reset api auth setting + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + payload = {constants.AMPHORA_ID: self.amp_id} + mock_cast.assert_called_once_with({}, 'failover_amphora', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_failover_not_authorized(self, mock_cast): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.put(self.AMPHORA_FAILOVER_PATH.format( + amphora_id=self.amp_id), body={}, status=403) + # Reset api auth setting + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + mock_cast.assert_not_called() + + def test_get_deleted_gives_404(self): + new_amp = self._create_additional_amp() + + self.amphora_repo.update(self.session, new_amp.id, + status=constants.DELETED) + self.get(self.AMPHORA_PATH.format(amphora_id=new_amp.id), status=404) + + def test_bad_get(self): + self.get(self.AMPHORA_PATH.format( + amphora_id=uuidutils.generate_uuid()), status=404) + + def test_get_all(self): + amps = self.get(self.AMPHORAE_PATH).json.get(self.root_tag_list) + self.assertIsInstance(amps, list) + self.assertEqual(1, len(amps)) + self.assertEqual(self.amp_id, amps[0].get('id')) + + def test_get_all_authorized(self): + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + amps = self.get(self.AMPHORAE_PATH).json.get( + self.root_tag_list) + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertIsInstance(amps, list) + self.assertEqual(1, len(amps)) + self.assertEqual(self.amp_id, amps[0].get('id')) + + def test_get_all_not_authorized(self): + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + amps = self.get(self.AMPHORAE_PATH, status=403).json + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, amps) + + def test_get_all_hides_deleted(self): + new_amp = self._create_additional_amp() + + response = self.get(self.AMPHORAE_PATH) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 2) + self.amphora_repo.update(self.session, new_amp.id, + status=constants.DELETED) + response = self.get(self.AMPHORAE_PATH) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 1) + + def 
test_get_by_loadbalancer_id(self): + amps = self.get( + self.AMPHORAE_PATH, + params={'loadbalancer_id': self.lb_id} + ).json.get(self.root_tag_list) + + self.assertEqual(1, len(amps)) + amps = self.get( + self.AMPHORAE_PATH, + params={'loadbalancer_id': uuidutils.generate_uuid()} + ).json.get(self.root_tag_list) + self.assertEqual(0, len(amps)) + + def test_get_by_project_id(self): + amps = self.get( + self.AMPHORAE_PATH, + params={'project_id': self.project_id} + ).json.get(self.root_tag_list) + self.assertEqual(1, len(amps)) + + false_project_id = uuidutils.generate_uuid() + amps = self.get( + self.AMPHORAE_PATH, + params={'project_id': false_project_id} + ).json.get(self.root_tag_list) + + self.assertEqual(int(false_project_id == self.project_id), + len(amps)) + + def test_get_all_sorted(self): + self._create_additional_amp() + + response = self.get(self.AMPHORAE_PATH, params={'sort': 'role:desc'}) + amps_desc = response.json.get(self.root_tag_list) + response = self.get(self.AMPHORAE_PATH, params={'sort': 'role:asc'}) + amps_asc = response.json.get(self.root_tag_list) + + self.assertEqual(2, len(amps_desc)) + self.assertEqual(2, len(amps_asc)) + + amp_id_roles_desc = [(amp.get('id'), amp.get('role')) + for amp in amps_desc] + amp_id_roles_asc = [(amp.get('id'), amp.get('role')) + for amp in amps_asc] + self.assertEqual(amp_id_roles_asc, list(reversed(amp_id_roles_desc))) + + def test_get_all_limited(self): + self._create_additional_amp() + self._create_additional_amp() + + # First two -- should have 'next' link + first_two = self.get(self.AMPHORAE_PATH, params={'limit': 2}).json + objs = first_two[self.root_tag_list] + links = first_two[self.root_tag_links] + self.assertEqual(2, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('next', links[0]['rel']) + + # Third + off the end -- should have previous link + third = self.get(self.AMPHORAE_PATH, params={ + 'limit': 2, + 'marker': first_two[self.root_tag_list][1]['id']}).json + objs = third[self.root_tag_list] + links = third[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('previous', links[0]['rel']) + + # Middle -- should have both links + middle = self.get(self.AMPHORAE_PATH, params={ + 'limit': 1, + 'marker': first_two[self.root_tag_list][0]['id']}).json + objs = middle[self.root_tag_list] + links = middle[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(2, len(links)) + self.assertCountEqual(['previous', 'next'], + [link['rel'] for link in links]) + + def test_get_all_fields_filter(self): + amps = self.get(self.AMPHORAE_PATH, params={ + 'fields': ['id', 'role']}).json + for amp in amps['amphorae']: + self.assertIn('id', amp) + self.assertIn('role', amp) + self.assertNotIn('ha_port_id', amp) + + def test_get_one_fields_filter(self): + amp = self.get( + self.AMPHORA_PATH.format(amphora_id=self.amp_id), + params={'fields': ['id', 'role']}).json.get(self.root_tag) + self.assertIn('id', amp) + self.assertIn('role', amp) + self.assertNotIn('ha_port_id', amp) + + def test_get_all_filter(self): + self._create_additional_amp() + + amps = self.get(self.AMPHORAE_PATH, params={ + 'id': self.amp_id}).json.get(self.root_tag_list) + self.assertEqual(1, len(amps)) + self.assertEqual(self.amp_id, + amps[0]['id']) + + def test_empty_get_all(self): + self.amphora_repo.delete(self.session, id=self.amp_id) + response = self.get(self.AMPHORAE_PATH).json.get(self.root_tag_list) + self.assertIsInstance(response, list) + self.assertEqual(0, len(response)) + + 
def test_get_stats_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.AMPHORA_STATS_PATH.format( + amphora_id=self.amp_id)).json.get(self.root_tag_stats) + # Reset api auth setting + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.ref_amp_stats, response) + + def test_get_stats_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.get(self.AMPHORA_STATS_PATH.format( + amphora_id=self.amp_id), status=403) + # Reset api auth setting + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_get_stats_bad_amp_id(self): + self.get(self.AMPHORA_STATS_PATH.format( + amphora_id='bogus_id'), status=404) + + def test_get_stats_no_listeners(self): + self.lb2 = self.create_load_balancer( + uuidutils.generate_uuid()).get('loadbalancer') + self.lb2_id = self.lb2.get('id') + self.set_lb_status(self.lb2_id) + self.amp2_args = { + 'id': uuidutils.generate_uuid(), + 'load_balancer_id': self.lb2_id, + 'compute_id': uuidutils.generate_uuid(), + 'lb_network_ip': '192.168.1.20', + 'vrrp_ip': '192.168.1.5', + 'ha_ip': '192.168.1.100', + 'vrrp_port_id': uuidutils.generate_uuid(), + 'ha_port_id': uuidutils.generate_uuid(), + 'cert_expiration': datetime.datetime.now(), + 'cert_busy': False, + 'role': constants.ROLE_STANDALONE, + 'status': constants.AMPHORA_ALLOCATED, + 'vrrp_interface': 'eth1', + 'vrrp_id': 1, + 'vrrp_priority': 100, + 'cached_zone': None, + 'created_at': datetime.datetime.now(), + 'updated_at': datetime.datetime.now(), + 'image_id': uuidutils.generate_uuid(), + } + self.amp2 = self.amphora_repo.create(self.session, **self.amp2_args) + self.amp2_id = self.amp2.id + self.get(self.AMPHORA_STATS_PATH.format( + amphora_id=self.amp2_id), status=404) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_config(self, mock_cast): + self.put(self.AMPHORA_CONFIG_PATH.format( + amphora_id=self.amp_id), body={}, status=202) + payload = {constants.AMPHORA_ID: self.amp_id} + mock_cast.assert_called_with({}, 'update_amphora_agent_config', + **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_config_deleted(self, mock_cast): + new_amp = self._create_additional_amp() + self.amphora_repo.update(self.session, new_amp.id, + status=constants.DELETED) + self.put(self.AMPHORA_CONFIG_PATH.format( + amphora_id=new_amp.id), body={}, status=404) + self.assertFalse(mock_cast.called) + + @mock.patch('oslo_messaging.RPCClient.cast') 
+ def test_config_bad_amp_id(self, mock_cast): + self.put(self.AMPHORA_CONFIG_PATH.format( + amphora_id='bogus'), body={}, status=404) + self.assertFalse(mock_cast.called) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_config_exception(self, mock_cast): + mock_cast.side_effect = exceptions.OctaviaException('boom') + self.put(self.AMPHORA_CONFIG_PATH.format( + amphora_id=self.amp_id), body={}, status=500) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_config_authorized(self, mock_cast): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + + self.put(self.AMPHORA_CONFIG_PATH.format( + amphora_id=self.amp_id), body={}, status=202) + # Reset api auth setting + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + payload = {constants.AMPHORA_ID: self.amp_id} + mock_cast.assert_called_with({}, 'update_amphora_agent_config', + **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_config_not_authorized(self, mock_cast): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + self.put(self.AMPHORA_CONFIG_PATH.format( + amphora_id=self.amp_id), body={}, status=403) + # Reset api auth setting + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertFalse(mock_cast.called) + + def test_bogus_path(self): + self.put(self.AMPHORA_PATH.format(amphora_id=self.amp_id) + '/bogus', + body={}, status=405) diff --git a/octavia/tests/functional/api/v2/test_availability_zone_profiles.py b/octavia/tests/functional/api/v2/test_availability_zone_profiles.py new file mode 100644 index 0000000000..265a1bd464 --- /dev/null +++ b/octavia/tests/functional/api/v2/test_availability_zone_profiles.py @@ -0,0 +1,578 @@ +# Copyright 2019 Verizon Media +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
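+
+# Functional tests for the v2 availability zone profile API: create, show,
+# list, update and delete, covering field validation, in-use restrictions
+# and RBAC variants.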
+ +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_db import exception as odb_exceptions +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.tests.functional.api.v2 import base + + +class TestAvailabilityZoneProfiles(base.BaseAPITest): + root_tag = 'availability_zone_profile' + root_tag_list = 'availability_zone_profiles' + root_tag_links = 'availability_zone_profile_links' + + def _assert_request_matches_response(self, req, resp, **optionals): + self.assertTrue(uuidutils.is_uuid_like(resp.get('id'))) + self.assertEqual(req.get('name'), resp.get('name')) + self.assertEqual(req.get(constants.PROVIDER_NAME), + resp.get(constants.PROVIDER_NAME)) + self.assertEqual(req.get(constants.AVAILABILITY_ZONE_DATA), + resp.get(constants.AVAILABILITY_ZONE_DATA)) + + def test_empty_list(self): + response = self.get(self.AZPS_PATH) + api_list = response.json.get(self.root_tag_list) + self.assertEqual([], api_list) + + def test_create(self): + az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + body = self._build_body(az_json) + response = self.post(self.AZPS_PATH, body) + api_azp = response.json.get(self.root_tag) + self._assert_request_matches_response(az_json, api_azp) + + def test_create_with_missing_name(self): + az_json = {constants.PROVIDER_NAME: 'pr1', + constants.AVAILABILITY_ZONE_DATA: '{"x": "y"}'} + body = self._build_body(az_json) + response = self.post(self.AZPS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute name. Value: " + "'None'. Mandatory field missing.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_missing_provider(self): + az_json = {'name': 'xyz', + constants.AVAILABILITY_ZONE_DATA: '{"x": "y"}'} + body = self._build_body(az_json) + response = self.post(self.AZPS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute provider_name. " + "Value: 'None'. Mandatory field missing.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_missing_availability_zone_data(self): + az_json = {'name': 'xyz', constants.PROVIDER_NAME: 'pr1'} + body = self._build_body(az_json) + response = self.post(self.AZPS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute availability_zone_data. " + "Value: 'None'. 
Mandatory field missing.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_empty_availability_zone_data(self): + az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', + constants.AVAILABILITY_ZONE_DATA: '{}'} + body = self._build_body(az_json) + response = self.post(self.AZPS_PATH, body) + api_azp = response.json.get(self.root_tag) + self._assert_request_matches_response(az_json, api_azp) + + def test_create_with_long_name(self): + az_json = {'name': 'n' * 256, constants.PROVIDER_NAME: 'test1', + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + body = self._build_body(az_json) + self.post(self.AZPS_PATH, body, status=400) + + def test_create_with_long_provider(self): + az_json = {'name': 'name1', constants.PROVIDER_NAME: 'n' * 256, + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + body = self._build_body(az_json) + self.post(self.AZPS_PATH, body, status=400) + + def test_create_with_long_availability_zone_data(self): + az_json = {'name': 'name1', constants.PROVIDER_NAME: 'amp', + constants.AVAILABILITY_ZONE_DATA: 'n' * 4097} + body = self._build_body(az_json) + self.post(self.AZPS_PATH, body, status=400) + + def test_create_authorized(self): + az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + body = self._build_body(az_json) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.post(self.AZPS_PATH, body) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + api_azp = response.json.get(self.root_tag) + self._assert_request_matches_response(az_json, api_azp) + + def test_create_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + az_json = {'name': 'name', + constants.PROVIDER_NAME: 'xyz', + constants.AVAILABILITY_ZONE_DATA: '{"x": "y"}'} + body = self._build_body(az_json) + response = self.post(self.AZPS_PATH, body, status=403) + api_azp = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_azp) + + def test_create_db_failure(self): + az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + body = self._build_body(az_json) + with mock.patch( + "octavia.db.repositories.AvailabilityZoneProfileRepository." 
+ "create") as mock_create: + mock_create.side_effect = Exception + self.post(self.AZPS_PATH, body, status=500) + + mock_create.side_effect = odb_exceptions.DBDuplicateEntry + self.post(self.AZPS_PATH, body, status=409) + + def test_create_with_invalid_json(self): + az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', + constants.AVAILABILITY_ZONE_DATA: '{hello: "world"}'} + body = self._build_body(az_json) + self.post(self.AZPS_PATH, body, status=400) + + def test_get(self): + azp = self.create_availability_zone_profile( + 'name', 'noop_driver', '{"x": "y"}') + self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) + response = self.get( + self.AZP_PATH.format( + azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('name', response.get('name')) + self.assertEqual(azp.get('id'), response.get('id')) + + def test_get_one_deleted_id(self): + response = self.get(self.AZP_PATH.format(azp_id=constants.NIL_UUID), + status=404) + self.assertEqual('Availability Zone Profile {} not found.'.format( + constants.NIL_UUID), response.json.get('faultstring')) + + def test_get_one_fields_filter(self): + azp = self.create_availability_zone_profile( + 'name', 'noop_driver', '{"x": "y"}') + self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id')), params={ + 'fields': ['id', constants.PROVIDER_NAME]} + ).json.get(self.root_tag) + self.assertEqual(azp.get('id'), response.get('id')) + self.assertIn('id', response) + self.assertIn(constants.PROVIDER_NAME, response) + self.assertNotIn('name', response) + self.assertNotIn(constants.AVAILABILITY_ZONE_DATA, response) + + def test_get_authorized(self): + azp = self.create_availability_zone_profile( + 'name', 'noop_driver', '{"x": "y"}') + self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get( + self.AZP_PATH.format( + azp_id=azp.get('id'))).json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual('name', response.get('name')) + self.assertEqual(azp.get('id'), response.get('id')) + + def test_get_not_authorized(self): + azp = self.create_availability_zone_profile( + 'name', 'noop_driver', '{"x": "y"}') + self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + self.get(self.AZP_PATH.format(azp_id=azp.get('id')), status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_get_all(self): + fp1 = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') + ref_fp_1 = {'availability_zone_data': '{"compute_zone": "my_az_1"}', 
+ 'id': fp1.get('id'), 'name': 'test1', + constants.PROVIDER_NAME: 'noop_driver'} + self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) + fp2 = self.create_availability_zone_profile( + 'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}') + ref_fp_2 = {'availability_zone_data': '{"compute_zone": "my_az_1"}', + 'id': fp2.get('id'), 'name': 'test2', + constants.PROVIDER_NAME: 'noop_driver-alt'} + self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) + + response = self.get(self.AZPS_PATH) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + self.assertIn(ref_fp_1, api_list) + self.assertIn(ref_fp_2, api_list) + + def test_get_all_fields_filter(self): + fp1 = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') + self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) + fp2 = self.create_availability_zone_profile( + 'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}') + self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) + + response = self.get(self.AZPS_PATH, params={ + 'fields': ['id', 'name']}) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + for profile in api_list: + self.assertIn('id', profile) + self.assertIn('name', profile) + self.assertNotIn(constants.PROVIDER_NAME, profile) + self.assertNotIn(constants.AVAILABILITY_ZONE_DATA, profile) + + def test_get_all_authorized(self): + fp1 = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') + self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) + fp2 = self.create_availability_zone_profile( + 'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}') + self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.AZPS_PATH) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + + def test_get_all_not_authorized(self): + fp1 = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') + self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) + fp2 = self.create_availability_zone_profile( + 'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}') + self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + self.get(self.AZPS_PATH, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_update(self): + azp = self.create_availability_zone_profile( + 'test_profile', 'noop_driver', '{"x": "y"}') + update_data = {'name': 'the_profile', + constants.PROVIDER_NAME: 
'noop_driver-alt', + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + body = self._build_body(update_data) + self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('the_profile', response.get('name')) + self.assertEqual('noop_driver-alt', + response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"hello": "world"}', + response.get(constants.AVAILABILITY_ZONE_DATA)) + + def test_update_deleted_id(self): + update_data = {'name': 'fake_profile'} + body = self._build_body(update_data) + response = self.put(self.AZP_PATH.format(azp_id=constants.NIL_UUID), + body, status=404) + self.assertEqual('Availability Zone Profile {} not found.'.format( + constants.NIL_UUID), response.json.get('faultstring')) + + def test_update_nothing(self): + azp = self.create_availability_zone_profile( + 'test_profile', 'noop_driver', '{"x": "y"}') + body = self._build_body({}) + self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('test_profile', response.get('name')) + self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', + response.get(constants.AVAILABILITY_ZONE_DATA)) + + def test_update_name_none(self): + self._test_update_param_none(constants.NAME) + + def test_update_provider_name_none(self): + self._test_update_param_none(constants.PROVIDER_NAME) + + def test_update_availability_zone_data_none(self): + self._test_update_param_none(constants.AVAILABILITY_ZONE_DATA) + + def _test_update_param_none(self, param_name): + azp = self.create_availability_zone_profile( + 'test_profile', 'noop_driver', '{"x": "y"}') + expect_error_msg = f"None is not a valid option for {param_name}" + body = self._build_body({param_name: None}) + response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body, + status=400) + self.assertEqual(expect_error_msg, response.json['faultstring']) + + def test_update_no_availability_zone_data(self): + azp = self.create_availability_zone_profile( + 'test_profile', 'noop_driver', '{"x": "y"}') + update_data = {'name': 'the_profile', + constants.PROVIDER_NAME: 'noop_driver-alt'} + body = self._build_body(update_data) + response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('the_profile', response.get('name')) + self.assertEqual('noop_driver-alt', + response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', + response.get(constants.AVAILABILITY_ZONE_DATA)) + + def test_update_authorized(self): + azp = self.create_availability_zone_profile( + 'test_profile', 'noop_driver', '{"x": "y"}') + update_data = {'name': 'the_profile', + constants.PROVIDER_NAME: 'noop_driver-alt', + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + body = self._build_body(update_data) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 
'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), + body) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('the_profile', response.get('name')) + self.assertEqual('noop_driver-alt', + response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"hello": "world"}', + response.get(constants.AVAILABILITY_ZONE_DATA)) + + def test_update_not_authorized(self): + azp = self.create_availability_zone_profile( + 'test_profile', 'noop_driver', '{"x": "y"}') + update_data = {'name': 'the_profile', constants.PROVIDER_NAME: 'amp', + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + body = self._build_body(update_data) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), + body, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('test_profile', response.get('name')) + self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', + response.get(constants.AVAILABILITY_ZONE_DATA)) + + def test_update_in_use(self): + azp = self.create_availability_zone_profile( + 'test_profile', 'noop_driver', '{"x": "y"}') + self.create_availability_zone( + 'name1', 'description', azp.get('id'), True) + + # Test updating provider while in use is not allowed + update_data = {'name': 'the_profile', + constants.PROVIDER_NAME: 'noop_driver-alt'} + body = self._build_body(update_data) + response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body, + status=409) + err_msg = ("Availability Zone Profile {} is in use and cannot be " + "modified.".format(azp.get('id'))) + self.assertEqual(err_msg, response.json.get('faultstring')) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('test_profile', response.get('name')) + self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', + response.get(constants.AVAILABILITY_ZONE_DATA)) + + # Test updating availability zone data while in use is not allowed + update_data = {'name': 'the_profile', + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + body = self._build_body(update_data) + response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body, + status=409) + err_msg = ("Availability Zone Profile {} is in use and cannot be " + "modified.".format(azp.get('id'))) + self.assertEqual(err_msg, response.json.get('faultstring')) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('test_profile', response.get('name')) + self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', + response.get(constants.AVAILABILITY_ZONE_DATA)) + + # Test that you can still update the name when in use + update_data = {'name': 'the_profile'} + body = self._build_body(update_data) + response = 
self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('the_profile', response.get('name')) + self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', + response.get(constants.AVAILABILITY_ZONE_DATA)) + + def test_delete(self): + azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') + self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) + self.delete(self.AZP_PATH.format(azp_id=azp.get('id'))) + response = self.get(self.AZP_PATH.format( + azp_id=azp.get('id')), status=404) + err_msg = f"Availability Zone Profile {azp.get('id')} not found." + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_delete_deleted_id(self): + response = self.delete(self.AZP_PATH.format(azp_id=constants.NIL_UUID), + status=404) + self.assertEqual('Availability Zone Profile {} not found.'.format( + constants.NIL_UUID), response.json.get('faultstring')) + + def test_delete_nonexistent_id(self): + response = self.delete(self.AZP_PATH.format(azp_id='bogus_id'), + status=404) + self.assertEqual('Availability Zone Profile bogus_id not found.', + response.json.get('faultstring')) + + def test_delete_authorized(self): + azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') + self.session.commit() + self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.delete(self.AZP_PATH.format(azp_id=azp.get('id'))) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get(self.AZP_PATH.format( + azp_id=azp.get('id')), status=404) + err_msg = f"Availability Zone Profile {azp.get('id')} not found." 
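+        # The follow-up GET runs without the credential override in place, so
+        # the 404 here confirms the authorized DELETE actually removed the
+        # profile rather than merely being permitted.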
+ self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_delete_not_authorized(self): + azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') + self.assertTrue(uuidutils.is_uuid_like(azp.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + + response = self.delete(self.AZP_PATH.format( + azp_id=azp.get('id')), status=403) + api_azp = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_azp) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('test1', response.get('name')) + + def test_delete_in_use(self): + azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') + self.create_availability_zone( + 'name1', 'description', azp.get('id'), True) + response = self.delete(self.AZP_PATH.format(azp_id=azp.get('id')), + status=409) + err_msg = ("Availability Zone Profile {} is in use and cannot be " + "modified.".format(azp.get('id'))) + self.assertEqual(err_msg, response.json.get('faultstring')) + response = self.get( + self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag) + self.assertEqual('test1', response.get('name')) diff --git a/octavia/tests/functional/api/v2/test_availability_zones.py b/octavia/tests/functional/api/v2/test_availability_zones.py new file mode 100644 index 0000000000..f5e22c1fc4 --- /dev/null +++ b/octavia/tests/functional/api/v2/test_availability_zones.py @@ -0,0 +1,573 @@ +# Copyright 2019 Verizon Media +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
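+
+# The *_authorized / *_not_authorized tests in this module share one pattern:
+# switch the api_settings auth_strategy to TESTING, issue the request (for the
+# authorized variants with RequestContext.to_policy_values patched to return a
+# fixed credential dict), then restore the saved auth_strategy so later tests
+# are unaffected. A minimal sketch of the authorized variant, using only names
+# defined in the tests below:
+#
+#     with mock.patch(
+#             "oslo_context.context.RequestContext.to_policy_values",
+#             return_value=override_credentials):
+#         response = self.get(self.AZS_PATH)
+#     self.conf.config(group='api_settings', auth_strategy=auth_strategy)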
+ +from unittest import mock + +from oslo_utils import uuidutils + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +from octavia.common import constants +import octavia.common.context +from octavia.common import exceptions +from octavia.tests.functional.api.v2 import base + + +class TestAvailabilityZones(base.BaseAPITest): + root_tag = 'availability_zone' + root_tag_list = 'availability_zones' + root_tag_links = 'availability_zones_links' + + def setUp(self): + super().setUp() + self.azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "my_az_1"}') + + def _assert_request_matches_response(self, req, resp, **optionals): + self.assertNotIn('id', resp) # AZs do not expose an ID + req_description = req.get('description') + self.assertEqual(req.get('name'), resp.get('name')) + if not req_description: + self.assertEqual('', resp.get('description')) + else: + self.assertEqual(req.get('description'), resp.get('description')) + self.assertEqual(req.get('availability_zone_profile_id'), + resp.get('availability_zone_profile_id')) + self.assertEqual(req.get('enabled', True), + resp.get('enabled')) + + def test_empty_list(self): + response = self.get(self.AZS_PATH) + api_list = response.json.get(self.root_tag_list) + self.assertEqual([], api_list) + + def test_create(self): + az_json = {'name': 'test1', + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body) + api_az = response.json.get(self.root_tag) + self._assert_request_matches_response(az_json, api_az) + + def test_create_with_missing_name(self): + az_json = {'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute name. Value: " + "'None'. Mandatory field missing.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_long_name(self): + az_json = {'name': 'n' * 256, + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + self.post(self.AZS_PATH, body, status=400) + + def test_create_with_long_description(self): + az_json = {'name': 'test-az', + 'description': 'n' * 256, + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + self.post(self.AZS_PATH, body, status=400) + + def test_create_with_missing_availability_zone_profile(self): + az_json = {'name': 'xyz'} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body, status=400) + err_msg = ( + "Invalid input for field/attribute availability_zone_profile_id. " + "Value: 'None'. Mandatory field missing.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_bad_availability_zone_profile(self): + az_json = {'name': 'xyz', 'availability_zone_profile_id': 'bogus'} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body, status=400) + err_msg = ( + "Invalid input for field/attribute availability_zone_profile_id. " + "Value: 'bogus'. 
Value should be UUID format") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_duplicate_names(self): + self.create_availability_zone( + 'name', 'description', self.azp.get('id'), True) + az_json = {'name': 'name', + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body, status=409) + err_msg = "A availability zone of name already exists." + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_authorized(self): + az_json = {'name': 'test1', + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.post(self.AZS_PATH, body) + api_az = response.json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self._assert_request_matches_response(az_json, api_az) + + def test_create_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + az_json = {'name': 'name', + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body, status=403) + api_az = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_az) + + def test_create_db_failure(self): + az_json = {'name': 'test1', + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + with mock.patch("octavia.db.repositories.AvailabilityZoneRepository." 
+ "create") as mock_create: + mock_create.side_effect = Exception + self.post(self.AZS_PATH, body, status=500) + + def test_get(self): + az = self.create_availability_zone( + 'name', 'description', self.azp.get('id'), True) + response = self.get( + self.AZ_PATH.format( + az_name=az.get('name'))).json.get(self.root_tag) + self.assertEqual('name', response.get('name')) + self.assertEqual('description', response.get('description')) + self.assertEqual(az.get('name'), response.get('name')) + self.assertEqual(self.azp.get('id'), + response.get('availability_zone_profile_id')) + self.assertTrue(response.get('enabled')) + + def test_get_one_fields_filter(self): + az = self.create_availability_zone( + 'name', 'description', self.azp.get('id'), True) + response = self.get( + self.AZ_PATH.format(az_name=az.get('name')), params={ + 'fields': ['name', 'availability_zone_profile_id']} + ).json.get(self.root_tag) + self.assertEqual(az.get('name'), response.get('name')) + self.assertEqual(self.azp.get('id'), + response.get('availability_zone_profile_id')) + self.assertIn('availability_zone_profile_id', response) + self.assertNotIn('description', response) + self.assertNotIn('enabled', response) + + def test_get_one_deleted_name(self): + response = self.get( + self.AZ_PATH.format(az_name=constants.NIL_UUID), status=404) + self.assertEqual( + f'Availability Zone {constants.NIL_UUID} not found.', + response.json.get('faultstring')) + + def test_get_authorized(self): + az = self.create_availability_zone( + 'name', 'description', self.azp.get('id'), True) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + test_context = octavia.common.context.RequestContext( + project_id=project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get( + self.AZ_PATH.format( + az_name=az.get('name'))).json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual('name', response.get('name')) + self.assertEqual('description', response.get('description')) + self.assertEqual(self.azp.get('id'), + response.get('availability_zone_profile_id')) + self.assertTrue(response.get('enabled')) + + def test_get_not_authorized(self): + az = self.create_availability_zone( + 'name', 'description', self.azp.get('id'), True) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + response = self.get(self.AZ_PATH.format( + az_name=az.get('name')), status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response) + + def test_get_all(self): + self.create_availability_zone( + 'name1', 'description', self.azp.get('id'), True) + ref_az_1 = 
{ + 'description': 'description', 'enabled': True, + 'availability_zone_profile_id': self.azp.get('id'), + 'name': 'name1'} + self.create_availability_zone( + 'name2', 'description', self.azp.get('id'), True) + ref_az_2 = { + 'description': 'description', 'enabled': True, + 'availability_zone_profile_id': self.azp.get('id'), + 'name': 'name2'} + response = self.get(self.AZS_PATH) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + self.assertIn(ref_az_1, api_list) + self.assertIn(ref_az_2, api_list) + + def test_get_all_fields_filter(self): + self.create_availability_zone( + 'name1', 'description', self.azp.get('id'), True) + self.create_availability_zone( + 'name2', 'description', self.azp.get('id'), True) + response = self.get(self.AZS_PATH, params={ + 'fields': ['id', 'name']}) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + for az in api_list: + self.assertIn('name', az) + self.assertNotIn('availability_zone_profile_id', az) + self.assertNotIn('description', az) + self.assertNotIn('enabled', az) + + def test_get_all_authorized(self): + self.create_availability_zone( + 'name1', 'description', self.azp.get('id'), True) + self.create_availability_zone( + 'name2', 'description', self.azp.get('id'), True) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.AZS_PATH) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + + def test_get_all_not_authorized(self): + self.create_availability_zone( + 'name1', 'description', self.azp.get('id'), True) + self.create_availability_zone( + 'name2', 'description', self.azp.get('id'), True) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + response = self.get(self.AZS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response) + + def test_update(self): + az_json = {'name': 'Fancy_Availability_Zone', + 'description': 'A great az. Pick me!', + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body) + api_az = response.json.get(self.root_tag) + availability_zone_name = api_az.get('name') + + az_json = {'description': 'An even better az. Pick me!', + 'enabled': False} + body = self._build_body(az_json) + self.put(self.AZ_PATH.format(az_name=availability_zone_name), body) + + updated_az = self.get(self.AZ_PATH.format( + az_name=availability_zone_name)).json.get(self.root_tag) + self.assertEqual('An even better az. 
Pick me!', + updated_az.get('description')) + self.assertEqual(availability_zone_name, updated_az.get('name')) + self.assertEqual(self.azp.get('id'), + updated_az.get('availability_zone_profile_id')) + self.assertFalse(updated_az.get('enabled')) + + def test_update_deleted_name(self): + update_json = {'description': 'fake_desc'} + body = self._build_body(update_json) + response = self.put( + self.AZ_PATH.format(az_name=constants.NIL_UUID), body, + status=404) + self.assertEqual( + f'Availability Zone {constants.NIL_UUID} not found.', + response.json.get('faultstring')) + + def test_update_none(self): + az_json = {'name': 'Fancy_Availability_Zone', + 'description': 'A great az. Pick me!', + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body) + api_az = response.json.get(self.root_tag) + availability_zone_name = api_az.get('name') + + az_json = {} + body = self._build_body(az_json) + self.put(self.AZ_PATH.format(az_name=availability_zone_name), body) + + updated_az = self.get(self.AZ_PATH.format( + az_name=availability_zone_name)).json.get(self.root_tag) + self.assertEqual('Fancy_Availability_Zone', updated_az.get('name')) + self.assertEqual('A great az. Pick me!', + updated_az.get('description')) + self.assertEqual(availability_zone_name, updated_az.get('name')) + self.assertEqual(self.azp.get('id'), + updated_az.get('availability_zone_profile_id')) + self.assertTrue(updated_az.get('enabled')) + + def test_update_availability_zone_profile_id(self): + az_json = {'name': 'Fancy_Availability_Zone', + 'description': 'A great az. Pick me!', + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body) + api_az = response.json.get(self.root_tag) + availability_zone_name = api_az.get('name') + + az_json = {'availability_zone_profile_id': uuidutils.generate_uuid()} + body = self._build_body(az_json) + self.put(self.AZ_PATH.format(az_name=availability_zone_name), + body, status=400) + updated_az = self.get(self.AZ_PATH.format( + az_name=availability_zone_name)).json.get(self.root_tag) + self.assertEqual(self.azp.get('id'), + updated_az.get('availability_zone_profile_id')) + + def test_update_authorized(self): + az_json = {'name': 'Fancy_Availability_Zone', + 'description': 'A great az. Pick me!', + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body) + api_az = response.json.get(self.root_tag) + availability_zone_name = api_az.get('name') + + az_json = {'description': 'An even better az. 
Pick me!', + 'enabled': False} + body = self._build_body(az_json) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.put(self.AZ_PATH.format(az_name=availability_zone_name), + body) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + updated_az = self.get(self.AZ_PATH.format( + az_name=availability_zone_name)).json.get(self.root_tag) + self.assertEqual('An even better az. Pick me!', + updated_az.get('description')) + self.assertEqual(availability_zone_name, updated_az.get('name')) + self.assertEqual(self.azp.get('id'), + updated_az.get('availability_zone_profile_id')) + self.assertFalse(updated_az.get('enabled')) + + def test_update_not_authorized(self): + az_json = {'name': 'Fancy_Availability_Zone', + 'description': 'A great az. Pick me!', + 'availability_zone_profile_id': self.azp.get('id')} + body = self._build_body(az_json) + response = self.post(self.AZS_PATH, body) + api_az = response.json.get(self.root_tag) + availability_zone_name = api_az.get('name') + + az_json = {'description': 'An even better az. Pick me!', + 'enabled': False} + body = self._build_body(az_json) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + self.put(self.AZ_PATH.format(az_name=availability_zone_name), + body, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + updated_az = self.get(self.AZ_PATH.format( + az_name=availability_zone_name)).json.get(self.root_tag) + self.assertEqual('A great az. Pick me!', + updated_az.get('description')) + self.assertEqual(availability_zone_name, updated_az.get('name')) + self.assertEqual(self.azp.get('id'), + updated_az.get('availability_zone_profile_id')) + self.assertTrue(updated_az.get('enabled')) + + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.update') + def test_update_exception(self, mock_update): + mock_update.side_effect = [exceptions.OctaviaException()] + update_json = {'description': 'Some availability zone.'} + body = self._build_body(update_json) + response = self.put(self.AZ_PATH.format(az_name='bogus'), body, + status=500) + self.assertEqual('An unknown exception occurred.', + response.json.get('faultstring')) + + def test_delete(self): + az = self.create_availability_zone( + 'name1', 'description', self.azp.get('id'), True) + self.delete(self.AZ_PATH.format(az_name=az.get('name'))) + response = self.get(self.AZ_PATH.format(az_name=az.get('name')), + status=404) + err_msg = f"Availability Zone {az.get('name')} not found." 
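+        # Unlike availability zone profiles, availability zones are addressed
+        # by name (AZ_PATH) rather than by UUID, so the not-found fault string
+        # echoes the zone name back to the caller.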
+ self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_delete_nonexistent_name(self): + response = self.delete( + self.AZ_PATH.format(az_name='bogus_name'), status=404) + self.assertEqual('Availability Zone bogus_name not found.', + response.json.get('faultstring')) + + def test_delete_deleted_name(self): + response = self.delete( + self.AZ_PATH.format(az_name=constants.NIL_UUID), status=404) + self.assertEqual( + f'Availability Zone {constants.NIL_UUID} not found.', + response.json.get('faultstring')) + + def test_delete_authorized(self): + az = self.create_availability_zone( + 'name1', 'description', self.azp.get('id'), True) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.delete( + self.AZ_PATH.format(az_name=az.get('name'))) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get(self.AZ_PATH.format(az_name=az.get('name')), + status=404) + err_msg = f"Availability Zone {az.get('name')} not found." + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_delete_not_authorized(self): + az = self.create_availability_zone( + 'name1', 'description', self.azp.get('id'), True) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + + response = self.delete(self.AZ_PATH.format(az_name=az.get('name')), + status=403) + api_az = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_az) + + response = self.get(self.AZ_PATH.format( + az_name=az.get('name'))).json.get(self.root_tag) + self.assertEqual('name1', response.get('name')) + + def test_delete_in_use(self): + az = self.create_availability_zone( + 'name1', 'description', self.azp.get('id'), True) + project_id = uuidutils.generate_uuid() + lb_id = uuidutils.generate_uuid() + self.create_load_balancer(lb_id, name='lb1', + project_id=project_id, + description='desc1', + availability_zone=az.get('name'), + admin_state_up=False) + self.delete(self.AZ_PATH.format(az_name=az.get('name')), + status=409) + response = self.get(self.AZ_PATH.format( + az_name=az.get('name'))).json.get(self.root_tag) + self.assertEqual('name1', response.get('name')) + + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.delete') + def test_delete_exception(self, mock_delete): + mock_delete.side_effect = [exceptions.OctaviaException()] + response = self.delete(self.AZ_PATH.format(az_name='bogus'), + status=500) + self.assertEqual('An unknown exception occurred.', + response.json.get('faultstring')) diff --git a/octavia/tests/functional/api/v2/test_flavor_profiles.py b/octavia/tests/functional/api/v2/test_flavor_profiles.py new file mode 100644 index 0000000000..e4edb4a0ef --- /dev/null +++ 
b/octavia/tests/functional/api/v2/test_flavor_profiles.py @@ -0,0 +1,569 @@ +# Copyright 2017 Walmart Stores Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_db import exception as odb_exceptions +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.tests.functional.api.v2 import base + + +class TestFlavorProfiles(base.BaseAPITest): + root_tag = 'flavorprofile' + root_tag_list = 'flavorprofiles' + root_tag_links = 'flavorprofile_links' + + def _assert_request_matches_response(self, req, resp, **optionals): + self.assertTrue(uuidutils.is_uuid_like(resp.get('id'))) + self.assertEqual(req.get('name'), resp.get('name')) + self.assertEqual(req.get(constants.PROVIDER_NAME), + resp.get(constants.PROVIDER_NAME)) + self.assertEqual(req.get(constants.FLAVOR_DATA), + resp.get(constants.FLAVOR_DATA)) + + def test_empty_list(self): + response = self.get(self.FPS_PATH) + api_list = response.json.get(self.root_tag_list) + self.assertEqual([], api_list) + + def test_create(self): + fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', + constants.FLAVOR_DATA: '{"hello": "world"}'} + body = self._build_body(fp_json) + response = self.post(self.FPS_PATH, body) + api_fp = response.json.get(self.root_tag) + self._assert_request_matches_response(fp_json, api_fp) + + def test_create_with_missing_name(self): + fp_json = {constants.PROVIDER_NAME: 'pr1', + constants.FLAVOR_DATA: '{"x": "y"}'} + body = self._build_body(fp_json) + response = self.post(self.FPS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute name. Value: " + "'None'. Mandatory field missing.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_missing_provider(self): + fp_json = {'name': 'xyz', constants.FLAVOR_DATA: '{"x": "y"}'} + body = self._build_body(fp_json) + response = self.post(self.FPS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute provider_name. " + "Value: 'None'. Mandatory field missing.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_missing_flavor_data(self): + fp_json = {'name': 'xyz', constants.PROVIDER_NAME: 'pr1'} + body = self._build_body(fp_json) + response = self.post(self.FPS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute flavor_data. " + "Value: 'None'. 
Mandatory field missing.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_empty_flavor_data(self): + fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', + constants.FLAVOR_DATA: '{}'} + body = self._build_body(fp_json) + response = self.post(self.FPS_PATH, body) + api_fp = response.json.get(self.root_tag) + self._assert_request_matches_response(fp_json, api_fp) + + def test_create_with_long_name(self): + fp_json = {'name': 'n' * 256, constants.PROVIDER_NAME: 'test1', + constants.FLAVOR_DATA: '{"hello": "world"}'} + body = self._build_body(fp_json) + self.post(self.FPS_PATH, body, status=400) + + def test_create_with_long_provider(self): + fp_json = {'name': 'name1', constants.PROVIDER_NAME: 'n' * 256, + constants.FLAVOR_DATA: '{"hello": "world"}'} + body = self._build_body(fp_json) + self.post(self.FPS_PATH, body, status=400) + + def test_create_with_long_flavor_data(self): + fp_json = {'name': 'name1', constants.PROVIDER_NAME: 'amp', + constants.FLAVOR_DATA: 'n' * 4097} + body = self._build_body(fp_json) + self.post(self.FPS_PATH, body, status=400) + + def test_create_authorized(self): + fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', + constants.FLAVOR_DATA: '{"hello": "world"}'} + body = self._build_body(fp_json) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.post(self.FPS_PATH, body) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + api_fp = response.json.get(self.root_tag) + self._assert_request_matches_response(fp_json, api_fp) + + def test_create_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + fp_json = {'name': 'name', + constants.PROVIDER_NAME: 'xyz', + constants.FLAVOR_DATA: '{"x": "y"}'} + body = self._build_body(fp_json) + response = self.post(self.FPS_PATH, body, status=403) + api_fp = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_fp) + + def test_create_db_failure(self): + fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', + constants.FLAVOR_DATA: '{"hello": "world"}'} + body = self._build_body(fp_json) + with mock.patch("octavia.db.repositories.FlavorProfileRepository." 
+ "create") as mock_create: + mock_create.side_effect = Exception + self.post(self.FPS_PATH, body, status=500) + + mock_create.side_effect = odb_exceptions.DBDuplicateEntry + self.post(self.FPS_PATH, body, status=409) + + def test_create_with_invalid_json(self): + fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver', + constants.FLAVOR_DATA: '{hello: "world"}'} + body = self._build_body(fp_json) + self.post(self.FPS_PATH, body, status=400) + + def test_get(self): + fp = self.create_flavor_profile('name', 'noop_driver', + '{"x": "y"}') + self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) + response = self.get( + self.FP_PATH.format( + fp_id=fp.get('id'))).json.get(self.root_tag) + self.assertEqual('name', response.get('name')) + self.assertEqual(fp.get('id'), response.get('id')) + + def test_get_one_deleted_id(self): + response = self.get(self.FP_PATH.format(fp_id=constants.NIL_UUID), + status=404) + self.assertEqual('Flavor profile {} not found.'.format( + constants.NIL_UUID), response.json.get('faultstring')) + + def test_get_one_fields_filter(self): + fp = self.create_flavor_profile('name', 'noop_driver', + '{"x": "y"}') + self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id')), params={ + 'fields': ['id', constants.PROVIDER_NAME]} + ).json.get(self.root_tag) + self.assertEqual(fp.get('id'), response.get('id')) + self.assertIn('id', response) + self.assertIn(constants.PROVIDER_NAME, response) + self.assertNotIn('name', response) + self.assertNotIn(constants.FLAVOR_DATA, response) + + def test_get_authorized(self): + fp = self.create_flavor_profile('name', 'noop_driver', + '{"x": "y"}') + self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get( + self.FP_PATH.format( + fp_id=fp.get('id'))).json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual('name', response.get('name')) + self.assertEqual(fp.get('id'), response.get('id')) + + def test_get_not_authorized(self): + fp = self.create_flavor_profile('name', 'noop_driver', + '{"x": "y"}') + self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + self.get(self.FP_PATH.format(fp_id=fp.get('id')), status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_get_all(self): + fp1 = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + ref_fp_1 = {'flavor_data': '{"image": "ubuntu"}', + 'id': fp1.get('id'), 'name': 'test1', + constants.PROVIDER_NAME: 'noop_driver'} + self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) + fp2 = 
self.create_flavor_profile('test2', 'noop_driver-alt', + '{"image": "ubuntu"}') + ref_fp_2 = {'flavor_data': '{"image": "ubuntu"}', + 'id': fp2.get('id'), 'name': 'test2', + constants.PROVIDER_NAME: 'noop_driver-alt'} + self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) + + response = self.get(self.FPS_PATH) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + self.assertIn(ref_fp_1, api_list) + self.assertIn(ref_fp_2, api_list) + + def test_get_all_fields_filter(self): + fp1 = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) + fp2 = self.create_flavor_profile('test2', 'noop_driver-alt', + '{"image": "ubuntu"}') + self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) + + response = self.get(self.FPS_PATH, params={ + 'fields': ['id', 'name']}) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + for profile in api_list: + self.assertIn('id', profile) + self.assertIn('name', profile) + self.assertNotIn(constants.PROVIDER_NAME, profile) + self.assertNotIn(constants.FLAVOR_DATA, profile) + + def test_get_all_authorized(self): + fp1 = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) + fp2 = self.create_flavor_profile('test2', 'noop_driver-alt', + '{"image": "ubuntu"}') + self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.FPS_PATH) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + + def test_get_all_not_authorized(self): + fp1 = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + self.assertTrue(uuidutils.is_uuid_like(fp1.get('id'))) + fp2 = self.create_flavor_profile('test2', 'noop_driver-alt', + '{"image": "ubuntu"}') + self.assertTrue(uuidutils.is_uuid_like(fp2.get('id'))) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + self.get(self.FPS_PATH, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_update(self): + fp = self.create_flavor_profile('test_profile', 'noop_driver', + '{"x": "y"}') + update_data = {'name': 'the_profile', + constants.PROVIDER_NAME: 'noop_driver-alt', + constants.FLAVOR_DATA: '{"hello": "world"}'} + body = self._build_body(update_data) + response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag) + self.assertEqual('the_profile', response.get('name')) + 
self.assertEqual('noop_driver-alt', + response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"hello": "world"}', + response.get(constants.FLAVOR_DATA)) + + def test_update_deleted_id(self): + update_data = {'name': 'fake_profile'} + body = self._build_body(update_data) + response = self.put(self.FP_PATH.format(fp_id=constants.NIL_UUID), + body, status=404) + self.assertEqual('Flavor profile {} not found.'.format( + constants.NIL_UUID), response.json.get('faultstring')) + + def test_update_nothing(self): + fp = self.create_flavor_profile('test_profile', 'noop_driver', + '{"x": "y"}') + body = self._build_body({}) + response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag) + self.assertEqual('test_profile', response.get('name')) + self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', + response.get(constants.FLAVOR_DATA)) + + def test_update_name_none(self): + self._test_update_param_none(constants.NAME) + + def test_update_provider_name_none(self): + self._test_update_param_none(constants.PROVIDER_NAME) + + def test_update_flavor_data_none(self): + self._test_update_param_none(constants.FLAVOR_DATA) + + def _test_update_param_none(self, param_name): + fp = self.create_flavor_profile('test_profile', 'noop_driver', + '{"x": "y"}') + expect_error_msg = f"None is not a valid option for {param_name}" + body = self._build_body({param_name: None}) + response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body, + status=400) + self.assertEqual(expect_error_msg, response.json['faultstring']) + + def test_update_no_flavor_data(self): + fp = self.create_flavor_profile('test_profile', 'noop_driver', + '{"x": "y"}') + update_data = {'name': 'the_profile', + constants.PROVIDER_NAME: 'noop_driver-alt'} + body = self._build_body(update_data) + response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag) + self.assertEqual('the_profile', response.get('name')) + self.assertEqual('noop_driver-alt', + response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA)) + + def test_update_authorized(self): + fp = self.create_flavor_profile('test_profile', 'noop_driver', + '{"x": "y"}') + update_data = {'name': 'the_profile', + constants.PROVIDER_NAME: 'noop_driver-alt', + constants.FLAVOR_DATA: '{"hello": "world"}'} + body = self._build_body(update_data) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), + body) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag) + 
self.assertEqual('the_profile', response.get('name')) + self.assertEqual('noop_driver-alt', + response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"hello": "world"}', + response.get(constants.FLAVOR_DATA)) + + def test_update_not_authorized(self): + fp = self.create_flavor_profile('test_profile', 'noop_driver', + '{"x": "y"}') + update_data = {'name': 'the_profile', constants.PROVIDER_NAME: 'amp', + constants.FLAVOR_DATA: '{"hello": "world"}'} + body = self._build_body(update_data) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), + body, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag) + self.assertEqual('test_profile', response.get('name')) + self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', + response.get(constants.FLAVOR_DATA)) + + def test_update_in_use(self): + fp = self.create_flavor_profile('test_profile', 'noop_driver', + '{"x": "y"}') + self.create_flavor('name1', 'description', fp.get('id'), True) + + # Test updating provider while in use is not allowed + update_data = {'name': 'the_profile', + constants.PROVIDER_NAME: 'noop_driver-alt'} + body = self._build_body(update_data) + response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body, + status=409) + err_msg = (f"Flavor profile {fp.get('id')} is in use and cannot be " + f"modified.") + self.assertEqual(err_msg, response.json.get('faultstring')) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag) + self.assertEqual('test_profile', response.get('name')) + self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA)) + + # Test updating flavor data while in use is not allowed + update_data = {'name': 'the_profile', + constants.FLAVOR_DATA: '{"hello": "world"}'} + body = self._build_body(update_data) + response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body, + status=409) + err_msg = (f"Flavor profile {fp.get('id')} is in use and cannot " + f"be modified.") + self.assertEqual(err_msg, response.json.get('faultstring')) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag) + self.assertEqual('test_profile', response.get('name')) + self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA)) + + # Test that you can still update the name when in use + update_data = {'name': 'the_profile'} + body = self._build_body(update_data) + response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag) + self.assertEqual('the_profile', response.get('name')) + self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME)) + self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA)) + + def test_delete(self): + fp = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) + self.delete(self.FP_PATH.format(fp_id=fp.get('id'))) + response = self.get(self.FP_PATH.format( + fp_id=fp.get('id')), status=404) + err_msg = f"Flavor 
Profile {fp.get('id')} not found." + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_delete_deleted_id(self): + response = self.delete(self.FP_PATH.format(fp_id=constants.NIL_UUID), + status=404) + self.assertEqual('Flavor profile {} not found.'.format( + constants.NIL_UUID), response.json.get('faultstring')) + + def test_delete_nonexistent_id(self): + response = self.delete(self.FP_PATH.format(fp_id='bogus_id'), + status=404) + self.assertEqual('Flavor profile bogus_id not found.', + response.json.get('faultstring')) + + def test_delete_authorized(self): + fp = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.delete(self.FP_PATH.format(fp_id=fp.get('id'))) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get(self.FP_PATH.format( + fp_id=fp.get('id')), status=404) + err_msg = f"Flavor Profile {fp.get('id')} not found." + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_delete_not_authorized(self): + fp = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + self.assertTrue(uuidutils.is_uuid_like(fp.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + + response = self.delete(self.FP_PATH.format( + fp_id=fp.get('id')), status=403) + api_fp = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_fp) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag) + self.assertEqual('test1', response.get('name')) + + def test_delete_in_use(self): + fp = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + self.create_flavor('name1', 'description', fp.get('id'), True) + response = self.delete(self.FP_PATH.format(fp_id=fp.get('id')), + status=409) + err_msg = (f"Flavor profile {fp.get('id')} is in use and cannot be " + f"modified.") + self.assertEqual(err_msg, response.json.get('faultstring')) + response = self.get( + self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag) + self.assertEqual('test1', response.get('name')) diff --git a/octavia/tests/functional/api/v2/test_flavors.py b/octavia/tests/functional/api/v2/test_flavors.py new file mode 100644 index 0000000000..9eee27f718 --- /dev/null +++ b/octavia/tests/functional/api/v2/test_flavors.py @@ -0,0 +1,597 @@ +# Copyright 2017 Walmart Stores Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from oslo_utils import uuidutils + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +from octavia.common import constants +import octavia.common.context +from octavia.common import exceptions +from octavia.tests.functional.api.v2 import base + + +class TestFlavors(base.BaseAPITest): + root_tag = 'flavor' + root_tag_list = 'flavors' + root_tag_links = 'flavors_links' + + def setUp(self): + super().setUp() + self.fp = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + + def _assert_request_matches_response(self, req, resp, **optionals): + self.assertTrue(uuidutils.is_uuid_like(resp.get('id'))) + req_description = req.get('description') + self.assertEqual(req.get('name'), resp.get('name')) + if not req_description: + self.assertEqual('', resp.get('description')) + else: + self.assertEqual(req.get('description'), resp.get('description')) + self.assertEqual(req.get('flavor_profile_id'), + resp.get('flavor_profile_id')) + self.assertEqual(req.get('enabled', True), + resp.get('enabled')) + + def test_empty_list(self): + response = self.get(self.FLAVORS_PATH) + api_list = response.json.get(self.root_tag_list) + self.assertEqual([], api_list) + + def test_create(self): + flavor_json = {'name': 'test1', + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body) + api_flavor = response.json.get(self.root_tag) + self._assert_request_matches_response(flavor_json, api_flavor) + + def test_create_with_missing_name(self): + flavor_json = {'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute name. Value: " + "'None'. Mandatory field missing.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_long_name(self): + flavor_json = {'name': 'n' * 256, + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + self.post(self.FLAVORS_PATH, body, status=400) + + def test_create_with_long_description(self): + flavor_json = {'name': 'test-flavor', + 'description': 'n' * 256, + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + self.post(self.FLAVORS_PATH, body, status=400) + + def test_create_with_missing_flavor_profile(self): + flavor_json = {'name': 'xyz'} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute flavor_profile_id. " + "Value: 'None'. Mandatory field missing.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_bad_flavor_profile(self): + flavor_json = {'name': 'xyz', 'flavor_profile_id': 'bogus'} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute flavor_profile_id. " + "Value: 'bogus'. 
Value should be UUID format") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_duplicate_names(self): + flavor1 = self.create_flavor('name', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id'))) + flavor_json = {'name': 'name', + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body, status=409) + err_msg = "A flavor of name already exists." + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_authorized(self): + flavor_json = {'name': 'test1', + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.post(self.FLAVORS_PATH, body) + api_flavor = response.json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self._assert_request_matches_response(flavor_json, api_flavor) + + def test_create_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + flavor_json = {'name': 'name', + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body, status=403) + api_flavor = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_flavor) + + def test_create_db_failure(self): + flavor_json = {'name': 'test1', + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + with mock.patch("octavia.db.repositories.FlavorRepository." 
+ "create") as mock_create: + mock_create.side_effect = Exception + self.post(self.FLAVORS_PATH, body, status=500) + + def test_get(self): + flavor = self.create_flavor('name', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) + response = self.get( + self.FLAVOR_PATH.format( + flavor_id=flavor.get('id'))).json.get(self.root_tag) + self.assertEqual('name', response.get('name')) + self.assertEqual('description', response.get('description')) + self.assertEqual(flavor.get('id'), response.get('id')) + self.assertEqual(self.fp.get('id'), response.get('flavor_profile_id')) + self.assertTrue(response.get('enabled')) + + def test_get_one_fields_filter(self): + flavor = self.create_flavor('name', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) + response = self.get( + self.FLAVOR_PATH.format(flavor_id=flavor.get('id')), params={ + 'fields': ['id', 'flavor_profile_id']}).json.get(self.root_tag) + self.assertEqual(flavor.get('id'), response.get('id')) + self.assertEqual(self.fp.get('id'), response.get('flavor_profile_id')) + self.assertIn('id', response) + self.assertIn('flavor_profile_id', response) + self.assertNotIn('name', response) + self.assertNotIn('description', response) + self.assertNotIn('enabled', response) + + def test_get_one_deleted_id(self): + response = self.get( + self.FLAVOR_PATH.format(flavor_id=constants.NIL_UUID), status=404) + self.assertEqual(f'Flavor {constants.NIL_UUID} not found.', + response.json.get('faultstring')) + + def test_get_authorized(self): + flavor = self.create_flavor('name', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + test_context = octavia.common.context.RequestContext( + project_id=project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get( + self.FLAVOR_PATH.format( + flavor_id=flavor.get('id'))).json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual('name', response.get('name')) + self.assertEqual('description', response.get('description')) + self.assertEqual(flavor.get('id'), response.get('id')) + self.assertEqual(self.fp.get('id'), response.get('flavor_profile_id')) + self.assertTrue(response.get('enabled')) + + def test_get_not_authorized(self): + flavor = self.create_flavor('name', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + response = self.get(self.FLAVOR_PATH.format( + 
flavor_id=flavor.get('id')), status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response) + + def test_get_all(self): + flavor1 = self.create_flavor('name1', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id'))) + ref_flavor_1 = { + 'description': 'description', 'enabled': True, + 'flavor_profile_id': self.fp.get('id'), + 'id': flavor1.get('id'), + 'name': 'name1'} + flavor2 = self.create_flavor('name2', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id'))) + ref_flavor_2 = { + 'description': 'description', 'enabled': True, + 'flavor_profile_id': self.fp.get('id'), + 'id': flavor2.get('id'), + 'name': 'name2'} + response = self.get(self.FLAVORS_PATH) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + self.assertIn(ref_flavor_1, api_list) + self.assertIn(ref_flavor_2, api_list) + + def test_get_all_fields_filter(self): + flavor1 = self.create_flavor('name1', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id'))) + flavor2 = self.create_flavor('name2', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id'))) + response = self.get(self.FLAVORS_PATH, params={ + 'fields': ['id', 'name']}) + api_list = response.json.get(self.root_tag_list) + self.assertEqual(2, len(api_list)) + for flavor in api_list: + self.assertIn('id', flavor) + self.assertIn('name', flavor) + self.assertNotIn('flavor_profile_id', flavor) + self.assertNotIn('description', flavor) + self.assertNotIn('enabled', flavor) + + def test_get_all_authorized(self): + flavor1 = self.create_flavor('name1', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id'))) + flavor2 = self.create_flavor('name2', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id'))) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + test_context = octavia.common.context.RequestContext( + project_id=project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.FLAVORS_PATH) + api_list = response.json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(2, len(api_list)) + + def test_get_all_not_authorized(self): + flavor1 = self.create_flavor('name1', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id'))) + flavor2 = self.create_flavor('name2', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + 
auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + response = self.get(self.FLAVORS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response) + + def test_update(self): + flavor_json = {'name': 'Fancy_Flavor', + 'description': 'A great flavor. Pick me!', + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body) + api_flavor = response.json.get(self.root_tag) + flavor_id = api_flavor.get('id') + + flavor_json = {'name': 'Better_Flavor', + 'description': 'An even better flavor. Pick me!', + 'enabled': False} + body = self._build_body(flavor_json) + response = self.put(self.FLAVOR_PATH.format(flavor_id=flavor_id), body) + + updated_flavor = self.get(self.FLAVOR_PATH.format( + flavor_id=flavor_id)).json.get(self.root_tag) + self.assertEqual('Better_Flavor', updated_flavor.get('name')) + self.assertEqual('An even better flavor. Pick me!', + updated_flavor.get('description')) + self.assertEqual(flavor_id, updated_flavor.get('id')) + self.assertEqual(self.fp.get('id'), + updated_flavor.get('flavor_profile_id')) + self.assertFalse(updated_flavor.get('enabled')) + + def test_update_deleted_id(self): + update_json = {'name': 'fake_name'} + body = self._build_body(update_json) + response = self.put( + self.FLAVOR_PATH.format(flavor_id=constants.NIL_UUID), body, + status=404) + self.assertEqual(f'Flavor {constants.NIL_UUID} not found.', + response.json.get('faultstring')) + + def test_update_none(self): + flavor_json = {'name': 'Fancy_Flavor', + 'description': 'A great flavor. Pick me!', + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body) + api_flavor = response.json.get(self.root_tag) + flavor_id = api_flavor.get('id') + + flavor_json = {} + body = self._build_body(flavor_json) + response = self.put(self.FLAVOR_PATH.format(flavor_id=flavor_id), body) + + updated_flavor = self.get(self.FLAVOR_PATH.format( + flavor_id=flavor_id)).json.get(self.root_tag) + self.assertEqual('Fancy_Flavor', updated_flavor.get('name')) + self.assertEqual('A great flavor. Pick me!', + updated_flavor.get('description')) + self.assertEqual(flavor_id, updated_flavor.get('id')) + self.assertEqual(self.fp.get('id'), + updated_flavor.get('flavor_profile_id')) + self.assertTrue(updated_flavor.get('enabled')) + + def test_update_flavor_profile_id(self): + flavor_json = {'name': 'Fancy_Flavor', + 'description': 'A great flavor. Pick me!', + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body) + api_flavor = response.json.get(self.root_tag) + flavor_id = api_flavor.get('id') + + flavor_json = {'flavor_profile_id': uuidutils.generate_uuid()} + body = self._build_body(flavor_json) + response = self.put(self.FLAVOR_PATH.format(flavor_id=flavor_id), + body, status=400) + updated_flavor = self.get(self.FLAVOR_PATH.format( + flavor_id=flavor_id)).json.get(self.root_tag) + self.assertEqual(self.fp.get('id'), + updated_flavor.get('flavor_profile_id')) + + def test_update_authorized(self): + flavor_json = {'name': 'Fancy_Flavor', + 'description': 'A great flavor. 
Pick me!', + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body) + api_flavor = response.json.get(self.root_tag) + flavor_id = api_flavor.get('id') + + flavor_json = {'name': 'Better_Flavor', + 'description': 'An even better flavor. Pick me!', + 'enabled': False} + body = self._build_body(flavor_json) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.put(self.FLAVOR_PATH.format( + flavor_id=flavor_id), body) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + updated_flavor = self.get(self.FLAVOR_PATH.format( + flavor_id=flavor_id)).json.get(self.root_tag) + self.assertEqual('Better_Flavor', updated_flavor.get('name')) + self.assertEqual('An even better flavor. Pick me!', + updated_flavor.get('description')) + self.assertEqual(flavor_id, updated_flavor.get('id')) + self.assertEqual(self.fp.get('id'), + updated_flavor.get('flavor_profile_id')) + self.assertFalse(updated_flavor.get('enabled')) + + def test_update_not_authorized(self): + flavor_json = {'name': 'Fancy_Flavor', + 'description': 'A great flavor. Pick me!', + 'flavor_profile_id': self.fp.get('id')} + body = self._build_body(flavor_json) + response = self.post(self.FLAVORS_PATH, body) + api_flavor = response.json.get(self.root_tag) + flavor_id = api_flavor.get('id') + + flavor_json = {'name': 'Better_Flavor', + 'description': 'An even better flavor. Pick me!', + 'enabled': False} + body = self._build_body(flavor_json) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + response = self.put(self.FLAVOR_PATH.format(flavor_id=flavor_id), + body, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + updated_flavor = self.get(self.FLAVOR_PATH.format( + flavor_id=flavor_id)).json.get(self.root_tag) + self.assertEqual('Fancy_Flavor', updated_flavor.get('name')) + self.assertEqual('A great flavor. 
Pick me!', + updated_flavor.get('description')) + self.assertEqual(flavor_id, updated_flavor.get('id')) + self.assertEqual(self.fp.get('id'), + updated_flavor.get('flavor_profile_id')) + self.assertTrue(updated_flavor.get('enabled')) + + @mock.patch('octavia.db.repositories.FlavorRepository.update') + def test_update_exception(self, mock_update): + mock_update.side_effect = [exceptions.OctaviaException()] + update_json = {'name': 'A_Flavor'} + body = self._build_body(update_json) + response = self.put(self.FLAVOR_PATH.format(flavor_id='bogus'), body, + status=500) + self.assertEqual('An unknown exception occurred.', + response.json.get('faultstring')) + + def test_delete(self): + flavor = self.create_flavor('name1', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) + self.delete(self.FLAVOR_PATH.format(flavor_id=flavor.get('id'))) + response = self.get(self.FLAVOR_PATH.format( + flavor_id=flavor.get('id')), status=404) + err_msg = f"Flavor {flavor.get('id')} not found." + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_delete_nonexistent_id(self): + response = self.delete( + self.FLAVOR_PATH.format(flavor_id='bogus_id'), status=404) + self.assertEqual('Flavor bogus_id not found.', + response.json.get('faultstring')) + + def test_delete_deleted_id(self): + response = self.delete( + self.FLAVOR_PATH.format(flavor_id=constants.NIL_UUID), status=404) + self.assertEqual(f'Flavor {constants.NIL_UUID} not found.', + response.json.get('faultstring')) + + def test_delete_authorized(self): + flavor = self.create_flavor('name1', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.delete( + self.FLAVOR_PATH.format(flavor_id=flavor.get('id'))) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get(self.FLAVOR_PATH.format( + flavor_id=flavor.get('id')), status=404) + err_msg = f"Flavor {flavor.get('id')} not found." 
+ self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_delete_not_authorized(self): + flavor = self.create_flavor('name1', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + + response = self.delete(self.FLAVOR_PATH.format( + flavor_id=flavor.get('id')), status=403) + api_flavor = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_flavor) + + response = self.get(self.FLAVOR_PATH.format( + flavor_id=flavor.get('id'))).json.get(self.root_tag) + self.assertEqual('name1', response.get('name')) + + def test_delete_in_use(self): + flavor = self.create_flavor('name1', 'description', self.fp.get('id'), + True) + self.assertTrue(uuidutils.is_uuid_like(flavor.get('id'))) + project_id = uuidutils.generate_uuid() + lb_id = uuidutils.generate_uuid() + self.create_load_balancer(lb_id, name='lb1', + project_id=project_id, + description='desc1', + flavor_id=flavor.get('id'), + admin_state_up=False) + self.delete(self.FLAVOR_PATH.format(flavor_id=flavor.get('id')), + status=409) + response = self.get(self.FLAVOR_PATH.format( + flavor_id=flavor.get('id'))).json.get(self.root_tag) + self.assertEqual('name1', response.get('name')) + + @mock.patch('octavia.db.repositories.FlavorRepository.delete') + def test_delete_exception(self, mock_delete): + mock_delete.side_effect = [exceptions.OctaviaException()] + response = self.delete(self.FLAVOR_PATH.format(flavor_id='bogus'), + status=500) + self.assertEqual('An unknown exception occurred.', + response.json.get('faultstring')) diff --git a/octavia/tests/functional/api/v2/test_health_monitor.py b/octavia/tests/functional/api/v2/test_health_monitor.py new file mode 100644 index 0000000000..1cb5eddce9 --- /dev/null +++ b/octavia/tests/functional/api/v2/test_health_monitor.py @@ -0,0 +1,2211 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
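+
+"""Functional tests for the v2 health monitor API.
+
+A minimal sketch of the create call these tests drive (the path is
+self.HMS_PATH from the functional test base class; the JSON values are
+illustrative only, not the fixtures used below):
+
+    POST <api_base>/lbaas/healthmonitors
+    {"healthmonitor": {"pool_id": "<pool-uuid>", "type": "HTTP",
+                       "delay": 1, "timeout": 1,
+                       "max_retries_down": 1, "max_retries": 1}}
+"""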
+ +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +import octavia.common.context +from octavia.common import data_models +from octavia.common import exceptions +from octavia.db import repositories +from octavia.tests.functional.api.v2 import base +from octavia_lib.common import constants as lib_consts + + +class TestHealthMonitor(base.BaseAPITest): + + root_tag = 'healthmonitor' + root_tag_list = 'healthmonitors' + root_tag_links = 'healthmonitors_links' + + def setUp(self): + super().setUp() + self.lb = self.create_load_balancer( + uuidutils.generate_uuid()).get('loadbalancer') + self.lb_id = self.lb.get('id') + self.project_id = self.lb.get('project_id') + self.set_lb_status(self.lb_id) + self.listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, + self.lb_id).get('listener') + self.listener_id = self.listener.get('id') + self.set_lb_status(self.lb_id) + self.pool = self.create_pool(self.lb_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + self.pool_id = self.pool.get('pool').get('id') + self.set_lb_status(self.lb_id) + self.pool_with_listener = self.create_pool( + self.lb_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id) + self.pool_with_listener_id = ( + self.pool_with_listener.get('pool').get('id')) + self.set_lb_status(self.lb_id) + self.pool_repo = repositories.PoolRepository() + self._setup_udp_lb_resources() + self._setup_sctp_lb_resources() + + def _setup_udp_lb_resources(self): + self.udp_lb = self.create_load_balancer(uuidutils.generate_uuid()).get( + 'loadbalancer') + self.udp_lb_id = self.udp_lb.get('id') + self.set_lb_status(self.udp_lb_id) + + self.udp_listener = self.create_listener( + constants.PROTOCOL_UDP, 8888, + self.udp_lb_id).get('listener') + self.udp_listener_id = self.udp_listener.get('id') + self.set_lb_status(self.udp_lb_id) + + self.udp_pool_with_listener = self.create_pool( + None, constants.PROTOCOL_UDP, constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.udp_listener_id) + self.udp_pool_with_listener_id = ( + self.udp_pool_with_listener.get('pool').get('id')) + self.set_lb_status(self.udp_lb_id) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config( + group='api_settings', + udp_connect_min_interval_health_monitor='3') + + def _setup_sctp_lb_resources(self): + self.sctp_lb = self.create_load_balancer( + uuidutils.generate_uuid()).get('loadbalancer') + self.sctp_lb_id = self.sctp_lb.get('id') + self.set_lb_status(self.sctp_lb_id) + + self.sctp_listener = self.create_listener( + lib_consts.PROTOCOL_SCTP, 8888, + self.sctp_lb_id).get('listener') + self.sctp_listener_id = self.sctp_listener.get('id') + self.set_lb_status(self.sctp_lb_id) + + self.sctp_pool_with_listener = self.create_pool( + None, lib_consts.PROTOCOL_SCTP, constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.sctp_listener_id) + self.sctp_pool_with_listener_id = ( + self.sctp_pool_with_listener.get('pool').get('id')) + self.set_lb_status(self.sctp_lb_id) + + def test_get(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, tags=['test_tag']).get(self.root_tag) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_hm['provisioning_status'] = constants.ACTIVE + api_hm['operating_status'] = constants.ONLINE + api_hm.pop('updated_at') + self.set_lb_status(self.lb_id) + response = 
self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + response.pop('updated_at') + self.assertEqual(api_hm, response) + + def test_get_authorized(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_hm['provisioning_status'] = constants.ACTIVE + api_hm['operating_status'] = constants.ONLINE + api_hm.pop('updated_at') + self.set_lb_status(self.lb_id) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + + response = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + response.pop('updated_at') + self.assertEqual(api_hm, response) + + def test_get_not_authorized(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_hm['provisioning_status'] = constants.ACTIVE + api_hm['operating_status'] = constants.ONLINE + api_hm.pop('updated_at') + self.set_lb_status(self.lb_id) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id')), status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_get_deleted_gives_404(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + + self.set_object_status(self.health_monitor_repo, api_hm.get('id'), + provisioning_status=constants.DELETED) + self.get(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + status=404) + + def test_bad_get(self): + self.get(self.HM_PATH.format( + healthmonitor_id=uuidutils.generate_uuid()), status=404) + + def test_get_all(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, tags=['test_tag']).get(self.root_tag) + self.set_lb_status(self.lb_id) + hms = self.get(self.HMS_PATH).json.get(self.root_tag_list) + self.assertIsInstance(hms, list) + self.assertEqual(1, len(hms)) + self.assertEqual(api_hm.get('id'), hms[0].get('id')) + self.assertEqual(api_hm['tags'], hms[0]['tags']) + + def test_get_all_not_authorized(self): + self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 
1).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + hms = self.get(self.HMS_PATH, status=403).json + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, hms) + + def test_get_all_admin(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + pool2 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + pool3 = self.create_pool( + lb1_id, constants.PROTOCOL_TCP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + hm1 = self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + hm2 = self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + hm3 = self.create_health_monitor( + pool3.get('id'), constants.HEALTH_MONITOR_TLS_HELLO, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + hms = self.get(self.HMS_PATH).json.get(self.root_tag_list) + self.assertEqual(3, len(hms)) + hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] + self.assertIn((hm1.get('id'), hm1.get('type')), hm_id_protocols) + self.assertIn((hm2.get('id'), hm2.get('type')), hm_id_protocols) + self.assertIn((hm3.get('id'), hm3.get('type')), hm_id_protocols) + + def test_get_all_non_admin(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + pool2 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + hm3 = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=hm3['project_id']) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': 
['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + hms = self.get(self.HMS_PATH).json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assertEqual(1, len(hms)) + hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] + self.assertIn((hm3.get('id'), hm3.get('type')), hm_id_protocols) + + def test_get_all_unscoped_token(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + pool2 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=None) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.HMS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + + def test_get_all_non_admin_global_observer(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + pool2 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + pool3 = self.create_pool( + lb1_id, constants.PROTOCOL_TCP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + hm1 = self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + hm2 = self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + hm3 = 
self.create_health_monitor( + pool3.get('id'), constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=hm3['project_id']) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['admin'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + + hms = self.get(self.HMS_PATH).json.get(self.root_tag_list) + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(3, len(hms)) + hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] + self.assertIn((hm1.get('id'), hm1.get('type')), hm_id_protocols) + self.assertIn((hm2.get('id'), hm2.get('type')), hm_id_protocols) + self.assertIn((hm3.get('id'), hm3.get('type')), hm_id_protocols) + + def test_get_all_hides_deleted(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + + response = self.get(self.HMS_PATH) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 1) + self.set_object_status(self.health_monitor_repo, api_hm.get('id'), + provisioning_status=constants.DELETED) + response = self.get(self.HMS_PATH) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 0) + + def test_get_by_project_id(self): + project1_id = uuidutils.generate_uuid() + project2_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project1_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + lb2 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', + project_id=project2_id) + lb2_id = lb2.get('loadbalancer').get('id') + self.set_lb_status(lb2_id) + pool1 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + pool2 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + pool3 = self.create_pool( + lb2_id, constants.PROTOCOL_TCP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb2_id) + hm1 = self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + hm2 = self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + hm3 = self.create_health_monitor( + pool3.get('id'), constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb2_id) + hms = self.get( + self.HMS_PATH, + params={'project_id': project1_id}).json.get(self.root_tag_list) + + self.assertEqual(2, len(hms)) + hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] + self.assertIn((hm1.get('id'), hm1.get('type')), hm_id_protocols) + self.assertIn((hm2.get('id'), hm2.get('type')), 
hm_id_protocols) + hms = self.get( + self.HMS_PATH, + params={'project_id': project2_id}).json.get(self.root_tag_list) + self.assertEqual(1, len(hms)) + hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] + self.assertIn((hm3.get('id'), hm3.get('type')), hm_id_protocols) + + def test_get_all_sorted(self): + pool1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1').get('pool') + self.set_lb_status(self.lb_id) + pool2 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2').get('pool') + self.set_lb_status(self.lb_id) + pool3 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3').get('pool') + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, name='hm1').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1, name='hm2').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool3.get('id'), constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 1, name='hm3').get(self.root_tag) + self.set_lb_status(self.lb_id) + + response = self.get(self.HMS_PATH, params={'sort': 'name:desc'}) + hms_desc = response.json.get(self.root_tag_list) + response = self.get(self.HMS_PATH, params={'sort': 'name:asc'}) + hms_asc = response.json.get(self.root_tag_list) + + self.assertEqual(3, len(hms_desc)) + self.assertEqual(3, len(hms_asc)) + + hm_id_names_desc = [(hm.get('id'), hm.get('name')) for hm in hms_desc] + hm_id_names_asc = [(hm.get('id'), hm.get('name')) for hm in hms_asc] + self.assertEqual(hm_id_names_asc, list(reversed(hm_id_names_desc))) + + def test_get_all_sorted_by_max_retries(self): + pool1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1').get('pool') + self.set_lb_status(self.lb_id) + pool2 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2').get('pool') + self.set_lb_status(self.lb_id) + pool3 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3').get('pool') + self.set_lb_status(self.lb_id) + hm1 = self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 2, name='hm1').get(self.root_tag) + self.set_lb_status(self.lb_id) + hm2 = self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1, name='hm2').get(self.root_tag) + self.set_lb_status(self.lb_id) + hm3 = self.create_health_monitor( + pool3.get('id'), constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 3, name='hm3').get(self.root_tag) + self.set_lb_status(self.lb_id) + + response = self.get(self.HMS_PATH, params={'sort': 'max_retries:desc'}) + hms_desc = response.json.get(self.root_tag_list) + response = self.get(self.HMS_PATH, params={'sort': 'max_retries:asc'}) + hms_asc = response.json.get(self.root_tag_list) + + self.assertEqual(3, len(hms_desc)) + self.assertEqual(3, len(hms_asc)) + + hm_id_names_desc = [(hm.get('id'), hm.get('name')) for hm in hms_desc] + hm_id_names_asc = [(hm.get('id'), hm.get('name')) for hm in hms_asc] + self.assertEqual(hm_id_names_asc, list(reversed(hm_id_names_desc))) + + self.assertEqual(hm2[constants.MAX_RETRIES], + hms_asc[0][constants.MAX_RETRIES]) + self.assertEqual(hm1[constants.MAX_RETRIES], + 
hms_asc[1][constants.MAX_RETRIES]) + self.assertEqual(hm3[constants.MAX_RETRIES], + hms_asc[2][constants.MAX_RETRIES]) + + self.assertEqual(hm3[constants.MAX_RETRIES], + hms_desc[0][constants.MAX_RETRIES]) + self.assertEqual(hm1[constants.MAX_RETRIES], + hms_desc[1][constants.MAX_RETRIES]) + self.assertEqual(hm2[constants.MAX_RETRIES], + hms_desc[2][constants.MAX_RETRIES]) + + def test_get_all_limited(self): + pool1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1').get('pool') + self.set_lb_status(self.lb_id) + pool2 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2').get('pool') + self.set_lb_status(self.lb_id) + pool3 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3').get('pool') + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, name='hm1').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1, name='hm2').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool3.get('id'), constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 1, name='hm3').get(self.root_tag) + self.set_lb_status(self.lb_id) + + # First two -- should have 'next' link + first_two = self.get(self.HMS_PATH, params={'limit': 2}).json + objs = first_two[self.root_tag_list] + links = first_two[self.root_tag_links] + self.assertEqual(2, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('next', links[0]['rel']) + + # Third + off the end -- should have previous link + third = self.get(self.HMS_PATH, params={ + 'limit': 2, + 'marker': first_two[self.root_tag_list][1]['id']}).json + objs = third[self.root_tag_list] + links = third[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('previous', links[0]['rel']) + + # Middle -- should have both links + middle = self.get(self.HMS_PATH, params={ + 'limit': 1, + 'marker': first_two[self.root_tag_list][0]['id']}).json + objs = middle[self.root_tag_list] + links = middle[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(2, len(links)) + self.assertCountEqual(['previous', 'next'], + [link['rel'] for link in links]) + + def test_get_all_fields_filter(self): + pool1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1').get('pool') + self.set_lb_status(self.lb_id) + pool2 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2').get('pool') + self.set_lb_status(self.lb_id) + pool3 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3').get('pool') + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, name='hm1').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1, name='hm2').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool3.get('id'), constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 1, name='hm3').get(self.root_tag) + self.set_lb_status(self.lb_id) + + hms = self.get(self.HMS_PATH, params={ + 'fields': ['id', 'project_id']}).json + for hm in 
hms['healthmonitors']: + self.assertIn('id', hm) + self.assertIn('project_id', hm) + self.assertNotIn('description', hm) + + def test_get_one_fields_filter(self): + pool1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1').get('pool') + self.set_lb_status(self.lb_id) + + self.set_lb_status(self.lb_id) + hm1 = self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, name='hm1').get(self.root_tag) + self.set_lb_status(self.lb_id) + + hm = self.get( + self.HM_PATH.format(healthmonitor_id=hm1.get('id')), + params={'fields': ['id', 'project_id']}).json.get(self.root_tag) + self.assertIn('id', hm) + self.assertIn('project_id', hm) + self.assertNotIn('description', hm) + + def test_get_all_filter(self): + pool1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1').get('pool') + self.set_lb_status(self.lb_id) + pool2 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2').get('pool') + self.set_lb_status(self.lb_id) + pool3 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3').get('pool') + self.set_lb_status(self.lb_id) + hm1 = self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, name='hm1').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1, name='hm2').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.create_health_monitor( + pool3.get('id'), constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 1, name='hm3').get(self.root_tag) + self.set_lb_status(self.lb_id) + + hms = self.get(self.HMS_PATH, params={ + 'id': hm1['id']}).json + self.assertEqual(1, len(hms['healthmonitors'])) + self.assertEqual(hm1['id'], + hms['healthmonitors'][0]['id']) + + def test_get_all_tags_filter(self): + pool1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1').get('pool') + self.set_lb_status(self.lb_id) + pool2 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2').get('pool') + self.set_lb_status(self.lb_id) + pool3 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3').get('pool') + self.set_lb_status(self.lb_id) + hm1 = self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, tags=['test_tag1', 'test_tag2']).get(self.root_tag) + self.set_lb_status(self.lb_id) + hm2 = self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1, tags=['test_tag2', 'test_tag3']).get(self.root_tag) + self.set_lb_status(self.lb_id) + hm3 = self.create_health_monitor( + pool3.get('id'), constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 1, tags=['test_tag4', 'test_tag5']).get(self.root_tag) + self.set_lb_status(self.lb_id) + + hms = self.get( + self.HMS_PATH, + params={'tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(hms, list) + self.assertEqual(2, len(hms)) + self.assertEqual( + [hm1.get('id'), hm2.get('id')], + [hm.get('id') for hm in hms] + ) + + hms = self.get( + self.HMS_PATH, + params={'tags': ['test_tag2', 'test_tag3']} + ).json.get(self.root_tag_list) + self.assertIsInstance(hms, list) + self.assertEqual(1, len(hms)) + self.assertEqual( + [hm2.get('id')], + 
[hm.get('id') for hm in hms] + ) + + hms = self.get( + self.HMS_PATH, + params={'tags-any': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(hms, list) + self.assertEqual(2, len(hms)) + self.assertEqual( + [hm1.get('id'), hm2.get('id')], + [hm.get('id') for hm in hms] + ) + + hms = self.get( + self.HMS_PATH, + params={'not-tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(hms, list) + self.assertEqual(1, len(hms)) + self.assertEqual( + [hm3.get('id')], + [hm.get('id') for hm in hms] + ) + + hms = self.get( + self.HMS_PATH, + params={'not-tags-any': ['test_tag2', 'test_tag4']} + ).json.get(self.root_tag_list) + self.assertIsInstance(hms, list) + self.assertEqual(0, len(hms)) + + hms = self.get( + self.HMS_PATH, + params={'tags': 'test_tag2', + 'tags-any': ['test_tag1', 'test_tag3']} + ).json.get(self.root_tag_list) + self.assertIsInstance(hms, list) + self.assertEqual(2, len(hms)) + self.assertEqual( + [hm1.get('id'), hm2.get('id')], + [hm.get('id') for hm in hms] + ) + + hms = self.get( + self.HMS_PATH, + params={'tags': 'test_tag2', 'not-tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(hms, list) + self.assertEqual(0, len(hms)) + + def test_empty_get_all(self): + response = self.get(self.HMS_PATH).json.get(self.root_tag_list) + self.assertIsInstance(response, list) + self.assertEqual(0, len(response)) + + def test_create_http_monitor_with_relative_path(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, url_path="/").get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + + def test_create_http_monitor_with_url_path(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, url_path="/v2/api/index").get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + + def test_create_sans_listener(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) + self.assertEqual(1, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + # Verify optional field defaults + self.assertEqual('GET', api_hm.get('http_method')) + self.assertEqual('/', api_hm.get('url_path')) + self.assertEqual('200', api_hm.get('expected_codes')) + + def test_create_http_full(self): + api_hm = self.create_health_monitor( + self.pool_id, 
constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, admin_state_up=False, expected_codes='200', + http_method='GET', name='Test HM', url_path='/').get(self.root_tag) + self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) + self.assertEqual(1, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + self.assertFalse(api_hm.get('admin_state_up')) + self.assertEqual('Test HM', api_hm.get('name')) + self.assertEqual('GET', api_hm.get('http_method')) + self.assertEqual('/', api_hm.get('url_path')) + self.assertEqual('200', api_hm.get('expected_codes')) +
+ def test_create_https_full(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTPS, + 1, 1, 1, 1, admin_state_up=False, expected_codes='200', + http_method='GET', name='Test HM', url_path='/').get(self.root_tag) + self.assertEqual(constants.HEALTH_MONITOR_HTTPS, api_hm.get('type')) + self.assertEqual(1, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + self.assertFalse(api_hm.get('admin_state_up')) + self.assertEqual('Test HM', api_hm.get('name')) + self.assertEqual('GET', api_hm.get('http_method')) + self.assertEqual('/', api_hm.get('url_path')) + self.assertEqual('200', api_hm.get('expected_codes')) +
+ def test_create_udp_case_with_udp_connect_type(self): + # create with UDP-CONNECT type + api_hm = self.create_health_monitor( + self.udp_pool_with_listener_id, + constants.HEALTH_MONITOR_UDP_CONNECT, + 3, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.udp_lb_id) + self.assertEqual(constants.HEALTH_MONITOR_UDP_CONNECT, + api_hm.get('type')) + self.assertEqual(3, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + # Verify the L7 fields are None + self.assertIsNone(api_hm.get('http_method')) + self.assertIsNone(api_hm.get('url_path')) + self.assertIsNone(api_hm.get('expected_codes')) +
+ def test_create_udp_case_with_tcp_type(self): + # create with TCP type + api_hm = self.create_health_monitor( + self.udp_pool_with_listener_id, constants.HEALTH_MONITOR_TCP, + 3, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.udp_lb_id) + self.assertEqual(constants.HEALTH_MONITOR_TCP, api_hm.get('type')) + self.assertEqual(3, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + self.assertIsNone(api_hm.get('http_method')) + self.assertIsNone(api_hm.get('url_path')) + self.assertIsNone(api_hm.get('expected_codes'))
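+
+    # NOTE: The UDP pool cases above and below (and the SCTP pool cases
+    # further down) all exercise one type-compatibility rule. A rough
+    # sketch, inferred from the fault strings asserted in
+    # test_negative_create_udp_case, not the controller code itself:
+    #
+    #   allowed = {UDP-CONNECT, SCTP, TCP, HTTP}
+    #   pool protocol in (UDP, SCTP) and monitor type not in allowed -> 400
+    #   monitor type in (UDP-CONNECT, SCTP) and pool protocol not in
+    #       (UDP, SCTP)                                              -> 400
+
+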
def test_create_udp_case_with_http_type(self): + # create with HTTP type + api_hm = self.create_health_monitor( + self.udp_pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, + 3, 1, 1, 1, url_path='/test.html', + http_method=constants.HEALTH_MONITOR_HTTP_METHOD_GET, + expected_codes='200-201').get(self.root_tag) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.udp_lb_id) + self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) + self.assertEqual(3, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + self.assertEqual(constants.HEALTH_MONITOR_HTTP_METHOD_GET, + api_hm.get('http_method')) + self.assertEqual('/test.html', api_hm.get('url_path')) + self.assertEqual('200-201', api_hm.get('expected_codes')) +
+ def test_create_udp_case_with_sctp_type(self): + # create with SCTP type + api_hm = self.create_health_monitor( + self.udp_pool_with_listener_id, + lib_consts.HEALTH_MONITOR_SCTP, + 3, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.udp_lb_id) + self.assertEqual(lib_consts.HEALTH_MONITOR_SCTP, + api_hm.get('type')) + self.assertEqual(3, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + # Verify the L7 fields are None + self.assertIsNone(api_hm.get('http_method')) + self.assertIsNone(api_hm.get('url_path')) + self.assertIsNone(api_hm.get('expected_codes')) +
+ def test_udp_case_when_udp_connect_min_interval_health_monitor_set(self): + # negative case first + req_dict = {'pool_id': self.udp_pool_with_listener_id, + 'type': constants.HEALTH_MONITOR_UDP_CONNECT, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1} + res = self.post(self.HMS_PATH, self._build_body(req_dict), status=400, + expect_errors=True) + expect_error_msg = ("Validation failure: The request delay value 1 " + "should be larger than 3 for %s health monitor " + "type.") % constants.HEALTH_MONITOR_UDP_CONNECT + + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + + # pass cases + self.conf.config( + group='api_settings', + udp_connect_min_interval_health_monitor='-3') + res = self.post(self.HMS_PATH, self._build_body(req_dict)) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id, + hm_id=res.json['healthmonitor']['id'], + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE)
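+
+    # A sketch of the delay check the test above exercises, assuming the
+    # option is read as
+    # CONF.api_settings.udp_connect_min_interval_health_monitor:
+    #
+    #   if hm_type == UDP-CONNECT and delay < min_interval  -> 400
+    #
+    # A negative option value such as '-3' therefore disables the check.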
hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + + def test_negative_create_udp_case(self): + req_dict = {'pool_id': self.udp_pool_with_listener_id, + 'delay': 3, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1} + expect_error_msg = ("Validation failure: The associated pool protocol " + "is %(pool_protocol)s, so only a %(types)s health " + "monitor is supported.") % { + 'pool_protocol': constants.PROTOCOL_UDP, + 'types': '/'.join([constants.HEALTH_MONITOR_UDP_CONNECT, + lib_consts.HEALTH_MONITOR_SCTP, + constants.HEALTH_MONITOR_TCP, + constants.HEALTH_MONITOR_HTTP])} + + # Not allowed types specified. + update_req = {'type': constants.HEALTH_MONITOR_TLS_HELLO} + req_dict.update(update_req) + res = self.post(self.HMS_PATH, self._build_body(req_dict), status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + + # Hit error during create with a non-UDP pool + req_dict = {'pool_id': self.pool_with_listener_id, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'type': constants.HEALTH_MONITOR_UDP_CONNECT} + expect_error_msg = ("Validation failure: The %(type)s type is only " + "supported for pools of type " + "%(protocol)s.") % { + 'type': constants.HEALTH_MONITOR_UDP_CONNECT, + 'protocol': '/'.join((constants.PROTOCOL_UDP, + lib_consts.PROTOCOL_SCTP))} + res = self.post(self.HMS_PATH, self._build_body(req_dict), + status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + + def test_create_sctp_case_with_udp_connect_type(self): + # create with UDP-CONNECT type + api_hm = self.create_health_monitor( + self.sctp_pool_with_listener_id, + constants.HEALTH_MONITOR_UDP_CONNECT, + 3, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.sctp_lb_id, listener_id=self.sctp_listener_id, + pool_id=self.sctp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.sctp_lb_id) + self.assertEqual(constants.HEALTH_MONITOR_UDP_CONNECT, + api_hm.get('type')) + self.assertEqual(3, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + # Verify the L7 fields is None + self.assertIsNone(api_hm.get('http_method')) + self.assertIsNone(api_hm.get('url_path')) + self.assertIsNone(api_hm.get('expected_codes')) + + def test_create_sctp_case_with_tcp_type(self): + # create with TCP type + api_hm = self.create_health_monitor( + self.sctp_pool_with_listener_id, constants.HEALTH_MONITOR_TCP, + 3, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.sctp_lb_id, listener_id=self.sctp_listener_id, + pool_id=self.sctp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.sctp_lb_id) + 
self.assertEqual(constants.HEALTH_MONITOR_TCP, api_hm.get('type')) + self.assertEqual(3, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + self.assertIsNone(api_hm.get('http_method')) + self.assertIsNone(api_hm.get('url_path')) + self.assertIsNone(api_hm.get('expected_codes')) + + def test_create_sctp_case_with_http_type(self): + # create with HTTP type + api_hm = self.create_health_monitor( + self.sctp_pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, + 3, 1, 1, 1, url_path='/test.html', + http_method=constants.HEALTH_MONITOR_HTTP_METHOD_GET, + expected_codes='200-201').get(self.root_tag) + self.assert_correct_status( + lb_id=self.sctp_lb_id, listener_id=self.sctp_listener_id, + pool_id=self.sctp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.sctp_lb_id) + self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) + self.assertEqual(3, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + self.assertEqual(3, api_hm.get('delay')) + self.assertEqual(constants.HEALTH_MONITOR_HTTP_METHOD_GET, + api_hm.get('http_method')) + self.assertEqual('/test.html', api_hm.get('url_path')) + self.assertEqual('200-201', api_hm.get('expected_codes')) + + def test_create_sctp_case_with_sctp_type(self): + # create with SCTP type + api_hm = self.create_health_monitor( + self.sctp_pool_with_listener_id, + lib_consts.HEALTH_MONITOR_SCTP, + 3, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.sctp_lb_id, listener_id=self.sctp_listener_id, + pool_id=self.sctp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.sctp_lb_id) + self.assertEqual(lib_consts.HEALTH_MONITOR_SCTP, + api_hm.get('type')) + self.assertEqual(3, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + # Verify the L7 fields is None + self.assertIsNone(api_hm.get('http_method')) + self.assertIsNone(api_hm.get('url_path')) + self.assertIsNone(api_hm.get('expected_codes')) + + def test_ensure_L7_fields_filled_during_create(self): + # Create a health monitor with a load balancer pool + api_hm = self.create_health_monitor( + self.pool_id, + constants.PROTOCOL_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.HEALTH_MONITOR_HTTP_DEFAULT_METHOD, + api_hm.get('http_method')) + self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_URL_PATH, + api_hm.get('url_path')) + self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES, + api_hm.get('expected_codes')) + + def 
test_create_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) + self.assertEqual(1, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + # Verify optional field defaults + self.assertEqual('GET', api_hm.get('http_method')) + self.assertEqual('/', api_hm.get('url_path')) + self.assertEqual('200', api_hm.get('expected_codes')) + + def test_create_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, status=403) + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_hm) + + def test_create_pool_in_error(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + pool1_id = pool1.get('id') + self.set_lb_status(lb1_id) + self.set_object_status(self.pool_repo, pool1_id, + provisioning_status=constants.ERROR) + api_hm = self.create_health_monitor( + pool1_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, status=409) + ref_msg = f'Pool {pool1_id} is immutable and cannot be updated.' 
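+ # The 409 fault string should name the pool that is immutable.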
+ self.assertEqual(ref_msg, api_hm.get('faultstring')) + + def test_create_with_listener(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_CREATE, + hm_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.HEALTH_MONITOR_HTTP, api_hm.get('type')) + self.assertEqual(1, api_hm.get('delay')) + self.assertEqual(1, api_hm.get('timeout')) + self.assertEqual(1, api_hm.get('max_retries_down')) + self.assertEqual(1, api_hm.get('max_retries')) + # Verify optional field defaults + self.assertEqual('GET', api_hm.get('http_method')) + self.assertEqual('/', api_hm.get('url_path')) + self.assertEqual('200', api_hm.get('expected_codes')) + + def test_pool_returns_hm_id(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + pool = self.get(self.POOL_PATH.format( + pool_id=self.pool_id)).json.get("pool") + self.assertEqual(pool.get('healthmonitor_id'), api_hm.get('id')) + + # TODO(rm_work) Remove after deprecation of project_id in POST (R series) + def test_create_with_project_id_is_ignored(self): + pid = uuidutils.generate_uuid() + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, + project_id=pid).get(self.root_tag) + self.assertEqual(self.project_id, api_hm.get('project_id')) + + def test_create_with_default_http_version(self): + # Explicitly set http_version to 1.0, the default version + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, admin_state_up=False, expected_codes='200', + http_method='GET', name='Test HM', url_path='/', + http_version='1.0').get(self.root_tag) + self.assertEqual(1.0, api_hm.get('http_version')) + + def test_create_without_http_version(self): + # http_version should be None when it is not specified + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, admin_state_up=False, expected_codes='200', + http_method='GET', name='Test HM', url_path='/').get(self.root_tag) + self.assertIsNone(api_hm.get('http_version')) + + def test_create_with_http_version_11_and_domain_name(self): + # Create with http_version 1.1 and domain_name + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTPS, + 1, 1, 1, 1, admin_state_up=False, expected_codes='200', + http_method='GET', name='Test HM', url_path='/', + http_version=1.1, domain_name='testlab.com').get(self.root_tag) + self.assertEqual(1.1, api_hm.get('http_version')) + self.assertEqual('testlab.com', api_hm.get('domain_name')) + + def test_create_with_http_version_11(self): + # Create with http_version 1.1 + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTPS, + 1, 1, 1, 1, admin_state_up=False, expected_codes='200', + http_method='GET', name='Test HM', url_path='/', + http_version=1.1).get(self.root_tag) + self.assertEqual(1.1, api_hm.get('http_version')) + + def test_bad_create(self): + hm_json = {'name': 'test1', 'pool_id': self.pool_id} + self.post(self.HMS_PATH, self._build_body(hm_json), status=400) + self.assert_correct_status( + lb_id=self.lb_id, 
listener_id=self.listener_id, + pool_id=self.pool_id) + + def test_bad_create_with_invalid_url_path(self): + req_dict = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'url_path': 'https://openstack.org'} + self.post(self.HMS_PATH, self._build_body(req_dict), status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id) + + def test_create_ping_when_ping_disabled(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='api_settings', + allow_ping_health_monitors=False) + req_dict = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_PING, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1} + self.post(self.HMS_PATH, self._build_body(req_dict), status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_create_with_bad_provider(self, mock_provider): + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + req_dict = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'url_path': '/foo'} + response = self.post(self.HMS_PATH, self._build_body(req_dict), + status=500) + self.assertIn('Provider \'bad_driver\' reports error: broken', + response.json.get('faultstring')) + + def test_create_with_type_none(self): + req_dict = {'pool_id': self.pool_id, + 'type': None, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'url_path': '/'} + self.post(self.HMS_PATH, self._build_body(req_dict), status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id) + + def test_create_with_delay_none(self): + req_dict = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': None, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'url_path': '/'} + self.post(self.HMS_PATH, self._build_body(req_dict), status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id) + + def test_create_with_max_retries_none(self): + req_dict = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': None, + 'url_path': '/'} + self.post(self.HMS_PATH, self._build_body(req_dict), status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id) + + def test_create_with_timeout_none(self): + req_dict = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': 1, + 'timeout': None, + 'max_retries_down': 1, + 'max_retries': 1, + 'url_path': '/'} + self.post(self.HMS_PATH, self._build_body(req_dict), status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id) + + def test_create_with_pool_id_none(self): + req_dict = {'pool_id': None, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'url_path': '/'} + self.post(self.HMS_PATH, self._build_body(req_dict), status=404) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id) + + def test_create_TCP_with_http_method(self): + 
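# TCP monitors carry no HTTP semantics, so supplying http_method + # (or, in the next tests, url_path / expected_codes) must be + # rejected with a 400. + 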
req_dict = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_TCP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'http_method': constants.HEALTH_MONITOR_HTTP_METHOD_GET} + self.post(self.HMS_PATH, self._build_body(req_dict), status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id) + + def test_create_TCP_with_url_path(self): + req_dict = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_TCP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'url_path': '/'} + self.post(self.HMS_PATH, self._build_body(req_dict), status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id) + + def test_create_TCP_with_expected_codes(self): + req_dict = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_TCP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'expected_codes': '200'} + self.post(self.HMS_PATH, self._build_body(req_dict), status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id) + + def test_duplicate_create(self): + self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1) + self.set_lb_status(self.lb_id) + self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, + status=409) + + def test_create_over_quota(self): + self.start_quota_mock(data_models.HealthMonitor) + hm = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1} + self.post(self.HMS_PATH, self._build_body(hm), status=403) + + def test_bad_create_with_http_version_and_domain_name_cases(self): + hm_json = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'expected_codes': '200', + 'http_version': 1.00, 'domain_name': 'testlab.com'} + api_hm = self.post( + self.HMS_PATH, self._build_body(hm_json), status=400).json + expect_error_msg = ("http_version 1.0 is not a valid option for " + "health monitors HTTP 1.1 domain name health " + "check") + self.assertEqual(expect_error_msg, api_hm['faultstring']) + for bad_case in [{'http_version': 1.0, 'domain_name': '^testla&b.com'}, + {'http_version': 1.1, + 'domain_name': 'testla\nb.com'}]: + hm_json = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'expected_codes': '200'} + hm_json.update(bad_case) + api_hm = self.post( + self.HMS_PATH, self._build_body(hm_json), status=400).json + expect_error_msg = ( + "Invalid input for field/attribute domain_name. Value: '%s'. 
" + "Value should match the pattern %s") % (bad_case[ + 'domain_name'], constants.DOMAIN_NAME_REGEX) + self.assertEqual(expect_error_msg, api_hm['faultstring']) + + def test_update(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, + tags=['old_tag']).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'max_retries': 2, 'tags': ['new_tag']} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_UPDATE) + response = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + self.assertEqual(2, response[constants.MAX_RETRIES]) + self.assertEqual(['new_tag'], response['tags']) + + def test_update_HTTPS(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTPS, 1, 1, 1, 1, + admin_state_up=False, expected_codes='200', + http_method='GET', name='Test HM', url_path='/').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.assertEqual('/', api_hm[constants.URL_PATH]) + new_hm = {constants.URL_PATH: '/health'} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_UPDATE) + response = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + self.assertEqual('/health', response[constants.URL_PATH]) + + def test_update_http_version_and_domain_name(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, admin_state_up=False, expected_codes='200', + http_method='GET', name='Test HM', url_path='/').get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'http_version': 1.1, 'domain_name': 'testlab.com'} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_UPDATE) + response = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + self.assertEqual(1.1, response['http_version']) + self.assertEqual('testlab.com', response['domain_name']) + + def test_update_TCP(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'max_retries': 2} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, 
+ listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_UPDATE) + response = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + self.assertEqual(2, response[constants.MAX_RETRIES]) + + def test_update_authorized(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'max_retries': 2} + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_UPDATE) + + def test_update_not_authorized(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'max_retries': 2} + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=403) + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.ACTIVE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.ACTIVE, + hm_prov_status=constants.ACTIVE) + + def test_update_udp_case(self): + api_hm = self.create_health_monitor( + self.udp_pool_with_listener_id, + constants.HEALTH_MONITOR_TCP, 3, 1, 1, 1).get( + self.root_tag) + self.set_lb_status(self.udp_lb_id) + new_hm = {'timeout': 2} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + 
hm_prov_status=constants.PENDING_UPDATE) + + def test_update_udp_case_with_udp_hm(self): + api_hm = self.create_health_monitor( + self.udp_pool_with_listener_id, + constants.HEALTH_MONITOR_UDP_CONNECT, 3, 1, 1, 1).get( + self.root_tag) + self.set_lb_status(self.udp_lb_id) + new_hm = {'timeout': 2} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_UPDATE) + + def test_negative_update_udp_case(self): + api_hm = self.create_health_monitor( + self.udp_pool_with_listener_id, + constants.HEALTH_MONITOR_UDP_CONNECT, 3, 1, 1, 1).get( + self.root_tag) + self.set_lb_status(self.udp_lb_id) + + # Hit error during update with invalid parameter + req_dict = {'delay': 3, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1, + 'http_method': constants.HEALTH_MONITOR_HTTP_METHOD_GET} + expect_error_msg = (("http_method is not a valid option for health " + "monitors of type %s") % + constants.HEALTH_MONITOR_UDP_CONNECT) + res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(req_dict), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + + # Hit error during update with smaller delay value + req_dict = {'delay': 1} + expect_error_msg = ("Validation failure: The request delay value 1 " + "should be larger than 3 for %s health monitor " + "type.") % constants.HEALTH_MONITOR_UDP_CONNECT + res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(req_dict), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=self.udp_pool_with_listener_id) + + def test_bad_update(self): + api_hm = self.create_health_monitor(self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + new_hm = {'http_method': 'bad_method', 'delay': 2} + self.set_lb_status(self.lb_id) + self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=400) + + def test_update_delay_none(self): + api_hm = self.create_health_monitor(self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + new_hm = {constants.DELAY: None} + self.set_lb_status(self.lb_id) + expect_error_msg = f"None is not a valid option for {constants.DELAY}" + res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=400) + self.assertEqual(expect_error_msg, res.json['faultstring']) + + def test_update_max_retries_none(self): + api_hm = self.create_health_monitor(self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + new_hm = {constants.MAX_RETRIES: None} + self.set_lb_status(self.lb_id) + expect_error_msg = (f"None is not a valid option for " + f"{constants.MAX_RETRIES}") + res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=400) + self.assertEqual(expect_error_msg, 
res.json['faultstring']) + + def test_update_timeout_none(self): + api_hm = self.create_health_monitor(self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + new_hm = {constants.TIMEOUT: None} + self.set_lb_status(self.lb_id) + expect_error_msg = (f"None is not a valid option for " + f"{constants.TIMEOUT}") + res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=400) + self.assertEqual(expect_error_msg, res.json['faultstring']) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_with_bad_provider(self, mock_provider): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'max_retries': 2} + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + response = self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=500) + self.assertIn('Provider \'bad_driver\' reports error: broken', + response.json.get('faultstring')) + + def test_update_TCP_setting_http_method(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'http_method': constants.HEALTH_MONITOR_HTTP_METHOD_GET} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=400) + + def test_update_TCP_setting_url_path(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'url_path': '/'} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=400) + + def test_update_TCP_setting_expected_codes(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_TCP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'expected_codes': '200'} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=400) + + def test_update_HTTP_http_method_none(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'http_method': None} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + response = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + self.assertEqual(constants.HEALTH_MONITOR_HTTP_METHOD_GET, + response['http_method']) + + def test_update_HTTP_url_path_none(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'url_path': None} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + response = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_URL_PATH, + response['url_path']) + + def test_update_HTTP_expected_codes_none(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) + 
self.set_lb_status(self.lb_id) + new_hm = {'expected_codes': None} + self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)) + response = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES, + response['expected_codes']) + + def test_bad_update_http_version_and_domain_name(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, admin_state_up=False, expected_codes='200', + http_method='GET', name='Test HM', url_path='/').get(self.root_tag) + self.set_lb_status(self.lb_id) + new_hm = {'http_version': 1.0, 'domain_name': 'testlab.com'} + response = self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=400) + expect_error_msg = ("http_version 1.0 is not a valid option for " + "health monitors HTTP 1.1 domain name health " + "check") + self.assertEqual(expect_error_msg, response.json['faultstring']) + + new_hm = {'http_version': 1.0, 'domain_name': '^testla&b.com'} + response = self.put( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm), status=400) + expect_error_msg = ( + "Invalid input for field/attribute domain_name. Value: '%s'. " + "Value should match the pattern %s") % (new_hm[ + 'domain_name'], constants.DOMAIN_NAME_REGEX) + self.assertEqual(expect_error_msg, response.json['faultstring']) + + def test_update_unset_defaults(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1, name='test', domain_name='test.example.com', + expected_codes='400', http_method='HEAD', http_version='1.1', + url_path='/test').get(self.root_tag) + new_hm = {constants.DOMAIN_NAME: None, constants.EXPECTED_CODES: None, + constants.HTTP_METHOD: None, constants.HTTP_VERSION: None, + constants.MAX_RETRIES_DOWN: None, 'name': None, + constants.URL_PATH: None} + self.set_lb_status(self.lb_id) + res = self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + self._build_body(new_hm)).json.get(self.root_tag) + self.assertIsNone(res[constants.DOMAIN_NAME]) + self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES, + res[constants.EXPECTED_CODES]) + self.assertEqual(constants.HEALTH_MONITOR_HTTP_DEFAULT_METHOD, + res[constants.HTTP_METHOD]) + self.assertIsNone(res[constants.HTTP_VERSION]) + self.assertEqual(constants.DEFAULT_MAX_RETRIES_DOWN, + res[constants.MAX_RETRIES_DOWN]) + self.assertEqual('', res['name']) + self.assertEqual(constants.HEALTH_MONITOR_DEFAULT_URL_PATH, + res[constants.URL_PATH]) + + def test_delete(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + hm = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + api_hm['provisioning_status'] = constants.ACTIVE + api_hm['operating_status'] = constants.ONLINE + self.assertIsNone(api_hm.pop('updated_at')) + self.assertIsNotNone(hm.pop('updated_at')) + self.assertEqual(api_hm, hm) + self.delete(self.HM_PATH.format(healthmonitor_id=api_hm.get('id'))) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + 
hm_prov_status=constants.PENDING_DELETE) + + def test_delete_authorized(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + hm = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + api_hm['provisioning_status'] = constants.ACTIVE + api_hm['operating_status'] = constants.ONLINE + self.assertIsNone(api_hm.pop('updated_at')) + self.assertIsNotNone(hm.pop('updated_at')) + self.assertEqual(api_hm, hm) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.delete( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id'))) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + hm_prov_status=constants.PENDING_DELETE) + + def test_delete_not_authorized(self): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + hm = self.get(self.HM_PATH.format( + healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + api_hm['provisioning_status'] = constants.ACTIVE + api_hm['operating_status'] = constants.ONLINE + self.assertIsNone(api_hm.pop('updated_at')) + self.assertIsNotNone(hm.pop('updated_at')) + self.assertEqual(api_hm, hm) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + self.delete( + self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, hm_id=api_hm.get('id'), + lb_prov_status=constants.ACTIVE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.ACTIVE, + hm_prov_status=constants.ACTIVE) + + def test_bad_delete(self): + self.delete( + self.HM_PATH.format(healthmonitor_id=uuidutils.generate_uuid()), + status=404) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_delete_with_bad_provider(self, mock_provider): + api_hm = self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + hm = self.get(self.HM_PATH.format( + 
healthmonitor_id=api_hm.get('id'))).json.get(self.root_tag) + api_hm['provisioning_status'] = constants.ACTIVE + api_hm['operating_status'] = constants.ONLINE + self.assertIsNone(api_hm.pop('updated_at')) + self.assertIsNotNone(hm.pop('updated_at')) + self.assertEqual(api_hm, hm) + + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + self.delete(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + status=500) + + def test_create_when_lb_pending_update(self): + self.put(self.LB_PATH.format(lb_id=self.lb_id), + body={'loadbalancer': {'name': 'test_name_change'}}) + self.create_health_monitor( + self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, status=409) + + def test_update_when_lb_pending_update(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + self.put(self.LB_PATH.format(lb_id=self.lb_id), + body={'loadbalancer': {'name': 'test_name_change'}}) + new_hm = {'max_retries': 2} + self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + body=self._build_body(new_hm), status=409) + + def test_delete_when_lb_pending_update(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + self.put(self.LB_PATH.format(lb_id=self.lb_id), + body={'loadbalancer': {'name': 'test_name_change'}}) + self.delete(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + status=409) + + def test_create_when_lb_pending_delete(self): + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + self.create_health_monitor( + self.pool_id, + constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, status=409) + + def test_update_when_lb_pending_delete(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + new_hm = {'max_retries': 2} + self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + body=self._build_body(new_hm), status=409) + + def test_update_already_deleted(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + # This updates the child objects + self.set_lb_status(self.lb_id, status=constants.DELETED) + self.put(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + body=self._build_body({'max_retries': 2}), status=404) + + def test_delete_when_lb_pending_delete(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + self.delete(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + status=409) + + def test_delete_already_deleted(self): + api_hm = self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + # This updates the child objects + self.set_lb_status(self.lb_id, status=constants.DELETED) + self.delete(self.HM_PATH.format(healthmonitor_id=api_hm.get('id')), + status=404) diff --git a/octavia/tests/functional/api/v2/test_l7policy.py b/octavia/tests/functional/api/v2/test_l7policy.py new file mode 100644 index 0000000000..5010983d32 --- /dev/null +++ 
b/octavia/tests/functional/api/v2/test_l7policy.py @@ -0,0 +1,1462 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +import octavia.common.context +from octavia.common import data_models +from octavia.common import exceptions +from octavia.tests.common import constants as c_const +from octavia.tests.functional.api.v2 import base + + +class TestL7Policy(base.BaseAPITest): + + root_tag = 'l7policy' + root_tag_list = 'l7policies' + root_tag_links = 'l7policies_links' + + def setUp(self): + super().setUp() + self.lb = self.create_load_balancer(uuidutils.generate_uuid()) + self.lb_id = self.lb.get('loadbalancer').get('id') + self.project_id = self.lb.get('loadbalancer').get('project_id') + self.set_lb_status(self.lb_id) + self.listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, lb_id=self.lb_id) + self.listener_id = self.listener.get('listener').get('id') + self.set_lb_status(self.lb_id) + self.pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + self.pool_id = self.pool.get('pool').get('id') + self.set_lb_status(self.lb_id) + + def test_get(self): + api_l7policy = self.create_l7policy( + self.listener_id, + constants.L7POLICY_ACTION_REJECT, + tags=['test_tag']).get(self.root_tag) + response = self.get(self.L7POLICY_PATH.format( + l7policy_id=api_l7policy.get('id'))).json.get(self.root_tag) + self.assertEqual(api_l7policy, response) + + def test_get_authorized(self): + api_l7policy = self.create_l7policy( + self.listener_id, + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.L7POLICY_PATH.format( + l7policy_id=api_l7policy.get('id'))) + response = response.json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(api_l7policy, response) + + def test_get_not_authorized(self): + api_l7policy = self.create_l7policy( + self.listener_id, + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = 
self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.get(self.L7POLICY_PATH.format( + l7policy_id=api_l7policy.get('id')), status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_get_deleted_gives_404(self): + api_l7policy = self.create_l7policy( + self.listener_id, + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + + self.set_object_status(self.l7policy_repo, api_l7policy.get('id'), + provisioning_status=constants.DELETED) + self.get(self.L7POLICY_PATH.format(l7policy_id=api_l7policy.get('id')), + status=404) + + def test_bad_get(self): + self.get(self.L7POLICY_PATH.format( + l7policy_id=uuidutils.generate_uuid()), status=404) + + def test_get_all(self): + api_l7policy = self.create_l7policy(self.listener_id, + constants.L7POLICY_ACTION_REJECT, + tags=['test_tag'] + ).get(self.root_tag) + self.set_lb_status(self.lb_id) + policies = self.get(self.L7POLICIES_PATH).json.get(self.root_tag_list) + self.assertIsInstance(policies, list) + self.assertEqual(1, len(policies)) + self.assertEqual(api_l7policy.get('id'), policies[0].get('id')) + self.assertEqual(api_l7policy['tags'], policies[0]['tags']) + + def test_get_all_admin(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, + lb1_id) + listener1_id = listener1.get('listener').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool(lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + pool1_id = pool1.get('pool').get('id') + self.set_lb_status(lb1_id) + api_l7p_a = self.create_l7policy( + listener1_id, + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + self.set_lb_status(lb1_id) + api_l7p_b = self.create_l7policy( + listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + position=2, redirect_pool_id=pool1_id).get(self.root_tag) + self.set_lb_status(lb1_id) + api_l7p_c = self.create_l7policy( + listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL, + position=3, redirect_url='http://localhost/').get(self.root_tag) + self.set_lb_status(lb1_id) + policies = self.get(self.L7POLICIES_PATH).json.get(self.root_tag_list) + self.assertEqual(3, len(policies)) + policy_id_actions = [(p.get('id'), p.get('action')) for p in policies] + self.assertIn((api_l7p_a.get('id'), api_l7p_a.get('action')), + policy_id_actions) + self.assertIn((api_l7p_b.get('id'), api_l7p_b.get('action')), + policy_id_actions) + self.assertIn((api_l7p_c.get('id'), api_l7p_c.get('action')), + policy_id_actions) + + def test_get_all_non_admin(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, + lb1_id) + listener1_id = listener1.get('listener').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool(lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + pool1_id = 
pool1.get('pool').get('id') + self.set_lb_status(lb1_id) + self.create_l7policy( + listener1_id, + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_l7policy( + listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + position=2, redirect_pool_id=pool1_id).get(self.root_tag) + self.set_lb_status(lb1_id) + api_l7p_c = self.create_l7policy( + self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL, + redirect_url='http://localhost/').get(self.root_tag) + self.set_lb_status(lb1_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=api_l7p_c.get('project_id')) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + policies = self.get( + self.L7POLICIES_PATH).json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assertEqual(1, len(policies)) + policy_id_actions = [(p.get('id'), p.get('action')) for p in policies] + self.assertIn((api_l7p_c.get('id'), api_l7p_c.get('action')), + policy_id_actions) + + def test_get_all_unscoped_token(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, + lb1_id) + listener1_id = listener1.get('listener').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool(lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + pool1_id = pool1.get('pool').get('id') + self.set_lb_status(lb1_id) + self.create_l7policy( + listener1_id, + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_l7policy( + listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + position=2, redirect_pool_id=pool1_id).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_l7policy( + self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL, + redirect_url='http://localhost/').get(self.root_tag) + self.set_lb_status(lb1_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=None) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + 
return_value=override_credentials): + result = self.get(self.L7POLICIES_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + + def test_get_all_non_admin_global_observer(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, + lb1_id) + listener1_id = listener1.get('listener').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool(lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + pool1_id = pool1.get('pool').get('id') + self.set_lb_status(lb1_id) + api_l7p_a = self.create_l7policy( + listener1_id, + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + self.set_lb_status(lb1_id) + api_l7p_b = self.create_l7policy( + listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + position=2, redirect_pool_id=pool1_id).get(self.root_tag) + self.set_lb_status(lb1_id) + api_l7p_c = self.create_l7policy( + self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL, + redirect_url='http://localhost/').get(self.root_tag) + self.set_lb_status(lb1_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=api_l7p_c.get('project_id')) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['admin'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + policies = self.get( + self.L7POLICIES_PATH).json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assertEqual(3, len(policies)) + policy_id_actions = [(p.get('id'), p.get('action')) for p in policies] + self.assertIn((api_l7p_a.get('id'), api_l7p_a.get('action')), + policy_id_actions) + self.assertIn((api_l7p_b.get('id'), api_l7p_b.get('action')), + policy_id_actions) + self.assertIn((api_l7p_c.get('id'), api_l7p_c.get('action')), + policy_id_actions) + + def test_get_all_not_authorized(self): + self.create_l7policy(self.listener_id, + constants.L7POLICY_ACTION_REJECT, + ).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + policies = self.get(self.L7POLICIES_PATH, status=403).json + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, policies) + + def test_get_all_hides_deleted(self): + api_l7policy = self.create_l7policy( + self.listener_id, + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + + response = self.get(self.L7POLICIES_PATH) 
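+ # The policy is visible in the listing while ACTIVE; once it is + # flagged DELETED below, the listing must no longer include it.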
+        objects = response.json.get(self.root_tag_list)
+        self.assertEqual(len(objects), 1)
+        self.set_object_status(self.l7policy_repo, api_l7policy.get('id'),
+                               provisioning_status=constants.DELETED)
+        response = self.get(self.L7POLICIES_PATH)
+        objects = response.json.get(self.root_tag_list)
+        self.assertEqual(len(objects), 0)
+
+    def test_get_by_project_id(self):
+        project1_id = uuidutils.generate_uuid()
+        project2_id = uuidutils.generate_uuid()
+        lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1',
+                                        project_id=project1_id)
+        lb1_id = lb1.get('loadbalancer').get('id')
+        self.set_lb_status(lb1_id)
+        lb2 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb2',
+                                        project_id=project2_id)
+        lb2_id = lb2.get('loadbalancer').get('id')
+        self.set_lb_status(lb2_id)
+        listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80,
+                                         lb1_id)
+        listener1_id = listener1.get('listener').get('id')
+        self.set_lb_status(lb1_id)
+        listener2 = self.create_listener(constants.PROTOCOL_HTTP, 80,
+                                         lb2_id)
+        listener2_id = listener2.get('listener').get('id')
+        self.set_lb_status(lb2_id)
+        pool1 = self.create_pool(lb1_id, constants.PROTOCOL_HTTP,
+                                 constants.LB_ALGORITHM_ROUND_ROBIN)
+        pool1_id = pool1.get('pool').get('id')
+        self.set_lb_status(lb1_id)
+        api_l7p_a = self.create_l7policy(
+            listener1_id,
+            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
+        self.set_lb_status(lb1_id)
+        api_l7p_b = self.create_l7policy(
+            listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            position=2, redirect_pool_id=pool1_id).get(self.root_tag)
+        self.set_lb_status(lb1_id)
+        api_l7p_c = self.create_l7policy(
+            listener2_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url='http://localhost/').get(self.root_tag)
+        self.set_lb_status(lb2_id)
+        policies = self.get(
+            self.L7POLICIES_PATH,
+            params={'project_id': project1_id}).json.get(self.root_tag_list)
+
+        self.assertEqual(2, len(policies))
+        policy_id_actions = [(p.get('id'), p.get('action')) for p in policies]
+        self.assertIn((api_l7p_a.get('id'), api_l7p_a.get('action')),
+                      policy_id_actions)
+        self.assertIn((api_l7p_b.get('id'), api_l7p_b.get('action')),
+                      policy_id_actions)
+        policies = self.get(
+            self.L7POLICIES_PATH,
+            params={'project_id': project2_id}).json.get(self.root_tag_list)
+        self.assertEqual(1, len(policies))
+        policy_id_actions = [(p.get('id'), p.get('action')) for p in policies]
+        self.assertIn((api_l7p_c.get('id'), api_l7p_c.get('action')),
+                      policy_id_actions)
+
+    def test_get_all_sorted(self):
+        self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REJECT,
+            name='policy3').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            position=2, redirect_pool_id=self.pool_id,
+            name='policy2').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url='http://localhost/',
+            name='policy1').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        response = self.get(self.L7POLICIES_PATH,
+                            params={'sort': 'position:desc'})
+        policies_desc = response.json.get(self.root_tag_list)
+        response = self.get(self.L7POLICIES_PATH,
+                            params={'sort': 'position:asc'})
+        policies_asc = response.json.get(self.root_tag_list)
+
+        self.assertEqual(3, len(policies_desc))
+        self.assertEqual(3, len(policies_asc))
+
+        policy_id_names_desc = [(policy.get('id'), policy.get('position'))
+                                for policy in policies_desc]
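+        # The ascending list should be the exact mirror of the descending one.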
+        policy_id_names_asc = [(policy.get('id'), policy.get('position'))
+                               for policy in policies_asc]
+        self.assertEqual(policy_id_names_asc,
+                         list(reversed(policy_id_names_desc)))
+
+    def test_get_all_limited(self):
+        self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REJECT,
+            name='policy1').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            position=2, redirect_pool_id=self.pool_id,
+            name='policy2').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url='http://localhost/',
+            name='policy3').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        # First two -- should have 'next' link
+        first_two = self.get(self.L7POLICIES_PATH, params={'limit': 2}).json
+        objs = first_two[self.root_tag_list]
+        links = first_two[self.root_tag_links]
+        self.assertEqual(2, len(objs))
+        self.assertEqual(1, len(links))
+        self.assertEqual('next', links[0]['rel'])
+
+        # Third + off the end -- should have previous link
+        third = self.get(self.L7POLICIES_PATH, params={
+            'limit': 2,
+            'marker': first_two[self.root_tag_list][1]['id']}).json
+        objs = third[self.root_tag_list]
+        links = third[self.root_tag_links]
+        self.assertEqual(1, len(objs))
+        self.assertEqual(1, len(links))
+        self.assertEqual('previous', links[0]['rel'])
+
+        # Middle -- should have both links
+        middle = self.get(self.L7POLICIES_PATH, params={
+            'limit': 1,
+            'marker': first_two[self.root_tag_list][0]['id']}).json
+        objs = middle[self.root_tag_list]
+        links = middle[self.root_tag_links]
+        self.assertEqual(1, len(objs))
+        self.assertEqual(2, len(links))
+        self.assertCountEqual(['previous', 'next'],
+                              [link['rel'] for link in links])
+
+    def test_get_all_fields_filter(self):
+        self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REJECT,
+            name='policy1').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            position=2, redirect_pool_id=self.pool_id,
+            name='policy2').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url='http://localhost/',
+            name='policy3').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        l7pos = self.get(self.L7POLICIES_PATH, params={
+            'fields': ['id', 'project_id']}).json
+        for l7po in l7pos['l7policies']:
+            self.assertIn('id', l7po)
+            self.assertIn('project_id', l7po)
+            self.assertNotIn('description', l7po)
+
+    def test_get_one_fields_filter(self):
+        l7p1 = self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REJECT,
+            name='policy1').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        l7po = self.get(
+            self.L7POLICY_PATH.format(l7policy_id=l7p1.get('id')),
+            params={'fields': ['id', 'project_id']}).json.get(self.root_tag)
+        self.assertIn('id', l7po)
+        self.assertIn('project_id', l7po)
+        self.assertNotIn('description', l7po)
+
+    def test_get_all_filter(self):
+        policy1 = self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REJECT,
+            name='policy1').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            position=2, redirect_pool_id=self.pool_id,
+            name='policy2').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7policy(
+            self.listener_id,
+            constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url='http://localhost/',
+            name='policy3').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        l7pos = self.get(self.L7POLICIES_PATH, params={
+            'id': policy1['id']}).json
+        self.assertEqual(1, len(l7pos['l7policies']))
+        self.assertEqual(policy1['id'],
+                         l7pos['l7policies'][0]['id'])
+
+    def test_get_all_tags_filter(self):
+        policy1 = self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REJECT,
+            tags=['test_tag1', 'test_tag2']).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        policy2 = self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            position=2, redirect_pool_id=self.pool_id,
+            tags=['test_tag2', 'test_tag3']).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        policy3 = self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url='http://localhost/',
+            tags=['test_tag4', 'test_tag5']).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        policies = self.get(
+            self.L7POLICIES_PATH,
+            params={'tags': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(policies, list)
+        self.assertEqual(2, len(policies))
+        self.assertEqual(
+            [policy1.get('id'), policy2.get('id')],
+            [policy.get('id') for policy in policies]
+        )
+
+        policies = self.get(
+            self.L7POLICIES_PATH,
+            params={'tags': ['test_tag2', 'test_tag3']}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(policies, list)
+        self.assertEqual(1, len(policies))
+        self.assertEqual(
+            [policy2.get('id')],
+            [policy.get('id') for policy in policies]
+        )
+
+        policies = self.get(
+            self.L7POLICIES_PATH,
+            params={'tags-any': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(policies, list)
+        self.assertEqual(2, len(policies))
+        self.assertEqual(
+            [policy1.get('id'), policy2.get('id')],
+            [policy.get('id') for policy in policies]
+        )
+
+        policies = self.get(
+            self.L7POLICIES_PATH,
+            params={'not-tags': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(policies, list)
+        self.assertEqual(1, len(policies))
+        self.assertEqual(
+            [policy3.get('id')],
+            [policy.get('id') for policy in policies]
+        )
+
+        policies = self.get(
+            self.L7POLICIES_PATH,
+            params={'not-tags-any': ['test_tag2', 'test_tag4']}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(policies, list)
+        self.assertEqual(0, len(policies))
+
+        policies = self.get(
+            self.L7POLICIES_PATH,
+            params={'tags': 'test_tag2',
+                    'tags-any': ['test_tag1', 'test_tag3']}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(policies, list)
+        self.assertEqual(2, len(policies))
+        self.assertEqual(
+            [policy1.get('id'), policy2.get('id')],
+            [policy.get('id') for policy in policies]
+        )
+
+        policies = self.get(
+            self.L7POLICIES_PATH,
+            params={'tags': 'test_tag2', 'not-tags': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(policies, list)
+        self.assertEqual(0, len(policies))
+
+    def test_empty_get_all(self):
+        response = self.get(self.L7POLICIES_PATH).json.get(self.root_tag_list)
+        self.assertIsInstance(response, list)
+        self.assertEqual(0, len(response))
+
+    def test_create_reject_policy(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        self.assertEqual(constants.L7POLICY_ACTION_REJECT,
+                         api_l7policy['action'])
+        self.assertEqual(1, api_l7policy['position'])
+        self.assertIsNone(api_l7policy['redirect_pool_id'])
+        self.assertIsNone(api_l7policy['redirect_url'])
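+        # A REJECT policy carries no redirect target of either kind.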
+        self.assertTrue(api_l7policy['admin_state_up'])
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            l7policy_prov_status=constants.PENDING_CREATE,
+            l7policy_op_status=constants.OFFLINE)
+
+    def test_create_policy_authorized(self):
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': self.project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            api_l7policy = self.create_l7policy(
+                self.listener_id,
+                constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(constants.L7POLICY_ACTION_REJECT,
+                         api_l7policy['action'])
+        self.assertEqual(1, api_l7policy['position'])
+        self.assertIsNone(api_l7policy['redirect_pool_id'])
+        self.assertIsNone(api_l7policy['redirect_url'])
+        self.assertTrue(api_l7policy['admin_state_up'])
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            l7policy_prov_status=constants.PENDING_CREATE,
+            l7policy_op_status=constants.OFFLINE)
+
+    def test_create_policy_not_authorized(self):
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+
+        test_context = octavia.common.context.RequestContext(
+            project_id=self.project_id)
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            api_l7policy = self.create_l7policy(
+                self.listener_id,
+                constants.L7POLICY_ACTION_REJECT, status=403)
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_l7policy)
+
+    def test_create_redirect_to_pool(self):
+        api_l7policy = self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            redirect_pool_id=self.pool_id).get(self.root_tag)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+                         api_l7policy['action'])
+        self.assertEqual(1, api_l7policy['position'])
+        self.assertEqual(self.pool_id, api_l7policy['redirect_pool_id'])
+        self.assertIsNone(api_l7policy['redirect_url'])
+        self.assertTrue(api_l7policy['admin_state_up'])
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            l7policy_prov_status=constants.PENDING_CREATE,
+            l7policy_op_status=constants.OFFLINE)
+
+    def test_create_redirect_to_url(self):
+        api_l7policy = self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url='http://www.example.com/').get(self.root_tag)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                         api_l7policy['action'])
+        self.assertEqual(1, api_l7policy['position'])
+        self.assertIsNone(api_l7policy.get('redirect_pool_id'))
+        self.assertEqual('http://www.example.com/',
+                         api_l7policy['redirect_url'])
+        self.assertTrue(api_l7policy['admin_state_up'])
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            l7policy_prov_status=constants.PENDING_CREATE,
+            l7policy_op_status=constants.OFFLINE)
+
+    def test_create_with_redirect_http_code(self):
+        action_key_values = {
+            constants.L7POLICY_ACTION_REDIRECT_PREFIX: {
+                'redirect_prefix': 'https://example.com/',
+                'redirect_http_code': 302},
+            constants.L7POLICY_ACTION_REDIRECT_TO_URL: {
+                'redirect_url': 'http://www.example.com/',
+                'redirect_http_code': 301}}
+        count = 1
+        # First, test with redirect actions
+        for action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                       constants.L7POLICY_ACTION_REDIRECT_PREFIX]:
+            api_l7policy = self.create_l7policy(
+                self.listener_id, action,
+                **action_key_values[action]).get(self.root_tag)
+            self.assertEqual(action, api_l7policy['action'])
+            self.assertEqual(count, api_l7policy['position'])
+            self.assertIsNone(api_l7policy.get('redirect_pool_id'))
+            if api_l7policy.get('redirect_url'):
+                self.assertEqual(action_key_values[action]['redirect_url'],
+                                 api_l7policy['redirect_url'])
+            elif api_l7policy.get('redirect_prefix'):
+                self.assertEqual(action_key_values[action]['redirect_prefix'],
+                                 api_l7policy['redirect_prefix'])
+            self.assertEqual(action_key_values[action]['redirect_http_code'],
+                             api_l7policy['redirect_http_code'])
+            self.assert_correct_status(
+                lb_id=self.lb_id, listener_id=self.listener_id,
+                l7policy_id=api_l7policy.get('id'),
+                lb_prov_status=constants.PENDING_UPDATE,
+                listener_prov_status=constants.PENDING_UPDATE,
+                l7policy_prov_status=constants.PENDING_CREATE,
+                l7policy_op_status=constants.OFFLINE)
+            self.set_lb_status(self.lb_id)
+            count += 1
+
+        # test with redirect_pool action
+        api_l7policy = self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            redirect_pool_id=self.pool_id,
+            redirect_http_code=308).get(self.root_tag)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+                         api_l7policy['action'])
+        self.assertEqual(self.pool_id, api_l7policy.get('redirect_pool_id'))
+        self.assertIsNone(api_l7policy.get('redirect_url'))
+        self.assertIsNone(api_l7policy.get('redirect_prefix'))
+        self.assertIsNone(api_l7policy.get('redirect_http_code'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            l7policy_prov_status=constants.PENDING_CREATE,
+            l7policy_op_status=constants.OFFLINE)
+
+    def test_bad_create(self):
+        l7policy = {'listener_id': self.listener_id,
+                    'name': 'test1'}
+        self.post(self.L7POLICIES_PATH, self._build_body(l7policy), status=400)
+
+    def test_bad_create_redirect_to_pool(self):
+        l7policy = {
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            'listener_id': self.listener_id,
+            'redirect_pool_id': uuidutils.generate_uuid()}
+        self.post(self.L7POLICIES_PATH, self._build_body(l7policy), status=404)
+
+    def test_bad_create_redirect_to_url(self):
+        l7policy = {'listener_id': self.listener_id,
+                    'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                    'redirect_url': 'bad url'}
+        self.post(self.L7POLICIES_PATH, self._build_body(l7policy), status=400)
+
+    def test_bad_create_with_redirect_http_code(self):
+        for test_code in [1, '', 'HTTPCODE']:
+            l7policy = {'listener_id': self.listener_id,
+                        'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                        'redirect_url': 'http://www.example.com/',
+                        'redirect_http_code': test_code}
+            self.post(self.L7POLICIES_PATH, self._build_body(l7policy),
+                      status=400)
+
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_create_with_bad_provider(self, mock_provider):
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
+        l7policy = {'listener_id': self.listener_id,
+                    'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                    'redirect_url': 'http://a.com/'}
+        response = self.post(self.L7POLICIES_PATH, self._build_body(l7policy),
+                             status=500)
+        self.assertIn('Provider \'bad_driver\' reports error: broken',
+                      response.json.get('faultstring'))
+
+    def test_create_over_quota(self):
+        self.start_quota_mock(data_models.L7Policy)
+        l7policy = {'listener_id': self.listener_id,
+                    'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                    'redirect_url': 'http://a.com/'}
+        self.post(self.L7POLICIES_PATH, self._build_body(l7policy), status=403)
+
+    def test_negative_create_prometheus_listener(self):
+        prometheus_listener = self.create_listener(
+            lib_consts.PROTOCOL_PROMETHEUS, 8123, lb_id=self.lb_id)
+        prometheus_listener_id = prometheus_listener.get('listener').get('id')
+        self.set_lb_status(self.lb_id)
+        l7policy = {'listener_id': prometheus_listener_id, 'name': 'test1'}
+        self.post(self.L7POLICIES_PATH, self._build_body(l7policy), status=400)
+
+    def test_update(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            tags=['old_tag']
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'http://www.example.com/',
+            'tags': ['new_tag']}
+        response = self.put(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')),
+            self._build_body(new_l7policy)).json.get(self.root_tag)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                         response.get('action'))
+        self.assertEqual(['new_tag'], response['tags'])
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            l7policy_prov_status=constants.PENDING_UPDATE)
+
+    def test_update_authorized(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'http://www.example.com/'}
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
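+            # NOTE: the remaining fields complete an ordinary member token.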
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': self.project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            response = self.put(self.L7POLICY_PATH.format(
+                l7policy_id=api_l7policy.get('id')),
+                self._build_body(new_l7policy)).json.get(self.root_tag)
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                         response.get('action'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            l7policy_prov_status=constants.PENDING_UPDATE)
+
+    def test_update_not_authorized(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'http://www.example.com/'}
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+        test_context = octavia.common.context.RequestContext(
+            project_id=self.project_id)
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            response = self.put(self.L7POLICY_PATH.format(
+                l7policy_id=api_l7policy.get('id')),
+                self._build_body(new_l7policy), status=403)
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.ACTIVE,
+            listener_prov_status=constants.ACTIVE,
+            l7policy_prov_status=constants.ACTIVE)
+
+    def test_bad_update(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        new_l7policy = {'listener_id': self.listener_id,
+                        'action': 'bad action'}
+        self.put(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')),
+            self._build_body(new_l7policy), status=400)
+
+    def test_bad_update_redirect_to_pool(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        new_l7policy = {
+            'listener_id': self.listener_id,
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            'redirect_pool_id': uuidutils.generate_uuid()}
+        self.put(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')),
+            self._build_body(new_l7policy), status=400)
+
+    def test_bad_update_redirect_to_url(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        new_l7policy = {
+            'listener_id': self.listener_id,
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'bad url'}
+        self.put(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')),
+            self._build_body(new_l7policy), status=400)
+
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_update_with_bad_provider(self, mock_provider):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {
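+            # Update payload; the mocked provider below rejects the change.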
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'http://www.example.com/'}
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
+        response = self.put(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')),
+            self._build_body(new_l7policy), status=500)
+        self.assertIn('Provider \'bad_driver\' reports error: broken',
+                      response.json.get('faultstring'))
+
+    def test_update_redirect_to_pool_bad_pool_id(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {'redirect_pool_id': uuidutils.generate_uuid()}
+        self.put(self.L7POLICY_PATH.format(l7policy_id=api_l7policy.get('id')),
+                 self._build_body(new_l7policy), status=404)
+
+    def test_update_redirect_to_pool_minimal(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {'redirect_pool_id': self.pool_id}
+        self.put(self.L7POLICY_PATH.format(l7policy_id=api_l7policy.get('id')),
+                 self._build_body(new_l7policy))
+
+    def test_update_redirect_to_url_bad_url(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {'listener_id': self.listener_id,
+                        'redirect_url': 'bad-url'}
+        self.put(self.L7POLICY_PATH.format(l7policy_id=api_l7policy.get('id')),
+                 self._build_body(new_l7policy), status=400)
+
+    def test_update_redirect_to_url_minimal(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {'redirect_url': 'http://www.example.com/'}
+        self.put(self.L7POLICY_PATH.format(l7policy_id=api_l7policy.get('id')),
+                 self._build_body(new_l7policy))
+
+    def test_update_with_redirect_http_code(self):
+        # test adding a redirect_http_code where none was set
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'http://www.example.com/',
+            'redirect_http_code': 308}
+        response = self.put(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')),
+            self._build_body(new_l7policy)).json.get(self.root_tag)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                         response.get('action'))
+        self.assertEqual(308, response.get('redirect_http_code'))
+        self.set_lb_status(self.lb_id)
+
+        # test changing an existing redirect_http_code
+        api_l7policy = self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url='http://www.example.com/',
+            redirect_http_code=302).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {
+            'redirect_http_code': 308}
+        response = self.put(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')),
+            self._build_body(new_l7policy)).json.get(self.root_tag)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                         response.get('action'))
+        self.assertEqual(308, response.get('redirect_http_code'))
+        self.set_lb_status(self.lb_id)
+
+        # test clearing an existing redirect_http_code
+        new_l7policy = {
+            'redirect_http_code': None}
+        response = self.put(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')),
+            self._build_body(new_l7policy)).json.get(self.root_tag)
+        self.assertIsNone(response.get('redirect_http_code'))
+
+    def test_bad_update_with_redirect_http_code(self):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_l7policy = {
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'http://www.example.com/',
+            'redirect_http_code': ''}
+        self.put(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')),
+            self._build_body(new_l7policy), status=400).json.get(self.root_tag)
+
+    def test_delete(self):
+        api_l7policy = self.create_l7policy(
+            self.listener_id,
+            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
+        api_l7policy['provisioning_status'] = constants.ACTIVE
+        api_l7policy['operating_status'] = constants.ONLINE
+        api_l7policy.pop('updated_at')
+
+        response = self.get(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id'))).json.get(self.root_tag)
+        response.pop('updated_at')
+        self.assertEqual(api_l7policy, response)
+
+        self.delete(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            l7policy_prov_status=constants.PENDING_DELETE)
+
+    def test_delete_authorized(self):
+        api_l7policy = self.create_l7policy(
+            self.listener_id,
+            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
+        api_l7policy['provisioning_status'] = constants.ACTIVE
+        api_l7policy['operating_status'] = constants.ONLINE
+        api_l7policy.pop('updated_at')
+
+        response = self.get(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id'))).json.get(self.root_tag)
+        response.pop('updated_at')
+        self.assertEqual(api_l7policy, response)
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': self.project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+
+            self.delete(self.L7POLICY_PATH.format(
+                l7policy_id=api_l7policy.get('id')))
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            l7policy_prov_status=constants.PENDING_DELETE)
+
+    def test_delete_not_authorized(self):
+        api_l7policy = self.create_l7policy(
+            self.listener_id,
+            constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
+        api_l7policy['provisioning_status'] = constants.ACTIVE
+        api_l7policy['operating_status'] = constants.ONLINE
+        api_l7policy.pop('updated_at')
+
+        response = self.get(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id'))).json.get(self.root_tag)
+        response.pop('updated_at')
+        self.assertEqual(api_l7policy, response)
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+        test_context = octavia.common.context.RequestContext(
+            project_id=uuidutils.generate_uuid())
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            self.delete(self.L7POLICY_PATH.format(
+                l7policy_id=api_l7policy.get('id')), status=403)
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=api_l7policy.get('id'),
+            lb_prov_status=constants.ACTIVE,
+            listener_prov_status=constants.ACTIVE,
+            l7policy_prov_status=constants.ACTIVE)
+
+    def test_bad_delete(self):
+        self.delete(self.L7POLICY_PATH.format(
+            l7policy_id=uuidutils.generate_uuid()), status=404)
+
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_delete_with_bad_provider(self, mock_provider):
+        api_l7policy = self.create_l7policy(self.listener_id,
+                                            constants.L7POLICY_ACTION_REJECT,
+                                            ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
+        api_l7policy['provisioning_status'] = constants.ACTIVE
+        api_l7policy['operating_status'] = constants.ONLINE
+        response = self.get(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id'))).json.get(self.root_tag)
+
+        self.assertIsNone(api_l7policy.pop('updated_at'))
+        self.assertIsNotNone(response.pop('updated_at'))
+        self.assertEqual(api_l7policy, response)
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
+        self.delete(self.L7POLICY_PATH.format(
+            l7policy_id=api_l7policy.get('id')), status=500)
+
+    def test_create_when_lb_pending_update(self):
+        self.create_l7policy(self.listener_id,
+                             constants.L7POLICY_ACTION_REJECT,
+                             )
+        self.set_lb_status(self.lb_id)
+        self.put(self.LB_PATH.format(lb_id=self.lb_id),
+                 body={'loadbalancer': {'name': 'test_name_change'}})
+        new_l7policy = {
+            'listener_id': self.listener_id,
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'http://www.example.com/'}
+        self.post(self.L7POLICIES_PATH, body=self._build_body(new_l7policy),
+                  status=409)
+
+    def test_update_when_lb_pending_update(self):
+        l7policy = self.create_l7policy(self.listener_id,
+                                        constants.L7POLICY_ACTION_REJECT,
+                                        ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.put(self.LB_PATH.format(lb_id=self.lb_id),
+                 body={'loadbalancer': {'name': 'test_name_change'}})
+        new_l7policy = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                        'redirect_url': 'http://www.example.com/'}
+        self.put(self.L7POLICY_PATH.format(
+            l7policy_id=l7policy.get('id')),
+            body=self._build_body(new_l7policy), status=409)
+
+    def test_delete_when_lb_pending_update(self):
+        l7policy = self.create_l7policy(self.listener_id,
+                                        constants.L7POLICY_ACTION_REJECT,
+                                        ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.put(self.LB_PATH.format(lb_id=self.lb_id),
+                 body={'loadbalancer': {'name': 'test_name_change'}})
+        self.delete(self.L7POLICY_PATH.format(
+            l7policy_id=l7policy.get('id')),
+            status=409)
+
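+    # A cascade-deleting load balancer must also reject child changes (409).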
+    def test_create_when_lb_pending_delete(self):
+        self.delete(self.LB_PATH.format(lb_id=self.lb_id),
+                    params={'cascade': "true"})
+        new_l7policy = {
+            'listener_id': self.listener_id,
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'http://www.example.com/'}
+        self.post(self.L7POLICIES_PATH, body=self._build_body(new_l7policy),
+                  status=409)
+
+    def test_update_when_lb_pending_delete(self):
+        l7policy = self.create_l7policy(self.listener_id,
+                                        constants.L7POLICY_ACTION_REJECT,
+                                        ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.delete(self.LB_PATH.format(lb_id=self.lb_id),
+                    params={'cascade': "true"})
+        new_l7policy = {
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'http://www.example.com/'}
+        self.put(self.L7POLICY_PATH.format(
+            l7policy_id=l7policy.get('id')),
+            body=self._build_body(new_l7policy), status=409)
+
+    def test_delete_when_lb_pending_delete(self):
+        l7policy = self.create_l7policy(self.listener_id,
+                                        constants.L7POLICY_ACTION_REJECT,
+                                        ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.delete(self.LB_PATH.format(lb_id=self.lb_id),
+                    params={'cascade': "true"})
+        self.delete(self.L7POLICY_PATH.format(
+            l7policy_id=l7policy.get('id')),
+            status=409)
+
+    def test_update_already_deleted(self):
+        l7policy = self.create_l7policy(self.listener_id,
+                                        constants.L7POLICY_ACTION_REJECT,
+                                        ).get(self.root_tag)
+        # This updates the child objects
+        self.set_lb_status(self.lb_id, status=constants.DELETED)
+        new_l7policy = {
+            'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            'redirect_url': 'http://www.example.com/'}
+        self.put(self.L7POLICY_PATH.format(l7policy_id=l7policy.get('id')),
+                 body=self._build_body(new_l7policy), status=404)
+
+    def test_delete_already_deleted(self):
+        l7policy = self.create_l7policy(self.listener_id,
+                                        constants.L7POLICY_ACTION_REJECT,
+                                        ).get(self.root_tag)
+        # This updates the child objects
+        self.set_lb_status(self.lb_id, status=constants.DELETED)
+        self.delete(self.L7POLICY_PATH.format(
+            l7policy_id=l7policy.get('id')),
+            status=404)
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_listener_pool_protocol_map_post(self, mock_cert_data):
+        cert = data_models.TLSContainer(certificate='cert')
+        mock_cert_data.return_value = {'sni_certs': [cert]}
+        valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 1
+        l7policy = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL}
+        for listener_proto in valid_map:
+            for pool_proto in valid_map[listener_proto]:
+                port = port + 1
+                opts = {}
+                if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                    opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+                listener = self.create_listener(
+                    listener_proto, port, self.lb_id, **opts).get('listener')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                pool = self.create_pool(
+                    self.lb_id, pool_proto,
+                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+
+                l7policy['listener_id'] = listener.get('id')
+                l7policy['redirect_pool_id'] = pool.get('id')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                self.post(self.L7POLICIES_PATH,
+                          self._build_body(l7policy), status=201)
+                self.set_object_status(self.lb_repo, self.lb_id)
+
+        invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 100
+        for listener_proto in invalid_map:
+            opts = {}
+            if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+            listener = self.create_listener(
+                listener_proto, port, self.lb_id, **opts).get('listener')
+            self.set_object_status(self.lb_repo, self.lb_id)
+            port = port + 1
+            for pool_proto in invalid_map[listener_proto]:
+                if pool_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                    pool = self.create_pool(
+                        self.lb_id, pool_proto,
+                        constants.LB_ALGORITHM_ROUND_ROBIN, status=400)
+                    self.assertIn("Invalid input", pool['faultstring'])
+                else:
+                    pool = self.create_pool(
+                        self.lb_id, pool_proto,
+                        constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+                    self.set_object_status(self.lb_repo, self.lb_id)
+
+                    l7policy['listener_id'] = listener.get('id')
+                    l7policy['redirect_pool_id'] = pool.get('id')
+                    expect_error_msg = (
+                        "Validation failure: The pool protocol '%s' is "
+                        "invalid while the listener protocol is '%s'.") % (
+                        pool_proto, listener_proto)
+                    res = self.post(self.L7POLICIES_PATH,
+                                    self._build_body(l7policy), status=400)
+                    self.assertEqual(expect_error_msg, res.json['faultstring'])
+                    self.assert_correct_status(lb_id=self.lb_id)
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_listener_pool_protocol_map_put(self, mock_cert_data):
+        cert = data_models.TLSContainer(certificate='cert')
+        mock_cert_data.return_value = {'sni_certs': [cert]}
+        valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 1
+        new_l7policy = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL}
+        for listener_proto in valid_map:
+            for pool_proto in valid_map[listener_proto]:
+                port = port + 1
+                opts = {}
+                if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                    opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+                listener = self.create_listener(
+                    listener_proto, port, self.lb_id, **opts).get('listener')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                pool = self.create_pool(
+                    self.lb_id, pool_proto,
+                    constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+                self.set_object_status(self.lb_repo, self.lb_id)
+                l7policy = self.create_l7policy(
+                    listener.get('id'),
+                    constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
+                self.set_object_status(self.lb_repo, self.lb_id)
+                new_l7policy['redirect_pool_id'] = pool.get('id')
+
+                self.put(
+                    self.L7POLICY_PATH.format(l7policy_id=l7policy.get('id')),
+                    self._build_body(new_l7policy), status=200)
+                self.set_object_status(self.lb_repo, self.lb_id)
+
+        invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP
+        port = 100
+        for listener_proto in invalid_map:
+            opts = {}
+            if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                opts['sni_container_refs'] = [uuidutils.generate_uuid()]
+            listener = self.create_listener(
+                listener_proto, port, self.lb_id, **opts).get('listener')
+            self.set_object_status(self.lb_repo, self.lb_id)
+            port = port + 1
+            for pool_proto in invalid_map[listener_proto]:
+                if pool_proto == constants.PROTOCOL_TERMINATED_HTTPS:
+                    pool = self.create_pool(
+                        self.lb_id, pool_proto,
+                        constants.LB_ALGORITHM_ROUND_ROBIN, status=400)
+                    self.assertIn("Invalid input", pool['faultstring'])
+                else:
+                    pool = self.create_pool(
+                        self.lb_id, pool_proto,
+                        constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+                    self.set_object_status(self.lb_repo, self.lb_id)
+                    l7policy = self.create_l7policy(
+                        listener.get('id'),
+                        constants.L7POLICY_ACTION_REJECT).get(self.root_tag)
+                    self.set_object_status(self.lb_repo, self.lb_id)
+                    new_l7policy['redirect_pool_id'] = pool.get('id')
+                    expect_error_msg = (
+                        "Validation failure: The pool protocol '%s' is "
+                        "invalid while the listener protocol is '%s'.") % (
+                        pool_proto, listener_proto)
+                    res = self.put(self.L7POLICY_PATH.format(
+                        l7policy_id=l7policy.get('id')),
+                        self._build_body(new_l7policy), status=400)
+                    self.assertEqual(expect_error_msg, res.json['faultstring'])
+                    self.assert_correct_status(lb_id=self.lb_id)
diff --git a/octavia/tests/functional/api/v2/test_l7rule.py b/octavia/tests/functional/api/v2/test_l7rule.py
new file mode 100644
index 0000000000..0c1fc81679
--- /dev/null
+++ b/octavia/tests/functional/api/v2/test_l7rule.py
@@ -0,0 +1,1319 @@
+# Copyright 2016 Blue Box, an IBM Company
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from unittest import mock
+
+from oslo_config import cfg
+from oslo_config import fixture as oslo_fixture
+from oslo_utils import uuidutils
+
+from octavia.common import constants
+import octavia.common.context
+from octavia.common import data_models
+from octavia.common import exceptions
+from octavia.db import repositories
+from octavia.tests.functional.api.v2 import base
+
+
+class TestL7Rule(base.BaseAPITest):
+
+    root_tag = 'rule'
+    root_tag_list = 'rules'
+    root_tag_links = 'rules_links'
+
+    def setUp(self):
+        super().setUp()
+        self.lb = self.create_load_balancer(uuidutils.generate_uuid())
+        self.lb_id = self.lb.get('loadbalancer').get('id')
+        self.project_id = self.lb.get('loadbalancer').get('project_id')
+        self.set_lb_status(self.lb_id)
+        self.listener = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb_id=self.lb_id)
+        self.listener_id = self.listener.get('listener').get('id')
+        self.set_lb_status(self.lb_id)
+        self.l7policy = self.create_l7policy(
+            self.listener_id, constants.L7POLICY_ACTION_REJECT)
+        self.l7policy_id = self.l7policy.get('l7policy').get('id')
+        self.set_lb_status(self.lb_id)
+        self.l7rules_path = self.L7RULES_PATH.format(
+            l7policy_id=self.l7policy_id)
+        self.l7rule_path = self.l7rules_path + '/{l7rule_id}'
+        self.l7policy_repo = repositories.L7PolicyRepository()
+
+    def test_get(self):
+        l7rule = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api', tags=['test_tag']).get(self.root_tag)
+        response = self.get(self.l7rule_path.format(
+            l7rule_id=l7rule.get('id'))).json.get(self.root_tag)
+        self.assertEqual(l7rule, response)
+
+    def test_get_authorized(self):
+        l7rule = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': self.project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            response = self.get(self.l7rule_path.format(
+                l7rule_id=l7rule.get('id'))).json.get(self.root_tag)
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(l7rule, response)
+
+    def test_get_not_authorized(self):
+        l7rule = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+        test_context = octavia.common.context.RequestContext(
+            project_id=self.project_id)
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            response = self.get(self.l7rule_path.format(
+                l7rule_id=l7rule.get('id')), status=403).json
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, response)
+
+    def test_get_deleted_gives_404(self):
+        api_l7rule = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+
+        self.set_object_status(self.l7rule_repo, api_l7rule.get('id'),
+                               provisioning_status=constants.DELETED)
+        self.get(self.l7rule_path.format(l7rule_id=api_l7rule.get('id')),
+                 status=404)
+
+    def test_get_bad_parent_policy(self):
+        bad_path = (self.L7RULES_PATH.format(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            l7policy_id=uuidutils.generate_uuid()) + '/' +
+            uuidutils.generate_uuid())
+        self.get(bad_path, status=404)
+
+    def test_bad_get(self):
+        self.get(self.l7rule_path.format(
+            l7rule_id=uuidutils.generate_uuid()), status=404)
+
+    def test_get_all(self):
+        api_l7r_a = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api', tags=['test_tag1']).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        api_l7r_b = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_COOKIE,
+            constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value',
+            key='some-cookie', tags=['test_tag2']).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        rules = self.get(self.l7rules_path).json.get(self.root_tag_list)
+        self.assertIsInstance(rules, list)
+        self.assertEqual(2, len(rules))
+        rule_id_types = [(r.get('id'), r.get('type'),
+                          r['tags']) for r in rules]
+        self.assertIn((api_l7r_a.get('id'), api_l7r_a.get('type'),
+                       api_l7r_a['tags']),
+                      rule_id_types)
+        self.assertIn((api_l7r_b.get('id'), api_l7r_b.get('type'),
+                       api_l7r_b['tags']),
+                      rule_id_types)
+
+    def test_get_all_authorized(self):
+        api_l7r_a = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        api_l7r_b = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_COOKIE,
+            constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value',
+            key='some-cookie').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': self.project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            rules = self.get(
+                self.l7rules_path).json.get(self.root_tag_list)
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertIsInstance(rules, list)
+        self.assertEqual(2, len(rules))
+        rule_id_types = [(r.get('id'), r.get('type')) for r in rules]
+        self.assertIn((api_l7r_a.get('id'), api_l7r_a.get('type')),
+                      rule_id_types)
+        self.assertIn((api_l7r_b.get('id'), api_l7r_b.get('type')),
+                      rule_id_types)
+
+    def test_get_all_unscoped_token(self):
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_COOKIE,
+            constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value',
+            key='some-cookie').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': None}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            result = self.get(self.l7rules_path, status=403).json
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, result)
+
+    def test_get_all_not_authorized(self):
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_COOKIE,
+            constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value',
+            key='some-cookie').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
+        test_context = octavia.common.context.RequestContext(
+            project_id=self.project_id)
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            rules = self.get(self.l7rules_path, status=403)
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, rules.json)
+
+    def test_get_all_sorted(self):
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_COOKIE,
+            constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value',
+            key='some-cookie').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME,
+            constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
+            'www.example.com').get(self.root_tag)
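+        # Three rules with distinct types make the 'type' sort deterministic.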
+        self.set_lb_status(self.lb_id)
+
+        response = self.get(self.l7rules_path,
+                            params={'sort': 'type:desc'})
+        rules_desc = response.json.get(self.root_tag_list)
+        response = self.get(self.l7rules_path,
+                            params={'sort': 'type:asc'})
+        rules_asc = response.json.get(self.root_tag_list)
+
+        self.assertEqual(3, len(rules_desc))
+        self.assertEqual(3, len(rules_asc))
+
+        rule_id_types_desc = [(rule.get('id'), rule.get('type'))
+                              for rule in rules_desc]
+        rule_id_types_asc = [(rule.get('id'), rule.get('type'))
+                             for rule in rules_asc]
+        self.assertEqual(rule_id_types_asc,
+                         list(reversed(rule_id_types_desc)))
+
+    def test_get_all_limited(self):
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_COOKIE,
+            constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value',
+            key='some-cookie').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME,
+            constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
+            'www.example.com').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        # First two -- should have 'next' link
+        first_two = self.get(self.l7rules_path, params={'limit': 2}).json
+        objs = first_two[self.root_tag_list]
+        links = first_two[self.root_tag_links]
+        self.assertEqual(2, len(objs))
+        self.assertEqual(1, len(links))
+        self.assertEqual('next', links[0]['rel'])
+
+        # Third + off the end -- should have previous link
+        third = self.get(self.l7rules_path, params={
+            'limit': 2,
+            'marker': first_two[self.root_tag_list][1]['id']}).json
+        objs = third[self.root_tag_list]
+        links = third[self.root_tag_links]
+        self.assertEqual(1, len(objs))
+        self.assertEqual(1, len(links))
+        self.assertEqual('previous', links[0]['rel'])
+
+        # Middle -- should have both links
+        middle = self.get(self.l7rules_path, params={
+            'limit': 1,
+            'marker': first_two[self.root_tag_list][0]['id']}).json
+        objs = middle[self.root_tag_list]
+        links = middle[self.root_tag_links]
+        self.assertEqual(1, len(objs))
+        self.assertEqual(2, len(links))
+        self.assertCountEqual(['previous', 'next'],
+                              [link['rel'] for link in links])
+
+    def test_get_all_fields_filter(self):
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_COOKIE,
+            constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value',
+            key='some-cookie').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME,
+            constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
+            'www.example.com').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        l7rus = self.get(self.l7rules_path, params={
+            'fields': ['id', 'compare_type']}).json
+        for l7ru in l7rus['rules']:
+            self.assertIn('id', l7ru)
+            self.assertIn('compare_type', l7ru)
+            self.assertNotIn('project_id', l7ru)
+
+    def test_get_one_fields_filter(self):
+        l7r1 = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        l7ru = self.get(
+            self.l7rule_path.format(l7rule_id=l7r1.get('id')),
+            params={'fields': ['id', 'compare_type']}).json.get(self.root_tag)
+        self.assertIn('id', l7ru)
+        self.assertIn('compare_type', l7ru)
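+        # Fields not named in the filter must be absent from the response.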
+        self.assertNotIn('project_id', l7ru)
+
+    def test_get_all_filter(self):
+        ru1 = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_COOKIE,
+            constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value',
+            key='some-cookie').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME,
+            constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
+            'www.example.com').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        l7rus = self.get(self.l7rules_path, params={
+            'id': ru1['id']}).json
+
+        self.assertEqual(1, len(l7rus['rules']))
+        self.assertEqual(ru1['id'],
+                         l7rus['rules'][0]['id'])
+
+    def test_get_all_tags_filter(self):
+        rule1 = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_PATH,
+            constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            '/api', tags=['test_tag1', 'test_tag2']).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        rule2 = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_COOKIE,
+            constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value',
+            key='some-cookie',
+            tags=['test_tag2', 'test_tag3']).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        rule3 = self.create_l7rule(
+            self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME,
+            constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
+            'www.example.com',
+            tags=['test_tag4', 'test_tag5']).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        rules = self.get(
+            self.l7rules_path,
+            params={'tags': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(rules, list)
+        self.assertEqual(2, len(rules))
+        self.assertEqual(
+            [rule1.get('id'), rule2.get('id')],
+            [rule.get('id') for rule in rules]
+        )
+
+        rules = self.get(
+            self.l7rules_path,
+            params={'tags': ['test_tag2', 'test_tag3']}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(rules, list)
+        self.assertEqual(1, len(rules))
+        self.assertEqual(
+            [rule2.get('id')],
+            [rule.get('id') for rule in rules]
+        )
+
+        rules = self.get(
+            self.l7rules_path,
+            params={'tags-any': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(rules, list)
+        self.assertEqual(2, len(rules))
+        self.assertEqual(
+            [rule1.get('id'), rule2.get('id')],
+            [rule.get('id') for rule in rules]
+        )
+
+        rules = self.get(
+            self.l7rules_path,
+            params={'not-tags': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(rules, list)
+        self.assertEqual(1, len(rules))
+        self.assertEqual(
+            [rule3.get('id')],
+            [rule.get('id') for rule in rules]
+        )
+
+        rules = self.get(
+            self.l7rules_path,
+            params={'not-tags-any': ['test_tag2', 'test_tag4']}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(rules, list)
+        self.assertEqual(0, len(rules))
+
+        rules = self.get(
+            self.l7rules_path,
+            params={'tags': 'test_tag2',
+                    'tags-any': ['test_tag1', 'test_tag3']}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(rules, list)
+        self.assertEqual(2, len(rules))
+        self.assertEqual(
+            [rule1.get('id'), rule2.get('id')],
+            [rule.get('id') for rule in rules]
+        )
+
+        rules = self.get(
+            self.l7rules_path,
+            params={'tags': 'test_tag2', 'not-tags': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(rules, list)
+        self.assertEqual(0, len(rules))
+
+    def test_empty_get_all(self):
+        response = self.get(self.l7rules_path).json.get(self.root_tag_list)
+        self.assertIsInstance(response, list)
len(response)) + + def test_get_all_hides_deleted(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + + response = self.get(self.l7rules_path) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 1) + self.set_object_status(self.l7rule_repo, api_l7rule.get('id'), + provisioning_status=constants.DELETED) + response = self.get(self.l7rules_path) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 0) + + def test_create_host_name_rule(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, + constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + 'www.example.com').get(self.root_tag) + self.assertEqual(constants.L7RULE_TYPE_HOST_NAME, + api_l7rule.get('type')) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + api_l7rule.get('compare_type')) + self.assertEqual('www.example.com', api_l7rule.get('value')) + self.assertIsNone(api_l7rule.get('key')) + self.assertFalse(api_l7rule.get('invert')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_CREATE, + l7rule_op_status=constants.OFFLINE) + + def test_create_rule_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, + constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + 'www.example.com').get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(constants.L7RULE_TYPE_HOST_NAME, + api_l7rule.get('type')) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + api_l7rule.get('compare_type')) + self.assertEqual('www.example.com', api_l7rule.get('value')) + self.assertIsNone(api_l7rule.get('key')) + self.assertFalse(api_l7rule.get('invert')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_CREATE, + l7rule_op_status=constants.OFFLINE) + + def test_create_rule_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with 
mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_HOST_NAME, + constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + 'www.example.com', status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_l7rule) + + def test_create_l7policy_in_error(self): + l7policy = self.create_l7policy( + self.listener_id, constants.L7POLICY_ACTION_REJECT) + l7policy_id = l7policy.get('l7policy').get('id') + self.set_lb_status(self.lb_id) + self.set_object_status(self.l7policy_repo, l7policy_id, + provisioning_status=constants.ERROR) + api_l7rule = self.create_l7rule( + l7policy_id, constants.L7RULE_TYPE_HOST_NAME, + constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + 'www.example.com', status=409) + ref_msg = f'L7Policy {l7policy_id} is immutable and cannot be updated.' + self.assertEqual(ref_msg, api_l7rule.get('faultstring')) + + def test_create_path_rule(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, '/api', + invert=True).get(self.root_tag) + self.assertEqual(constants.L7RULE_TYPE_PATH, api_l7rule.get('type')) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + api_l7rule.get('compare_type')) + self.assertEqual('/api', api_l7rule.get('value')) + self.assertIsNone(api_l7rule.get('key')) + self.assertTrue(api_l7rule.get('invert')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_CREATE, + l7rule_op_status=constants.OFFLINE) + + def test_create_file_type_rule(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_FILE_TYPE, + constants.L7RULE_COMPARE_TYPE_REGEX, 'jpg|png').get(self.root_tag) + self.assertEqual(constants.L7RULE_TYPE_FILE_TYPE, + api_l7rule.get('type')) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_REGEX, + api_l7rule.get('compare_type')) + self.assertEqual('jpg|png', api_l7rule.get('value')) + self.assertIsNone(api_l7rule.get('key')) + self.assertFalse(api_l7rule.get('invert')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_CREATE, + l7rule_op_status=constants.OFFLINE) + + def test_create_header_rule(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_HEADER, + constants.L7RULE_COMPARE_TYPE_ENDS_WITH, '"some string"', + key='Some-header').get(self.root_tag) + self.assertEqual(constants.L7RULE_TYPE_HEADER, api_l7rule.get('type')) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + api_l7rule.get('compare_type')) + self.assertEqual('"some string"', api_l7rule.get('value')) + self.assertEqual('Some-header', api_l7rule.get('key')) + self.assertFalse(api_l7rule.get('invert')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + 
l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_CREATE, + l7rule_op_status=constants.OFFLINE) + + def test_create_cookie_rule(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_COOKIE, + constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', + key='some-cookie').get(self.root_tag) + self.assertEqual(constants.L7RULE_TYPE_COOKIE, api_l7rule.get('type')) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_CONTAINS, + api_l7rule.get('compare_type')) + self.assertEqual('some-value', api_l7rule.get('value')) + self.assertEqual('some-cookie', api_l7rule.get('key')) + self.assertFalse(api_l7rule.get('invert')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_CREATE, + l7rule_op_status=constants.OFFLINE) + + @mock.patch('octavia.common.constants.MAX_L7RULES_PER_L7POLICY', new=2) + def test_create_too_many_rules(self): + for i in range(0, constants.MAX_L7RULES_PER_L7POLICY): + self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + body = {'type': constants.L7RULE_TYPE_PATH, + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + 'value': '/api'} + self.post(self.l7rules_path, self._build_body(body), status=409) + + def test_bad_create(self): + l7rule = {'name': 'test1'} + self.post(self.l7rules_path, self._build_body(l7rule), status=400) + + def test_bad_create_host_name_rule(self): + l7rule = {'type': constants.L7RULE_TYPE_HOST_NAME, + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH} + self.post(self.l7rules_path, self._build_body(l7rule), status=400) + + def test_bad_create_path_rule(self): + l7rule = {'type': constants.L7RULE_TYPE_PATH, + 'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, + 'value': 'bad string\\'} + self.post(self.l7rules_path, self._build_body(l7rule), status=400) + + def test_bad_create_file_type_rule(self): + l7rule = {'type': constants.L7RULE_TYPE_FILE_TYPE, + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + 'value': 'png'} + self.post(self.l7rules_path, self._build_body(l7rule), status=400) + + def test_bad_create_header_rule(self): + l7rule = {'type': constants.L7RULE_TYPE_HEADER, + 'compare_type': constants.L7RULE_COMPARE_TYPE_CONTAINS, + 'value': 'some-string'} + self.post(self.l7rules_path, self._build_body(l7rule), status=400) + + def test_bad_create_cookie_rule(self): + l7rule = {'type': constants.L7RULE_TYPE_COOKIE, + 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + 'key': 'bad cookie name', + 'value': 'some-string'} + self.post(self.l7rules_path, self._build_body(l7rule), status=400) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_create_with_bad_provider(self, mock_provider): + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + l7rule = {'compare_type': 'REGEX', + 'invert': False, + 'type': 'PATH', + 'value': '/images*', + 'admin_state_up': True} + response = self.post(self.l7rules_path, self._build_body(l7rule), + status=500) + self.assertIn('Provider \'bad_driver\' reports error: broken', + response.json.get('faultstring')) + + def test_create_with_ssl_rule_types(self): + test_mapping = { + 
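# One minimal valid request body per SSL-specific rule type +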
constants.L7RULE_TYPE_SSL_CONN_HAS_CERT: { + 'value': 'tRuE', + 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO}, + constants.L7RULE_TYPE_SSL_VERIFY_RESULT: { + 'value': '0', + 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO}, + constants.L7RULE_TYPE_SSL_DN_FIELD: { + 'key': 'st-1', 'value': 'ST-FIELD1-PREFIX', + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH} + } + for l7rule_type, test_body in test_mapping.items(): + self.set_lb_status(self.lb_id) + test_body.update({'type': l7rule_type}) + api_l7rule = self.create_l7rule( + self.l7policy_id, l7rule_type, + test_body['compare_type'], test_body['value'], + key=test_body.get('key')).get(self.root_tag) + self.assertEqual(l7rule_type, api_l7rule.get('type')) + self.assertEqual(test_body['compare_type'], + api_l7rule.get('compare_type')) + self.assertEqual(test_body['value'], api_l7rule.get('value')) + if test_body.get('key'): + self.assertEqual(test_body['key'], api_l7rule.get('key')) + self.assertFalse(api_l7rule.get('invert')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_CREATE, + l7rule_op_status=constants.OFFLINE) + + def _test_bad_cases_with_ssl_rule_types(self, is_create=True, + rule_id=None): + if is_create: + req_func = self.post + first_req_arg = self.l7rules_path + else: + req_func = self.put + first_req_arg = self.l7rule_path.format(l7rule_id=rule_id) + + # test bad cases of L7RULE_TYPE_SSL_CONN_HAS_CERT + l7rule = {'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + 'invert': False, + 'type': constants.L7RULE_TYPE_SSL_CONN_HAS_CERT, + 'value': 'true', + 'admin_state_up': True, + 'key': 'no-need-key'} + response = req_func(first_req_arg, self._build_body(l7rule), + status=400).json + self.assertIn('L7rule type {} does not use the "key" field.'.format( + constants.L7RULE_TYPE_SSL_CONN_HAS_CERT), + response.get('faultstring')) + + l7rule.pop('key') + l7rule['value'] = 'not-true-string' + response = req_func(first_req_arg, self._build_body(l7rule), + status=400).json + self.assertIn( + 'L7rule value {} is not a boolean True string.'.format( + l7rule['value']), response.get('faultstring')) + + l7rule['value'] = 'tRUe' + l7rule['compare_type'] = constants.L7RULE_COMPARE_TYPE_STARTS_WITH + response = req_func(first_req_arg, self._build_body(l7rule), + status=400).json + self.assertIn( + 'L7rule type {} only supports the {} compare type.'.format( + constants.L7RULE_TYPE_SSL_CONN_HAS_CERT, + constants.L7RULE_COMPARE_TYPE_EQUAL_TO), + response.get('faultstring')) + + # test bad cases of L7RULE_TYPE_SSL_VERIFY_RESULT + l7rule = {'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + 'invert': False, + 'type': constants.L7RULE_TYPE_SSL_VERIFY_RESULT, + 'value': 'true', + 'admin_state_up': True, + 'key': 'no-need-key'} + response = req_func(first_req_arg, self._build_body(l7rule), + status=400).json + self.assertIn( + 'L7rule type {} does not use the "key" field.'.format( + l7rule['type']), response.get('faultstring')) + + l7rule.pop('key') + response = req_func(first_req_arg, self._build_body(l7rule), + status=400).json + self.assertIn( + 'L7rule type {} needs a int value, which is >= 0'.format( + l7rule['type']), response.get('faultstring')) + + l7rule['value'] = '0' + l7rule['compare_type'] = constants.L7RULE_COMPARE_TYPE_STARTS_WITH +
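# A valid value with a non-EQUAL_TO compare type must still be rejected +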
response = req_func(first_req_arg, self._build_body(l7rule), + status=400).json + self.assertIn( + 'L7rule type {} only supports the {} compare type.'.format( + l7rule['type'], constants.L7RULE_COMPARE_TYPE_EQUAL_TO), + response.get('faultstring')) + + # test bad cases of L7RULE_TYPE_SSL_DN_FIELD + l7rule = {'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, + 'invert': False, + 'type': constants.L7RULE_TYPE_SSL_DN_FIELD, + 'value': 'bad regex\\', + 'admin_state_up': True} + # This case just tests that parsing the regex from the value fails + req_func(first_req_arg, self._build_body(l7rule), status=400) + + l7rule['value'] = '^.test*$' + response = req_func(first_req_arg, self._build_body(l7rule), + status=400).json + self.assertIn( + 'L7rule type {} needs to specify a key and a value.'.format( + l7rule['type']), response.get('faultstring')) + + l7rule['key'] = 'NOT_SUPPORTED_DN_FIELD' + response = req_func(first_req_arg, self._build_body(l7rule), + status=400).json + self.assertIn('Invalid L7rule distinguished name field.', + response.get('faultstring')) + + def test_create_bad_cases_with_ssl_rule_types(self): + self._test_bad_cases_with_ssl_rule_types() + + def test_create_over_quota(self): + self.start_quota_mock(data_models.L7Rule) + l7rule = {'compare_type': 'REGEX', + 'invert': False, + 'type': 'PATH', + 'value': '/images*', + 'admin_state_up': True} + self.post(self.l7rules_path, self._build_body(l7rule), status=403) + + def test_update(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api', tags=['old_tag']).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_l7rule = {'value': '/images', 'tags': ['new_tag']} + response = self.put(self.l7rule_path.format( + l7rule_id=api_l7rule.get('id')), + self._build_body(new_l7rule)).json.get(self.root_tag) + self.assertEqual('/images', response.get('value')) + self.assertEqual(['new_tag'], response['tags']) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_UPDATE) + + def test_update_authorized(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + new_l7rule = {'value': '/images'} + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.put(self.l7rule_path.format( + l7rule_id=api_l7rule.get('id')), + self._build_body(new_l7rule)).json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual('/images', response.get('value')) +
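# The accepted update moves the rule and its parent objects to PENDING_UPDATE +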
self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_UPDATE) + + def test_update_not_authorized(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + new_l7rule = {'value': '/images'} + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.put(self.l7rule_path.format( + l7rule_id=api_l7rule.get('id')), + self._build_body(new_l7rule), status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.ACTIVE, + listener_prov_status=constants.ACTIVE, + l7policy_prov_status=constants.ACTIVE, + l7rule_prov_status=constants.ACTIVE) + + def test_bad_update(self): + l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + new_l7rule = {'type': 'bad type'} + self.put(self.l7rule_path.format(l7rule_id=l7rule.get('id')), + self._build_body(new_l7rule), status=400) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_with_bad_provider(self, mock_provider): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + new_l7rule = {'value': '/images'} + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + response = self.put( + self.l7rule_path.format(l7rule_id=api_l7rule.get('id')), + self._build_body(new_l7rule), status=500) + self.assertIn('Provider \'bad_driver\' reports error: broken', + response.json.get('faultstring')) + + def test_update_with_invalid_rule(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + new_l7rule = {'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, + 'value': 'bad string\\'} + self.put(self.l7rule_path.format( + l7rule_id=api_l7rule.get('id')), self._build_body(new_l7rule), + status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + l7rule_prov_status=constants.ACTIVE) + + def test_update_with_ssl_rule_types(self): + test_mapping = { + constants.L7RULE_TYPE_SSL_CONN_HAS_CERT: { + 'value': 'tRuE', + 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO}, + constants.L7RULE_TYPE_SSL_VERIFY_RESULT: { + 'value': '0', + 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO}, + constants.L7RULE_TYPE_SSL_DN_FIELD: { + 'key': 'st-1', 'value': 
'ST-FIELD1-PREFIX', + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH} + } + + for l7rule_type, test_body in test_mapping.items(): + self.set_lb_status(self.lb_id) + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + test_body.update({'type': l7rule_type}) + response = self.put(self.l7rule_path.format( + l7rule_id=api_l7rule.get('id')), + self._build_body(test_body)).json.get(self.root_tag) + self.assertEqual(l7rule_type, response.get('type')) + self.assertEqual(test_body['compare_type'], + response.get('compare_type')) + self.assertEqual(test_body['value'], response.get('value')) + if test_body.get('key'): + self.assertEqual(test_body['key'], response.get('key')) + self.assertFalse(response.get('invert')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=response.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_UPDATE) + + def test_update_bad_cases_with_ssl_rule_types(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self._test_bad_cases_with_ssl_rule_types( + is_create=False, rule_id=api_l7rule.get('id')) + + def test_update_invert_none(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api', tags=['old_tag'], invert=True).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_l7rule = {'invert': None} + response = self.put(self.l7rule_path.format( + l7rule_id=api_l7rule.get('id')), + self._build_body(new_l7rule)).json.get(self.root_tag) + self.assertFalse(response.get('invert')) + + def test_delete(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_l7rule['provisioning_status'] = constants.ACTIVE + api_l7rule['operating_status'] = constants.ONLINE + api_l7rule.pop('updated_at') + + response = self.get(self.l7rule_path.format( + l7rule_id=api_l7rule.get('id'))).json.get(self.root_tag) + response.pop('updated_at') + self.assertEqual(api_l7rule, response) + + self.delete(self.l7rule_path.format(l7rule_id=api_l7rule.get('id'))) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_DELETE) + self.set_lb_status(self.lb_id) + + def test_delete_authorized(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_l7rule['provisioning_status'] = constants.ACTIVE + api_l7rule['operating_status'] = constants.ONLINE + api_l7rule.pop('updated_at') + + response = self.get(self.l7rule_path.format( + 
l7rule_id=api_l7rule.get('id'))).json.get(self.root_tag) + response.pop('updated_at') + self.assertEqual(api_l7rule, response) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + + self.delete( + self.l7rule_path.format(l7rule_id=api_l7rule.get('id'))) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + l7policy_prov_status=constants.PENDING_UPDATE, + l7rule_prov_status=constants.PENDING_DELETE) + self.set_lb_status(self.lb_id) + + def test_delete_not_authorized(self): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_l7rule['provisioning_status'] = constants.ACTIVE + api_l7rule['operating_status'] = constants.ONLINE + api_l7rule.pop('updated_at') + + response = self.get(self.l7rule_path.format( + l7rule_id=api_l7rule.get('id'))).json.get(self.root_tag) + response.pop('updated_at') + self.assertEqual(api_l7rule, response) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + self.delete( + self.l7rule_path.format(l7rule_id=api_l7rule.get('id')), + status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + l7policy_id=self.l7policy_id, l7rule_id=api_l7rule.get('id'), + lb_prov_status=constants.ACTIVE, + listener_prov_status=constants.ACTIVE, + l7policy_prov_status=constants.ACTIVE, + l7rule_prov_status=constants.ACTIVE) + + def test_bad_delete(self): + self.delete(self.l7rule_path.format( + l7rule_id=uuidutils.generate_uuid()), status=404) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_delete_with_bad_provider(self, mock_provider): + api_l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_l7rule['provisioning_status'] = constants.ACTIVE + api_l7rule['operating_status'] = constants.ONLINE + response = self.get(self.l7rule_path.format( + l7rule_id=api_l7rule.get('id'))).json.get(self.root_tag) + + 
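# updated_at is None on the create response but set once set_lb_status ran +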
self.assertIsNone(api_l7rule.pop('updated_at')) + self.assertIsNotNone(response.pop('updated_at')) + self.assertEqual(api_l7rule, response) + + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + self.delete(self.l7rule_path.format(l7rule_id=api_l7rule.get('id')), + status=500) + + def test_create_when_lb_pending_update(self): + self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.put(self.LB_PATH.format(lb_id=self.lb_id), + body={'loadbalancer': {'name': 'test_name_change'}}) + new_l7rule = {'type': constants.L7RULE_TYPE_PATH, + 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + 'value': '/api'} + self.post(self.l7rules_path, body=self._build_body(new_l7rule), + status=409) + + def test_update_when_lb_pending_update(self): + l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.put(self.LB_PATH.format(lb_id=self.lb_id), + body={'loadbalancer': {'name': 'test_name_change'}}) + new_l7rule = {'type': constants.L7RULE_TYPE_HOST_NAME, + 'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, + 'value': '.*.example.com'} + self.put(self.l7rule_path.format(l7rule_id=l7rule.get('id')), + body=self._build_body(new_l7rule), status=409) + + def test_delete_when_lb_pending_update(self): + l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.put(self.LB_PATH.format(lb_id=self.lb_id), + body={'loadbalancer': {'name': 'test_name_change'}}) + self.delete(self.l7rule_path.format(l7rule_id=l7rule.get('id')), + status=409) + + def test_create_when_lb_pending_delete(self): + self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + new_l7rule = {'type': constants.L7RULE_TYPE_HEADER, + 'compare_type': + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + 'value': 'some-string', + 'key': 'Some-header'} + self.post(self.l7rules_path, body=self._build_body(new_l7rule), + status=409) + + def test_update_when_lb_pending_delete(self): + l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + new_l7rule = {'type': constants.L7RULE_TYPE_COOKIE, + 'compare_type': + constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + 'value': 'some-string', + 'key': 'some-cookie'} + self.put(self.l7rule_path.format(l7rule_id=l7rule.get('id')), + body=self._build_body(new_l7rule), status=409) + + def test_delete_when_lb_pending_delete(self): + l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + self.delete(self.l7rule_path.format(l7rule_id=l7rule.get('id')), + status=409) + + def test_update_already_deleted(self): + l7rule = self.create_l7rule( + self.l7policy_id, 
constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + # This updates the child objects + self.set_lb_status(self.lb_id, status=constants.DELETED) + new_l7rule = {'type': constants.L7RULE_TYPE_COOKIE, + 'compare_type': + constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + 'value': 'some-string', + 'key': 'some-cookie'} + self.put(self.l7rule_path.format(l7rule_id=l7rule.get('id')), + body=self._build_body(new_l7rule), status=404) + + def test_delete_already_deleted(self): + l7rule = self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + # This updates the child objects + self.set_lb_status(self.lb_id, status=constants.DELETED) + self.delete(self.l7rule_path.format(l7rule_id=l7rule.get('id')), + status=404) + + @mock.patch("octavia.api.drivers.noop_driver.driver.NoopManager." + "l7rule_create") + def test_create_with_exception_in_provider_driver(self, + l7rule_create_mock): + l7rule_create_mock.side_effect = Exception("Provider error") + + self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api', status=500) + + lb = self.get(self.LB_PATH.format(lb_id=self.lb_id)).json.get( + "loadbalancer") + self.assertEqual(lb[constants.PROVISIONING_STATUS], + constants.ACTIVE) diff --git a/octavia/tests/functional/api/v2/test_listener.py b/octavia/tests/functional/api/v2/test_listener.py new file mode 100644 index 0000000000..29e783653e --- /dev/null +++ b/octavia/tests/functional/api/v2/test_listener.py @@ -0,0 +1,3097 @@ +# Copyright 2014 Rackspace +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
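+ +# Functional tests for the v2 listener API: CRUD, RBAC and list filtering.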
+ +import copy +import random +from unittest import mock + +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +import octavia.common.context +from octavia.common import data_models +from octavia.common import exceptions +from octavia.db import api as db_api +from octavia.tests.common import constants as c_const +from octavia.tests.common import sample_certs +from octavia.tests.functional.api.v2 import base + + +class TestListener(base.BaseAPITest): + + root_tag = 'listener' + root_tag_list = 'listeners' + root_tag_links = 'listeners_links' + + def setUp(self): + super().setUp() + self.lb = self.create_load_balancer(uuidutils.generate_uuid()) + self.lb_id = self.lb.get('loadbalancer').get('id') + self.project_id = self.lb.get('loadbalancer').get('project_id') + self.set_lb_status(self.lb_id) + self.listener_path = self.LISTENERS_PATH + '/{listener_id}' + self.pool = self.create_pool( + self.lb_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + self.pool_id = self.pool.get('pool').get('id') + self.set_lb_status(self.lb_id) + + def test_get_all_admin(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + listener1 = self.create_listener( + constants.PROTOCOL_HTTP, 80, lb1_id, + tags=['test_tag1']).get(self.root_tag) + self.set_lb_status(lb1_id) + listener2 = self.create_listener( + constants.PROTOCOL_HTTP, 81, lb1_id, + tags=['test_tag2']).get(self.root_tag) + self.set_lb_status(lb1_id) + listener3 = self.create_listener( + constants.PROTOCOL_HTTP, 82, lb1_id, + tags=['test_tag3']).get(self.root_tag) + self.set_lb_status(lb1_id) + listeners = self.get(self.LISTENERS_PATH).json.get(self.root_tag_list) + self.assertEqual(3, len(listeners)) + listener_id_ports = [(li.get('id'), li.get('protocol_port'), + li.get('tags')) + for li in listeners] + self.assertIn((listener1.get('id'), listener1.get('protocol_port'), + listener1.get('tags')), + listener_id_ports) + self.assertIn((listener2.get('id'), listener2.get('protocol_port'), + listener2.get('tags')), + listener_id_ports) + self.assertIn((listener3.get('id'), listener3.get('protocol_port'), + listener3.get('tags')), + listener_id_ports) + + def test_get_all_non_admin(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 80, + lb1_id) + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 81, + lb1_id) + self.set_lb_status(lb1_id) + listener3 = self.create_listener(constants.PROTOCOL_HTTP, 82, + self.lb_id).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=listener3['project_id']) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': 
['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + listeners = self.get( + self.LISTENERS_PATH).json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assertEqual(1, len(listeners)) + listener_id_ports = [(li.get('id'), li.get('protocol_port')) + for li in listeners] + self.assertIn((listener3.get('id'), listener3.get('protocol_port')), + listener_id_ports) + + def test_get_all_unscoped_token(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 80, + lb1_id) + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 81, + lb1_id) + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 82, + self.lb_id).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=None) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.LISTENERS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + + def test_get_all_non_admin_global_observer(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + listener1 = self.create_listener( + constants.PROTOCOL_HTTP, 80, lb1_id).get(self.root_tag) + self.set_lb_status(lb1_id) + listener2 = self.create_listener( + constants.PROTOCOL_HTTP, 81, lb1_id).get(self.root_tag) + self.set_lb_status(lb1_id) + listener3 = self.create_listener( + constants.PROTOCOL_HTTP, 82, lb1_id).get(self.root_tag) + self.set_lb_status(lb1_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['admin'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + 
"oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + listeners = self.get(self.LISTENERS_PATH) + listeners = listeners.json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assertEqual(3, len(listeners)) + listener_id_ports = [(li.get('id'), li.get('protocol_port')) + for li in listeners] + self.assertIn((listener1.get('id'), listener1.get('protocol_port')), + listener_id_ports) + self.assertIn((listener2.get('id'), listener2.get('protocol_port')), + listener_id_ports) + self.assertIn((listener3.get('id'), listener3.get('protocol_port')), + listener_id_ports) + + def test_get_all_not_authorized(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 80, + lb1_id) + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 81, + lb1_id) + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 82, + self.lb_id).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + listeners = self.get(self.LISTENERS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, listeners) + + def test_get_all_by_project_id(self): + project1_id = uuidutils.generate_uuid() + project2_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project1_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + lb2 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', + project_id=project2_id) + lb2_id = lb2.get('loadbalancer').get('id') + + self.set_lb_status(lb2_id) + listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, lb1_id, + name='listener1').get(self.root_tag) + self.set_lb_status(lb1_id) + listener2 = self.create_listener(constants.PROTOCOL_HTTP, 81, lb1_id, + name='listener2').get(self.root_tag) + self.set_lb_status(lb1_id) + listener3 = self.create_listener(constants.PROTOCOL_HTTP, 82, lb2_id, + name='listener3').get(self.root_tag) + self.set_lb_status(lb2_id) + listeners = self.get( + self.LISTENERS_PATH, + params={'project_id': project1_id}).json.get(self.root_tag_list) + + self.assertEqual(2, len(listeners)) + listener_id_ports = [(li.get('id'), li.get('protocol_port')) + for li in listeners] + self.assertIn((listener1.get('id'), listener1.get('protocol_port')), + listener_id_ports) + self.assertIn((listener2.get('id'), listener2.get('protocol_port')), + listener_id_ports) + listeners = self.get( + self.LISTENERS_PATH, + params={'project_id': project2_id}).json.get(self.root_tag_list) + listener_id_ports = [(li.get('id'), li.get('protocol_port')) + for li in listeners] + self.assertEqual(1, len(listeners)) + self.assertIn((listener3.get('id'), listener3.get('protocol_port')), + listener_id_ports) + + def test_get_all_sorted(self): + self.create_listener(constants.PROTOCOL_HTTP, 80, + self.lb_id, + name='listener1') + self.set_lb_status(self.lb_id) + 
self.create_listener(constants.PROTOCOL_HTTP, 81, + self.lb_id, + name='listener2') + self.set_lb_status(self.lb_id) + self.create_listener(constants.PROTOCOL_HTTP, 82, + self.lb_id, + name='listener3') + self.set_lb_status(self.lb_id) + response = self.get(self.LISTENERS_PATH, + params={'sort': 'name:desc'}) + listeners_desc = response.json.get(self.root_tag_list) + response = self.get(self.LISTENERS_PATH, + params={'sort': 'name:asc'}) + listeners_asc = response.json.get(self.root_tag_list) + + self.assertEqual(3, len(listeners_desc)) + self.assertEqual(3, len(listeners_asc)) + + listener_id_names_desc = [(listener.get('id'), listener.get('name')) + for listener in listeners_desc] + listener_id_names_asc = [(listener.get('id'), listener.get('name')) + for listener in listeners_asc] + self.assertEqual(listener_id_names_asc, + list(reversed(listener_id_names_desc))) + + def test_get_all_limited(self): + self.create_listener(constants.PROTOCOL_HTTP, 80, + self.lb_id, + name='listener1') + self.set_lb_status(self.lb_id) + self.create_listener(constants.PROTOCOL_HTTP, 81, + self.lb_id, + name='listener2') + self.set_lb_status(self.lb_id) + self.create_listener(constants.PROTOCOL_HTTP, 82, + self.lb_id, + name='listener3') + self.set_lb_status(self.lb_id) + + # First two -- should have 'next' link + first_two = self.get(self.LISTENERS_PATH, params={'limit': 2}).json + objs = first_two[self.root_tag_list] + links = first_two[self.root_tag_links] + self.assertEqual(2, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('next', links[0]['rel']) + + # Third + off the end -- should have previous link + third = self.get(self.LISTENERS_PATH, params={ + 'limit': 2, + 'marker': first_two[self.root_tag_list][1]['id']}).json + objs = third[self.root_tag_list] + links = third[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('previous', links[0]['rel']) + + # Middle -- should have both links + middle = self.get(self.LISTENERS_PATH, params={ + 'limit': 1, + 'marker': first_two[self.root_tag_list][0]['id']}).json + objs = middle[self.root_tag_list] + links = middle[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(2, len(links)) + self.assertCountEqual(['previous', 'next'], + [link['rel'] for link in links]) + + def test_get_all_fields_filter(self): + self.create_listener(constants.PROTOCOL_HTTP, 80, + self.lb_id, + name='listener1') + self.set_lb_status(self.lb_id) + self.create_listener(constants.PROTOCOL_HTTP, 81, + self.lb_id, + name='listener2') + self.set_lb_status(self.lb_id) + self.create_listener(constants.PROTOCOL_HTTP, 82, + self.lb_id, + name='listener3') + self.set_lb_status(self.lb_id) + + lis = self.get(self.LISTENERS_PATH, params={ + 'fields': ['id', 'project_id']}).json + for li in lis['listeners']: + self.assertIn('id', li) + self.assertIn('project_id', li) + self.assertNotIn('description', li) + + def test_get_one_fields_filter(self): + listener1 = self.create_listener( + constants.PROTOCOL_HTTP, 80, self.lb_id, + name='listener1').get(self.root_tag) + self.set_lb_status(self.lb_id) + + li = self.get( + self.LISTENER_PATH.format(listener_id=listener1.get('id')), + params={'fields': ['id', 'project_id']}).json.get(self.root_tag) + self.assertIn('id', li) + self.assertIn('project_id', li) + self.assertNotIn('description', li) + + def test_get_all_filter(self): + li1 = self.create_listener(constants.PROTOCOL_HTTP, + 80, + self.lb_id, + name='listener1').get(self.root_tag) + 
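# Only li1 should match the exact-id filter issued below +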
self.set_lb_status(self.lb_id) + self.create_listener(constants.PROTOCOL_HTTP, + 81, + self.lb_id, + name='listener2').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.create_listener(constants.PROTOCOL_HTTP, + 82, + self.lb_id, + name='listener3').get(self.root_tag) + self.set_lb_status(self.lb_id) + + lis = self.get(self.LISTENERS_PATH, params={ + 'id': li1['id']}).json + self.assertEqual(1, len(lis['listeners'])) + self.assertEqual(li1['id'], + lis['listeners'][0]['id']) + + def test_get_all_tags_filter(self): + listener1 = self.create_listener( + constants.PROTOCOL_HTTP, + 80, + self.lb_id, + name='listener1', + tags=['test_tag1', 'test_tag2'] + ).get(self.root_tag) + self.set_lb_status(self.lb_id) + listener2 = self.create_listener( + constants.PROTOCOL_HTTP, + 81, + self.lb_id, + name='listener2', + tags=['test_tag2', 'test_tag3'] + ).get(self.root_tag) + self.set_lb_status(self.lb_id) + listener3 = self.create_listener( + constants.PROTOCOL_HTTP, + 82, + self.lb_id, + name='listener3', + tags=['test_tag4', 'test_tag5'] + ).get(self.root_tag) + self.set_lb_status(self.lb_id) + + listeners = self.get( + self.LISTENERS_PATH, + params={'tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(listeners, list) + self.assertEqual(2, len(listeners)) + self.assertEqual( + [listener1.get('id'), listener2.get('id')], + [listener.get('id') for listener in listeners] + ) + + listeners = self.get( + self.LISTENERS_PATH, + params={'tags': ['test_tag2', 'test_tag3']} + ).json.get(self.root_tag_list) + self.assertIsInstance(listeners, list) + self.assertEqual(1, len(listeners)) + self.assertEqual( + [listener2.get('id')], + [listener.get('id') for listener in listeners] + ) + + listeners = self.get( + self.LISTENERS_PATH, + params={'tags-any': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(listeners, list) + self.assertEqual(2, len(listeners)) + self.assertEqual( + [listener1.get('id'), listener2.get('id')], + [listener.get('id') for listener in listeners] + ) + + listeners = self.get( + self.LISTENERS_PATH, + params={'not-tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(listeners, list) + self.assertEqual(1, len(listeners)) + self.assertEqual( + [listener3.get('id')], + [listener.get('id') for listener in listeners] + ) + + listeners = self.get( + self.LISTENERS_PATH, + params={'not-tags-any': ['test_tag2', 'test_tag4']} + ).json.get(self.root_tag_list) + self.assertIsInstance(listeners, list) + self.assertEqual(0, len(listeners)) + + listeners = self.get( + self.LISTENERS_PATH, + params={'tags': 'test_tag2', + 'tags-any': ['test_tag1', 'test_tag3']} + ).json.get(self.root_tag_list) + self.assertIsInstance(listeners, list) + self.assertEqual(2, len(listeners)) + self.assertEqual( + [listener1.get('id'), listener2.get('id')], + [listener.get('id') for listener in listeners] + ) + + listeners = self.get( + self.LISTENERS_PATH, + params={'tags': 'test_tag2', 'not-tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(listeners, list) + self.assertEqual(0, len(listeners)) + + def test_get_all_hides_deleted(self): + api_listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) + + response = self.get(self.LISTENERS_PATH) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 1) + self.set_object_status(self.listener_repo, api_listener.get('id'), + provisioning_status=constants.DELETED) + response = self.get(self.LISTENERS_PATH) + objects 
= response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 0) + + def test_get(self): + listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) + response = self.get(self.listener_path.format( + listener_id=listener['id'])) + api_listener = response.json.get(self.root_tag) + self.assertEqual(listener, api_listener) + self.assertEqual([], api_listener['tags']) + + def test_get_authorized(self): + listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + + response = self.get(self.listener_path.format( + listener_id=listener['id'])) + api_listener = response.json.get(self.root_tag) + self.assertEqual(listener, api_listener) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_get_not_authorized(self): + listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.get(self.listener_path.format( + listener_id=listener['id']), status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_get_deleted_gives_404(self): + api_listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, self.lb_id).get(self.root_tag) + + self.set_object_status(self.listener_repo, api_listener.get('id'), + provisioning_status=constants.DELETED) + self.get(self.LISTENER_PATH.format(listener_id=api_listener.get('id')), + status=404) + + def test_get_bad_listener_id(self): + listener_path = self.listener_path + self.get(listener_path.format(listener_id='SEAN-CONNERY'), status=404) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create(self, mock_cert_data, + response_status=201, **optionals): + cert1 = data_models.TLSContainer(certificate='cert 1') + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_cert_data.return_value = {'tls_cert': cert1, + 'sni_certs': [cert2, cert3]} + sni1 = uuidutils.generate_uuid() + sni2 = uuidutils.generate_uuid() + lb_listener = {'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, 'connection_limit': 10, + 'default_tls_container_ref': 
uuidutils.generate_uuid(), + 'sni_container_refs': [sni1, sni2], + 'insert_headers': {}, + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'tags': ['test_tag']} + lb_listener.update(optionals) + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=response_status) + if response_status >= 300: + return response + listener_api = response.json['listener'] + extra_expects = {'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE} + lb_listener.update(extra_expects) + self.assertTrue(uuidutils.is_uuid_like(listener_api.get('id'))) + for key, value in optionals.items(): + self.assertEqual(value, listener_api.get(key)) + lb_listener['id'] = listener_api.get('id') + lb_listener.pop('sni_container_refs') + sni_ex = [sni1, sni2] + sni_resp = listener_api.pop('sni_container_refs') + self.assertEqual(2, len(sni_resp)) + for sni in sni_resp: + self.assertIn(sni, sni_ex) + self.assertIsNotNone(listener_api.pop('created_at')) + self.assertIsNone(listener_api.pop('updated_at')) + self.assertEqual(['test_tag'], listener_api['tags']) + self.assertNotEqual(lb_listener, listener_api) + self.assert_correct_lb_status(self.lb_id, constants.ONLINE, + constants.PENDING_UPDATE) + self.assert_final_listener_statuses(self.lb_id, listener_api.get('id')) + return listener_api + + def test_create_with_timeouts(self): + optionals = { + 'timeout_client_data': 1, + 'timeout_member_connect': 2, + 'timeout_member_data': constants.MIN_TIMEOUT, + 'timeout_tcp_inspect': constants.MAX_TIMEOUT, + } + listener_api = self.test_create(**optionals) + self.assertEqual(1, listener_api.get('timeout_client_data')) + self.assertEqual(2, listener_api.get('timeout_member_connect')) + self.assertEqual(constants.MIN_TIMEOUT, + listener_api.get('timeout_member_data')) + self.assertEqual(constants.MAX_TIMEOUT, + listener_api.get('timeout_tcp_inspect')) + + def test_create_with_default_timeouts(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='haproxy_amphora', timeout_client_data=20) + self.conf.config(group='haproxy_amphora', timeout_member_connect=21) + self.conf.config(group='haproxy_amphora', + timeout_member_data=constants.MIN_TIMEOUT) + self.conf.config(group='haproxy_amphora', + timeout_tcp_inspect=constants.MAX_TIMEOUT) + + listener_api = self.test_create() + self.assertEqual(20, listener_api.get('timeout_client_data')) + self.assertEqual(21, listener_api.get('timeout_member_connect')) + self.assertEqual(constants.MIN_TIMEOUT, + listener_api.get('timeout_member_data')) + self.assertEqual(constants.MAX_TIMEOUT, + listener_api.get('timeout_tcp_inspect')) + + def test_create_with_timeouts_too_high(self): + optionals = { + 'timeout_client_data': 1, + 'timeout_member_connect': 1, + 'timeout_member_data': 1, + 'timeout_tcp_inspect': 1, + } + for field in optionals.items(): + optionals.update({field[0]: constants.MAX_TIMEOUT + 1}) + resp = self.test_create(response_status=400, **optionals).json + optionals.update({field[0]: 1}) + fault = resp.get('faultstring') + self.assertIn( + f'Invalid input for field/attribute {field[0]}', fault) + self.assertIn( + f'Value should be lower or equal to {constants.MAX_TIMEOUT}', + fault) + + def test_create_with_timeouts_too_low(self): + optionals = { + 'timeout_client_data': 1, + 'timeout_member_connect': 2, + 'timeout_member_data': 3, + 'timeout_tcp_inspect': constants.MIN_TIMEOUT - 1, + } + resp = self.test_create(response_status=400, **optionals).json + fault = 
resp.get('faultstring')
+        self.assertIn(
+            'Invalid input for field/attribute timeout_tcp_inspect', fault)
+        self.assertIn(
+            f'Value should be greater or equal to {constants.MIN_TIMEOUT}',
+            fault)
+
+    def test_create_udp_case(self):
+        api_listener = self.create_listener(constants.PROTOCOL_UDP, 6666,
+                                            self.lb_id).get(self.root_tag)
+        self.assertEqual(constants.PROTOCOL_UDP, api_listener.get('protocol'))
+        self.assertEqual(6666, api_listener.get('protocol_port'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=api_listener.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_CREATE,
+            listener_op_status=constants.OFFLINE)
+
+    def test_negative_create_udp_case(self):
+        sni1 = uuidutils.generate_uuid()
+        sni2 = uuidutils.generate_uuid()
+        req_dict = {'name': 'listener1', 'default_pool_id': None,
+                    'description': 'desc1',
+                    'admin_state_up': False,
+                    'protocol': constants.PROTOCOL_UDP,
+                    'protocol_port': 6666, 'connection_limit': 10,
+                    'default_tls_container_ref': uuidutils.generate_uuid(),
+                    'sni_container_refs': [sni1, sni2],
+                    'insert_headers': {},
+                    'loadbalancer_id': self.lb_id}
+        expect_error_msg = ("Validation failure: %s protocol listener does "
+                            "not support TLS.") % constants.PROTOCOL_UDP
+        res = self.post(self.LISTENERS_PATH, self._build_body(req_dict),
+                        status=400, expect_errors=True)
+        self.assertEqual(expect_error_msg, res.json['faultstring'])
+        self.assert_correct_status(lb_id=self.lb_id)
+
+        # The default pool protocol is UDP, which differs from the
+        # listener protocol.
+        udp_pool_id = self.create_pool(
+            self.lb_id, constants.PROTOCOL_UDP,
+            constants.LB_ALGORITHM_ROUND_ROBIN).get('pool').get('id')
+        self.set_lb_status(self.lb_id)
+        lb_listener = {'name': 'listener1',
+                       'default_pool_id': udp_pool_id,
+                       'description': 'desc1',
+                       'admin_state_up': False,
+                       'protocol': constants.PROTOCOL_HTTP,
+                       'protocol_port': 80,
+                       'loadbalancer_id': self.lb_id}
+        expect_error_msg = ("Validation failure: The pool protocol '%s' is "
+                            "invalid while the listener protocol is '%s'.") % (
+            constants.PROTOCOL_UDP,
+            lb_listener['protocol'])
+        res = self.post(self.LISTENERS_PATH, self._build_body(lb_listener),
+                        status=400, expect_errors=True)
+        self.assertEqual(expect_error_msg, res.json['faultstring'])
+        self.assert_correct_status(lb_id=self.lb_id)
+
+    def test_create_duplicate_fails(self):
+        self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id)
+        self.set_lb_status(self.lb_id)
+        self.create_listener(constants.PROTOCOL_HTTP, 80, self.lb_id,
+                             status=409)
+
+    def test_create_bad_tls_ref(self):
+        sni1 = uuidutils.generate_uuid()
+        sni2 = uuidutils.generate_uuid()
+        tls_ref = uuidutils.generate_uuid()
+        lb_listener = {'name': 'listener1', 'default_pool_id': None,
+                       'protocol': constants.PROTOCOL_TERMINATED_HTTPS,
+                       'protocol_port': 80,
+                       'sni_container_refs': [sni1, sni2],
+                       'default_tls_container_ref': tls_ref,
+                       'loadbalancer_id': self.lb_id}
+
+        body = self._build_body(lb_listener)
+        self.cert_manager_mock().get_cert.side_effect = [
+            Exception("bad cert"), None, Exception("bad cert")]
+        response = self.post(self.LISTENERS_PATH, body, status=400).json
+        self.assertIn(sni1, response['faultstring'])
+        self.assertNotIn(sni2, response['faultstring'])
+        self.assertIn(tls_ref, response['faultstring'])
+
+    def test_create_with_certs_not_terminated_https(self):
+        optionals = {
+            'default_tls_container_ref': uuidutils.generate_uuid(),
+            'protocol': constants.PROTOCOL_TCP
+        }
+        resp = self.test_create(response_status=400, **optionals).json
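+        # The 400 faultstring should flag the disallowed certificate ref.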
+ fault = resp.get('faultstring') + self.assertIn( + 'Certificate container references are not allowed on ', fault) + self.assertIn( + f'{constants.PROTOCOL_TCP} protocol listeners.', fault) + + def test_create_without_certs_if_terminated_https(self): + optionals = { + 'default_tls_container_ref': None, + 'sni_container_refs': None, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS + } + resp = self.test_create(response_status=400, **optionals).json + fault = resp.get('faultstring') + self.assertIn( + 'An SNI or default certificate container reference must ', fault) + self.assertIn( + 'be provided for {} protocol listeners.'.format( + constants.PROTOCOL_TERMINATED_HTTPS), fault) + + def test_create_client_ca_cert_without_tls_cert(self): + optionals = { + 'default_tls_container_ref': None, + 'sni_container_refs': None, + 'client_ca_tls_container_ref': uuidutils.generate_uuid(), + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS + } + resp = self.test_create(response_status=400, **optionals).json + fault = resp.get('faultstring') + self.assertIn( + 'An SNI or default certificate container reference must ', fault) + self.assertIn( + 'be provided with a client CA container reference.', fault) + + def test_create_crl_without_ca_cert(self): + optionals = { + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'client_ca_tls_container_ref': None, + 'client_crl_container_ref': uuidutils.generate_uuid() + } + resp = self.test_create(response_status=400, **optionals).json + fault = resp.get('faultstring') + self.assertIn( + 'A client authentication CA reference is required to specify a ' + 'client authentication revocation list.', fault) + + def test_create_with_default_pool_id(self): + lb_listener = {'name': 'listener1', + 'default_pool_id': self.pool_id, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id} + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body) + api_listener = response.json['listener'] + self.assertEqual(api_listener.get('default_pool_id'), + self.pool_id) + + def test_create_with_bad_default_pool_id(self): + lb_listener = {'name': 'listener1', + 'default_pool_id': uuidutils.generate_uuid(), + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id} + body = self._build_body(lb_listener) + self.post(self.LISTENERS_PATH, body, status=404) + + def test_create_with_shared_default_pool_id(self): + lb_listener1 = {'name': 'listener1', + 'default_pool_id': self.pool_id, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id} + lb_listener2 = {'name': 'listener2', + 'default_pool_id': self.pool_id, + 'description': 'desc2', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 81, + 'loadbalancer_id': self.lb_id} + body1 = self._build_body(lb_listener1) + body2 = self._build_body(lb_listener2) + listener1 = self.post(self.LISTENERS_PATH, body1).json['listener'] + self.set_lb_status(self.lb_id, constants.ACTIVE) + listener2 = self.post(self.LISTENERS_PATH, body2).json['listener'] + self.assertEqual(listener1['default_pool_id'], self.pool_id) + self.assertEqual(listener1['default_pool_id'], + listener2['default_pool_id']) + + def test_create_with_project_id(self): + self.test_create(project_id=self.project_id) + + def test_create_defaults(self): + defaults 
= {'name': None, 'default_pool_id': None, + 'description': None, 'admin_state_up': True, + 'connection_limit': None, + 'default_tls_container_ref': None, + 'sni_container_refs': [], 'project_id': None, + 'insert_headers': {}} + lb_listener = {'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id} + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body) + listener_api = response.json['listener'] + extra_expects = {'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE} + lb_listener.update(extra_expects) + lb_listener.update(defaults) + self.assertTrue(uuidutils.is_uuid_like(listener_api.get('id'))) + lb_listener['id'] = listener_api.get('id') + self.assertIsNotNone(listener_api.pop('created_at')) + self.assertIsNone(listener_api.pop('updated_at')) + self.assertNotEqual(lb_listener, listener_api) + self.assert_correct_lb_status(self.lb_id, constants.ONLINE, + constants.PENDING_UPDATE) + self.assert_final_listener_statuses(self.lb_id, listener_api['id']) + + def test_create_over_quota(self): + self.start_quota_mock(data_models.Listener) + lb_listener = {'name': 'listener1', + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id} + body = self._build_body(lb_listener) + self.post(self.LISTENERS_PATH, body, status=403) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_create_with_bad_provider(self, mock_provider): + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + lb_listener = {'name': 'listener1', + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id} + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=500) + self.assertIn('Provider \'bad_driver\' reports error: broken', + response.json.get('faultstring')) + + def test_create_authorized(self, **optionals): + lb_listener = {'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, 'connection_limit': 10, + 'default_tls_container_ref': None, + 'sni_container_refs': None, + 'insert_headers': {}, + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id} + lb_listener.update(optionals) + body = self._build_body(lb_listener) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.TESTING) + + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.post(self.LISTENERS_PATH, body) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + listener_api = response.json['listener'] + extra_expects = {'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE} + lb_listener.update(extra_expects) + self.assertTrue(uuidutils.is_uuid_like(listener_api.get('id'))) + for key, value in optionals.items(): + 
self.assertEqual(value, listener_api.get(key)) + lb_listener['id'] = listener_api.get('id') + self.assertIsNotNone(listener_api.pop('created_at')) + self.assertIsNone(listener_api.pop('updated_at')) + self.assertNotEqual(lb_listener, listener_api) + self.assert_correct_lb_status(self.lb_id, constants.ONLINE, + constants.PENDING_UPDATE) + self.assert_final_listener_statuses(self.lb_id, listener_api.get('id')) + + def test_create_not_authorized(self, **optionals): + lb_listener = {'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, 'connection_limit': 10, + 'default_tls_container_ref': None, + 'sni_container_refs': None, + 'insert_headers': {}, + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id} + lb_listener.update(optionals) + body = self._build_body(lb_listener) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.TESTING) + + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.post(self.LISTENERS_PATH, body, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_create_with_ca_cert(self): + self.cert_manager_mock().get_secret.return_value = ( + sample_certs.X509_CA_CERT) + + optionals = { + 'client_ca_tls_container_ref': uuidutils.generate_uuid() + } + listener_api = self.test_create(**optionals) + self.assertEqual(optionals['client_ca_tls_container_ref'], + listener_api.get('client_ca_tls_container_ref')) + self.assertEqual(constants.CLIENT_AUTH_NONE, + listener_api.get('client_authentication')) + + def test_create_tls_with_no_subject_no_alt_names(self): + tls_cert_mock = mock.MagicMock() + tls_cert_mock.get_certificate.return_value = ( + sample_certs.NOCN_NOSUBALT_CRT) + self.cert_manager_mock().get_cert.return_value = tls_cert_mock + + lb_listener = {'name': 'listener1-no-subject-no-alt-names', + 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, 'connection_limit': 10, + 'default_tls_container_ref': uuidutils.generate_uuid(), + 'insert_headers': {}, + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'tags': ['test_tag']} + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=400) + self.assertIn("No CN or DNSName", response) + + def test_create_tls_with_no_subject_with_alt_names(self): + tls_cert_mock = mock.MagicMock() + tls_cert_mock.get_certificate.return_value = ( + sample_certs.NOCN_SUBALT_CRT) + tls_cert_mock.get_private_key.return_value = ( + sample_certs.NOCN_SUBALT_KEY) + tls_cert_mock.get_private_key_passphrase.return_value = None + self.cert_manager_mock().get_cert.return_value = tls_cert_mock + + lb_listener = {'name': 'listener1-no-subject', + 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, 'connection_limit': 10, + 'default_tls_container_ref': uuidutils.generate_uuid(), + 'insert_headers': {}, + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'tags': ['test_tag']} + body = 
self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=201) + self.assertIn("PENDING_CREATE", response) + + def test_create_with_ca_cert_and_option(self): + self.cert_manager_mock().get_secret.return_value = ( + sample_certs.X509_CA_CERT) + optionals = { + 'client_ca_tls_container_ref': uuidutils.generate_uuid(), + 'client_authentication': constants.CLIENT_AUTH_MANDATORY + } + listener_api = self.test_create(**optionals) + self.assertEqual(optionals['client_ca_tls_container_ref'], + listener_api.get('client_ca_tls_container_ref')) + self.assertEqual(optionals['client_authentication'], + listener_api.get('client_authentication')) + + def test_create_with_ca_cert_and_crl(self): + # Load up sample certs to test the validation + self.cert_manager_mock().get_secret.side_effect = [ + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] + + optionals = { + 'client_ca_tls_container_ref': uuidutils.generate_uuid(), + 'client_crl_container_ref': uuidutils.generate_uuid() + } + listener_api = self.test_create(**optionals) + self.assertEqual(optionals['client_ca_tls_container_ref'], + listener_api.get('client_ca_tls_container_ref')) + self.assertEqual(constants.CLIENT_AUTH_NONE, + listener_api.get('client_authentication')) + self.assertEqual(optionals['client_crl_container_ref'], + listener_api.get('client_crl_container_ref')) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_crl_mismatch_ca_cert(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_cert_data.return_value = {'tls_cert': cert1, + 'sni_certs': [cert2, cert3]} + self.cert_manager_mock().get_secret.side_effect = [ + sample_certs.X509_CERT, sample_certs.X509_CA_CRL, + sample_certs.X509_CERT, sample_certs.X509_CA_CRL] + + sni1 = uuidutils.generate_uuid() + sni2 = uuidutils.generate_uuid() + lb_listener = { + 'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, + 'default_tls_container_ref': uuidutils.generate_uuid(), + 'sni_container_refs': [sni1, sni2], + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'client_ca_tls_container_ref': uuidutils.generate_uuid(), + 'client_crl_container_ref': uuidutils.generate_uuid() + } + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=400).json + self.assertEqual( + "Validation failure: The CRL specified is not valid for client " + "certificate authority reference supplied.", + response['faultstring']) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_ca_cert_negative_cases(self, mock_load_cert): + # create just with option or crl, + # no client_ca_tls_container_ref specified. 
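+        # Each case should fail with a 400 naming the client_authentication setting.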
+ sni1 = uuidutils.generate_uuid() + sni2 = uuidutils.generate_uuid() + + for opt in [{'client_authentication': constants.CLIENT_AUTH_MANDATORY, + 'client_crl_container_ref': uuidutils.generate_uuid()}, + {'client_authentication': constants.CLIENT_AUTH_OPTIONAL, + 'client_crl_container_ref': uuidutils.generate_uuid()}]: + lb_listener = { + 'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, + 'default_tls_container_ref': uuidutils.generate_uuid(), + 'sni_container_refs': [sni1, sni2], + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id} + lb_listener.update(opt) + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=400).json + self.assertEqual( + "Validation failure: Client authentication setting %s " + "requires a client CA container reference." % + opt['client_authentication'], response['faultstring']) + + def test_create_with_bad_ca_cert_ref(self): + sni1 = uuidutils.generate_uuid() + sni2 = uuidutils.generate_uuid() + lb_listener = { + 'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, + 'default_tls_container_ref': uuidutils.generate_uuid(), + 'sni_container_refs': [sni1, sni2], + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'client_ca_tls_container_ref': uuidutils.generate_uuid()} + body = self._build_body(lb_listener) + self.cert_manager_mock().get_cert.side_effect = [ + 'cert 1', 'cert 2', 'cert 3'] + self.cert_manager_mock().get_secret.side_effect = [ + Exception('bad ca cert')] + response = self.post(self.LISTENERS_PATH, body, status=400).json + self.assertEqual("Could not retrieve certificate: ['%s']" % + lb_listener['client_ca_tls_container_ref'], + response['faultstring']) + + def test_create_with_unreachable_crl(self): + sni1 = uuidutils.generate_uuid() + sni2 = uuidutils.generate_uuid() + lb_listener = { + 'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, + 'default_tls_container_ref': uuidutils.generate_uuid(), + 'sni_container_refs': [sni1, sni2], + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'client_ca_tls_container_ref': uuidutils.generate_uuid(), + 'client_crl_container_ref': uuidutils.generate_uuid()} + body = self._build_body(lb_listener) + self.cert_manager_mock().get_secret.side_effect = Exception( + 'bad CRL ref') + response = self.post(self.LISTENERS_PATH, body, status=400).json + self.assertIn(lb_listener['client_crl_container_ref'], + response['faultstring']) + + def test_create_with_bad_ca_cert(self): + sni1 = uuidutils.generate_uuid() + sni2 = uuidutils.generate_uuid() + lb_listener = { + 'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, + 'default_tls_container_ref': uuidutils.generate_uuid(), + 'sni_container_refs': [sni1, sni2], + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'client_ca_tls_container_ref': uuidutils.generate_uuid()} + body = self._build_body(lb_listener) + self.cert_manager_mock().get_cert.side_effect = [ + 'cert 1', 'cert 2', 'cert 3'] + self.cert_manager_mock().get_secret.return_value = 'bad cert' + response = self.post(self.LISTENERS_PATH, 
body, status=400).json + self.assertIn("The client authentication CA certificate is invalid. " + "It must be a valid x509 PEM format certificate.", + response['faultstring']) + + def _test_create_with_allowed_cidrs(self, allowed_cidrs, lb_id): + listener = self.create_listener(constants.PROTOCOL_TCP, + 80, lb_id, + allowed_cidrs=allowed_cidrs) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(allowed_cidrs, get_listener.get('allowed_cidrs')) + + def test_create_with_allowed_cidrs_ipv4(self): + allowed_cidrs = ['10.0.1.0/24', '172.16.55.0/25'] + self._test_create_with_allowed_cidrs(allowed_cidrs, self.lb_id) + + def test_create_with_allowed_cidrs_ipv6(self): + lb_ipv6 = self.create_load_balancer( + uuidutils.generate_uuid(), + vip_address='2001:db9:a1b:13f0::1', + ) + lb_id = lb_ipv6.get('loadbalancer').get('id') + self.set_lb_status(lb_id) + allowed_cidrs = ['2001:db8:a0b:12f0::/64', '2a02:8071:69e::/64'] + self._test_create_with_allowed_cidrs(allowed_cidrs, lb_id) + + def test_create_with_bad_allowed_cidrs(self): + allowed_cidrs = ['10.0.1.0/33', '172.16.55.1.0/25'] + lb_listener = { + 'protocol': constants.PROTOCOL_TCP, + 'protocol_port': 80, + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'allowed_cidrs': allowed_cidrs} + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=400).json + self.assertIn("Invalid input for field/attribute allowed_cidrs. " + "Value: '%s'. Value should be IPv4 or IPv6 CIDR format" + % allowed_cidrs, response['faultstring']) + + def test_create_with_incompatible_allowed_cidrs_ipv6(self): + lb_listener = { + 'protocol': constants.PROTOCOL_TCP, + 'protocol_port': 80, + 'project_id': self.project_id, + 'loadbalancer_id': self.lb_id, + 'allowed_cidrs': ['2001:db8:a0b:12f0::/64']} + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=400).json + self.assertIn("Validation failure: CIDR 2001:db8:a0b:12f0::/64 IP " + "version incompatible with all VIPs ['198.0.2.5'] IP " + "version.", + response['faultstring']) + + def test_create_with_incompatible_allowed_cidrs_ipv4(self): + lb_ipv6 = self.create_load_balancer( + uuidutils.generate_uuid(), + vip_address='2001:db9:a1b:13f0::1', + ) + lb_id = lb_ipv6.get('loadbalancer').get('id') + self.set_lb_status(lb_id) + lb_listener = { + 'protocol': constants.PROTOCOL_TCP, + 'protocol_port': 80, + 'project_id': self.project_id, + 'loadbalancer_id': lb_id, + 'allowed_cidrs': ['10.0.1.0/24']} + body = self._build_body(lb_listener) + response = self.post(self.LISTENERS_PATH, body, status=400).json + self.assertIn("Validation failure: CIDR 10.0.1.0/24 IP version " + "incompatible with all VIPs " + "['2001:db9:a1b:13f0::1'] IP version.", + response['faultstring']) + + def test_create_with_mixed_version_allowed_cidrs(self): + lb_dualstack = self.create_load_balancer( + uuidutils.generate_uuid(), + additional_vips=[{'subnet_id': uuidutils.generate_uuid(), + 'ip_address': '2001:db9:a1b:13f0::1', + }], + ) + lb_id = lb_dualstack.get('loadbalancer').get('id') + self.set_lb_status(lb_id) + self._test_create_with_allowed_cidrs(['10.0.1.0/24', + '2001:db9:a1b:13f0::/64'], + lb_id) + + def test_create_with_duplicated_allowed_cidrs(self): + allowed_cidrs = ['10.0.1.0/24', '10.0.2.0/24', '10.0.2.0/24'] + self.create_listener(constants.PROTOCOL_TCP, 80, + self.lb_id, allowed_cidrs=allowed_cidrs) + + # TODO(johnsom) Fix this when there 
is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_tls_versions(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + cert_id = uuidutils.generate_uuid() + tls_versions = constants.TLS_VERSIONS_OWASP_SUITE_B + listener = self.create_listener(constants.PROTOCOL_TERMINATED_HTTPS, + 80, self.lb_id, + default_tls_container_ref=cert_id, + tls_versions=tls_versions) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(tls_versions, get_listener['tls_versions']) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_tls_versions_negative(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + cert_id = uuidutils.generate_uuid() + req_dict = {'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id, + 'default_tls_container_ref': cert_id, + 'tls_versions': [lib_consts.TLS_VERSION_1_3, 'insecure']} + res = self.post(self.LISTENERS_PATH, self._build_body(req_dict), + status=400) + fault = res.json['faultstring'] + self.assertIn('Validation failure: Invalid TLS versions', fault) + self.assert_correct_status(lb_id=self.lb_id) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_tls_ciphers(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + cert_id = uuidutils.generate_uuid() + tls_ciphers = constants.CIPHERS_OWASP_SUITE_B + listener = self.create_listener(constants.PROTOCOL_TERMINATED_HTTPS, + 80, self.lb_id, + default_tls_container_ref=cert_id, + tls_ciphers=tls_ciphers) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(tls_ciphers, get_listener['tls_ciphers']) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_tls_ciphers_negative(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + cert_id = uuidutils.generate_uuid() + req_dict = {'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id, + 'default_tls_container_ref': cert_id, + 'tls_ciphers': ['cipher-insecure']} + res = self.post(self.LISTENERS_PATH, self._build_body(req_dict), + status=400) + fault = res.json['faultstring'] + self.assertIn('Invalid input for field/attribute tls_ciphers', fault) + self.assert_correct_status(lb_id=self.lb_id) + + def _test_negative_create_with_headers(self, protocol): + req_dict = {'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': protocol, + 'protocol_port': 6666, 'connection_limit': 10, + 'insert_headers': { + "X-Forwarded-Port": "true", + "X-Forwarded-For": "true"}, + 'loadbalancer_id': self.lb_id} + res = self.post(self.LISTENERS_PATH, self._build_body(req_dict), + status=400) + 
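# The faultstring should name the protocol that rejects header insertion. +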
self.assertIn(protocol, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + + def test_negative_create_HTTPS_with_headers(self): + self._test_negative_create_with_headers(constants.PROTOCOL_HTTPS) + + def test_negative_create_PROXY_with_headers(self): + self._test_negative_create_with_headers(constants.PROTOCOL_PROXY) + + def test_negative_create_TCP_with_headers(self): + self._test_negative_create_with_headers(constants.PROTOCOL_TCP) + + def test_negative_create_UDP_with_headers(self): + self._test_negative_create_with_headers(constants.PROTOCOL_UDP) + + def test_create_prometheus(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='api_settings', allow_prometheus_listeners=True) + listener = self.create_listener(lib_consts.PROTOCOL_PROMETHEUS, + 80, self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(lib_consts.PROTOCOL_PROMETHEUS, + get_listener['protocol']) + + def test_create_prometheus_disabled(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='api_settings', + allow_prometheus_listeners=False) + req_dict = {'name': 'create_listener_prometheus_disabled', + 'protocol': lib_consts.PROTOCOL_PROMETHEUS, + 'protocol_port': 6666, + 'loadbalancer_id': self.lb_id} + self.post(self.LISTENERS_PATH, self._build_body(req_dict), status=400) + + def test_update_allowed_cidrs(self): + allowed_cidrs = ['10.0.1.0/24', '10.0.2.0/24'] + new_cidrs = ['10.0.1.0/24', '10.0.3.0/24'] + listener = self.create_listener(constants.PROTOCOL_TCP, + 80, self.lb_id, + allowed_cidrs=allowed_cidrs) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + lb_listener = {'allowed_cidrs': new_cidrs} + body = self._build_body(lb_listener) + response = self.put(listener_path, body).json.get(self.root_tag) + self.assertEqual(new_cidrs, response.get('allowed_cidrs')) + + def test_update_unset_allowed_cidrs(self): + allowed_cidrs = ['10.0.1.0/24', '10.0.2.0/24'] + listener = self.create_listener(constants.PROTOCOL_TCP, + 80, self.lb_id, + allowed_cidrs=allowed_cidrs) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + lb_listener = {'allowed_cidrs': None} + body = self._build_body(lb_listener) + api_listener = self.put(listener_path, body).json.get(self.root_tag) + self.assertIsNone(api_listener.get('allowed_cidrs')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_with_bad_provider(self, mock_provider): + api_listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, + self.lb_id).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + new_listener = {'name': 'new_name'} + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + response = self.put( + self.LISTENER_PATH.format(listener_id=api_listener.get('id')), + self._build_body(new_listener), status=500) + self.assertIn('Provider \'bad_driver\' reports error: broken', + response.json.get('faultstring')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_delete_with_bad_provider(self, mock_provider): + api_listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, + self.lb_id).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + 
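# (keeps the local copy in sync with the GET response compared below) +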
api_listener['provisioning_status'] = constants.ACTIVE
+        api_listener['operating_status'] = constants.ONLINE
+        response = self.get(self.LISTENER_PATH.format(
+            listener_id=api_listener.get('id'))).json.get(self.root_tag)
+        self.assertIsNone(api_listener.pop('updated_at'))
+        self.assertIsNotNone(response.pop('updated_at'))
+        self.assertEqual(api_listener, response)
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
+        self.delete(self.LISTENER_PATH.format(
+            listener_id=api_listener.get('id')), status=500)
+
+    # TODO(johnsom) Fix this when there is a noop certificate manager
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update(self, mock_cert_data, **options):
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        mock_cert_data.return_value = {'tls_cert': cert1}
+        self.cert_manager_mock().get_secret.return_value = (
+            sample_certs.X509_CA_CERT)
+        tls_uuid = uuidutils.generate_uuid()
+        ca_tls_uuid = uuidutils.generate_uuid()
+        listener = self.create_listener(
+            constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id,
+            name='listener1', description='desc1',
+            admin_state_up=False, connection_limit=10,
+            default_tls_container_ref=tls_uuid,
+            default_pool_id=None, tags=['old_tag'],
+            client_ca_tls_container_ref=ca_tls_uuid).get(self.root_tag)
+        ori_listener = copy.deepcopy(listener)
+        self.set_lb_status(self.lb_id)
+        new_listener = {'name': 'listener2', 'admin_state_up': True,
+                        'default_pool_id': self.pool_id,
+                        'timeout_client_data': 1,
+                        'timeout_member_connect': 2,
+                        'timeout_member_data': 3,
+                        'timeout_tcp_inspect': 4,
+                        'tags': ['new_tag']}
+        new_listener.update(options)
+        body = self._build_body(new_listener)
+        listener_path = self.LISTENER_PATH.format(
+            listener_id=listener['id'])
+        api_listener = self.put(listener_path, body).json.get(self.root_tag)
+        update_expect = {'provisioning_status': constants.PENDING_UPDATE,
+                         'operating_status': constants.ONLINE}
+        update_expect.update(new_listener)
+        listener.update(update_expect)
+        self.assertEqual(listener['created_at'], api_listener['created_at'])
+        self.assertNotEqual(listener['updated_at'], api_listener['updated_at'])
+        self.assertEqual(['new_tag'], api_listener['tags'])
+        self.assertNotEqual(listener, api_listener)
+        self.assert_correct_lb_status(self.lb_id, constants.ONLINE,
+                                      constants.PENDING_UPDATE)
+        self.assert_final_listener_statuses(self.lb_id,
+                                            api_listener['id'])
+        return ori_listener, api_listener
+
+    def test_update_with_bad_tls_ref(self):
+        listener = self.create_listener(constants.PROTOCOL_TCP,
+                                        443, self.lb_id)
+        tls_uuid = uuidutils.generate_uuid()
+        self.set_lb_status(self.lb_id)
+        self.listener_repo.update(db_api.get_session(),
+                                  listener['listener']['id'],
+                                  tls_certificate_id=tls_uuid,
+                                  protocol=constants.PROTOCOL_TERMINATED_HTTPS)
+
+        listener_path = self.LISTENER_PATH.format(
+            listener_id=listener['listener']['id'])
+        update_data = {'name': 'listener2'}
+        body = self._build_body(update_data)
+        # The PUT response is not needed; the GET below re-reads the listener.
+        self.put(listener_path, body)
+        response = self.get(self.listener_path.format(
+            listener_id=listener['listener']['id']))
+        api_listener = response.json.get(self.root_tag)
+        self.assertEqual('listener2', api_listener['name'])
+
+    # TODO(johnsom) Fix this when there is a noop certificate manager
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_with_tls_versions(self, mock_cert_data):
+        cert_id = uuidutils.generate_uuid()
+        cert1 = 
data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + tls_versions_orig = [lib_consts.TLS_VERSION_1_1] + tls_versions = [lib_consts.TLS_VERSION_1_2, lib_consts.TLS_VERSION_1_3] + listener = self.create_listener( + constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, + default_tls_container_ref=cert_id, + tls_versions=tls_versions_orig) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(tls_versions_orig, + get_listener.get('tls_versions')) + self.put(listener_path, + self._build_body({'tls_versions': tls_versions})) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(tls_versions, get_listener.get('tls_versions')) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_with_tls_versions_negative(self, mock_cert_data): + cert_id = uuidutils.generate_uuid() + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + tls_versions_orig = [lib_consts.TLS_VERSION_1_1] + listener = self.create_listener( + constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, + default_tls_container_ref=cert_id, + tls_versions=tls_versions_orig) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(tls_versions_orig, get_listener.get('tls_versions')) + + req_dict = {'tls_versions': [lib_consts.TLS_VERSION_1_3, 'insecure']} + res = self.put(listener_path, self._build_body(req_dict), + status=400) + fault = res.json['faultstring'] + self.assertIn('Validation failure: Invalid TLS versions:', fault) + self.assert_correct_status(lb_id=self.lb_id) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_with_tls_ciphers_negative(self, mock_cert_data): + cert_id = uuidutils.generate_uuid() + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + tls_ciphers_orig = constants.CIPHERS_OWASP_SUITE_B + listener = self.create_listener( + constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, + default_tls_container_ref=cert_id, + tls_ciphers=tls_ciphers_orig) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(tls_ciphers_orig, get_listener.get('tls_ciphers')) + + req_dict = {'tls_ciphers': ['cipher-insecure']} + res = self.put(listener_path, self._build_body(req_dict), + status=400) + fault = res.json['faultstring'] + self.assertIn('Invalid input for field/attribute tls_ciphers', fault) + self.assert_correct_status(lb_id=self.lb_id) + + def test_negative_update_udp_case(self): + api_listener = self.create_listener(constants.PROTOCOL_UDP, 6666, + self.lb_id).get(self.root_tag) + self.set_lb_status(self.lb_id) + sni1 = uuidutils.generate_uuid() + sni2 = uuidutils.generate_uuid() + new_listener = {'name': 'new-listener', + 'admin_state_up': True, + 'connection_limit': 10, + 'default_tls_container_ref': + uuidutils.generate_uuid(), + 'sni_container_refs': [sni1, sni2], + 'insert_headers': { + "X-Forwarded-Port": "true", 
+ "X-Forwarded-For": "true"}} + listener_path = self.LISTENER_PATH.format( + listener_id=api_listener['id']) + expect_error_msg = ( + "Validation failure: %s protocol listener does not support TLS or " + "header insertion.") % constants.PROTOCOL_UDP + res = self.put(listener_path, self._build_body(new_listener), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + + def test_update_bad_listener_id(self): + self.put(self.listener_path.format(listener_id='SEAN-CONNERY'), + body={}, status=404) + + def test_update_with_bad_default_pool_id(self): + bad_pool_uuid = uuidutils.generate_uuid() + listener = self.create_listener( + constants.PROTOCOL_TCP, 80, self.lb_id, + name='listener1', description='desc1', + admin_state_up=False, connection_limit=10, + default_pool_id=self.pool_id) + self.set_lb_status(self.lb_id) + new_listener = {'name': 'listener2', 'admin_state_up': True, + 'default_pool_id': bad_pool_uuid} + body = self._build_body(new_listener) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + self.put(listener_path, body, status=404) + self.assert_correct_lb_status(self.lb_id, constants.ONLINE, + constants.ACTIVE) + self.assert_final_listener_statuses(self.lb_id, + listener['listener']['id']) + + def test_update_with_certs_not_terminated_https(self): + listener = self.create_listener( + constants.PROTOCOL_TCP, 80, self.lb_id, + name='listener1', description='desc1', + admin_state_up=False, connection_limit=10, + default_pool_id=None,).get(self.root_tag) + self.set_lb_status(self.lb_id) + lb_listener = { + 'default_tls_container_ref': uuidutils.generate_uuid()} + body = self._build_body(lb_listener) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['id']) + response = self.put(listener_path, body, status=400).json + fault = response.get('faultstring') + self.assertIn( + 'Certificate container references are not allowed on ', fault) + self.assertIn(f'{constants.PROTOCOL_TCP} protocol listeners.', fault) + + def test_update_with_ca_cert(self): + self.cert_manager_mock().get_secret.return_value = ( + sample_certs.X509_CA_CERT) + + optionals = { + 'client_ca_tls_container_ref': uuidutils.generate_uuid() + } + ori_listener, update_listener = self.test_update(**optionals) + self.assertEqual(optionals['client_ca_tls_container_ref'], + update_listener.get('client_ca_tls_container_ref')) + self.assertNotEqual(ori_listener['client_ca_tls_container_ref'], + optionals['client_ca_tls_container_ref']) + + def test_update_with_only_client_auth_option(self): + optionals = { + 'client_authentication': constants.CLIENT_AUTH_OPTIONAL + } + ori_listener, update_listener = self.test_update(**optionals) + self.assertEqual(optionals['client_authentication'], + update_listener.get('client_authentication')) + self.assertNotEqual(ori_listener['client_authentication'], + optionals['client_authentication']) + + def test_update_with_crl(self): + # Load up sample certs to test the validation + self.cert_manager_mock().get_secret.side_effect = [ + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] + + optionals = { + 'client_crl_container_ref': uuidutils.generate_uuid() + } + ori_listener, update_listener = self.test_update(**optionals) + self.assertEqual(optionals['client_crl_container_ref'], + update_listener.get('client_crl_container_ref')) + 
self.assertNotEqual(ori_listener['client_crl_container_ref'],
+                            optionals['client_crl_container_ref'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_from_nonexist_ca_cert_to_new_ca_cert(self, mock_cert_data):
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        mock_cert_data.return_value = {'tls_cert': cert1}
+        self.cert_manager_mock().get_secret.return_value = (
+            sample_certs.X509_CA_CERT)
+        tls_uuid = uuidutils.generate_uuid()
+        listener = self.create_listener(
+            constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id,
+            name='listener1', description='desc1',
+            admin_state_up=False, connection_limit=10,
+            default_tls_container_ref=tls_uuid,
+            default_pool_id=None).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        ca_tls_uuid = uuidutils.generate_uuid()
+        new_listener = {
+            'client_ca_tls_container_ref': ca_tls_uuid}
+        body = self._build_body(new_listener)
+        listener_path = self.LISTENER_PATH.format(
+            listener_id=listener['id'])
+        api_listener = self.put(listener_path, body).json.get(self.root_tag)
+        update_expect = {'provisioning_status': constants.PENDING_UPDATE,
+                         'operating_status': constants.ONLINE}
+        update_expect.update(new_listener)
+        listener.update(update_expect)
+        self.assertEqual(ca_tls_uuid,
+                         api_listener['client_ca_tls_container_ref'])
+        self.assertEqual(constants.CLIENT_AUTH_NONE,
+                         api_listener['client_authentication'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_with_ca_cert_missing(self, mock_cert_data):
+        # Update a listener when no client CA cert exists.
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        mock_cert_data.return_value = {'tls_cert': cert1}
+        tls_uuid = uuidutils.generate_uuid()
+        listener = self.create_listener(
+            constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id,
+            name='listener1', description='desc1',
+            admin_state_up=False, connection_limit=10,
+            default_tls_container_ref=tls_uuid,
+            default_pool_id=None).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        for opt in [{'client_authentication': constants.CLIENT_AUTH_OPTIONAL,
+                     'client_crl_container_ref': uuidutils.generate_uuid()},
+                    {'client_authentication': constants.CLIENT_AUTH_MANDATORY,
+                     'client_crl_container_ref': uuidutils.generate_uuid()}]:
+            body = self._build_body(opt)
+            listener_path = self.LISTENER_PATH.format(
+                listener_id=listener['id'])
+            response = self.put(listener_path, body, status=400).json
+            self.assertEqual(
+                "Validation failure: Client authentication setting %s "
+                "requires a client CA container reference." %
+                opt['client_authentication'], response['faultstring'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_with_crl_but_ca_cert_missing(self, mock_cert_data):
+        # Update a listener when no client CA cert exists.
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        mock_cert_data.return_value = {'tls_cert': cert1,
+                                       'client_ca_cert': None}
+        tls_uuid = uuidutils.generate_uuid()
+        listener = self.create_listener(
+            constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id,
+            name='listener1', description='desc1',
+            admin_state_up=False, connection_limit=10,
+            default_tls_container_ref=tls_uuid,
+            default_pool_id=None).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        body = self._build_body(
+            {'client_crl_container_ref': uuidutils.generate_uuid()})
+        listener_path = self.LISTENER_PATH.format(
+            listener_id=listener['id'])
+        response = self.put(listener_path, body, status=400).json
+        self.assertEqual(
+            "Validation failure: A client authentication CA reference is "
+            "required to specify a client authentication revocation list.",
+            response['faultstring'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_unset_ca_cert(self, mock_cert_data):
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        mock_cert_data.return_value = {'tls_cert': cert1}
+        self.cert_manager_mock().get_secret.return_value = (
+            sample_certs.X509_CA_CERT)
+        tls_uuid = uuidutils.generate_uuid()
+        ca_tls_uuid = uuidutils.generate_uuid()
+        listener = self.create_listener(
+            constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id,
+            name='listener1', description='desc1',
+            admin_state_up=False, connection_limit=10,
+            default_tls_container_ref=tls_uuid,
+            default_pool_id=None,
+            client_ca_tls_container_ref=ca_tls_uuid).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        lb_listener = {'client_ca_tls_container_ref': None}
+        body = self._build_body(lb_listener)
+        listener_path = self.LISTENER_PATH.format(
+            listener_id=listener['id'])
+        api_listener = self.put(listener_path, body).json.get(self.root_tag)
+        self.assertIsNone(api_listener.get('client_ca_tls_container_ref'))
+        self.assertIsNone(api_listener.get('client_auth_option'))
+        self.assertIsNone(api_listener.get('client_crl_container_ref'))
+
+    @mock.patch(
+        'octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_unset_crl(self, mock_cert_data):
+        # Load up sample certs to test the validation
+        self.cert_manager_mock().get_secret.side_effect = [
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL]
+
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        mock_cert_data.return_value = {'tls_cert': cert1}
+        listener = self.create_listener(
+            constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id,
+            name='listener1', description='desc1',
+            admin_state_up=False, connection_limit=10,
+            default_tls_container_ref=uuidutils.generate_uuid(),
+            default_pool_id=None,
+            client_ca_tls_container_ref=uuidutils.generate_uuid(),
+            client_crl_container_ref=uuidutils.generate_uuid(),
+            client_authentication=constants.CLIENT_AUTH_MANDATORY).get(
+            self.root_tag)
+        self.set_lb_status(self.lb_id)
+        lb_listener = {'client_crl_container_ref': None}
+        body = self._build_body(lb_listener)
+        listener_path = self.LISTENER_PATH.format(
+            listener_id=listener['id'])
+        api_listener = self.put(listener_path, 
body).json.get(self.root_tag) + self.assertEqual(listener.get('client_ca_tls_container_ref'), + api_listener.get('client_ca_tls_container_ref')) + self.assertEqual(listener.get('client_authentication'), + api_listener.get('client_authentication')) + self.assertIsNone(api_listener.get('client_crl_container_ref')) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_unset_defaults(self, mock_cert_data): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='haproxy_amphora', timeout_client_data=20) + self.conf.config(group='haproxy_amphora', timeout_member_connect=21) + self.conf.config(group='haproxy_amphora', timeout_member_data=22) + self.conf.config(group='haproxy_amphora', timeout_tcp_inspect=23) + self.conf.config(group='api_settings', + default_listener_tls_versions=( + constants.TLS_VERSIONS_OWASP_SUITE_B)) + self.conf.config(group='api_settings', + default_listener_ciphers=( + constants.CIPHERS_OWASP_SUITE_B)) + self.conf.config(group='api_settings', + default_listener_alpn_protocols=['http/1.1']) + + self.cert_manager_mock().get_secret.side_effect = [ + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + self.cert_manager_mock().get_secret.return_value = ( + sample_certs.X509_CA_CERT) + tls_uuid = uuidutils.generate_uuid() + ca_tls_uuid = uuidutils.generate_uuid() + crl_tls_uuid = uuidutils.generate_uuid() + listener = self.create_listener( + constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, + name='listener1', description='desc1', + admin_state_up=False, connection_limit=10, + default_tls_container_ref=tls_uuid, + default_pool_id=self.pool_id, tags=['old_tag'], + insert_headers={'X-Forwarded-For': 'true'}, + timeout_client_data=1, timeout_member_connect=2, + timeout_member_data=3, timeout_tcp_inspect=4, + client_authentication=constants.CLIENT_AUTH_OPTIONAL, + client_crl_container_ref=crl_tls_uuid, + client_ca_tls_container_ref=ca_tls_uuid, + tls_versions=[lib_consts.TLS_VERSION_1_3], + tls_ciphers='TLS_AES_256_GCM_SHA384', + alpn_protocols=['http/1.0'], + hsts_max_age=20, hsts_include_subdomains=True, + hsts_preload=True, + ).get(self.root_tag) + self.set_lb_status(self.lb_id) + unset_params = { + 'name': None, 'description': None, 'connection_limit': None, + 'default_tls_container_ref': None, 'sni_container_refs': None, + 'insert_headers': None, 'timeout_client_data': None, + 'timeout_member_connect': None, 'timeout_member_data': None, + 'timeout_tcp_inspect': None, 'client_ca_tls_container_ref': None, + 'client_authentication': None, 'default_pool_id': None, + 'client_crl_container_ref': None, 'tls_versions': None, + 'tls_ciphers': None, 'alpn_protocols': None, + 'hsts_max_age': None, 'hsts_include_subdomains': None, + 'hsts_preload': None, + } + body = self._build_body(unset_params) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['id']) + api_listener = self.put(listener_path, body).json.get(self.root_tag) + + self.assertEqual('', api_listener['name']) + self.assertEqual('', api_listener['description']) + self.assertEqual(constants.DEFAULT_CONNECTION_LIMIT, + api_listener['connection_limit']) + 
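# Remaining unset fields fall back to the defaults configured above. +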
self.assertIsNone(api_listener['default_tls_container_ref'])
+        self.assertEqual([], api_listener['sni_container_refs'])
+        self.assertEqual({}, api_listener['insert_headers'])
+        self.assertEqual(20, api_listener['timeout_client_data'])
+        self.assertEqual(21, api_listener['timeout_member_connect'])
+        self.assertEqual(22, api_listener['timeout_member_data'])
+        self.assertEqual(23, api_listener['timeout_tcp_inspect'])
+        self.assertIsNone(api_listener['client_ca_tls_container_ref'])
+        self.assertIsNone(api_listener['client_crl_container_ref'])
+        self.assertEqual(constants.CLIENT_AUTH_NONE,
+                         api_listener['client_authentication'])
+        self.assertIsNone(api_listener['default_pool_id'])
+        self.assertEqual(constants.TLS_VERSIONS_OWASP_SUITE_B,
+                         api_listener['tls_versions'])
+        self.assertEqual(constants.CIPHERS_OWASP_SUITE_B,
+                         api_listener['tls_ciphers'])
+        self.assertEqual(['http/1.1'], api_listener['alpn_protocols'])
+        self.assertIsNone(api_listener['hsts_max_age'])
+        self.assertFalse(api_listener['hsts_include_subdomains'])
+        self.assertFalse(api_listener['hsts_preload'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_with_bad_ca_cert(self, mock_cert_data):
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        mock_cert_data.return_value = {'tls_cert': cert1}
+        self.cert_manager_mock().get_secret.return_value = (
+            sample_certs.X509_CA_CERT)
+
+        tls_uuid = uuidutils.generate_uuid()
+        ca_tls_uuid = uuidutils.generate_uuid()
+        listener = self.create_listener(
+            constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id,
+            name='listener1', description='desc1',
+            admin_state_up=False, connection_limit=10,
+            default_tls_container_ref=tls_uuid,
+            default_pool_id=None,
+            client_ca_tls_container_ref=ca_tls_uuid).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.cert_manager_mock().get_secret.side_effect = Exception(
+            'bad ca cert')
+        lb_listener = {
+            'client_ca_tls_container_ref': uuidutils.generate_uuid()}
+        body = self._build_body(lb_listener)
+        listener_path = self.LISTENER_PATH.format(
+            listener_id=listener['id'])
+        response = self.put(listener_path, body, status=400).json
+        self.assertIn(lb_listener['client_ca_tls_container_ref'],
+                      response['faultstring'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_with_unreachable_crl(self, mock_cert_data):
+        # Load up sample certs to test the validation
+        tls_cert_mock = mock.MagicMock()
+        tls_cert_mock.get_certificate.return_value = sample_certs.X509_CA_CERT
+        self.cert_manager_mock().get_cert.return_value = tls_cert_mock
+        self.cert_manager_mock().get_secret.side_effect = [
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, Exception('bad CRL ref')]
+
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        mock_cert_data.return_value = {'tls_cert': cert1}
+        listener = self.create_listener(
+            constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id,
+            name='listener1', description='desc1',
+            admin_state_up=False, connection_limit=10,
+            default_tls_container_ref=uuidutils.generate_uuid(),
+            default_pool_id=None,
+            client_ca_tls_container_ref=uuidutils.generate_uuid(),
+            client_crl_container_ref=uuidutils.generate_uuid()).get(
+            self.root_tag)
+        self.set_lb_status(self.lb_id)
+        lb_listener = {
+            'client_crl_container_ref': uuidutils.generate_uuid()}
+        body = self._build_body(lb_listener)
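+        # The PUT should return 400 when the new CRL ref cannot be retrieved.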
listener_path = self.LISTENER_PATH.format( + listener_id=listener['id']) + response = self.put(listener_path, body, status=400).json + self.assertIn(lb_listener['client_crl_container_ref'], + response['faultstring']) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_with_bad_crl(self, mock_cert_data): + # Load up sample certs to test the validation + tls_cert_mock = mock.MagicMock() + tls_cert_mock.get_certificate.return_value = sample_certs.X509_CA_CERT + self.cert_manager_mock().get_cert.return_value = tls_cert_mock + self.cert_manager_mock().get_secret.side_effect = [ + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, + sample_certs.X509_CA_CERT, 'bad CRL'] + + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + listener = self.create_listener( + constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, + name='listener1', description='desc1', + admin_state_up=False, connection_limit=10, + default_tls_container_ref=uuidutils.generate_uuid(), + default_pool_id=None, + client_ca_tls_container_ref=uuidutils.generate_uuid(), + client_crl_container_ref=uuidutils.generate_uuid()).get( + self.root_tag) + self.set_lb_status(self.lb_id) + lb_listener = { + 'client_crl_container_ref': uuidutils.generate_uuid()} + body = self._build_body(lb_listener) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['id']) + response = self.put(listener_path, body, status=400).json + self.assertIn("The client authentication certificate revocation list " + "is invalid. It must be a valid x509 PEM format " + "certificate revocation list.", + response['faultstring']) + + def test_update_authorized(self): + listener = self.create_listener( + constants.PROTOCOL_TCP, 80, self.lb_id, + name='listener1', description='desc1', + admin_state_up=False, connection_limit=10, + default_pool_id=None).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_listener = {'name': 'listener2', 'admin_state_up': True, + 'default_pool_id': self.pool_id} + body = self._build_body(new_listener) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['id']) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + api_listener = self.put(listener_path, body) + api_listener = api_listener.json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + update_expect = {'name': 'listener2', 'admin_state_up': True, + 'default_pool_id': self.pool_id, + 'provisioning_status': constants.PENDING_UPDATE, + 'operating_status': constants.ONLINE} + listener.update(update_expect) + self.assertEqual(listener['created_at'], api_listener['created_at']) + self.assertNotEqual(listener['updated_at'], api_listener['updated_at']) + self.assertNotEqual(listener, api_listener) + 
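# The LB should be PENDING_UPDATE while the listener change is in flight. +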
self.assert_correct_lb_status(self.lb_id, constants.ONLINE, + constants.PENDING_UPDATE) + self.assert_final_listener_statuses(self.lb_id, + api_listener['id']) + + def test_update_not_authorized(self): + listener = self.create_listener( + constants.PROTOCOL_TCP, 80, self.lb_id, + name='listener1', description='desc1', + admin_state_up=False, connection_limit=10, + default_pool_id=None).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_listener = {'name': 'listener2', 'admin_state_up': True, + 'default_pool_id': self.pool_id} + body = self._build_body(new_listener) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['id']) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + api_listener = self.put(listener_path, body, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_listener.json) + self.assert_correct_lb_status(self.lb_id, constants.ONLINE, + constants.ACTIVE) + + def test_create_listeners_same_port(self): + listener1 = self.create_listener(constants.PROTOCOL_TCP, 80, + self.lb_id) + self.set_lb_status(self.lb_id) + listener2_post = {'protocol': listener1['listener']['protocol'], + 'protocol_port': + listener1['listener']['protocol_port'], + 'loadbalancer_id': self.lb_id} + body = self._build_body(listener2_post) + self.post(self.LISTENERS_PATH, body, status=409) + + def test_create_listeners_tcp_https_same_port(self): + listener1 = self.create_listener(constants.PROTOCOL_TCP, 80, + self.lb_id) + self.set_lb_status(self.lb_id) + listener2_post = {'protocol': constants.PROTOCOL_HTTPS, + 'protocol_port': + listener1['listener']['protocol_port'], + 'loadbalancer_id': self.lb_id} + body = self._build_body(listener2_post) + self.post(self.LISTENERS_PATH, body, status=409) + + def test_create_listeners_tcp_udp_same_port(self): + listener1 = self.create_listener(constants.PROTOCOL_TCP, 80, + self.lb_id) + self.set_lb_status(self.lb_id) + listener2_post = {'protocol': constants.PROTOCOL_UDP, + 'protocol_port': + listener1['listener']['protocol_port'], + 'loadbalancer_id': self.lb_id} + body = self._build_body(listener2_post) + self.post(self.LISTENERS_PATH, body, status=201) + + def test_delete(self): + listener = self.create_listener(constants.PROTOCOL_HTTP, 80, + self.lb_id) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + self.delete(listener_path) + response = self.get(listener_path) + api_listener = response.json['listener'] + expected = {'name': None, 'default_pool_id': None, + 'description': None, 'admin_state_up': True, + 'operating_status': constants.ONLINE, + 'provisioning_status': constants.PENDING_DELETE, + 'connection_limit': None} + listener['listener'].update(expected) + + self.assertIsNone(listener['listener'].pop('updated_at')) + self.assertIsNotNone(api_listener.pop('updated_at')) + self.assertNotEqual(listener, api_listener) + self.assert_correct_lb_status(self.lb_id, constants.ONLINE, + constants.PENDING_UPDATE) + self.assert_final_listener_statuses(self.lb_id, api_listener['id'], + delete=True) + + # Problems with TLS certs should not block a delete + def 
test_delete_with_bad_tls_ref(self): + listener = self.create_listener(constants.PROTOCOL_TCP, + 443, self.lb_id) + tls_uuid = uuidutils.generate_uuid() + self.set_lb_status(self.lb_id) + self.listener_repo.update(db_api.get_session(), + listener['listener']['id'], + tls_certificate_id=tls_uuid, + protocol=constants.PROTOCOL_TERMINATED_HTTPS) + + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + self.delete(listener_path) + response = self.get(listener_path) + api_listener = response.json['listener'] + expected = {'name': None, 'default_pool_id': None, + 'description': None, 'admin_state_up': True, + 'operating_status': constants.ONLINE, + 'provisioning_status': constants.PENDING_DELETE, + 'connection_limit': None} + listener['listener'].update(expected) + + self.assertIsNone(listener['listener'].pop('updated_at')) + self.assertIsNotNone(api_listener.pop('updated_at')) + self.assertNotEqual(listener, api_listener) + self.assert_correct_lb_status(self.lb_id, constants.ONLINE, + constants.PENDING_UPDATE) + self.assert_final_listener_statuses(self.lb_id, api_listener['id'], + delete=True) + + def test_delete_authorized(self): + listener = self.create_listener(constants.PROTOCOL_HTTP, 80, + self.lb_id) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + + self.delete(listener_path) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + response = self.get(listener_path) + api_listener = response.json['listener'] + expected = {'name': None, 'default_pool_id': None, + 'description': None, 'admin_state_up': True, + 'operating_status': constants.ONLINE, + 'provisioning_status': constants.PENDING_DELETE, + 'connection_limit': None} + listener['listener'].update(expected) + + self.assertIsNone(listener['listener'].pop('updated_at')) + self.assertIsNotNone(api_listener.pop('updated_at')) + self.assertNotEqual(listener, api_listener) + self.assert_correct_lb_status(self.lb_id, constants.ONLINE, + constants.PENDING_UPDATE) + self.assert_final_listener_statuses(self.lb_id, api_listener['id'], + delete=True) + + def test_delete_not_authorized(self): + listener = self.create_listener(constants.PROTOCOL_HTTP, 80, + self.lb_id) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + self.delete(listener_path, status=403) + 
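# The 403 means the delete was rejected outright, so the load balancer + # should remain ACTIVE once the auth strategy is restored below. + 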
self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assert_correct_lb_status(self.lb_id, constants.ONLINE, + constants.ACTIVE) + + def test_delete_bad_listener_id(self): + listener_path = self.LISTENER_PATH.format(listener_id='SEAN-CONNERY') + self.delete(listener_path, status=404) + + def test_create_listener_bad_protocol(self): + lb_listener = {'protocol': 'SEAN_CONNERY', + 'protocol_port': 80} + self.post(self.LISTENERS_PATH, lb_listener, status=400) + + def test_create_listener_proxy_protocol(self): + lb_listener = {'protocol': 'PROXY', + 'protocol_port': 80} + self.post(self.LISTENERS_PATH, lb_listener, status=400) + + def test_update_listener_bad_protocol(self): + listener = self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id) + self.set_lb_status(self.lb_id) + new_listener = {'protocol': 'SEAN_CONNERY', + 'protocol_port': 80} + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener'].get('id')) + self.put(listener_path, new_listener, status=400) + + def test_update_pending_create(self): + lb = self.create_load_balancer(uuidutils.generate_uuid()) + optionals = {'name': 'lb1', 'description': 'desc1', + 'admin_state_up': False} + lb.update(optionals) + + lb_listener = {'name': 'listener1', 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, 'connection_limit': 10, + 'loadbalancer_id': lb['loadbalancer']['id']} + body = self._build_body(lb_listener) + self.post(self.LISTENERS_PATH, body, status=409) + + def test_delete_pending_update(self): + lb = self.create_load_balancer(uuidutils.generate_uuid()) + optionals = {'name': 'lb1', 'description': 'desc1', + 'admin_state_up': False} + lb.update(optionals) + + self.set_lb_status(lb['loadbalancer']['id']) + lb_listener = {'name': 'listener1', 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, 'connection_limit': 10, + 'loadbalancer_id': lb['loadbalancer']['id']} + body = self._build_body(lb_listener) + api_listener = self.post( + self.LISTENERS_PATH, body).json['listener'] + listener_path = self.LISTENER_PATH.format( + listener_id=api_listener['id']) + self.delete(listener_path, status=409) + + def test_update_empty_body(self): + listener = self.create_listener(constants.PROTOCOL_TCP, 80, self.lb_id) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener'].get('id')) + self.put(listener_path, {}, status=400) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_bad_tls_ref(self, mock_cert_data): + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_cert_data.return_value = {'sni_certs': [cert2, cert3]} + sni1 = uuidutils.generate_uuid() + sni2 = uuidutils.generate_uuid() + tls_ref = uuidutils.generate_uuid() + tls_ref2 = uuidutils.generate_uuid() + lb_listener = {'name': 'listener1', 'default_pool_id': None, + 'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, + 'sni_container_refs': [sni1, sni2], + 'default_tls_container_ref': tls_ref, + 'loadbalancer_id': self.lb_id} + + body = self._build_body(lb_listener) + api_listener = self.post( + self.LISTENERS_PATH, body).json['listener'] + self.set_lb_status(self.lb_id) + lb_listener_put = { + 'default_tls_container_ref': tls_ref2, + 'sni_container_refs': [sni1, sni2] + } + 
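# The side effects set up below are intended to make the new default + # ref (tls_ref2) and sni1 fail validation while sni2 still resolves, so + # only the failing refs should appear in the faultstring. + 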
body = self._build_body(lb_listener_put) + listener_path = self.LISTENER_PATH.format( + listener_id=api_listener['id']) + self.cert_manager_mock().get_cert.side_effect = [ + Exception("bad cert"), None, Exception("bad cert")] + self.cert_manager_mock().get_secret.side_effect = [ + Exception("bad secret"), Exception("bad secret")] + response = self.put(listener_path, body, status=400).json + self.assertIn(tls_ref2, response['faultstring']) + self.assertIn(sni1, response['faultstring']) + self.assertNotIn(sni2, response['faultstring']) + self.assertNotIn(tls_ref, response['faultstring']) + + def test_update_pending_update(self): + lb = self.create_load_balancer(uuidutils.generate_uuid()) + optionals = {'name': 'lb1', 'description': 'desc1', + 'admin_state_up': False} + lb.update(optionals) + self.set_lb_status(lb['loadbalancer']['id']) + lb_listener = {'name': 'listener1', 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, 'connection_limit': 10, + 'loadbalancer_id': lb['loadbalancer']['id']} + body = self._build_body(lb_listener) + api_listener = self.post( + self.LISTENERS_PATH, body).json['listener'] + self.set_lb_status(lb['loadbalancer']['id']) + self.put(self.LB_PATH.format(lb_id=lb['loadbalancer']['id']), + {'loadbalancer': {'name': 'hi'}}) + lb_listener_put = {'name': 'listener1_updated'} + body = self._build_body(lb_listener_put) + listener_path = self.LISTENER_PATH.format( + listener_id=api_listener['id']) + self.put(listener_path, body, status=409) + + def test_update_pending_delete(self): + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', description='desc1', + admin_state_up=False) + lb_id = lb['loadbalancer'].get('id') + self.set_lb_status(lb_id) + lb_listener = {'name': 'listener1', 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, 'connection_limit': 10, + 'loadbalancer_id': lb_id} + body = self._build_body(lb_listener) + api_listener = self.post( + self.LISTENERS_PATH, body).json.get(self.root_tag) + self.set_lb_status(lb_id) + self.delete(self.LB_PATH.format(lb_id=lb_id), + params={'cascade': "true"}) + lb_listener_put = {'name': 'listener1_updated'} + body = self._build_body(lb_listener_put) + listener_path = self.LISTENER_PATH.format( + listener_id=api_listener['id']) + self.put(listener_path, body, status=409) + + def test_update_deleted(self): + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', description='desc1', + admin_state_up=False) + lb_id = lb['loadbalancer'].get('id') + self.set_lb_status(lb_id) + lb_listener = {'name': 'listener1', 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, 'connection_limit': 10, + 'loadbalancer_id': lb_id} + body = self._build_body(lb_listener) + api_listener = self.post( + self.LISTENERS_PATH, body).json.get(self.root_tag) + # This updates the child objects + self.set_lb_status(lb_id, status=constants.DELETED) + lb_listener_put = {'name': 'listener1_updated'} + body = self._build_body(lb_listener_put) + listener_path = self.LISTENER_PATH.format( + listener_id=api_listener['id']) + self.put(listener_path, body, status=404) + + def test_delete_pending_delete(self): + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', description='desc1', + admin_state_up=False) + lb_id = lb['loadbalancer'].get('id') + self.set_lb_status(lb_id) + lb_listener = {'name': 'listener1', 'description': 
'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, 'connection_limit': 10, + 'loadbalancer_id': lb_id} + body = self._build_body(lb_listener) + api_listener = self.post( + self.LISTENERS_PATH, body).json.get(self.root_tag) + self.set_lb_status(lb_id) + self.delete(self.LB_PATH.format(lb_id=lb_id), + params={'cascade': "true"}) + listener_path = self.LISTENER_PATH.format( + listener_id=api_listener['id']) + self.delete(listener_path, status=409) + + def test_delete_already_deleted(self): + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', description='desc1', + admin_state_up=False) + lb_id = lb['loadbalancer'].get('id') + self.set_lb_status(lb_id) + lb_listener = {'name': 'listener1', 'description': 'desc1', + 'admin_state_up': False, + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, 'connection_limit': 10, + 'loadbalancer_id': lb_id} + body = self._build_body(lb_listener) + api_listener = self.post( + self.LISTENERS_PATH, body).json.get(self.root_tag) + # This updates the child objects + self.set_lb_status(lb_id, status=constants.DELETED) + listener_path = self.LISTENER_PATH.format( + listener_id=api_listener['id']) + self.delete(listener_path, status=404) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_tls_termination_data(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_cert_data.return_value = {'tls_cert': cert1, + 'sni_certs': [cert2, cert3]} + cert_id = uuidutils.generate_uuid() + listener = self.create_listener(constants.PROTOCOL_TERMINATED_HTTPS, + 80, self.lb_id, + default_tls_container_ref=cert_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(cert_id, get_listener['default_tls_container_ref']) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_with_tls_termination_data(self, mock_cert_data): + cert_id_orig = uuidutils.generate_uuid() + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + cert_id = uuidutils.generate_uuid() + listener = self.create_listener( + constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, + default_tls_container_ref=cert_id_orig) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(cert_id_orig, + get_listener.get('default_tls_container_ref')) + self.put(listener_path, + self._build_body({'default_tls_container_ref': cert_id})) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(cert_id, + get_listener.get('default_tls_container_ref')) + + def test_create_with_tls_termination_disabled(self): + self.conf.config(group='api_settings', + allow_tls_terminated_listeners=False) + cert_id = uuidutils.generate_uuid() + listener = self.create_listener(constants.PROTOCOL_TERMINATED_HTTPS, + 80, self.lb_id, + default_tls_container_ref=cert_id, + status=400) + self.assertIn( + 'The selected protocol is not allowed in this deployment: {}' + 
.format(constants.PROTOCOL_TERMINATED_HTTPS), + listener.get('faultstring')) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_alpn(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + cert_id = uuidutils.generate_uuid() + alpn_protocols = [lib_consts.ALPN_PROTOCOL_HTTP_2, + lib_consts.ALPN_PROTOCOL_HTTP_1_1] + listener = self.create_listener(constants.PROTOCOL_TERMINATED_HTTPS, + 80, self.lb_id, + default_tls_container_ref=cert_id, + alpn_protocols=['h2', 'http/1.1']) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(alpn_protocols, get_listener['alpn_protocols']) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_alpn_negative(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + cert_id = uuidutils.generate_uuid() + req_dict = {'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id, + 'default_tls_container_ref': cert_id, + 'alpn_protocols': [lib_consts.ALPN_PROTOCOL_HTTP_1_1, + 'invalid-proto']} + res = self.post(self.LISTENERS_PATH, self._build_body(req_dict), + status=400) + fault = res.json['faultstring'] + self.assertIn( + 'Invalid input for field/attribute alpn_protocols', fault) + self.assertIn('Value should be a valid ALPN protocol ID', fault) + self.assert_correct_status(lb_id=self.lb_id) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_with_alpn(self, mock_cert_data): + cert_id = uuidutils.generate_uuid() + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + alpn_protocols_orig = [lib_consts.ALPN_PROTOCOL_HTTP_1_0] + alpn_protocols = [lib_consts.ALPN_PROTOCOL_HTTP_2, + lib_consts.ALPN_PROTOCOL_HTTP_1_1] + listener = self.create_listener( + constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, + default_tls_container_ref=cert_id, + alpn_protocols=alpn_protocols_orig) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(alpn_protocols_orig, + get_listener.get('alpn_protocols')) + self.put(listener_path, + self._build_body({'alpn_protocols': alpn_protocols})) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(alpn_protocols, get_listener.get('alpn_protocols')) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_with_alpn_negative(self, mock_cert_data): + cert_id = uuidutils.generate_uuid() + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + alpn_protocols_orig = [lib_consts.ALPN_PROTOCOL_HTTP_1_0] + listener = self.create_listener( + constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, + default_tls_container_ref=cert_id, + alpn_protocols=alpn_protocols_orig) + self.set_lb_status(self.lb_id) + listener_path = 
self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual(alpn_protocols_orig, + get_listener.get('alpn_protocols')) + + req_dict = {'alpn_protocols': [ + lib_consts.ALPN_PROTOCOL_HTTP_1_1, 'invalid-proto']} + res = self.put(listener_path, self._build_body(req_dict), + status=400) + fault = res.json['faultstring'] + self.assertIn( + 'Invalid input for field/attribute alpn_protocols', fault) + self.assertIn('Value should be a valid ALPN protocol ID', fault) + self.assert_correct_status(lb_id=self.lb_id) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_sni_data(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_cert_data.return_value = {'tls_cert': cert1, + 'sni_certs': [cert2, cert3]} + sni_id1 = uuidutils.generate_uuid() + sni_id2 = uuidutils.generate_uuid() + listener = self.create_listener(constants.PROTOCOL_TERMINATED_HTTPS, + 80, self.lb_id, + sni_container_refs=[sni_id1, sni_id2]) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertCountEqual([sni_id1, sni_id2], + get_listener['sni_container_refs']) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_tls_terminated_with_sni_data(self, mock_cert_data): + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_cert_data.return_value = {'sni_certs': [cert2, cert3]} + sni_id1 = uuidutils.generate_uuid() + sni_id2 = uuidutils.generate_uuid() + listener = self.create_listener( + constants.PROTOCOL_TERMINATED_HTTPS, 80, self.lb_id, + default_tls_container_ref=uuidutils.generate_uuid()) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual([], get_listener.get('sni_container_refs')) + self.put(listener_path, + self._build_body({'sni_container_refs': [sni_id1, sni_id2]})) + get_listener = self.get(listener_path).json['listener'] + self.assertCountEqual([sni_id1, sni_id2], + get_listener.get('sni_container_refs')) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_non_tls_terminated_with_sni_data(self, mock_cert_data): + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_cert_data.return_value = {'sni_certs': [cert2, cert3]} + sni_id1 = uuidutils.generate_uuid() + sni_id2 = uuidutils.generate_uuid() + listener = self.create_listener(constants.PROTOCOL_HTTP, 80, + self.lb_id) + self.set_lb_status(self.lb_id) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual([], get_listener.get('sni_container_refs')) + body = self._build_body({'sni_container_refs': [sni_id1, sni_id2]}) + response = self.put(listener_path, body, status=400).json + self.assertEqual( + "Validation failure: 
Certificate container references are not " + "allowed on HTTP protocol listeners.", + response['faultstring']) + get_listener = self.get(listener_path).json['listener'] + self.assertEqual([], get_listener.get('sni_container_refs')) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_valid_insert_headers(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + lb_listener = {'protocol': 'HTTP', + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id, + 'insert_headers': {'X-Forwarded-For': 'true', + 'X-Forwarded-Port': 'true', + 'X-Forwarded-Proto': 'true'}} + body = self._build_body(lb_listener) + self.post(self.LISTENERS_PATH, body, status=201) + + # test client certificate http headers + self.set_lb_status(self.lb_id) + header = {} + for name in constants.SUPPORTED_SSL_HEADERS: + header[name] = 'true' + lb_listener = {'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'protocol_port': 1801, + 'loadbalancer_id': self.lb_id, + 'insert_headers': header, + 'default_tls_container_ref': uuidutils.generate_uuid()} + body = self._build_body(lb_listener) + self.post(self.LISTENERS_PATH, body, status=201) + + def test_create_with_bad_insert_headers(self): + lb_listener = {'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id, + 'insert_headers': {'X-Forwarded-Four': 'true'}} + body = self._build_body(lb_listener) + self.post(self.LISTENERS_PATH, body, status=400) + + # test client certificate http headers + for name in constants.SUPPORTED_SSL_HEADERS: + header = {} + header[name] = 'true' + lb_listener['insert_headers'] = header + body = self._build_body(lb_listener) + listener = self.post(self.LISTENERS_PATH, body, status=400).json + self.assertIn('{} is not a valid option for {}'.format( + [name], + f'{constants.PROTOCOL_HTTP} protocol listener.'), + listener.get('faultstring')) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_with_valid_insert_headers(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + mock_cert_data.return_value = {'tls_cert': cert1} + listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, self.lb_id) + self.set_lb_status(self.lb_id) + new_listener = self._build_body( + {'insert_headers': {'X-Forwarded-For': 'true', + 'X-Forwarded-Port': 'true', + 'X-Forwarded-Proto': 'true'}}) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener'].get('id')) + update_listener = self.put( + listener_path, new_listener, status=200).json + self.assertNotEqual( + listener[self.root_tag]['insert_headers'], + update_listener[self.root_tag]['insert_headers']) + + self.set_lb_status(self.lb_id) + # test client certificate http headers + cert1_id = uuidutils.generate_uuid() + listener = self.create_listener( + constants.PROTOCOL_TERMINATED_HTTPS, 443, self.lb_id, + default_tls_container_ref=cert1_id) + self.set_lb_status(self.lb_id) + header = {} + for name in constants.SUPPORTED_SSL_HEADERS: + header[name] = 'true' + new_listener[self.root_tag]['insert_headers'] = header + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener'].get('id')) + update_listener = self.put( + listener_path, new_listener, status=200).json + self.assertNotEqual( + listener[self.root_tag]['insert_headers'], + update_listener[self.root_tag]['insert_headers']) + + def 
test_update_with_bad_insert_headers(self): + listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, self.lb_id) + self.set_lb_status(self.lb_id) + new_listener = self._build_body( + {'insert_headers': {'X-Bad-Header': 'true'}}) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener'].get('id')) + update_listener = self.put( + listener_path, new_listener, status=400).json + self.assertIn('[\'X-Bad-Header\'] is not a valid option for ' + 'insert_headers', + update_listener.get('faultstring')) + + # test client certificate http headers + header = {} + for name in constants.SUPPORTED_SSL_HEADERS: + header[name] = 'true' + new_listener[self.root_tag]['insert_headers'] = header + # The order of the faultstring output is not stable, so just check + # the status. + self.put(listener_path, new_listener, status=400) + + def _test_update_protocol_insert_headers_mismatch(self, protocol): + listener = self.create_listener( + protocol, 80, self.lb_id) + self.set_lb_status(self.lb_id) + new_listener = self._build_body( + {'insert_headers': {'X-Forwarded-Port': 'true'}}) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener'].get('id')) + update_listener = self.put( + listener_path, new_listener, status=400).json + self.assertIn(protocol, update_listener['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + + def test_update_protocol_HTTPS_insert_headers(self): + self._test_update_protocol_insert_headers_mismatch( + constants.PROTOCOL_HTTPS) + + def test_update_protocol_TCP_insert_headers(self): + self._test_update_protocol_insert_headers_mismatch( + constants.PROTOCOL_TCP) + + def test_update_protocol_UDP_insert_headers(self): + self._test_update_protocol_insert_headers_mismatch( + constants.PROTOCOL_UDP) + + def _getStats(self, listener_id): + res = self.get(self.LISTENER_PATH.format( + listener_id=listener_id + "/stats")) + return res.json.get('stats') + + def test_statistics(self): + lb = self.create_load_balancer( + uuidutils.generate_uuid()).get('loadbalancer') + self.set_lb_status(lb['id']) + li = self.create_listener( + constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') + amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) + ls = self.create_listener_stats_dynamic( + listener_id=li.get('id'), + amphora_id=amphora.id, + bytes_in=random.randint(1, 9), + bytes_out=random.randint(1, 9), + total_connections=random.randint(1, 9), + request_errors=random.randint(1, 9)) + self.session.commit() + + response = self._getStats(li['id']) + self.assertEqual(ls['bytes_in'], response['bytes_in']) + self.assertEqual(ls['bytes_out'], response['bytes_out']) + self.assertEqual(ls['total_connections'], + response['total_connections']) + self.assertEqual(ls['active_connections'], + response['active_connections']) + self.assertEqual(ls['request_errors'], + response['request_errors']) + + def test_statistics_authorized(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer( + uuidutils.generate_uuid(), + project_id=project_id).get('loadbalancer') + self.set_lb_status(lb['id']) + li = self.create_listener( + constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') + amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) + ls = self.create_listener_stats_dynamic( + listener_id=li.get('id'), + amphora_id=amphora.id, + bytes_in=random.randint(1, 9), + bytes_out=random.randint(1, 9), + total_connections=random.randint(1, 9), + request_errors=random.randint(1, 9)) + self.conf = 
self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + self.session.commit() + + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self._getStats(li['id']) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assertEqual(ls['bytes_in'], response['bytes_in']) + self.assertEqual(ls['bytes_out'], response['bytes_out']) + self.assertEqual(ls['total_connections'], + response['total_connections']) + self.assertEqual(ls['active_connections'], + response['active_connections']) + self.assertEqual(ls['request_errors'], + response['request_errors']) + + def test_statistics_not_authorized(self): + lb = self.create_load_balancer( + uuidutils.generate_uuid()).get('loadbalancer') + self.set_lb_status(lb['id']) + li = self.create_listener( + constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') + amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) + self.create_listener_stats_dynamic( + listener_id=li.get('id'), + amphora_id=amphora.id, + bytes_in=random.randint(1, 9), + bytes_out=random.randint(1, 9), + total_connections=random.randint(1, 9), + request_errors=random.randint(1, 9)) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + res = self.get(self.LISTENER_PATH.format( + listener_id=li['id'] + "/stats"), status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, res.json) + + def test_statistics_get_deleted(self): + lb = self.create_load_balancer( + uuidutils.generate_uuid()).get('loadbalancer') + self.set_lb_status(lb['id']) + li = self.create_listener( + constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener') + amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id']) + self.create_listener_stats_dynamic( + listener_id=li.get('id'), + amphora_id=amphora.id, + bytes_in=random.randint(1, 9), + bytes_out=random.randint(1, 9), + total_connections=random.randint(1, 9), + request_errors=random.randint(1, 9)) + self.set_lb_status(lb['id'], status=constants.DELETED) + self.get(self.LISTENER_PATH.format( + listener_id=li.get('id') + "/stats"), status=404) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_listener_pool_protocol_map_post(self, mock_cert_data): + cert = data_models.TLSContainer(certificate='cert') + mock_cert_data.return_value = {'sni_certs': [cert]} + valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + for listener_proto in valid_map: + for pool_proto in valid_map[listener_proto]: + port = port + 1 + pool = self.create_pool( + self.lb_id, pool_proto, + 
constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + listener = {'protocol': listener_proto, + 'protocol_port': port, + 'loadbalancer_id': self.lb_id, + 'default_pool_id': pool.get('id')} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + listener.update( + {'sni_container_refs': [uuidutils.generate_uuid()]}) + body = self._build_body(listener) + self.post(self.LISTENERS_PATH, body, status=201) + self.set_object_status(self.lb_repo, self.lb_id) + + invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + for listener_proto in invalid_map: + for pool_proto in invalid_map[listener_proto]: + port = port + 1 + if pool_proto == constants.PROTOCOL_TERMINATED_HTTPS: + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN, status=400) + self.assertIn("Invalid input", pool['faultstring']) + else: + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + expect_error_msg = ( + "Validation failure: The pool protocol '%s' is " + "invalid while the listener protocol is '%s'.") % ( + pool_proto, listener_proto) + listener = {'protocol': listener_proto, + 'protocol_port': port, + 'loadbalancer_id': self.lb_id, + 'default_pool_id': pool.get('id')} + body = self._build_body(listener) + res = self.post(self.LISTENERS_PATH, body, + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_listener_pool_protocol_map_put(self, mock_cert_data): + cert = data_models.TLSContainer(certificate='cert') + mock_cert_data.return_value = {'sni_certs': [cert]} + valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + for listener_proto in valid_map: + for pool_proto in valid_map[listener_proto]: + port = port + 1 + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + new_listener = {'default_pool_id': pool.get('id')} + res = self.put( + self.LISTENER_PATH.format(listener_id=listener.get('id')), + self._build_body(new_listener), status=200) + self.set_object_status(self.lb_repo, self.lb_id) + + invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP + port = 100 + for listener_proto in invalid_map: + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + port = port + 1 + for pool_proto in invalid_map[listener_proto]: + expect_error_msg = ("Validation failure: The pool protocol " + "'%s' is invalid while the listener " + "protocol is '%s'.") % (pool_proto, + listener_proto) + if pool_proto == constants.PROTOCOL_TERMINATED_HTTPS: + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN, status=400) + self.assertIn("Invalid input", pool['faultstring']) + else: + pool = self.create_pool( + self.lb_id, pool_proto, + 
constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + new_listener = {'default_pool_id': pool.get('id')} + res = self.put( + self.LISTENER_PATH.format( + listener_id=listener.get('id')), + self._build_body(new_listener), status=400) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) diff --git a/octavia/tests/functional/api/v2/test_load_balancer.py b/octavia/tests/functional/api/v2/test_load_balancer.py new file mode 100644 index 0000000000..f41d043a4e --- /dev/null +++ b/octavia/tests/functional/api/v2/test_load_balancer.py @@ -0,0 +1,4312 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import random +from unittest import mock + +from octavia_lib.api.drivers import exceptions as lib_exceptions +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +from sqlalchemy.orm import exc as sa_exception + +from octavia.common import constants +import octavia.common.context +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import utils +from octavia.network import base as network_base +from octavia.network import data_models as network_models +from octavia.tests.functional.api.v2 import base + + +class TestLoadBalancer(base.BaseAPITest): + root_tag = 'loadbalancer' + root_tag_list = 'loadbalancers' + root_tag_links = 'loadbalancers_links' + + def _assert_request_matches_response(self, req, resp, **optionals): + self.assertTrue(uuidutils.is_uuid_like(resp.get('id'))) + req_name = req.get('name') + req_description = req.get('description') + if not req_name: + self.assertEqual('', resp.get('name')) + else: + self.assertEqual(req.get('name'), resp.get('name')) + if not req_description: + self.assertEqual('', resp.get('description')) + else: + self.assertEqual(req.get('description'), resp.get('description')) + self.assertEqual(constants.PENDING_CREATE, + resp.get('provisioning_status')) + self.assertEqual(constants.OFFLINE, resp.get('operating_status')) + self.assertEqual(req.get('admin_state_up', True), + resp.get('admin_state_up')) + self.assertIsNotNone(resp.get('created_at')) + self.assertIsNone(resp.get('updated_at')) + for key, value in optionals.items(): + self.assertEqual(value, req.get(key)) + + def test_empty_list(self): + response = self.get(self.LBS_PATH) + api_list = response.json.get(self.root_tag_list) + self.assertEqual([], api_list) + + def _test_create_noop(self, **optionals): + self.conf.config(group='controller_worker', + network_driver='network_noop_driver') + self.conf.config(group='controller_worker', + compute_driver='compute_noop_driver') + self.conf.config(group='controller_worker', + amphora_driver='amphora_noop_driver') + lb_json = {'name': 'test_noop', + 'project_id': self.project_id + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = 
self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + return api_lb + + def test_create_noop_subnet_only(self): + self._test_create_noop(vip_subnet_id=uuidutils.generate_uuid()) + + def test_create_noop_network_only(self): + self._test_create_noop(vip_network_id=uuidutils.generate_uuid()) + + def test_create_noop_network_and_subnet(self): + self._test_create_noop(vip_network_id=uuidutils.generate_uuid(), + vip_subnet_id=uuidutils.generate_uuid()) + + def test_create(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'tags': ['test_tag1', 'test_tag2'] + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + return api_lb + + # Make sure the /v2.0 alias is maintained for the life of the v2 API + def test_create_v2_0(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, use_v2_0=True) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + return api_lb + + def test_create_using_tenant_id(self): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'tenant_id': self.project_id + } + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + return api_lb + + def test_create_without_vip(self): + lb_json = {'name': 'test1', + 'project_id': self.project_id} + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + err_msg = ('Validation failure: VIP must contain one of: ' + 'vip_port_id, vip_network_id, vip_subnet_id.') + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_empty_vip(self): + lb_json = {'vip_subnet_id': '', + 'project_id': self.project_id} + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + err_msg = ("Invalid input for field/attribute vip_subnet_id. " + "Value: ''. Value should be UUID format") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_invalid_vip_subnet(self): + subnet_id = uuidutils.generate_uuid() + lb_json = {'vip_subnet_id': subnet_id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch("octavia.network.drivers.noop_driver.driver" + ".NoopManager.get_subnet") as mock_get_subnet: + mock_get_subnet.side_effect = network_base.SubnetNotFound + response = self.post(self.LBS_PATH, body, status=400) + err_msg = f'Subnet {subnet_id} not found.' 
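+ # SubnetNotFound raised by the mocked noop network driver should + # surface as a 400 whose faultstring names the missing subnet id.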
+ self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_invalid_vip_network_subnet(self): + network = network_models.Network(id=uuidutils.generate_uuid(), + subnets=[]) + subnet_id = uuidutils.generate_uuid() + lb_json = { + 'vip_subnet_id': subnet_id, + 'vip_network_id': network.id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch("octavia.network.drivers.noop_driver.driver" + ".NoopManager.get_network") as mock_get_network: + mock_get_network.return_value = network + response = self.post(self.LBS_PATH, body, status=400) + err_msg = f'Subnet {subnet_id} not found.' + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_vip_subnet_fills_network(self): + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + lb_json = {'vip_subnet_id': subnet.id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch("octavia.network.drivers.noop_driver.driver" + ".NoopManager.get_subnet") as mock_get_subnet: + mock_get_subnet.return_value = subnet + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) + self.assertEqual(subnet.network_id, api_lb.get('vip_network_id')) + + def test_create_with_vip_network_has_no_subnet(self): + network = network_models.Network(id=uuidutils.generate_uuid(), + subnets=[]) + lb_json = { + 'vip_network_id': network.id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch("octavia.network.drivers.noop_driver.driver" + ".NoopManager.get_network") as mock_get_network: + mock_get_network.return_value = network + response = self.post(self.LBS_PATH, body, status=400) + err_msg = ("Validation failure: " + "Supplied network does not contain a subnet.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_vip_network_picks_subnet_ipv4(self): + network_id = uuidutils.generate_uuid() + subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + ip_version=6) + subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + ip_version=4) + network = network_models.Network(id=network_id, + subnets=[subnet1.id, subnet2.id]) + lb_json = {'vip_network_id': network.id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_subnet.side_effect = [subnet1, subnet2, subnet2] + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(subnet2.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network_id, api_lb.get('vip_network_id')) + + def test_create_with_vip_network_picks_subnet_ipv6(self): + network_id = uuidutils.generate_uuid() + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + ip_version=6) + network = network_models.Network(id=network_id, + subnets=[subnet.id]) + lb_json = {'vip_network_id': network_id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + 
"octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_subnet.return_value = subnet + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network_id, api_lb.get('vip_network_id')) + + def test_create_with_vip_network_picks_subnet_ipv4_avail_ips(self): + self.conf.config( + group='controller_worker', + loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) + network_id = uuidutils.generate_uuid() + subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + ip_version=4) + subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + ip_version=4) + subnet3 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + ip_version=4) + network = network_models.Network(id=network_id, + subnets=[subnet1.id, subnet2.id, + subnet3.id]) + subnet_ip_availability = [{'subnet_id': subnet1.id, 'used_ips': 254, + 'total_ips': 254}, {'subnet_id': subnet2.id, + 'used_ips': 128, 'total_ips': 254}, + {'subnet_id': subnet3.id, 'used_ips': 254, + 'total_ips': 254}] + ip_avail = network_models.Network_IP_Availability( + network_id=network.id, + subnet_ip_availability=subnet_ip_availability) + lb_json = {'vip_network_id': network.id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network_ip_availability") as ( + mock_get_network_ip_availability): + mock_get_network.return_value = network + mock_get_subnet.side_effect = [subnet1, subnet2, subnet3] + mock_get_network_ip_availability.return_value = ip_avail + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(subnet2.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network_id, api_lb.get('vip_network_id')) + + def test_create_with_vip_network_not_enough_avail_ips(self): + self.conf.config( + group='controller_worker', + loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) + network_id = uuidutils.generate_uuid() + subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + ip_version=4) + subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + ip_version=4) + network = network_models.Network(id=network_id, + subnets=[subnet1.id, subnet2.id]) + subnet_ip_availability = [{'subnet_id': subnet1.id, 'used_ips': 254, + 'total_ips': 254}, {'subnet_id': subnet2.id, + 'used_ips': 254, 'total_ips': 254}] + ip_avail = network_models.Network_IP_Availability( + network_id=network.id, + subnet_ip_availability=subnet_ip_availability) + lb_json = {'vip_network_id': network.id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") 
as mock_get_subnet, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network_ip_availability") as ( + mock_get_network_ip_availability): + mock_get_network.return_value = network + mock_get_subnet.side_effect = [subnet1, subnet2] + mock_get_network_ip_availability.return_value = ip_avail + response = self.post(self.LBS_PATH, body, status=400) + err_msg = ('Validation failure: Subnet(s) in the supplied network do ' + 'not contain enough available IPs.') + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_vip_network_and_address(self): + ip_address = '198.51.100.10' + network_id = uuidutils.generate_uuid() + subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + cidr='2001:DB8::/32', + ip_version=6) + subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + cidr='198.51.100.0/24', + ip_version=4) + network = network_models.Network(id=network_id, + subnets=[subnet1.id, subnet2.id]) + lb_json = {'vip_network_id': network.id, + 'vip_address': ip_address, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_subnet.side_effect = [subnet1, subnet2, subnet2] + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(subnet2.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network.id, api_lb.get('vip_network_id')) + self.assertEqual(ip_address, api_lb.get('vip_address')) + + def test_create_with_vip_network_and_address_no_subnet_match(self): + ip_address = '198.51.100.10' + network_id = uuidutils.generate_uuid() + subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + cidr='2001:DB8::/32', + ip_version=6) + subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + cidr='203.0.113.0/24', + ip_version=4) + network = network_models.Network(id=network_id, + subnets=[subnet1.id, subnet2.id]) + lb_json = {'vip_network_id': network.id, + 'vip_address': ip_address, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_subnet.side_effect = [subnet1, subnet2] + response = self.post(self.LBS_PATH, body, status=400) + err_msg = ('Validation failure: Supplied network does not contain a ' + 'subnet for VIP address specified.') + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_vip_network_and_address_ipv6(self): + ip_address = '2001:DB8::10' + network_id = uuidutils.generate_uuid() + subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + cidr='2001:DB8::/32', + ip_version=6) + subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + cidr='198.51.100.0/24', + ip_version=4) + network = network_models.Network(id=network_id, + subnets=[subnet1.id, subnet2.id]) + lb_json = {'vip_network_id': network.id, + 'vip_address': ip_address, + 
'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_subnet.side_effect = [subnet1, subnet2] + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(subnet1.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network.id, api_lb.get('vip_network_id')) + self.assertEqual(ip_address, api_lb.get('vip_address')) + + # Note: This test is using the unique local address range to + # validate that we handle a fully expanded IP address properly. + # This is not possible with the documentation/testnet range. + def test_create_with_vip_network_and_address_full_ipv6(self): + ip_address = 'fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff' + network_id = uuidutils.generate_uuid() + subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + cidr='fc00::/7', + ip_version=6) + subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + cidr='198.51.100.0/24', + ip_version=4) + network = network_models.Network(id=network_id, + subnets=[subnet1.id, subnet2.id]) + lb_json = {'vip_network_id': network.id, + 'vip_address': ip_address, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_subnet.side_effect = [subnet1, subnet2] + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(subnet1.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network.id, api_lb.get('vip_network_id')) + self.assertEqual(ip_address, api_lb.get('vip_address')) + + def test_create_with_vip_port_1_fixed_ip(self): + ip_address = '198.51.100.1' + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + network = network_models.Network(id=subnet.network_id, + subnets=[subnet]) + fixed_ip = network_models.FixedIP(subnet_id=subnet.id, + ip_address=ip_address) + port = network_models.Port(id=uuidutils.generate_uuid(), + fixed_ips=[fixed_ip], + network_id=network.id) + lb_json = { + 'name': 'test1', 'description': 'test1_desc', + 'vip_port_id': port.id, 'admin_state_up': False, + 'project_id': self.project_id} + body = self._build_body(lb_json) + # This test needs the provider driver to not supply the VIP port + # so mocking noop to not supply a VIP port. + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port, mock.patch( + "octavia.api.drivers.noop_driver.driver.NoopManager." + "create_vip_port") as mock_provider, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager." 
+ "get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_port.return_value = port + mock_provider.side_effect = lib_exceptions.NotImplementedError() + mock_get_subnet.return_value = subnet + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(ip_address, api_lb.get('vip_address')) + self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network.id, api_lb.get('vip_network_id')) + self.assertEqual(port.id, api_lb.get('vip_port_id')) + + def test_create_with_vip_port_2_fixed_ip(self): + ip_address = '198.51.100.1' + subnet = network_models.Subnet(id=uuidutils.generate_uuid()) + network = network_models.Network(id=uuidutils.generate_uuid(), + subnets=[subnet]) + fixed_ip = network_models.FixedIP(subnet_id=subnet.id, + ip_address=ip_address) + fixed_ip_2 = network_models.FixedIP( + subnet_id=uuidutils.generate_uuid(), ip_address='203.0.113.5') + port = network_models.Port(id=uuidutils.generate_uuid(), + fixed_ips=[fixed_ip, fixed_ip_2], + network_id=network.id) + lb_json = { + 'name': 'test1', 'description': 'test1_desc', + 'vip_port_id': port.id, 'admin_state_up': False, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port: + mock_get_network.return_value = network + mock_get_port.return_value = port + response = self.post(self.LBS_PATH, body, status=400) + err_msg = ("Validation failure: " + "VIP port's subnet could not be determined. Please " + "specify either a VIP subnet or address.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_vip_port_and_address(self): + ip_address = '198.51.100.1' + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + network = network_models.Network(id=subnet.network_id, + subnets=[subnet]) + fixed_ip = network_models.FixedIP(subnet_id=subnet.id, + ip_address=ip_address) + port = network_models.Port(id=uuidutils.generate_uuid(), + fixed_ips=[fixed_ip], + network_id=network.id) + lb_json = { + 'name': 'test1', 'description': 'test1_desc', + 'vip_port_id': port.id, 'vip_address': ip_address, + 'admin_state_up': False, 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager." 
+ "get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_port.return_value = port + mock_get_subnet.return_value = subnet + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(ip_address, api_lb.get('vip_address')) + self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network.id, api_lb.get('vip_network_id')) + self.assertEqual(port.id, api_lb.get('vip_port_id')) + + def test_create_with_vip_port_and_bad_address(self): + ip_address = '198.51.100.1' + subnet = network_models.Subnet(id=uuidutils.generate_uuid()) + network = network_models.Network(id=uuidutils.generate_uuid(), + subnets=[subnet]) + fixed_ip = network_models.FixedIP(subnet_id=subnet.id, + ip_address=ip_address) + port = network_models.Port(id=uuidutils.generate_uuid(), + fixed_ips=[fixed_ip], + network_id=network.id) + lb_json = { + 'name': 'test1', 'description': 'test1_desc', + 'vip_port_id': port.id, 'vip_address': '203.0.113.7', + 'admin_state_up': False, 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port: + mock_get_network.return_value = network + mock_get_port.return_value = port + response = self.post(self.LBS_PATH, body, status=400) + err_msg = ("Validation failure: " + "Specified VIP address not found on the specified VIP " + "port.") + self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_vip_full(self): + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + network = network_models.Network(id=subnet.network_id, + subnets=[subnet]) + port = network_models.Port(id=uuidutils.generate_uuid(), + network_id=network.id) + lb_json = { + 'name': 'test1', 'description': 'test1_desc', + 'vip_address': '10.0.0.1', 'vip_subnet_id': subnet.id, + 'vip_network_id': network.id, 'vip_port_id': port.id, + 'admin_state_up': False, 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager." 
+ "get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_port.return_value = port + mock_get_subnet.return_value = subnet + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual('10.0.0.1', api_lb.get('vip_address')) + self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network.id, api_lb.get('vip_network_id')) + self.assertEqual(port.id, api_lb.get('vip_port_id')) + + def test_create_with_multiple_vips(self): + subnet1 = network_models.Subnet(id=uuidutils.generate_uuid(), + cidr='10.0.0.0/24', + ip_version=4, + network_id=uuidutils.generate_uuid()) + subnet2 = network_models.Subnet(id=uuidutils.generate_uuid(), + cidr='fc00::/7', + ip_version=6, + network_id=subnet1.network_id) + subnet3 = network_models.Subnet(id=uuidutils.generate_uuid(), + cidr='10.1.0.0/24', + ip_version=4, + network_id=subnet1.network_id) + network = network_models.Network(id=subnet1.network_id, + subnets=[subnet1, subnet2, subnet3]) + port = network_models.Port(id=uuidutils.generate_uuid(), + network_id=network.id) + lb_json = { + 'name': 'test1', 'description': 'test1_desc', + 'vip_address': '10.0.0.1', 'vip_subnet_id': subnet1.id, + 'vip_network_id': network.id, 'vip_port_id': port.id, + 'project_id': self.project_id, + 'additional_vips': [ + {'subnet_id': subnet2.id, + 'ip_address': 'fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'}, + {'subnet_id': subnet3.id, + 'ip_address': '10.1.0.1'}, + ], + + } + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_port.return_value = port + mock_get_subnet.side_effect = [subnet1, subnet2, subnet3, subnet1] + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual('10.0.0.1', api_lb.get('vip_address')) + self.assertEqual(subnet1.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network.id, api_lb.get('vip_network_id')) + + expected_add_vips = [] + for add_vip in lb_json['additional_vips']: + add_vip.update(port_id=port.id) + expected_add_vips.append(add_vip) + + self.assertEqual( + # Sort by ip_address so the list order will be guaranteed + sorted(expected_add_vips, key=lambda x: x['ip_address']), + sorted(api_lb['additional_vips'], key=lambda x: x['ip_address'])) + + def test_create_neutron_failure(self): + + class TestNeutronException(network_base.AllocateVIPException): + def __init__(self, message, orig_msg, orig_code): + super().__init__( + message, orig_msg=orig_msg, orig_code=orig_code, + ) + + def __str__(self): + return repr(self.message) + + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + network = network_models.Network(id=subnet.network_id, + subnets=[subnet]) + port = network_models.Port(id=uuidutils.generate_uuid(), + network_id=network.id) + lb_json = { + 'name': 'test1', 'description': 'test1_desc', + 'vip_address': '10.0.0.1', 'vip_subnet_id': subnet.id, + 'vip_network_id': network.id, 'vip_port_id': port.id, + 'admin_state_up': False, 'project_id': self.project_id} + body = 
self._build_body(lb_json) + # This test needs the provider driver to not supply the VIP port, + # so the noop driver is mocked to not supply a VIP port. + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".allocate_vip") as mock_allocate_vip, mock.patch( + "octavia.api.drivers.noop_driver.driver.NoopManager." + "create_vip_port") as mock_provider, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_port.return_value = port + mock_allocate_vip.side_effect = TestNeutronException( + "octavia_msg", "neutron_msg", 409) + mock_provider.side_effect = lib_exceptions.NotImplementedError() + mock_get_subnet.return_value = subnet + response = self.post(self.LBS_PATH, body, status=409) + # Make sure the faultstring contains the neutron error and not + # the octavia error message. + self.assertIn("neutron_msg", response.json.get("faultstring")) + + def test_create_with_qos(self): + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + qos_policy_id = uuidutils.generate_uuid() + # Test with a specific vip_qos_policy_id + lb_json = {'vip_subnet_id': subnet.id, + 'project_id': self.project_id, + 'vip_qos_policy_id': qos_policy_id} + body = self._build_body(lb_json) + with mock.patch("octavia.network.drivers.noop_driver.driver" + ".NoopManager.get_subnet") as mock_get_subnet: + with mock.patch("octavia.common.validate." + "qos_policy_exists") as mock_get_qos: + mock_get_subnet.return_value = subnet + mock_get_qos.return_value = qos_policy_id + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) + self.assertEqual(qos_policy_id, api_lb.get('vip_qos_policy_id')) + + def test_create_with_qos_vip_port(self): + # Test with a vip_port_id that has a qos_policy applied + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + port_qos_policy_id = uuidutils.generate_uuid() + ip_address = '192.168.50.50' + network = network_models.Network(id=subnet.network_id, + subnets=[subnet]) + fixed_ip = network_models.FixedIP(subnet_id=subnet.id, + ip_address=ip_address) + port = network_models.Port(id=uuidutils.generate_uuid(), + fixed_ips=[fixed_ip], + network_id=network.id, + qos_policy_id=port_qos_policy_id) + lb_json = {'vip_port_id': port.id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver." + "NoopManager.get_network") as m_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".allocate_vip") as mock_allocate_vip, mock.patch( + "octavia.common.validate." + "qos_policy_exists") as m_get_qos, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager."
+ "get_subnet") as mock_get_subnet: + m_get_qos.return_value = port_qos_policy_id + mock_allocate_vip.return_value = data_models.Vip( + ip_address=ip_address, subnet_id=subnet.id, + network_id=network.id, port_id=port.id) + m_get_network.return_value = network + mock_get_port.return_value = port + mock_get_subnet.return_value = subnet + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(port.id, api_lb.get('vip_port_id')) + self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network.id, api_lb.get('vip_network_id')) + self.assertEqual(port_qos_policy_id, api_lb.get( + 'vip_qos_policy_id')) + + def test_create_with_qos_vip_port_and_vip_qos(self): + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + port_qos_policy_id = uuidutils.generate_uuid() + new_qos_policy_id = uuidutils.generate_uuid() + ip_address = '192.168.50.50' + network = network_models.Network(id=subnet.network_id, + subnets=[subnet]) + fixed_ip = network_models.FixedIP(subnet_id=subnet.id, + ip_address=ip_address) + port = network_models.Port(id=uuidutils.generate_uuid(), + fixed_ips=[fixed_ip], + network_id=network.id, + qos_policy_id=port_qos_policy_id) + lb_json = {'vip_port_id': port.id, + 'project_id': self.project_id, + 'vip_qos_policy_id': new_qos_policy_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver." + "NoopManager.get_network") as m_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".allocate_vip") as mock_allocate_vip, mock.patch( + "octavia.common.validate." + "qos_policy_exists") as m_get_qos, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager." + "get_subnet") as mock_get_subnet: + m_get_qos.return_value = mock.ANY + mock_allocate_vip.return_value = data_models.Vip( + ip_address=ip_address, subnet_id=subnet.id, + network_id=network.id, port_id=port.id) + m_get_network.return_value = network + mock_get_port.return_value = port + mock_get_subnet.return_value = subnet + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(port.id, api_lb.get('vip_port_id')) + self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network.id, api_lb.get('vip_network_id')) + self.assertEqual(new_qos_policy_id, api_lb.get( + 'vip_qos_policy_id')) + + def test_create_with_non_exist_qos_policy_id(self): + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + qos_policy_id = uuidutils.generate_uuid() + lb_json = {'vip_subnet_id': subnet.id, + 'project_id': self.project_id, + 'vip_qos_policy_id': qos_policy_id} + body = self._build_body(lb_json) + with mock.patch("octavia.network.drivers.noop_driver.driver" + ".NoopManager.get_subnet") as mock_get_subnet: + with mock.patch("octavia.network.drivers.noop_driver." + "driver.NoopManager." + "get_qos_policy") as mock_get_qos: + mock_get_subnet.return_value = subnet + mock_get_qos.side_effect = Exception() + response = self.post(self.LBS_PATH, body, status=400) + err_msg = f"qos_policy {qos_policy_id} not found." 
+ self.assertEqual(err_msg, response.json.get('faultstring')) + + def test_create_with_long_name(self): + lb_json = {'name': 'n' * 256, + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id} + response = self.post(self.LBS_PATH, self._build_body(lb_json), + status=400) + self.assertIn('Invalid input for field/attribute name', + response.json.get('faultstring')) + + def test_create_with_long_description(self): + lb_json = {'description': 'n' * 256, + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id} + response = self.post(self.LBS_PATH, self._build_body(lb_json), + status=400) + self.assertIn('Invalid input for field/attribute description', + response.json.get('faultstring')) + + def test_create_with_nonuuid_vip_attributes(self): + lb_json = {'vip_subnet_id': 'HI', + 'project_id': self.project_id} + response = self.post(self.LBS_PATH, self._build_body(lb_json), + status=400) + self.assertIn('Invalid input for field/attribute vip_subnet_id', + response.json.get('faultstring')) + + def test_create_with_allowed_network_id(self): + network_id = uuidutils.generate_uuid() + self.conf.config(group="networking", valid_vip_networks=[network_id]) + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id, + ip_version=4) + network = network_models.Network(id=network_id, subnets=[subnet.id]) + lb_json = {'vip_network_id': network.id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_subnet.return_value = subnet + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + self.assertEqual(subnet.id, api_lb.get('vip_subnet_id')) + self.assertEqual(network_id, api_lb.get('vip_network_id')) + + def test_create_with_disallowed_network_id(self): + network_id1 = uuidutils.generate_uuid() + network_id2 = uuidutils.generate_uuid() + self.conf.config(group="networking", valid_vip_networks=[network_id1]) + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=network_id2, + ip_version=4) + network = network_models.Network(id=network_id2, subnets=[subnet.id]) + lb_json = {'vip_network_id': network.id, + 'project_id': self.project_id} + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_subnet.return_value = subnet + response = self.post(self.LBS_PATH, body, status=400) + self.assertIn("Supplied VIP network_id is not allowed", + response.json.get('faultstring')) + + def test_create_with_disallowed_vip_objects(self): + self.conf.config(group="networking", allow_vip_network_id=False) + self.conf.config(group="networking", allow_vip_subnet_id=False) + self.conf.config(group="networking", allow_vip_port_id=False) + + lb_json = {'vip_network_id': uuidutils.generate_uuid(), + 'project_id': self.project_id} + response = self.post(self.LBS_PATH, self._build_body(lb_json), + status=400) + self.assertIn('use of vip_network_id is disallowed', + response.json.get('faultstring')) + + lb_json = 
{'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id} + response = self.post(self.LBS_PATH, self._build_body(lb_json), + status=400) + self.assertIn('use of vip_subnet_id is disallowed', + response.json.get('faultstring')) + + lb_json = {'vip_port_id': uuidutils.generate_uuid(), + 'project_id': self.project_id} + response = self.post(self.LBS_PATH, self._build_body(lb_json), + status=400) + self.assertIn('use of vip_port_id is disallowed', + response.json.get('faultstring')) + + def test_create_with_project_id(self): + project_id = uuidutils.generate_uuid() + api_lb = self.test_create(project_id=project_id) + self.assertEqual(project_id, api_lb.get('project_id')) + + def test_create_no_project_id(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid() + } + lb_json.update(optionals) + body = self._build_body(lb_json) + self.post(self.LBS_PATH, body, status=400) + + def test_create_context_project_id(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid() + } + lb_json.update(optionals) + body = self._build_body(lb_json) + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + + def test_create_authorized(self, **optionals): + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': project_id + } + lb_json.update(optionals) + body = self._build_body(lb_json) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self._assert_request_matches_response(lb_json, api_lb) + + def test_create_not_authorized(self, **optionals): + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': uuidutils.generate_uuid() + } + lb_json.update(optionals) + body = self._build_body(lb_json) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.post(self.LBS_PATH, body, status=403) + api_lb = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_lb) + + def test_create_provider_octavia(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'provider': 
constants.OCTAVIA + } + lb_json.update(optionals) + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + 'oslo_messaging.get_rpc_transport'), mock.patch( + 'oslo_messaging.Target'), mock.patch( + 'oslo_messaging.get_rpc_client'): + mock_get_network.return_value = mock.MagicMock() + mock_get_network.return_value.port_security_enabled = True + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_request_matches_response(lb_json, api_lb) + return api_lb + + def test_create_provider_octavia_no_port_sec(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'provider': constants.OCTAVIA + } + lb_json.update(optionals) + body = self._build_body(lb_json) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + 'oslo_messaging.get_rpc_transport'), mock.patch( + 'oslo_messaging.Target'), mock.patch( + 'oslo_messaging.get_rpc_client'): + mock_get_network.return_value = mock.MagicMock() + mock_get_network.return_value.port_security_enabled = False + response = self.post(self.LBS_PATH, body, status=500) + self.assertIn("Port security must be enabled on the VIP network.", + response.json.get('faultstring')) + + def test_create_provider_bogus(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'provider': 'BOGUS' + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + self.assertIn("Provider 'BOGUS' is not enabled.", + response.json.get('faultstring')) + + def test_create_flavor_bad_type(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'flavor_id': 'BOGUS' + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + self.assertIn("Invalid input for field/attribute flavor_id. Value: " + "'BOGUS'. 
Value should be UUID format", + response.json.get('faultstring')) + + def test_create_flavor_invalid(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'flavor_id': uuidutils.generate_uuid() + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + self.assertIn("Validation failure: Invalid flavor_id.", + response.json.get('faultstring')) + + def test_create_flavor_disabled(self, **optionals): + fp = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + flavor = self.create_flavor('name1', 'description', + fp.get('id'), False) + test_flavor_id = flavor.get('id') + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'flavor_id': test_flavor_id, + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + ref_faultstring = ('The selected flavor is not allowed in this ' + 'deployment: {}'.format(test_flavor_id)) + self.assertEqual(ref_faultstring, response.json.get('faultstring')) + + def test_create_flavor_missing(self, **optionals): + fp = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + flavor = self.create_flavor('name1', 'description', fp.get('id'), True) + test_flavor_id = flavor.get('id') + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'flavor_id': test_flavor_id + } + lb_json.update(optionals) + body = self._build_body(lb_json) + with mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict', + side_effect=sa_exception.NoResultFound): + response = self.post(self.LBS_PATH, body, status=400) + self.assertIn("Validation failure: Invalid flavor_id.", + response.json.get('faultstring')) + + def test_create_flavor_no_provider(self, **optionals): + fp = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + flavor = self.create_flavor('name1', 'description', fp.get('id'), True) + test_flavor_id = flavor.get('id') + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'flavor_id': test_flavor_id, + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=201) + api_lb = response.json.get(self.root_tag) + self.assertEqual('noop_driver', api_lb.get('provider')) + self.assertEqual(test_flavor_id, api_lb.get('flavor_id')) + + def test_create_with_availability_zone(self, **optionals): + zone_name = 'nova' + azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "%s"}' % zone_name) + az = self.create_availability_zone(zone_name, 'description', + azp.get('id'), True) + + api_lb = self.test_create(availability_zone=az.get('name')) + self.assertEqual(zone_name, api_lb.get('availability_zone')) + + def test_create_az_disabled(self, **optionals): + zone_name = 'nova' + azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "%s"}' % zone_name) + az = self.create_availability_zone(zone_name, 'description', + azp.get('id'), False) + + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'availability_zone': az.get('name'), + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + 
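+ # --- Editor's aside (illustrative sketch, not part of the original patch):
+ # several tests in this file set side_effect to a list (for example
+ # mock_get_subnet.side_effect = [subnet1, subnet2]); an iterable
+ # side_effect makes the mock return the next element on each successive
+ # call, so a single patched get_subnet can hand back a different subnet
+ # per lookup. A minimal stdlib-only example:
+ from unittest import mock
+ get_subnet = mock.Mock(side_effect=['subnet-ipv6', 'subnet-ipv4'])
+ assert get_subnet() == 'subnet-ipv6'
+ assert get_subnet() == 'subnet-ipv4'
+ # --- end editor's aside ---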
ref_faultstring = ('The selected availability_zone is not allowed in ' + 'this deployment: {}'.format(zone_name)) + self.assertEqual(ref_faultstring, response.json.get('faultstring')) + + def test_create_az_missing(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'availability_zone': 'bogus-az', + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + ref_faultstring = 'Validation failure: Invalid availability zone.' + self.assertEqual(ref_faultstring, response.json.get('faultstring')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_create_az_unsupported(self, mock_provider): + zone_name = 'nova' + azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "%s"}' % zone_name) + az = self.create_availability_zone(zone_name, 'description', + azp.get('id'), True) + mock_provider.side_effect = NotImplementedError + + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'availability_zone': az.get('name'), + } + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=501) + ref_faultstring = ("Provider \'noop_driver\' does not support a " + "requested action: This provider does not support " + "availability zones.") + self.assertEqual(ref_faultstring, response.json.get('faultstring')) + + def test_matching_providers(self, **optionals): + fp = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + flavor = self.create_flavor('name1', 'description', fp.get('id'), True) + test_flavor_id = flavor.get('id') + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'flavor_id': test_flavor_id, + 'provider': 'noop_driver' + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=201) + api_lb = response.json.get(self.root_tag) + self.assertEqual('noop_driver', api_lb.get('provider')) + self.assertEqual(test_flavor_id, api_lb.get('flavor_id')) + + def test_conflicting_providers(self, **optionals): + fp = self.create_flavor_profile('test1', 'noop_driver', + '{"image": "ubuntu"}') + flavor = self.create_flavor('name1', 'description', fp.get('id'), True) + test_flavor_id = flavor.get('id') + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'flavor_id': test_flavor_id, + 'provider': 'noop_driver-alt' + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + self.assertIn("Flavor '{}' is not compatible with provider " + "'noop_driver-alt'".format(test_flavor_id), + response.json.get('faultstring')) + + def test_get_all_admin(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=self.project_id, + tags=['test_tag1']) + lb2 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', project_id=project_id, + tags=['test_tag2']) + lb3 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', project_id=project_id, + tags=['test_tag3']) + response = self.get(self.LBS_PATH) + lbs = response.json.get(self.root_tag_list) + self.assertEqual(3, len(lbs)) + lb_id_names = [(lb.get('id'), + lb.get('name'), + lb.get('tags')) for lb in lbs] + lb1 = lb1.get(self.root_tag) + lb2 = 
lb2.get(self.root_tag) + lb3 = lb3.get(self.root_tag) + self.assertIn((lb1.get('id'), lb1.get('name'), lb1.get('tags')), + lb_id_names) + self.assertIn((lb2.get('id'), lb2.get('name'), lb2.get('tags')), + lb_id_names) + self.assertIn((lb3.get('id'), lb3.get('name'), lb3.get('tags')), + lb_id_names) + + def test_get_all_non_admin(self): + project_id = uuidutils.generate_uuid() + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', project_id=project_id) + lb3 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', project_id=self.project_id) + lb3 = lb3.get(self.root_tag) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.LBS_PATH) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + lbs = response.json.get(self.root_tag_list) + self.assertEqual(1, len(lbs)) + lb_id_names = [(lb.get('id'), lb.get('name')) for lb in lbs] + self.assertIn((lb3.get('id'), lb3.get('name')), lb_id_names) + + def test_get_all_unscoped_token(self): + project_id = uuidutils.generate_uuid() + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', project_id=project_id) + lb3 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', project_id=self.project_id) + lb3 = lb3.get(self.root_tag) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=None) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.LBS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + + def test_get_all_non_admin_global_observer(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id) + lb2 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', project_id=project_id) + lb3 = 
self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', project_id=self.project_id) + lb1 = lb1.get(self.root_tag) + lb2 = lb2.get(self.root_tag) + lb3 = lb3.get(self.root_tag) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['admin'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.LBS_PATH) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + lbs = response.json.get(self.root_tag_list) + self.assertEqual(3, len(lbs)) + lb_id_names = [(lb.get('id'), lb.get('name')) for lb in lbs] + self.assertIn((lb1.get('id'), lb1.get('name')), lb_id_names) + self.assertIn((lb2.get('id'), lb2.get('name')), lb_id_names) + self.assertIn((lb3.get('id'), lb3.get('name')), lb_id_names) + + def test_get_all_not_authorized(self): + project_id = uuidutils.generate_uuid() + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=self.project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', project_id=project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', project_id=project_id) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + LB_PROJECT_PATH = f'{self.LBS_PATH}?project_id={project_id}' + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.get(LB_PROJECT_PATH, status=403) + api_lb = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_lb) + + def test_get_all_by_project_id(self): + project1_id = uuidutils.generate_uuid() + project2_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project1_id) + lb2 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', + project_id=project1_id) + lb3 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', + project_id=project2_id) + response = self.get(self.LBS_PATH, + params={'project_id': project1_id}) + lbs = response.json.get(self.root_tag_list) + + self.assertEqual(2, len(lbs)) + + lb_id_names = [(lb.get('id'), lb.get('name')) for lb in lbs] + lb1 = lb1.get(self.root_tag) + lb2 = lb2.get(self.root_tag) + lb3 = lb3.get(self.root_tag) + self.assertIn((lb1.get('id'), lb1.get('name')), lb_id_names) + self.assertIn((lb2.get('id'), lb2.get('name')), lb_id_names) + response = self.get(self.LBS_PATH, + params={'project_id': project2_id}) + lbs = response.json.get(self.root_tag_list) + lb_id_names = [(lb.get('id'), lb.get('name')) for lb in lbs] + self.assertEqual(1, len(lbs)) + self.assertIn((lb3.get('id'), lb3.get('name')), lb_id_names) + + def 
test_get_all_sorted(self): + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=self.project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', + project_id=self.project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', + project_id=self.project_id) + response = self.get(self.LBS_PATH, + params={'sort': 'name:desc'}) + lbs_desc = response.json.get(self.root_tag_list) + response = self.get(self.LBS_PATH, + params={'sort': 'name:asc'}) + lbs_asc = response.json.get(self.root_tag_list) + + self.assertEqual(3, len(lbs_desc)) + self.assertEqual(3, len(lbs_asc)) + + lb_id_names_desc = [(lb.get('id'), lb.get('name')) for lb in lbs_desc] + lb_id_names_asc = [(lb.get('id'), lb.get('name')) for lb in lbs_asc] + self.assertEqual(lb_id_names_asc, list(reversed(lb_id_names_desc))) + + def test_get_all_sorted_by_vip_ip_address(self): + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=self.project_id, + vip_address='198.51.100.2') + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', + project_id=self.project_id, + vip_address='198.51.100.1') + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', + project_id=self.project_id, + vip_address='198.51.100.3') + response = self.get(self.LBS_PATH, + params={'sort': 'vip_address:desc'}) + lbs_desc = response.json.get(self.root_tag_list) + response = self.get(self.LBS_PATH, + params={'sort': 'vip_address:asc'}) + lbs_asc = response.json.get(self.root_tag_list) + + self.assertEqual(3, len(lbs_desc)) + self.assertEqual(3, len(lbs_asc)) + + lb_id_names_desc = [(lb.get('id'), lb.get('name')) for lb in lbs_desc] + lb_id_names_asc = [(lb.get('id'), lb.get('name')) for lb in lbs_asc] + self.assertEqual(lb_id_names_asc, list(reversed(lb_id_names_desc))) + + self.assertEqual('198.51.100.1', lbs_asc[0][constants.VIP_ADDRESS]) + self.assertEqual('198.51.100.2', lbs_asc[1][constants.VIP_ADDRESS]) + self.assertEqual('198.51.100.3', lbs_asc[2][constants.VIP_ADDRESS]) + + self.assertEqual('198.51.100.3', lbs_desc[0][constants.VIP_ADDRESS]) + self.assertEqual('198.51.100.2', lbs_desc[1][constants.VIP_ADDRESS]) + self.assertEqual('198.51.100.1', lbs_desc[2][constants.VIP_ADDRESS]) + + def test_get_all_limited(self): + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=self.project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', + project_id=self.project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', + project_id=self.project_id) + + # First two -- should have 'next' link + first_two = self.get(self.LBS_PATH, params={'limit': 2}).json + objs = first_two[self.root_tag_list] + links = first_two[self.root_tag_links] + self.assertEqual(2, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('next', links[0]['rel']) + + # Third + off the end -- should have previous link + third = self.get(self.LBS_PATH, params={ + 'limit': 2, + 'marker': first_two[self.root_tag_list][1]['id']}).json + objs = third[self.root_tag_list] + links = third[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('previous', links[0]['rel']) + + # Middle -- should have both links + middle = self.get(self.LBS_PATH, params={ + 'limit': 1, + 'marker': first_two[self.root_tag_list][0]['id']}).json + objs = middle[self.root_tag_list] + links = middle[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(2, len(links)) + 
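+ # --- Editor's aside (illustrative sketch, not part of the original patch):
+ # the limit/marker assertions in this test follow the usual keyset
+ # pagination contract: the first page advertises only a "next" link, the
+ # last page only a "previous" link, and a middle page both. A minimal
+ # sketch of that rule using a hypothetical page_links() helper (the name
+ # and signature are for illustration only):
+ def page_links(ids, limit, marker=None):
+     # marker is the id of the last item on the previous page
+     start = ids.index(marker) + 1 if marker else 0
+     links = []
+     if start > 0:
+         links.append('previous')
+     if start + limit < len(ids):
+         links.append('next')
+     return ids[start:start + limit], links
+ assert page_links(['a', 'b', 'c'], 2) == (['a', 'b'], ['next'])
+ assert page_links(['a', 'b', 'c'], 2, marker='b') == (['c'], ['previous'])
+ assert page_links(['a', 'b', 'c'], 1, marker='a') == (['b'], ['previous', 'next'])
+ # --- end editor's aside ---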
self.assertCountEqual(['previous', 'next'], + [link['rel'] for link in links]) + + def test_get_all_fields_filter(self): + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=self.project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', + project_id=self.project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', + project_id=self.project_id) + + lbs = self.get(self.LBS_PATH, params={ + 'fields': ['id', 'project_id']}).json + for lb in lbs['loadbalancers']: + self.assertIn('id', lb) + self.assertIn('project_id', lb) + self.assertNotIn('description', lb) + + def test_get_one_fields_filter(self): + lb1 = self.create_load_balancer( + uuidutils.generate_uuid(), + name='lb1', project_id=self.project_id).get(self.root_tag) + + lb = self.get( + self.LB_PATH.format(lb_id=lb1.get('id')), + params={'fields': ['id', 'project_id']}).json.get(self.root_tag) + self.assertIn('id', lb) + self.assertIn('project_id', lb) + self.assertNotIn('description', lb) + + def test_get_all_admin_state_up_filter(self): + self.create_load_balancer(uuidutils.generate_uuid(), + admin_state_up=True, + name='lb1', + project_id=self.project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + admin_state_up=False, + name='lb2', + project_id=self.project_id) + + lbs = self.get(self.LBS_PATH, params={'admin_state_up': 'false'}).json + self.assertEqual(1, len(lbs['loadbalancers'])) + self.assertFalse(lbs['loadbalancers'][0]['admin_state_up']) + self.assertEqual('lb2', lbs['loadbalancers'][0]['name']) + + def test_get_all_filter(self): + lb1 = self.create_load_balancer( + uuidutils.generate_uuid(), + name='lb1', + project_id=self.project_id, + vip_address='10.0.0.1').get(self.root_tag) + self.create_load_balancer( + uuidutils.generate_uuid(), + name='lb2', + project_id=self.project_id).get(self.root_tag) + self.create_load_balancer( + uuidutils.generate_uuid(), + name='lb3', + project_id=self.project_id).get(self.root_tag) + lbs = self.get(self.LBS_PATH, params={ + 'id': lb1['id'], 'vip_address': lb1['vip_address']}).json + self.assertEqual(1, len(lbs['loadbalancers'])) + self.assertEqual(lb1['id'], + lbs['loadbalancers'][0]['id']) + + def test_get_all_tags_filter(self): + lb1 = self.create_load_balancer( + uuidutils.generate_uuid(), + name='lb1', + project_id=self.project_id, + vip_address='10.0.0.1', + tags=['test_tag1', 'test_tag2'] + ).get(self.root_tag) + lb2 = self.create_load_balancer( + uuidutils.generate_uuid(), + name='lb2', + project_id=self.project_id, + tags=['test_tag2', 'test_tag3'] + ).get(self.root_tag) + lb3 = self.create_load_balancer( + uuidutils.generate_uuid(), + name='lb3', + project_id=self.project_id, + tags=['test_tag4', 'test_tag5'] + ).get(self.root_tag) + + lbs = self.get( + self.LBS_PATH, + params={'tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(2, len(lbs)) + self.assertEqual( + [lb1.get('id'), lb2.get('id')], + [lb.get('id') for lb in lbs] + ) + + lbs = self.get( + self.LBS_PATH, + params={'tags': ['test_tag2', 'test_tag3']} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(1, len(lbs)) + self.assertEqual( + [lb2.get('id')], + [lb.get('id') for lb in lbs] + ) + + lbs = self.get( + self.LBS_PATH, + params={'tags': ['test_tag2,test_tag3']} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(1, len(lbs)) + self.assertEqual( + [lb2.get('id')], + [lb.get('id') for lb in lbs] + ) + + lbs = 
self.get( + self.LBS_PATH, + params={'tags-any': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(2, len(lbs)) + self.assertEqual( + [lb1.get('id'), lb2.get('id')], + [lb.get('id') for lb in lbs] + ) + + lbs = self.get( + self.LBS_PATH, + params={'not-tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(1, len(lbs)) + self.assertEqual( + [lb3.get('id')], + [lb.get('id') for lb in lbs] + ) + + lbs = self.get( + self.LBS_PATH, + params={'not-tags-any': ['test_tag2', 'test_tag4']} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(0, len(lbs)) + + lbs = self.get( + self.LBS_PATH, + params={'tags': 'test_tag2', + 'tags-any': ['test_tag1', 'test_tag3']} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(2, len(lbs)) + self.assertEqual( + [lb1.get('id'), lb2.get('id')], + [lb.get('id') for lb in lbs] + ) + + lbs = self.get( + self.LBS_PATH, + params={'tags': 'test_tag2', 'not-tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(0, len(lbs)) + + def test_get_all_tags_mixed_filters(self): + lb1 = self.create_load_balancer( + uuidutils.generate_uuid(), + name='lb1', + project_id=self.project_id, + vip_address='10.0.0.1', + tags=['test_tag1', 'test_tag2'] + ).get(self.root_tag) + self.create_load_balancer( + uuidutils.generate_uuid(), + name='lb2', + project_id=self.project_id, + tags=['test_tag2', 'test_tag3'] + ).get(self.root_tag) + + lbs = self.get( + self.LBS_PATH, + params={'name': 'lb1', 'tags': 'test_tag2', + 'vip_address': '10.0.0.1'} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(1, len(lbs)) + self.assertEqual(lb1.get('id'), lbs[0].get('id')) + + lbs = self.get( + self.LBS_PATH, + params={'tags': 'test_tag2', 'vip_address': '10.0.0.1'} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(1, len(lbs)) + self.assertEqual(lb1.get('id'), lbs[0].get('id')) + + lbs = self.get( + self.LBS_PATH, + params={'name': 'lb1', 'tags': 'test_tag1', + 'vip_address': '10.0.0.1'} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(1, len(lbs)) + self.assertEqual(lb1.get('id'), lbs[0].get('id')) + + lbs = self.get( + self.LBS_PATH, + params={'name': 'lb1', 'tags': 'test_tag3', + 'vip_address': '10.0.0.1'} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(0, len(lbs)) + + lbs = self.get( + self.LBS_PATH, + params={'name': 'bogus-lb', 'tags': 'test_tag2', + 'vip_address': '10.0.0.1'} + ).json.get(self.root_tag_list) + self.assertIsInstance(lbs, list) + self.assertEqual(0, len(lbs)) + + def test_get_all_hides_deleted(self): + api_lb = self.create_load_balancer( + uuidutils.generate_uuid()).get(self.root_tag) + + response = self.get(self.LBS_PATH) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 1) + self.set_object_status(self.lb_repo, api_lb.get('id'), + provisioning_status=constants.DELETED) + response = self.get(self.LBS_PATH) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 0) + + def test_get(self): + project_id = uuidutils.generate_uuid() + subnet = 
network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + network = network_models.Network(id=subnet.network_id, + subnets=[subnet]) + port = network_models.Port(id=uuidutils.generate_uuid(), + network_id=network.id) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager." + "get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_port.return_value = port + mock_get_subnet.return_value = subnet + + lb = self.create_load_balancer(subnet.id, + vip_address='10.0.0.1', + vip_network_id=network.id, + vip_port_id=port.id, + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False, + tags=['test_tag']) + lb_dict = lb.get(self.root_tag) + response = self.get( + self.LB_PATH.format( + lb_id=lb_dict.get('id'))).json.get(self.root_tag) + self.assertEqual('lb1', response.get('name')) + self.assertEqual(project_id, response.get('project_id')) + self.assertEqual('desc1', response.get('description')) + self.assertFalse(response.get('admin_state_up')) + self.assertEqual('10.0.0.1', response.get('vip_address')) + self.assertEqual(subnet.id, response.get('vip_subnet_id')) + self.assertEqual(network.id, response.get('vip_network_id')) + self.assertEqual(port.id, response.get('vip_port_id')) + self.assertEqual(['test_tag'], response.get('tags')) + + def test_get_deleted_gives_404(self): + api_lb = self.create_load_balancer( + uuidutils.generate_uuid()).get(self.root_tag) + + self.set_object_status(self.lb_repo, api_lb.get('id'), + provisioning_status=constants.DELETED) + + self.get(self.LB_PATH.format(lb_id=api_lb.get('id')), status=404) + + def test_get_bad_lb_id(self): + path = self.LB_PATH.format(lb_id='SEAN-CONNERY') + self.get(path, status=404) + + def test_get_authorized(self): + project_id = uuidutils.generate_uuid() + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + network = network_models.Network(id=subnet.network_id, + subnets=[subnet]) + port = network_models.Port(id=uuidutils.generate_uuid(), + network_id=network.id) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager." 
+ "get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_port.return_value = port + mock_get_subnet.return_value = subnet + + lb = self.create_load_balancer(subnet.id, + vip_address='10.0.0.1', + vip_network_id=network.id, + vip_port_id=port.id, + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.LB_PATH.format( + lb_id=lb_dict.get('id'))).json.get(self.root_tag) + self.assertEqual('lb1', response.get('name')) + self.assertEqual(project_id, response.get('project_id')) + self.assertEqual('desc1', response.get('description')) + self.assertFalse(response.get('admin_state_up')) + self.assertEqual('10.0.0.1', response.get('vip_address')) + self.assertEqual(subnet.id, response.get('vip_subnet_id')) + self.assertEqual(network.id, response.get('vip_network_id')) + self.assertEqual(port.id, response.get('vip_port_id')) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_get_not_authorized(self): + project_id = uuidutils.generate_uuid() + subnet = network_models.Subnet(id=uuidutils.generate_uuid(), + network_id=uuidutils.generate_uuid()) + network = network_models.Network(id=subnet.network_id, + subnets=[subnet]) + port = network_models.Port(id=uuidutils.generate_uuid(), + network_id=network.id) + with mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_network") as mock_get_network, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager" + ".get_port") as mock_get_port, mock.patch( + "octavia.network.drivers.noop_driver.driver.NoopManager." 
+ "get_subnet") as mock_get_subnet: + mock_get_network.return_value = network + mock_get_port.return_value = port + mock_get_subnet.return_value = subnet + + lb = self.create_load_balancer(subnet.id, + vip_address='10.0.0.1', + vip_network_id=network.id, + vip_port_id=port.id, + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.get(self.LB_PATH.format(lb_id=lb_dict.get('id')), + status=403) + api_lb = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_lb) + + def test_create_over_quota(self): + self.start_quota_mock(data_models.LoadBalancer) + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id} + body = self._build_body(lb_json) + self.post(self.LBS_PATH, body, status=403) + + def test_update(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False, + tags=['test_tag1']) + lb_dict = lb.get(self.root_tag) + lb_json = self._build_body({'name': 'lb2', 'tags': ['test_tag2']}) + lb = self.set_lb_status(lb_dict.get('id')) + response = self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json) + api_lb = response.json.get(self.root_tag) + self.assertIsNotNone(api_lb.get('vip_subnet_id')) + self.assertEqual('lb2', api_lb.get('name')) + self.assertEqual(['test_tag2'], api_lb.get('tags')) + self.assertEqual(project_id, api_lb.get('project_id')) + self.assertEqual('desc1', api_lb.get('description')) + self.assertFalse(api_lb.get('admin_state_up')) + self.assertIsNotNone(api_lb.get('created_at')) + self.assertIsNotNone(api_lb.get('updated_at')) + self.assert_correct_lb_status(api_lb.get('id'), constants.ONLINE, + constants.PENDING_UPDATE) + + def test_update_delete_tag(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + admin_state_up=False, + tags=['test_tag1'],) + lb_dict = lb.get(self.root_tag) + lb_json = self._build_body({'tags': []}) + self.set_lb_status(lb_dict.get('id')) + response = self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json) + api_lb = response.json.get(self.root_tag) + self.assertEqual([], api_lb.get('tags')) + + def test_update_with_vip(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb_json = self._build_body({'vip_subnet_id': '1234'}) + self.set_lb_status(lb_dict.get('id')) + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json, status=400) + + def test_update_with_qos(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer( + uuidutils.generate_uuid(), name='lb1', + project_id=project_id, + vip_qos_policy_id=uuidutils.generate_uuid()) + lb_dict = lb.get(self.root_tag) + self.set_lb_status(lb_dict.get('id')) + lb_json = self._build_body( + {'vip_qos_policy_id': 
uuidutils.generate_uuid()}) + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json, status=200) + + def test_update_with_bad_qos(self): + project_id = uuidutils.generate_uuid() + vip_qos_policy_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + vip_qos_policy_id=vip_qos_policy_id) + lb_dict = lb.get(self.root_tag) + lb_json = self._build_body({'vip_qos_policy_id': 'BAD'}) + self.set_lb_status(lb_dict.get('id')) + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json, status=400) + + def test_update_with_qos_ext_disabled(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id) + lb_dict = lb.get(self.root_tag) + self.set_lb_status(lb_dict.get('id')) + vip_qos_policy_id = uuidutils.generate_uuid() + lb_json = self._build_body({'vip_qos_policy_id': vip_qos_policy_id}) + with mock.patch("octavia.network.drivers.noop_driver.driver" + ".NoopManager.qos_enabled", return_value=False): + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json, status=400) + + def test_update_with_sg_ids(self): + project_id = uuidutils.generate_uuid() + sg1_id = uuidutils.generate_uuid() + sg2_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + vip_sg_ids=[sg1_id, sg2_id]) + lb_dict = lb.get(self.root_tag) + lb_id = lb_dict.get('id') + self.assertEqual(sorted([sg1_id, sg2_id]), + sorted(lb_dict['vip_sg_ids'])) + self.set_lb_status(lb_dict.get('id')) + + lb_json = self._build_body({'vip_sg_ids': [sg2_id]}) + self.put(self.LB_PATH.format(lb_id=lb_id), + lb_json, status=200) + self.set_lb_status(lb_dict.get('id')) + response = self.get(self.LB_PATH.format(lb_id=lb_id)) + lb_dict = response.json.get(self.root_tag) + self.assertEqual(sorted([sg2_id]), + sorted(lb_dict['vip_sg_ids'])) + + lb_json = self._build_body({'vip_sg_ids': []}) + lb = self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json, status=200) + self.set_lb_status(lb_dict.get('id')) + response = self.get(self.LB_PATH.format(lb_id=lb_id)) + lb_dict = response.json.get(self.root_tag) + self.assertEqual(0, len(lb_dict['vip_sg_ids'])) + + def test_update_bad_lb_id(self): + path = self.LB_PATH.format(lb_id='SEAN-CONNERY') + self.put(path, body={}, status=404) + + def test_update_pending_create(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb_json = self._build_body({'name': 'Roberto'}) + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json, status=409) + + def test_update_authorized(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb_json = self._build_body({'name': 'lb2'}) + self.set_lb_status(lb_dict.get('id')) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': 
False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.put( + self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json) + api_lb = response.json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertIsNotNone(api_lb.get('vip_subnet_id')) + self.assertEqual('lb2', api_lb.get('name')) + self.assertEqual(project_id, api_lb.get('project_id')) + self.assertEqual('desc1', api_lb.get('description')) + self.assertFalse(api_lb.get('admin_state_up')) + self.assertIsNotNone(api_lb.get('created_at')) + self.assertIsNotNone(api_lb.get('updated_at')) + self.assert_correct_lb_status(api_lb.get('id'), constants.ONLINE, + constants.PENDING_UPDATE) + + def test_update_not_authorized(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb_json = self._build_body({'name': 'lb2'}) + self.set_lb_status(lb_dict.get('id')) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json, status=403) + api_lb = response.json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_lb) + self.assert_correct_lb_status(lb_dict.get('id'), constants.ONLINE, + constants.ACTIVE) + + def test_delete_pending_create(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=409) + + def test_update_pending_update(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb_json = self._build_body({'name': 'Bob'}) + lb = self.set_lb_status(lb_dict.get('id')) + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json) + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json, status=409) + + def test_delete_pending_update(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_json = self._build_body({'name': 'Steve'}) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id')) + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json) + self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=409) + + def test_delete_with_error_status(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id'), 
status=constants.ERROR) + self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=204) + + def test_update_pending_delete(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id')) + self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id'))) + lb_json = self._build_body({'name': 'John'}) + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json, status=409) + + def test_delete_pending_delete(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id')) + self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id'))) + self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=409) + + def test_update_already_deleted(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id'), status=constants.DELETED) + lb_json = self._build_body({'name': 'John'}) + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), + lb_json, status=404) + + def test_delete_already_deleted(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id'), status=constants.DELETED) + self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=404) + + def test_delete(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id')) + self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id'))) + response = self.get(self.LB_PATH.format(lb_id=lb_dict.get('id'))) + api_lb = response.json.get(self.root_tag) + self.assertEqual('lb1', api_lb.get('name')) + self.assertEqual('desc1', api_lb.get('description')) + self.assertEqual(project_id, api_lb.get('project_id')) + self.assertFalse(api_lb.get('admin_state_up')) + self.assert_correct_lb_status(api_lb.get('id'), constants.ONLINE, + constants.PENDING_DELETE) + + def test_delete_authorized(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id')) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + 
"oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id'))) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get(self.LB_PATH.format(lb_id=lb_dict.get('id'))) + api_lb = response.json.get(self.root_tag) + self.assertEqual('lb1', api_lb.get('name')) + self.assertEqual('desc1', api_lb.get('description')) + self.assertEqual(project_id, api_lb.get('project_id')) + self.assertFalse(api_lb.get('admin_state_up')) + self.assertEqual(lb.get('operational_status'), + api_lb.get('operational_status')) + self.assert_correct_lb_status(api_lb.get('id'), constants.ONLINE, + constants.PENDING_DELETE) + + def test_delete_not_authorized(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + self.set_lb_status(lb_dict.get('id')) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), + status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + response = self.get(self.LB_PATH.format(lb_id=lb_dict.get('id'))) + api_lb = response.json.get(self.root_tag) + self.assertEqual('lb1', api_lb.get('name')) + self.assertEqual('desc1', api_lb.get('description')) + self.assertEqual(project_id, api_lb.get('project_id')) + self.assertFalse(api_lb.get('admin_state_up')) + self.assert_correct_lb_status(api_lb.get('id'), constants.ONLINE, + constants.ACTIVE) + + def test_delete_fails_with_pool(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1').get(self.root_tag) + lb_id = lb.get('id') + self.set_lb_status(lb_id) + self.create_pool( + lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + self.set_lb_status(lb_id) + self.delete(self.LB_PATH.format(lb_id=lb_id), status=400) + self.assert_correct_status(lb_id=lb_id) + + def test_delete_fails_with_listener(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1').get(self.root_tag) + lb_id = lb.get('id') + self.set_lb_status(lb_id) + self.create_listener(constants.PROTOCOL_HTTP, 80, lb_id) + self.set_lb_status(lb_id) + self.delete(self.LB_PATH.format(lb_id=lb_id), status=400) + self.assert_correct_status(lb_id=lb_id) + + def test_cascade_delete(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1').get(self.root_tag) + lb_id = lb.get('id') + self.set_lb_status(lb_id) + listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, lb_id).get('listener') + listener_id = listener.get('id') + self.set_lb_status(lb_id) + self.create_pool( + lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=listener_id) + self.set_lb_status(lb_id) + self.delete(self.LB_PATH.format(lb_id=lb_id), + params={'cascade': "true"}) + + def 
test_delete_bad_lb_id(self): + path = self.LB_PATH.format(lb_id='bad_uuid') + self.delete(path, status=404) + + def test_failover(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id')) + self.app.put(self._get_full_path( + self.LB_PATH.format(lb_id=lb_dict.get('id')) + "/failover"), + status=202) + + def test_failover_pending(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id'), + status=constants.PENDING_UPDATE) + self.app.put(self._get_full_path( + self.LB_PATH.format(lb_id=lb_dict.get('id')) + "/failover"), + status=409) + + def test_failover_error(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id'), + status=constants.ERROR) + self.app.put(self._get_full_path( + self.LB_PATH.format(lb_id=lb_dict.get('id')) + "/failover"), + status=202) + + def test_failover_not_authorized(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + self.set_lb_status(lb_dict.get('id')) + + path = self._get_full_path(self.LB_PATH.format( + lb_id=lb_dict.get('id')) + "/failover") + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.app.put(path, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_failover_not_authorized_no_role(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + self.set_lb_status(lb_dict.get('id')) + + path = self._get_full_path(self.LB_PATH.format( + lb_id=lb_dict.get('id')) + "/failover") + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': [], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.app.put(path, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_failover_authorized_lb_admin(self): + 
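# A caller holding the 'admin' role in an unrelated project is still
+ # authorized to trigger failover, so the PUT below expects 202.
+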
project_id = uuidutils.generate_uuid() + project_id_2 = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + self.set_lb_status(lb_dict.get('id')) + + path = self._get_full_path(self.LB_PATH.format( + lb_id=lb_dict.get('id')) + "/failover") + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['admin'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id_2} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.app.put(path, status=202) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_failover_authorized_no_auth(self): + project_id = uuidutils.generate_uuid() + project_id_2 = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + self.set_lb_status(lb_dict.get('id')) + + path = self._get_full_path(self.LB_PATH.format( + lb_id=lb_dict.get('id')) + "/failover") + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id_2} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.app.put(path, status=202) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_failover_deleted(self): + project_id = uuidutils.generate_uuid() + lb = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=project_id, + description='desc1', + admin_state_up=False) + lb_dict = lb.get(self.root_tag) + lb = self.set_lb_status(lb_dict.get('id'), status=constants.DELETED) + + path = self._get_full_path(self.LB_PATH.format( + lb_id=lb_dict.get('id')) + "/failover") + self.app.put(path, status=404) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_create_with_bad_provider(self, mock_provider): + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + lb_json = {'name': 'test-lb', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id} + response = self.post(self.LBS_PATH, self._build_body(lb_json), + status=500) + self.assertIn('Provider \'bad_driver\' reports error: broken', + response.json.get('faultstring')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_with_bad_provider(self, mock_provider): + mock_provider.return_value = (mock.MagicMock(), []) + api_lb = self.create_load_balancer( + uuidutils.generate_uuid()).get(self.root_tag) + self.set_lb_status(lb_id=api_lb.get('id')) + 
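# Re-arm the provider mock to raise on the update call: provider driver
+ # errors are expected to surface to the API caller as HTTP 500.
+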
new_listener = {'name': 'new_name'} + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + response = self.put(self.LB_PATH.format(lb_id=api_lb.get('id')), + self._build_body(new_listener), status=500) + self.assertIn('Provider \'bad_driver\' reports error: broken', + response.json.get('faultstring')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_delete_with_bad_provider(self, mock_provider): + mock_provider.return_value = (mock.MagicMock(), []) + api_lb = self.create_load_balancer( + uuidutils.generate_uuid()).get(self.root_tag) + self.set_lb_status(lb_id=api_lb.get('id')) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_lb['provisioning_status'] = constants.ACTIVE + api_lb['operating_status'] = constants.ONLINE + response = self.get(self.LB_PATH.format( + lb_id=api_lb.get('id'))).json.get(self.root_tag) + + self.assertIsNone(api_lb.pop('updated_at')) + self.assertIsNotNone(response.pop('updated_at')) + self.assertEqual(api_lb, response) + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + self.delete(self.LB_PATH.format(lb_id=api_lb.get('id')), status=500) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_create_with_provider_not_implemented(self, mock_provider): + mock_provider.side_effect = exceptions.ProviderNotImplementedError( + prov='bad_driver', user_msg='broken') + lb_json = {'name': 'test-lb', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id} + response = self.post(self.LBS_PATH, self._build_body(lb_json), + status=501) + self.assertIn('Provider \'bad_driver\' does not support a requested ' + 'action: broken', response.json.get('faultstring')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_with_provider_not_implemented(self, mock_provider): + mock_provider.return_value = (mock.MagicMock(), []) + api_lb = self.create_load_balancer( + uuidutils.generate_uuid()).get(self.root_tag) + self.set_lb_status(lb_id=api_lb.get('id')) + new_listener = {'name': 'new_name'} + mock_provider.side_effect = exceptions.ProviderNotImplementedError( + prov='bad_driver', user_msg='broken') + response = self.put(self.LB_PATH.format(lb_id=api_lb.get('id')), + self._build_body(new_listener), status=501) + self.assertIn('Provider \'bad_driver\' does not support a requested ' + 'action: broken', response.json.get('faultstring')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_delete_with_provider_not_implemented(self, mock_provider): + mock_provider.return_value = (mock.MagicMock(), []) + api_lb = self.create_load_balancer( + uuidutils.generate_uuid()).get(self.root_tag) + self.set_lb_status(lb_id=api_lb.get('id')) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_lb['provisioning_status'] = constants.ACTIVE + api_lb['operating_status'] = constants.ONLINE + response = self.get(self.LB_PATH.format( + lb_id=api_lb.get('id'))).json.get(self.root_tag) + + self.assertIsNone(api_lb.pop('updated_at')) + self.assertIsNotNone(response.pop('updated_at')) + self.assertEqual(api_lb, response) + mock_provider.side_effect = exceptions.ProviderNotImplementedError( + prov='bad_driver', user_msg='broken') + self.delete(self.LB_PATH.format(lb_id=api_lb.get('id')), status=501) + + response = self.get(self.LB_PATH.format( + lb_id=api_lb.get('id'))).json.get(self.root_tag) + self.assertEqual(constants.ACTIVE, response['provisioning_status']) + + 
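# ProviderUnsupportedOptionError follows the same pattern as
+ # ProviderNotImplementedError above: both map to HTTP 501, whereas
+ # ProviderDriverError maps to HTTP 500.
+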
@mock.patch('octavia.api.drivers.utils.call_provider') + def test_create_with_provider_unsupport_option(self, mock_provider): + mock_provider.side_effect = exceptions.ProviderUnsupportedOptionError( + prov='bad_driver', user_msg='broken') + lb_json = {'name': 'test-lb', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id} + response = self.post(self.LBS_PATH, self._build_body(lb_json), + status=501) + self.assertIn('Provider \'bad_driver\' does not support a requested ' + 'option: broken', response.json.get('faultstring')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_with_provider_unsupport_option(self, mock_provider): + mock_provider.return_value = (mock.MagicMock(), []) + api_lb = self.create_load_balancer( + uuidutils.generate_uuid()).get(self.root_tag) + self.set_lb_status(lb_id=api_lb.get('id')) + new_listener = {'name': 'new_name'} + mock_provider.side_effect = exceptions.ProviderUnsupportedOptionError( + prov='bad_driver', user_msg='broken') + response = self.put(self.LB_PATH.format(lb_id=api_lb.get('id')), + self._build_body(new_listener), status=501) + self.assertIn('Provider \'bad_driver\' does not support a requested ' + 'option: broken', response.json.get('faultstring')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_delete_with_provider_unsupport_option(self, mock_provider): + mock_provider.return_value = (mock.MagicMock(), []) + api_lb = self.create_load_balancer( + uuidutils.generate_uuid()).get(self.root_tag) + self.set_lb_status(lb_id=api_lb.get('id')) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_lb['provisioning_status'] = constants.ACTIVE + api_lb['operating_status'] = constants.ONLINE + response = self.get(self.LB_PATH.format( + lb_id=api_lb.get('id'))).json.get(self.root_tag) + + self.assertIsNone(api_lb.pop('updated_at')) + self.assertIsNotNone(response.pop('updated_at')) + self.assertEqual(api_lb, response) + mock_provider.side_effect = exceptions.ProviderUnsupportedOptionError( + prov='bad_driver', user_msg='broken') + self.delete(self.LB_PATH.format(lb_id=api_lb.get('id')), status=501) + + +class TestLoadBalancerGraph(base.BaseAPITest): + + root_tag = 'loadbalancer' + + def setUp(self): + super().setUp() + self._project_id = uuidutils.generate_uuid() + + def _build_body(self, json): + return {self.root_tag: json} + + def _assert_graphs_equal(self, expected_graph, observed_graph): + observed_graph_copy = copy.deepcopy(observed_graph) + del observed_graph_copy['created_at'] + del observed_graph_copy['updated_at'] + self.assertEqual(observed_graph_copy['project_id'], + observed_graph_copy.pop('tenant_id')) + + obs_lb_id = observed_graph_copy.pop('id') + self.assertTrue(uuidutils.is_uuid_like(obs_lb_id)) + + expected_listeners = expected_graph.pop('listeners', []) + observed_listeners = observed_graph_copy.pop('listeners', []) + expected_pools = expected_graph.pop('pools', []) + observed_pools = observed_graph_copy.pop('pools', []) + expected_additional_vips = expected_graph.pop('additional_vips', []) + observed_additional_vips = observed_graph_copy.pop('additional_vips', + []) + expected_vip_sg_ids = expected_graph.pop('vip_sg_ids', []) + observed_vip_sg_ids = observed_graph_copy.pop('vip_sg_ids', []) + self.assertEqual(expected_graph, observed_graph_copy) + + self.assertEqual(sorted(expected_vip_sg_ids), + sorted(observed_vip_sg_ids)) + + self.assertEqual(len(expected_pools), len(observed_pools)) + + self.assertEqual(len(expected_listeners), 
len(observed_listeners)) + for observed_listener in observed_listeners: + del observed_listener['created_at'] + del observed_listener['updated_at'] + self.assertEqual(observed_listener['project_id'], + observed_listener.pop('tenant_id')) + + self.assertTrue(uuidutils.is_uuid_like( + observed_listener.pop('id'))) + if observed_listener.get('default_pool_id'): + self.assertTrue(uuidutils.is_uuid_like( + observed_listener.pop('default_pool_id'))) + + default_pool = observed_listener.get('default_pool') + if default_pool: + observed_listener.pop('default_pool_id') + self.assertTrue(default_pool.get('id')) + default_pool.pop('id') + default_pool.pop('created_at') + default_pool.pop('updated_at') + hm = default_pool.get('health_monitor') + if hm: + self.assertTrue(hm.get('id')) + hm.pop('id') + for member in default_pool.get('members', []): + self.assertTrue(member.get('id')) + member.pop('id') + member.pop('created_at') + member.pop('updated_at') + if observed_listener.get('sni_containers'): + observed_listener['sni_containers'].sort() + o_l7policies = observed_listener.get('l7policies') + if o_l7policies: + for o_l7policy in o_l7policies: + o_l7policy.pop('created_at') + o_l7policy.pop('updated_at') + self.assertEqual(o_l7policy['project_id'], + o_l7policy.pop('tenant_id')) + if o_l7policy.get('redirect_pool_id'): + r_pool_id = o_l7policy.pop('redirect_pool_id') + self.assertTrue(uuidutils.is_uuid_like(r_pool_id)) + o_l7policy_id = o_l7policy.pop('id') + self.assertTrue(uuidutils.is_uuid_like(o_l7policy_id)) + o_l7policy_l_id = o_l7policy.pop('listener_id') + self.assertTrue(uuidutils.is_uuid_like(o_l7policy_l_id)) + l7rules = o_l7policy.get('rules') or [] + for l7rule in l7rules: + l7rule.pop('created_at') + l7rule.pop('updated_at') + self.assertEqual(l7rule['project_id'], + l7rule.pop('tenant_id')) + self.assertTrue(l7rule.pop('id')) + self.assertIn(observed_listener, expected_listeners) + self.assertEqual(len(expected_additional_vips), + len(observed_additional_vips)) + for observed_add_vip in observed_additional_vips: + if not observed_add_vip['ip_address']: + del observed_add_vip['ip_address'] + self.assertIn(observed_add_vip, expected_additional_vips) + + def _get_lb_bodies(self, create_listeners, expected_listeners, + create_pools=None, additional_vips=None, + vip_sg_ids=None, flavor_id=None, sriov=False): + create_lb = { + 'name': 'lb1', + 'project_id': self._project_id, + 'vip_subnet_id': uuidutils.generate_uuid(), + 'vip_port_id': uuidutils.generate_uuid(), + 'vip_address': '198.51.100.10', + 'provider': 'noop_driver', + 'listeners': create_listeners, + 'pools': create_pools or [] + } + if vip_sg_ids: + create_lb['vip_sg_ids'] = vip_sg_ids + if additional_vips: + for add_vip in additional_vips: + if 'port_id' not in add_vip: + add_vip['port_id'] = create_lb['vip_port_id'] + create_lb.update({'additional_vips': additional_vips}) + expected_lb = { + 'description': '', + 'admin_state_up': True, + 'availability_zone': None, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE, + # TODO(rm_work): vip_network_id is a weird case, as it will be + # replaced from the port, which in the noop network driver will be + # freshly generated... I don't see a way to actually set it sanely + # for this test without interfering with a ton of stuff, and it is + # expected that this would be overwritten anyway, so 'ANY' is fine? 
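+ # (mock.ANY matches whatever network_id the noop driver generates.)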
+ 'vip_network_id': mock.ANY, + 'vip_qos_policy_id': None, + 'flavor_id': None, + 'provider': 'noop_driver', + 'tags': [], + 'vip_vnic_type': constants.VNIC_TYPE_NORMAL, + 'vip_sg_ids': vip_sg_ids or [], + } + if flavor_id: + create_lb['flavor_id'] = flavor_id + expected_lb['flavor_id'] = flavor_id + if sriov: + expected_lb['vip_vnic_type'] = constants.VNIC_TYPE_DIRECT + expected_lb.update(create_lb) + expected_lb['listeners'] = expected_listeners + expected_lb['pools'] = create_pools or [] + return create_lb, expected_lb + + def _get_listener_bodies( + self, name='listener1', protocol_port=80, + create_default_pool_name=None, create_default_pool_id=None, + create_l7policies=None, expected_l7policies=None, + create_sni_containers=None, expected_sni_containers=None, + create_client_ca_tls_container=None, + expected_client_ca_tls_container=None, + create_protocol=constants.PROTOCOL_HTTP, + create_client_authentication=None, + expected_client_authentication=constants.CLIENT_AUTH_NONE, + create_client_crl_container=None, + expected_client_crl_container=None, + create_allowed_cidrs=None, + expected_allowed_cidrs=None, + create_timeout_client_data=None, + expected_timeout_client_data=None, + create_timeout_member_connect=None, + expected_timeout_member_connect=None, + create_timeout_member_data=None, + expected_timeout_member_data=None, + create_timeout_tcp_inspect=None, + expected_timeout_tcp_inspect=None): + create_listener = { + 'name': name, + 'protocol_port': protocol_port, + 'protocol': create_protocol + } + expected_listener = { + 'description': '', + 'default_tls_container_ref': None, + 'sni_container_refs': [], + 'connection_limit': constants.DEFAULT_CONNECTION_LIMIT, + 'admin_state_up': True, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE, + 'insert_headers': {}, + 'project_id': self._project_id, + 'timeout_client_data': constants.DEFAULT_TIMEOUT_CLIENT_DATA, + 'timeout_member_connect': constants.DEFAULT_TIMEOUT_MEMBER_CONNECT, + 'timeout_member_data': constants.DEFAULT_TIMEOUT_MEMBER_DATA, + 'timeout_tcp_inspect': constants.DEFAULT_TIMEOUT_TCP_INSPECT, + 'tags': [], + 'client_ca_tls_container_ref': None, + 'client_authentication': constants.CLIENT_AUTH_NONE, + 'client_crl_container_ref': None, + 'allowed_cidrs': None, + 'tls_ciphers': None, + 'tls_versions': None, + 'alpn_protocols': None, + 'hsts_include_subdomains': False, + 'hsts_max_age': None, + 'hsts_preload': False, + } + if create_sni_containers: + create_listener['sni_container_refs'] = create_sni_containers + expected_listener.update(create_listener) + if create_default_pool_name: + pool = {'name': create_default_pool_name} + create_listener['default_pool'] = pool + elif create_default_pool_id: + create_listener['default_pool_id'] = create_default_pool_id + expected_listener['default_pool_id'] = create_default_pool_id + else: + expected_listener['default_pool_id'] = None + if create_l7policies: + l7policies = create_l7policies + create_listener['l7policies'] = l7policies + if create_client_ca_tls_container: + create_listener['client_ca_tls_container_ref'] = ( + create_client_ca_tls_container) + if create_client_authentication: + create_listener['client_authentication'] = ( + create_client_authentication) + if create_client_crl_container: + create_listener['client_crl_container_ref'] = ( + create_client_crl_container) + if create_allowed_cidrs: + create_listener['allowed_cidrs'] = create_allowed_cidrs + if expected_sni_containers: + expected_listener['sni_container_refs'] = 
expected_sni_containers + if expected_l7policies: + expected_listener['l7policies'] = expected_l7policies + else: + expected_listener['l7policies'] = [] + if expected_client_ca_tls_container: + expected_listener['client_ca_tls_container_ref'] = ( + expected_client_ca_tls_container) + expected_listener['client_authentication'] = ( + constants.CLIENT_AUTH_NONE) + if expected_client_authentication: + expected_listener[ + 'client_authentication'] = expected_client_authentication + if expected_client_crl_container: + expected_listener['client_crl_container_ref'] = ( + expected_client_crl_container) + if expected_allowed_cidrs: + expected_listener['allowed_cidrs'] = expected_allowed_cidrs + if create_protocol == constants.PROTOCOL_TERMINATED_HTTPS: + expected_listener['tls_ciphers'] = constants.CIPHERS_OWASP_SUITE_B + expected_listener['tls_versions'] = ( + constants.TLS_VERSIONS_OWASP_SUITE_B) + expected_listener['alpn_protocols'] = ( + [lib_consts.ALPN_PROTOCOL_HTTP_2, + lib_consts.ALPN_PROTOCOL_HTTP_1_1, + lib_consts.ALPN_PROTOCOL_HTTP_1_0]) + + if create_timeout_client_data is not None: + create_listener['timeout_client_data'] = ( + create_timeout_client_data) + if expected_timeout_client_data is not None: + expected_listener['timeout_client_data'] = ( + expected_timeout_client_data) + if create_timeout_member_connect is not None: + create_listener['timeout_member_connect'] = ( + create_timeout_member_connect) + if expected_timeout_member_connect is not None: + expected_listener['timeout_member_connect'] = ( + expected_timeout_member_connect) + if create_timeout_member_data is not None: + create_listener['timeout_member_data'] = ( + create_timeout_member_data) + if expected_timeout_member_data is not None: + expected_listener['timeout_member_data'] = ( + expected_timeout_member_data) + if create_timeout_tcp_inspect is not None: + create_listener['timeout_tcp_inspect'] = ( + create_timeout_tcp_inspect) + if expected_timeout_tcp_inspect is not None: + expected_listener['timeout_tcp_inspect'] = ( + expected_timeout_tcp_inspect) + + return create_listener, expected_listener + + def _get_pool_bodies(self, name='pool1', create_members=None, + expected_members=None, create_hm=None, + expected_hm=None, protocol=constants.PROTOCOL_HTTP, + session_persistence=True): + create_pool = { + 'name': name, + 'protocol': protocol, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + } + if session_persistence: + create_pool['session_persistence'] = { + 'type': constants.SESSION_PERSISTENCE_SOURCE_IP, + 'cookie_name': None} + if create_members: + create_pool['members'] = create_members + if create_hm: + create_pool['healthmonitor'] = create_hm + expected_pool = { + 'description': None, + 'session_persistence': None, + 'members': [], + 'enabled': True, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE, + 'project_id': self._project_id, + 'tags': [] + } + expected_pool.update(create_pool) + if expected_members: + expected_pool['members'] = expected_members + if expected_hm: + expected_pool['healthmonitor'] = expected_hm + return create_pool, expected_pool + + def _get_member_bodies(self, protocol_port=80, sriov=False): + create_member = { + 'address': '10.0.0.1', + 'protocol_port': protocol_port + } + expected_member = { + 'weight': 1, + 'enabled': True, + 'subnet_id': None, + 'operating_status': constants.OFFLINE, + 'project_id': self._project_id, + 'tags': [] + } + if sriov: + create_member[constants.REQUEST_SRIOV] = True + expected_member[constants.VNIC_TYPE] = 
constants.VNIC_TYPE_DIRECT + expected_member.update(create_member) + return create_member, expected_member + + def _get_hm_bodies(self, hm_type=constants.HEALTH_MONITOR_PING, + delay=1): + if hm_type == constants.HEALTH_MONITOR_UDP_CONNECT: + create_hm = { + 'type': constants.HEALTH_MONITOR_UDP_CONNECT, + 'delay': delay, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1 + } + expected_hm = { + 'admin_state_up': True, + 'project_id': self._project_id, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE, + 'tags': [] + } + elif hm_type == constants.HEALTH_MONITOR_HTTP: + create_hm = { + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': delay, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1 + } + expected_hm = { + 'http_method': 'GET', + 'url_path': '/', + 'expected_codes': '200', + 'admin_state_up': True, + 'project_id': self._project_id, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE, + 'tags': [] + } + else: + create_hm = { + 'type': constants.HEALTH_MONITOR_PING, + 'delay': delay, + 'timeout': 1, + 'max_retries_down': 1, + 'max_retries': 1 + } + expected_hm = { + 'admin_state_up': True, + 'project_id': self._project_id, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE, + 'tags': [] + } + expected_hm.update(create_hm) + return create_hm, expected_hm + + def _get_sni_container_bodies(self): + create_sni_container1 = uuidutils.generate_uuid() + create_sni_container2 = uuidutils.generate_uuid() + create_sni_containers = [create_sni_container1, create_sni_container2] + expected_sni_containers = [create_sni_container1, + create_sni_container2] + expected_sni_containers.sort() + return create_sni_containers, expected_sni_containers + + def _get_l7policies_bodies(self, + create_pool_name=None, create_pool_id=None, + create_l7rules=None, expected_l7rules=None): + create_l7policies = [] + if create_pool_name: + create_l7policy = { + 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'redirect_pool': {'name': create_pool_name}, + 'position': 1, + 'admin_state_up': False + } + else: + create_l7policy = { + 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, + 'redirect_url': 'http://127.0.0.1/', + 'position': 1, + 'redirect_http_code': 302, + 'admin_state_up': False + } + create_l7policies.append(create_l7policy) + expected_l7policy = { + 'name': '', + 'description': '', + 'redirect_http_code': None, + 'redirect_url': None, + 'redirect_prefix': None, + 'rules': [], + 'project_id': self._project_id, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE, + 'tags': [] + } + expected_l7policy.update(create_l7policy) + expected_l7policy.pop('redirect_pool', None) + expected_l7policies = [] + if not create_pool_name: + expected_l7policy['redirect_pool_id'] = create_pool_id + expected_l7policies.append(expected_l7policy) + if expected_l7rules: + expected_l7policies[0]['rules'] = expected_l7rules + if create_l7rules: + create_l7policies[0]['rules'] = create_l7rules + return create_l7policies, expected_l7policies + + def _get_l7rules_bodies(self, value="localhost"): + create_l7rules = [{ + 'type': constants.L7RULE_TYPE_HOST_NAME, + 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + 'value': value, + 'invert': False, + 'admin_state_up': True + }] + expected_l7rules = [{ + 'key': None, + 'project_id': self._project_id, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE, 
+ 'tags': [] + }] + expected_l7rules[0].update(create_l7rules[0]) + return create_l7rules, expected_l7rules + + def test_with_additional_vips(self): + create_lb, expected_lb = self._get_lb_bodies( + [], [], additional_vips=[ + {'subnet_id': uuidutils.generate_uuid()}]) + + # Pre-populate test subnet/network data + network_driver = utils.get_network_driver() + vip_subnet = network_driver.get_subnet(create_lb['vip_subnet_id']) + additional_subnet = network_driver.get_subnet( + create_lb['additional_vips'][0]['subnet_id']) + additional_subnet.network_id = vip_subnet.network_id + + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_additional_vips_duplicate_subnet(self): + create_lb, expected_lb = self._get_lb_bodies( + [], []) + create_lb['additional_vips'] = [ + {'subnet_id': create_lb['vip_subnet_id']}] + + # Pre-populate test subnet/network data + network_driver = utils.get_network_driver() + vip_subnet = network_driver.get_subnet(create_lb['vip_subnet_id']) + additional_subnet = network_driver.get_subnet( + create_lb['additional_vips'][0]['subnet_id']) + additional_subnet.network_id = vip_subnet.network_id + + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body, status=400) + error_text = response.json.get('faultstring') + self.assertIn('Duplicate VIP subnet(s) specified.', error_text) + + def test_with_additional_vips_different_networks(self): + create_lb, expected_lb = self._get_lb_bodies( + [], [], additional_vips=[ + {'subnet_id': uuidutils.generate_uuid()}]) + + # Pre-populate test subnet/network data + network_driver = utils.get_network_driver() + additional_subnet = network_driver.get_subnet( + create_lb['additional_vips'][0]['subnet_id']) + additional_subnet.network_id = uuidutils.generate_uuid() + + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body, status=400) + error_text = response.json.get('faultstring') + self.assertIn('All VIP subnets must belong to the same network.', + error_text) + + @mock.patch('octavia.api.v2.controllers.load_balancer.' 
+ 'LoadBalancersController._apply_flavor_to_lb_dict', + return_value={constants.SRIOV_VIP: True}) + def test_with_vip_vnic_type_direct(self, mock_flavor_dict): + create_lb, expected_lb = self._get_lb_bodies( + [], []) + expected_lb[constants.VIP_VNIC_TYPE] = constants.VNIC_TYPE_DIRECT + + body = self._build_body(create_lb) + + response = self.post(self.LBS_PATH, body) + self._assert_graphs_equal(expected_lb, response.json['loadbalancer']) + + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_sg_ids(self): + create_lb, expected_lb = self._get_lb_bodies( + [], [], vip_sg_ids=[uuidutils.generate_uuid(), + uuidutils.generate_uuid()]) + + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_one_listener(self): + create_listener, expected_listener = self._get_listener_bodies() + create_lb, expected_lb = self._get_lb_bodies([create_listener], + [expected_listener]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_one_listener_sg_ids(self): + create_listener, expected_listener = self._get_listener_bodies() + create_lb, expected_lb = self._get_lb_bodies( + [create_listener], [expected_listener], + vip_sg_ids=[uuidutils.generate_uuid(), + uuidutils.generate_uuid()]) + + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + @mock.patch('octavia.api.v2.controllers.load_balancer.' + 'LoadBalancersController._apply_flavor_to_lb_dict', + return_value={constants.SRIOV_VIP: True}) + def test_with_vip_vnic_type_direct_and_sg_ids(self, mock_flavor_dict): + create_lb, expected_lb = self._get_lb_bodies( + [], [], + vip_sg_ids=[uuidutils.generate_uuid(), + uuidutils.generate_uuid()]) + expected_lb[constants.VIP_VNIC_TYPE] = constants.VNIC_TYPE_DIRECT + + body = self._build_body(create_lb) + + response = self.post(self.LBS_PATH, body, status=400, + expect_errors=True) + error_text = response.json.get('faultstring') + self.assertIn("VIP Security Groups are not allowed with VNIC " + "direct type", error_text) + + def test_with_one_listener_sg_ids_and_allowed_cidrs(self): + allowed_cidrs = ['10.0.1.0/24'] + create_listener, expected_listener = self._get_listener_bodies( + create_allowed_cidrs=allowed_cidrs, + expected_allowed_cidrs=allowed_cidrs) + create_lb, expected_lb = self._get_lb_bodies( + [create_listener], [expected_listener], + vip_sg_ids=[uuidutils.generate_uuid(), + uuidutils.generate_uuid()]) + + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body, status=400, + expect_errors=True) + error_text = response.json.get('faultstring') + self.assertIn("Allowed CIDRs are not allowed when using custom " + "VIP Security Groups", error_text) + + def test_with_one_listener_with_default_timeouts(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='haproxy_amphora', timeout_client_data=20) + self.conf.config(group='haproxy_amphora', timeout_member_connect=21) + self.conf.config(group='haproxy_amphora', + timeout_member_data=constants.MIN_TIMEOUT) + self.conf.config(group='haproxy_amphora', + timeout_tcp_inspect=constants.MAX_TIMEOUT) + + create_listener, expected_listener = 
self._get_listener_bodies( + expected_timeout_client_data=20, + expected_timeout_member_connect=21, + expected_timeout_member_data=constants.MIN_TIMEOUT, + expected_timeout_tcp_inspect=constants.MAX_TIMEOUT) + create_lb, expected_lb = self._get_lb_bodies([create_listener], + [expected_listener]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_many_listeners(self): + create_listener1, expected_listener1 = self._get_listener_bodies() + create_listener2, expected_listener2 = self._get_listener_bodies( + name='listener2', protocol_port=81) + create_lb, expected_lb = self._get_lb_bodies( + [create_listener1, create_listener2], + [expected_listener1, expected_listener2]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_one_listener_one_pool(self): + create_pool, expected_pool = self._get_pool_bodies() + create_listener, expected_listener = self._get_listener_bodies( + create_default_pool_name=create_pool['name']) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_many_listeners_one_pool(self): + create_pool1, expected_pool1 = self._get_pool_bodies() + create_pool2, expected_pool2 = self._get_pool_bodies(name='pool2') + create_listener1, expected_listener1 = self._get_listener_bodies( + create_default_pool_name=create_pool1['name']) + create_listener2, expected_listener2 = self._get_listener_bodies( + create_default_pool_name=create_pool2['name'], + name='listener2', protocol_port=81) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener1, create_listener2], + expected_listeners=[expected_listener1, expected_listener2], + create_pools=[create_pool1, create_pool2]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_one_listener_one_member(self): + create_member, expected_member = self._get_member_bodies() + create_pool, expected_pool = self._get_pool_bodies( + create_members=[create_member], + expected_members=[expected_member]) + create_listener, expected_listener = self._get_listener_bodies( + create_default_pool_name=create_pool['name']) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_one_listener_one_member_sriov(self): + flavor_profile = self.create_flavor_profile( + 'sriov-graph-create', 'noop_driver', + f'{{"{constants.ALLOW_MEMBER_SRIOV}": true, ' + f'"{constants.SRIOV_VIP}": true}}') + + flavor = self.create_flavor('sriov-graph-create', '', + flavor_profile['id'], True) + + create_member, expected_member = self._get_member_bodies(sriov=True) + create_pool, expected_pool = self._get_pool_bodies( + create_members=[create_member], + 
expected_members=[expected_member]) + create_listener, expected_listener = self._get_listener_bodies( + create_default_pool_name=create_pool['name']) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool], flavor_id=flavor['id'], sriov=True) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_one_listener_one_member_sriov_disabled(self): + create_member, expected_member = self._get_member_bodies(sriov=True) + create_pool, expected_pool = self._get_pool_bodies( + create_members=[create_member], + expected_members=[expected_member]) + create_listener, expected_listener = self._get_listener_bodies( + create_default_pool_name=create_pool['name']) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body, status=400) + error_text = response.json.get('faultstring') + self.assertIn('flavor does not allow SR-IOV member ports.', + error_text) + + def test_with_one_listener_one_hm(self): + create_hm, expected_hm = self._get_hm_bodies() + create_pool, expected_pool = self._get_pool_bodies( + create_hm=create_hm, + expected_hm=expected_hm) + create_listener, expected_listener = self._get_listener_bodies( + create_default_pool_name=create_pool['name']) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_one_listener_one_hm_udp(self): + create_hm, expected_hm = self._get_hm_bodies( + hm_type=constants.HEALTH_MONITOR_UDP_CONNECT, + delay=3) + create_pool, expected_pool = self._get_pool_bodies( + create_hm=create_hm, + expected_hm=expected_hm, + protocol=constants.PROTOCOL_UDP) + create_listener, expected_listener = self._get_listener_bodies( + create_default_pool_name=create_pool['name'], + create_protocol=constants.PROTOCOL_UDP) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + def test_with_one_listener_one_hm_udp_validation_failure(self): + create_hm, expected_hm = self._get_hm_bodies( + hm_type=constants.HEALTH_MONITOR_UDP_CONNECT, + delay=1) + create_pool, expected_pool = self._get_pool_bodies( + create_hm=create_hm, + expected_hm=expected_hm, + protocol=constants.PROTOCOL_UDP) + create_listener, expected_listener = self._get_listener_bodies( + create_default_pool_name=create_pool['name'], + create_protocol=constants.PROTOCOL_UDP) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body, status=400, + expect_errors=True) + error_text = response.json.get('faultstring') + self.assertIn('request delay value 1 should be larger', 
error_text) + + def test_with_one_listener_allowed_cidrs(self): + allowed_cidrs = ['10.0.1.0/24', '172.16.0.0/16'] + create_listener, expected_listener = self._get_listener_bodies( + create_allowed_cidrs=allowed_cidrs, + expected_allowed_cidrs=allowed_cidrs) + create_lb, expected_lb = self._get_lb_bodies([create_listener], + [expected_listener]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + # TODO(johnsom) Fix this when there is a noop certificate manager + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_with_one_listener_sni_containers(self, mock_cert_data): + cert1 = data_models.TLSContainer(certificate='cert 1') + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_cert_data.return_value = {'tls_cert': cert1, + 'sni_certs': [cert2, cert3]} + create_sni_containers, expected_sni_containers = ( + self._get_sni_container_bodies()) + create_listener, expected_listener = self._get_listener_bodies( + create_protocol=constants.PROTOCOL_TERMINATED_HTTPS, + create_sni_containers=create_sni_containers, + expected_sni_containers=expected_sni_containers) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) + api_lb = response.json.get(self.root_tag) + self._assert_graphs_equal(expected_lb, api_lb) + + @mock.patch('cryptography.hazmat.backends.default_backend') + @mock.patch('cryptography.x509.load_pem_x509_crl') + @mock.patch('cryptography.x509.load_pem_x509_certificate') + @mock.patch('octavia.api.drivers.utils._get_secret_data') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_with_full_listener_certs(self, mock_cert_data, mock_get_secret, + mock_x509_cert, mock_x509_crl, + mock_backend): + cert1 = data_models.TLSContainer(certificate='cert 1') + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_get_secret.side_effect = ['ca cert', 'X509 CRL FILE'] + mock_cert_data.return_value = {'tls_cert': cert1, + 'sni_certs': [cert2, cert3]} + cert_mock = mock.MagicMock() + mock_x509_cert.return_value = cert_mock + create_client_ca_tls_container, create_client_crl_container = ( + uuidutils.generate_uuid(), uuidutils.generate_uuid()) + expected_client_ca_tls_container = create_client_ca_tls_container + create_client_authentication = constants.CLIENT_AUTH_MANDATORY + expected_client_authentication = constants.CLIENT_AUTH_MANDATORY + expected_client_crl_container = create_client_crl_container + create_sni_containers, expected_sni_containers = ( + self._get_sni_container_bodies()) + create_listener, expected_listener = self._get_listener_bodies( + create_protocol=constants.PROTOCOL_TERMINATED_HTTPS, + create_sni_containers=create_sni_containers, + expected_sni_containers=expected_sni_containers, + create_client_ca_tls_container=create_client_ca_tls_container, + expected_client_ca_tls_container=expected_client_ca_tls_container, + create_client_authentication=create_client_authentication, + expected_client_authentication=expected_client_authentication, + create_client_crl_container=create_client_crl_container, + expected_client_crl_container=expected_client_crl_container) + create_lb, expected_lb = self._get_lb_bodies( + 
+
+    @mock.patch('cryptography.hazmat.backends.default_backend')
+    @mock.patch('cryptography.x509.load_pem_x509_crl')
+    @mock.patch('cryptography.x509.load_pem_x509_certificate')
+    @mock.patch('octavia.api.drivers.utils._get_secret_data')
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_with_full_listener_certs(self, mock_cert_data, mock_get_secret,
+                                      mock_x509_cert, mock_x509_crl,
+                                      mock_backend):
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        cert2 = data_models.TLSContainer(certificate='cert 2')
+        cert3 = data_models.TLSContainer(certificate='cert 3')
+        mock_get_secret.side_effect = ['ca cert', 'X509 CRL FILE']
+        mock_cert_data.return_value = {'tls_cert': cert1,
+                                       'sni_certs': [cert2, cert3]}
+        cert_mock = mock.MagicMock()
+        mock_x509_cert.return_value = cert_mock
+        create_client_ca_tls_container, create_client_crl_container = (
+            uuidutils.generate_uuid(), uuidutils.generate_uuid())
+        expected_client_ca_tls_container = create_client_ca_tls_container
+        create_client_authentication = constants.CLIENT_AUTH_MANDATORY
+        expected_client_authentication = constants.CLIENT_AUTH_MANDATORY
+        expected_client_crl_container = create_client_crl_container
+        create_sni_containers, expected_sni_containers = (
+            self._get_sni_container_bodies())
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_protocol=constants.PROTOCOL_TERMINATED_HTTPS,
+            create_sni_containers=create_sni_containers,
+            expected_sni_containers=expected_sni_containers,
+            create_client_ca_tls_container=create_client_ca_tls_container,
+            expected_client_ca_tls_container=expected_client_ca_tls_container,
+            create_client_authentication=create_client_authentication,
+            expected_client_authentication=expected_client_authentication,
+            create_client_crl_container=create_client_crl_container,
+            expected_client_crl_container=expected_client_crl_container)
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body)
+        api_lb = response.json.get(self.root_tag)
+        self._assert_graphs_equal(expected_lb, api_lb)
+
+    def test_with_l7policy_redirect_pool_no_rule(self):
+        create_pool, expected_pool = self._get_pool_bodies(
+            create_members=[], expected_members=[])
+        create_l7policies, expected_l7policies = self._get_l7policies_bodies(
+            create_pool_name=create_pool['name'])
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_l7policies=create_l7policies,
+            expected_l7policies=expected_l7policies)
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener],
+            create_pools=[create_pool])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body)
+        api_lb = response.json.get(self.root_tag)
+        self._assert_graphs_equal(expected_lb, api_lb)
+
+    def test_with_l7policy_redirect_pool_one_rule(self):
+        create_pool, expected_pool = self._get_pool_bodies(
+            create_members=[], expected_members=[])
+        create_l7rules, expected_l7rules = self._get_l7rules_bodies()
+        create_l7policies, expected_l7policies = self._get_l7policies_bodies(
+            create_pool_name=create_pool['name'],
+            create_l7rules=create_l7rules,
+            expected_l7rules=expected_l7rules)
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_l7policies=create_l7policies,
+            expected_l7policies=expected_l7policies)
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener],
+            create_pools=[create_pool])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body)
+        api_lb = response.json.get(self.root_tag)
+        self._assert_graphs_equal(expected_lb, api_lb)
+
+    def test_with_l7policies_one_redirect_pool_one_rule(self):
+        create_pool, expected_pool = self._get_pool_bodies(
+            create_members=[], expected_members=[])
+        create_l7rules, expected_l7rules = self._get_l7rules_bodies()
+        create_l7policies, expected_l7policies = self._get_l7policies_bodies(
+            create_pool_name=create_pool['name'],
+            create_l7rules=create_l7rules,
+            expected_l7rules=expected_l7rules)
+        c_l7policies_url, e_l7policies_url = self._get_l7policies_bodies()
+        for policy in c_l7policies_url:
+            policy['position'] = 2
+            create_l7policies.append(policy)
+        for policy in e_l7policies_url:
+            policy['position'] = 2
+            expected_l7policies.append(policy)
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_l7policies=create_l7policies,
+            expected_l7policies=expected_l7policies)
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener],
+            create_pools=[create_pool])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body)
+        api_lb = response.json.get(self.root_tag)
+        self._assert_graphs_equal(expected_lb, api_lb)
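
Reviewer note: l7policy position is the 1-based evaluation order on a listener, which is why the second batch of policies above is pinned to position 2. A toy illustration of the ordering:

    # Lower position is evaluated first when several policies could match.
    policies = [{'name': 'redirect-url', 'position': 2},
                {'name': 'redirect-pool', 'position': 1}]
    order = [p['name'] for p in sorted(policies, key=lambda p: p['position'])]
    assert order == ['redirect-pool', 'redirect-url']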
+
+    def test_with_l7policies_one_redirect_url_with_default_pool(self):
+        create_pool, expected_pool = self._get_pool_bodies(
+            create_members=[], expected_members=[])
+        create_l7rules, expected_l7rules = self._get_l7rules_bodies()
+        create_l7policies, expected_l7policies = self._get_l7policies_bodies(
+            create_l7rules=create_l7rules,
+            expected_l7rules=expected_l7rules)
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_default_pool_name=create_pool['name'],
+            create_l7policies=create_l7policies,
+            expected_l7policies=expected_l7policies,
+        )
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener],
+            create_pools=[create_pool])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body)
+        api_lb = response.json.get(self.root_tag)
+        self._assert_graphs_equal(expected_lb, api_lb)
+
+    def test_with_l7policies_redirect_pools_no_rules(self):
+        create_pool, expected_pool = self._get_pool_bodies()
+        create_l7policies, expected_l7policies = self._get_l7policies_bodies(
+            create_pool_name=create_pool['name'])
+        r_create_pool, r_expected_pool = self._get_pool_bodies(name='pool2')
+        c_l7policies_url, e_l7policies_url = self._get_l7policies_bodies(
+            create_pool_name=r_create_pool['name'])
+        for policy in c_l7policies_url:
+            policy['position'] = 2
+            create_l7policies.append(policy)
+        for policy in e_l7policies_url:
+            policy['position'] = 2
+            expected_l7policies.append(policy)
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_l7policies=create_l7policies,
+            expected_l7policies=expected_l7policies)
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener],
+            create_pools=[create_pool, r_create_pool])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body)
+        api_lb = response.json.get(self.root_tag)
+        self._assert_graphs_equal(expected_lb, api_lb)
+
+    def test_with_l7policy_redirect_pool_bad_rule(self):
+        create_pool, expected_pool = self._get_pool_bodies(
+            create_members=[], expected_members=[])
+        create_l7rules, expected_l7rules = self._get_l7rules_bodies(
+            value="local host")
+        create_l7policies, expected_l7policies = self._get_l7policies_bodies(
+            create_pool_name=create_pool['name'],
+            create_l7rules=create_l7rules,
+            expected_l7rules=expected_l7rules)
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_l7policies=create_l7policies,
+            expected_l7policies=expected_l7policies)
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener],
+            create_pools=[create_pool])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body, status=400)
+        self.assertIn('L7Rule: Invalid characters',
+                      response.json.get('faultstring'))
+
+    def test_with_member_invalid_address(self):
+        # 169.254.169.254 is the default invalid member address
+        create_member = {
+            'address': '169.254.169.254',
+            'protocol_port': 80,
+        }
+        create_pool, _ = self._get_pool_bodies(
+            create_members=[create_member],
+            protocol=constants.PROTOCOL_TCP
+        )
+        create_listener, _ = self._get_listener_bodies(
+            create_default_pool_name="pool1",
+        )
+        create_lb, _ = self._get_lb_bodies(
+            [create_listener],
+            [],
+            create_pools=[create_pool]
+        )
+
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body, expect_errors=True)
+
+        self.assertEqual(400, response.status_code)
+        expect_error_msg = ("169.254.169.254 is not a valid option for member "
+                            "address")
+        self.assertEqual(expect_error_msg, response.json['faultstring'])
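
Reviewer note: 169.254.169.254 is the cloud metadata-service address, hence its use as the default forbidden member address. A sketch of the kind of check involved (the reserved list is configurable; the default shown is an assumption, not this patch's code):

    import ipaddress

    RESERVED_IPS = [ipaddress.ip_network('169.254.169.254/32')]  # assumed

    def member_address_allowed(address: str) -> bool:
        # Reject member addresses that fall inside a reserved network.
        ip = ipaddress.ip_address(address)
        return not any(ip in net for net in RESERVED_IPS)

    assert not member_address_allowed('169.254.169.254')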
+
+    def _test_with_one_of_everything_helper(self):
+        create_member, expected_member = self._get_member_bodies()
+        create_hm, expected_hm = self._get_hm_bodies()
+        create_pool, expected_pool = self._get_pool_bodies(
+            create_members=[create_member],
+            expected_members=[expected_member],
+            create_hm=create_hm,
+            expected_hm=expected_hm,
+            protocol=constants.PROTOCOL_HTTP)
+        create_sni_containers, expected_sni_containers = (
+            self._get_sni_container_bodies())
+        create_l7rules, expected_l7rules = self._get_l7rules_bodies()
+        r_create_member, r_expected_member = self._get_member_bodies(
+            protocol_port=88)
+        r_create_pool, r_expected_pool = self._get_pool_bodies(
+            create_members=[r_create_member],
+            expected_members=[r_expected_member])
+        create_l7policies, expected_l7policies = self._get_l7policies_bodies(
+            create_pool_name=r_create_pool['name'],
+            create_l7rules=create_l7rules,
+            expected_l7rules=expected_l7rules)
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_default_pool_name=create_pool['name'],
+            create_protocol=constants.PROTOCOL_TERMINATED_HTTPS,
+            create_l7policies=create_l7policies,
+            expected_l7policies=expected_l7policies,
+            create_sni_containers=create_sni_containers,
+            expected_sni_containers=expected_sni_containers)
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener],
+            create_pools=[create_pool])
+        body = self._build_body(create_lb)
+        return body, expected_lb
+
+    # TODO(johnsom) Fix this when there is a noop certificate manager
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_with_one_of_everything(self, mock_cert_data):
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        cert2 = data_models.TLSContainer(certificate='cert 2')
+        cert3 = data_models.TLSContainer(certificate='cert 3')
+        mock_cert_data.return_value = {'tls_cert': cert1,
+                                       'sni_certs': [cert2, cert3]}
+        body, expected_lb = self._test_with_one_of_everything_helper()
+        response = self.post(self.LBS_PATH, body)
+        api_lb = response.json.get(self.root_tag)
+        self._assert_graphs_equal(expected_lb, api_lb)
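
Reviewer note: the "one of everything" helper exercises single-call graph creation -- one POST carrying the whole object tree. A trimmed, approximate request body (field names reconstructed from the _get_*_bodies helpers; treat details as a sketch, not the exact API schema):

    graph_body = {
        'loadbalancer': {
            'name': 'lb1',
            'vip_subnet_id': '<subnet uuid>',
            'listeners': [{'name': 'listener1',
                           'protocol': 'TERMINATED_HTTPS',
                           'protocol_port': 443,
                           'default_pool': {'name': 'pool1'},
                           'l7policies': [{'position': 1,
                                           'redirect_pool': {'name': 'pool2'}}]}],
            'pools': [{'name': 'pool1', 'protocol': 'HTTP',
                       'lb_algorithm': 'ROUND_ROBIN',
                       'members': [{'address': '192.0.2.10',
                                    'protocol_port': 80}],
                       'healthmonitor': {'type': 'HTTP', 'delay': 3,
                                         'timeout': 1, 'max_retries': 3}}],
        },
    }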
+
+    def test_db_create_failure(self):
+        create_listener, expected_listener = self._get_listener_bodies()
+        create_lb, _ = self._get_lb_bodies([create_listener],
+                                           [expected_listener])
+        body = self._build_body(create_lb)
+        with mock.patch('octavia.db.repositories.Repositories.'
+                        'create_load_balancer_and_vip') as repo_mock:
+            repo_mock.side_effect = Exception('I am a DB Error')
+            self.post(self.LBS_PATH, body, status=500)
+
+    def test_pool_names_not_unique(self):
+        create_pool1, expected_pool1 = self._get_pool_bodies()
+        create_pool2, expected_pool2 = self._get_pool_bodies()
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_default_pool_name=create_pool1['name'])
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener],
+            create_pools=[create_pool1, create_pool2])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body, status=400)
+        self.assertIn("Pool names must be unique",
+                      response.json.get('faultstring'))
+
+    def test_pool_names_must_have_specs(self):
+        create_pool, expected_pool = self._get_pool_bodies()
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_default_pool_name="my_nonexistent_pool")
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener],
+            create_pools=[create_pool])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body, status=400)
+        self.assertIn("referenced but no full definition",
+                      response.json.get('faultstring'))
+
+    def test_pool_mandatory_attributes(self):
+        create_pool, expected_pool = self._get_pool_bodies()
+        create_pool.pop('protocol')
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_default_pool_name=create_pool['name'])
+        create_lb, expected_lb = self._get_lb_bodies(
+            create_listeners=[create_listener],
+            expected_listeners=[expected_listener],
+            create_pools=[create_pool])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body, status=400)
+        self.assertIn("missing required attribute: protocol",
+                      response.json.get('faultstring'))
+
+    def test_create_over_quota_lb(self):
+        body, _ = self._test_with_one_of_everything_helper()
+        self.start_quota_mock(data_models.LoadBalancer)
+        self.post(self.LBS_PATH, body, status=403)
+
+    def test_create_over_quota_pools(self):
+        body, _ = self._test_with_one_of_everything_helper()
+        self.start_quota_mock(data_models.Pool)
+        self.post(self.LBS_PATH, body, status=403)
+
+    def test_create_over_quota_listeners(self):
+        body, _ = self._test_with_one_of_everything_helper()
+        self.start_quota_mock(data_models.Listener)
+        self.post(self.LBS_PATH, body, status=403)
+
+    def test_create_over_quota_members(self):
+        body, _ = self._test_with_one_of_everything_helper()
+        self.start_quota_mock(data_models.Member)
+        self.post(self.LBS_PATH, body, status=403)
+
+    def test_create_over_quota_hms(self):
+        body, _ = self._test_with_one_of_everything_helper()
+        self.start_quota_mock(data_models.HealthMonitor)
+        self.post(self.LBS_PATH, body, status=403)
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_create_over_quota_sanity_check(self, mock_cert_data):
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        cert2 = data_models.TLSContainer(certificate='cert 2')
+        cert3 = data_models.TLSContainer(certificate='cert 3')
+        mock_cert_data.return_value = {'tls_cert': cert1,
+                                       'sni_certs': [cert2, cert3]}
+        # This one should create, as we don't check quotas on L7Policies
+        body, _ = self._test_with_one_of_everything_helper()
+        self.start_quota_mock(data_models.L7Policy)
+        self.post(self.LBS_PATH, body)
+
+    def _getStatus(self, lb_id):
+        res = self.get(self.LB_PATH.format(lb_id=lb_id + "/status"))
+        return res.json.get('statuses').get('loadbalancer')
+
+    # Test the "statuses" alias for "status".
+    # This is required for backward compatibility with neutron-lbaas
+    def test_statuses(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+
+        statuses = self.get(self.LB_PATH.format(lb_id=lb['id'] + "/statuses"))
+        response = statuses.json.get('statuses').get('loadbalancer')
+        self.assertEqual(lb['name'], response['name'])
+        self.assertEqual(lb['id'], response['id'])
+        self.assertEqual(lb['operating_status'],
+                         response['operating_status'])
+        self.assertEqual(lb['provisioning_status'],
+                         response['provisioning_status'])
+
+    def test_status(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+
+        response = self._getStatus(lb['id'])
+        self.assertEqual(lb['name'], response['name'])
+        self.assertEqual(lb['id'], response['id'])
+        self.assertEqual(lb['operating_status'],
+                         response['operating_status'])
+        self.assertEqual(lb['provisioning_status'],
+                         response['provisioning_status'])
+
+    def _assertLB(self, lb, response):
+        self.assertEqual(lb['name'], response['name'])
+        self.assertEqual(lb['id'], response['id'])
+        self.assertEqual(constants.ONLINE,
+                         response['operating_status'])
+        self.assertEqual(constants.PENDING_UPDATE,
+                         response['provisioning_status'])
+
+    def test_statuses_listener(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        listener = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb['id']).get('listener')
+
+        response = self._getStatus(lb['id'])
+
+        self._assertLB(lb, response)
+        response = response.get('listeners')[0]
+        self.assertEqual(listener['name'], response['name'])
+        self.assertEqual(listener['id'], response['id'])
+        self.assertEqual(listener['operating_status'],
+                         response['operating_status'])
+        self.assertEqual(listener['provisioning_status'],
+                         response['provisioning_status'])
+
+    def _assertListener(self, listener, response,
+                        prov_status=constants.ACTIVE):
+        self.assertEqual(listener['name'], response['name'])
+        self.assertEqual(listener['id'], response['id'])
+        self.assertEqual(constants.ONLINE,
+                         response['operating_status'])
+        self.assertEqual(prov_status, response['provisioning_status'])
+
+    def _assertListenerPending(self, listener, response):
+        self._assertListener(listener, response, constants.PENDING_UPDATE)
+
+    def test_statuses_multiple_listeners(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        listener1 = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb['id']).get('listener')
+
+        self.set_lb_status(lb['id'])
+        listener2 = self.create_listener(
+            constants.PROTOCOL_HTTPS, 443, lb['id']).get('listener')
+
+        response = self._getStatus(lb['id'])
+
+        self._assertLB(lb, response)
+        self._assertListener(listener1, response.get('listeners')[0])
+        response = response.get('listeners')[1]
+        self.assertEqual(listener2['name'], response['name'])
+        self.assertEqual(listener2['id'], response['id'])
+        self.assertEqual(listener2['operating_status'],
+                         response['operating_status'])
+        self.assertEqual(listener2['provisioning_status'],
+                         response['provisioning_status'])
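
Reviewer note: _getStatus unwraps the nested tree the status endpoint returns; the assertions in these tests walk it level by level. The shape, approximately (fields trimmed):

    status_tree = {'statuses': {'loadbalancer': {
        'id': '...', 'name': '...',
        'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE',
        'listeners': [{
            'id': '...', 'operating_status': 'ONLINE',
            'provisioning_status': 'ACTIVE',
            'pools': [{
                'id': '...',
                'members': [{'id': '...', 'address': '...'}],
                'health_monitor': {'id': '...', 'type': 'HTTP'},
            }],
        }],
    }}}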
+
+    def test_statuses_pool(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        listener = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb['id']).get('listener')
+        self.set_lb_status(lb['id'])
+        pool = self.create_pool(
+            lb['id'],
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=listener['id']).get('pool')
+
+        response = self._getStatus(lb['id'])
+
+        self._assertLB(lb, response)
+        self._assertListenerPending(listener, response.get('listeners')[0])
+        response = response.get('listeners')[0]['pools'][0]
+        self.assertEqual(pool['name'], response['name'])
+        self.assertEqual(pool['id'], response['id'])
+        self.assertEqual(pool['operating_status'],
+                         response['operating_status'])
+        self.assertEqual(pool['provisioning_status'],
+                         response['provisioning_status'])
+
+    def _assertPool(self, pool, response,
+                    prov_status=constants.ACTIVE):
+        self.assertEqual(pool['name'], response['name'])
+        self.assertEqual(pool['id'], response['id'])
+        self.assertEqual(constants.ONLINE,
+                         response['operating_status'])
+        self.assertEqual(prov_status, response['provisioning_status'])
+
+    def _assertPoolPending(self, pool, response):
+        self._assertPool(pool, response, constants.PENDING_UPDATE)
+
+    def test_statuses_pools(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        listener = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb['id']).get('listener')
+        self.set_lb_status(lb['id'])
+        pool1 = self.create_pool(
+            lb['id'],
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=listener['id']).get('pool')
+        self.set_lb_status(lb['id'])
+        pool2 = self.create_pool(
+            lb['id'],
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+        self.set_lb_status(lb['id'])
+        l7_policy = self.create_l7policy(
+            listener['id'],
+            constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            redirect_pool_id=pool2.get('id')).get('l7policy')
+        self.set_lb_status(lb['id'])
+        self.create_l7rule(
+            l7_policy['id'], constants.L7RULE_TYPE_HOST_NAME,
+            constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
+            'www.example.com').get(self.root_tag)
+
+        response = self._getStatus(lb['id'])
+
+        self._assertLB(lb, response)
+        self._assertListenerPending(listener, response.get('listeners')[0])
+        self._assertPool(pool1, response.get('listeners')[0]['pools'][0])
+        self._assertPool(pool2, response.get('listeners')[0]['pools'][1])
+
+    def test_statuses_health_monitor(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        listener = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb['id']).get('listener')
+        self.set_lb_status(lb['id'])
+        pool = self.create_pool(
+            lb['id'],
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=listener['id']).get('pool')
+        self.set_lb_status(lb['id'])
+        hm = self.create_health_monitor(
+            pool['id'], constants.HEALTH_MONITOR_HTTP,
+            1, 1, 1, 1).get('healthmonitor')
+
+        response = self._getStatus(lb['id'])
+
+        self._assertLB(lb, response)
+        self._assertListenerPending(listener, response.get('listeners')[0])
+        self._assertPoolPending(pool, response.get('listeners')[0]['pools'][0])
+        response = response.get('listeners')[0]['pools'][0]['health_monitor']
+        self.assertEqual(hm['name'], response['name'])
+        self.assertEqual(hm['id'], response['id'])
+        self.assertEqual(hm['type'], response['type'])
+        self.assertEqual(hm['operating_status'],
+                         response['operating_status'])
+        self.assertEqual(hm['provisioning_status'],
+                         response['provisioning_status'])
+
+    def test_statuses_member(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        listener = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb['id']).get('listener')
+        self.set_lb_status(lb['id'])
+        pool = self.create_pool(
+            lb['id'],
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=listener['id']).get('pool')
+        self.set_lb_status(lb['id'])
+        member = self.create_member(
+            pool['id'], '10.0.0.1', 80).get('member')
+
+        response = self._getStatus(lb['id'])
+
+        self._assertLB(lb, response)
+        self._assertListenerPending(listener, response.get('listeners')[0])
+        self._assertPoolPending(pool, response.get('listeners')[0]['pools'][0])
+        response = response.get('listeners')[0]['pools'][0]['members'][0]
+        self.assertEqual(member['name'], response['name'])
+        self.assertEqual(member['id'], response['id'])
+        self.assertEqual(member['address'], response['address'])
+        self.assertEqual(member['protocol_port'], response['protocol_port'])
+        self.assertEqual(member['operating_status'],
+                         response['operating_status'])
+        self.assertEqual(member['provisioning_status'],
+                         response['provisioning_status'])
+
+    def test_statuses_members(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        listener = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb['id']).get('listener')
+        self.set_lb_status(lb['id'])
+        pool = self.create_pool(
+            lb['id'],
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=listener['id']).get('pool')
+        self.set_lb_status(lb['id'])
+        member1 = self.create_member(
+            pool['id'], '10.0.0.1', 80).get('member')
+        self.set_lb_status(lb['id'])
+        member2 = self.create_member(
+            pool['id'], '10.0.0.2', 88, name='test').get('member')
+
+        response = self._getStatus(lb['id'])
+
+        self._assertLB(lb, response)
+        self._assertListenerPending(listener, response.get('listeners')[0])
+        self._assertPoolPending(pool, response.get('listeners')[0]['pools'][0])
+        members = response.get('listeners')[0]['pools'][0]['members']
+        response = members[0]
+        self.assertEqual(member1['name'], response['name'])
+        self.assertEqual(member1['id'], response['id'])
+        self.assertEqual(member1['address'], response['address'])
+        self.assertEqual(member1['protocol_port'], response['protocol_port'])
+        self.assertEqual(constants.ONLINE,
+                         response['operating_status'])
+        self.assertEqual(constants.ACTIVE,
+                         response['provisioning_status'])
+        response = members[1]
+        self.assertEqual(member2['name'], response['name'])
+        self.assertEqual(member2['id'], response['id'])
+        self.assertEqual(member2['address'], response['address'])
+        self.assertEqual(member2['protocol_port'], response['protocol_port'])
+
+    def test_statuses_authorized(self):
+        project_id = uuidutils.generate_uuid()
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid(),
+            project_id=project_id).get('loadbalancer')
+
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            response = self._getStatus(lb['id'])
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+
+        self.assertEqual(lb['name'], response['name'])
+        self.assertEqual(lb['id'], response['id'])
+        self.assertEqual(lb['operating_status'],
+                         response['operating_status'])
+        self.assertEqual(lb['provisioning_status'],
+                         response['provisioning_status'])
+
+    def test_statuses_not_authorized(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        test_context = octavia.common.context.RequestContext(
+            project_id=uuidutils.generate_uuid())
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            res = self.get(self.LB_PATH.format(lb_id=lb['id'] + "/status"),
+                           status=403)
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, res.json)
+
+    def test_statuses_get_deleted(self):
+        project_id = uuidutils.generate_uuid()
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid(),
+            project_id=project_id).get('loadbalancer')
+        self.set_lb_status(lb['id'], status=constants.DELETED)
+        self.get(self.LB_PATH.format(lb_id=lb['id'] + "/status"),
+                 status=404)
+
+    def _getStats(self, lb_id):
+        res = self.get(self.LB_PATH.format(lb_id=lb_id + "/stats"))
+        return res.json.get('stats')
+
+    def test_statistics(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        li = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener')
+        amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id'])
+        ls = self.create_listener_stats_dynamic(
+            listener_id=li.get('id'),
+            amphora_id=amphora.id,
+            bytes_in=random.randint(1, 9),
+            bytes_out=random.randint(1, 9),
+            total_connections=random.randint(1, 9),
+            request_errors=random.randint(1, 9))
+        self.session.commit()
+
+        response = self._getStats(lb['id'])
+        self.assertEqual(ls['bytes_in'], response['bytes_in'])
+        self.assertEqual(ls['bytes_out'], response['bytes_out'])
+        self.assertEqual(ls['total_connections'],
+                         response['total_connections'])
+        self.assertEqual(ls['active_connections'],
+                         response['active_connections'])
+        self.assertEqual(ls['request_errors'],
+                         response['request_errors'])
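
Reviewer note: the load balancer stats endpoint reports counters rolled up from the per-listener statistics rows seeded by create_listener_stats_dynamic; with a single listener the totals equal that listener's counters, which is what the test asserts. The rollup semantics, sketched (not Octavia's implementation):

    STAT_KEYS = ('bytes_in', 'bytes_out', 'active_connections',
                 'total_connections', 'request_errors')

    def aggregate_lb_stats(listener_stats_rows):
        # Sum each counter across all listeners of the load balancer.
        return {key: sum(row[key] for row in listener_stats_rows)
                for key in STAT_KEYS}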
+
+    def test_statistics_authorized(self):
+        project_id = uuidutils.generate_uuid()
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid(),
+            project_id=project_id).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        li = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener')
+        amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id'])
+        ls = self.create_listener_stats_dynamic(
+            listener_id=li.get('id'),
+            amphora_id=amphora.id,
+            bytes_in=random.randint(1, 9),
+            bytes_out=random.randint(1, 9),
+            total_connections=random.randint(1, 9),
+            request_errors=random.randint(1, 9))
+        self.session.commit()
+
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            response = self._getStats(lb['id'])
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+
+        self.assertEqual(ls['bytes_in'], response['bytes_in'])
+        self.assertEqual(ls['bytes_out'], response['bytes_out'])
+        self.assertEqual(ls['total_connections'],
+                         response['total_connections'])
+        self.assertEqual(ls['active_connections'],
+                         response['active_connections'])
+        self.assertEqual(ls['request_errors'],
+                         response['request_errors'])
+
+    def test_statistics_not_authorized(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        li = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener')
+        amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id'])
+        self.create_listener_stats_dynamic(
+            listener_id=li.get('id'),
+            amphora_id=amphora.id,
+            bytes_in=random.randint(1, 9),
+            bytes_out=random.randint(1, 9),
+            total_connections=random.randint(1, 9))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        test_context = octavia.common.context.RequestContext(
+            project_id=uuidutils.generate_uuid())
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            res = self.get(self.LB_PATH.format(lb_id=lb['id'] + "/stats"),
+                           status=403)
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, res.json)
+
+    def test_statistics_get_deleted(self):
+        lb = self.create_load_balancer(
+            uuidutils.generate_uuid()).get('loadbalancer')
+        self.set_lb_status(lb['id'])
+        li = self.create_listener(
+            constants.PROTOCOL_HTTP, 80, lb.get('id')).get('listener')
+        amphora = self.create_amphora(uuidutils.generate_uuid(), lb['id'])
+        self.create_listener_stats_dynamic(
+            listener_id=li.get('id'),
+            amphora_id=amphora.id,
+            bytes_in=random.randint(1, 9),
+            bytes_out=random.randint(1, 9),
+            total_connections=random.randint(1, 9))
+        self.set_lb_status(lb['id'], status=constants.DELETED)
+        self.get(self.LB_PATH.format(lb_id=lb['id'] + "/stats"), status=404)
diff --git a/octavia/tests/functional/api/v2/test_member.py b/octavia/tests/functional/api/v2/test_member.py
new file mode 100644
index 0000000000..f4ee37d970
--- /dev/null
+++ b/octavia/tests/functional/api/v2/test_member.py
@@ -0,0 +1,1582 @@
+# Copyright 2014 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from octavia_lib.api.drivers import data_models as driver_dm
+from oslo_config import cfg
+from oslo_config import fixture as oslo_fixture
+from oslo_utils import uuidutils
+from sqlalchemy.orm import exc as sa_exception
+
+from octavia.api.drivers import utils as driver_utils
+from octavia.common import constants
+import octavia.common.context
+from octavia.common import data_models
+from octavia.common import exceptions
+from octavia.db import repositories
+from octavia.network import base as network_base
+from octavia.tests.functional.api.v2 import base
+
+
+class TestMember(base.BaseAPITest):
+
+    root_tag = 'member'
+    root_tag_list = 'members'
+    root_tag_links = 'members_links'
+
+    def setUp(self):
+        super().setUp()
+        vip_subnet_id = uuidutils.generate_uuid()
+        self.lb = self.create_load_balancer(vip_subnet_id)
+        self.lb_id = self.lb.get('loadbalancer').get('id')
+        self.project_id = self.lb.get('loadbalancer').get('project_id')
+        self.set_lb_status(self.lb_id)
+        self.listener = self.create_listener(
+            constants.PROTOCOL_HTTP, 80,
+            lb_id=self.lb_id)
+        self.listener_id = self.listener.get('listener').get('id')
+        self.set_lb_status(self.lb_id)
+        self.pool = self.create_pool(self.lb_id, constants.PROTOCOL_HTTP,
+                                     constants.LB_ALGORITHM_ROUND_ROBIN)
+        self.pool_id = self.pool.get('pool').get('id')
+        self.set_lb_status(self.lb_id)
+        self.pool_with_listener = self.create_pool(
+            self.lb_id, constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=self.listener_id)
+        self.pool_with_listener_id = (
+            self.pool_with_listener.get('pool').get('id'))
+        self.set_lb_status(self.lb_id)
+        self.members_path = self.MEMBERS_PATH.format(
+            pool_id=self.pool_id)
+        self.member_path = self.members_path + '/{member_id}'
+        self.members_path_listener = self.MEMBERS_PATH.format(
+            pool_id=self.pool_with_listener_id)
+        self.member_path_listener = self.members_path_listener + '/{member_id}'
+        self.pool_repo = repositories.PoolRepository()
+
+    def test_get(self):
+        api_member = self.create_member(
+            self.pool_id, '192.0.2.1', 80).get(self.root_tag)
+        response = self.get(self.member_path.format(
+            member_id=api_member.get('id'))).json.get(self.root_tag)
+        self.assertEqual(api_member, response)
+        self.assertEqual(api_member.get('name'), '')
+        self.assertEqual([], api_member['tags'])
+
+    def test_get_authorized(self):
+        api_member = self.create_member(
+            self.pool_id, '192.0.2.1', 80).get(self.root_tag)
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': self.project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            response = self.get(self.member_path.format(
+                member_id=api_member.get('id'))).json.get(self.root_tag)
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(api_member, response)
+        self.assertEqual(api_member.get('name'), '')
+
+    def test_get_not_authorized(self):
+        api_member = self.create_member(
+            self.pool_id, '192.0.2.1', 80).get(self.root_tag)
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        test_context = octavia.common.context.RequestContext(
+            project_id=uuidutils.generate_uuid())
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            response = self.get(self.member_path.format(
+                member_id=api_member.get('id')), status=403).json
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, response)
+
+    def test_get_deleted_gives_404(self):
+        api_member = self.create_member(
+            self.pool_id, '192.0.2.1', 80).get(self.root_tag)
+
+        self.set_object_status(self.member_repo, api_member.get('id'),
+                               provisioning_status=constants.DELETED)
+        self.get(self.member_path.format(member_id=api_member.get('id')),
+                 status=404)
+
+    def test_bad_get(self):
+        self.get(self.member_path.format(member_id=uuidutils.generate_uuid()),
+                 status=404)
+
+    def test_get_all(self):
+        api_m_1 = self.create_member(
+            self.pool_id, '192.0.2.1', 80,
+            tags=['test_tag1']).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        api_m_2 = self.create_member(
+            self.pool_id, '192.0.2.2', 80,
+            tags=['test_tag2']).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        # Original objects didn't have the updated operating/provisioning
+        # status that exists in the DB.
+        for m in [api_m_1, api_m_2]:
+            m['operating_status'] = constants.ONLINE
+            m['provisioning_status'] = constants.ACTIVE
+            m.pop('updated_at')
+        response = self.get(self.members_path).json.get(self.root_tag_list)
+        self.assertIsInstance(response, list)
+        self.assertEqual(2, len(response))
+        for m in response:
+            m.pop('updated_at')
+        for m in [api_m_1, api_m_2]:
+            self.assertIn(m, response)
+
+    def test_get_all_hides_deleted(self):
+        api_member = self.create_member(
+            self.pool_id, '10.0.0.1', 80).get(self.root_tag)
+
+        response = self.get(self.members_path)
+        objects = response.json.get(self.root_tag_list)
+        self.assertEqual(len(objects), 1)
+        self.set_object_status(self.member_repo, api_member.get('id'),
+                               provisioning_status=constants.DELETED)
+        response = self.get(self.members_path)
+        objects = response.json.get(self.root_tag_list)
+        self.assertEqual(len(objects), 0)
+
+    def _test_get_all_authorized(self, roles, project_id):
+        api_m_1 = self.create_member(
+            self.pool_id, '192.0.2.1', 80).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        api_m_2 = self.create_member(
+            self.pool_id, '192.0.2.2', 80).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        # Original objects didn't have the updated operating/provisioning
+        # status that exists in the DB.
+        for m in [api_m_1, api_m_2]:
+            m['operating_status'] = constants.ONLINE
+            m['provisioning_status'] = constants.ACTIVE
+            m.pop('updated_at')
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': roles,
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            response = self.get(self.members_path)
+            response = response.json.get(self.root_tag_list)
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+
+        self.assertIsInstance(response, list)
+        self.assertEqual(2, len(response))
+        for m in response:
+            m.pop('updated_at')
+        for m in [api_m_1, api_m_2]:
+            self.assertIn(m, response)
+
+    def test_get_all_authorized(self):
+        self._test_get_all_authorized(
+            roles=['load-balancer_member', 'member'],
+            project_id=self.project_id)
+
+    def test_get_all_authorized_service(self):
+        self._test_get_all_authorized(
+            roles=['service'], project_id='services')
+
+    def test_get_all_unscoped_token(self):
+        api_m_1 = self.create_member(
+            self.pool_id, '192.0.2.1', 80).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        api_m_2 = self.create_member(
+            self.pool_id, '192.0.2.2', 80).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        # Original objects didn't have the updated operating/provisioning
+        # status that exists in the DB.
+        for m in [api_m_1, api_m_2]:
+            m['operating_status'] = constants.ONLINE
+            m['provisioning_status'] = constants.ACTIVE
+            m.pop('updated_at')
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': None}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            result = self.get(self.members_path, status=403).json
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, result)
+
+    def test_get_all_not_authorized(self):
+        api_m_1 = self.create_member(
+            self.pool_id, '192.0.2.1', 80).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        api_m_2 = self.create_member(
+            self.pool_id, '192.0.2.2', 80).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        # Original objects didn't have the updated operating/provisioning
+        # status that exists in the DB.
+        for m in [api_m_1, api_m_2]:
+            m['operating_status'] = constants.ONLINE
+            m['provisioning_status'] = constants.ACTIVE
+            m.pop('updated_at')
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        test_context = octavia.common.context.RequestContext(
+            project_id=uuidutils.generate_uuid())
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            response = self.get(self.members_path, status=403)
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json)
+
+    def test_get_all_sorted(self):
+        self.create_member(self.pool_id, '192.0.2.1', 80, name='member1')
+        self.set_lb_status(self.lb_id)
+        self.create_member(self.pool_id, '192.0.2.2', 80, name='member2')
+        self.set_lb_status(self.lb_id)
+        self.create_member(self.pool_id, '192.0.2.3', 80, name='member3')
+        self.set_lb_status(self.lb_id)
+
+        response = self.get(self.members_path,
+                            params={'sort': 'name:desc'})
+        members_desc = response.json.get(self.root_tag_list)
+        response = self.get(self.members_path,
+                            params={'sort': 'name:asc'})
+        members_asc = response.json.get(self.root_tag_list)
+
+        self.assertEqual(3, len(members_desc))
+        self.assertEqual(3, len(members_asc))
+
+        member_id_names_desc = [(member.get('id'), member.get('name'))
+                                for member in members_desc]
+        member_id_names_asc = [(member.get('id'), member.get('name'))
+                               for member in members_asc]
+        self.assertEqual(member_id_names_asc,
+                         list(reversed(member_id_names_desc)))
+
+    def test_get_all_limited(self):
+        self.create_member(self.pool_id, '192.0.2.1', 80, name='member1')
+        self.set_lb_status(self.lb_id)
+        self.create_member(self.pool_id, '192.0.2.2', 80, name='member2')
+        self.set_lb_status(self.lb_id)
+        self.create_member(self.pool_id, '192.0.2.3', 80, name='member3')
+        self.set_lb_status(self.lb_id)
+
+        # First two -- should have 'next' link
+        first_two = self.get(self.members_path, params={'limit': 2}).json
+        objs = first_two[self.root_tag_list]
+        links = first_two[self.root_tag_links]
+        self.assertEqual(2, len(objs))
+        self.assertEqual(1, len(links))
+        self.assertEqual('next', links[0]['rel'])
+
+        # Third + off the end -- should have previous link
+        third = self.get(self.members_path, params={
+            'limit': 2,
+            'marker': first_two[self.root_tag_list][1]['id']}).json
+        objs = third[self.root_tag_list]
+        links = third[self.root_tag_links]
+        self.assertEqual(1, len(objs))
+        self.assertEqual(1, len(links))
+        self.assertEqual('previous', links[0]['rel'])
+
+        # Middle -- should have both links
+        middle = self.get(self.members_path, params={
+            'limit': 1,
+            'marker': first_two[self.root_tag_list][0]['id']}).json
+        objs = middle[self.root_tag_list]
+        links = middle[self.root_tag_links]
+        self.assertEqual(1, len(objs))
+        self.assertEqual(2, len(links))
+        self.assertCountEqual(['previous', 'next'],
+                              [link['rel'] for link in links])
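
Reviewer note: list pagination is marker-based: limit caps the page size, marker is the id of the last object already seen, and members_links carries the next/previous rels asserted above. A hypothetical client walk over the collection (the client object and URL prefix are assumptions):

    def iter_members(client, pool_id, page_size=2):
        # client.get is assumed to return the parsed JSON body as a dict.
        params = {'limit': page_size}
        while True:
            page = client.get(f'/v2.0/lbaas/pools/{pool_id}/members',
                              params=params)
            yield from page['members']
            links = page.get('members_links', [])
            if not any(link['rel'] == 'next' for link in links):
                break
            params['marker'] = page['members'][-1]['id']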
+
+    def test_get_all_fields_filter(self):
+        self.create_member(self.pool_id, '192.0.2.1', 80, name='member1')
+        self.set_lb_status(self.lb_id)
+        self.create_member(self.pool_id, '192.0.2.2', 80, name='member2')
+        self.set_lb_status(self.lb_id)
+        self.create_member(self.pool_id, '192.0.2.3', 80, name='member3')
+        self.set_lb_status(self.lb_id)
+
+        members = self.get(self.members_path, params={
+            'fields': ['id', 'address']}).json
+        for member in members['members']:
+            self.assertIn('id', member)
+            self.assertIn('address', member)
+            self.assertNotIn('name', member)
+            self.assertNotIn('monitor_address', member)
+
+    def test_get_one_fields_filter(self):
+        member1 = self.create_member(
+            self.pool_id, '192.0.2.1', 80, name='member1').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        member = self.get(
+            self.member_path.format(member_id=member1.get('id')),
+            params={'fields': ['id', 'address']}).json.get(self.root_tag)
+        self.assertIn('id', member)
+        self.assertIn('address', member)
+        self.assertNotIn('name', member)
+        self.assertNotIn('monitor_address', member)
+
+    def test_get_all_filter(self):
+        mem1 = self.create_member(self.pool_id,
+                                  '192.0.2.1',
+                                  80,
+                                  name='member1').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_member(self.pool_id,
+                           '192.0.2.2',
+                           80,
+                           name='member2').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_member(self.pool_id,
+                           '192.0.2.3',
+                           80,
+                           name='member3').get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        members = self.get(self.members_path, params={
+            'id': mem1['id']}).json
+        self.assertEqual(1, len(members['members']))
+        self.assertEqual(mem1['id'],
+                         members['members'][0]['id'])
+
+    def test_get_all_tags_filter(self):
+        mem1 = self.create_member(
+            self.pool_id,
+            '192.0.2.1',
+            80,
+            name='member1',
+            tags=['test_tag1', 'test_tag2']
+        ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        mem2 = self.create_member(
+            self.pool_id,
+            '192.0.2.2',
+            80,
+            name='member2',
+            tags=['test_tag2', 'test_tag3']
+        ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        mem3 = self.create_member(
+            self.pool_id,
+            '192.0.2.3',
+            80,
+            name='member3',
+            tags=['test_tag4', 'test_tag5']
+        ).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+
+        mems = self.get(
+            self.members_path,
+            params={'tags': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(mems, list)
+        self.assertEqual(2, len(mems))
+        self.assertEqual(
+            [mem1.get('id'), mem2.get('id')],
+            [mem.get('id') for mem in mems]
+        )
+
+        mems = self.get(
+            self.members_path,
+            params={'tags': ['test_tag2', 'test_tag3']}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(mems, list)
+        self.assertEqual(1, len(mems))
+        self.assertEqual(
+            [mem2.get('id')],
+            [mem.get('id') for mem in mems]
+        )
+
+        mems = self.get(
+            self.members_path,
+            params={'tags-any': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(mems, list)
+        self.assertEqual(2, len(mems))
+        self.assertEqual(
+            [mem1.get('id'), mem2.get('id')],
+            [mem.get('id') for mem in mems]
+        )
+
+        mems = self.get(
+            self.members_path,
+            params={'not-tags': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(mems, list)
+        self.assertEqual(1, len(mems))
+        self.assertEqual(
+            [mem3.get('id')],
+            [mem.get('id') for mem in mems]
+        )
+
+        mems = self.get(
+            self.members_path,
+            params={'not-tags-any': ['test_tag2', 'test_tag4']}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(mems, list)
+        self.assertEqual(0, len(mems))
+
+        mems = self.get(
+            self.members_path,
+            params={'tags': 'test_tag2',
+                    'tags-any': ['test_tag1', 'test_tag3']}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(mems, list)
+        self.assertEqual(2, len(mems))
+        self.assertEqual(
+            [mem1.get('id'), mem2.get('id')],
+            [mem.get('id') for mem in mems]
+        )
+
+        mems = self.get(
+            self.members_path,
+            params={'tags': 'test_tag2', 'not-tags': 'test_tag2'}
+        ).json.get(self.root_tag_list)
+        self.assertIsInstance(mems, list)
+        self.assertEqual(0, len(mems))
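
Reviewer note: the tag filters compose as set operations -- tags is AND, tags-any is OR, and the not- variants negate them -- which is exactly what the combinations above assert. The equivalent predicate, sketched:

    def matches(obj_tags, tags=(), tags_any=(), not_tags=(), not_tags_any=()):
        s = set(obj_tags)
        ok = all(t in s for t in tags)           # must carry every tag
        if tags_any:
            ok = ok and any(t in s for t in tags_any)
        if not_tags:
            ok = ok and not all(t in s for t in not_tags)
        if not_tags_any:
            ok = ok and not any(t in s for t in not_tags_any)
        return ok

    assert matches(['test_tag1', 'test_tag2'], tags=['test_tag2'])
    assert not matches(['test_tag4', 'test_tag5'],
                       not_tags_any=['test_tag2', 'test_tag4'])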
+
+    def test_empty_get_all(self):
+        response = self.get(self.members_path).json.get(self.root_tag_list)
+        self.assertIsInstance(response, list)
+        self.assertEqual(0, len(response))
+
+    def test_create_sans_listener(self):
+        api_member = self.create_member(
+            self.pool_id, '192.0.2.1', 80).get(self.root_tag)
+        self.assertEqual('192.0.2.1', api_member['address'])
+        self.assertEqual(80, api_member['protocol_port'])
+        self.assertIsNotNone(api_member['created_at'])
+        self.assertIsNone(api_member['updated_at'])
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=self.pool_id,
+            member_id=api_member.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.ACTIVE,
+            pool_prov_status=constants.PENDING_UPDATE,
+            member_prov_status=constants.PENDING_CREATE,
+            member_op_status=constants.NO_MONITOR)
+        self.set_lb_status(self.lb_id)
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=self.pool_id, member_id=api_member.get('id'))
+
+    def test_create_authorized(self):
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': self.project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+
+            api_member = self.create_member(
+                self.pool_id, '192.0.2.1', 80,
+                tags=['test_tag']).get(self.root_tag)
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+
+        self.assertEqual('192.0.2.1', api_member['address'])
+        self.assertEqual(80, api_member['protocol_port'])
+        self.assertEqual(['test_tag'], api_member['tags'])
+        self.assertIsNotNone(api_member['created_at'])
+        self.assertIsNone(api_member['updated_at'])
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=self.pool_id,
+            member_id=api_member.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.ACTIVE,
+            pool_prov_status=constants.PENDING_UPDATE,
+            member_prov_status=constants.PENDING_CREATE,
+            member_op_status=constants.NO_MONITOR)
+        self.set_lb_status(self.lb_id)
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=self.pool_id, member_id=api_member.get('id'))
+
+    def test_create_not_authorized(self):
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+
+        test_context = octavia.common.context.RequestContext(
+            project_id=self.project_id)
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            api_member = self.create_member(
+                self.pool_id, '192.0.2.1', 80, status=403)
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_member)
+
+    def test_create_pool_in_error(self):
+        project_id = uuidutils.generate_uuid()
+        lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1',
+                                        project_id=project_id)
+        lb1_id = lb1.get('loadbalancer').get('id')
+        self.set_lb_status(lb1_id)
+        pool1 = self.create_pool(
+            lb1_id, constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN).get('pool')
+        pool1_id = pool1.get('id')
+        self.set_lb_status(lb1_id)
+        self.set_object_status(self.pool_repo, pool1_id,
+                               provisioning_status=constants.ERROR)
+        api_member = self.create_member(pool1_id, '192.0.2.1', 80, status=409)
+        ref_msg = f'Pool {pool1_id} is immutable and cannot be updated.'
+        self.assertEqual(ref_msg, api_member.get('faultstring'))
+
+    # TODO(rm_work) Remove after deprecation of project_id in POST (R series)
+    def test_create_with_project_id_is_ignored(self):
+        pid = uuidutils.generate_uuid()
+        api_member = self.create_member(
+            self.pool_id, '192.0.2.1', 80, project_id=pid).get(self.root_tag)
+        self.assertEqual(self.project_id, api_member['project_id'])
+
+    def test_create_backup(self):
+        api_member = self.create_member(
+            self.pool_id, '192.0.2.1', 80, backup=True).get(self.root_tag)
+        self.assertTrue(api_member['backup'])
+        self.set_lb_status(self.lb_id)
+        api_member = self.create_member(
+            self.pool_id, '192.0.2.1', 81, backup=False).get(self.root_tag)
+        self.assertFalse(api_member['backup'])
+
+    def test_bad_create(self):
+        member = {'name': 'test1'}
+        self.post(self.members_path, self._build_body(member), status=400)
+
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_create_with_bad_provider(self, mock_provider):
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
+        response = self.create_member(self.pool_id, '192.0.2.1', 80,
+                                      status=500)
+        self.assertIn('Provider \'bad_driver\' reports error: broken',
+                      response.get('faultstring'))
+
+    def test_create_with_sriov(self):
+        flavor_profile = self.create_flavor_profile(
+            'sriov-member-create', 'noop_driver',
+            f'{{"{constants.ALLOW_MEMBER_SRIOV}": true}}')
+
+        flavor = self.create_flavor('sriov-member-create', '',
+                                    flavor_profile['id'], True)
+
+        vip_subnet_id = uuidutils.generate_uuid()
+        lb = self.create_load_balancer(vip_subnet_id, flavor_id=flavor['id'])
+        lb_id = lb.get('loadbalancer').get('id')
+        self.set_lb_status(lb_id)
+
+        listener = self.create_listener(
+            constants.PROTOCOL_HTTP, 80,
+            lb_id=lb_id)
+        listener_id = listener.get('listener').get('id')
+        self.set_lb_status(lb_id)
+
+        pool_with_listener = self.create_pool(
+            lb_id, constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN, listener_id=listener_id)
+        pool_with_listener_id = pool_with_listener.get('pool').get('id')
+        self.set_lb_status(lb_id)
+
+        api_member = self.create_member(
+            pool_with_listener_id, '192.0.2.1',
+            80, request_sriov=True).get(self.root_tag)
+        self.assertEqual(constants.VNIC_TYPE_DIRECT,
+                         api_member[constants.VNIC_TYPE])
+
+    def test_create_with_sriov_disabled(self):
+        # Test with no flavor enabling SR-IOV members
+        response = self.create_member(
+            self.pool_id, '192.0.2.1',
+            80, request_sriov=True, status=400)
+        self.assertIn('flavor does not allow SR-IOV member ports',
+                      response.get('faultstring'))
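
Reviewer note: SR-IOV member ports are gated by the flavor: the flavor profile metadata must set constants.ALLOW_MEMBER_SRIOV (built inline above as an f-string), otherwise request_sriov=True is rejected with a 400. The gating document, assuming the constant resolves to the literal key shown:

    import json

    # Assumed literal value of constants.ALLOW_MEMBER_SRIOV.
    flavor_profile_data = json.dumps({'allow_member_sriov': True})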
+
+    @mock.patch('octavia.db.repositories.FlavorRepository.'
+                'get_flavor_metadata_dict')
+    def test_create_with_sriov_missing_flavor(self, mock_get_flavor):
+        mock_get_flavor.side_effect = sa_exception.NoResultFound()
+        response = self.create_member(
+            self.pool_id, '192.0.2.1',
+            80, request_sriov=True, status=400)
+        self.assertIn('flavor does not allow SR-IOV member ports',
+                      response.get('faultstring'))
+
+    @mock.patch('octavia.api.drivers.driver_factory.get_driver')
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_full_batch_members(self, mock_provider, mock_get_driver):
+        mock_driver = mock.MagicMock()
+        mock_driver.name = 'noop_driver'
+        mock_get_driver.return_value = mock_driver
+
+        member1 = {'address': '192.0.2.1', 'protocol_port': 80,
+                   'project_id': self.project_id}
+        member2 = {'address': '192.0.2.2', 'protocol_port': 80,
+                   'project_id': self.project_id}
+        member3 = {'address': '192.0.2.3', 'protocol_port': 80,
+                   'project_id': self.project_id}
+        member4 = {'address': '192.0.2.4', 'protocol_port': 80,
+                   'project_id': self.project_id}
+        member5 = {'address': '192.0.2.5', 'protocol_port': 80,
+                   'project_id': self.project_id}
+        member6 = {'address': '192.0.2.6', 'protocol_port': 80,
+                   'project_id': self.project_id}
+        members = [member1, member2, member3, member4]
+        for m in members:
+            self.create_member(pool_id=self.pool_id, **m)
+            self.set_lb_status(self.lb_id)
+
+        # We are only concerned about the batch update, so clear out the
+        # create members calls above.
+        mock_provider.reset_mock()
+
+        req_dict = [member1, member2, member5, member6]
+        body = {self.root_tag_list: req_dict}
+        path = self.MEMBERS_PATH.format(pool_id=self.pool_id)
+        self.put(path, body, status=202)
+        returned_members = self.get(
+            self.MEMBERS_PATH.format(pool_id=self.pool_id)
+        ).json.get(self.root_tag_list)
+
+        expected_members = [
+            ('192.0.2.1', 80, 'PENDING_UPDATE'),
+            ('192.0.2.2', 80, 'PENDING_UPDATE'),
+            ('192.0.2.3', 80, 'PENDING_DELETE'),
+            ('192.0.2.4', 80, 'PENDING_DELETE'),
+            ('192.0.2.5', 80, 'PENDING_CREATE'),
+            ('192.0.2.6', 80, 'PENDING_CREATE'),
+        ]
+
+        provider_creates = []
+        provider_updates = []
+        for rm in returned_members:
+            self.assertIn(
+                (rm['address'],
+                 rm['protocol_port'],
+                 rm['provisioning_status']), expected_members)
+
+            provider_dict = driver_utils.member_dict_to_provider_dict(rm)
+            # Adjust for API response
+            provider_dict['pool_id'] = self.pool_id
+            if rm['provisioning_status'] == 'PENDING_UPDATE':
+                del provider_dict['name']
+                del provider_dict['subnet_id']
+                provider_updates.append(driver_dm.Member(**provider_dict))
+            elif rm['provisioning_status'] == 'PENDING_CREATE':
+                provider_dict['name'] = None
+                provider_creates.append(driver_dm.Member(**provider_dict))
+        # Order matters here
+        provider_creates += provider_updates
+
+        mock_provider.assert_called_once_with('noop_driver',
+                                              mock_driver.member_batch_update,
+                                              self.pool_id, provider_creates)
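
Reviewer note: the full batch PUT is declarative -- the request body is the desired member set, keyed by (address, protocol_port): present-and-known becomes an update, present-and-new a create, and anything omitted a delete, matching the six expected statuses above. The diff computation in miniature (a sketch, not Octavia's code):

    def batch_diff(existing, requested):
        old = {(m['address'], m['protocol_port']): m for m in existing}
        new = {(m['address'], m['protocol_port']): m for m in requested}
        creates = [new[k] for k in new.keys() - old.keys()]
        updates = [new[k] for k in new.keys() & old.keys()]
        deletes = [old[k] for k in old.keys() - new.keys()]
        return creates, updates, deletes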
+
+    @mock.patch('octavia.api.drivers.driver_factory.get_driver')
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_create_batch_members(self, mock_provider, mock_get_driver):
+        mock_driver = mock.MagicMock()
+        mock_driver.name = 'noop_driver'
+        mock_get_driver.return_value = mock_driver
+
+        member5 = {'address': '192.0.2.5', 'protocol_port': 80,
+                   'tags': ['test_tag1']}
+        member6 = {'address': '192.0.2.6', 'protocol_port': 80,
+                   'tags': ['test_tag2']}
+
+        req_dict = [member5, member6]
+        body = {self.root_tag_list: req_dict}
+        path = self.MEMBERS_PATH.format(pool_id=self.pool_id)
+        self.put(path, body, status=202)
+        returned_members = self.get(
+            self.MEMBERS_PATH.format(pool_id=self.pool_id)
+        ).json.get(self.root_tag_list)
+
+        expected_members = [
+            ('192.0.2.5', 80, 'PENDING_CREATE', ['test_tag1']),
+            ('192.0.2.6', 80, 'PENDING_CREATE', ['test_tag2']),
+        ]
+
+        provider_members = []
+        for rm in returned_members:
+            self.assertIn(
+                (rm['address'],
+                 rm['protocol_port'],
+                 rm['provisioning_status'],
+                 rm['tags']), expected_members)
+
+            provider_dict = driver_utils.member_dict_to_provider_dict(rm)
+            # Adjust for API response
+            provider_dict['pool_id'] = self.pool_id
+            provider_dict['name'] = None
+            provider_members.append(driver_dm.Member(**provider_dict))
+
+        mock_provider.assert_called_once_with('noop_driver',
+                                              mock_driver.member_batch_update,
+                                              self.pool_id, provider_members)
+
+    def test_create_batch_members_with_bad_subnet(self):
+        subnet_id = uuidutils.generate_uuid()
+        member5 = {'address': '10.0.0.5',
+                   'protocol_port': 80,
+                   'subnet_id': subnet_id}
+
+        req_dict = [member5]
+        body = {self.root_tag_list: req_dict}
+        path = self.MEMBERS_PATH.format(pool_id=self.pool_id)
+
+        with mock.patch(
+                'octavia.common.utils.get_network_driver') as net_mock:
+            net_mock.return_value.get_subnet = mock.Mock(
+                side_effect=network_base.SubnetNotFound('Subnet not found'))
+            response = self.put(path, body, status=400).json
+            err_msg = 'Subnet ' + subnet_id + ' not found.'
+            self.assertEqual(response.get('faultstring'), err_msg)
+
+    def test_create_batch_members_with_invalid_address(self):
+        # 169.254.169.254 is the default invalid member address
+        member5 = {'address': '169.254.169.254',
+                   'protocol_port': 80}
+
+        req_dict = [member5]
+        body = {self.root_tag_list: req_dict}
+        path = self.MEMBERS_PATH.format(pool_id=self.pool_id)
+
+        response = self.put(path, body, status=400).json
+        err_msg = ("169.254.169.254 is not a valid option for member address")
+        self.assertEqual(err_msg, response.get('faultstring'))
+ mock_provider.reset_mock() + + req_dict = [member1, member2, member5, member6] + body = {self.root_tag_list: req_dict} + path = self.MEMBERS_PATH.format(pool_id=self.pool_id) + path = f"{path}?additive_only=True" + self.put(path, body, status=202) + returned_members = self.get( + self.MEMBERS_PATH.format(pool_id=self.pool_id) + ).json.get(self.root_tag_list) + + # Members 1+2 should be updated, 3+4 left alone, and 5+6 created + expected_members = [ + ('192.0.2.1', 80, 'PENDING_UPDATE'), + ('192.0.2.2', 80, 'PENDING_UPDATE'), + ('192.0.2.3', 80, 'ACTIVE'), + ('192.0.2.4', 80, 'ACTIVE'), + ('192.0.2.5', 80, 'PENDING_CREATE'), + ('192.0.2.6', 80, 'PENDING_CREATE'), + ] + + provider_creates = [] + provider_updates = [] + provider_ignored = [] + for rm in returned_members: + self.assertIn( + (rm['address'], + rm['protocol_port'], + rm['provisioning_status']), expected_members) + + provider_dict = driver_utils.member_dict_to_provider_dict(rm) + # Adjust for API response + provider_dict['pool_id'] = self.pool_id + if rm['provisioning_status'] == 'PENDING_UPDATE': + del provider_dict['name'] + del provider_dict['subnet_id'] + provider_updates.append(driver_dm.Member(**provider_dict)) + elif rm['provisioning_status'] == 'PENDING_CREATE': + provider_dict['name'] = None + provider_creates.append(driver_dm.Member(**provider_dict)) + elif rm['provisioning_status'] == 'ACTIVE': + provider_dict['name'] = None + provider_ignored.append(driver_dm.Member(**provider_dict)) + # Order matters here + provider_creates += provider_updates + provider_creates += provider_ignored + + mock_provider.assert_called_once_with('noop_driver', + mock_driver.member_batch_update, + self.pool_id, provider_creates) + + @mock.patch('octavia.api.drivers.driver_factory.get_driver') + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_batch_members(self, mock_provider, mock_get_driver): + mock_driver = mock.MagicMock() + mock_driver.name = 'noop_driver' + mock_get_driver.return_value = mock_driver + + member1 = {'address': '192.0.2.1', 'protocol_port': 80, + 'project_id': self.project_id} + member2 = {'address': '192.0.2.2', 'protocol_port': 80, + 'project_id': self.project_id} + members = [member1, member2] + for m in members: + self.create_member(pool_id=self.pool_id, **m) + self.set_lb_status(self.lb_id) + + # We are only concerned about the batch update, so clear out the + # create members calls above. 
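+ # NOTE: The PUT body below contains exactly the members that already
+ # exist, so the batch update should yield only PENDING_UPDATE
+ # transitions -- no creates and no deletes.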
+ mock_provider.reset_mock() + + req_dict = [member1, member2] + body = {self.root_tag_list: req_dict} + path = self.MEMBERS_PATH.format(pool_id=self.pool_id) + self.put(path, body, status=202) + returned_members = self.get( + self.MEMBERS_PATH.format(pool_id=self.pool_id) + ).json.get(self.root_tag_list) + + expected_members = [ + ('192.0.2.1', 80, 'PENDING_UPDATE'), + ('192.0.2.2', 80, 'PENDING_UPDATE'), + ] + + provider_members = [] + for rm in returned_members: + self.assertIn( + (rm['address'], + rm['protocol_port'], + rm['provisioning_status']), expected_members) + + provider_dict = driver_utils.member_dict_to_provider_dict(rm) + # Adjust for API response + provider_dict['pool_id'] = self.pool_id + del provider_dict['name'] + del provider_dict['subnet_id'] + provider_members.append(driver_dm.Member(**provider_dict)) + + mock_provider.assert_called_once_with('noop_driver', + mock_driver.member_batch_update, + self.pool_id, provider_members) + + @mock.patch('octavia.api.drivers.driver_factory.get_driver') + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_members_subnet_duplicate( + self, mock_provider, mock_get_driver): + mock_driver = mock.MagicMock() + mock_driver.name = 'noop_driver' + mock_get_driver.return_value = mock_driver + subnet_id = uuidutils.generate_uuid() + + member1 = {'address': '192.0.2.1', 'protocol_port': 80, + 'project_id': self.project_id, 'subnet_id': subnet_id} + member2 = {'address': '192.0.2.2', 'protocol_port': 80, + 'project_id': self.project_id, 'subnet_id': subnet_id} + + req_dict = [member1, member2] + body = {self.root_tag_list: req_dict} + path = self.MEMBERS_PATH.format(pool_id=self.pool_id) + with mock.patch("octavia.common.validate." + "subnet_exists") as m_subnet_exists: + m_subnet_exists.return_value = True + self.put(path, body, status=202) + m_subnet_exists.assert_called_once_with( + member1['subnet_id'], context=mock.ANY) + + @mock.patch('octavia.api.drivers.driver_factory.get_driver') + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_members_member_duplicate( + self, mock_provider, mock_get_driver): + mock_driver = mock.MagicMock() + mock_driver.name = 'noop_driver' + mock_get_driver.return_value = mock_driver + subnet_id = uuidutils.generate_uuid() + + member1 = {'address': '192.0.2.1', 'protocol_port': 80, + 'project_id': self.project_id, 'subnet_id': subnet_id} + + req_dict = [member1] + body = {self.root_tag_list: req_dict} + path = self.MEMBERS_PATH.format(pool_id=self.pool_id) + self.put(path, body, status=202) + + self.set_lb_status(self.lb_id) + + # Same member (same address and protocol_port) updated twice in the + # same PUT request + member1 = {'address': '192.0.2.1', 'protocol_port': 80, + 'project_id': self.project_id, 'subnet_id': subnet_id, + 'name': 'member1'} + member2 = {'address': '192.0.2.1', 'protocol_port': 80, + 'project_id': self.project_id, 'subnet_id': subnet_id, + 'name': 'member2'} + + req_dict = [member1, member2] + body = {self.root_tag_list: req_dict} + self.put(path, body, status=400) + + @mock.patch('octavia.api.drivers.driver_factory.get_driver') + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_members_subnet_not_found( + self, mock_provider, mock_get_driver): + mock_driver = mock.MagicMock() + mock_driver.name = 'noop_driver' + mock_get_driver.return_value = mock_driver + fake_subnet_id = uuidutils.generate_uuid() + + member1 = {'address': '192.0.2.1', 'protocol_port': 80, + 'project_id': self.project_id, 'subnet_id': fake_subnet_id} + 
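+ # NOTE: subnet_exists is mocked to return False below, so subnet
+ # validation fails and the batch PUT is rejected with 404 for the
+ # unknown subnet_id.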
+ req_dict = [member1] + body = {self.root_tag_list: req_dict} + path = self.MEMBERS_PATH.format(pool_id=self.pool_id) + with mock.patch("octavia.common.validate." + "subnet_exists") as m_subnet_exists: + m_subnet_exists.return_value = False + self.put(path, body, status=404) + + @mock.patch('octavia.api.drivers.driver_factory.get_driver') + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_delete_batch_members(self, mock_provider, mock_get_driver): + mock_driver = mock.MagicMock() + mock_driver.name = 'noop_driver' + mock_get_driver.return_value = mock_driver + + member3 = {'address': '192.0.2.3', 'protocol_port': 80} + member4 = {'address': '192.0.2.4', 'protocol_port': 80} + members = [member3, member4] + for m in members: + self.create_member(pool_id=self.pool_id, **m) + self.set_lb_status(self.lb_id) + + # We are only concerned about the batch update, so clear out the + # create members calls above. + mock_provider.reset_mock() + + req_dict = [] + body = {self.root_tag_list: req_dict} + path = self.MEMBERS_PATH.format(pool_id=self.pool_id) + self.put(path, body, status=202) + returned_members = self.get( + self.MEMBERS_PATH.format(pool_id=self.pool_id) + ).json.get(self.root_tag_list) + + expected_members = [ + ('192.0.2.3', 80, 'PENDING_DELETE'), + ('192.0.2.4', 80, 'PENDING_DELETE'), + ] + + provider_members = [] + for rm in returned_members: + self.assertIn( + (rm['address'], + rm['protocol_port'], + rm['provisioning_status']), expected_members) + + mock_provider.assert_called_once_with('noop_driver', + mock_driver.member_batch_update, + self.pool_id, provider_members) + + @mock.patch('octavia.api.drivers.driver_factory.get_driver') + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_delete_batch_members_already_empty(self, mock_provider, + mock_get_driver): + mock_driver = mock.MagicMock() + mock_driver.name = 'noop_driver' + mock_get_driver.return_value = mock_driver + + req_dict = [] + body = {self.root_tag_list: req_dict} + path = self.MEMBERS_PATH.format(pool_id=self.pool_id) + self.put(path, body, status=202) + returned_members = self.get( + self.MEMBERS_PATH.format(pool_id=self.pool_id) + ).json.get(self.root_tag_list) + + self.assertEqual([], returned_members) + + mock_provider.assert_not_called() + + def test_create_with_attached_listener(self): + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) + self.assertEqual('192.0.2.1', api_member['address']) + self.assertEqual(80, api_member['protocol_port']) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + member_prov_status=constants.PENDING_CREATE, + member_op_status=constants.NO_MONITOR) + self.set_lb_status(self.lb_id) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=api_member.get('id')) + + def test_create_with_monitor_address_and_port(self): + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80, + monitor_address='192.0.2.3', + monitor_port=80).get(self.root_tag) + self.assertEqual('192.0.2.1', api_member['address']) + self.assertEqual(80, api_member['protocol_port']) + self.assertEqual('192.0.2.3', api_member['monitor_address']) + self.assertEqual(80, api_member['monitor_port']) + 
self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + member_prov_status=constants.PENDING_CREATE, + member_op_status=constants.NO_MONITOR) + self.set_lb_status(self.lb_id) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=api_member.get('id')) + + def test_create_with_health_monitor(self): + self.create_health_monitor(self.pool_with_listener_id, + constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1) + self.set_lb_status(self.lb_id) + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + member_prov_status=constants.PENDING_CREATE, + member_op_status=constants.OFFLINE) + + def test_duplicate_create(self): + member = {'address': '192.0.2.1', 'protocol_port': 80, + 'project_id': self.project_id} + self.post(self.members_path, self._build_body(member)) + self.set_lb_status(self.lb_id) + self.post(self.members_path, self._build_body(member), status=409) + + def test_create_with_bad_subnet(self): + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_subnet = mock.Mock( + side_effect=network_base.SubnetNotFound('Subnet not found')) + subnet_id = uuidutils.generate_uuid() + response = self.create_member(self.pool_id, '192.0.2.1', 80, + subnet_id=subnet_id, status=400) + err_msg = 'Subnet ' + subnet_id + ' not found.' 
+ self.assertEqual(response.get('faultstring'), err_msg) + + def test_create_with_valid_subnet(self): + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + subnet_id = uuidutils.generate_uuid() + net_mock.return_value.get_subnet.return_value = subnet_id + response = self.create_member( + self.pool_id, '192.0.2.1', 80, + subnet_id=subnet_id).get(self.root_tag) + self.assertEqual('192.0.2.1', response['address']) + self.assertEqual(80, response['protocol_port']) + self.assertEqual(subnet_id, response['subnet_id']) + + def test_create_bad_port_number(self): + member = {'address': '192.0.2.3', + 'protocol_port': constants.MIN_PORT_NUMBER - 1} + resp = self.post(self.members_path, self._build_body(member), + status=400) + self.assertIn('Value should be greater or equal to', + resp.json.get('faultstring')) + member = {'address': '192.0.2.3', + 'protocol_port': constants.MAX_PORT_NUMBER + 1} + resp = self.post(self.members_path, self._build_body(member), + status=400) + self.assertIn('Value should be lower or equal to', + resp.json.get('faultstring')) + + def test_create_over_quota(self): + self.start_quota_mock(data_models.Member) + member = {'address': '192.0.2.3', 'protocol_port': 81} + self.post(self.members_path, self._build_body(member), status=403) + + def test_update_with_attached_listener(self): + old_name = "name1" + new_name = "name2" + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80, + name=old_name).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_member = {'name': new_name} + response = self.put( + self.member_path_listener.format(member_id=api_member.get('id')), + self._build_body(new_member)).json.get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + member_prov_status=constants.PENDING_UPDATE) + self.set_lb_status(self.lb_id) + self.assertEqual(new_name, response.get('name')) + self.assertEqual(api_member.get('created_at'), + response.get('created_at')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=api_member.get('id')) + + def test_update_authorized(self): + old_name = "name1" + new_name = "name2" + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80, + name=old_name, tags=['old_tag']).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_member = {'name': new_name, 'tags': ['new_tag']} + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + member_path = self.member_path_listener.format( + member_id=api_member.get('id')) + response = self.put( + member_path, + self._build_body(new_member)).json.get(self.root_tag) + + 
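# Restore the auth strategy saved at the start of the test so
+ # subsequent tests run with the original configuration. +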
self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + member_prov_status=constants.PENDING_UPDATE) + self.set_lb_status(self.lb_id) + self.assertEqual(new_name, response.get('name')) + self.assertEqual(['new_tag'], response['tags']) + self.assertEqual(api_member.get('created_at'), + response.get('created_at')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=api_member.get('id')) + + def test_update_not_authorized(self): + old_name = "name1" + new_name = "name2" + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80, + name=old_name).get(self.root_tag) + self.set_lb_status(self.lb_id) + new_member = {'name': new_name} + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + member_path = self.member_path_listener.format( + member_id=api_member.get('id')) + response = self.put( + member_path, + self._build_body(new_member), status=403) + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=api_member.get('id'), + lb_prov_status=constants.ACTIVE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.ACTIVE, + member_prov_status=constants.ACTIVE) + + def test_update_sans_listener(self): + old_name = "name1" + new_name = "name2" + api_member = self.create_member( + self.pool_id, '192.0.2.1', 80, name=old_name).get(self.root_tag) + self.set_lb_status(self.lb_id) + member_path = self.member_path.format( + member_id=api_member.get('id')) + new_member = {'name': new_name} + response = self.put( + member_path, self._build_body(new_member)).json.get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id, member_id=api_member.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.PENDING_UPDATE, + member_prov_status=constants.PENDING_UPDATE) + self.set_lb_status(self.lb_id) + self.assertEqual(new_name, response.get('name')) + self.assertEqual(api_member.get('created_at'), + response.get('created_at')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_id, member_id=api_member.get('id')) + + def test_bad_update(self): + api_member = self.create_member( + self.pool_id, '192.0.2.1', 80).get(self.root_tag) + new_member = {'protocol_port': 'ten'} + self.put(self.member_path.format(member_id=api_member.get('id')), + self._build_body(new_member), status=400) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_update_with_bad_provider(self, mock_provider): + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80, + 
name="member1").get(self.root_tag) + self.set_lb_status(self.lb_id) + new_member = {'name': "member2"} + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + response = self.put(self.member_path_listener.format( + member_id=api_member.get('id')), self._build_body(new_member), + status=500) + self.assertIn('Provider \'bad_driver\' reports error: broken', + response.json.get('faultstring')) + + def test_update_unset_defaults(self): + old_name = "name1" + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80, + name=old_name, backup=True, monitor_address='192.0.2.2', + monitor_port=8888, weight=10).get(self.root_tag) + self.set_lb_status(self.lb_id) + unset_params = {'name': None, 'backup': None, 'monitor_address': None, + 'monitor_port': None, 'weight': None} + member_path = self.member_path_listener.format( + member_id=api_member.get('id')) + response = self.put(member_path, self._build_body(unset_params)) + response = response.json.get(self.root_tag) + + self.assertFalse(response['backup']) + self.assertIsNone(response['monitor_address']) + self.assertIsNone(response['monitor_port']) + self.assertEqual('', response['name']) + self.assertEqual(constants.DEFAULT_WEIGHT, response['weight']) + + def test_delete(self): + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) + self.set_lb_status(self.lb_id) + member = self.get(self.member_path_listener.format( + member_id=api_member.get('id'))).json.get(self.root_tag) + api_member['provisioning_status'] = constants.ACTIVE + api_member['operating_status'] = constants.ONLINE + self.assertIsNone(api_member.pop('updated_at')) + self.assertIsNotNone(member.pop('updated_at')) + self.assertEqual(api_member, member) + self.delete(self.member_path_listener.format( + member_id=api_member.get('id'))) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=member.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + member_prov_status=constants.PENDING_DELETE) + + self.set_lb_status(self.lb_id) + member = self.get(self.member_path_listener.format( + member_id=api_member.get('id')), status=404) + + def test_delete_authorized(self): + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) + self.set_lb_status(self.lb_id) + member = self.get(self.member_path_listener.format( + member_id=api_member.get('id'))).json.get(self.root_tag) + api_member['provisioning_status'] = constants.ACTIVE + api_member['operating_status'] = constants.ONLINE + self.assertIsNone(api_member.pop('updated_at')) + self.assertIsNotNone(member.pop('updated_at')) + self.assertEqual(api_member, member) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + 
return_value=override_credentials): + self.delete(self.member_path_listener.format( + member_id=api_member.get('id'))) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=member.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE, + member_prov_status=constants.PENDING_DELETE) + + self.set_lb_status(self.lb_id) + member = self.get(self.member_path_listener.format( + member_id=api_member.get('id')), status=404) + + def test_delete_not_authorized(self): + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) + self.set_lb_status(self.lb_id) + member = self.get(self.member_path_listener.format( + member_id=api_member.get('id'))).json.get(self.root_tag) + api_member['provisioning_status'] = constants.ACTIVE + api_member['operating_status'] = constants.ONLINE + self.assertIsNone(api_member.pop('updated_at')) + self.assertIsNotNone(member.pop('updated_at')) + self.assertEqual(api_member, member) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + self.delete(self.member_path_listener.format( + member_id=api_member.get('id')), status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=self.pool_with_listener_id, member_id=member.get('id'), + lb_prov_status=constants.ACTIVE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.ACTIVE, + member_prov_status=constants.ACTIVE) + + def test_bad_delete(self): + self.delete(self.member_path.format( + member_id=uuidutils.generate_uuid()), status=404) + + def test_delete_mismatch_pool(self): + # Create a pool that will not have the member, but is valid. + self.pool = self.create_pool(self.lb_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + bad_pool_id = self.pool.get('pool').get('id') + self.set_lb_status(self.lb_id) + # Create a member on our reference pool + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) + self.set_lb_status(self.lb_id) + # Attempt to delete the member using the wrong pool in the path + member_path = self.MEMBERS_PATH.format( + pool_id=bad_pool_id) + '/' + api_member['id'] + result = self.delete(member_path, status=404).json + ref_msg = f"Member {api_member['id']} not found." 
+ self.assertEqual(ref_msg, result.get('faultstring')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_delete_with_bad_provider(self, mock_provider): + api_member = self.create_member( + self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag) + self.set_lb_status(self.lb_id) + member = self.get(self.member_path_listener.format( + member_id=api_member.get('id'))).json.get(self.root_tag) + api_member['provisioning_status'] = constants.ACTIVE + api_member['operating_status'] = constants.ONLINE + self.assertIsNone(api_member.pop('updated_at')) + self.assertIsNotNone(member.pop('updated_at')) + self.assertEqual(api_member, member) + + mock_provider.side_effect = exceptions.ProviderDriverError( + prov='bad_driver', user_msg='broken') + self.delete(self.member_path_listener.format( + member_id=api_member.get('id')), status=500) + + def test_create_when_lb_pending_update(self): + self.create_member(self.pool_id, address="192.0.2.2", + protocol_port=80) + self.set_lb_status(self.lb_id) + self.put(self.LB_PATH.format(lb_id=self.lb_id), + body={'loadbalancer': {'name': 'test_name_change'}}) + member = {'address': '192.0.2.1', 'protocol_port': 80, + 'project_id': self.project_id} + self.post(self.members_path, + body=self._build_body(member), + status=409) + + def test_update_when_lb_pending_update(self): + member = self.create_member( + self.pool_id, address="192.0.2.1", protocol_port=80, + name="member1").get(self.root_tag) + self.set_lb_status(self.lb_id) + self.put(self.LB_PATH.format(lb_id=self.lb_id), + body={'loadbalancer': {'name': 'test_name_change'}}) + self.put( + self.member_path.format(member_id=member.get('id')), + body=self._build_body({'name': "member2"}), status=409) + + def test_delete_when_lb_pending_update(self): + member = self.create_member( + self.pool_id, address="192.0.2.1", + protocol_port=80).get(self.root_tag) + self.set_lb_status(self.lb_id) + self.put(self.LB_PATH.format(lb_id=self.lb_id), + body={'loadbalancer': {'name': 'test_name_change'}}) + self.delete(self.member_path.format( + member_id=member.get('id')), status=409) + + def test_create_when_lb_pending_delete(self): + self.create_member(self.pool_id, address="192.0.2.1", + protocol_port=80) + self.set_lb_status(self.lb_id) + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + member = {'address': '192.0.2.2', 'protocol_port': 88, + 'project_id': self.project_id} + self.post(self.members_path, body=self._build_body(member), + status=409) + + def test_update_when_lb_pending_delete(self): + member = self.create_member( + self.pool_id, address="192.0.2.1", protocol_port=80, + name="member1").get(self.root_tag) + self.set_lb_status(self.lb_id) + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + self.put(self.member_path.format(member_id=member.get('id')), + body=self._build_body({'name': "member2"}), status=409) + + def test_update_when_deleted(self): + member = self.create_member( + self.pool_id, address="10.0.0.1", + protocol_port=80).get(self.root_tag) + self.set_lb_status(self.lb_id, status=constants.DELETED) + self.put(self.member_path.format(member_id=member.get('id')), + body=self._build_body({'name': "member2"}), status=404) + + def test_delete_when_lb_pending_delete(self): + member = self.create_member( + self.pool_id, address="192.0.2.1", + protocol_port=80).get(self.root_tag) + self.set_lb_status(self.lb_id) + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + 
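# NOTE: The cascade delete above leaves the load balancer in
+ # PENDING_DELETE, an immutable state, so the member delete below is
+ # expected to be rejected with 409 Conflict. +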
self.delete(self.member_path.format( + member_id=member.get('id')), status=409) + + def test_delete_already_deleted(self): + member = self.create_member( + self.pool_id, address="192.0.2.1", + protocol_port=80).get(self.root_tag) + self.set_lb_status(self.lb_id, status=constants.DELETED) + self.delete(self.member_path.format( + member_id=member.get('id')), status=404) diff --git a/octavia/tests/functional/api/v2/test_pool.py b/octavia/tests/functional/api/v2/test_pool.py new file mode 100644 index 0000000000..75524a969c --- /dev/null +++ b/octavia/tests/functional/api/v2/test_pool.py @@ -0,0 +1,2729 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from octavia_lib.common import constants as lib_constants +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +import octavia.common.context +from octavia.common import data_models +from octavia.common import exceptions +from octavia.db import api as db_api +from octavia.tests.common import constants as c_const +from octavia.tests.common import sample_certs +from octavia.tests.functional.api.v2 import base + + +class TestPool(base.BaseAPITest): + + root_tag = 'pool' + root_tag_list = 'pools' + root_tag_links = 'pools_links' + + def setUp(self): + super().setUp() + + self.lb = self.create_load_balancer( + uuidutils.generate_uuid()).get('loadbalancer') + self.lb_id = self.lb.get('id') + self.project_id = self.lb.get('project_id') + + self.set_lb_status(self.lb_id) + + self.listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, + self.lb_id).get('listener') + self.listener_id = self.listener.get('id') + + self.set_lb_status(self.lb_id) + self._setup_udp_lb_resources() + + def _setup_udp_lb_resources(self): + self.udp_lb = self.create_load_balancer(uuidutils.generate_uuid()).get( + 'loadbalancer') + self.udp_lb_id = self.udp_lb.get('id') + self.set_lb_status(self.udp_lb_id) + + self.udp_listener = self.create_listener( + constants.PROTOCOL_UDP, 8888, + self.udp_lb_id).get('listener') + self.udp_listener_id = self.udp_listener.get('id') + self.set_lb_status(self.udp_lb_id) + + def test_get(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id, tags=['test_tag']).get(self.root_tag) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_pool['provisioning_status'] = constants.ACTIVE + api_pool['operating_status'] = constants.ONLINE + api_pool.pop('updated_at') + self.set_lb_status(lb_id=self.lb_id) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + response.pop('updated_at') + self.assertEqual(api_pool, response) + + def test_get_authorized(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + # Set status to 
ACTIVE/ONLINE because set_lb_status did it in the db + api_pool['provisioning_status'] = constants.ACTIVE + api_pool['operating_status'] = constants.ONLINE + api_pool.pop('updated_at') + self.set_lb_status(lb_id=self.lb_id) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + response.pop('updated_at') + self.assertEqual(api_pool, response) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_get_not_authorized(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_pool['provisioning_status'] = constants.ACTIVE + api_pool['operating_status'] = constants.ONLINE + api_pool.pop('updated_at') + self.set_lb_status(lb_id=self.lb_id) + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id')), status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_get_deleted_gives_404(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + + self.set_object_status(self.pool_repo, api_pool.get('id'), + provisioning_status=constants.DELETED) + self.get(self.POOL_PATH.format(pool_id=api_pool.get('id')), status=404) + + def test_bad_get(self): + self.get(self.POOL_PATH.format(pool_id=uuidutils.generate_uuid()), + status=404) + + def test_get_all(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id, tags=['test_tag']).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + pools = self.get(self.POOLS_PATH).json.get(self.root_tag_list) + self.assertIsInstance(pools, list) + self.assertEqual(1, len(pools)) + self.assertEqual(api_pool.get('id'), pools[0].get('id')) + self.assertEqual(['test_tag'], pools[0]['tags']) + + def test_get_all_hides_deleted(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + + response = self.get(self.POOLS_PATH) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 1) + 
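# Flip the pool's provisioning_status to DELETED directly in the
+ # database; the list call below should no longer include it. +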
self.set_object_status(self.pool_repo, api_pool.get('id'), + provisioning_status=constants.DELETED) + response = self.get(self.POOLS_PATH) + objects = response.json.get(self.root_tag_list) + self.assertEqual(len(objects), 0) + + def test_get_all_admin(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + pool2 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + pool3 = self.create_pool( + lb1_id, constants.PROTOCOL_TCP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + pools = self.get(self.POOLS_PATH).json.get(self.root_tag_list) + self.assertEqual(3, len(pools)) + pool_id_protocols = [(p.get('id'), p.get('protocol')) for p in pools] + self.assertIn((pool1.get('id'), pool1.get('protocol')), + pool_id_protocols) + self.assertIn((pool2.get('id'), pool2.get('protocol')), + pool_id_protocols) + self.assertIn((pool3.get('id'), pool3.get('protocol')), + pool_id_protocols) + + def test_get_all_non_admin(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + pool3 = self.create_pool( + self.lb_id, constants.PROTOCOL_TCP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=self.project_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + pools = self.get(self.POOLS_PATH).json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assertEqual(1, len(pools)) + pool_id_protocols = [(p.get('id'), p.get('protocol')) for p in pools] + self.assertIn((pool3.get('id'), pool3.get('protocol')), + pool_id_protocols) + + def test_get_all_unscoped_token(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + 
self.set_lb_status(lb1_id) + self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_pool( + self.lb_id, constants.PROTOCOL_TCP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.POOLS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + + def test_get_all_non_admin_global_observer(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + pool2 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + pool3 = self.create_pool( + lb1_id, constants.PROTOCOL_TCP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['admin'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + pools = self.get(self.POOLS_PATH).json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assertEqual(3, len(pools)) + pool_id_protocols = [(p.get('id'), p.get('protocol')) for p in pools] + self.assertIn((pool1.get('id'), pool1.get('protocol')), + pool_id_protocols) + self.assertIn((pool2.get('id'), pool2.get('protocol')), + pool_id_protocols) + self.assertIn((pool3.get('id'), pool3.get('protocol')), + pool_id_protocols) + + def test_get_all_not_authorized(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_pool( + lb1_id, constants.PROTOCOL_TCP, + 
constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + pools = self.get(self.POOLS_PATH, status=403).json + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, pools) + + def test_get_by_project_id(self): + project1_id = uuidutils.generate_uuid() + project2_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project1_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + lb2 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb2', + project_id=project2_id) + lb2_id = lb2.get('loadbalancer').get('id') + self.set_lb_status(lb2_id) + pool1 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + pool2 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + pool3 = self.create_pool( + lb2_id, constants.PROTOCOL_TCP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb2_id) + pools = self.get( + self.POOLS_PATH, + params={'project_id': project1_id}).json.get(self.root_tag_list) + + self.assertEqual(2, len(pools)) + pool_id_protocols = [(p.get('id'), p.get('protocol')) for p in pools] + self.assertIn((pool1.get('id'), pool1.get('protocol')), + pool_id_protocols) + self.assertIn((pool2.get('id'), pool2.get('protocol')), + pool_id_protocols) + pools = self.get( + self.POOLS_PATH, + params={'project_id': project2_id}).json.get(self.root_tag_list) + self.assertEqual(1, len(pools)) + pool_id_protocols = [(p.get('id'), p.get('protocol')) for p in pools] + self.assertIn((pool3.get('id'), pool3.get('protocol')), + pool_id_protocols) + + def test_get_all_with_listener(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + response = self.get(self.POOLS_PATH).json.get(self.root_tag_list) + self.assertIsInstance(response, list) + self.assertEqual(1, len(response)) + self.assertEqual(api_pool.get('id'), response[0].get('id')) + + def test_get_all_sorted(self): + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1') + self.set_lb_status(lb_id=self.lb_id) + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2') + self.set_lb_status(lb_id=self.lb_id) + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3') + self.set_lb_status(lb_id=self.lb_id) + + response = self.get(self.POOLS_PATH, + params={'sort': 'name:desc'}) + pools_desc = response.json.get(self.root_tag_list) + response = self.get(self.POOLS_PATH, + params={'sort': 'name:asc'}) + pools_asc = response.json.get(self.root_tag_list) + + self.assertEqual(3, len(pools_desc)) + self.assertEqual(3, len(pools_asc)) + + pool_id_names_desc = [(pool.get('id'), pool.get('name')) + for pool in pools_desc] + 
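# The ascending list should be the exact mirror of the descending
+ # list, which the reversed() comparison below verifies. +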
pool_id_names_asc = [(pool.get('id'), pool.get('name')) + for pool in pools_asc] + self.assertEqual(pool_id_names_asc, + list(reversed(pool_id_names_desc))) + + def test_get_all_limited(self): + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1') + self.set_lb_status(lb_id=self.lb_id) + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2') + self.set_lb_status(lb_id=self.lb_id) + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3') + self.set_lb_status(lb_id=self.lb_id) + + # First two -- should have 'next' link + first_two = self.get(self.POOLS_PATH, params={'limit': 2}).json + objs = first_two[self.root_tag_list] + links = first_two[self.root_tag_links] + self.assertEqual(2, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('next', links[0]['rel']) + + # Third + off the end -- should have previous link + third = self.get(self.POOLS_PATH, params={ + 'limit': 2, + 'marker': first_two[self.root_tag_list][1]['id']}).json + objs = third[self.root_tag_list] + links = third[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('previous', links[0]['rel']) + + # Middle -- should have both links + middle = self.get(self.POOLS_PATH, params={ + 'limit': 1, + 'marker': first_two[self.root_tag_list][0]['id']}).json + objs = middle[self.root_tag_list] + links = middle[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(2, len(links)) + self.assertCountEqual(['previous', 'next'], + [link['rel'] for link in links]) + + def test_get_all_fields_filter(self): + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1') + self.set_lb_status(lb_id=self.lb_id) + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2') + self.set_lb_status(lb_id=self.lb_id) + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3') + self.set_lb_status(lb_id=self.lb_id) + + pools = self.get(self.POOLS_PATH, params={ + 'fields': ['id', 'project_id']}).json + for pool in pools['pools']: + self.assertIn('id', pool) + self.assertIn('project_id', pool) + self.assertNotIn('description', pool) + + def test_get_one_fields_filter(self): + pool1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1').get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + + pool = self.get( + self.POOL_PATH.format(pool_id=pool1.get('id')), + params={'fields': ['id', 'project_id']}).json.get(self.root_tag) + self.assertIn('id', pool) + self.assertIn('project_id', pool) + self.assertNotIn('description', pool) + + def test_get_all_filter(self): + po1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1').get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + hm = self.create_health_monitor(po1['id'], + constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get('healthmonitor') + self.set_lb_status(lb_id=self.lb_id) + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2').get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + 
name='pool3').get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + + pools = self.get(self.POOLS_PATH, params={ + 'id': po1['id'], 'healthmonitor_id': hm['id']}).json + self.assertEqual(1, len(pools['pools'])) + self.assertEqual(po1['id'], + pools['pools'][0]['id']) + + def test_get_all_tags_filter(self): + po1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1', + tags=['test_tag1', 'test_tag2'] + ).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + po2 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2', + tags=['test_tag2', 'test_tag3'] + ).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + po3 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3', + tags=['test_tag4', 'test_tag5'] + ).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + + pos = self.get( + self.POOLS_PATH, + params={'tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(pos, list) + self.assertEqual(2, len(pos)) + self.assertEqual( + [po1.get('id'), po2.get('id')], + [po.get('id') for po in pos] + ) + + pos = self.get( + self.POOLS_PATH, + params={'tags': ['test_tag2', 'test_tag3']} + ).json.get(self.root_tag_list) + self.assertIsInstance(pos, list) + self.assertEqual(1, len(pos)) + self.assertEqual( + [po2.get('id')], + [po.get('id') for po in pos] + ) + + pos = self.get( + self.POOLS_PATH, + params={'tags-any': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(pos, list) + self.assertEqual(2, len(pos)) + self.assertEqual( + [po1.get('id'), po2.get('id')], + [po.get('id') for po in pos] + ) + + pos = self.get( + self.POOLS_PATH, + params={'not-tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(pos, list) + self.assertEqual(1, len(pos)) + self.assertEqual( + [po3.get('id')], + [po.get('id') for po in pos] + ) + + pos = self.get( + self.POOLS_PATH, + params={'not-tags-any': ['test_tag2', 'test_tag4']} + ).json.get(self.root_tag_list) + self.assertIsInstance(pos, list) + self.assertEqual(0, len(pos)) + + pos = self.get( + self.POOLS_PATH, + params={'tags': 'test_tag2', + 'tags-any': ['test_tag1', 'test_tag3']} + ).json.get(self.root_tag_list) + self.assertIsInstance(pos, list) + self.assertEqual(2, len(pos)) + self.assertEqual( + [po1.get('id'), po2.get('id')], + [po.get('id') for po in pos] + ) + + pos = self.get( + self.POOLS_PATH, + params={'tags': 'test_tag2', 'not-tags': 'test_tag2'} + ).json.get(self.root_tag_list) + self.assertIsInstance(pos, list) + self.assertEqual(0, len(pos)) + + def test_empty_get_all(self): + response = self.get(self.POOLS_PATH).json.get(self.root_tag_list) + self.assertIsInstance(response, list) + self.assertEqual(0, len(response)) + + def test_create(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id, + tags=['test_tag']).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + api_pool.get('lb_algorithm')) + 
self.assertEqual(['test_tag'], api_pool['tags']) + self.assertIsNotNone(api_pool.get('created_at')) + self.assertIsNone(api_pool.get('updated_at')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id')) + + def test_create_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + api_pool.get('lb_algorithm')) + self.assertIsNotNone(api_pool.get('created_at')) + self.assertIsNone(api_pool.get('updated_at')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id')) + + def test_create_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + + test_context = octavia.common.context.RequestContext( + project_id=uuidutils.generate_uuid()) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id, status=403) + + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, api_pool) + + def test_create_with_proxy_protocol(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_PROXY, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.PROTOCOL_PROXY, api_pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + api_pool.get('lb_algorithm')) + self.assertIsNotNone(api_pool.get('created_at')) + self.assertIsNone(api_pool.get('updated_at')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id')) + + def 
test_create_with_proxy_v2_protocol(self): + api_pool = self.create_pool( + self.lb_id, + lib_constants.PROTOCOL_PROXYV2, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(lib_constants.PROTOCOL_PROXYV2, + api_pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + api_pool.get('lb_algorithm')) + self.assertIsNotNone(api_pool.get('created_at')) + self.assertIsNone(api_pool.get('updated_at')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id')) + + def test_create_sans_listener(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + api_pool.get('lb_algorithm')) + # Make sure listener status is unchanged, but LB status is changed. + # LB should still be locked even with pool and subordinate object + # updates. + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.ACTIVE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + + def test_create_sans_loadbalancer_id(self): + api_pool = self.create_pool( + None, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + api_pool.get('lb_algorithm')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + + def test_create_with_listener_id_in_pool_dict(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.PROTOCOL_HTTP, api_pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + api_pool.get('lb_algorithm')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id')) + + def test_create_with_project_id(self): + optionals = { + 'listener_id': self.listener_id, + 'project_id': self.project_id} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals).get(self.root_tag) + self.assertEqual(self.project_id, api_pool.get('project_id')) + + def test_create_udp_case_source_ip(self): + sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + 
"persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'} + api_pool = self.create_pool( + None, + constants.PROTOCOL_UDP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.udp_listener_id, + session_persistence=sp).get(self.root_tag) + self.assertEqual(constants.PROTOCOL_UDP, api_pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + api_pool.get('lb_algorithm')) + self.assertEqual(constants.SESSION_PERSISTENCE_SOURCE_IP, + api_pool.get('session_persistence')['type']) + self.assertEqual(3, api_pool.get( + 'session_persistence')['persistence_timeout']) + self.assertEqual('255.255.255.0', api_pool.get( + 'session_persistence')['persistence_granularity']) + self.assertIsNone(api_pool.get( + 'session_persistence')['cookie_name']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + + def test_create_with_tls_enabled_only(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id, + tls_enabled=True).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertTrue(api_pool.get('tls_enabled')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id')) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_tls_container_ref(self, mock_cert_data): + tls_container_ref = uuidutils.generate_uuid() + pool_cert = data_models.TLSContainer(certificate='pool cert') + mock_cert_data.return_value = {'tls_cert': pool_cert, + 'sni_certs': [], + 'client_ca_cert': None} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id, + tls_container_ref=tls_container_ref).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_CREATE, + pool_op_status=constants.OFFLINE) + self.set_lb_status(self.lb_id) + self.assertEqual(tls_container_ref, api_pool.get('tls_container_ref')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id')) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_create_with_ca_and_crl(self, mock_cert_data): + self.cert_manager_mock().get_secret.side_effect = [ + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL, + sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL] + + ca_tls_container_ref = uuidutils.generate_uuid() + crl_container_ref = uuidutils.generate_uuid() + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id, + ca_tls_container_ref=ca_tls_container_ref, + crl_container_ref=crl_container_ref).get(self.root_tag) + self.assert_correct_status( + lb_id=self.lb_id, 
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_create_with_ca_and_crl(self, mock_cert_data):
+        self.cert_manager_mock().get_secret.side_effect = [
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL]
+
+        ca_tls_container_ref = uuidutils.generate_uuid()
+        crl_container_ref = uuidutils.generate_uuid()
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            ca_tls_container_ref=ca_tls_container_ref,
+            crl_container_ref=crl_container_ref).get(self.root_tag)
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_CREATE,
+            pool_op_status=constants.OFFLINE)
+        self.set_lb_status(self.lb_id)
+        self.assertEqual(ca_tls_container_ref,
+                         api_pool.get('ca_tls_container_ref'))
+        self.assertEqual(crl_container_ref,
+                         api_pool.get('crl_container_ref'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'))
+
+    def test_create_with_bad_tls_container_ref(self):
+        tls_container_ref = uuidutils.generate_uuid()
+        self.cert_manager_mock().get_cert.side_effect = [Exception(
+            "bad cert")]
+        self.cert_manager_mock().get_secret.side_effect = [Exception(
+            "bad secret")]
+        api_pool = self.create_pool(
+            self.lb_id, constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            tls_container_ref=tls_container_ref, status=400)
+        self.assertIn(tls_container_ref, api_pool['faultstring'])
+
+    def test_create_with_bad_ca_tls_container_ref(self):
+        ca_tls_container_ref = uuidutils.generate_uuid()
+        self.cert_manager_mock().get_cert.side_effect = [Exception(
+            "bad ca cert")]
+        self.cert_manager_mock().get_secret.side_effect = [Exception(
+            "bad ca secret")]
+        api_pool = self.create_pool(
+            self.lb_id, constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            ca_tls_container_ref=ca_tls_container_ref, status=400)
+        self.assertIn(ca_tls_container_ref, api_pool['faultstring'])
+
+    def test_create_with_unreachable_crl(self):
+        ca_tls_container_ref = uuidutils.generate_uuid()
+        crl_container_ref = uuidutils.generate_uuid()
+        self.cert_manager_mock().get_cert.side_effect = [
+            'cert 1', Exception('unknown/bad cert')]
+        self.cert_manager_mock().get_secret.side_effect = [Exception(
+            'bad secret')]
+        api_pool = self.create_pool(
+            self.lb_id, constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            ca_tls_container_ref=ca_tls_container_ref,
+            crl_container_ref=crl_container_ref, status=400)
+        self.assertIn(crl_container_ref, api_pool['faultstring'])
+
+    def test_create_with_crl_only(self):
+        crl_container_ref = uuidutils.generate_uuid()
+        api_pool = self.create_pool(
+            self.lb_id, constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            crl_container_ref=crl_container_ref, status=400)
+        self.assertIn(
+            'A CA certificate reference is required to specify a '
+            'revocation list.', api_pool['faultstring'])
+ sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'} + req_dict = { + 'listener_id': self.udp_listener_id, + 'protocol': constants.PROTOCOL_UDP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'session_persistence': None} + for type in [constants.SESSION_PERSISTENCE_HTTP_COOKIE, + constants.SESSION_PERSISTENCE_APP_COOKIE]: + expect_error_msg = ("Validation failure: Session persistence of " + "type %s is not supported for %s protocol " + "pools.") % ( + type, + "/".join((constants.PROTOCOL_UDP, + lib_constants.PROTOCOL_SCTP))) + sp.update({'type': type}) + req_dict['session_persistence'] = sp + res = self.post(self.POOLS_PATH, self._build_body(req_dict), + status=400, + expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) + + # Error create pool with source ip session persistence and wrong + # options. + sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0', + "cookie_name": 'test-cookie-name'} + req_dict = { + 'listener_id': self.udp_listener_id, + 'protocol': constants.PROTOCOL_UDP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'session_persistence': sp} + expect_error_msg = ( + "Validation failure: session_persistence %s type for %s " + "protocols only accepts: type, persistence_timeout, " + "persistence_granularity.") % ( + constants.SESSION_PERSISTENCE_SOURCE_IP, + " and ".join((constants.PROTOCOL_UDP, + lib_constants.PROTOCOL_SCTP))) + res = self.post(self.POOLS_PATH, self._build_body(req_dict), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.udp_lb_id, listener_id=self.udp_listener_id) + + # Error create non-udp pool with udp session persistence + sps = [{"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'}, + {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, + "persistence_timeout": 3, + "persistence_granularity": '255.255.255.0'}] + req_dict = { + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN} + expect_error_msg = ("Validation failure: persistence_timeout and " + "persistence_granularity is only for %s protocol " + "pools.") % ( + " and ".join((constants.PROTOCOL_UDP, + lib_constants.PROTOCOL_SCTP))) + for s in sps: + req_dict.update({'session_persistence': s}) + res = self.post(self.POOLS_PATH, self._build_body(req_dict), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id) + + def test_bad_create(self): + pool = {'name': 'test1'} + self.post(self.POOLS_PATH, self._build_body(pool), status=400) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id) + + def test_create_with_listener_with_default_pool_id_set(self): + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + self.set_lb_status(self.lb_id) + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'project_id': self.project_id} + self.post(self.POOLS_PATH, 
+
+    def test_create_bad_protocol(self):
+        lb_pool = {
+            'loadbalancer_id': self.lb_id,
+            'protocol': 'STUPID_PROTOCOL',
+            'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}
+        self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400)
+
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_create_with_bad_provider(self, mock_provider):
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
+        lb_pool = {
+            'loadbalancer_id': self.lb_id,
+            'protocol': constants.PROTOCOL_HTTP,
+            'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
+            'project_id': self.project_id}
+        response = self.post(self.POOLS_PATH, self._build_body(lb_pool),
+                             status=500)
+        self.assertIn('Provider \'bad_driver\' reports error: broken',
+                      response.json.get('faultstring'))
+
+    def test_create_over_quota(self):
+        self.start_quota_mock(data_models.Pool)
+        lb_pool = {
+            'loadbalancer_id': self.lb_id,
+            'protocol': constants.PROTOCOL_HTTP,
+            'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
+            'project_id': self.project_id}
+        self.post(self.POOLS_PATH, self._build_body(lb_pool), status=403)
+
+    def test_negative_create_prometheus_listener(self):
+        stats_listener = self.create_listener(
+            lib_constants.PROTOCOL_PROMETHEUS, 8123,
+            self.lb_id).get('listener')
+        stats_listener_id = stats_listener.get('id')
+        self.set_lb_status(self.lb_id)
+
+        lb_pool = {
+            'listener_id': stats_listener_id,
+            'protocol': 'HTTP',
+            'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}
+        self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400)
+
+    def test_update(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id, tags=['old_tag']).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        new_pool = {'name': 'new_name', 'tags': ['new_tag']}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual('new_name', response.get('name'))
+        self.assertEqual(['new_tag'], response['tags'])
+        self.assertIsNotNone(response.get('created_at'))
+        self.assertIsNotNone(response.get('updated_at'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    def test_update_authorized(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        new_pool = {'name': 'new_name'}
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': self.project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+
+            self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                     self._build_body(new_pool))
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual('new_name', response.get('name'))
+        self.assertIsNotNone(response.get('created_at'))
+        self.assertIsNotNone(response.get('updated_at'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    def test_update_not_authorized(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        new_pool = {'name': 'new_name'}
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        test_context = octavia.common.context.RequestContext(
+            project_id=uuidutils.generate_uuid())
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            api_pool = self.put(
+                self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                self._build_body(new_pool), status=403)
+
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_pool.json)
+        self.assert_correct_lb_status(self.lb_id, constants.ONLINE,
+                                      constants.ACTIVE)
+
+    def test_update_get_session_persistence_from_db_if_no_request(self):
+        sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP,
+              "persistence_timeout": 3,
+              "persistence_granularity": '255.255.255.0'}
+        optionals = {"listener_id": self.udp_listener_id,
+                     "session_persistence": sp}
+        api_pool = self.create_pool(
+            None,
+            constants.PROTOCOL_UDP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            **optionals).get(self.root_tag)
+        self.set_lb_status(lb_id=self.udp_lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        sess_p = response.get('session_persistence')
+        ty = sess_p.pop('type')
+        sess_p['persistence_timeout'] = 4
+        sess_p['persistence_granularity'] = "255.255.0.0"
+        new_pool = {'session_persistence': sess_p}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        sess_p['type'] = ty
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual(sess_p, response.get('session_persistence'))
+        self.assert_correct_status(
+            listener_id=self.udp_listener_id,
+            pool_id=api_pool.get('id'),
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
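+
+    # NOTE: An update that omits the session persistence 'type' falls
+    # back to the value stored in the database, as the test above shows;
+    # the UDP update test below then re-runs the create-time UDP/SCTP
+    # validation against modified session_persistence settings.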
+
+    def test_update_udp_case_source_ip(self):
+        sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP,
+              "persistence_timeout": 3,
+              "persistence_granularity": '255.255.255.0'}
+        optionals = {"listener_id": self.udp_listener_id,
+                     "session_persistence": sp}
+        api_pool = self.create_pool(
+            None,
+            constants.PROTOCOL_UDP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            **optionals).get(self.root_tag)
+        self.set_lb_status(lb_id=self.udp_lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        sess_p = response.get('session_persistence')
+        sess_p['persistence_timeout'] = 4
+        sess_p['persistence_granularity'] = "255.255.0.0"
+        new_pool = {'session_persistence': sess_p}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual(sess_p, response.get('session_persistence'))
+        self.assert_correct_status(
+            listener_id=self.udp_listener_id,
+            pool_id=api_pool.get('id'),
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+
+        self.set_lb_status(self.udp_lb_id)
+        self.set_object_status(self.pool_repo, api_pool.get('id'))
+        # Negative cases
+        # Error during update pool with non-UDP type and cookie_name.
+        expect_error_msg = (
+            "Validation failure: Cookie names are not supported for %s"
+            " pools.") % ("/".join((constants.PROTOCOL_UDP,
+                                    lib_constants.PROTOCOL_SCTP)))
+        sess_p['type'] = constants.SESSION_PERSISTENCE_HTTP_COOKIE
+        sess_p['cookie_name'] = 'test-cookie-name'
+        new_pool = {'session_persistence': sess_p}
+        res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                       self._build_body(new_pool), status=400,
+                       expect_errors=True)
+        self.assertEqual(expect_error_msg, res.json['faultstring'])
+        self.assert_correct_status(
+            lb_id=self.udp_lb_id, listener_id=self.udp_listener_id)
+
+        # Error during update pool with source ip type and more options.
+        expect_error_msg = (
+            "Validation failure: session_persistence %s type for %s "
+            "protocols only accepts: type, persistence_timeout, "
+            "persistence_granularity.") % (
+            constants.SESSION_PERSISTENCE_SOURCE_IP,
+            " and ".join((constants.PROTOCOL_UDP,
+                          lib_constants.PROTOCOL_SCTP)))
+        sess_p['type'] = constants.SESSION_PERSISTENCE_SOURCE_IP
+        sess_p['cookie_name'] = 'test-cookie-name'
+        sess_p['persistence_timeout'] = 4
+        sess_p['persistence_granularity'] = "255.255.0.0"
+        res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                       self._build_body(new_pool), status=400,
+                       expect_errors=True)
+        self.assertEqual(expect_error_msg, res.json['faultstring'])
+        self.assert_correct_status(
+            lb_id=self.udp_lb_id, listener_id=self.udp_listener_id)
+
+        # Error during update pool with non-UDP session persistence type.
+        sess_p['cookie_name'] = None
+        for ty in [constants.SESSION_PERSISTENCE_APP_COOKIE,
+                   constants.SESSION_PERSISTENCE_HTTP_COOKIE]:
+            expect_error_msg = ("Validation failure: Session persistence of "
+                                "type %s is not supported for %s protocol "
+                                "pools.") % (
+                ty,
+                "/".join((constants.PROTOCOL_UDP,
+                          lib_constants.PROTOCOL_SCTP)))
+            sess_p['type'] = ty
+            res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                           self._build_body(new_pool), status=400,
+                           expect_errors=True)
+            self.assertEqual(expect_error_msg, res.json['faultstring'])
+            self.assert_correct_status(
+                lb_id=self.udp_lb_id, listener_id=self.udp_listener_id)
+
+    def test_update_with_tls_enabled_only(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        self.assertFalse(api_pool['tls_enabled'])
+        new_pool = {'tls_enabled': True}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertTrue(response.get('tls_enabled'))
+        self.assertIsNotNone(response.get('created_at'))
+        self.assertIsNotNone(response.get('updated_at'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_with_tls_enabled_only_on_pool_certs_exist(
+            self, mock_cert_data):
+        tls_container_ref = uuidutils.generate_uuid()
+        ca_tls_container_ref = uuidutils.generate_uuid()
+        crl_container_ref = uuidutils.generate_uuid()
+        pool_cert = data_models.TLSContainer(certificate='pool cert')
+        mock_cert_data.return_value = {'tls_cert': pool_cert,
+                                       'sni_certs': [],
+                                       'client_ca_cert': None}
+        self.cert_manager_mock().get_secret.side_effect = [
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL]
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            tls_container_ref=tls_container_ref,
+            ca_tls_container_ref=ca_tls_container_ref,
+            crl_container_ref=crl_container_ref).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        self.assertFalse(api_pool['tls_enabled'])
+
+        new_pool = {'tls_enabled': True}
+        self.cert_manager_mock().get_cert.reset_mock()
+        self.cert_manager_mock().get_secret.reset_mock()
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertTrue(response.get('tls_enabled'))
+        self.assertIsNotNone(response.get('created_at'))
+        self.assertIsNotNone(response.get('updated_at'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    @mock.patch(
+        'octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_with_tls_container_ref(self, mock_cert_data):
+        tls_container_ref = uuidutils.generate_uuid()
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        new_pool = {'tls_container_ref': tls_container_ref}
+        pool_cert = data_models.TLSContainer(certificate='pool cert')
+        mock_cert_data.return_value = {'tls_cert': pool_cert,
+                                       'sni_certs': [],
+                                       'client_ca_cert': None}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual(tls_container_ref, response.get('tls_container_ref'))
+        self.assertIsNotNone(response.get('created_at'))
+        self.assertIsNotNone(response.get('updated_at'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    def test_update_with_bad_tls_ref(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
+        api_pool['provisioning_status'] = constants.ACTIVE
+        api_pool['operating_status'] = constants.ONLINE
+        api_pool.pop('updated_at')
+
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        response.pop('updated_at')
+        self.assertEqual(api_pool, response)
+
+        tls_uuid = uuidutils.generate_uuid()
+        self.pool_repo.update(db_api.get_session(),
+                              api_pool.get('id'),
+                              tls_certificate_id=tls_uuid)
+        update_data = {'name': 'pool2'}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(update_data))
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual('pool2', response.get('name'))
+
+    def test_bad_update(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_pool = {'enabled': 'one'}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool), status=400)
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'))
+
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_update_with_bad_provider(self, mock_provider):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        new_pool = {'name': 'new_name'}
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
+        response = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                            self._build_body(new_pool), status=500)
+        self.assertIn('Provider \'bad_driver\' reports error: broken',
+                      response.json.get('faultstring'))
+
+    def test_bad_update_non_udp_pool_with_udp_fields(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE,
+              "persistence_timeout": 3,
+              "persistence_granularity": '255.255.255.0'}
+        self.set_lb_status(self.lb_id)
+        new_pool = {'session_persistence': sp}
+        expect_error_msg = ("Validation failure: persistence_timeout and "
+                            "persistence_granularity is only for %s "
+                            "protocol pools.") % constants.PROTOCOL_UDP
+        res = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                       self._build_body(new_pool), status=400,
+                       expect_errors=True)
+        self.assertEqual(expect_error_msg, res.json['faultstring'])
+        self.assert_correct_status(
+            lb_id=self.udp_lb_id, listener_id=self.udp_listener_id)
+
+    def test_update_with_bad_tls_container_ref(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        tls_container_ref = uuidutils.generate_uuid()
+        new_pool = {'tls_container_ref': tls_container_ref}
+
+        self.cert_manager_mock().get_cert.side_effect = [Exception(
+            "bad cert")]
+        self.cert_manager_mock().get_secret.side_effect = [Exception(
+            "bad secret")]
+        resp = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                        self._build_body(new_pool), status=400).json
+        self.assertIn(tls_container_ref, resp['faultstring'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_with_ca_and_crl(self, mock_cert_data):
+        self.cert_manager_mock().get_secret.side_effect = [
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL]
+        ca_tls_container_ref = uuidutils.generate_uuid()
+        crl_container_ref = uuidutils.generate_uuid()
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        new_pool = {'ca_tls_container_ref': ca_tls_container_ref,
+                    'crl_container_ref': crl_container_ref}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual(ca_tls_container_ref,
+                         response.get('ca_tls_container_ref'))
+        self.assertEqual(crl_container_ref,
+                         response.get('crl_container_ref'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    def test_update_with_bad_ca_tls_container_ref(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        ca_tls_container_ref = uuidutils.generate_uuid()
+        new_pool = {'ca_tls_container_ref': ca_tls_container_ref}
+        self.cert_manager_mock().get_cert.side_effect = [Exception(
+            "bad cert")]
+        self.cert_manager_mock().get_secret.side_effect = [Exception(
+            "bad secret")]
+        resp = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                        self._build_body(new_pool), status=400).json
+        self.assertIn(ca_tls_container_ref, resp['faultstring'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_with_crl(self, mock_cert_data):
+        ca_tls_container_ref = uuidutils.generate_uuid()
+        crl_container_ref = uuidutils.generate_uuid()
+        self.cert_manager_mock().get_secret.side_effect = [
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL]
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            ca_tls_container_ref=ca_tls_container_ref,
+            crl_container_ref=crl_container_ref).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        new_crl_container_ref = uuidutils.generate_uuid()
+        new_pool = {'crl_container_ref': new_crl_container_ref}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual(new_crl_container_ref,
+                         response.get('crl_container_ref'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    def test_update_with_crl_only_negative_case(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        crl_container_ref = uuidutils.generate_uuid()
+        new_pool = {'crl_container_ref': crl_container_ref}
+        resp = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                        self._build_body(new_pool), status=400).json
+        self.assertIn(
+            'A CA reference is required to specify a certificate revocation '
+            'list.', resp['faultstring'])
+
+    def test_update_with_crl_only_none_ca(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        crl_container_ref = uuidutils.generate_uuid()
+        new_pool = {'ca_tls_container_ref': None,
+                    'crl_container_ref': crl_container_ref}
+        resp = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                        self._build_body(new_pool), status=400).json
+        self.assertIn(
+            'A CA reference is required to specify a certificate revocation '
+            'list.', resp['faultstring'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_with_unreachable_crl(self, mock_cert_data):
+        crl_container_ref = uuidutils.generate_uuid()
+        new_crl_container_ref = uuidutils.generate_uuid()
+        ca_tls_container_ref = uuidutils.generate_uuid()
+        self.cert_manager_mock().get_secret.side_effect = [
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL]
+
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            ca_tls_container_ref=ca_tls_container_ref,
+            crl_container_ref=crl_container_ref).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_crl_container_ref = uuidutils.generate_uuid()
+        new_pool = {'crl_container_ref': new_crl_container_ref}
+        self.cert_manager_mock().get_secret.side_effect = [
+            exceptions.CertificateRetrievalException(
+                ref=new_crl_container_ref)]
+        resp = self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                        self._build_body(new_pool), status=400).json
+        self.assertIn(new_crl_container_ref, resp['faultstring'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_unset_ca_cert(self, mock_cert_data):
+        self.cert_manager_mock().get_secret.return_value = (
+            sample_certs.X509_CA_CERT)
+
+        ca_tls_uuid = uuidutils.generate_uuid()
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            ca_tls_container_ref=ca_tls_uuid).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_pool = {'ca_tls_container_ref': None}
+        body = self._build_body(new_pool)
+        listener_path = self.POOL_PATH.format(
+            pool_id=api_pool['id'])
+        api_pool = self.put(listener_path, body).json.get(self.root_tag)
+        self.assertIsNone(api_pool.get('ca_tls_container_ref'))
+        self.assertIsNone(api_pool.get('crl_container_ref'))
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_unset_ca_cert_with_crl(self, mock_cert_data):
+        self.cert_manager_mock().get_secret.side_effect = [
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL]
+
+        ca_tls_uuid = uuidutils.generate_uuid()
+        crl_uuid = uuidutils.generate_uuid()
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            ca_tls_container_ref=ca_tls_uuid,
+            crl_container_ref=crl_uuid).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_pool = {'ca_tls_container_ref': None}
+        body = self._build_body(new_pool)
+        listener_path = self.POOL_PATH.format(
+            pool_id=api_pool['id'])
+        response = self.put(listener_path, body, status=400).json
+        self.assertIn('A CA reference cannot be removed when a certificate '
+                      'revocation list is present.', response['faultstring'])
+
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_unset_crl(self, mock_cert_data):
+        self.cert_manager_mock().get_secret.side_effect = [
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL,
+            sample_certs.X509_CA_CERT, sample_certs.X509_CA_CRL]
+        ca_tls_uuid = uuidutils.generate_uuid()
+        crl_uuid = uuidutils.generate_uuid()
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id,
+            ca_tls_container_ref=ca_tls_uuid,
+            crl_container_ref=crl_uuid).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        new_pool = {'crl_container_ref': None}
+        body = self._build_body(new_pool)
+        listener_path = self.POOL_PATH.format(
+            pool_id=api_pool['id'])
+        update_pool = self.put(listener_path, body).json.get(self.root_tag)
+        self.assertEqual(api_pool.get('ca_tls_container_ref'),
+                         update_pool.get('ca_tls_container_ref'))
+        self.assertIsNone(update_pool.get('crl_container_ref'))
+
+    def test_update_with_tls_versions(self):
+        tls_versions = [lib_constants.TLS_VERSION_1_3,
+                        lib_constants.TLS_VERSION_1_2]
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            tls_enabled=True,
+            tls_versions=tls_versions,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        self.assertTrue(api_pool['tls_enabled'])
+        self.assertCountEqual(tls_versions,
+                              api_pool['tls_versions'])
+
+        new_pool = {'tls_versions': [lib_constants.TLS_VERSION_1_3]}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertCountEqual([lib_constants.TLS_VERSION_1_3],
+                              response['tls_versions'])
+        self.assertIsNotNone(response.get('created_at'))
+        self.assertIsNotNone(response.get('updated_at'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    def test_update_with_empty_tls_versions(self):
+        default_pool_tls_versions = [lib_constants.TLS_VERSION_1_3,
+                                     lib_constants.TLS_VERSION_1_2]
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        self.conf.config(group='api_settings',
+                         default_pool_tls_versions=default_pool_tls_versions)
+
+        tls_versions = [lib_constants.TLS_VERSION_1_3]
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            tls_enabled=True,
+            tls_versions=tls_versions,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        self.assertTrue(api_pool['tls_enabled'])
+        self.assertCountEqual(tls_versions,
+                              api_pool['tls_versions'])
+
+        new_pool = {'tls_versions': None}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertCountEqual(default_pool_tls_versions,
+                              response['tls_versions'])
+        self.assertIsNotNone(response.get('created_at'))
+        self.assertIsNotNone(response.get('updated_at'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    def test_update_with_tls_ciphers(self):
+        default_ciphers = (
+            'TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256')
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        self.conf.config(group='api_settings',
+                         default_pool_ciphers=default_ciphers)
+
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            tls_enabled=True,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        self.assertTrue(api_pool['tls_enabled'])
+        self.assertEqual(default_ciphers, api_pool['tls_ciphers'])
+
+        new_tls_ciphers = 'DHE-RSA-AES128-GCM-SHA256'
+        new_pool = {'tls_ciphers': new_tls_ciphers}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual(new_tls_ciphers, response['tls_ciphers'])
+        self.assertIsNotNone(response.get('created_at'))
+        self.assertIsNotNone(response.get('updated_at'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    def test_update_with_empty_tls_ciphers(self):
+        default_ciphers = (
+            'TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256')
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        self.conf.config(group='api_settings',
+                         default_pool_ciphers=default_ciphers)
+
+        tls_ciphers = 'DHE-RSA-AES128-GCM-SHA256'
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            tls_enabled=True,
+            tls_ciphers=tls_ciphers,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        self.assertTrue(api_pool['tls_enabled'])
+        self.assertEqual(tls_ciphers, api_pool['tls_ciphers'])
+
+        new_pool = {'tls_ciphers': None}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual(default_ciphers, response['tls_ciphers'])
+        self.assertIsNotNone(response.get('created_at'))
+        self.assertIsNotNone(response.get('updated_at'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=response.get('id'))
+
+    def test_delete(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
+        api_pool['provisioning_status'] = constants.ACTIVE
+        api_pool['operating_status'] = constants.ONLINE
+        api_pool.pop('updated_at')
+
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        response.pop('updated_at')
+        self.assertEqual(api_pool, response)
+
+        self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_DELETE)
+
+    # Problems with TLS certs should not block a delete
+    def test_delete_with_bad_tls_ref(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
+        api_pool['provisioning_status'] = constants.ACTIVE
+        api_pool['operating_status'] = constants.ONLINE
+        api_pool.pop('updated_at')
+
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        response.pop('updated_at')
+        self.assertEqual(api_pool, response)
+
+        tls_uuid = uuidutils.generate_uuid()
+        self.pool_repo.update(db_api.get_session(),
+                              api_pool.get('id'),
+                              tls_certificate_id=tls_uuid)
+
+        self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_DELETE)
+
+    def test_delete_authorize(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
+        api_pool['provisioning_status'] = constants.ACTIVE
+        api_pool['operating_status'] = constants.ONLINE
+        api_pool.pop('updated_at')
+
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        response.pop('updated_at')
+        self.assertEqual(api_pool, response)
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        override_credentials = {
+            'service_user_id': None,
+            'user_domain_id': None,
+            'is_admin_project': True,
+            'service_project_domain_id': None,
+            'service_project_id': None,
+            'roles': ['load-balancer_member', 'member'],
+            'user_id': None,
+            'is_admin': False,
+            'service_user_domain_id': None,
+            'project_domain_id': None,
+            'service_roles': [],
+            'project_id': self.project_id}
+        with mock.patch(
+                "oslo_context.context.RequestContext.to_policy_values",
+                return_value=override_credentials):
+            self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')))
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_DELETE)
+
+    def test_delete_not_authorize(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
+        api_pool['provisioning_status'] = constants.ACTIVE
+        api_pool['operating_status'] = constants.ONLINE
+        api_pool.pop('updated_at')
+
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        response.pop('updated_at')
+        self.assertEqual(api_pool, response)
+
+        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
+        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
+        self.conf.config(group='api_settings',
+                         auth_strategy=constants.TESTING)
+        test_context = octavia.common.context.RequestContext(
+            project_id=uuidutils.generate_uuid())
+        with mock.patch('oslo_context.context.RequestContext.from_environ',
+                        return_value=test_context):
+            self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                        status=403)
+        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
+
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.ACTIVE,
+            listener_prov_status=constants.ACTIVE,
+            pool_prov_status=constants.ACTIVE)
+
+    def test_bad_delete(self):
+        self.delete(self.POOL_PATH.format(
+            pool_id=uuidutils.generate_uuid()), status=404)
+
+    def test_delete_with_l7policy(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(self.lb_id)
+        self.create_l7policy(
+            self.listener_id,
+            constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            redirect_pool_id=api_pool.get('id'))
+        self.set_lb_status(self.lb_id)
+        self.delete(self.POOL_PATH.format(
+            pool_id=api_pool.get('id')), status=409)
+
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_delete_with_bad_provider(self, mock_provider):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        # Set status to ACTIVE/ONLINE because set_lb_status did it in the db
+        api_pool['provisioning_status'] = constants.ACTIVE
+        api_pool['operating_status'] = constants.ONLINE
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+
+        self.assertIsNone(api_pool.pop('updated_at'))
+        self.assertIsNotNone(response.pop('updated_at'))
+        self.assertEqual(api_pool, response)
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
+        self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                    status=500)
+
+    def test_create_with_session_persistence(self):
+        sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE,
+              "cookie_name": "test_cookie_name"}
+        optionals = {"listener_id": self.listener_id,
+                     "session_persistence": sp}
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            **optionals).get(self.root_tag)
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_CREATE,
+            pool_op_status=constants.OFFLINE)
+        self.set_lb_status(self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        sess_p = response.get('session_persistence')
+        self.assertIsNotNone(sess_p)
+        self.assertEqual(constants.SESSION_PERSISTENCE_APP_COOKIE,
+                         sess_p.get('type'))
+        self.assertEqual('test_cookie_name', sess_p.get('cookie_name'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'))
+
+    def test_create_with_bad_session_persistence(self):
+        sp = {"type": "persistence_type",
+              "cookie_name": "test_cookie_name"}
+        lb_pool = {
+            'loadbalancer_id': self.lb_id,
+            'listener_id': self.listener_id,
+            'protocol': constants.PROTOCOL_HTTP,
+            'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
+            'session_persistence': sp}
+        self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400)
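+
+    # NOTE: cookie_name is only meaningful for APP_COOKIE persistence
+    # and must be a valid cookie name; the *bad_SP* tests below verify
+    # that HTTP_COOKIE and SOURCE_IP types reject it and that malformed
+    # or missing names fail with 400.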
sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, + "cookie_name": "test_cookie_name"} + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'session_persistence': sp} + self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) + + def test_create_with_bad_SP_type_IP_cookie(self): + sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP, + "cookie_name": "test_cookie_name"} + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'session_persistence': sp} + self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) + + def test_create_with_bad_SP_cookie_name(self): + sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, + "cookie_name": "b@d_cookie_name"} + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'session_persistence': sp} + self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) + + def test_create_with_missing_cookie_name(self): + sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE} + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'session_persistence': sp} + self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) + + def test_add_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, + "cookie_name": "test_cookie_name", + 'persistence_granularity': None, + 'persistence_timeout': None} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + new_pool = {'session_persistence': sp} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool)) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + self.assertEqual(sp, response.get('session_persistence')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE) + + def test_update_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, + "cookie_name": "test_cookie_name"} + optionals = {"listener_id": self.listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + sess_p = response.get('session_persistence') + sess_p['cookie_name'] = None + sess_p['type'] = constants.SESSION_PERSISTENCE_SOURCE_IP + new_pool = {'session_persistence': sess_p} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool)) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + self.assertEqual(sess_p, response.get('session_persistence')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + 
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+
+    def test_update_preserve_session_persistence(self):
+        sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE,
+              "cookie_name": "test_cookie_name",
+              'persistence_granularity': None,
+              'persistence_timeout': None}
+        optionals = {"listener_id": self.listener_id,
+                     "name": "name", "session_persistence": sp}
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            **optionals).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        new_pool = {'name': 'update_name'}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool))
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        self.assertEqual(sp, response.get('session_persistence'))
+        self.assert_correct_status(
+            lb_id=self.lb_id, listener_id=self.listener_id,
+            pool_id=api_pool.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE)
+
+    def test_update_bad_session_persistence(self):
+        sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE,
+              "cookie_name": "test_cookie_name"}
+        optionals = {"listener_id": self.listener_id,
+                     "session_persistence": sp}
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            **optionals).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        sess_p = response.get('session_persistence')
+        sess_p['type'] = 'fake_type'
+        new_pool = {'session_persistence': sess_p}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool), status=400)
+
+    def test_update_with_bad_SP_type_HTTP_cookie(self):
+        sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP}
+        optionals = {"listener_id": self.listener_id,
+                     "session_persistence": sp}
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            **optionals).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        sess_p = response.get('session_persistence')
+        sess_p['type'] = constants.SESSION_PERSISTENCE_HTTP_COOKIE
+        sess_p['cookie_name'] = 'test_cookie_name'
+        new_pool = {'session_persistence': sess_p}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool), status=400)
+
+    def test_update_with_bad_SP_type_IP_cookie(self):
+        sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE}
+        optionals = {"listener_id": self.listener_id,
+                     "session_persistence": sp}
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            **optionals).get(self.root_tag)
+        self.set_lb_status(lb_id=self.lb_id)
+        response = self.get(self.POOL_PATH.format(
+            pool_id=api_pool.get('id'))).json.get(self.root_tag)
+        sess_p = response.get('session_persistence')
+        sess_p['type'] = constants.SESSION_PERSISTENCE_SOURCE_IP
+        sess_p['cookie_name'] = 'test_cookie_name'
+        new_pool = {'session_persistence': sess_p}
+        self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')),
+                 self._build_body(new_pool), status=400)
constants.SESSION_PERSISTENCE_SOURCE_IP} + optionals = {"listener_id": self.listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + sess_p = response.get('session_persistence') + sess_p['type'] = constants.SESSION_PERSISTENCE_APP_COOKIE + sess_p['cookie_name'] = 'b@d_cookie_name' + new_pool = {'session_persistence': sess_p} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool), status=400) + + def test_update_with_missing_SP_cookie_name(self): + sp = {"type": constants.SESSION_PERSISTENCE_SOURCE_IP} + optionals = {"listener_id": self.listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + sess_p = response.get('session_persistence') + sess_p['type'] = constants.SESSION_PERSISTENCE_APP_COOKIE + new_pool = {'session_persistence': sess_p} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool), status=400) + + def test_delete_with_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, + "cookie_name": "test_cookie_name"} + optionals = {"listener_id": self.listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id'))) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_DELETE) + + def test_delete_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_APP_COOKIE, + "cookie_name": "test_cookie_name"} + optionals = {"listener_id": self.listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + new_sp = {"pool": {"session_persistence": None}} + response = self.put(self.POOL_PATH.format( + pool_id=api_pool.get('id')), new_sp).json.get(self.root_tag) + self.assertIsNone(response.get('session_persistence')) + self.assert_correct_status( + lb_id=self.lb_id, listener_id=self.listener_id, + pool_id=api_pool.get('id'), + lb_prov_status=constants.PENDING_UPDATE, + listener_prov_status=constants.PENDING_UPDATE, + pool_prov_status=constants.PENDING_UPDATE) + + def test_create_when_lb_pending_update(self): + self.put(self.LB_PATH.format(lb_id=self.lb_id), + {'loadbalancer': {'name': 'test_name_change'}}) + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'project_id': self.project_id} + self.post(self.POOLS_PATH, self._build_body(lb_pool), status=409) + + def test_update_when_lb_pending_update(self): + api_pool = self.create_pool( + self.lb_id, + 
constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.set_lb_status(self.lb_id) + self.put(self.LB_PATH.format(lb_id=self.lb_id), + {'loadbalancer': {'name': 'test_name_change'}}) + new_pool = {'admin_state_up': False} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool), status=409) + + def test_delete_when_lb_pending_update(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.set_lb_status(self.lb_id) + self.put(self.LB_PATH.format(lb_id=self.lb_id), + {"loadbalancer": {'name': 'test_name_change'}}) + self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')), + status=409) + + def test_create_when_lb_pending_delete(self): + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + new_pool = { + 'loadbalancer_id': self.lb_id, + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'project_id': self.project_id} + self.post(self.POOLS_PATH, self._build_body(new_pool), status=409) + + def test_update_when_lb_pending_delete(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.set_lb_status(self.lb_id) + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + new_pool = {'admin_state_up': False} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool), status=409) + + def test_delete_when_lb_pending_delete(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.set_lb_status(self.lb_id) + self.delete(self.LB_PATH.format(lb_id=self.lb_id), + params={'cascade': "true"}) + self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')), + status=409) + + def test_update_already_deleted(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + # This updates the child objects + self.set_lb_status(self.lb_id, status=constants.DELETED) + new_pool = {'admin_state_up': False} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool), status=404) + + def test_delete_already_deleted(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + # This updates the child objects + self.set_lb_status(self.lb_id, status=constants.DELETED) + self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')), + status=404) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_valid_listener_pool_protocol(self, mock_cert_data): + cert = data_models.TLSContainer(certificate='cert') + lb_pool = { + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'project_id': self.project_id} + mock_cert_data.return_value = {'sni_certs': [cert]} + valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + for listener_proto in valid_map: + for pool_proto in valid_map[listener_proto]: + port = port + 1 + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = 
[uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + if listener['default_pool_id'] is None: + lb_pool['protocol'] = pool_proto + lb_pool['listener_id'] = listener.get('id') + self.post(self.POOLS_PATH, self._build_body(lb_pool), + status=201) + self.set_object_status(self.lb_repo, self.lb_id) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_invalid_listener_pool_protocol_map(self, mock_cert_data): + cert = data_models.TLSContainer(certificate='cert') + lb_pool = { + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'project_id': self.project_id} + mock_cert_data.return_value = {'sni_certs': [cert]} + invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + for listener_proto in invalid_map: + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + port = port + 1 + for pool_proto in invalid_map[listener_proto]: + expect_error_msg = ("Validation failure: The pool protocol " + "'%s' is invalid while the listener " + "protocol is '%s'.") % (pool_proto, + listener_proto) + if listener['default_pool_id'] is None: + lb_pool['protocol'] = pool_proto + lb_pool['listener_id'] = listener.get('id') + res = self.post(self.POOLS_PATH, self._build_body(lb_pool), + status=400, expect_errors=True) + if pool_proto == constants.PROTOCOL_TERMINATED_HTTPS: + self.assertIn('Invalid input', + res.json['faultstring']) + else: + self.assertEqual(expect_error_msg, + res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + + def test_create_with_alpn(self): + alpn_protocols = [lib_constants.ALPN_PROTOCOL_HTTP_2, + lib_constants.ALPN_PROTOCOL_HTTP_1_1] + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id, + tls_enabled=True, + alpn_protocols=alpn_protocols).get(self.root_tag) + self.assertEqual(alpn_protocols, api_pool['alpn_protocols']) + + def test_create_with_alpn_negative(self): + req_dict = {'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'listener_id': self.listener_id, + 'tls_enabled': True, + 'alpn_protocols': [lib_constants.ALPN_PROTOCOL_HTTP_1_1, + 'invalid-proto']} + res = self.post(self.POOLS_PATH, self._build_body(req_dict), + status=400) + fault = res.json['faultstring'] + self.assertIn( + 'Invalid input for field/attribute alpn_protocols', fault) + self.assertIn('Value should be a valid ALPN protocol ID', fault) + self.assert_correct_status(lb_id=self.lb_id) + + def test_update_with_alpn(self): + alpn_protocols_orig = [lib_constants.ALPN_PROTOCOL_HTTP_1_0] + alpn_protocols = [lib_constants.ALPN_PROTOCOL_HTTP_2, + lib_constants.ALPN_PROTOCOL_HTTP_1_1] + pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id, + tls_enabled=True, + alpn_protocols=alpn_protocols_orig) + self.set_lb_status(self.lb_id) + pool_path = self.POOL_PATH.format(pool_id=pool['pool']['id']) + get_pool = self.get(pool_path).json['pool'] + self.assertEqual(alpn_protocols_orig, get_pool.get('alpn_protocols')) + self.put(pool_path, + self._build_body({'alpn_protocols': alpn_protocols})) + get_pool = 
self.get(pool_path).json['pool'] + self.assertEqual(alpn_protocols, get_pool.get('alpn_protocols')) + + def test_update_with_alpn_negative(self): + alpn_protocols_orig = [lib_constants.ALPN_PROTOCOL_HTTP_1_0] + pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id, + tls_enabled=True, + alpn_protocols=alpn_protocols_orig) + self.set_lb_status(self.lb_id) + pool_path = self.POOL_PATH.format(pool_id=pool['pool']['id']) + get_pool = self.get(pool_path).json['pool'] + self.assertEqual(alpn_protocols_orig, get_pool.get('alpn_protocols')) + + req_dict = {'alpn_protocols': [ + lib_constants.ALPN_PROTOCOL_HTTP_1_1, 'invalid-proto']} + # Update the specific pool (not the pools collection) with the bad list + res = self.put(pool_path, self._build_body(req_dict), status=400) + fault = res.json['faultstring'] + self.assertIn( + 'Invalid input for field/attribute alpn_protocols', fault) + self.assertIn('Value should be a valid ALPN protocol ID', fault) + self.assert_correct_status(lb_id=self.lb_id) + + @mock.patch("octavia.api.drivers.noop_driver.driver.NoopManager." + "pool_update") + def test_update_with_exception_in_provider_driver(self, pool_update_mock): + pool_update_mock.side_effect = Exception("Provider error") + + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + + new_pool = {'name': 'foo'} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(new_pool), status=500) + + lb = self.get(self.LB_PATH.format(lb_id=self.lb_id)).json.get( + "loadbalancer") + self.assertEqual(lb[constants.PROVISIONING_STATUS], + constants.ACTIVE) diff --git a/octavia/tests/functional/api/v2/test_provider.py b/octavia/tests/functional/api/v2/test_provider.py new file mode 100644 index 0000000000..1970e7d9ff --- /dev/null +++ b/octavia/tests/functional/api/v2/test_provider.py @@ -0,0 +1,319 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
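+# NOTE: Functional tests for the v2 provider API: provider listing and the +# per-provider flavor and availability zone capability endpoints.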
+ + +from unittest import mock + +from octavia_lib.api.drivers import exceptions as lib_exceptions +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.tests.functional.api.v2 import base + + +class TestProvider(base.BaseAPITest): + + root_tag_list = 'providers' + + def setUp(self): + super().setUp() + + def test_get_all_providers(self): + octavia_dict = {'description': 'Octavia driver.', + 'name': 'octavia'} + amphora_dict = {'description': 'Amp driver.', 'name': 'amphora'} + noop_dict = {'description': 'NoOp driver.', 'name': 'noop_driver'} + providers = self.get(self.PROVIDERS_PATH).json.get(self.root_tag_list) + self.assertEqual(4, len(providers)) + self.assertIn(octavia_dict, providers) + self.assertIn(amphora_dict, providers) + self.assertIn(noop_dict, providers) + + def test_get_all_providers_fields(self): + octavia_dict = {'name': 'octavia'} + amphora_dict = {'name': 'amphora'} + noop_dict = {'name': 'noop_driver'} + providers = self.get(self.PROVIDERS_PATH, params={'fields': ['name']}) + providers_list = providers.json.get(self.root_tag_list) + self.assertEqual(4, len(providers_list)) + self.assertIn(octavia_dict, providers_list) + self.assertIn(amphora_dict, providers_list) + self.assertIn(noop_dict, providers_list) + + +class TestFlavorCapabilities(base.BaseAPITest): + + root_tag = 'flavor_capabilities' + + def setUp(self): + super().setUp() + + def test_nonexistent_provider(self): + self.get(self.FLAVOR_CAPABILITIES_PATH.format(provider='bogus'), + status=400) + + def test_noop_provider(self): + ref_capabilities = [{'description': 'The glance image tag to use for ' + 'this load balancer.', 'name': 'amp_image_tag'}] + + result = self.get( + self.FLAVOR_CAPABILITIES_PATH.format(provider='noop_driver')) + self.assertEqual(ref_capabilities, result.json.get(self.root_tag)) + + def test_amphora_driver(self): + ref_description = ("The load balancer topology. One of: SINGLE - One " + "amphora per load balancer. ACTIVE_STANDBY - Two " + "amphora per load balancer.") + result = self.get( + self.FLAVOR_CAPABILITIES_PATH.format(provider='amphora')) + capabilities = result.json.get(self.root_tag) + capability_dict = [i for i in capabilities if + i['name'] == 'loadbalancer_topology'][0] + self.assertEqual(ref_description, + capability_dict['description']) + + # Some drivers might not have implemented this yet, test that case + @mock.patch('octavia.api.drivers.noop_driver.driver.NoopProviderDriver.' 
+ 'get_supported_flavor_metadata') + def test_not_implemented(self, mock_get_metadata): + mock_get_metadata.side_effect = lib_exceptions.NotImplementedError() + self.get(self.FLAVOR_CAPABILITIES_PATH.format(provider='noop_driver'), + status=501) + + def test_authorized(self): + ref_capabilities = [{'description': 'The glance image tag to use ' + 'for this load balancer.', + 'name': 'amp_image_tag'}] + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.FLAVOR_CAPABILITIES_PATH.format( + provider='noop_driver')) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(ref_capabilities, result.json.get(self.root_tag)) + + def test_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + self.get(self.FLAVOR_CAPABILITIES_PATH.format(provider='noop_driver'), + status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_amphora_driver_one_filter(self): + ref_description = ("The compute driver flavor ID.") + result = self.get( + self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA), + params={constants.NAME: 'compute_flavor'}) + capabilities = result.json.get(self.root_tag) + self.assertEqual(1, len(capabilities)) + self.assertEqual(2, len(capabilities[0])) + self.assertEqual(ref_description, + capabilities[0][constants.DESCRIPTION]) + + def test_amphora_driver_two_filters(self): + ref_description = ("The compute driver flavor ID.") + result = self.get( + self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA), + params={constants.NAME: 'compute_flavor', + constants.DESCRIPTION: ref_description}) + capabilities = result.json.get(self.root_tag) + self.assertEqual(1, len(capabilities)) + self.assertEqual(ref_description, + capabilities[0][constants.DESCRIPTION]) + + def test_amphora_driver_filter_no_match(self): + result = self.get( + self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA), + params={constants.NAME: 'bogus'}) + capabilities = result.json.get(self.root_tag) + self.assertEqual([], capabilities) + + def test_amphora_driver_one_filter_one_field(self): + result = self.get( + self.FLAVOR_CAPABILITIES_PATH.format(provider=constants.AMPHORA), + params={constants.NAME: 'compute_flavor', + constants.FIELDS: constants.NAME}) + capabilities = result.json.get(self.root_tag) + self.assertEqual(1, len(capabilities)) + self.assertEqual(1, len(capabilities[0])) + self.assertEqual('compute_flavor', capabilities[0][constants.NAME]) + + +class TestAvailabilityZoneCapabilities(base.BaseAPITest): + + root_tag = 'availability_zone_capabilities' + + def setUp(self): + super().setUp() + + def test_nonexistent_provider(self): + 
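+ # An unknown provider name should fail validation with a 400 error.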
self.get(self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider='bogus'), status=400) + + def test_noop_provider(self): + ref_capabilities = [ + {'description': 'The compute availability zone to ' + 'use for this loadbalancer.', + 'name': constants.COMPUTE_ZONE}, + {'description': 'The volume availability zone to ' + 'use for this loadbalancer.', + 'name': constants.VOLUME_ZONE}, + ] + + result = self.get( + self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider='noop_driver')) + self.assertEqual(ref_capabilities, result.json.get(self.root_tag)) + + def test_amphora_driver(self): + ref_description1 = 'The compute availability zone.' + ref_description2 = 'The management network ID for the amphora.' + result = self.get( + self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider='amphora')) + capabilities = result.json.get(self.root_tag) + capability_dict = [i for i in capabilities if + i['name'] == constants.COMPUTE_ZONE][0] + self.assertEqual(ref_description1, + capability_dict['description']) + capability_dict = [i for i in capabilities if + i['name'] == constants.MANAGEMENT_NETWORK][0] + self.assertEqual(ref_description2, + capability_dict['description']) + + # Some drivers might not have implemented this yet, test that case + @mock.patch('octavia.api.drivers.noop_driver.driver.NoopProviderDriver.' + 'get_supported_availability_zone_metadata') + def test_not_implemented(self, mock_get_metadata): + mock_get_metadata.side_effect = lib_exceptions.NotImplementedError() + self.get(self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider='noop_driver'), status=501) + + def test_authorized(self): + ref_capabilities = [ + {'description': 'The compute availability zone to ' + 'use for this loadbalancer.', + 'name': constants.COMPUTE_ZONE}, + {'description': 'The volume availability zone to ' + 'use for this loadbalancer.', + 'name': constants.VOLUME_ZONE}, + ] + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + project_id = uuidutils.generate_uuid() + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': True, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get( + self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider='noop_driver')) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(ref_capabilities, result.json.get(self.root_tag)) + + def test_not_authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + self.get(self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider='noop_driver'), status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_amphora_driver_one_filter(self): + ref_description = 'The compute availability zone.' 
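+ # Filtering by capability name should narrow the result to the single + # matching entry, returned with both its name and description.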
+ result = self.get( + self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider=constants.AMPHORA), + params={constants.NAME: constants.COMPUTE_ZONE}) + capabilities = result.json.get(self.root_tag) + self.assertEqual(1, len(capabilities)) + self.assertEqual(2, len(capabilities[0])) + self.assertEqual(ref_description, + capabilities[0][constants.DESCRIPTION]) + + ref_description = 'The management network ID for the amphora.' + result = self.get( + self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider=constants.AMPHORA), + params={constants.NAME: constants.MANAGEMENT_NETWORK}) + capabilities = result.json.get(self.root_tag) + self.assertEqual(1, len(capabilities)) + self.assertEqual(2, len(capabilities[0])) + self.assertEqual(ref_description, + capabilities[0][constants.DESCRIPTION]) + + def test_amphora_driver_two_filters(self): + ref_description = 'The compute availability zone.' + result = self.get( + self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider=constants.AMPHORA), + params={constants.NAME: constants.COMPUTE_ZONE, + constants.DESCRIPTION: ref_description}) + capabilities = result.json.get(self.root_tag) + self.assertEqual(1, len(capabilities)) + self.assertEqual(ref_description, + capabilities[0][constants.DESCRIPTION]) + + def test_amphora_driver_filter_no_match(self): + result = self.get( + self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider=constants.AMPHORA), + params={constants.NAME: 'bogus'}) + capabilities = result.json.get(self.root_tag) + self.assertEqual([], capabilities) + + def test_amphora_driver_one_filter_one_field(self): + result = self.get( + self.AVAILABILITY_ZONE_CAPABILITIES_PATH.format( + provider=constants.AMPHORA), + params={constants.NAME: constants.COMPUTE_ZONE, + constants.FIELDS: constants.NAME}) + capabilities = result.json.get(self.root_tag) + self.assertEqual(1, len(capabilities)) + self.assertEqual(1, len(capabilities[0])) + self.assertEqual(constants.COMPUTE_ZONE, + capabilities[0][constants.NAME]) diff --git a/octavia/tests/functional/api/v2/test_quotas.py b/octavia/tests/functional/api/v2/test_quotas.py new file mode 100644 index 0000000000..84af2295ca --- /dev/null +++ b/octavia/tests/functional/api/v2/test_quotas.py @@ -0,0 +1,931 @@ +# Copyright 2016 Rackspace +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
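+# NOTE: Functional tests for the v2 quota API: default and custom quotas, +# deprecated field names, RBAC enforcement, sorting and pagination.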
+import random +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +import octavia.common.context +from octavia.tests.functional.api.v2 import base + +CONF = cfg.CONF + + +class TestQuotas(base.BaseAPITest): + + root_tag = 'quota' + root_tag_list = 'quotas' + root_tag_links = 'quotas_links' + + def setUp(self): + super().setUp() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config( + group="quotas", + default_load_balancer_quota=random.randrange( + constants.QUOTA_UNLIMITED, 9000)) + conf.config( + group="quotas", + default_listener_quota=random.randrange( + constants.QUOTA_UNLIMITED, 9000)) + conf.config( + group="quotas", + default_member_quota=random.randrange( + constants.QUOTA_UNLIMITED, 9000)) + # We need to make sure unlimited gets tested each pass + conf.config(group="quotas", + default_pool_quota=constants.QUOTA_UNLIMITED) + conf.config( + group="quotas", + default_health_monitor_quota=random.randrange( + constants.QUOTA_UNLIMITED, 9000)) + conf.config( + group="quotas", + default_l7policy_quota=random.randrange( + constants.QUOTA_UNLIMITED, 9000)) + conf.config( + group="quotas", + default_l7rule_quota=random.randrange( + constants.QUOTA_UNLIMITED, 9000)) + + self.project_id = uuidutils.generate_uuid() + + def _assert_quotas_equal(self, observed, expected=None): + if not expected: + expected = {'load_balancer': + CONF.quotas.default_load_balancer_quota, + 'listener': CONF.quotas.default_listener_quota, + 'pool': CONF.quotas.default_pool_quota, + 'health_monitor': + CONF.quotas.default_health_monitor_quota, + 'member': CONF.quotas.default_member_quota, + 'l7policy': CONF.quotas.default_l7policy_quota, + 'l7rule': CONF.quotas.default_l7rule_quota} + self.assertEqual(expected['load_balancer'], observed['load_balancer']) + self.assertEqual(expected['listener'], observed['listener']) + self.assertEqual(expected['pool'], observed['pool']) + self.assertEqual(expected['health_monitor'], + observed['health_monitor']) + self.assertEqual(expected['member'], observed['member']) + self.assertEqual(expected['l7policy'], observed['l7policy']) + self.assertEqual(expected['l7rule'], observed['l7rule']) + + def test_get_all_quotas_no_quotas(self): + response = self.get(self.QUOTAS_PATH) + quota_list = response.json + self.assertEqual({'quotas': [], 'quotas_links': []}, quota_list) + + def test_get_all_quotas_with_quotas(self): + project_id1 = uuidutils.generate_uuid() + project_id2 = uuidutils.generate_uuid() + quota_path1 = self.QUOTA_PATH.format(project_id=project_id1) + quota1 = {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30, + 'pool': 30, 'health_monitor': 30, 'member': 30, + 'l7policy': 30, 'l7rule': 30} + body1 = {'quota': quota1} + self.put(quota_path1, body1, status=202) + quota_path2 = self.QUOTA_PATH.format(project_id=project_id2) + quota2 = {'load_balancer': 50, 'listener': 50, 'pool': 50, + 'health_monitor': 50, 'member': 50, 'l7policy': 50, + 'l7rule': 50} + body2 = {'quota': quota2} + self.put(quota_path2, body2, status=202) + + response = self.get(self.QUOTAS_PATH) + quota_list = response.json + + quota1['project_id'] = quota1['tenant_id'] = project_id1 + quota2['project_id'] = quota2['tenant_id'] = project_id2 + # Expected deprecated names until T + quota1['healthmonitor'] = quota1['health_monitor'] + quota1['loadbalancer'] = quota1['load_balancer'] + quota2['healthmonitor'] = quota2['health_monitor'] + quota2['loadbalancer'] 
= quota2['load_balancer'] + + expected = {'quotas': [quota1, quota2], 'quotas_links': []} + self.assertEqual(expected, quota_list) + + def test_deprecated_get_and_put_vars(self): + project_id1 = uuidutils.generate_uuid() + project_id2 = uuidutils.generate_uuid() + quota_path1 = self.QUOTA_PATH.format(project_id=project_id1) + quota1 = {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30, + 'pool': 30, 'health_monitor': 30, 'member': 30, + 'l7policy': 30, 'l7rule': 30} + body1 = {'quota': quota1} + self.put(quota_path1, body1, status=202) + quota_path2 = self.QUOTA_PATH.format(project_id=project_id2) + quota2 = {'loadbalancer': 50, 'listener': 50, 'pool': 50, + 'healthmonitor': 50, 'member': 50, 'l7policy': 50, + 'l7rule': 50} + body2 = {'quota': quota2} + self.put(quota_path2, body2, status=202) + + response = self.get(self.QUOTAS_PATH) + quota_list = response.json + + quota1['project_id'] = quota1['tenant_id'] = project_id1 + quota2['project_id'] = quota2['tenant_id'] = project_id2 + # Expected deprecated names until T + quota1['healthmonitor'] = quota1['health_monitor'] + quota1['loadbalancer'] = quota1['load_balancer'] + quota2['health_monitor'] = quota2['healthmonitor'] + quota2['load_balancer'] = quota2['loadbalancer'] + + expected = {'quotas': [quota1, quota2], 'quotas_links': []} + self.assertEqual(expected, quota_list) + + def test_get_all_not_Authorized(self): + project_id1 = uuidutils.generate_uuid() + project_id2 = uuidutils.generate_uuid() + quota_path1 = self.QUOTA_PATH.format(project_id=project_id1) + quota1 = {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30, + 'pool': 30, 'health_monitor': 30, 'member': 30, + 'l7policy': 30, 'l7rule': 30} + body1 = {'quota': quota1} + self.put(quota_path1, body1, status=202) + quota_path2 = self.QUOTA_PATH.format(project_id=project_id2) + quota2 = {'load_balancer': 50, 'listener': 50, 'pool': 50, + 'health_monitor': 50, 'member': 50, 'l7policy': 50, + 'l7rule': 50} + body2 = {'quota': quota2} + self.put(quota_path2, body2, status=202) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer:bogus'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': uuidutils.generate_uuid()} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.QUOTAS_PATH, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_get_all_not_Authorized_no_role(self): + project_id1 = uuidutils.generate_uuid() + quota_path1 = self.QUOTA_PATH.format(project_id=project_id1) + quota1 = {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30, + 'pool': 30, 'health_monitor': 30, 'member': 30, + 'l7policy': 30, 'l7rule': 30} + body1 = {'quota': quota1} + self.put(quota_path1, body1, status=202) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 
'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': [], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.QUOTAS_PATH, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_get_all_not_Authorized_bogus_role(self): + project_id1 = uuidutils.generate_uuid() + project_id2 = uuidutils.generate_uuid() + quota_path1 = self.QUOTA_PATH.format(project_id=project_id1) + quota1 = {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30, + 'pool': 30, 'health_monitor': 30, 'member': 30, + 'l7policy': 30, 'l7rule': 30} + body1 = {'quota': quota1} + self.put(quota_path1, body1, status=202) + quota_path2 = self.QUOTA_PATH.format(project_id=project_id2) + quota2 = {'load_balancer': 50, 'listener': 50, 'pool': 50, + 'health_monitor': 50, 'member': 50, 'l7policy': 50, + 'l7rule': 50} + body2 = {'quota': quota2} + self.put(quota_path2, body2, status=202) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_bogus'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.QUOTAS_PATH, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_get_all_admin(self): + project_id1 = uuidutils.generate_uuid() + project_id2 = uuidutils.generate_uuid() + project_id3 = uuidutils.generate_uuid() + quota1 = self.create_quota( + project_id=project_id1, lb_quota=1, member_quota=1 + ).get(self.root_tag) + quota2 = self.create_quota( + project_id=project_id2, lb_quota=2, member_quota=2 + ).get(self.root_tag) + quota3 = self.create_quota( + project_id=project_id3, lb_quota=3, member_quota=3 + ).get(self.root_tag) + quotas = self.get(self.QUOTAS_PATH).json.get(self.root_tag_list) + self.assertEqual(3, len(quotas)) + quota_lb_member_quotas = [(lb.get('load_balancer'), lb.get('member')) + for lb in quotas] + self.assertIn((quota1.get('load_balancer'), quota1.get('member')), + quota_lb_member_quotas) + self.assertIn((quota2.get('load_balancer'), quota2.get('member')), + quota_lb_member_quotas) + self.assertIn((quota3.get('load_balancer'), quota3.get('member')), + quota_lb_member_quotas) + + def test_get_all_non_admin_global_observer(self): + project_id1 = uuidutils.generate_uuid() + project_id2 = uuidutils.generate_uuid() + project_id3 = uuidutils.generate_uuid() + quota1 = self.create_quota( + project_id=project_id1, lb_quota=1, member_quota=1 + ).get(self.root_tag) + quota2 = self.create_quota( + project_id=project_id2, lb_quota=2, member_quota=2 + ).get(self.root_tag) + quota3 = self.create_quota( + project_id=project_id3, 
lb_quota=3, member_quota=3 + ).get(self.root_tag) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['admin'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + quotas = self.get(self.QUOTAS_PATH) + quotas = quotas.json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(3, len(quotas)) + quota_lb_member_quotas = [(lb.get('load_balancer'), lb.get('member')) + for lb in quotas] + self.assertIn((quota1.get('load_balancer'), quota1.get('member')), + quota_lb_member_quotas) + self.assertIn((quota2.get('load_balancer'), quota2.get('member')), + quota_lb_member_quotas) + self.assertIn((quota3.get('load_balancer'), quota3.get('member')), + quota_lb_member_quotas) + + def test_get_all_quota_admin(self): + project_id1 = uuidutils.generate_uuid() + project_id2 = uuidutils.generate_uuid() + project_id3 = uuidutils.generate_uuid() + quota1 = self.create_quota( + project_id=project_id1, lb_quota=1, member_quota=1 + ).get(self.root_tag) + quota2 = self.create_quota( + project_id=project_id2, lb_quota=2, member_quota=2 + ).get(self.root_tag) + quota3 = self.create_quota( + project_id=project_id3, lb_quota=3, member_quota=3 + ).get(self.root_tag) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['admin'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + quotas = self.get(self.QUOTAS_PATH) + quotas = quotas.json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(3, len(quotas)) + quota_lb_member_quotas = [(lb.get('load_balancer'), lb.get('member')) + for lb in quotas] + self.assertIn((quota1.get('load_balancer'), quota1.get('member')), + quota_lb_member_quotas) + self.assertIn((quota2.get('load_balancer'), quota2.get('member')), + quota_lb_member_quotas) + self.assertIn((quota3.get('load_balancer'), quota3.get('member')), + quota_lb_member_quotas) + + def test_get_all_non_admin(self): + project1_id = uuidutils.generate_uuid() + project2_id = uuidutils.generate_uuid() + project3_id = uuidutils.generate_uuid() + self.create_quota( + project_id=project1_id, lb_quota=1, member_quota=1 + ).get(self.root_tag) + self.create_quota( + project_id=project2_id, lb_quota=2, member_quota=2 + ).get(self.root_tag) + quota3 = self.create_quota( + project_id=project3_id, lb_quota=3, member_quota=3 + ).get(self.root_tag) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = 
octavia.common.context.RequestContext( + project_id=project3_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project3_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + quotas = self.get(self.QUOTAS_PATH) + quotas = quotas.json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assertEqual(1, len(quotas)) + quota_lb_member_quotas = [(lb.get('load_balancer'), lb.get('member')) + for lb in quotas] + self.assertIn((quota3.get('load_balancer'), quota3.get('member')), + quota_lb_member_quotas) + + def test_get_all_non_admin_observer(self): + project1_id = uuidutils.generate_uuid() + project2_id = uuidutils.generate_uuid() + project3_id = uuidutils.generate_uuid() + self.create_quota( + project_id=project1_id, lb_quota=1, member_quota=1 + ).get(self.root_tag) + self.create_quota( + project_id=project2_id, lb_quota=2, member_quota=2 + ).get(self.root_tag) + quota3 = self.create_quota( + project_id=project3_id, lb_quota=3, member_quota=3 + ).get(self.root_tag) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + test_context = octavia.common.context.RequestContext( + project_id=project3_id) + with mock.patch('oslo_context.context.RequestContext.from_environ', + return_value=test_context): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_observer', 'reader'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project3_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + quotas = self.get(self.QUOTAS_PATH) + quotas = quotas.json.get(self.root_tag_list) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + self.assertEqual(1, len(quotas)) + quota_lb_member_quotas = [(lb.get('load_balancer'), lb.get('member')) + for lb in quotas] + self.assertIn((quota3.get('load_balancer'), quota3.get('member')), + quota_lb_member_quotas) + + def test_get_by_project_id(self): + project1_id = uuidutils.generate_uuid() + project2_id = uuidutils.generate_uuid() + quota1 = self.create_quota( + project_id=project1_id, lb_quota=1, member_quota=1 + ).get(self.root_tag) + quota2 = self.create_quota( + project_id=project2_id, lb_quota=2, member_quota=2 + ).get(self.root_tag) + + quotas = self.get( + self.QUOTA_PATH.format(project_id=project1_id) + ).json.get(self.root_tag) + self._assert_quotas_equal(quotas, quota1) + quotas = self.get( + self.QUOTA_PATH.format(project_id=project2_id) + ).json.get(self.root_tag) + self._assert_quotas_equal(quotas, quota2) + + def test_get_Authorized_member(self): + self._test_get_Authorized(['load-balancer_member', 'member']) + + def test_get_Authorized_observer(self): + self._test_get_Authorized(['load-balancer_observer', 'reader']) + + def 
test_get_Authorized_quota_admin(self): + self._test_get_Authorized(['admin']) + + def _test_get_Authorized(self, roles): + project1_id = uuidutils.generate_uuid() + quota1 = self.create_quota( + project_id=project1_id, lb_quota=1, member_quota=1 + ).get(self.root_tag) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': roles, + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project1_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + quotas = self.get( + self.QUOTA_PATH.format(project_id=project1_id) + ).json.get(self.root_tag) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self._assert_quotas_equal(quotas, quota1) + + def test_get_not_Authorized(self): + project1_id = uuidutils.generate_uuid() + self.create_quota( + project_id=project1_id, lb_quota=1, member_quota=1 + ).get(self.root_tag) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer:bogus'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': uuidutils.generate_uuid()} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + quotas = self.get(self.QUOTA_PATH.format(project_id=project1_id), + status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, quotas.json) + + def test_get_not_Authorized_bogus_role(self): + project1_id = uuidutils.generate_uuid() + self.create_quota( + project_id=project1_id, lb_quota=1, member_quota=1 + ).get(self.root_tag) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer:bogus'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project1_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + quotas = self.get( + self.QUOTA_PATH.format(project_id=project1_id), + status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, quotas.json) + + def test_get_not_Authorized_no_role(self): + project1_id = uuidutils.generate_uuid() + self.create_quota( + project_id=project1_id, lb_quota=1, member_quota=1 + ).get(self.root_tag) + self.conf = 
self.useFixture(oslo_fixture.Config(cfg.CONF)) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': [], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': project1_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + quotas = self.get( + self.QUOTA_PATH.format(project_id=project1_id), + status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, quotas.json) + + def test_get_all_sorted(self): + project1_id = uuidutils.generate_uuid() + project2_id = uuidutils.generate_uuid() + project3_id = uuidutils.generate_uuid() + self.create_quota( + project_id=project1_id, lb_quota=3, member_quota=8 + ).get(self.root_tag) + self.create_quota( + project_id=project2_id, lb_quota=2, member_quota=10 + ).get(self.root_tag) + self.create_quota( + project_id=project3_id, lb_quota=1, member_quota=9 + ).get(self.root_tag) + response = self.get(self.QUOTAS_PATH, + params={'sort': 'load_balancer:desc'}) + quotas_desc = response.json.get(self.root_tag_list) + response = self.get(self.QUOTAS_PATH, + params={'sort': 'load_balancer:asc'}) + quotas_asc = response.json.get(self.root_tag_list) + + self.assertEqual(3, len(quotas_desc)) + self.assertEqual(3, len(quotas_asc)) + + quota_lb_member_desc = [(lb.get('load_balancer'), lb.get('member')) + for lb in quotas_desc] + quota_lb_member_asc = [(lb.get('load_balancer'), lb.get('member')) + for lb in quotas_asc] + self.assertEqual(quota_lb_member_asc, + list(reversed(quota_lb_member_desc))) + + def test_get_all_limited(self): + self.skipTest("No idea how this should work yet") + # TODO(rm_work): Figure out how to make this ... 
work + project1_id = uuidutils.generate_uuid() + project2_id = uuidutils.generate_uuid() + project3_id = uuidutils.generate_uuid() + self.create_quota( + project_id=project1_id, lb_quota=3, member_quota=8 + ).get(self.root_tag) + self.create_quota( + project_id=project2_id, lb_quota=2, member_quota=10 + ).get(self.root_tag) + self.create_quota( + project_id=project3_id, lb_quota=1, member_quota=9 + ).get(self.root_tag) + + # First two -- should have 'next' link + first_two = self.get(self.QUOTAS_PATH, params={'limit': 2}).json + objs = first_two[self.root_tag_list] + links = first_two[self.root_tag_links] + self.assertEqual(2, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('next', links[0]['rel']) + + # Third + off the end -- should have previous link + third = self.get(self.QUOTAS_PATH, params={ + 'limit': 2, + 'marker': first_two[self.root_tag_list][1]['id']}).json + objs = third[self.root_tag_list] + links = third[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(1, len(links)) + self.assertEqual('previous', links[0]['rel']) + + # Middle -- should have both links + middle = self.get(self.QUOTAS_PATH, params={ + 'limit': 1, + 'marker': first_two[self.root_tag_list][0]['id']}).json + objs = middle[self.root_tag_list] + links = middle[self.root_tag_links] + self.assertEqual(1, len(objs)) + self.assertEqual(2, len(links)) + self.assertCountEqual(['previous', 'next'], + [link['rel'] for link in links]) + + def test_get_default_quotas(self): + response = self.get(self.QUOTA_DEFAULT_PATH.format( + project_id=self.project_id)) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota']) + + def test_get_default_quotas_Authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member', 'member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.QUOTA_DEFAULT_PATH.format( + project_id=self.project_id)) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota']) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_get_default_quotas_not_Authorized(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer:bogus'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': uuidutils.generate_uuid()} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.get(self.QUOTA_DEFAULT_PATH.format( + project_id=self.project_id), status=403) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) 
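+ # Restore the original auth strategy so later tests run with the + # default configuration.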
+ self.conf.config(group='api_settings', auth_strategy=auth_strategy) + + def test_custom_quotas(self): + quota_path = self.QUOTA_PATH.format(project_id=self.project_id) + body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30, + 'health_monitor': 30, 'member': 30, + 'l7policy': 30, 'l7rule': 30}} + self.put(quota_path, body, status=202) + response = self.get(quota_path) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota'], expected=body['quota']) + + def test_custom_quotas_quota_admin(self): + quota_path = self.QUOTA_PATH.format(project_id=self.project_id) + body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30, + 'health_monitor': 30, 'member': 30, 'l7policy': 30, + 'l7rule': 30}} + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['admin'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.put(quota_path, body, status=202) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get(quota_path) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota'], expected=body['quota']) + + def test_custom_quotas_not_Authorized_member(self): + quota_path = self.QUOTA_PATH.format(project_id=self.project_id) + body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30, + 'health_monitor': 30, 'member': 30, 'l7policy': 30, + 'l7rule': 30}} + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + response = self.put(quota_path, body, status=403) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, response.json) + + def test_custom_partial_quotas(self): + quota_path = self.QUOTA_PATH.format(project_id=self.project_id) + body = {'quota': {'load_balancer': 30, 'listener': None, 'pool': 30, + 'health_monitor': 30, 'member': 30, 'l7policy': 30, + 'l7rule': 30}} + expected_body = {'quota': { + 'load_balancer': 30, + 'listener': CONF.quotas.default_listener_quota, 'pool': 30, + 'health_monitor': 30, 'member': 30, 'l7policy': 30, 'l7rule': 30}} + self.put(quota_path, body, status=202) + response = self.get(quota_path) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota'], + expected=expected_body['quota']) + + def test_custom_missing_quotas(self): + quota_path = self.QUOTA_PATH.format(project_id=self.project_id) + body = {'quota': 
{'load_balancer': 30, 'pool': 30, + 'health_monitor': 30, 'member': 30, + 'l7policy': 30, 'l7rule': 30}} + expected_body = {'quota': { + 'load_balancer': 30, + 'listener': CONF.quotas.default_listener_quota, 'pool': 30, + 'health_monitor': 30, 'member': 30, 'l7policy': 30, 'l7rule': 30}} + self.put(quota_path, body, status=202) + response = self.get(quota_path) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota'], + expected=expected_body['quota']) + + def test_delete_custom_quotas(self): + quota_path = self.QUOTA_PATH.format(project_id=self.project_id) + body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30, + 'health_monitor': 30, 'member': 30, 'l7policy': 30, + 'l7rule': 30}} + self.put(quota_path, body, status=202) + response = self.get(quota_path) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota'], expected=body['quota']) + self.delete(quota_path, status=202) + response = self.get(quota_path) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota']) + + def test_delete_custom_quotas_admin(self): + quota_path = self.QUOTA_PATH.format(project_id=self.project_id) + body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30, + 'health_monitor': 30, 'member': 30, 'l7policy': 30, + 'l7rule': 30}} + self.put(quota_path, body, status=202) + response = self.get(quota_path) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota'], expected=body['quota']) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['admin'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.delete(quota_path, status=202) + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get(quota_path) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota']) + + def test_delete_quotas_not_Authorized_member(self): + quota_path = self.QUOTA_PATH.format(project_id=self.project_id) + body = {'quota': {'load_balancer': 30, 'listener': 30, 'pool': 30, + 'health_monitor': 30, 'member': 30, 'l7policy': 30, + 'l7rule': 30}} + self.put(quota_path, body, status=202) + response = self.get(quota_path) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota'], expected=body['quota']) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': self.project_id} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + self.delete(quota_path, status=403) + 
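+ # The rejected delete must leave the custom quota intact; the re-read + # below should still match the values that were PUT.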
self.conf.config(group='api_settings', auth_strategy=auth_strategy) + response = self.get(quota_path) + quota_dict = response.json + self._assert_quotas_equal(quota_dict['quota'], expected=body['quota']) + + def test_delete_non_existent_custom_quotas(self): + quota_path = self.QUOTA_PATH.format(project_id='bogus') + self.delete(quota_path, status=404) diff --git a/octavia/tests/functional/db/__init__.py b/octavia/tests/functional/db/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/functional/db/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/functional/db/base.py b/octavia/tests/functional/db/base.py new file mode 100644 index 0000000000..ba36ae429a --- /dev/null +++ b/octavia/tests/functional/db/base.py @@ -0,0 +1,152 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from oslo_config import fixture as oslo_fixture +from oslo_db.sqlalchemy import enginefacade +from oslotest import base as test_base + +from octavia.common import config +from octavia.common import constants +from octavia.db import api as db_api +from octavia.db import base_models +from octavia.db import models + +from octavia.tests import fixtures as oc_fixtures + + +class OctaviaDBTestBase(test_base.BaseTestCase): + + facade = None + + def setUp(self, connection_string='sqlite://'): + super().setUp() + + self.connection_string = connection_string + self.warning_fixture = self.useFixture(oc_fixtures.WarningsFixture()) + + # NOTE(blogan): doing this for now because using the engine and + # session set up in the fixture for test_base.DbTestCase does not work + # with the API functional tests. 
Need to investigate more if this
+        # becomes a problem.
+        conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
+        conf.config(group="database", connection=connection_string)
+
+        # Disable pool_timeout when using sqlite with a file;
+        # pool_timeout is not supported by SQLAlchemy 2 with SQLite/NullPool
+        if connection_string.startswith('sqlite:///'):
+            conf.config(group="database", pool_timeout=None)
+
+        engine, self.session = self._get_db_engine_session()
+
+        base_models.BASE.metadata.create_all(engine)
+
+        with self.session.begin():
+            self._seed_lookup_tables(self.session)
+
+        def clear_tables():
+            """Unregister all data models."""
+            base_models.BASE.metadata.drop_all(engine)
+            # If we created a file, clean it up too
+            if 'sqlite:///' in connection_string:
+                os.remove(connection_string.replace('sqlite:///', ''))
+
+        self.addCleanup(clear_tables)
+
+    def _get_db_engine_session(self):
+        # We need to get our own Facade so that the file-backed sqlite tests
+        # don't use the _FACADE singleton. Some tests will use in-memory
+        # sqlite, some will use a file-backed sqlite.
+        if 'sqlite:///' in self.connection_string:
+            facade = enginefacade.transaction_context()
+            facade.configure(sqlite_fk=True, expire_on_commit=True)
+            engine = facade.writer.get_engine()
+            sessionmaker = facade.writer.get_sessionmaker()
+            session = sessionmaker()
+            self.facade = facade
+        else:
+            engine = db_api.get_engine()
+            session = db_api.get_session()
+        return engine, session
+
+    def get_session(self):
+        if 'sqlite:///' in self.connection_string:
+            return self.facade.get_session(expire_on_commit=True)
+        else:
+            return db_api.get_session()
+
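    # [Editor's aside, not part of the original patch] A hypothetical
    # subclass showing the two connection styles this base class supports;
    # the file-backed form is what exercises the private enginefacade above
    # rather than the _FACADE singleton:
    #
    #     class FileBackedRepoTest(OctaviaDBTestBase):
    #         def setUp(self):
    #             # '/tmp/octavia_unit.db' is an assumed, illustrative path
    #             super().setUp(
    #                 connection_string='sqlite:////tmp/octavia_unit.db')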
+    def _seed_lookup_tables(self, session):
+        self._seed_lookup_table(
+            session, constants.SUPPORTED_PROVISIONING_STATUSES,
+            models.ProvisioningStatus)
+        self._seed_lookup_table(
+            session, constants.SUPPORTED_HEALTH_MONITOR_TYPES,
+            models.HealthMonitorType)
+        self._seed_lookup_table(
+            session, constants.SUPPORTED_LB_ALGORITHMS,
+            models.Algorithm)
+        self._seed_lookup_table(
+            session, constants.SUPPORTED_PROTOCOLS,
+            models.Protocol)
+        self._seed_lookup_table(
+            session, constants.SUPPORTED_OPERATING_STATUSES,
+            models.OperatingStatus)
+        self._seed_lookup_table(
+            session, constants.SUPPORTED_SP_TYPES,
+            models.SessionPersistenceType)
+        self._seed_lookup_table(session, constants.SUPPORTED_AMPHORA_ROLES,
+                                models.AmphoraRoles)
+        self._seed_lookup_table(session, constants.SUPPORTED_LB_TOPOLOGIES,
+                                models.LBTopology)
+        self._seed_lookup_table(session, constants.SUPPORTED_VRRP_AUTH,
+                                models.VRRPAuthMethod)
+        self._seed_lookup_table(session, constants.SUPPORTED_L7RULE_TYPES,
+                                models.L7RuleType)
+        self._seed_lookup_table(session,
+                                constants.SUPPORTED_L7RULE_COMPARE_TYPES,
+                                models.L7RuleCompareType)
+        self._seed_lookup_table(session, constants.SUPPORTED_L7POLICY_ACTIONS,
+                                models.L7PolicyAction)
+        self._seed_lookup_table(session, constants.SUPPORTED_CLIENT_AUTH_MODES,
+                                models.ClientAuthenticationMode)
+        # Add in the id='DELETED' placeholders
+        deleted_flavor_profile = models.FlavorProfile(
+            id=constants.NIL_UUID, name='DELETED-PLACEHOLDER',
+            provider_name=constants.DELETED, flavor_data='{}')
+        session.add(deleted_flavor_profile)
+        session.flush()
+        deleted_flavor = models.Flavor(
+            id=constants.NIL_UUID, flavor_profile_id=constants.NIL_UUID,
+            name='DELETED-PLACEHOLDER', enabled=False,
+            description='Placeholder for DELETED LBs with DELETED flavors')
+        session.add(deleted_flavor)
+        session.flush()
+        deleted_az_profile = models.AvailabilityZoneProfile(
+            id=constants.NIL_UUID, name='DELETED-PLACEHOLDER',
+            provider_name=constants.DELETED, availability_zone_data='{}')
+        session.add(deleted_az_profile)
+        session.flush()
+        deleted_az = models.AvailabilityZone(
+            availability_zone_profile_id=constants.NIL_UUID,
+            name=constants.NIL_UUID, enabled=False,
+            description='Placeholder for DELETED LBs with DELETED '
+                        'availability zones')
+        session.add(deleted_az)
+        session.flush()
+
+    def _seed_lookup_table(self, session, name_list, model_cls):
+        for name in name_list:
+            model = model_cls(name=name)
+            session.add(model)
diff --git a/octavia/tests/functional/db/test_models.py b/octavia/tests/functional/db/test_models.py
new file mode 100644
index 0000000000..368209644d
--- /dev/null
+++ b/octavia/tests/functional/db/test_models.py
@@ -0,0 +1,1992 @@
+# Copyright 2014 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo_utils import timeutils
+from oslo_utils import uuidutils
+
+from octavia.common import constants
+from octavia.common import data_models
+from octavia.db import models
+from octavia.tests.functional.db import base
+
+from sqlalchemy.orm import collections
+
+
+class ModelTestMixin:
+
+    FAKE_IP = '10.0.0.1'
+    FAKE_UUID_1 = uuidutils.generate_uuid()
+    FAKE_UUID_2 = uuidutils.generate_uuid()
+    FAKE_AZ = 'zone1'
+
+    def _insert(self, session, model_cls, model_kwargs):
+        model = model_cls(**model_kwargs)
+        session.add(model)
+        session.commit()
+        return model
+
+    def create_flavor_profile(self, session, **overrides):
+        kwargs = {'id': self.FAKE_UUID_1,
+                  'name': 'fake_profile',
+                  'provider_name': 'fake_provider',
+                  'flavor_data': "{'glance_image': 'ubuntu-16.04.03'}"}
+        kwargs.update(overrides)
+        return self._insert(session, models.FlavorProfile, kwargs)
+
+    def create_flavor(self, session, profile, **overrides):
+        kwargs = {'id': self.FAKE_UUID_1,
+                  'name': 'fake_flavor',
+                  'flavor_profile_id': profile,
+                  'description': 'fake flavor',
+                  'enabled': True}
+        kwargs.update(overrides)
+        return self._insert(session, models.Flavor, kwargs)
+
+    def associate_amphora(self, load_balancer, amphora):
+        load_balancer.amphorae.append(amphora)
+
+    def create_listener(self, session, **overrides):
+        kwargs = {'project_id': self.FAKE_UUID_1,
+                  'id': self.FAKE_UUID_1,
+                  'protocol': constants.PROTOCOL_HTTP,
+                  'protocol_port': 80,
+                  'provisioning_status': constants.ACTIVE,
+                  'operating_status': constants.ONLINE,
+                  'enabled': True}
+        kwargs.update(overrides)
+        return self._insert(session, models.Listener, kwargs)
+
+    def create_listener_statistics(self, session, listener_id, amphora_id,
+                                   **overrides):
+        kwargs = {'listener_id': listener_id,
+                  'amphora_id': amphora_id,
+                  'bytes_in': 0,
+                  'bytes_out': 0,
+                  'active_connections': 0,
+                  'total_connections': 0,
+                  'request_errors': 0}
+        kwargs.update(overrides)
+        return self._insert(session, models.ListenerStatistics, kwargs)
+
+    def create_pool(self, session, **overrides):
+        kwargs = {'project_id': self.FAKE_UUID_1,
+                  'id': self.FAKE_UUID_1,
+                  'protocol': constants.PROTOCOL_HTTP,
+                  'lb_algorithm': 
constants.LB_ALGORITHM_LEAST_CONNECTIONS, + 'provisioning_status': constants.ACTIVE, + 'operating_status': constants.ONLINE, + 'enabled': True, + 'tls_enabled': False} + kwargs.update(overrides) + return self._insert(session, models.Pool, kwargs) + + def create_session_persistence(self, session, pool_id, **overrides): + kwargs = {'pool_id': pool_id, + 'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, + 'cookie_name': 'cookie_name'} + kwargs.update(overrides) + return self._insert(session, models.SessionPersistence, kwargs) + + def create_health_monitor(self, session, pool_id, **overrides): + kwargs = {'id': pool_id, + 'pool_id': pool_id, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': 1, + 'timeout': 1, + 'fall_threshold': 1, + 'rise_threshold': 1, + 'enabled': True, + 'operating_status': constants.ONLINE, + 'provisioning_status': constants.ACTIVE, + 'project_id': self.FAKE_UUID_1} + kwargs.update(overrides) + return self._insert(session, models.HealthMonitor, kwargs) + + def create_member(self, session, pool_id, **overrides): + kwargs = {'project_id': self.FAKE_UUID_1, + 'id': self.FAKE_UUID_1, + 'pool_id': pool_id, + 'ip_address': '10.0.0.1', + 'protocol_port': 80, + 'provisioning_status': constants.ACTIVE, + 'operating_status': constants.ONLINE, + 'enabled': True, + 'backup': False} + kwargs.update(overrides) + return self._insert(session, models.Member, kwargs) + + def create_load_balancer(self, session, **overrides): + kwargs = {'project_id': self.FAKE_UUID_1, + 'id': self.FAKE_UUID_1, + 'provisioning_status': constants.ACTIVE, + 'operating_status': constants.ONLINE, + 'enabled': True, + 'server_group_id': self.FAKE_UUID_1} + kwargs.update(overrides) + return self._insert(session, models.LoadBalancer, kwargs) + + def create_vip(self, session, load_balancer_id, **overrides): + kwargs = {'load_balancer_id': load_balancer_id} + kwargs.update(overrides) + return self._insert(session, models.Vip, kwargs) + + def create_sni(self, session, **overrides): + kwargs = {'listener_id': self.FAKE_UUID_1, + 'tls_container_id': self.FAKE_UUID_1} + kwargs.update(overrides) + return self._insert(session, models.SNI, kwargs) + + def create_amphora(self, session, **overrides): + kwargs = {'id': self.FAKE_UUID_1, + 'compute_id': self.FAKE_UUID_1, + 'status': constants.ACTIVE, + 'vrrp_ip': self.FAKE_IP, + 'ha_ip': self.FAKE_IP, + 'vrrp_port_id': self.FAKE_UUID_1, + 'ha_port_id': self.FAKE_UUID_2, + 'lb_network_ip': self.FAKE_IP, + 'cert_expiration': timeutils.utcnow(), + 'cert_busy': False, + 'cached_zone': self.FAKE_AZ} + kwargs.update(overrides) + return self._insert(session, models.Amphora, kwargs) + + def create_amphora_health(self, session, **overrides): + kwargs = {'amphora_id': self.FAKE_UUID_1, + 'last_update': datetime.date.today(), + 'busy': True} + kwargs.update(overrides) + return self._insert(session, models.AmphoraHealth, kwargs) + + def create_l7policy(self, session, listener_id, **overrides): + kwargs = {'id': self.FAKE_UUID_1, + 'listener_id': listener_id, + 'action': constants.L7POLICY_ACTION_REJECT, + 'position': 1, + 'provisioning_status': constants.ACTIVE, + 'operating_status': constants.ONLINE, + 'enabled': True} + kwargs.update(overrides) + return self._insert(session, models.L7Policy, kwargs) + + def create_l7rule(self, session, l7policy_id, **overrides): + kwargs = {'id': self.FAKE_UUID_1, + 'l7policy_id': l7policy_id, + 'type': constants.L7RULE_TYPE_PATH, + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + 'value': '/api', + 'provisioning_status': constants.ACTIVE, 
+ 'operating_status': constants.ONLINE, + 'enabled': True} + kwargs.update(overrides) + return self._insert(session, models.L7Rule, kwargs) + + def create_listener_cidr(self, session, listener_id, cidr): + kwargs = {'listener_id': listener_id, 'cidr': cidr} + return self._insert(session, models.ListenerCidr, kwargs) + + def create_quotas(self, session, **overrides): + kwargs = {"project_id": self.FAKE_UUID_1} + kwargs.update(overrides) + return self._insert(session, models.Quotas, kwargs) + + +class PoolModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def test_create(self): + pool = self.create_pool(self.session) + self.assertEqual(f"Pool(id={pool.id!r}, name=None, " + f"project_id={pool.project_id!r}, " + f"provisioning_status='ACTIVE', protocol='HTTP', " + f"lb_algorithm='LEAST_CONNECTIONS', enabled=True)", + str(pool)) + + self.assertIsNotNone(pool.created_at) + self.assertIsNone(pool.updated_at) + + def test_update(self): + pool = self.create_pool(self.session) + self.assertIsNone(pool.updated_at) + + id = pool.id + pool.enabled = False + new_pool = self.session.query( + models.Pool).filter_by(id=id).first() + self.assertFalse(new_pool.enabled) + self.assertIsNotNone(new_pool.updated_at) + + def test_delete(self): + pool = self.create_pool(self.session) + id = pool.id + self.session.delete(pool) + self.session.commit() + new_pool = self.session.query( + models.Pool).filter_by(id=id).first() + self.assertIsNone(new_pool) + + def test_member_relationship(self): + pool = self.create_pool(self.session) + self.create_member(self.session, pool.id, id=self.FAKE_UUID_1, + ip_address="10.0.0.1") + self.create_member(self.session, pool.id, id=self.FAKE_UUID_2, + ip_address="10.0.0.2") + new_pool = self.session.query( + models.Pool).filter_by(id=pool.id).first() + self.assertIsNotNone(new_pool.members) + self.assertEqual(2, len(new_pool.members)) + self.assertIsInstance(new_pool.members[0], models.Member) + + def test_health_monitor_relationship(self): + pool = self.create_pool(self.session) + self.create_health_monitor(self.session, pool.id) + new_pool = self.session.query(models.Pool).filter_by( + id=pool.id).first() + self.assertIsNotNone(new_pool.health_monitor) + self.assertIsInstance(new_pool.health_monitor, + models.HealthMonitor) + + def test_session_persistence_relationship(self): + pool = self.create_pool(self.session) + self.create_session_persistence(self.session, pool_id=pool.id) + new_pool = self.session.query(models.Pool).filter_by( + id=pool.id).first() + self.assertIsNotNone(new_pool.session_persistence) + self.assertIsInstance(new_pool.session_persistence, + models.SessionPersistence) + + def test_listener_relationship(self): + pool = self.create_pool(self.session) + listener = self.create_listener(self.session, default_pool_id=pool.id) + new_pool = self.session.query(models.Pool).filter_by( + id=pool.id).first() + self.assertIsNotNone(new_pool.listeners) + self.assertIsInstance(new_pool.listeners, list) + self.assertIsInstance(new_pool.listeners[0], models.Listener) + self.assertIn(listener.id, [li.id for li in new_pool.listeners]) + + +class MemberModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.pool = self.create_pool(self.session) + + def test_create(self): + member = self.create_member(self.session, self.pool.id) + self.assertEqual(f"Member(id={member.id!r}, name=None, " + f"project_id={member.project_id!r}, " + f"provisioning_status='ACTIVE', " + f"ip_address='10.0.0.1', protocol_port=80, " + 
f"operating_status='ONLINE', weight=None, " + f"vnic_type=None)", + str(member)) + + self.assertIsNotNone(member.created_at) + self.assertIsNone(member.updated_at) + + def test_update(self): + member = self.create_member(self.session, self.pool.id) + self.assertIsNone(member.updated_at) + + member_id = member.id + member.enabled = False + self.session.commit() + + new_member = self.session.query( + models.Member).filter_by(id=member_id).first() + self.assertFalse(new_member.enabled) + self.assertIsNotNone(new_member.updated_at) + + def test_delete(self): + member = self.create_member(self.session, self.pool.id) + member_id = member.id + self.session.commit() + + self.session.delete(member) + self.session.commit() + + new_member = self.session.query( + models.Member).filter_by(id=member_id).first() + self.assertIsNone(new_member) + + def test_pool_relationship(self): + member = self.create_member(self.session, self.pool.id, + id=self.FAKE_UUID_1, + ip_address="10.0.0.1") + self.create_member(self.session, self.pool.id, id=self.FAKE_UUID_2, + ip_address="10.0.0.2") + new_member = self.session.query(models.Member).filter_by( + id=member.id).first() + self.assertIsNotNone(new_member.pool) + self.assertIsInstance(new_member.pool, models.Pool) + + +class SessionPersistenceModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.pool = self.create_pool(self.session) + + def test_create(self): + obj = self.create_session_persistence(self.session, self.pool.id) + self.assertEqual(f"SessionPersistence(cookie_name='cookie_name', " + f"persistence_granularity=None, " + f"persistence_timeout=None, pool_id={obj.pool_id!r}, " + f"type='HTTP_COOKIE')", str(obj)) + + def test_update(self): + session_persistence = self.create_session_persistence(self.session, + self.pool.id) + session_persistence.name = 'test1' + new_session_persistence = self.session.query( + models.SessionPersistence).filter_by(pool_id=self.pool.id).first() + self.assertEqual('test1', new_session_persistence.name) + + def test_delete(self): + session_persistence = self.create_session_persistence(self.session, + self.pool.id) + with self.session.begin(): + self.session.delete(session_persistence) + self.session.flush() + new_session_persistence = self.session.query( + models.SessionPersistence).filter_by(pool_id=self.pool.id).first() + self.assertIsNone(new_session_persistence) + + def test_pool_relationship(self): + self.create_session_persistence(self.session, self.pool.id) + new_persistence = self.session.query( + models.SessionPersistence).filter_by(pool_id=self.pool.id).first() + self.assertIsNotNone(new_persistence.pool) + self.assertIsInstance(new_persistence.pool, models.Pool) + + +class ListenerModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def test_create(self): + listener = self.create_listener(self.session) + self.assertEqual(f"Listener(id={listener.id!r}, default_pool=None, " + f"name=None, project_id={listener.project_id!r}, " + f"protocol='HTTP', protocol_port=80, enabled=True)", + str(listener)) + + self.assertIsNotNone(listener.created_at) + self.assertIsNone(listener.updated_at) + + def test_create_with_timeouts(self): + timeouts = { + 'timeout_client_data': 1, + 'timeout_member_connect': 2, + 'timeout_member_data': constants.MIN_TIMEOUT, + 'timeout_tcp_inspect': constants.MAX_TIMEOUT, + } + listener = self.create_listener(self.session, **timeouts) + for item in timeouts: + self.assertEqual(timeouts[item], getattr(listener, item)) + + def test_update(self): + listener = 
self.create_listener(self.session) + self.assertIsNone(listener.updated_at) + + listener_id = listener.id + listener.name = 'test1' + new_listener = self.session.query( + models.Listener).filter_by(id=listener_id).first() + self.assertEqual('test1', new_listener.name) + self.assertIsNotNone(new_listener.updated_at) + + def test_update_with_timeouts(self): + listener = self.create_listener(self.session) + listener_id = listener.id + + timeouts = { + 'timeout_client_data': 1, + 'timeout_member_connect': 2, + 'timeout_member_data': 3, + 'timeout_tcp_inspect': 4, + } + + for item in timeouts: + setattr(listener, item, timeouts[item]) + new_listener = self.session.query( + models.Listener).filter_by(id=listener_id).first() + for item in timeouts: + self.assertEqual(timeouts[item], getattr(new_listener, item)) + + def test_delete(self): + listener = self.create_listener(self.session) + listener_id = listener.id + self.session.delete(listener) + self.session.commit() + new_listener = self.session.query( + models.Listener).filter_by(id=listener_id).first() + self.assertIsNone(new_listener) + + def test_load_balancer_relationship(self): + lb = self.create_load_balancer(self.session) + listener = self.create_listener(self.session, load_balancer_id=lb.id) + new_listener = self.session.query( + models.Listener).filter_by(id=listener.id).first() + self.assertIsNotNone(new_listener.load_balancer) + self.assertIsInstance(new_listener.load_balancer, models.LoadBalancer) + + def test_default_pool_relationship(self): + pool = self.create_pool(self.session) + listener = self.create_listener(self.session, default_pool_id=pool.id) + new_listener = self.session.query(models.Listener).filter_by( + id=listener.id).first() + self.assertIsNotNone(new_listener.default_pool) + self.assertIsInstance(new_listener.default_pool, models.Pool) + self.assertIsInstance(new_listener.pools, list) + self.assertIn(pool.id, [p.id for p in new_listener.pools]) + + def test_sni_relationship(self): + listener = self.create_listener(self.session) + self.create_sni(self.session, listener_id=listener.id, + tls_container_id=self.FAKE_UUID_1) + self.create_sni(self.session, listener_id=listener.id, + tls_container_id=self.FAKE_UUID_2) + new_listener = self.session.query(models.Listener).filter_by( + id=listener.id).first() + self.assertIsNotNone(new_listener.sni_containers) + self.assertEqual(2, len(new_listener.sni_containers)) + + def test_pools_list(self): + pool = self.create_pool(self.session) + listener = self.create_listener(self.session, default_pool_id=pool.id) + new_listener = self.session.query(models.Listener).filter_by( + id=listener.id).first() + self.assertIsNotNone(new_listener.pools) + self.assertIsInstance(new_listener.pools, list) + self.assertIsInstance(new_listener.pools[0], models.Pool) + + +class ListenerStatisticsModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.listener = self.create_listener(self.session) + self.amphora = self.create_amphora(self.session) + + def test_create(self): + obj = self.create_listener_statistics(self.session, self.listener.id, + self.amphora.id) + self.assertEqual(f"ListenerStatistics(active_connections=0, " + f"amphora_id={obj.amphora_id!r}, bytes_in=0, " + f"bytes_out=0, listener_id={obj.listener_id!r}, " + f"request_errors=0, total_connections=0)", str(obj)) + + def test_create_with_negative_int(self): + overrides = {'bytes_in': -1} + self.assertRaises(ValueError, + self.create_listener_statistics, + self.session, self.listener.id, + 
self.amphora.id, **overrides) + + def test_update(self): + stats = self.create_listener_statistics(self.session, self.listener.id, + self.amphora.id) + stats.name = 'test1' + new_stats = self.session.query(models.ListenerStatistics).filter_by( + listener_id=self.listener.id).first() + self.assertEqual('test1', new_stats.name) + + def test_delete(self): + stats = self.create_listener_statistics(self.session, self.listener.id, + self.amphora.id) + with self.session.begin(): + self.session.delete(stats) + self.session.flush() + new_stats = self.session.query(models.ListenerStatistics).filter_by( + listener_id=self.listener.id).first() + self.assertIsNone(new_stats) + + +class HealthMonitorModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.pool = self.create_pool(self.session) + + def test_create(self): + obj = self.create_health_monitor(self.session, self.pool.id) + self.assertEqual(f"HealthMonitor(id={obj.id!r}, name=None, " + f"project_id={obj.project_id!r}, type='HTTP', " + f"enabled=True)", str(obj)) + + def test_update(self): + health_monitor = self.create_health_monitor(self.session, self.pool.id) + with self.session.begin(): + health_monitor.name = 'test1' + new_health_monitor = self.session.query( + models.HealthMonitor).filter_by( + pool_id=health_monitor.pool_id).first() + self.assertEqual('test1', new_health_monitor.name) + + def test_delete(self): + health_monitor = self.create_health_monitor(self.session, self.pool.id) + with self.session.begin(): + self.session.delete(health_monitor) + self.session.flush() + new_health_monitor = self.session.query( + models.HealthMonitor).filter_by( + pool_id=health_monitor.pool_id).first() + self.assertIsNone(new_health_monitor) + + def test_pool_relationship(self): + health_monitor = self.create_health_monitor(self.session, self.pool.id) + new_health_monitor = self.session.query( + models.HealthMonitor).filter_by( + pool_id=health_monitor.pool_id).first() + self.assertIsNotNone(new_health_monitor.pool) + self.assertIsInstance(new_health_monitor.pool, models.Pool) + + +class LoadBalancerModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def test_create(self): + load_balancer = self.create_load_balancer(self.session) + self.assertEqual(f"LoadBalancer(id={load_balancer.id!r}, name=None, " + f"project_id={load_balancer.project_id!r}, " + f"vip=None, provisioning_status='ACTIVE', " + f"operating_status='ONLINE', provider=None)", + str(load_balancer)) + + self.assertIsNotNone(load_balancer.created_at) + self.assertIsNone(load_balancer.updated_at) + + def test_update(self): + load_balancer = self.create_load_balancer(self.session) + self.assertIsNone(load_balancer.updated_at) + + lb_id = load_balancer.id + load_balancer.enabled = False + new_load_balancer = self.session.query( + models.LoadBalancer).filter_by(id=lb_id).first() + self.assertFalse(new_load_balancer.enabled) + self.assertIsNotNone(new_load_balancer.updated_at) + + def test_delete(self): + load_balancer = self.create_load_balancer(self.session) + lb_id = load_balancer.id + self.session.delete(load_balancer) + self.session.commit() + new_load_balancer = self.session.query( + models.LoadBalancer).filter_by(id=lb_id).first() + self.assertIsNone(new_load_balancer) + + def test_listener_relationship(self): + load_balancer = self.create_load_balancer(self.session) + self.create_listener(self.session, load_balancer_id=load_balancer.id) + new_load_balancer = self.session.query( + models.LoadBalancer).filter_by(id=load_balancer.id).first() + 
self.assertIsNotNone(new_load_balancer.listeners) + self.assertEqual(1, len(new_load_balancer.listeners)) + + def test_load_balancer_amphora_relationship(self): + load_balancer = self.create_load_balancer(self.session) + amphora = self.create_amphora(self.session) + self.associate_amphora(load_balancer, amphora) + new_load_balancer = self.session.query( + models.LoadBalancer).filter_by(id=load_balancer.id).first() + self.assertIsNotNone(new_load_balancer.amphorae) + self.assertEqual(1, len(new_load_balancer.amphorae)) + + def test_load_balancer_vip_relationship(self): + load_balancer = self.create_load_balancer(self.session) + self.create_vip(self.session, load_balancer.id) + new_load_balancer = self.session.query( + models.LoadBalancer).filter_by(id=load_balancer.id).first() + self.assertIsNotNone(new_load_balancer.vip) + self.assertIsInstance(new_load_balancer.vip, models.Vip) + + +class VipModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.load_balancer = self.create_load_balancer(self.session) + + def test_create(self): + obj = self.create_vip(self.session, self.load_balancer.id) + self.assertEqual(f"Vip(ip_address=None, " + f"load_balancer_id={obj.load_balancer_id!r}, " + f"network_id=None, octavia_owned=None, port_id=None, " + f"qos_policy_id=None, subnet_id=None, " + f"vnic_type=None)", str(obj)) + + def test_update(self): + vip = self.create_vip(self.session, self.load_balancer.id) + with self.session.begin(): + vip.ip_address = "10.0.0.1" + new_vip = self.session.query(models.Vip).filter_by( + load_balancer_id=self.load_balancer.id).first() + self.assertEqual("10.0.0.1", new_vip.ip_address) + + def test_delete(self): + vip = self.create_vip(self.session, self.load_balancer.id) + with self.session.begin(): + self.session.delete(vip) + self.session.flush() + new_vip = self.session.query(models.Vip).filter_by( + load_balancer_id=vip.load_balancer_id).first() + self.assertIsNone(new_vip) + + def test_vip_load_balancer_relationship(self): + self.create_vip(self.session, self.load_balancer.id) + new_vip = self.session.query(models.Vip).filter_by( + load_balancer_id=self.load_balancer.id).first() + self.assertIsNotNone(new_vip.load_balancer) + self.assertIsInstance(new_vip.load_balancer, models.LoadBalancer) + + +class SNIModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.listener = self.create_listener(self.session) + + def test_create(self): + obj = self.create_sni(self.session, listener_id=self.listener.id) + self.assertEqual(f"SNI(listener_id={obj.listener_id!r}, " + f"position=None, " + f"tls_container_id={obj.tls_container_id!r})", + str(obj)) + + def test_update(self): + sni = self.create_sni(self.session, listener_id=self.listener.id) + sni.tls_container_id = self.FAKE_UUID_2 + new_sni = self.session.query( + models.SNI).filter_by(listener_id=self.FAKE_UUID_1).first() + self.assertEqual(self.FAKE_UUID_2, new_sni.tls_container_id) + + def test_delete(self): + sni = self.create_sni(self.session, listener_id=self.listener.id) + with self.session.begin(): + self.session.delete(sni) + self.session.flush() + new_sni = self.session.query( + models.SNI).filter_by(listener_id=self.listener.id).first() + self.assertIsNone(new_sni) + + def test_sni_relationship(self): + self.create_sni(self.session, listener_id=self.listener.id) + new_sni = self.session.query(models.SNI).filter_by( + listener_id=self.listener.id).first() + self.assertIsNotNone(new_sni.listener) + 
self.assertIsInstance(new_sni.listener, models.Listener) + + +class AmphoraModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.load_balancer = self.create_load_balancer(self.session) + + def test_create(self): + obj = self.create_amphora(self.session) + self.assertEqual(f"Amphora(id={obj.id!r}, load_balancer_id=None, " + f"status='ACTIVE', role=None, " + f"lb_network_ip='10.0.0.1', vrrp_ip='10.0.0.1')", + str(obj)) + + def test_update(self): + amphora = self.create_amphora( + self.session) + amphora.amphora_id = self.FAKE_UUID_2 + new_amphora = self.session.query(models.Amphora).filter_by( + id=amphora.id).first() + self.assertEqual(self.FAKE_UUID_2, new_amphora.amphora_id) + + def test_delete(self): + amphora = self.create_amphora( + self.session) + with self.session.begin(): + self.session.delete(amphora) + self.session.flush() + new_amphora = self.session.query( + models.Amphora).filter_by(id=amphora.id).first() + self.assertIsNone(new_amphora) + + def test_load_balancer_relationship(self): + amphora = self.create_amphora(self.session) + self.associate_amphora(self.load_balancer, amphora) + new_amphora = self.session.query(models.Amphora).filter_by( + id=amphora.id).first() + self.assertIsNotNone(new_amphora.load_balancer) + self.assertIsInstance(new_amphora.load_balancer, models.LoadBalancer) + + +class AmphoraHealthModelTest(base.OctaviaDBTestBase, ModelTestMixin): + def setUp(self): + super().setUp() + self.amphora = self.create_amphora(self.session) + + def test_create(self): + obj = self.create_amphora_health(self.session) + self.assertEqual(f"AmphoraHealth(amphora_id={obj.amphora_id!r}, " + f"busy=True, last_update={obj.last_update!r})", + str(obj)) + + def test_update(self): + amphora_health = self.create_amphora_health(self.session) + d = datetime.date.today() + newdate = d.replace(day=d.day) + amphora_health.last_update = newdate + new_amphora_health = self.session.query( + models.AmphoraHealth).filter_by( + amphora_id=amphora_health.amphora_id).first() + self.assertEqual(newdate, new_amphora_health.last_update) + + def test_delete(self): + amphora_health = self.create_amphora_health( + self.session) + with self.session.begin(): + self.session.delete(amphora_health) + self.session.flush() + new_amphora_health = self.session.query( + models.AmphoraHealth).filter_by( + amphora_id=amphora_health.amphora_id).first() + self.assertIsNone(new_amphora_health) + + +class L7PolicyModelTest(base.OctaviaDBTestBase, ModelTestMixin): + def setUp(self): + super().setUp() + self.listener = self.create_listener(self.session) + + def test_create(self): + l7policy = self.create_l7policy(self.session, self.listener.id) + self.assertEqual(f"L7Policy(id={l7policy.id!r}, name=None, " + f"project_id=None, provisioning_status='ACTIVE', " + f"action='http://github.com/REJECT', position=1, enabled=True)", + str(l7policy)) + self.assertIsInstance(l7policy, models.L7Policy) + + def test_update(self): + l7policy = self.create_l7policy(self.session, self.listener.id) + pool = self.create_pool(self.session) + with self.session.begin(): + l7policy.action = constants.L7POLICY_ACTION_REDIRECT_TO_POOL + l7policy.redirect_pool_id = pool.id + new_l7policy = self.session.query( + models.L7Policy).filter_by(id=l7policy.id).first() + self.assertEqual(pool.id, new_l7policy.redirect_pool_id) + self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + new_l7policy.action) + + def test_delete(self): + l7policy = self.create_l7policy(self.session, self.listener.id) 
+ l7policy_id = l7policy.id + self.session.delete(l7policy) + self.session.commit() + new_l7policy = self.session.query( + models.L7Policy).filter_by(id=l7policy_id).first() + self.assertIsNone(new_l7policy) + + def test_l7rule_relationship(self): + l7policy = self.create_l7policy(self.session, self.listener.id) + self.create_l7rule( + self.session, l7policy.id, id=self.FAKE_UUID_1, + type=constants.L7RULE_TYPE_HOST_NAME, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + value='www.example.com') + self.create_l7rule( + self.session, l7policy.id, id=self.FAKE_UUID_2, + type=constants.L7RULE_TYPE_PATH, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + value='/api') + new_l7policy = self.session.query( + models.L7Policy).filter_by(id=l7policy.id).first() + self.assertIsNotNone(new_l7policy.l7rules) + self.assertEqual(2, len(new_l7policy.l7rules)) + self.assertIsInstance(new_l7policy.l7rules[0], models.L7Rule) + self.assertIsInstance(new_l7policy.l7rules[1], models.L7Rule) + + def test_pool_relationship(self): + l7policy = self.create_l7policy(self.session, self.listener.id) + self.create_pool(self.session, id=self.FAKE_UUID_2) + with self.session.begin(): + l7policy.action = constants.L7POLICY_ACTION_REDIRECT_TO_POOL + l7policy.redirect_pool_id = self.FAKE_UUID_2 + new_l7policy = self.session.query( + models.L7Policy).filter_by(id=l7policy.id).first() + self.assertIsNotNone(new_l7policy.redirect_pool) + self.assertIsInstance(new_l7policy.redirect_pool, models.Pool) + + def test_listener_relationship(self): + l7policy = self.create_l7policy(self.session, self.listener.id, + id=self.FAKE_UUID_1) + self.create_l7policy(self.session, self.listener.id, + id=self.FAKE_UUID_2, position=1) + new_l7policy = self.session.query(models.L7Policy).filter_by( + id=l7policy.id).first() + self.assertIsNotNone(new_l7policy.listener) + self.assertIsInstance(new_l7policy.listener, models.Listener) + + def test_listeners_pools_refs_with_l7policy_with_l7rule(self): + pool = self.create_pool(self.session, id=self.FAKE_UUID_2) + l7policy = self.create_l7policy( + self.session, self.listener.id, + action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + redirect_pool_id=pool.id) + self.create_l7rule(self.session, l7policy.id, id=self.FAKE_UUID_1) + new_pool = self.session.query(models.Pool).filter_by( + id=pool.id).first() + new_listener = self.session.query(models.Listener).filter_by( + id=self.listener.id).first() + self.assertIsInstance(new_pool.listeners, list) + self.assertIn(new_listener.id, [li.id for li in new_pool.listeners]) + self.assertIsInstance(new_listener.pools, list) + self.assertIn(new_pool.id, [p.id for p in new_listener.pools]) + + def test_listeners_pools_refs_with_l7policy_without_l7rule(self): + pool = self.create_pool(self.session, id=self.FAKE_UUID_2) + self.create_l7policy( + self.session, self.listener.id, + action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + redirect_pool_id=pool.id) + new_pool = self.session.query(models.Pool).filter_by( + id=pool.id).first() + new_listener = self.session.query(models.Listener).filter_by( + id=self.listener.id).first() + self.assertIsInstance(new_pool.listeners, list) + self.assertNotIn(new_listener.id, [li.id for li in new_pool.listeners]) + self.assertIsInstance(new_listener.pools, list) + self.assertNotIn(new_pool.id, [p.id for p in new_listener.pools]) + + def test_listeners_pools_refs_with_disabled_l7policy(self): + pool = self.create_pool(self.session, id=self.FAKE_UUID_2) + l7policy = self.create_l7policy( + self.session, 
self.listener.id, + action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + redirect_pool_id=pool.id, enabled=False) + self.create_l7rule(self.session, l7policy.id, id=self.FAKE_UUID_1) + new_pool = self.session.query(models.Pool).filter_by( + id=pool.id).first() + new_listener = self.session.query(models.Listener).filter_by( + id=self.listener.id).first() + self.assertIsInstance(new_pool.listeners, list) + self.assertNotIn(new_listener.id, [li.id for li in new_pool.listeners]) + self.assertIsInstance(new_listener.pools, list) + self.assertNotIn(new_pool.id, [p.id for p in new_listener.pools]) + + +class L7RuleModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.listener = self.create_listener(self.session) + self.l7policy = self.create_l7policy(self.session, self.listener.id) + + def test_create(self): + l7rule = self.create_l7rule(self.session, self.l7policy.id) + self.assertEqual(f"L7Rule(id={l7rule.id!r}, project_id=None, " + f"provisioning_status='ACTIVE', type='PATH', " + f"key=None, value='/api', invert=False, " + f"enabled=True)", str(l7rule)) + self.assertIsInstance(l7rule, models.L7Rule) + + def test_update(self): + l7rule = self.create_l7rule(self.session, self.l7policy.id) + l7rule_id = l7rule.id + l7rule.value = '/images' + new_l7rule = self.session.query( + models.L7Rule).filter_by(id=l7rule_id).first() + self.assertEqual('/images', new_l7rule.value) + + def test_delete(self): + l7rule = self.create_l7rule(self.session, self.l7policy.id) + l7rule_id = l7rule.id + self.session.delete(l7rule) + self.session.commit() + new_l7rule = self.session.query( + models.L7Rule).filter_by(id=l7rule_id).first() + self.assertIsNone(new_l7rule) + + def test_l7policy_relationship(self): + l7rule = self.create_l7rule( + self.session, self.l7policy.id, id=self.FAKE_UUID_1, + type=constants.L7RULE_TYPE_HOST_NAME, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + value='www.example.com') + self.create_l7rule( + self.session, self.l7policy.id, id=self.FAKE_UUID_2, + type=constants.L7RULE_TYPE_PATH, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + value='/api') + new_l7rule = self.session.query(models.L7Rule).filter_by( + id=l7rule.id).first() + self.assertIsNotNone(new_l7rule.l7policy) + self.assertIsInstance(new_l7rule.l7policy, models.L7Policy) + + +class TestDataModelConversionTest(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.lb = self.create_load_balancer(self.session) + self.amphora = self.create_amphora(self.session) + self.associate_amphora(self.lb, self.amphora) + self.amphora_health = self.create_amphora_health(self.session) + self.pool = self.create_pool(self.session, load_balancer_id=self.lb.id) + self.hm = self.create_health_monitor(self.session, self.pool.id) + self.member = self.create_member(self.session, self.pool.id, + id=self.FAKE_UUID_1, + ip_address='10.0.0.1') + self.sp = self.create_session_persistence(self.session, self.pool.id) + self.vip = self.create_vip(self.session, self.lb.id) + self.listener = self.create_listener(self.session, + default_pool_id=self.pool.id, + load_balancer_id=self.lb.id) + self.stats = self.create_listener_statistics(self.session, + self.listener.id, + self.amphora.id) + self.sni = self.create_sni(self.session, listener_id=self.listener.id) + self.l7policy = self.create_l7policy( + self.session, listener_id=self.listener.id, + action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + redirect_pool_id=self.pool.id) + self.l7rule = 
self.create_l7rule(self.session,
+                                         l7policy_id=self.l7policy.id)
+        self.listener_cidr = self.create_listener_cidr(
+            self.session, listener_id=self.listener.id, cidr='10.0.1.0/24')
+
+    @staticmethod
+    def _get_unique_key(obj):
+        """Return a unique key for the given object for data model building."""
+        # First handle all objects with their own ID, then handle subordinate
+        # objects.
+        if obj.__class__.__name__ in ['Member', 'Pool', 'LoadBalancer',
+                                      'Listener', 'Amphora', 'L7Policy',
+                                      'L7Rule']:
+            return obj.__class__.__name__ + obj.id
+        elif obj.__class__.__name__ in ['SessionPersistence', 'HealthMonitor']:
+            return obj.__class__.__name__ + obj.pool_id
+        elif obj.__class__.__name__ in ['ListenerStatistics']:
+            return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
+        elif obj.__class__.__name__ in ['ListenerCidr']:
+            return obj.__class__.__name__ + obj.listener_id + obj.cidr
+        elif obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
+            return obj.__class__.__name__ + obj.load_balancer_id
+        elif obj.__class__.__name__ in ['AmphoraHealth']:
+            return obj.__class__.__name__ + obj.amphora_id
+        elif obj.__class__.__name__ in ['SNI']:
+            return (obj.__class__.__name__ +
+                    obj.listener_id + obj.tls_container_id)
+        else:
+            raise NotImplementedError
+
+    def count_graph_nodes(self, node, _graph_nodes=None):
+        """Count the connected BaseDataModel nodes in a graph.
+
+        Starts from the given node; the node should be a data model in any
+        case.
+        """
+        _graph_nodes = _graph_nodes or []
+        total = 0
+        mykey = self._get_unique_key(node)
+        if mykey in _graph_nodes:
+            # Seen this node already
+            return total
+        else:
+            total += 1
+            _graph_nodes.append(mykey)
+        attr_names = [attr_name for attr_name in dir(node)
+                      if not attr_name.startswith('_')]
+        for attr_name in attr_names:
+            attr = getattr(node, attr_name)
+            if isinstance(attr, data_models.BaseDataModel):
+                total += self.count_graph_nodes(
+                    attr, _graph_nodes=_graph_nodes)
+            elif isinstance(attr, (collections.InstrumentedList, list)):
+                for item in attr:
+                    if isinstance(item, data_models.BaseDataModel):
+                        total += self.count_graph_nodes(
+                            item, _graph_nodes=_graph_nodes)
+        return total
+
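    # [Editor's aside, not part of the original patch] A worked example of
    # the key scheme above, assuming illustrative ids 'l1' and 'a1': objects
    # with their own id use it directly, while subordinate rows build a
    # composite key from their owners' ids:
    #
    #     stats = models.ListenerStatistics(listener_id='l1',
    #                                       amphora_id='a1')
    #     self._get_unique_key(stats)  # -> 'ListenerStatisticsl1a1'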
+    def test_unique_key_generation(self):
+        self.assertEqual(self._get_unique_key(self.lb),
+                         self.lb.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.amphora),
+                         self.amphora.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.amphora_health),
+                         self.amphora_health.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.pool),
+                         self.pool.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.hm),
+                         self.hm.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.member),
+                         self.member.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.sp),
+                         self.sp.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.vip),
+                         self.vip.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.listener),
+                         self.listener.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.stats),
+                         self.stats.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.sni),
+                         self.sni.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.l7policy),
+                         self.l7policy.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.l7rule),
+                         self.l7rule.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.listener_cidr),
+                         self.listener_cidr.to_data_model()._get_unique_key())
+
+    def test_graph_completeness(self):
+        # Generate equivalent graphs starting arbitrarily from different
+        # nodes within the graph; make sure the resulting graphs all contain
+        # the same number of nodes. This also checks the default value,
+        # recursion_depth=None.
+        lb_dm = self.session.query(models.LoadBalancer).filter_by(
+            id=self.lb.id).first().to_data_model()
+        lb_graph_count = self.count_graph_nodes(lb_dm)
+        p_dm = self.session.query(models.Pool).filter_by(
+            id=self.pool.id).first().to_data_model()
+        p_graph_count = self.count_graph_nodes(p_dm)
+        mem_dm = self.session.query(models.Member).filter_by(
+            id=self.member.id).first().to_data_model()
+        mem_graph_count = self.count_graph_nodes(mem_dm)
+        self.assertNotEqual(0, lb_graph_count)
+        self.assertNotEqual(1, lb_graph_count)
+        self.assertEqual(lb_graph_count, p_graph_count)
+        self.assertEqual(lb_graph_count, mem_graph_count)
+
+    def _get_dms_for_recursion_depth(self, recursion_depth):
+        lb_dm = self.session.query(models.LoadBalancer).filter_by(
+            id=self.lb.id).first().to_data_model(
+            recursion_depth=recursion_depth)
+        p_dm = self.session.query(models.Pool).filter_by(
+            id=self.pool.id).first().to_data_model(
+            recursion_depth=recursion_depth)
+        mem_dm = self.session.query(models.Member).filter_by(
+            id=self.member.id).first().to_data_model(
+            recursion_depth=recursion_depth)
+        return lb_dm, p_dm, mem_dm
+
+    def _get_nodes_count_for_dms(self, lb_dm, p_dm, mem_dm):
+        return (
+            self.count_graph_nodes(lb_dm),
+            self.count_graph_nodes(p_dm),
+            self.count_graph_nodes(mem_dm),
+        )
+
+    def test_graph_completeness_with_recursion_depth_equal_zero(self):
+        lb_dm, p_dm, mem_dm = self._get_dms_for_recursion_depth(
+            recursion_depth=0
+        )
+        lb_graph_count, p_graph_count, mem_graph_count = (
+            self._get_nodes_count_for_dms(lb_dm, p_dm, mem_dm)
+        )
+        self.assertNotEqual(0, lb_graph_count)
+        # recursion_depth equal to 0 means that only the current node is
+        # handled; there is no recursion.
+        self.assertEqual(1, lb_graph_count)
+        self.assertEqual(1, p_graph_count)
+        self.assertEqual(1, mem_graph_count)
+
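    # [Editor's aside, not part of the original patch] The recursion_depth
    # contract pinned down by the tests above and below, in one hypothetical
    # snippet:
    #
    #     lb_db = self.session.query(models.LoadBalancer).first()
    #     lb_db.to_data_model(recursion_depth=0)   # only the LB node itself
    #     lb_db.to_data_model(recursion_depth=1)   # LB plus direct children
    #     lb_db.to_data_model()                    # default None: full graph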
+    def test_graph_completeness_with_recursion_depth_equal_one(self):
+        lb_dm, p_dm, mem_dm = self._get_dms_for_recursion_depth(
+            recursion_depth=1
+        )
+        lb_graph_count, p_graph_count, mem_graph_count = (
+            self._get_nodes_count_for_dms(lb_dm, p_dm, mem_dm)
+        )
+        self.assertNotEqual(0, lb_graph_count)
+        self.assertNotEqual(1, lb_graph_count)
+        self.assertNotEqual(1, p_graph_count)
+        self.assertNotEqual(1, mem_graph_count)
+        # The node count is different for each node type due to the
+        # different number of related nodes.
+        self.assertEqual(5, lb_graph_count)
+        self.assertEqual(7, p_graph_count)
+        self.assertEqual(2, mem_graph_count)
+
+    def test_graph_completeness_with_recursion_depth_huge(self):
+        lb_dm, p_dm, mem_dm = self._get_dms_for_recursion_depth(
+            recursion_depth=10
+        )
+        lb_graph_count, p_graph_count, mem_graph_count = (
+            self._get_nodes_count_for_dms(lb_dm, p_dm, mem_dm)
+        )
+        # recursion_depth=None is the default value, so it is equivalent to
+        # running without a limit on recursion.
+        lb_dm_none, p_dm_none, mem_dm_none = self._get_dms_for_recursion_depth(
+            recursion_depth=None
+        )
+        lb_graph_count_none, p_graph_count_none, mem_graph_count_none = (
+            self._get_nodes_count_for_dms(lb_dm_none, p_dm_none, mem_dm_none)
+        )
+        self.assertNotEqual(0, lb_graph_count)
+        self.assertNotEqual(1, lb_graph_count)
+        self.assertEqual(lb_graph_count, p_graph_count)
+        self.assertEqual(lb_graph_count, mem_graph_count)
+
+        # A huge recursion_depth is enough to iterate through all nodes in
+        # the graph.
+        self.assertEqual(
+            (lb_graph_count, p_graph_count, mem_graph_count),
+            (lb_graph_count_none, p_graph_count_none, mem_graph_count_none)
+        )
+
+    def test_data_model_graph_traversal(self):
+        lb_dm = self.session.query(models.LoadBalancer).filter_by(
+            id=self.lb.id).first().to_data_model()
+        # This is an arbitrary traversal that covers one of each type
+        # of parent and child relationship.
+        lb_id = (lb_dm.listeners[0].default_pool.members[0].pool.
+                 session_persistence.pool.health_monitor.pool.listeners[0].
+                 sni_containers[0].listener.load_balancer.
+                 listeners[0].load_balancer.pools[0].listeners[0].
+                 load_balancer.listeners[0].pools[0].load_balancer.vip.
+                 load_balancer.id)
+        self.assertEqual(lb_dm.id, lb_id)
+        mem_dm = self.session.query(models.Member).filter_by(
+            id=self.member.id).first().to_data_model()
+        # Same as the above, but we generate the graph starting with an
+        # arbitrary member.
+        m_lb_id = (mem_dm.pool.listeners[0].load_balancer.vip.load_balancer.
+                   pools[0].session_persistence.pool.health_monitor.pool.
+                   listeners[0].sni_containers[0].listener.
+                   load_balancer.pools[0].members[0].pool.load_balancer.id)
+        self.assertEqual(lb_dm.id, m_lb_id)
+
+    def test_data_model_graph_traversal_with_recursion_depth_zero(self):
+        lb_dm, p_dm, mem_dm = self._get_dms_for_recursion_depth(
+            recursion_depth=0
+        )
+        # Traversal is not possible because related resources were not
+        # loaded; with recursion_depth=0 there is no recursion.
+        self.assertEqual([], lb_dm.listeners)
+        self.assertEqual([], lb_dm.pools)
+        self.assertIsNone(lb_dm.vip)
+        self.assertEqual([], lb_dm.amphorae)
+        # No nested objects for Pool
+        self.assertEqual([], p_dm.listeners)
+        self.assertIsNone(p_dm.load_balancer)
+        self.assertIsNone(p_dm.session_persistence)
+        self.assertIsNone(p_dm.health_monitor)
+        self.assertEqual([], p_dm.members)
+        self.assertEqual([], p_dm.l7policies)
+        # No nested objects for Member
+        self.assertIsNone(mem_dm.pool)
+
+    def test_data_model_graph_traversal_with_recursion_depth_one(self):
+        lb_dm, p_dm, mem_dm = self._get_dms_for_recursion_depth(
+            recursion_depth=1
+        )
+        # One-hop resources are available for the LB
+        self.assertEqual(1, len(lb_dm.listeners))
+        self.assertEqual(1, len(lb_dm.pools))
+        self.assertIsNotNone(lb_dm.vip)
+        self.assertEqual(1, len(lb_dm.amphorae))
+        # Second-hop resources are not available for the LB
+        self.assertEqual([], lb_dm.pools[0].listeners)
+        # One-hop resources are available for the Pool
+        self.assertEqual(1, len(p_dm.listeners))
+        self.assertIsNotNone(p_dm.load_balancer)
+        self.assertIsNotNone(p_dm.session_persistence)
+        self.assertIsNotNone(p_dm.health_monitor)
+        self.assertEqual(1, len(p_dm.members))
+        self.assertEqual(1, len(p_dm.l7policies))
+        # Second-hop resources are not available for the Pool
+        self.assertEqual([], p_dm.load_balancer.listeners)
+        # One-hop resources are available for the Member
+        self.assertIsNotNone(mem_dm.pool)
+        # Second-hop resources are not available for the Member
+        self.assertEqual([], mem_dm.pool.listeners)
+
+    def test_update_data_model_listener_default_pool_id(self):
+        lb_dm = self.create_load_balancer(
+            self.session, id=uuidutils.generate_uuid()).to_data_model()
+        pool1_dm = self.create_pool(
+            self.session, id=uuidutils.generate_uuid(),
+            load_balancer_id=lb_dm.id).to_data_model()
+        pool2_dm = self.create_pool(
+            self.session, id=uuidutils.generate_uuid(),
+            load_balancer_id=lb_dm.id).to_data_model()
+        listener_dm = self.create_listener(
+            self.session, id=uuidutils.generate_uuid(),
+            load_balancer_id=lb_dm.id,
+            default_pool_id=pool1_dm.id).to_data_model()
+        self.assertEqual(pool1_dm.id, listener_dm.default_pool.id)
+        listener_dm.update({'default_pool_id': pool2_dm.id})
+        self.assertEqual(listener_dm.default_pool.id, pool2_dm.id)
+
+    def test_load_balancer_tree(self):
+        lb_db = self.session.query(models.LoadBalancer).filter_by(
+            id=self.lb.id).first()
+        self.check_load_balancer(lb_db.to_data_model())
+
+    def test_vip_tree(self):
+        vip_db = self.session.query(models.Vip).filter_by(
+            load_balancer_id=self.lb.id).first()
+        self.check_vip(vip_db.to_data_model())
+
+    def test_listener_tree(self):
+        listener_db = self.session.query(models.Listener).filter_by(
+            id=self.listener.id).first()
+        self.check_listener(listener_db.to_data_model())
+
+    def test_sni_tree(self):
+        sni_db = self.session.query(models.SNI).filter_by(
+            listener_id=self.listener.id).first()
+        self.check_sni(sni_db.to_data_model())
+
+    def test_listener_statistics_tree(self):
+        stats_db = self.session.query(models.ListenerStatistics).filter_by(
+            listener_id=self.listener.id).first()
+        self.check_listener_statistics(stats_db.to_data_model())
+
+    def 
test_pool_tree(self): + pool_db = self.session.query(models.Pool).filter_by( + id=self.pool.id).first() + self.check_pool(pool_db.to_data_model()) + + def test_session_persistence_tree(self): + sp_db = self.session.query(models.SessionPersistence).filter_by( + pool_id=self.pool.id).first() + self.check_session_persistence(sp_db.to_data_model()) + + def test_health_monitor_tree(self): + hm_db = self.session.query(models.HealthMonitor).filter_by( + pool_id=self.hm.pool_id).first() + self.check_health_monitor(hm_db.to_data_model()) + + def test_member_tree(self): + member_db = self.session.query(models.Member).filter_by( + id=self.member.id).first() + self.check_member(member_db.to_data_model()) + + def test_l7policy_tree(self): + l7policy_db = self.session.query(models.L7Policy).filter_by( + id=self.l7policy.id).first() + self.check_l7policy(l7policy_db.to_data_model()) + + def test_l7rule_tree(self): + l7rule_db = self.session.query(models.L7Rule).filter_by( + id=self.l7rule.id).first() + self.check_l7rule(l7rule_db.to_data_model()) + + def check_load_balancer(self, lb, check_listeners=True, + check_amphorae=True, check_vip=True, + check_pools=True): + self.assertIsInstance(lb, data_models.LoadBalancer) + self.check_load_balancer_data_model(lb) + self.assertIsInstance(lb.listeners, list) + self.assertIsInstance(lb.amphorae, list) + if check_listeners: + for listener in lb.listeners: + self.check_listener(listener, check_lb=False, + check_pools=check_pools) + if check_amphorae: + for amphora in lb.amphorae: + self.check_amphora(amphora, check_load_balancer=False) + if check_vip: + self.check_vip(lb.vip, check_lb=False) + if check_pools: + for pool in lb.pools: + self.check_pool(pool, check_lb=False, + check_listeners=check_listeners) + + def check_vip(self, vip, check_lb=True): + self.assertIsInstance(vip, data_models.Vip) + self.check_vip_data_model(vip) + if check_lb: + self.check_load_balancer(vip.load_balancer, check_vip=False) + + def check_sni(self, sni, check_listener=True): + self.assertIsInstance(sni, data_models.SNI) + self.check_sni_data_model(sni) + if check_listener: + self.check_listener(sni.listener, check_sni=False) + + def check_listener_statistics(self, stats, check_listener=True): + self.assertIsInstance(stats, data_models.ListenerStatistics) + self.check_listener_statistics_data_model(stats) + if check_listener: + listener_db = (self.session.query(models.Listener) + .filter_by(id=stats.listener_id).first()) + self.check_listener(listener_db.to_data_model()) + + def check_amphora(self, amphora, check_load_balancer=True): + self.assertIsInstance(amphora, data_models.Amphora) + self.check_amphora_data_model(amphora) + if check_load_balancer: + self.check_load_balancer(amphora.load_balancer) + + def check_listener(self, listener, check_sni=True, check_pools=True, + check_lb=True, check_l7policies=True): + self.assertIsInstance(listener, data_models.Listener) + self.check_listener_data_model(listener) + if check_lb: + self.check_load_balancer(listener.load_balancer, + check_listeners=False, + check_pools=check_pools) + if check_sni: + c_containers = listener.sni_containers + self.assertIsInstance(c_containers, list) + for sni in c_containers: + self.check_sni(sni, check_listener=False) + if check_pools: + for pool in listener.pools: + self.check_pool(pool, check_listeners=False, check_lb=check_lb) + if check_l7policies: + c_l7policies = listener.l7policies + self.assertIsInstance(c_l7policies, list) + for policy in c_l7policies: + self.check_l7policy(policy, 
check_listener=False, + check_pool=check_pools, check_lb=check_lb) + + def check_session_persistence(self, session_persistence, check_pool=True): + self.assertIsInstance(session_persistence, + data_models.SessionPersistence) + self.check_session_persistence_data_model(session_persistence) + if check_pool: + self.check_pool(session_persistence.pool, check_sp=False) + + def check_member(self, member, check_pool=True): + self.assertIsInstance(member, data_models.Member) + self.check_member_data_model(member) + if check_pool: + self.check_pool(member.pool, check_members=False) + + def check_l7policy(self, l7policy, check_listener=True, check_pool=True, + check_l7rules=True, check_lb=True): + self.assertIsInstance(l7policy, data_models.L7Policy) + self.check_l7policy_data_model(l7policy) + if check_listener: + self.check_listener(l7policy.listener, check_l7policies=False, + check_pools=check_pool, check_lb=check_lb) + if check_l7rules: + c_l7rules = l7policy.l7rules + self.assertIsInstance(c_l7rules, list) + for rule in c_l7rules: + self.check_l7rule(rule, check_l7policy=False) + if check_pool and l7policy.redirect_pool is not None: + self.assertEqual(l7policy.action, + constants.L7POLICY_ACTION_REDIRECT_TO_POOL) + self.check_pool(l7policy.redirect_pool, + check_listeners=check_listener, + check_l7policies=False, check_lb=check_lb) + + def check_l7rule(self, l7rule, check_l7policy=True): + self.assertIsInstance(l7rule, data_models.L7Rule) + self.check_l7rule_data_model(l7rule) + if check_l7policy: + self.check_l7policy(l7rule.l7policy) + + def check_health_monitor(self, health_monitor, check_pool=True): + self.assertIsInstance(health_monitor, data_models.HealthMonitor) + self.check_health_monitor_data_model(health_monitor) + if check_pool: + self.check_pool(health_monitor.pool, check_hm=False) + + def check_pool(self, pool, check_listeners=True, check_sp=True, + check_hm=True, check_members=True, check_l7policies=True, + check_lb=True): + self.assertIsInstance(pool, data_models.Pool) + self.check_pool_data_model(pool) + if check_listeners: + for listener in pool.listeners: + self.check_listener(listener, check_pools=False, + check_lb=check_lb) + if check_sp: + self.check_session_persistence(pool.session_persistence, + check_pool=False) + if check_members: + c_members = pool.members + self.assertIsNotNone(c_members) + self.assertEqual(1, len(c_members)) + for c_member in c_members: + self.check_member(c_member, check_pool=False) + if check_hm: + self.check_health_monitor(pool.health_monitor, check_pool=False) + if check_lb: + self.check_load_balancer(pool.load_balancer, check_pools=False, + check_listeners=check_listeners) + if check_l7policies: + c_l7policies = pool.l7policies + self.assertIsInstance(c_l7policies, list) + for policy in c_l7policies: + self.check_l7policy(policy, check_pool=False, + check_listener=check_listeners, + check_lb=check_lb) + + def check_load_balancer_data_model(self, lb): + self.assertEqual(self.FAKE_UUID_1, lb.project_id) + self.assertEqual(self.FAKE_UUID_1, lb.id) + self.assertEqual(constants.ACTIVE, lb.provisioning_status) + self.assertTrue(lb.enabled) + + def check_vip_data_model(self, vip): + self.assertEqual(self.FAKE_UUID_1, vip.load_balancer_id) + + def check_listener_data_model(self, listener): + self.assertEqual(self.FAKE_UUID_1, listener.project_id) + self.assertEqual(self.FAKE_UUID_1, listener.id) + self.assertEqual(constants.PROTOCOL_HTTP, listener.protocol) + self.assertEqual(80, listener.protocol_port) + self.assertEqual(constants.ACTIVE, 
listener.provisioning_status) + self.assertEqual(constants.ONLINE, listener.operating_status) + self.assertTrue(listener.enabled) + + def check_sni_data_model(self, sni): + self.assertEqual(self.FAKE_UUID_1, sni.listener_id) + self.assertEqual(self.FAKE_UUID_1, sni.tls_container_id) + + def check_listener_statistics_data_model(self, stats): + self.assertEqual(self.listener.id, stats.listener_id) + self.assertEqual(0, stats.bytes_in) + self.assertEqual(0, stats.bytes_out) + self.assertEqual(0, stats.active_connections) + self.assertEqual(0, stats.total_connections) + + def check_pool_data_model(self, pool): + self.assertEqual(self.FAKE_UUID_1, pool.project_id) + self.assertEqual(self.FAKE_UUID_1, pool.id) + self.assertEqual(constants.PROTOCOL_HTTP, pool.protocol) + self.assertEqual(constants.LB_ALGORITHM_LEAST_CONNECTIONS, + pool.lb_algorithm) + self.assertEqual(constants.ONLINE, pool.operating_status) + self.assertTrue(pool.enabled) + + def check_session_persistence_data_model(self, sp): + self.assertEqual(self.pool.id, sp.pool_id) + self.assertEqual(constants.SESSION_PERSISTENCE_HTTP_COOKIE, sp.type) + + def check_health_monitor_data_model(self, hm): + self.assertEqual(constants.HEALTH_MONITOR_HTTP, hm.type) + self.assertEqual(1, hm.delay) + self.assertEqual(1, hm.timeout) + self.assertEqual(1, hm.fall_threshold) + self.assertEqual(1, hm.rise_threshold) + self.assertTrue(hm.enabled) + + def check_member_data_model(self, member): + self.assertEqual(self.FAKE_UUID_1, member.project_id) + self.assertEqual(self.FAKE_UUID_1, member.id) + self.assertEqual(self.pool.id, member.pool_id) + self.assertEqual('10.0.0.1', member.ip_address) + self.assertEqual(80, member.protocol_port) + self.assertEqual(constants.ONLINE, member.operating_status) + self.assertTrue(member.enabled) + + def check_l7policy_data_model(self, l7policy): + self.assertEqual(self.FAKE_UUID_1, l7policy.id) + self.assertEqual(self.listener.id, l7policy.listener_id) + self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + l7policy.action) + self.assertEqual(self.pool.id, l7policy.redirect_pool_id) + self.assertEqual(1, l7policy.position) + + def check_l7rule_data_model(self, l7rule): + self.assertEqual(self.FAKE_UUID_1, l7rule.id) + self.assertEqual(self.l7policy.id, l7rule.l7policy_id) + self.assertEqual(constants.L7RULE_TYPE_PATH, l7rule.type) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + l7rule.compare_type) + self.assertEqual('/api', l7rule.value) + self.assertFalse(l7rule.invert) + + def check_amphora_data_model(self, amphora): + self.assertEqual(self.FAKE_UUID_1, amphora.id) + self.assertEqual(self.FAKE_UUID_1, amphora.compute_id) + self.assertEqual(constants.ACTIVE, amphora.status) + + def check_load_balancer_amphora_data_model(self, amphora): + self.assertEqual(self.FAKE_UUID_1, amphora.amphora_id) + self.assertEqual(self.FAKE_UUID_1, amphora.load_balancer_id) + + +class TestDataModelManipulations(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.lb = self.create_load_balancer(self.session) + self.amphora = self.create_amphora(self.session) + self.associate_amphora(self.lb, self.amphora) + # This pool will be the listener's default_pool and be referenced + # by self.l7policy + self.pool = self.create_pool(self.session, load_balancer_id=self.lb.id) + self.hm = self.create_health_monitor(self.session, self.pool.id) + self.member = self.create_member(self.session, self.pool.id, + id=self.FAKE_UUID_1, + ip_address='10.0.0.1') + self.sp = 
self.create_session_persistence(self.session, self.pool.id) + self.vip = self.create_vip(self.session, self.lb.id) + self.listener = self.create_listener(self.session, + default_pool_id=self.pool.id, + load_balancer_id=self.lb.id) + self.stats = self.create_listener_statistics(self.session, + self.listener.id, + self.amphora.id) + self.sni = self.create_sni(self.session, listener_id=self.listener.id) + self.l7policy = self.create_l7policy( + self.session, listener_id=self.listener.id, + action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + redirect_pool_id=self.pool.id) + self.l7rule = self.create_l7rule(self.session, + l7policy_id=self.l7policy.id) + # This pool, l7policy and l7rule are connected to the listener, + # but are not the default_pool + self.pool2 = self.create_pool( + self.session, load_balancer_id=self.lb.id, + id=uuidutils.generate_uuid()) + self.l7policy2 = self.create_l7policy( + self.session, listener_id=self.listener.id, + action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + redirect_pool_id=self.pool2.id, + id=uuidutils.generate_uuid(), position=2) + self.l7rule2 = self.create_l7rule( + self.session, l7policy_id=self.l7policy2.id, + id=uuidutils.generate_uuid()) + # This pool is not connected to the listener at all + self.pool3 = self.create_pool( + self.session, load_balancer_id=self.lb.id, + id=uuidutils.generate_uuid()) + + def test_dm_simple_update(self): + lb = self.lb.to_data_model() + self.assertIsNone(lb.name) + lb.update({'name': 'test_name_change'}) + self.assertEqual(lb.name, 'test_name_change') + + def test_dm_session_persistence_delete(self): + sp = self.sp.to_data_model() + pool = sp.pool + sp.delete() + self.assertIsNone(pool.session_persistence) + + def test_dm_health_monitor_delete(self): + hm = self.hm.to_data_model() + pool = hm.pool + hm.delete() + self.assertIsNone(pool.health_monitor) + + def test_dm_pool_simple_update(self): + pool = self.pool.to_data_model() + self.assertIsNone(pool.name) + pool.update({'name': 'new_pool_name'}) + self.assertEqual(pool.name, 'new_pool_name') + + def test_dm_pool_session_persistence_update(self): + pool = self.pool.to_data_model() + self.assertEqual(pool.session_persistence.cookie_name, + 'cookie_name') + sp_dict = {'cookie_name': 'new_name'} + pool.update({'session_persistence': sp_dict}) + self.assertEqual(pool.session_persistence.cookie_name, + 'new_name') + + def test_dm_pool_session_persistence_delete(self): + pool = self.pool.to_data_model() + self.assertEqual(pool.session_persistence.cookie_name, + 'cookie_name') + sp_dict = {} + pool.update({'session_persistence': sp_dict}) + self.assertIsNone(pool.session_persistence) + + def test_dm_pool_session_persistence_create(self): + pool = self.pool.to_data_model() + pool.update({'session_persistence': {}}) + self.assertIsNone(pool.session_persistence) + sp_dict = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, + 'cookie_name': 'cookie_name'} + pool.update({'session_persistence': sp_dict}) + self.assertEqual(pool.session_persistence.type, + constants.SESSION_PERSISTENCE_HTTP_COOKIE) + self.assertEqual(pool.session_persistence.pool_id, pool.id) + + def test_dm_pool_delete(self): + pool = self.pool.to_data_model() + listener = pool.listeners[0] + lb = pool.load_balancer + l7policy = pool.l7policies[0] + self.assertIn(pool, listener.pools) + self.assertIn(pool, lb.pools) + self.assertEqual(pool.id, l7policy.redirect_pool_id) + pool.delete() + self.assertNotIn(pool, listener.pools) + self.assertIsNone(listener.default_pool) + 
self.assertIsNone(listener.default_pool_id) + self.assertNotIn(pool, lb.pools) + self.assertEqual(l7policy.action, constants.L7POLICY_ACTION_REJECT) + self.assertIsNone(l7policy.redirect_pool_id) + self.assertIsNone(l7policy.redirect_pool) + + def test_dm_member_delete(self): + member = self.member.to_data_model() + pool = member.pool + self.assertIn(member, pool.members) + member.delete() + self.assertNotIn(member, pool.members) + + def test_dm_listener_update_and_clear_default_pool(self): + listener = self.listener.to_data_model() + new_pool = listener._find_in_graph('Pool' + self.pool3.id) + self.assertNotEqual(new_pool.id, listener.default_pool_id) + self.assertNotIn(listener, new_pool.listeners) + self.assertNotIn(new_pool, listener.pools) + listener.update({'default_pool_id': new_pool.id}) + self.assertEqual(new_pool.id, listener.default_pool_id) + self.assertIn(listener, new_pool.listeners) + self.assertIn(new_pool, listener.pools) + listener.update({'default_pool_id': None}) + self.assertIsNone(listener.default_pool_id) + self.assertIsNone(listener.default_pool) + self.assertNotIn(listener, new_pool.listeners) + self.assertNotIn(new_pool, listener.pools) + + def test_dm_listener_update_clear_default_pool_with_l7p_referral(self): + listener = self.listener.to_data_model() + pool = listener.default_pool + self.assertEqual(pool.id, listener.default_pool_id) + self.assertIn(listener, pool.listeners) + self.assertIn(pool, listener.pools) + listener.update({'default_pool_id': None}) + self.assertIsNone(listener.default_pool_id) + self.assertIsNone(listener.default_pool) + self.assertIn(listener, pool.listeners) + self.assertIn(pool, listener.pools) + + def test_dm_listener_delete(self): + listener = self.listener.to_data_model() + lb = listener.load_balancer + pools = listener.pools + self.assertIn(listener, lb.listeners) + for pool in pools: + self.assertIn(listener, pool.listeners) + listener.delete() + self.assertNotIn(listener, lb.listeners) + for pool in pools: + self.assertNotIn(listener, pool.listeners) + + def test_dm_amphora_delete(self): + amphora = self.amphora.to_data_model() + lb = amphora.load_balancer + self.assertIn(amphora, lb.amphorae) + amphora.delete() + self.assertNotIn(amphora, lb.amphorae) + + def test_dm_l7rule_delete(self): + l7r = self.l7rule2.to_data_model() + l7p = l7r.l7policy + listener = l7p.listener + pool2 = l7p.redirect_pool + self.assertIn(pool2, listener.pools) + self.assertNotEqual(pool2.id, listener.default_pool_id) + self.assertIn(l7r, l7p.l7rules) + self.assertEqual(1, len(l7p.l7rules)) + l7r.delete() + self.assertNotIn(l7r, l7p.l7rules) + self.assertNotIn(pool2, listener.pools) + + def test_dm_l7policy_delete_with_listener_default_pool_ref(self): + l7p = self.l7policy.to_data_model() + listener = l7p.listener + pool = l7p.redirect_pool + self.assertIn(pool, listener.pools) + self.assertEqual(pool.id, listener.default_pool_id) + self.assertIn(l7p, listener.l7policies) + self.assertIn(l7p, pool.l7policies) + l7p.delete() + self.assertIn(pool, listener.pools) + self.assertEqual(pool.id, listener.default_pool_id) + self.assertNotIn(l7p, listener.l7policies) + self.assertNotIn(l7p, pool.l7policies) + + def test_dm_l7policy_delete_not_listener_default_pool(self): + l7p = self.l7policy2.to_data_model() + listener = l7p.listener + pool2 = l7p.redirect_pool + self.assertIn(pool2, listener.pools) + self.assertNotEqual(pool2.id, listener.default_pool_id) + self.assertIn(l7p, listener.l7policies) + self.assertIn(l7p, pool2.l7policies) + l7p.delete() + 
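+ # pool2 was attached to the listener only through this l7policy, + # so deleting the policy should also unlink pool2 from the listener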
self.assertNotIn(pool2, listener.pools) + self.assertNotIn(l7p, listener.l7policies) + self.assertNotIn(l7p, pool2.l7policies) + + def test_dm_l7policy_update_simple(self): + l7p = self.l7policy.to_data_model() + self.assertIsNone(l7p.name) + l7p.update({'name': 'new_name'}) + self.assertEqual(l7p.name, 'new_name') + + def test_dm_l7policy_update_action_rdr_url_no_default_pool_link(self): + l7p = self.l7policy2.to_data_model() + listener = l7p.listener + pool2 = l7p.redirect_pool + self.assertEqual(l7p.action, + constants.L7POLICY_ACTION_REDIRECT_TO_POOL) + self.assertIn(pool2, listener.pools) + self.assertIn(listener, pool2.listeners) + self.assertIsNone(l7p.redirect_url) + update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, + 'redirect_url': 'http://www.example.com/'} + l7p.update(update_dict) + self.assertEqual(l7p.action, + constants.L7POLICY_ACTION_REDIRECT_TO_URL) + self.assertEqual(l7p.redirect_url, 'http://www.example.com/') + self.assertIsNone(l7p.redirect_pool_id) + self.assertIsNone(l7p.redirect_pool) + self.assertNotIn(pool2, listener.pools) + self.assertNotIn(listener, pool2.listeners) + + def test_dm_l7policy_update_action_rdr_url_with_default_pool_link(self): + l7p = self.l7policy.to_data_model() + listener = l7p.listener + pool = l7p.redirect_pool + self.assertEqual(l7p.action, + constants.L7POLICY_ACTION_REDIRECT_TO_POOL) + self.assertIn(pool, listener.pools) + self.assertIn(listener, pool.listeners) + self.assertIsNone(l7p.redirect_url) + update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, + 'redirect_url': 'http://www.example.com/'} + l7p.update(update_dict) + self.assertEqual(l7p.action, + constants.L7POLICY_ACTION_REDIRECT_TO_URL) + self.assertEqual(l7p.redirect_url, 'http://www.example.com/') + self.assertIsNone(l7p.redirect_pool_id) + self.assertIsNone(l7p.redirect_pool) + self.assertIn(pool, listener.pools) + self.assertIn(listener, pool.listeners) + + def test_dm_l7policy_update_action_reject_no_default_pool_link(self): + l7p = self.l7policy2.to_data_model() + listener = l7p.listener + pool2 = l7p.redirect_pool + self.assertEqual(l7p.action, + constants.L7POLICY_ACTION_REDIRECT_TO_POOL) + self.assertIn(pool2, listener.pools) + self.assertIn(listener, pool2.listeners) + self.assertIsNone(l7p.redirect_url) + update_dict = {'action': constants.L7POLICY_ACTION_REJECT} + l7p.update(update_dict) + self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) + self.assertIsNone(l7p.redirect_url) + self.assertIsNone(l7p.redirect_pool_id) + self.assertIsNone(l7p.redirect_pool) + self.assertNotIn(pool2, listener.pools) + self.assertNotIn(listener, pool2.listeners) + + def test_dm_l7policy_update_action_reject_with_default_pool_link(self): + l7p = self.l7policy.to_data_model() + listener = l7p.listener + pool = l7p.redirect_pool + self.assertEqual(l7p.action, + constants.L7POLICY_ACTION_REDIRECT_TO_POOL) + self.assertIn(pool, listener.pools) + self.assertIn(listener, pool.listeners) + self.assertIsNone(l7p.redirect_url) + update_dict = {'action': constants.L7POLICY_ACTION_REJECT} + l7p.update(update_dict) + self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) + self.assertIsNone(l7p.redirect_url) + self.assertIsNone(l7p.redirect_pool_id) + self.assertIsNone(l7p.redirect_pool) + self.assertIn(pool, listener.pools) + self.assertIn(listener, pool.listeners) + + def test_dm_l7policy_update_position(self): + l7p = self.l7policy.to_data_model() + listener = l7p.listener + self.assertEqual(l7p, 
listener.l7policies[l7p.position - 1]) + update_dict = {'position': 1} + l7p.update(update_dict) + self.assertEqual(l7p, listener.l7policies[0]) + update_dict = {'position': 2} + l7p.update(update_dict) + self.assertEqual(l7p, listener.l7policies[1]) + + def test_dm_l7policy_update_reject_to_rdr_pool(self): + l7p = self.l7policy.to_data_model() + listener = l7p.listener + new_pool = listener._find_in_graph('Pool' + self.pool3.id) + update_dict = {'action': constants.L7POLICY_ACTION_REJECT} + l7p.update(update_dict) + self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) + self.assertIsNone(l7p.redirect_url) + self.assertIsNone(l7p.redirect_pool_id) + self.assertIsNone(l7p.redirect_pool) + self.assertNotIn(new_pool, listener.pools) + self.assertNotIn(listener, new_pool.listeners) + update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'redirect_pool_id': new_pool.id} + l7p.update(update_dict) + self.assertEqual(l7p.action, + constants.L7POLICY_ACTION_REDIRECT_TO_POOL) + self.assertIsNone(l7p.redirect_url) + self.assertEqual(l7p.redirect_pool_id, new_pool.id) + self.assertEqual(l7p.redirect_pool, new_pool) + self.assertIn(new_pool, listener.pools) + self.assertIn(listener, new_pool.listeners) + + def test_dm_l7policy_update_reject_to_rdr_pool_with_no_l7rules(self): + l7p = self.l7policy.to_data_model() + listener = l7p.listener + new_pool = listener._find_in_graph('Pool' + self.pool3.id) + update_dict = {'action': constants.L7POLICY_ACTION_REJECT} + l7p.update(update_dict) + self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) + self.assertIsNone(l7p.redirect_url) + self.assertIsNone(l7p.redirect_pool_id) + self.assertIsNone(l7p.redirect_pool) + self.assertNotIn(new_pool, listener.pools) + self.assertNotIn(listener, new_pool.listeners) + l7p.l7rules[0].delete() + update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'redirect_pool_id': new_pool.id} + l7p.update(update_dict) + self.assertEqual(l7p.action, + constants.L7POLICY_ACTION_REDIRECT_TO_POOL) + self.assertIsNone(l7p.redirect_url) + self.assertEqual(l7p.redirect_pool_id, new_pool.id) + self.assertEqual(l7p.redirect_pool, new_pool) + self.assertNotIn(new_pool, listener.pools) + self.assertNotIn(listener, new_pool.listeners) + + def test_dm_l7policy_update_reject_to_rdr_pool_with_disabled_policy(self): + l7p = self.l7policy.to_data_model() + listener = l7p.listener + new_pool = listener._find_in_graph('Pool' + self.pool3.id) + update_dict = {'action': constants.L7POLICY_ACTION_REJECT} + l7p.update(update_dict) + self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) + self.assertIsNone(l7p.redirect_url) + self.assertIsNone(l7p.redirect_pool_id) + self.assertIsNone(l7p.redirect_pool) + self.assertNotIn(new_pool, listener.pools) + self.assertNotIn(listener, new_pool.listeners) + update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'redirect_pool_id': new_pool.id, + 'enabled': False} + l7p.update(update_dict) + self.assertEqual(l7p.action, + constants.L7POLICY_ACTION_REDIRECT_TO_POOL) + self.assertIsNone(l7p.redirect_url) + self.assertEqual(l7p.redirect_pool_id, new_pool.id) + self.assertEqual(l7p.redirect_pool, new_pool) + self.assertNotIn(new_pool, listener.pools) + self.assertNotIn(listener, new_pool.listeners) + + def test_dm_l7policy_update_enable_and_disable(self): + l7p = self.l7policy2.to_data_model() + listener = l7p.listener + self.assertIn(l7p.redirect_pool, listener.pools) + update_dict = {'enabled': False} + l7p.update(update_dict) + 
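+ # disabling the policy should remove its redirect pool from the + # listener's pool list (re-enabling it below restores the link)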
self.assertNotIn(l7p.redirect_pool, listener.pools) + update_dict = {'enabled': True} + l7p.update(update_dict) + self.assertIn(l7p.redirect_pool, listener.pools) + + def test_dm_l7policy_update_disable_with_default_pool_link(self): + l7p = self.l7policy.to_data_model() + listener = l7p.listener + self.assertIn(l7p.redirect_pool, listener.pools) + update_dict = {'enabled': False} + l7p.update(update_dict) + self.assertIn(l7p.redirect_pool, listener.pools) + + def test_dm_l7policy_update_enable_with_reject_to_rdr_pool(self): + l7p = self.l7policy.to_data_model() + listener = l7p.listener + new_pool = listener._find_in_graph('Pool' + self.pool3.id) + update_dict = {'action': constants.L7POLICY_ACTION_REJECT, + 'enabled': False} + l7p.update(update_dict) + self.assertEqual(l7p.action, constants.L7POLICY_ACTION_REJECT) + self.assertIsNone(l7p.redirect_url) + self.assertIsNone(l7p.redirect_pool_id) + self.assertIsNone(l7p.redirect_pool) + self.assertNotIn(new_pool, listener.pools) + self.assertNotIn(listener, new_pool.listeners) + update_dict = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'redirect_pool_id': new_pool.id, + 'enabled': True} + l7p.update(update_dict) + self.assertEqual(l7p.action, + constants.L7POLICY_ACTION_REDIRECT_TO_POOL) + self.assertIsNone(l7p.redirect_url) + self.assertEqual(l7p.redirect_pool_id, new_pool.id) + self.assertEqual(l7p.redirect_pool, new_pool) + self.assertIn(new_pool, listener.pools) + self.assertIn(listener, new_pool.listeners) + + +class FlavorModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def setUp(self): + super().setUp() + self.profile = self.create_flavor_profile(self.session) + + def test_create(self): + flavor = self.create_flavor(self.session, self.profile.id) + self.assertEqual(f"Flavor(description='fake flavor', enabled=True, " + f"flavor_profile_id={flavor.flavor_profile_id!r}, " + f"id={flavor.id!r}, name='fake_flavor')", str(flavor)) + self.assertIsNotNone(flavor.id) + + def test_delete(self): + flavor = self.create_flavor(self.session, self.profile.id) + self.assertIsNotNone(flavor.id) + id = flavor.id + self.session.commit() + + self.session.delete(flavor) + self.session.commit() + + new_flavor = self.session.query( + models.Flavor).filter_by(id=id).first() + self.assertIsNone(new_flavor) + + +class FlavorProfileModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def test_create(self): + fp = self.create_flavor_profile(self.session) + self.assertEqual(f"FlavorProfile(flavor_data={fp.flavor_data!r}, " + f"id={fp.id!r}, name='fake_profile', " + f"provider_name='fake_provider')", str(fp)) + self.assertIsNotNone(fp.id) + + def test_delete(self): + fp = self.create_flavor_profile(self.session) + self.assertIsNotNone(fp.id) + id = fp.id + self.session.commit() + + self.session.delete(fp) + self.session.commit() + new_fp = self.session.query( + models.FlavorProfile).filter_by(id=id).first() + self.assertIsNone(new_fp) + + +class QuotasModelTest(base.OctaviaDBTestBase, ModelTestMixin): + + def test_create(self): + obj = self.create_quotas(self.session, load_balancer=1, listener=2, + pool=3, health_monitor=4, member=5, + l7policy=6, l7rule=8) + self.assertEqual(f"Quotas(project_id={obj.project_id!r}, " + f"load_balancer=1, listener=2, pool=3, " + f"health_monitor=4, member=5, l7policy=6, l7rule=8)", + str(obj)) diff --git a/octavia/tests/functional/db/test_repositories.py b/octavia/tests/functional/db/test_repositories.py new file mode 100644 index 0000000000..43c4bfc374 --- /dev/null +++ 
b/octavia/tests/functional/db/test_repositories.py @@ -0,0 +1,5305 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from concurrent.futures import ThreadPoolExecutor +import datetime +import random +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_db import exception as db_exception +from oslo_utils import timeutils +from oslo_utils import uuidutils +from sqlalchemy.orm import defer +from sqlalchemy.orm import exc as sa_exception + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.db import api as db_api +from octavia.db import models as db_models +from octavia.db import repositories as repo +from octavia.tests.functional.db import base + +CONF = cfg.CONF + + +class BaseRepositoryTest(base.OctaviaDBTestBase): + + FAKE_IP = "192.0.2.1" + FAKE_UUID_1 = uuidutils.generate_uuid() + FAKE_UUID_2 = uuidutils.generate_uuid() + FAKE_UUID_3 = uuidutils.generate_uuid() + FAKE_UUID_4 = uuidutils.generate_uuid() + FAKE_UUID_5 = uuidutils.generate_uuid() + FAKE_UUID_6 = uuidutils.generate_uuid() + FAKE_UUID_7 = uuidutils.generate_uuid() + FAKE_EXP_AGE = 10 + + def setUp(self): + super().setUp() + self.pool_repo = repo.PoolRepository() + self.member_repo = repo.MemberRepository() + self.lb_repo = repo.LoadBalancerRepository() + self.vip_repo = repo.VipRepository() + self.listener_repo = repo.ListenerRepository() + self.listener_stats_repo = repo.ListenerStatisticsRepository() + self.sp_repo = repo.SessionPersistenceRepository() + self.hm_repo = repo.HealthMonitorRepository() + self.sni_repo = repo.SNIRepository() + self.amphora_repo = repo.AmphoraRepository() + self.amphora_health_repo = repo.AmphoraHealthRepository() + self.vrrp_group_repo = repo.VRRPGroupRepository() + self.l7policy_repo = repo.L7PolicyRepository() + self.l7rule_repo = repo.L7RuleRepository() + self.quota_repo = repo.QuotasRepository() + self.flavor_repo = repo.FlavorRepository() + self.flavor_profile_repo = repo.FlavorProfileRepository() + + def create_loadbalancer(self, lb_id): + lb = self.lb_repo.create(self.session, id=lb_id, + project_id=self.FAKE_UUID_2, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + return lb + + def test_get_all_return_value(self): + pool_list, _ = self.pool_repo.get_all(self.session, + project_id=self.FAKE_UUID_2) + self.assertIsInstance(pool_list, list) + lb_list, _ = self.lb_repo.get_all(self.session, + project_id=self.FAKE_UUID_2) + self.assertIsInstance(lb_list, list) + listener_list, _ = self.listener_repo.get_all( + self.session, project_id=self.FAKE_UUID_2) + self.assertIsInstance(listener_list, list) + member_list, _ = self.member_repo.get_all(self.session, + project_id=self.FAKE_UUID_2) + self.assertIsInstance(member_list, list) + fp_list, _ = self.flavor_profile_repo.get_all( + self.session, 
id=self.FAKE_UUID_2) + self.assertIsInstance(fp_list, list) + flavor_list, _ = self.flavor_repo.get_all( + self.session, id=self.FAKE_UUID_2) + self.assertIsInstance(flavor_list, list) + + +class AllRepositoriesTest(base.OctaviaDBTestBase): + + FAKE_UUID_1 = uuidutils.generate_uuid() + FAKE_UUID_2 = uuidutils.generate_uuid() + FAKE_UUID_3 = uuidutils.generate_uuid() + FAKE_UUID_4 = uuidutils.generate_uuid() + FAKE_IP = '192.0.2.44' + + def setUp(self): + super().setUp() + self.repos = repo.Repositories() + self.load_balancer = self.repos.load_balancer.create( + self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, + name="lb_name", description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.listener = self.repos.listener.create( + self.session, id=self.FAKE_UUID_4, + protocol=constants.PROTOCOL_HTTP, protocol_port=80, + enabled=True, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + load_balancer_id=self.load_balancer.id) + self.amphora = self.repos.amphora.create( + self.session, id=uuidutils.generate_uuid(), + load_balancer_id=self.load_balancer.id, + compute_id=self.FAKE_UUID_3, status=constants.ACTIVE, + vrrp_ip=self.FAKE_IP, lb_network_ip=self.FAKE_IP) + self.session.commit() + + def test_all_repos_has_correct_repos(self): + repo_attr_names = ('load_balancer', 'vip', 'health_monitor', + 'session_persistence', 'pool', 'member', 'listener', + 'listener_stats', 'amphora', 'sni', + 'amphorahealth', 'vrrpgroup', 'l7rule', 'l7policy', + 'amp_build_slots', 'amp_build_req', 'quotas', + 'flavor', 'flavor_profile', 'listener_cidr', + 'availability_zone', 'availability_zone_profile', + 'additional_vip', 'amphora_member_port') + for repo_attr in repo_attr_names: + single_repo = getattr(self.repos, repo_attr, None) + message = (f"Class Repositories should have {repo_attr} instance " + f"variable.") + self.assertIsNotNone(single_repo, message=message) + message = (("instance variable, %(repo_name)s, of class " + "Repositories should be an instance of %(base)s") % + {'repo_name': repo_attr, + 'base': repo.BaseRepository.__name__}) + self.assertIsInstance(single_repo, repo.BaseRepository, + msg=message) + + for attr in vars(self.repos): + if attr.startswith('_') or attr in repo_attr_names: + continue + possible_repo = getattr(self.repos, attr, None) + message = ('Class Repositories is not expected to have %s instance' + ' variable as a repository.' 
% attr) + self.assertNotIsInstance(possible_repo, repo.BaseRepository, + msg=message) + + def test_create_load_balancer_and_vip(self): + lb = {'name': 'test1', 'description': 'desc1', 'enabled': True, + 'provisioning_status': constants.PENDING_UPDATE, + 'operating_status': constants.OFFLINE, + 'topology': constants.TOPOLOGY_ACTIVE_STANDBY, + 'vrrp_group': None, + 'provider': 'amphora', + 'server_group_id': uuidutils.generate_uuid(), + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid(), 'flavor_id': None, + 'tags': ['test_tag']} + vip = {'ip_address': '192.0.2.1', + 'port_id': uuidutils.generate_uuid(), + 'subnet_id': uuidutils.generate_uuid(), + 'network_id': uuidutils.generate_uuid(), + 'qos_policy_id': None, 'octavia_owned': True, + 'vnic_type': None, 'sgs': []} + additional_vips = [{'subnet_id': uuidutils.generate_uuid(), + 'ip_address': '192.0.2.2'}] + lb_dm = self.repos.create_load_balancer_and_vip(self.session, lb, vip, + additional_vips) + self.session.commit() + lb_dm_dict = lb_dm.to_dict() + del lb_dm_dict['vip'] + del lb_dm_dict['additional_vips'] + del lb_dm_dict['listeners'] + del lb_dm_dict['amphorae'] + del lb_dm_dict['pools'] + del lb_dm_dict['created_at'] + del lb_dm_dict['updated_at'] + self.assertIsNone(lb_dm_dict.pop('availability_zone')) + self.assertEqual(lb, lb_dm_dict) + vip_dm_dict = lb_dm.vip.to_dict() + vip_dm_dict['load_balancer_id'] = lb_dm.id + del vip_dm_dict['load_balancer'] + vip['sg_ids'] = [] + self.assertEqual(vip, vip_dm_dict) + + # The stored VIP should be reachable through either repository + ret = self.repos.load_balancer.get(self.session, id=lb_dm.id) + self.assertEqual(vip['port_id'], ret.vip.port_id) + ret = self.repos.vip.get(self.session, load_balancer_id=lb_dm.id) + self.assertEqual(vip['port_id'], ret.port_id) + + def test_create_load_balancer_and_update_vip(self): + lb = {'name': 'test1', 'description': 'desc1', 'enabled': True, + 'provisioning_status': constants.PENDING_UPDATE, + 'operating_status': constants.OFFLINE, + 'topology': constants.TOPOLOGY_ACTIVE_STANDBY, + 'vrrp_group': None, + 'provider': 'amphora', + 'server_group_id': uuidutils.generate_uuid(), + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid(), 'flavor_id': None, + 'tags': ['test_tag']} + vip = {'ip_address': '192.0.2.1', + 'port_id': uuidutils.generate_uuid(), + 'subnet_id': uuidutils.generate_uuid(), + 'network_id': uuidutils.generate_uuid(), + 'qos_policy_id': None, 'octavia_owned': True, + 'vnic_type': None, 'sgs': []} + additional_vips = [{'subnet_id': uuidutils.generate_uuid(), + 'ip_address': '192.0.2.2'}] + lb_dm = self.repos.create_load_balancer_and_vip(self.session, lb, vip, + additional_vips) + self.session.commit() + + vip_dm_dict = lb_dm.vip.to_dict() + self.assertEqual(0, len(vip_dm_dict["sg_ids"])) + + vip_update = { + 'port_id': uuidutils.generate_uuid(), + } + self.repos.vip.update(self.session, lb_dm.id, **vip_update) + self.session.expire_all() + self.session.flush() + self.session.commit() + + updated_vip_dm = self.repos.vip.get(self.session, + load_balancer_id=lb_dm.id) + self.assertEqual(vip_update['port_id'], updated_vip_dm.port_id) + + def test_create_load_balancer_and_update_vip_sg_ids(self): + lb = {'name': 'test1', 'description': 'desc1', 'enabled': True, + 'provisioning_status': constants.PENDING_UPDATE, + 'operating_status': constants.OFFLINE, + 'topology': constants.TOPOLOGY_ACTIVE_STANDBY, + 'vrrp_group': None, + 'provider': 'amphora', + 'server_group_id': uuidutils.generate_uuid(), + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid(), 'flavor_id': None, + 'tags': ['test_tag']} + vip = 
{'ip_address': '192.0.2.1', + 'port_id': uuidutils.generate_uuid(), + 'subnet_id': uuidutils.generate_uuid(), + 'network_id': uuidutils.generate_uuid(), + 'qos_policy_id': None, 'octavia_owned': True, + 'vnic_type': None, 'sgs': []} + additional_vips = [{'subnet_id': uuidutils.generate_uuid(), + 'ip_address': '192.0.2.2'}] + lb_dm = self.repos.create_load_balancer_and_vip(self.session, lb, vip, + additional_vips) + self.session.commit() + + vip_dm_dict = lb_dm.vip.to_dict() + self.assertEqual(0, len(vip_dm_dict["sg_ids"])) + + vip_update = { + 'sg_ids': [uuidutils.generate_uuid(), + uuidutils.generate_uuid()], + } + self.repos.vip.update(self.session, lb_dm.id, **vip_update) + self.session.commit() + + updated_vip_dm = self.repos.vip.get(self.session, + load_balancer_id=lb_dm.id) + self.assertEqual(2, len(updated_vip_dm.sg_ids)) + self.assertIn(vip_update['sg_ids'][0], updated_vip_dm.sg_ids) + self.assertIn(vip_update['sg_ids'][1], updated_vip_dm.sg_ids) + + vip_update['sg_ids'] = [uuidutils.generate_uuid()] + self.repos.vip.update(self.session, lb_dm.id, **vip_update) + self.session.commit() + + updated_vip_dm = self.repos.vip.get(self.session, + load_balancer_id=lb_dm.id) + self.assertEqual(1, len(updated_vip_dm.sg_ids)) + self.assertIn(vip_update['sg_ids'][0], updated_vip_dm.sg_ids) + + vip_update['sg_ids'] = [] + self.repos.vip.update(self.session, lb_dm.id, **vip_update) + self.session.commit() + + updated_vip_dm = self.repos.vip.get(self.session, + load_balancer_id=lb_dm.id) + self.assertEqual(0, len(updated_vip_dm.sg_ids)) + + def test_create_pool_on_listener_without_sp(self): + pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', + 'description': 'desc1', + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'enabled': True, 'operating_status': constants.ONLINE, + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid(), + 'provisioning_status': constants.ACTIVE, + 'tags': ['test_tag'], + 'tls_certificate_id': uuidutils.generate_uuid(), + 'tls_enabled': False, 'tls_ciphers': None, + 'tls_versions': None, + 'alpn_protocols': None} + pool_dm = self.repos.create_pool_on_load_balancer( + self.session, pool, listener_id=self.listener.id) + self.session.commit() + pool_dm_dict = pool_dm.to_dict() + # These are not defined in the sample pool dict but will + # be in the live data. 
+ del pool_dm_dict['members'] + del pool_dm_dict['health_monitor'] + del pool_dm_dict['session_persistence'] + del pool_dm_dict['listeners'] + del pool_dm_dict['load_balancer'] + del pool_dm_dict['load_balancer_id'] + del pool_dm_dict['l7policies'] + del pool_dm_dict['created_at'] + del pool_dm_dict['updated_at'] + del pool_dm_dict['ca_tls_certificate_id'] + del pool_dm_dict['crl_container_id'] + self.assertEqual(pool, pool_dm_dict) + new_listener = self.repos.listener.get(self.session, + id=self.listener.id) + self.assertEqual(pool_dm.id, new_listener.default_pool_id) + + def test_create_pool_on_listener_with_sp(self): + pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', + 'description': 'desc1', + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'enabled': True, 'operating_status': constants.ONLINE, + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid(), + 'provisioning_status': constants.ACTIVE, + 'tags': ['test_tag'], + 'tls_certificate_id': uuidutils.generate_uuid(), + 'tls_enabled': False, + 'tls_ciphers': None, + 'tls_versions': None, + 'alpn_protocols': None} + sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, + 'cookie_name': 'cookie_monster', + 'pool_id': pool['id'], + 'persistence_granularity': None, + 'persistence_timeout': None} + pool.update({'session_persistence': sp}) + pool_dm = self.repos.create_pool_on_load_balancer( + self.session, pool, listener_id=self.listener.id) + self.session.commit() + pool_dm_dict = pool_dm.to_dict() + # These are not defined in the sample pool dict but will + # be in the live data. + del pool_dm_dict['members'] + del pool_dm_dict['health_monitor'] + del pool_dm_dict['session_persistence'] + del pool_dm_dict['listeners'] + del pool_dm_dict['load_balancer'] + del pool_dm_dict['load_balancer_id'] + del pool_dm_dict['l7policies'] + del pool_dm_dict['created_at'] + del pool_dm_dict['updated_at'] + del pool_dm_dict['ca_tls_certificate_id'] + del pool_dm_dict['crl_container_id'] + self.assertEqual(pool, pool_dm_dict) + sp_dm_dict = pool_dm.session_persistence.to_dict() + del sp_dm_dict['pool'] + sp['pool_id'] = pool_dm.id + self.assertEqual(sp, sp_dm_dict) + new_listener = self.repos.listener.get(self.session, + id=self.listener.id) + self.assertEqual(pool_dm.id, new_listener.default_pool_id) + new_sp = self.repos.session_persistence.get(self.session, + pool_id=pool_dm.id) + self.assertIsNotNone(new_sp) + + def test_update_pool_without_sp(self): + pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', + 'description': 'desc1', + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'enabled': True, 'operating_status': constants.ONLINE, + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid(), + 'provisioning_status': constants.ACTIVE, + 'tags': ['test_tag'], 'tls_enabled': False, + 'tls_ciphers': None, + 'tls_versions': None, + 'alpn_protocols': None} + pool_dm = self.repos.create_pool_on_load_balancer( + self.session, pool, listener_id=self.listener.id) + update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'} + new_pool_dm = self.repos.update_pool_and_sp( + self.session, pool_dm.id, update_pool) + self.session.commit() + pool_dm_dict = new_pool_dm.to_dict() + # These are not defined in the sample pool dict but will + # be in the live data. 
+ del pool_dm_dict['members'] + del pool_dm_dict['health_monitor'] + del pool_dm_dict['session_persistence'] + del pool_dm_dict['listeners'] + del pool_dm_dict['load_balancer'] + del pool_dm_dict['load_balancer_id'] + del pool_dm_dict['l7policies'] + del pool_dm_dict['created_at'] + del pool_dm_dict['updated_at'] + del pool_dm_dict['ca_tls_certificate_id'] + del pool_dm_dict['crl_container_id'] + pool.update(update_pool) + pool['tls_certificate_id'] = None + self.assertEqual(pool, pool_dm_dict) + self.assertIsNone(new_pool_dm.session_persistence) + + def test_update_pool_with_existing_sp(self): + pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', + 'description': 'desc1', + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'enabled': True, 'operating_status': constants.ONLINE, + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid(), + 'provisioning_status': constants.ACTIVE, + 'tags': ['test_tag'], + 'tls_certificate_id': uuidutils.generate_uuid(), + 'tls_enabled': False, 'tls_ciphers': None, + 'tls_versions': None, + 'alpn_protocols': None} + sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, + 'cookie_name': 'cookie_monster', + 'pool_id': pool['id'], + 'persistence_granularity': None, + 'persistence_timeout': None} + pool.update({'session_persistence': sp}) + pool_dm = self.repos.create_pool_on_load_balancer( + self.session, pool, listener_id=self.listener.id) + update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'} + update_sp = {'type': constants.SESSION_PERSISTENCE_SOURCE_IP} + update_pool.update({'session_persistence': update_sp}) + new_pool_dm = self.repos.update_pool_and_sp( + self.session, pool_dm.id, update_pool) + self.session.commit() + pool_dm_dict = new_pool_dm.to_dict() + # These are not defined in the sample pool dict but will + # be in the live data. 
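+ # (to_dict() pulls in relationship fields and timestamps, so they + # are stripped before comparing against the input dict)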
+ del pool_dm_dict['members'] + del pool_dm_dict['health_monitor'] + del pool_dm_dict['session_persistence'] + del pool_dm_dict['listeners'] + del pool_dm_dict['load_balancer'] + del pool_dm_dict['load_balancer_id'] + del pool_dm_dict['l7policies'] + del pool_dm_dict['created_at'] + del pool_dm_dict['updated_at'] + del pool_dm_dict['ca_tls_certificate_id'] + del pool_dm_dict['crl_container_id'] + pool.update(update_pool) + self.assertEqual(pool, pool_dm_dict) + sp_dm_dict = new_pool_dm.session_persistence.to_dict() + del sp_dm_dict['pool'] + sp['pool_id'] = pool_dm.id + sp.update(update_sp) + self.assertEqual(sp, sp_dm_dict) + + def test_update_pool_with_nonexisting_sp(self): + pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', + 'description': 'desc1', + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'enabled': True, 'operating_status': constants.ONLINE, + 'provisioning_status': constants.ACTIVE, + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid()} + pool_dm = self.repos.create_pool_on_load_balancer( + self.session, pool, listener_id=self.listener.id) + self.session.commit() + update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool'} + update_sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, + 'cookie_name': 'monster_cookie', + 'persistence_granularity': None, + 'persistence_timeout': None} + update_pool.update({'session_persistence': update_sp}) + new_pool_dm = self.repos.update_pool_and_sp( + self.session, pool_dm.id, update_pool) + self.session.commit() + sp_dm_dict = new_pool_dm.session_persistence.to_dict() + del sp_dm_dict['pool'] + update_sp['pool_id'] = pool_dm.id + self.assertEqual(update_sp, sp_dm_dict) + + def test_update_pool_with_nonexisting_sp_delete_sp(self): + pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', + 'description': 'desc1', + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'enabled': True, 'operating_status': constants.ONLINE, + 'provisioning_status': constants.ACTIVE, + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid()} + pool_dm = self.repos.create_pool_on_load_balancer( + self.session, pool, listener_id=self.listener.id) + update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool', + 'session_persistence': None} + new_pool_dm = self.repos.update_pool_and_sp( + self.session, pool_dm.id, update_pool) + self.session.commit() + self.assertIsNone(new_pool_dm.session_persistence) + + def test_update_pool_with_existing_sp_delete_sp(self): + pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', + 'description': 'desc1', + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'enabled': True, 'operating_status': constants.ONLINE, + 'provisioning_status': constants.PENDING_CREATE, + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid()} + sp = {'type': constants.SESSION_PERSISTENCE_HTTP_COOKIE, + 'cookie_name': 'cookie_monster', + 'pool_id': pool['id']} + pool.update({'session_persistence': sp}) + pool_dm = self.repos.create_pool_on_load_balancer( + self.session, pool, listener_id=self.listener.id) + self.session.commit() + update_pool = {'protocol': constants.PROTOCOL_TCP, 'name': 'up_pool', + 'session_persistence': {}} + new_pool_dm = self.repos.update_pool_and_sp( + self.session, pool_dm.id, update_pool) + self.session.commit() + self.assertIsNone(new_pool_dm.session_persistence) + + def test_update_pool_with_cert(self): + pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1', + 
'description': 'desc1', + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'enabled': True, 'operating_status': constants.ONLINE, + 'project_id': uuidutils.generate_uuid(), + 'id': uuidutils.generate_uuid(), + 'provisioning_status': constants.ACTIVE, + 'tls_enabled': False, 'tls_ciphers': None, + 'tls_versions': None, + 'alpn_protocols': None} + pool_dm = self.repos.create_pool_on_load_balancer( + self.session, pool, listener_id=self.listener.id) + update_pool = {'tls_certificate_id': uuidutils.generate_uuid()} + new_pool_dm = self.repos.update_pool_and_sp( + self.session, pool_dm.id, update_pool) + self.session.commit() + pool_dm_dict = new_pool_dm.to_dict() + # These are not defined in the sample pool dict but will + # be in the live data. + del pool_dm_dict['members'] + del pool_dm_dict['health_monitor'] + del pool_dm_dict['session_persistence'] + del pool_dm_dict['listeners'] + del pool_dm_dict['load_balancer'] + del pool_dm_dict['load_balancer_id'] + del pool_dm_dict['l7policies'] + del pool_dm_dict['created_at'] + del pool_dm_dict['updated_at'] + del pool_dm_dict['tags'] + del pool_dm_dict['ca_tls_certificate_id'] + del pool_dm_dict['crl_container_id'] + pool.update(update_pool) + self.assertEqual(pool, pool_dm_dict) + + def test_sqlite_transactions_broken(self): + """This test is a canary for pysqlite fixing transaction handling. + + When this test starts failing, we can fix and un-skip the deadlock. + """ + self.skipTest("SQLAlchemy/PySqlite transaction handling is broken. " + "Version 1.3.16 of sqlalchemy changes how sqlite3 " + "transactions are handled and this test fails as " + "the LB created early in this process now disappears " + "from the transaction context.") + project_id = uuidutils.generate_uuid() + vip = {'ip_address': '192.0.2.1', 'port_id': uuidutils.generate_uuid(), + 'subnet_id': uuidutils.generate_uuid()} + lb = {'name': 'lb1', 'description': 'desc1', 'enabled': True, + 'topology': constants.TOPOLOGY_ACTIVE_STANDBY, + 'vrrp_group': None, 'server_group_id': uuidutils.generate_uuid(), + 'project_id': project_id, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.ONLINE, + 'id': uuidutils.generate_uuid()} + + session = db_api.get_session() + lock_session = db_api.get_session() + lbs = lock_session.query(db_models.LoadBalancer).filter_by( + project_id=project_id).all() + self.assertEqual(0, len(lbs)) # Initially: 0 + self.repos.create_load_balancer_and_vip(lock_session, lb, vip) + self.session.commit() + lbs = lock_session.query(db_models.LoadBalancer).filter_by( + project_id=project_id).all() + self.assertEqual(1, len(lbs)) # After create: 1 + lock_session.rollback() + lbs = lock_session.query(db_models.LoadBalancer).filter_by( + project_id=project_id).all() + self.assertEqual(0, len(lbs)) # After rollback: 0 + self.repos.create_load_balancer_and_vip(lock_session, lb, vip) + self.session.commit() + lbs = lock_session.query(db_models.LoadBalancer).filter_by( + project_id=project_id).all() + self.assertEqual(1, len(lbs)) # After create: 1 + lock_session.rollback() + lbs = lock_session.query(db_models.LoadBalancer).filter_by( + project_id=project_id).all() + self.assertEqual(0, len(lbs)) # After rollback: 0 + # Force a count(), which breaks transaction integrity in pysqlite + session.query(db_models.LoadBalancer).filter( + db_models.LoadBalancer.project_id == project_id).count() + self.repos.create_load_balancer_and_vip(lock_session, lb, vip) + self.session.commit() + lbs = lock_session.query(db_models.LoadBalancer).filter_by( + 
project_id=project_id).all() + self.assertEqual(1, len(lbs)) # After create: 1 + lock_session.rollback() + lbs = lock_session.query(db_models.LoadBalancer).filter_by( + project_id=project_id).all() + self.assertEqual(1, len(lbs)) # After rollback: 1 (broken!) + + def test_check_quota_met_check_deadlock(self): + # This test doesn't work with sqlite, and using another backend is + # not straightforward: we would need to update the connection_string + # passed to the __init__ func and also change some calls in the + # constructor (don't create the DB objects if we use a DB that was + # deployed for Octavia) + if 'sqlite://' in self.connection_string: + self.skipTest("The test for checking potential deadlocks " + "doesn't work with the sqlite backend") + + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + conf.config(group='quotas', default_load_balancer_quota=-1) + + # Calling check_quota_met concurrently from many threads used to + # trigger a deadlock in the DB + # (Note: we run the test 8 times because it's not 100% reproducible) + # https://bugs.launchpad.net/octavia/+bug/2038798 + for _ in range(8): + number_of_projects = 8 + project_ids = ( + uuidutils.generate_uuid() + for _ in range(number_of_projects)) + + with ThreadPoolExecutor( + max_workers=number_of_projects) as executor: + def _test_check_quota_met(project_id): + session = self.get_session() + session.begin() + self.assertFalse(self.repos.check_quota_met( + session, data_models.LoadBalancer, + project_id)) + session.commit() + + futs = [] + for project_id in project_ids: + future = executor.submit(_test_check_quota_met, project_id) + futs.append(future) + + for fut in futs: + fut.result() + + def test_check_quota_met(self): + + project_id = uuidutils.generate_uuid() + + # Test auth_strategy == NOAUTH + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # Test check for missing project_id + self.assertRaises(exceptions.MissingProjectID, + self.repos.check_quota_met, + self.session, + data_models.LoadBalancer, None) + self.session.commit() + + # Test non-quota object + project_id = uuidutils.generate_uuid() + self.assertFalse( + self.repos.check_quota_met(self.session, + data_models.SessionPersistence, + project_id)) + self.session.commit() + + # Test DB deadlock case + project_id = uuidutils.generate_uuid() + mock_session = mock.MagicMock() + mock_session.query = mock.MagicMock( + side_effect=db_exception.DBDeadlock) + self.assertRaises(exceptions.ProjectBusyException, + self.repos.check_quota_met, + mock_session, + data_models.LoadBalancer, project_id) + self.session.commit() + + # ### Test load balancer quota + # Test with no pre-existing quota record default 0 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_load_balancer_quota=0) + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.session.commit() + self.assertIsNone(self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + + # Test with no pre-existing quota record default 1 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_load_balancer_quota=1) + self.assertFalse(self.repos.check_quota_met(self.session, + 
data_models.LoadBalancer, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + + # Test with no pre-existing quota record default unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', + default_load_balancer_quota=constants.QUOTA_UNLIMITED) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + # Test above project adding another load balancer + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + + # Test upgrade case with pre-quota load balancers + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_load_balancer_quota=1) + self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.session.commit() + + # Test upgrade case with pre-quota deleted load balancers + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_load_balancer_quota=1) + self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.DELETED, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + + # Test pre-existing quota with quota of zero + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_load_balancer_quota=10) + quota = {'load_balancer': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.session.commit() + + # Test pre-existing quota with quota of one + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_load_balancer_quota=0) + quota = {'load_balancer': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + + # Test pre-existing quota 
with quota of unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_load_balancer_quota=0) + quota = {'load_balancer': constants.QUOTA_UNLIMITED} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + # Test above project adding another load balancer + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.LoadBalancer, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + + # ### Test listener quota + # Test with no pre-existing quota record default 0 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_listener_quota=0) + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + self.assertIsNone(self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + + # Test with no pre-existing quota record default 1 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_listener_quota=1) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + + # Test with no pre-existing quota record default unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', + default_listener_quota=constants.QUOTA_UNLIMITED) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + # Test above project adding another listener + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + + # Test upgrade case with pre-quota listener + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_listener_quota=1) + lb = self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + self.repos.listener.create( + self.session, id=uuidutils.generate_uuid(), + protocol=constants.PROTOCOL_HTTP, protocol_port=80, + enabled=True, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, project_id=project_id, + load_balancer_id=lb.id) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + + # Test upgrade case with pre-quota deleted listener + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_listener_quota=1) + lb = self.repos.load_balancer.create( + self.session, 
id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + self.repos.listener.create( + self.session, id=uuidutils.generate_uuid(), + protocol=constants.PROTOCOL_HTTP, protocol_port=80, + enabled=True, provisioning_status=constants.DELETED, + operating_status=constants.ONLINE, project_id=project_id, + load_balancer_id=lb.id) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + + # Test pre-existing quota with quota of zero + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_listener_quota=10) + quota = {'listener': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + + # Test pre-existing quota with quota of one + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_listener_quota=0) + quota = {'listener': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + + # Test pre-existing quota with quota of unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_listener_quota=0) + quota = {'listener': constants.QUOTA_UNLIMITED} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + # Test above project adding another listener + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Listener, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + + # ### Test pool quota + # Test with no pre-existing quota record default 0 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_pool_quota=0) + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + self.session.commit() + self.assertIsNone(self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + + # Test with no pre-existing quota record default 1 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_pool_quota=1) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + 
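A note on the contract these assertions encode (a minimal standalone sketch, not the repository implementation): check_quota_met() answers "would one more object exceed the project's quota?", returning True at quota, otherwise counting the object and returning False, with the constants.QUOTA_UNLIMITED sentinel (-1) never blocking.

    QUOTA_UNLIMITED = -1  # mirrors constants.QUOTA_UNLIMITED

    def check_quota_met(limit, in_use):
        """Return (quota_met, new_in_use) for a single create request."""
        if limit != QUOTA_UNLIMITED and in_use + 1 > limit:
            return True, in_use       # at quota: reject, counter unchanged
        return False, in_use + 1      # under quota or unlimited: count it

    assert check_quota_met(0, 0) == (True, 0)    # default quota of zero
    assert check_quota_met(1, 0) == (False, 1)   # first object fits
    assert check_quota_met(1, 1) == (True, 1)    # project now at quota
    assert check_quota_met(QUOTA_UNLIMITED, 5) == (False, 6)  # never blocks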
self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + + # Test with no pre-existing quota record default unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', + default_pool_quota=constants.QUOTA_UNLIMITED) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + # Test above project adding another pool + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + + # Test upgrade case with pre-quota pool + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_pool_quota=1) + lb = self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + self.repos.pool.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="pool1", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, load_balancer_id=lb.id) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + self.session.commit() + + # Test upgrade case with pre-quota deleted pool + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_pool_quota=1) + lb = self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + self.repos.pool.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="pool1", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.DELETED, + operating_status=constants.ONLINE, + enabled=True, load_balancer_id=lb.id) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + + # Test pre-existing quota with quota of zero + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_pool_quota=10) + quota = {'pool': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + self.session.commit() + + # Test pre-existing quota with quota of one + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_pool_quota=0) + quota = {'pool': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + 
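The "pre-existing quota" cases turn on precedence: an explicit per-project quota record overrides the configured default, even when it is stricter. A one-function sketch of that resolution rule, inferred from the assertions rather than taken from the repository code:

    def effective_limit(project_quota, config_default):
        """A project-specific quota record wins over the config default."""
        return config_default if project_quota is None else project_quota

    assert effective_limit(None, 10) == 10  # no record: default applies
    assert effective_limit(0, 10) == 0      # explicit zero beats default 10
    assert effective_limit(1, 0) == 1       # explicit one beats default 0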
self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + + # Test pre-existing quota with quota of unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_pool_quota=0) + quota = {'pool': constants.QUOTA_UNLIMITED} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + # Test above project adding another pool + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Pool, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + + # ### Test health monitor quota + # Test with no pre-existing quota record default 0 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_health_monitor_quota=0) + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + self.assertIsNone(self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + + # Test with no pre-existing quota record default 1 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_health_monitor_quota=1) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + + # Test with no pre-existing quota record default unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', + default_health_monitor_quota=constants.QUOTA_UNLIMITED) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + # Test above project adding another health monitor + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + + # Test upgrade case with pre-quota health monitor + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_health_monitor_quota=1) + lb = self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + pool = self.repos.pool.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="pool1", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, load_balancer_id=lb.id) + self.session.commit() + self.repos.health_monitor.create( + self.session, project_id=project_id, + 
name="health_mon1", type=constants.HEALTH_MONITOR_HTTP, + delay=1, timeout=1, fall_threshold=1, rise_threshold=1, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, pool_id=pool.id) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + + # Test upgrade case with pre-quota deleted health monitor + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_health_monitor_quota=1) + lb = self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + pool = self.repos.pool.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="pool1", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, load_balancer_id=lb.id) + self.session.commit() + self.repos.health_monitor.create( + self.session, project_id=project_id, + name="health_mon1", type=constants.HEALTH_MONITOR_HTTP, + delay=1, timeout=1, fall_threshold=1, rise_threshold=1, + provisioning_status=constants.DELETED, + operating_status=constants.OFFLINE, + enabled=True, pool_id=pool.id) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + + # Test pre-existing quota with quota of zero + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_health_monitor_quota=10) + quota = {'health_monitor': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + + # Test pre-existing quota with quota of one + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_health_monitor_quota=0) + quota = {'health_monitor': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + + # Test pre-existing quota with quota of unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_health_monitor_quota=0) + quota = {'health_monitor': constants.QUOTA_UNLIMITED} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.HealthMonitor, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + # Test above project adding another health monitor + self.assertFalse(self.repos.check_quota_met(self.session, + 
data_models.HealthMonitor, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + + # ### Test member quota + # Test with no pre-existing quota record default 0 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_member_quota=0) + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + self.assertIsNone(self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + + # Test with no pre-existing quota record default 1 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_member_quota=1) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + + # Test with no pre-existing quota record default unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', + default_member_quota=constants.QUOTA_UNLIMITED) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + # Test above project adding another member + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + + # Test upgrade case with pre-quota member + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_member_quota=1) + lb = self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + pool = self.repos.pool.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="pool1", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, load_balancer_id=lb.id) + self.session.commit() + self.repos.member.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, + ip_address='192.0.2.1', protocol_port=80, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, pool_id=pool.id, backup=False) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + + # Test upgrade case with pre-quota deleted member + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_member_quota=1) + lb = self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + pool = self.repos.pool.create( + self.session, id=uuidutils.generate_uuid(), + 
project_id=project_id, name="pool1", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, load_balancer_id=lb.id) + self.session.commit() + self.repos.member.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, + ip_address='192.0.2.1', protocol_port=80, + provisioning_status=constants.DELETED, + operating_status=constants.ONLINE, + enabled=True, pool_id=pool.id, backup=False) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + + # Test pre-existing quota with quota of zero + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_member_quota=10) + quota = {'member': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + + # Test pre-existing quota with quota of one + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_member_quota=0) + quota = {'member': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + + # Test pre-existing quota with quota of unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_member_quota=0) + quota = {'member': constants.QUOTA_UNLIMITED} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + # Test above project adding another member + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.Member, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + + # ### Test l7policy quota + # Test with no pre-existing quota record default 0 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7policy_quota=0) + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + self.assertIsNone(self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + + # Test with no pre-existing quota record default 1 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7policy_quota=1) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + 
data_models.L7Policy, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + + # Test with no pre-existing quota record default unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', + default_l7policy_quota=constants.QUOTA_UNLIMITED) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + # Test above project adding another l7policy + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + + # Test upgrade case with pre-quota l7policy + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7policy_quota=1) + lb = self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + listener = self.repos.listener.create( + self.session, id=uuidutils.generate_uuid(), + protocol=constants.PROTOCOL_HTTP, protocol_port=80, + enabled=True, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, project_id=project_id, + load_balancer_id=lb.id) + self.session.commit() + self.repos.l7policy.create( + self.session, name='l7policy', enabled=True, position=1, + action=constants.L7POLICY_ACTION_REJECT, + provisioning_status=constants.ACTIVE, listener_id=listener.id, + operating_status=constants.ONLINE, project_id=project_id, + id=uuidutils.generate_uuid()) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + + # Test upgrade case with pre-quota deleted l7policy + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7policy_quota=1) + lb = self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + listener = self.repos.listener.create( + self.session, id=uuidutils.generate_uuid(), + protocol=constants.PROTOCOL_HTTP, protocol_port=80, + enabled=True, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, project_id=project_id, + load_balancer_id=lb.id) + self.session.commit() + self.repos.l7policy.create( + self.session, name='l7policy', enabled=True, position=1, + action=constants.L7POLICY_ACTION_REJECT, + provisioning_status=constants.DELETED, listener_id=listener.id, + operating_status=constants.ONLINE, project_id=project_id, + id=uuidutils.generate_uuid()) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + + # Test pre-existing quota with quota of zero + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7policy_quota=10) + quota = {'l7policy': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + 
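The "upgrade case" blocks above cover projects that owned objects before any quota record existed: the first check_quota_met() call has to seed the in-use counter from the database, and rows already in DELETED provisioning status must not count. A rough model of that seeding step (an assumption drawn from the DELETED-versus-ACTIVE assertions, not the actual query):

    DELETED = 'DELETED'  # mirrors constants.DELETED

    def seed_in_use(provisioning_statuses):
        """Count a project's pre-quota objects, skipping deleted ones."""
        return sum(1 for status in provisioning_statuses
                   if status != DELETED)

    assert seed_in_use(['ACTIVE']) == 1  # existing object: already at quota 1
    assert seed_in_use([DELETED]) == 0   # deleted object: room for one more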
self.assertTrue(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + + # Test pre-existing quota with quota of one + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7policy_quota=0) + quota = {'l7policy': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + + # Test pre-existing quota with quota of unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7policy_quota=0) + quota = {'l7policy': constants.QUOTA_UNLIMITED} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + # Test above project adding another l7policy + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Policy, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + + # ### Test l7rule quota + # Test with no pre-existing quota record default 0 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7rule_quota=0) + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + self.assertIsNone(self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + + # Test with no pre-existing quota record default 1 + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7rule_quota=1) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + + # Test with no pre-existing quota record default unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', + default_l7rule_quota=constants.QUOTA_UNLIMITED) + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + # Test above project adding another l7rule + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + + # Test upgrade case with pre-quota l7rule + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7rule_quota=1) + lb = self.repos.load_balancer.create( + self.session, 
id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + listener = self.repos.listener.create( + self.session, id=uuidutils.generate_uuid(), + protocol=constants.PROTOCOL_HTTP, protocol_port=80, + enabled=True, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, project_id=project_id, + load_balancer_id=lb.id) + self.session.commit() + l7policy = self.repos.l7policy.create( + self.session, name='l7policy', enabled=True, position=1, + action=constants.L7POLICY_ACTION_REJECT, + provisioning_status=constants.ACTIVE, listener_id=listener.id, + operating_status=constants.ONLINE, project_id=project_id, + id=uuidutils.generate_uuid()) + self.session.commit() + self.repos.l7rule.create( + self.session, id=uuidutils.generate_uuid(), + l7policy_id=l7policy.id, type=constants.L7RULE_TYPE_HOST_NAME, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, enabled=True, + provisioning_status=constants.ACTIVE, value='hostname', + operating_status=constants.ONLINE, project_id=project_id) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + + # Test upgrade case with pre-quota deleted l7rule + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7rule_quota=1) + lb = self.repos.load_balancer.create( + self.session, id=uuidutils.generate_uuid(), + project_id=project_id, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.session.commit() + listener = self.repos.listener.create( + self.session, id=uuidutils.generate_uuid(), + protocol=constants.PROTOCOL_HTTP, protocol_port=80, + enabled=True, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, project_id=project_id, + load_balancer_id=lb.id) + self.session.commit() + l7policy = self.repos.l7policy.create( + self.session, name='l7policy', enabled=True, position=1, + action=constants.L7POLICY_ACTION_REJECT, + provisioning_status=constants.ACTIVE, listener_id=listener.id, + operating_status=constants.ONLINE, project_id=project_id, + id=uuidutils.generate_uuid()) + self.session.commit() + self.repos.l7rule.create( + self.session, id=uuidutils.generate_uuid(), + l7policy_id=l7policy.id, type=constants.L7RULE_TYPE_HOST_NAME, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, enabled=True, + provisioning_status=constants.DELETED, value='hostname', + operating_status=constants.ONLINE, project_id=project_id) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + + # Test pre-existing quota with quota of zero + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7rule_quota=10) + quota = {'l7rule': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + + # Test pre-existing quota with quota of one + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7rule_quota=0) + quota = {'l7rule': 1} + self.repos.quotas.update(self.session, project_id,
quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + # Test above project is now at quota + self.assertTrue(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + + # Test pre-existing quota with quota of unlimited + project_id = uuidutils.generate_uuid() + conf.config(group='quotas', default_l7rule_quota=0) + quota = {'l7rule': constants.QUOTA_UNLIMITED} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.session.commit() + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + self.assertEqual(1, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + # Test above project adding another l7rule + self.assertFalse(self.repos.check_quota_met(self.session, + data_models.L7Rule, + project_id)) + self.session.commit() + self.assertEqual(2, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + + def test_decrement_quota(self): + # Test decrement on non-existent quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + self.repos.decrement_quota(self.session, + data_models.LoadBalancer, + project_id) + self.assertEqual(0, self.repos.quotas.count(self.session, + project_id=project_id)) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # Test decrement on non-existent quota + project_id = uuidutils.generate_uuid() + self.repos.decrement_quota(self.session, + data_models.LoadBalancer, + project_id) + self.assertEqual(0, self.repos.quotas.count(self.session, + project_id=project_id)) + + # Test DB deadlock case + project_id = uuidutils.generate_uuid() + mock_session = mock.MagicMock() + mock_session.query = mock.MagicMock( + side_effect=db_exception.DBDeadlock) + self.assertRaises(exceptions.ProjectBusyException, + self.repos.decrement_quota, + mock_session, + data_models.LoadBalancer, project_id) + + # ### Test load balancer quota + # Test decrement on zero in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_load_balancer': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.LoadBalancer, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + + # Test decrement on zero in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_load_balancer': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.LoadBalancer, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # Test decrement on in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_load_balancer': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + 
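test_decrement_quota checks the mirror-image contract: decrementing never drives an in-use counter below zero, a project with no quota record gets none created, and a database deadlock is surfaced as exceptions.ProjectBusyException. A condensed sketch of the clamp-at-zero behavior (illustrative only; None stands in for a missing quota record):

    def decrement(in_use):
        """Decrement an in-use counter; None means no quota record exists."""
        if in_use is None:
            return None                # unknown project: create nothing
        return max(0, in_use - 1)      # clamp at zero, never go negative

    assert decrement(None) is None     # decrement on a non-existent quota
    assert decrement(0) == 0           # zero in-use stays at zero
    assert decrement(1) == 0           # normal decrement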
self.repos.decrement_quota(self.session, + data_models.LoadBalancer, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + + # Test decrement on in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_load_balancer': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.LoadBalancer, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_load_balancer) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # ### Test listener quota + # Test decrement on zero in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_listener': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Listener, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + + # Test decrement on zero in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_listener': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Listener, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # Test decrement on in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_listener': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Listener, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + + # Test decrement on in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_listener': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Listener, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_listener) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # ### Test pool quota + # Test decrement on zero in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_pool': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Pool, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + + # Test decrement on zero in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_pool': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Pool, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + conf.config(group='api_settings', 
auth_strategy=constants.TESTING) + + # Test decrement on in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_pool': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Pool, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + + # Test decrement on in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_pool': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Pool, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_pool) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # ### Test health monitor quota + # Test decrement on zero in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_health_monitor': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.HealthMonitor, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + + # Test decrement on zero in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_health_monitor': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.HealthMonitor, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # Test decrement on in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_health_monitor': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.HealthMonitor, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + + # Test decrement on in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_health_monitor': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.HealthMonitor, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_health_monitor) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # ### Test member quota + # Test decrement on zero in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_member': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Member, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + + # Test decrement on zero in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_member': 0} + 
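Each "with noauth" variant flips the auth strategy through oslo.config's test fixture, which also reverts overrides at cleanup. The mechanics in isolation (real oslo_config fixture API; the api_settings group and option are registered here only to make the snippet self-contained):

    from oslo_config import cfg
    from oslo_config import fixture as oslo_fixture

    CONF = cfg.ConfigOpts()
    CONF.register_opts([cfg.StrOpt('auth_strategy', default='testing')],
                       group='api_settings')

    fx = oslo_fixture.Config(CONF)
    fx.setUp()  # a test case would use self.useFixture(...) instead
    fx.config(group='api_settings', auth_strategy='noauth')  # override
    assert CONF.api_settings.auth_strategy == 'noauth'
    fx.cleanUp()  # reverts the override
    assert CONF.api_settings.auth_strategy == 'testing'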
self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Member, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # Test decrement on in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_member': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Member, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + + # Test decrement on in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_member': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.Member, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_member) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + # ### Test l7policy quota + # Test decrement on zero in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_l7policy': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.L7Policy, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + + # Test decrement on zero in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_l7policy': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.L7Policy, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # Test decrement on in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_l7policy': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.L7Policy, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + + # Test decrement on in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_l7policy': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.L7Policy, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7policy) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # ### Test l7rule quota + # Test decrement on zero in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_l7rule': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.L7Rule, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + + # Test decrement on zero in use quota with noauth + project_id 
= uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_l7rule': 0} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.L7Rule, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + # Test decrement on in use quota + project_id = uuidutils.generate_uuid() + quota = {'in_use_l7rule': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.L7Rule, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + + # Test decrement on in use quota with noauth + project_id = uuidutils.generate_uuid() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', auth_strategy=constants.NOAUTH) + quota = {'in_use_l7rule': 1} + self.repos.quotas.update(self.session, project_id, quota=quota) + self.repos.decrement_quota(self.session, + data_models.L7Rule, + project_id) + self.assertEqual(0, self.repos.quotas.get( + self.session, project_id=project_id).in_use_l7rule) + conf.config(group='api_settings', auth_strategy=constants.TESTING) + + def test_get_amphora_stats(self): + listener2_id = uuidutils.generate_uuid() + self.repos.listener_stats.create( + self.session, listener_id=self.listener.id, + amphora_id=self.amphora.id, bytes_in=1, bytes_out=2, + active_connections=3, total_connections=4, request_errors=5) + self.repos.listener_stats.create( + self.session, listener_id=listener2_id, + amphora_id=self.amphora.id, bytes_in=6, bytes_out=7, + active_connections=8, total_connections=9, request_errors=10) + amp_stats = self.repos.get_amphora_stats(self.session, self.amphora.id) + self.assertEqual(2, len(amp_stats)) + for stats in amp_stats: + if stats['listener_id'] == self.listener.id: + self.assertEqual(self.load_balancer.id, + stats['loadbalancer_id']) + self.assertEqual(self.listener.id, stats['listener_id']) + self.assertEqual(self.amphora.id, stats['id']) + self.assertEqual(1, stats['bytes_in']) + self.assertEqual(2, stats['bytes_out']) + self.assertEqual(3, stats['active_connections']) + self.assertEqual(4, stats['total_connections']) + self.assertEqual(5, stats['request_errors']) + else: + self.assertEqual(self.load_balancer.id, + stats['loadbalancer_id']) + self.assertEqual(listener2_id, stats['listener_id']) + self.assertEqual(self.amphora.id, stats['id']) + self.assertEqual(6, stats['bytes_in']) + self.assertEqual(7, stats['bytes_out']) + self.assertEqual(8, stats['active_connections']) + self.assertEqual(9, stats['total_connections']) + self.assertEqual(10, stats['request_errors']) + + +class PoolRepositoryTest(BaseRepositoryTest): + + def create_pool(self, pool_id, project_id): + pool = self.pool_repo.create( + self.session, id=pool_id, project_id=project_id, name="pool_test", + description="pool_description", protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True, tags=['test_tag']) + self.session.commit() + return pool + + def test_get(self): + pool = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + new_pool = self.pool_repo.get(self.session, id=pool.id) + 
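test_get_amphora_stats above expects one merged row per listener hosted on the amphora, each carrying the listener, load balancer, and amphora ids next to the traffic counters. A dict-based sketch of that result shape (field names mirror the assertions; the join that produces the rows is assumed):

    def get_amphora_stats(stat_rows, amphora_id):
        """Filter per-listener stats rows down to a single amphora."""
        return [row for row in stat_rows if row['id'] == amphora_id]

    stat_rows = [
        {'id': 'amp-1', 'listener_id': 'listener-1',
         'loadbalancer_id': 'lb-1', 'bytes_in': 1, 'bytes_out': 2,
         'active_connections': 3, 'total_connections': 4,
         'request_errors': 5},
        {'id': 'amp-1', 'listener_id': 'listener-2',
         'loadbalancer_id': 'lb-1', 'bytes_in': 6, 'bytes_out': 7,
         'active_connections': 8, 'total_connections': 9,
         'request_errors': 10},
    ]
    assert len(get_amphora_stats(stat_rows, 'amp-1')) == 2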
self.assertIsInstance(new_pool, data_models.Pool) + self.assertEqual(pool.id, new_pool.id) + self.assertEqual(pool.project_id, new_pool.project_id) + + def test_get_limited_graph(self): + def check_pool_attrs(pool, new_pool, lb, limited_graph): + self.assertIsInstance(new_pool, data_models.Pool) + self.assertEqual(pool.id, new_pool.id) + self.assertEqual(pool.project_id, new_pool.project_id) + if limited_graph: + self.assertIsNone(new_pool.load_balancer) + else: + self.assertEqual(lb.id, new_pool.load_balancer.id) + + pool = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + # Create an LB and attach the pool to it. + # This gives the pool node in the object graph a new relationship + # to the LB. + lb = self.create_loadbalancer(self.FAKE_UUID_5) + self.pool_repo.update(self.session, id=pool.id, load_balancer_id=lb.id) + + new_pool = self.pool_repo.get(self.session, id=pool.id) + check_pool_attrs(pool, new_pool, lb, limited_graph=False) + + new_pool2 = self.pool_repo.get(self.session, id=pool.id, + limited_graph=True) + check_pool_attrs(pool, new_pool2, lb, limited_graph=True) + + def test_get_all(self): + pool_one = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + pool_two = self.create_pool(pool_id=self.FAKE_UUID_3, + project_id=self.FAKE_UUID_2) + pool_list, _ = self.pool_repo.get_all(self.session, + project_id=self.FAKE_UUID_2) + self.assertIsInstance(pool_list, list) + self.assertEqual(2, len(pool_list)) + self.assertEqual(pool_one.id, pool_list[0].id) + self.assertEqual(pool_one.project_id, pool_list[0].project_id) + self.assertEqual(pool_two.id, pool_list[1].id) + self.assertEqual(pool_two.project_id, pool_list[1].project_id) + + def test_create(self): + pool = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + self.assertIsInstance(pool, data_models.Pool) + self.assertEqual(self.FAKE_UUID_2, pool.project_id) + self.assertEqual("pool_test", pool.name) + self.assertEqual("pool_description", pool.description) + self.assertEqual(constants.PROTOCOL_HTTP, pool.protocol) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, pool.lb_algorithm) + self.assertEqual(constants.ONLINE, pool.operating_status) + + def test_update(self): + pool = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + self.pool_repo.update(self.session, pool.id, + description="other_pool_description") + self.session.commit() + new_pool = self.pool_repo.get(self.session, id=self.FAKE_UUID_1) + self.assertEqual("other_pool_description", new_pool.description) + + def test_delete(self): + pool = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + self.pool_repo.delete(self.session, id=pool.id) + self.session.commit() + self.assertIsNone(self.pool_repo.get(self.session, id=pool.id)) + + def test_delete_with_member(self): + pool = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + member = self.member_repo.create(self.session, id=self.FAKE_UUID_3, + project_id=self.FAKE_UUID_2, + pool_id=pool.id, + ip_address="192.0.2.1", + protocol_port=80, enabled=True, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + backup=False) + self.session.commit() + new_pool = self.pool_repo.get(self.session, id=pool.id) + self.assertEqual(1, len(new_pool.members)) + self.assertEqual(member.id, new_pool.members[0].id) + self.assertEqual(member.ip_address, new_pool.members[0].ip_address) +
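test_get_limited_graph above hinges on the repositories' limited_graph flag: when set, conversion from the SQLAlchemy row to the data model stops before following relationships such as pool -> load_balancer, which then come back as None. A toy recursion guard with the same observable behavior (illustrative only, not the converter's real logic):

    def to_data_model(row, limited_graph=False):
        """Convert a dict 'row' to a graph, optionally pruning relations."""
        lb = row.get('load_balancer')
        follow = lb is not None and not limited_graph
        return {'id': row['id'],
                'load_balancer': to_data_model(lb) if follow else None}

    pool_row = {'id': 'pool-1', 'load_balancer': {'id': 'lb-1'}}
    assert to_data_model(pool_row)['load_balancer']['id'] == 'lb-1'
    assert to_data_model(pool_row, limited_graph=True)['load_balancer'] is None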
self.pool_repo.delete(self.session, id=pool.id) + self.session.commit() + self.assertIsNone(self.pool_repo.get(self.session, id=pool.id)) + self.assertIsNone(self.member_repo.get(self.session, id=member.id)) + + def test_delete_with_health_monitor(self): + pool = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + hm = self.hm_repo.create(self.session, id=uuidutils.generate_uuid(), + pool_id=pool.id, + type=constants.HEALTH_MONITOR_HTTP, + delay=1, timeout=1, fall_threshold=1, + rise_threshold=1, enabled=True, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE) + self.session.commit() + new_pool = self.pool_repo.get(self.session, id=pool.id) + self.assertEqual(pool.id, new_pool.id) + self.assertEqual(pool.name, new_pool.name) + self.assertEqual(pool.project_id, new_pool.project_id) + self.assertEqual(hm.id, new_pool.health_monitor.id) + self.assertEqual(hm.name, new_pool.health_monitor.name) + self.assertEqual(hm.type, new_pool.health_monitor.type) + self.assertEqual(hm.pool_id, new_pool.health_monitor.pool_id) + self.pool_repo.delete(self.session, id=pool.id) + self.session.commit() + self.assertIsNone(self.pool_repo.get(self.session, id=pool.id)) + self.assertIsNone(self.hm_repo.get(self.session, pool_id=hm.pool_id)) + + def test_delete_with_session_persistence(self): + pool = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + sp = self.sp_repo.create( + self.session, pool_id=pool.id, + type=constants.SESSION_PERSISTENCE_HTTP_COOKIE, + cookie_name="cookie_name") + self.session.commit() + new_pool = self.pool_repo.get(self.session, id=pool.id) + self.assertEqual(pool.id, new_pool.id) + self.assertEqual(pool.project_id, new_pool.project_id) + self.assertEqual(sp.pool_id, new_pool.session_persistence.pool_id) + self.pool_repo.delete(self.session, id=new_pool.id) + self.session.commit() + self.assertIsNone(self.pool_repo.get(self.session, id=pool.id)) + self.assertIsNone(self.sp_repo.get(self.session, pool_id=sp.pool_id)) + + def test_delete_with_all_children(self): + pool = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + hm = self.hm_repo.create(self.session, + id=uuidutils.generate_uuid(), + pool_id=pool.id, + type=constants.HEALTH_MONITOR_HTTP, + delay=1, timeout=1, fall_threshold=1, + rise_threshold=1, enabled=True, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE) + member = self.member_repo.create(self.session, id=self.FAKE_UUID_3, + project_id=self.FAKE_UUID_2, + pool_id=pool.id, + ip_address="192.0.2.1", + protocol_port=80, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, + backup=False) + sp = self.sp_repo.create( + self.session, pool_id=pool.id, + type=constants.SESSION_PERSISTENCE_HTTP_COOKIE, + cookie_name="cookie_name") + self.session.commit() + new_pool = self.pool_repo.get(self.session, id=pool.id) + self.assertEqual(pool.id, new_pool.id) + self.assertEqual(pool.project_id, new_pool.project_id) + self.assertEqual(1, len(new_pool.members)) + new_member = self.member_repo.get(self.session, id=member.id) + self.assertEqual(new_member.id, new_pool.members[0].id) + self.assertEqual(new_member.pool_id, new_pool.members[0].pool_id) + self.assertEqual(new_member.ip_address, new_pool.members[0].ip_address) + self.assertEqual(hm.id, new_pool.health_monitor.id) + self.assertEqual(hm.type, new_pool.health_monitor.type) + self.assertEqual(hm.pool_id, new_pool.health_monitor.pool_id) + self.assertEqual(sp.type, 
new_pool.session_persistence.type) + self.assertEqual(sp.pool_id, new_pool.session_persistence.pool_id) + self.pool_repo.delete(self.session, id=pool.id) + self.session.commit() + self.assertIsNone(self.pool_repo.get(self.session, id=pool.id)) + self.assertIsNone(self.member_repo.get(self.session, id=member.id)) + self.assertIsNone(self.hm_repo.get(self.session, pool_id=hm.pool_id)) + self.assertIsNone(self.sp_repo.get(self.session, pool_id=sp.pool_id)) + + def test_get_children_count(self): + pool = self.create_pool(pool_id=self.FAKE_UUID_1, + project_id=self.FAKE_UUID_2) + hm_count, member_count = ( + self.pool_repo.get_children_count(self.session, pool.id)) + self.assertEqual(0, hm_count) + self.assertEqual(0, member_count) + + self.hm_repo.create(self.session, pool_id=pool.id, + type=constants.HEALTH_MONITOR_HTTP, + delay=1, timeout=1, fall_threshold=1, + rise_threshold=1, enabled=True, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE) + self.session.commit() + + hm_count, member_count = ( + self.pool_repo.get_children_count(self.session, pool.id)) + self.assertEqual(1, hm_count) + self.assertEqual(0, member_count) + + self.member_repo.create(self.session, id=self.FAKE_UUID_3, + project_id=self.FAKE_UUID_2, + pool_id=pool.id, + ip_address="192.0.2.1", + protocol_port=80, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, + backup=False) + self.member_repo.create(self.session, id=self.FAKE_UUID_4, + project_id=self.FAKE_UUID_2, + pool_id=pool.id, + ip_address="192.0.2.2", + protocol_port=80, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, + backup=False) + self.session.commit() + + hm_count, member_count = ( + self.pool_repo.get_children_count(self.session, pool.id)) + self.assertEqual(1, hm_count) + self.assertEqual(2, member_count) + + +class MemberRepositoryTest(BaseRepositoryTest): + + def setUp(self): + super().setUp() + self.pool = self.pool_repo.create( + self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, + name="pool_test", description="pool_description", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True, tags=['test_tag']) + self.session.commit() + + def create_member(self, member_id, project_id, pool_id, ip_address): + member = self.member_repo.create(self.session, id=member_id, + project_id=project_id, + pool_id=pool_id, + ip_address=ip_address, + protocol_port=80, + operating_status=constants.ONLINE, + provisioning_status=constants.ACTIVE, + enabled=True, + backup=False) + self.session.commit() + return member + + def test_get(self): + member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2, + self.pool.id, "192.0.2.1") + new_member = self.member_repo.get(self.session, id=member.id) + self.assertIsInstance(new_member, data_models.Member) + self.assertEqual(member.id, new_member.id) + self.assertEqual(member.pool_id, new_member.pool_id) + self.assertEqual(member.ip_address, new_member.ip_address) + + def test_get_limited_graph(self): + def check_member_attrs(member, new_member, lb, limited_graph): + self.assertIsInstance(new_member, data_models.Member) + self.assertEqual(member.id, new_member.id) + self.assertEqual(member.pool_id, new_member.pool_id) + self.assertEqual(member.ip_address, new_member.ip_address) + if limited_graph: + self.assertIsNone(new_member.pool) + else: + self.assertEqual(lb.id, 
new_member.pool.load_balancer.id)
+
+        member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
+                                    self.pool.id, "192.0.2.1")
+        # Create an LB and attach the pool to it. The pool node in the
+        # data model graph then gains a new relationship to the LB.
+        lb = self.create_loadbalancer(self.FAKE_UUID_5)
+        self.pool_repo.update(self.session, id=self.pool.id,
+                              load_balancer_id=lb.id)
+
+        new_member = self.member_repo.get(self.session, id=member.id)
+        check_member_attrs(member, new_member, lb, limited_graph=False)
+
+        new_member2 = self.member_repo.get(self.session, id=member.id,
+                                           limited_graph=True)
+        check_member_attrs(member, new_member2, lb, limited_graph=True)
+
+    def _validate_members_response(self, member_one, member_two, member_list):
+        self.assertIsInstance(member_list, list)
+        self.assertEqual(2, len(member_list))
+        self.assertEqual(member_one.id, member_list[0].id)
+        self.assertEqual(member_one.pool_id, member_list[0].pool_id)
+        self.assertEqual(member_one.ip_address, member_list[0].ip_address)
+        self.assertEqual(member_two.id, member_list[1].id)
+        self.assertEqual(member_two.pool_id, member_list[1].pool_id)
+        self.assertEqual(member_two.ip_address, member_list[1].ip_address)
+
+    def test_get_all(self):
+        member_one = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
+                                        self.pool.id, "192.0.2.1")
+        member_two = self.create_member(self.FAKE_UUID_3, self.FAKE_UUID_2,
+                                        self.pool.id, "192.0.2.2")
+        member_list, _ = self.member_repo.get_all(self.session,
+                                                  project_id=self.FAKE_UUID_2)
+        self._validate_members_response(member_one, member_two, member_list)
+
+    def test_get_all_with_loadbalancer(self):
+        member_one = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
+                                        self.pool.id, "192.0.2.1")
+        member_two = self.create_member(self.FAKE_UUID_3, self.FAKE_UUID_2,
+                                        self.pool.id, "192.0.2.2")
+        # Create an LB and attach the pool to it. The pool node in the
+        # data model graph then gains a new relationship to the LB.
+        lb = self.create_loadbalancer(self.FAKE_UUID_5)
+        self.pool_repo.update(self.session, id=self.pool.id,
+                              load_balancer_id=lb.id)
+
+        member_list, _ = self.member_repo.get_all(self.session,
+                                                  project_id=self.FAKE_UUID_2)
+        self._validate_members_response(member_one, member_two, member_list)
+        # Without a limit on the recursion all nodes are processed, so the
+        # load_balancer node is available in the response.
+        self.assertEqual(self.pool.id, member_list[0].pool.id)
+        self.assertEqual(self.pool.id, member_list[1].pool.id)
+        self.assertEqual(lb.id, member_list[0].pool.load_balancer.id)
+        self.assertEqual(lb.id, member_list[1].pool.load_balancer.id)
+
+        # Get the same list of members with graph recursion limited
+        member_list_limit, _ = self.member_repo.get_all(
+            self.session,
+            project_id=self.FAKE_UUID_2,
+            limited_graph=True
+        )
+        self._validate_members_response(
+            member_one,
+            member_two,
+            member_list_limit
+        )
+        # With the recursion limit the load_balancer node is not
+        # processed, so it is not available in the response.
+        self.assertEqual(self.pool.id, member_list_limit[0].pool.id)
+        self.assertEqual(self.pool.id, member_list_limit[1].pool.id)
+        self.assertIsNone(member_list_limit[0].pool.load_balancer)
+        self.assertIsNone(member_list_limit[1].pool.load_balancer)
+
+    def test_create(self):
+        member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
+                                    self.pool.id, ip_address="192.0.2.1")
+        new_member = self.member_repo.get(self.session, id=member.id)
+        self.assertEqual(self.FAKE_UUID_1, new_member.id)
+        self.assertEqual(self.FAKE_UUID_2, new_member.project_id)
+        self.assertEqual(self.pool.id, new_member.pool_id)
+        self.assertEqual("192.0.2.1", new_member.ip_address)
+        self.assertEqual(80, new_member.protocol_port)
+        self.assertEqual(constants.ONLINE, new_member.operating_status)
+        self.assertTrue(new_member.enabled)
+
+    def test_update(self):
+        ip_address_change = "192.0.2.2"
+        member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
+                                    self.pool.id, "192.0.2.1")
+        self.member_repo.update(self.session, id=member.id,
+                                ip_address=ip_address_change)
+        self.session.commit()
+        new_member = self.member_repo.get(self.session, id=member.id)
+        self.assertEqual(ip_address_change, new_member.ip_address)
+
+    def test_delete(self):
+        member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
+                                    self.pool.id, "192.0.2.1")
+        self.member_repo.delete(self.session, id=member.id)
+        self.session.commit()
+        self.assertIsNone(self.member_repo.get(self.session, id=member.id))
+        new_pool = self.pool_repo.get(self.session, id=self.pool.id)
+        self.assertIsNotNone(new_pool)
+        self.assertEqual(0, len(new_pool.members))
+
+    def test_update_pool_members(self):
+        member1 = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
+                                     self.pool.id, "192.0.2.1")
+        member2 = self.create_member(self.FAKE_UUID_3, self.FAKE_UUID_2,
+                                     self.pool.id, "192.0.2.2")
+        self.member_repo.update_pool_members(
+            self.session,
+            pool_id=self.pool.id,
+            operating_status=constants.OFFLINE)
+        self.session.commit()
+        new_member1 = self.member_repo.get(self.session, id=member1.id)
+        new_member2 = self.member_repo.get(self.session, id=member2.id)
+        self.assertEqual(constants.OFFLINE, new_member1.operating_status)
+        self.assertEqual(constants.OFFLINE, new_member2.operating_status)
+
+
+class SessionPersistenceRepositoryTest(BaseRepositoryTest):
+
+    def setUp(self):
+        super().setUp()
+        self.pool = self.pool_repo.create(
+            self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2,
+            name="pool_test", description="pool_description",
+            protocol=constants.PROTOCOL_HTTP,
+            lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
+            provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE, enabled=True)
+
+    def create_session_persistence(self, pool_id):
+        sp = self.sp_repo.create(
+            self.session, pool_id=pool_id,
+            type=constants.SESSION_PERSISTENCE_HTTP_COOKIE,
+            cookie_name="cookie_name")
+        return sp
+
+    def test_get(self):
+        sp = self.create_session_persistence(self.pool.id)
+        new_sp = self.sp_repo.get(self.session, pool_id=sp.pool_id)
+        self.assertIsInstance(new_sp, data_models.SessionPersistence)
+        self.assertEqual(sp, new_sp)
+
+    def test_create(self):
+        sp = self.create_session_persistence(self.pool.id)
+        new_sp = self.sp_repo.get(self.session, pool_id=sp.pool_id)
+        self.assertEqual(self.pool.id, new_sp.pool_id)
+        self.assertEqual(constants.SESSION_PERSISTENCE_HTTP_COOKIE,
+                         new_sp.type)
+        self.assertEqual("cookie_name", new_sp.cookie_name)
+
+    def test_update(self):
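+        # An in-place update of the session persistence record should be
+        # visible on the next get() for the same pool_id.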
+        name_change = "new_cookie_name"
+        sp = self.create_session_persistence(self.pool.id)
+        self.sp_repo.update(self.session, pool_id=sp.pool_id,
+                            cookie_name=name_change)
+        new_sp = self.sp_repo.get(self.session, pool_id=sp.pool_id)
+        self.assertEqual(name_change, new_sp.cookie_name)
+
+    def test_delete(self):
+        sp = self.create_session_persistence(self.pool.id)
+        self.sp_repo.delete(self.session, pool_id=sp.pool_id)
+        # Query the session persistence repository (not the member
+        # repository) to confirm the record is gone.
+        self.assertIsNone(self.sp_repo.get(self.session,
+                                           pool_id=sp.pool_id))
+        new_pool = self.pool_repo.get(self.session, id=self.pool.id)
+        self.assertIsNotNone(new_pool)
+        self.assertIsNone(new_pool.session_persistence)
+
+
+class TestListenerRepositoryTest(BaseRepositoryTest):
+
+    def setUp(self):
+        super().setUp()
+        self.load_balancer = self.lb_repo.create(
+            self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2,
+            name="lb_name", description="lb_description",
+            provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE, enabled=True,
+            server_group_id=self.FAKE_UUID_1)
+        self.session.commit()
+
+    def create_listener(self, listener_id, port, default_pool_id=None,
+                        provisioning_status=constants.ACTIVE):
+        listener = self.listener_repo.create(
+            self.session, id=listener_id, project_id=self.FAKE_UUID_2,
+            name="listener_name", description="listener_description",
+            protocol=constants.PROTOCOL_HTTP, protocol_port=port,
+            connection_limit=1, load_balancer_id=self.load_balancer.id,
+            default_pool_id=default_pool_id,
+            operating_status=constants.ONLINE,
+            provisioning_status=provisioning_status, enabled=True,
+            peer_port=1025, tags=['test_tag'])
+        self.session.commit()
+        return listener
+
+    def create_amphora(self, amphora_id, loadbalancer_id):
+        amphora = self.amphora_repo.create(self.session, id=amphora_id,
+                                           load_balancer_id=loadbalancer_id,
+                                           compute_id=self.FAKE_UUID_3,
+                                           status=constants.ACTIVE,
+                                           vrrp_ip=self.FAKE_IP,
+                                           lb_network_ip=self.FAKE_IP)
+        self.session.commit()
+        return amphora
+
+    def test_get(self):
+        listener = self.create_listener(self.FAKE_UUID_1, 80)
+        new_listener = self.listener_repo.get(self.session, id=listener.id)
+        self.assertIsInstance(new_listener, data_models.Listener)
+        self.assertEqual(listener.id, new_listener.id)
+        self.assertEqual(listener.name, new_listener.name)
+        self.assertEqual(listener.protocol, new_listener.protocol)
+        self.assertEqual(listener.protocol_port, new_listener.protocol_port)
+
+    def test_get_all(self):
+        listener_one = self.create_listener(self.FAKE_UUID_1, 80)
+        listener_two = self.create_listener(self.FAKE_UUID_3, 88)
+        listener_list, _ = self.listener_repo.get_all(
+            self.session, project_id=self.FAKE_UUID_2)
+        self.assertIsInstance(listener_list, list)
+        self.assertEqual(2, len(listener_list))
+        self.assertEqual(listener_one.id, listener_list[0].id)
+        self.assertEqual(listener_two.id, listener_list[1].id)
+
+    def test_create(self):
+        listener = self.create_listener(self.FAKE_UUID_1, 80)
+        new_listener = self.listener_repo.get(self.session, id=listener.id)
+        self.assertEqual(self.FAKE_UUID_1, new_listener.id)
+        self.assertEqual(self.FAKE_UUID_2, new_listener.project_id)
+        self.assertEqual("listener_name", new_listener.name)
+        self.assertEqual("listener_description", new_listener.description)
+        self.assertEqual(constants.PROTOCOL_HTTP, new_listener.protocol)
+        self.assertEqual(80, new_listener.protocol_port)
+        self.assertEqual(1, new_listener.connection_limit)
+        self.assertEqual(self.load_balancer.id, new_listener.load_balancer_id)
+        self.assertEqual(constants.ACTIVE, new_listener.provisioning_status)
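+        # The remaining fields, including the explicit peer_port=1025 set
+        # in create_listener(), should round-trip unchanged.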
+ self.assertEqual(constants.ONLINE, new_listener.operating_status) + self.assertEqual(1025, new_listener.peer_port) + self.assertTrue(new_listener.enabled) + + def test_create_no_peer_port(self): + lb = self.create_loadbalancer(uuidutils.generate_uuid()) + listener = self.listener_repo.create( + self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, + load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, + protocol_port=80, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertEqual(1025, new_listener.peer_port) + + def test_create_no_peer_port_increments(self): + lb = self.create_loadbalancer(uuidutils.generate_uuid()) + listener_a = self.listener_repo.create( + self.session, id=uuidutils.generate_uuid(), + project_id=self.FAKE_UUID_2, + load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, + protocol_port=80, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + listener_b = self.listener_repo.create( + self.session, id=uuidutils.generate_uuid(), + project_id=self.FAKE_UUID_2, + load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, + protocol_port=81, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + new_listener_a = self.listener_repo.get(self.session, id=listener_a.id) + new_listener_b = self.listener_repo.get(self.session, id=listener_b.id) + self.assertEqual(1025, new_listener_a.peer_port) + self.assertEqual(1026, new_listener_b.peer_port) + + def test_create_listener_on_different_lb_than_default_pool(self): + load_balancer2 = self.lb_repo.create( + self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, + name="lb_name2", description="lb_description2", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + pool = self.pool_repo.create( + self.session, id=self.FAKE_UUID_4, project_id=self.FAKE_UUID_2, + name="pool_test", description="pool_description", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True, + load_balancer_id=load_balancer2.id) + self.session.commit() + self.assertRaises(exceptions.NotFound, self.create_listener, + self.FAKE_UUID_1, 80, default_pool_id=pool.id) + + def test_create_2_sni_containers(self): + listener = self.create_listener(self.FAKE_UUID_1, 80) + container1 = {'listener_id': listener.id, + 'tls_container_id': self.FAKE_UUID_1} + container2 = {'listener_id': listener.id, + 'tls_container_id': self.FAKE_UUID_2} + container1_dm = data_models.SNI(**container1) + container2_dm = data_models.SNI(**container2) + self.sni_repo.create(self.session, **container1) + self.sni_repo.create(self.session, **container2) + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertIn(container1_dm, new_listener.sni_containers) + self.assertIn(container2_dm, new_listener.sni_containers) + + def test_update(self): + name_change = "new_listener_name" + listener = self.create_listener(self.FAKE_UUID_1, 80) + self.session.commit() + self.listener_repo.update(self.session, listener.id, + name=name_change) + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertEqual(name_change, new_listener.name) + + def 
test_update_with_sni(self): + listener = self.create_listener(self.FAKE_UUID_1, 80) + container1 = {'listener_id': listener.id, + 'tls_container_id': self.FAKE_UUID_2} + container1_dm = data_models.SNI(**container1) + self.listener_repo.update(self.session, listener.id, + sni_containers=[self.FAKE_UUID_2]) + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertIn(container1_dm, new_listener.sni_containers) + + def test_update_bad_id(self): + self.assertRaises(exceptions.NotFound, self.listener_repo.update, + self.session, id=uuidutils.generate_uuid()) + + def test_delete(self): + listener = self.create_listener(self.FAKE_UUID_1, 80) + self.listener_repo.delete(self.session, id=listener.id) + self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) + + def test_delete_with_sni(self): + listener = self.create_listener(self.FAKE_UUID_1, 80) + sni = self.sni_repo.create(self.session, listener_id=listener.id, + tls_container_id=self.FAKE_UUID_3) + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertIsNotNone(new_listener) + self.assertEqual(sni, new_listener.sni_containers[0]) + self.listener_repo.delete(self.session, id=new_listener.id) + self.session.commit() + self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) + self.assertIsNone(self.sni_repo.get(self.session, + listener_id=listener.id)) + + def test_delete_with_stats(self): + listener = self.create_listener(self.FAKE_UUID_1, 80) + lb = self.create_loadbalancer(uuidutils.generate_uuid()) + amphora = self.create_amphora(uuidutils.generate_uuid(), lb.id) + self.listener_stats_repo.create( + self.session, listener_id=listener.id, amphora_id=amphora.id, + bytes_in=1, bytes_out=1, + active_connections=1, total_connections=1, request_errors=1) + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertIsNotNone(new_listener) + self.assertIsNotNone(self.listener_stats_repo.get( + self.session, listener_id=listener.id)) + self.listener_repo.delete(self.session, id=listener.id) + self.session.commit() + self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) + # ListenerStatistics should stick around + self.assertIsNotNone(self.listener_stats_repo.get( + self.session, listener_id=listener.id)) + + def test_delete_with_pool(self): + pool = self.pool_repo.create( + self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, + name="pool_test", description="pool_description", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True, + load_balancer_id=self.load_balancer.id) + self.session.commit() + listener = self.create_listener(self.FAKE_UUID_1, 80, + default_pool_id=pool.id) + pool = self.pool_repo.get(self.session, id=pool.id) + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertIsNotNone(new_listener) + self.assertEqual(pool, new_listener.default_pool) + self.listener_repo.delete(self.session, id=new_listener.id) + self.session.commit() + self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) + # Pool should stick around + self.assertIsNotNone(self.pool_repo.get(self.session, id=pool.id)) + + def test_delete_with_all_children(self): + pool = self.pool_repo.create( + self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, + name="pool_test", 
description="pool_description", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True, + load_balancer_id=self.load_balancer.id) + self.session.commit() + listener = self.create_listener(self.FAKE_UUID_1, 80, + default_pool_id=pool.id) + sni = self.sni_repo.create(self.session, listener_id=listener.id, + tls_container_id=self.FAKE_UUID_3) + lb = self.create_loadbalancer(uuidutils.generate_uuid()) + amphora = self.create_amphora(uuidutils.generate_uuid(), lb.id) + self.listener_stats_repo.create( + self.session, listener_id=listener.id, + amphora_id=amphora.id, + bytes_in=1, bytes_out=1, + active_connections=1, total_connections=1, request_errors=1) + self.session.commit() + pool = self.pool_repo.get(self.session, id=pool.id) + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertIsNotNone(new_listener) + self.assertEqual(pool, new_listener.default_pool) + self.assertEqual(sni, new_listener.sni_containers[0]) + self.listener_repo.delete(self.session, id=listener.id) + self.session.commit() + self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) + self.assertIsNone(self.sni_repo.get(self.session, + listener_id=listener.id)) + # ListenerStatistics should stick around + self.assertIsNotNone(self.listener_stats_repo.get( + self.session, listener_id=sni.listener_id)) + # Pool should stick around + self.assertIsNotNone(self.pool_repo.get(self.session, id=pool.id)) + + def test_delete_default_pool_from_beneath_listener(self): + pool = self.pool_repo.create( + self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, + name="pool_test", description="pool_description", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True, + load_balancer_id=self.load_balancer.id) + self.session.commit() + listener = self.create_listener(self.FAKE_UUID_1, 80, + default_pool_id=pool.id) + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertIsNotNone(new_listener) + self.assertEqual(pool.id, new_listener.default_pool.id) + self.assertEqual(pool.load_balancer_id, + new_listener.default_pool.load_balancer_id) + self.assertEqual(pool.project_id, new_listener.default_pool.project_id) + self.pool_repo.delete(self.session, id=pool.id) + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertIsNone(new_listener.default_pool) + + def test_prov_status_active_if_not_error_active(self): + listener = self.create_listener(self.FAKE_UUID_1, 80, + provisioning_status=constants.ACTIVE) + self.listener_repo.prov_status_active_if_not_error(self.session, + listener.id) + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertEqual(constants.ACTIVE, new_listener.provisioning_status) + + def test_prov_status_active_if_not_error_error(self): + listener = self.create_listener(self.FAKE_UUID_1, 80, + provisioning_status=constants.ERROR) + self.listener_repo.prov_status_active_if_not_error(self.session, + listener.id) + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertEqual(constants.ERROR, new_listener.provisioning_status) + + def test_prov_status_active_if_not_error_pending_update(self): + listener = self.create_listener( + self.FAKE_UUID_1, 80, 
provisioning_status=constants.PENDING_UPDATE) + self.listener_repo.prov_status_active_if_not_error(self.session, + listener.id) + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertEqual(constants.ACTIVE, new_listener.provisioning_status) + + def test_prov_status_active_if_not_error_bogus_listener(self): + listener = self.create_listener( + self.FAKE_UUID_1, 80, provisioning_status=constants.PENDING_UPDATE) + # Should not raise an exception nor change any status + self.listener_repo.prov_status_active_if_not_error(self.session, + 'bogus_id') + self.session.commit() + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertEqual(constants.PENDING_UPDATE, + new_listener.provisioning_status) + + def test_get_port_protocol_cidr_for_lb(self): + self.create_listener(self.FAKE_UUID_1, 80, + provisioning_status=constants.ACTIVE) + rules = self.listener_repo.get_port_protocol_cidr_for_lb( + self.session, self.FAKE_UUID_1) + self.assertEqual([{'protocol': 'TCP', 'cidr': None, 'port': 80}], + rules) + + +class ListenerStatisticsRepositoryTest(BaseRepositoryTest): + + def setUp(self): + super().setUp() + self.listener = self.listener_repo.create( + self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, + name="listener_name", description="listener_description", + protocol=constants.PROTOCOL_HTTP, protocol_port=80, + connection_limit=1, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True, peer_port=1025) + self.lb = self.lb_repo.create(self.session, + id=uuidutils.generate_uuid(), + project_id=self.FAKE_UUID_2, + name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True) + self.amphora = self.amphora_repo.create(self.session, + id=uuidutils.generate_uuid(), + load_balancer_id=self.lb.id, + compute_id=self.FAKE_UUID_3, + status=constants.ACTIVE, + vrrp_ip=self.FAKE_IP, + lb_network_ip=self.FAKE_IP) + self.session.commit() + + def create_listener_stats(self, listener_id, amphora_id): + stats = self.listener_stats_repo.create( + self.session, listener_id=listener_id, amphora_id=amphora_id, + bytes_in=1, bytes_out=1, + active_connections=1, total_connections=1, request_errors=1) + self.session.commit() + return stats + + def test_get(self): + stats = self.create_listener_stats(self.listener.id, self.amphora.id) + new_stats = self.listener_stats_repo.get(self.session, + listener_id=stats.listener_id) + self.assertIsInstance(new_stats, data_models.ListenerStatistics) + self.assertEqual(stats.listener_id, new_stats.listener_id) + + def test_create(self): + stats = self.create_listener_stats(self.listener.id, self.amphora.id) + new_stats = self.listener_stats_repo.get(self.session, + listener_id=stats.listener_id) + self.assertEqual(self.listener.id, new_stats.listener_id) + self.assertEqual(1, new_stats.bytes_in) + self.assertEqual(1, new_stats.bytes_out) + self.assertEqual(1, new_stats.active_connections) + self.assertEqual(1, new_stats.total_connections) + self.assertEqual(1, new_stats.request_errors) + + def test_update(self): + bytes_in_change = 2 + stats = self.create_listener_stats(self.listener.id, self.amphora.id) + self.listener_stats_repo.update(self.session, stats.listener_id, + bytes_in=bytes_in_change) + self.session.commit() + new_stats = self.listener_stats_repo.get(self.session, + listener_id=stats.listener_id) + self.assertIsInstance(new_stats, data_models.ListenerStatistics) + 
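+        # The updated counter should be persisted as written; the other
+        # update() tests in this module assume the same behavior.
+        self.assertEqual(bytes_in_change, new_stats.bytes_in)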
self.assertEqual(stats.listener_id, new_stats.listener_id) + + def test_delete(self): + stats = self.create_listener_stats(self.listener.id, self.amphora.id) + self.listener_stats_repo.delete(self.session, + listener_id=stats.listener_id) + self.session.commit() + self.assertIsNone(self.listener_stats_repo.get( + self.session, listener_id=stats.listener_id)) + new_listener = self.listener_repo.get(self.session, + id=self.listener.id) + self.assertIsNotNone(new_listener) + self.assertIsNone(new_listener.stats) + + def test_replace(self): + # Test the create path + bytes_in = random.randrange(1000000000) + bytes_out = random.randrange(1000000000) + active_conns = random.randrange(1000000000) + total_conns = random.randrange(1000000000) + request_errors = random.randrange(1000000000) + self.assertIsNone(self.listener_stats_repo.get( + self.session, listener_id=self.listener.id)) + stats_obj = data_models.ListenerStatistics( + listener_id=self.listener.id, + amphora_id=self.amphora.id, + bytes_in=bytes_in, + bytes_out=bytes_out, + active_connections=active_conns, + total_connections=total_conns, + request_errors=request_errors + ) + self.listener_stats_repo.replace(self.session, stats_obj) + self.session.commit() + obj = self.listener_stats_repo.get(self.session, + listener_id=self.listener.id) + self.assertIsNotNone(obj) + self.assertEqual(self.listener.id, obj.listener_id) + self.assertEqual(self.amphora.id, obj.amphora_id) + self.assertEqual(bytes_in, obj.bytes_in) + self.assertEqual(bytes_out, obj.bytes_out) + self.assertEqual(active_conns, obj.active_connections) + self.assertEqual(total_conns, obj.total_connections) + self.assertEqual(request_errors, obj.request_errors) + + # Test the update path + bytes_in_2 = random.randrange(1000000000) + bytes_out_2 = random.randrange(1000000000) + active_conns_2 = random.randrange(1000000000) + total_conns_2 = random.randrange(1000000000) + request_errors_2 = random.randrange(1000000000) + stats_obj_2 = data_models.ListenerStatistics( + listener_id=self.listener.id, + amphora_id=self.amphora.id, + bytes_in=bytes_in_2, + bytes_out=bytes_out_2, + active_connections=active_conns_2, + total_connections=total_conns_2, + request_errors=request_errors_2 + ) + self.listener_stats_repo.replace(self.session, stats_obj_2) + self.session.commit() + obj = self.listener_stats_repo.get(self.session, + listener_id=self.listener.id) + self.assertIsNotNone(obj) + self.assertEqual(self.listener.id, obj.listener_id) + self.assertEqual(self.amphora.id, obj.amphora_id) + self.assertEqual(bytes_in_2, obj.bytes_in) + self.assertEqual(bytes_out_2, obj.bytes_out) + self.assertEqual(active_conns_2, obj.active_connections) + self.assertEqual(total_conns_2, obj.total_connections) + self.assertEqual(request_errors_2, obj.request_errors) + + # Test uses listener_id as amphora_id if not passed + stats_obj = data_models.ListenerStatistics( + listener_id=self.listener.id, + bytes_in=bytes_in, + bytes_out=bytes_out, + active_connections=active_conns, + total_connections=total_conns, + request_errors=request_errors + ) + self.listener_stats_repo.replace(self.session, stats_obj) + self.session.commit() + obj = self.listener_stats_repo.get(self.session, + listener_id=self.listener.id, + amphora_id=self.listener.id) + self.assertIsNotNone(obj) + self.assertEqual(self.listener.id, obj.listener_id) + self.assertEqual(self.listener.id, obj.amphora_id) + self.assertEqual(bytes_in, obj.bytes_in) + self.assertEqual(bytes_out, obj.bytes_out) + self.assertEqual(active_conns, 
obj.active_connections) + self.assertEqual(total_conns, obj.total_connections) + self.assertEqual(request_errors, obj.request_errors) + + def test_increment(self): + # Test the create path + bytes_in = random.randrange(1000000000) + bytes_out = random.randrange(1000000000) + active_conns = random.randrange(1000000000) + total_conns = random.randrange(1000000000) + request_errors = random.randrange(1000000000) + self.assertIsNone(self.listener_stats_repo.get( + self.session, listener_id=self.listener.id)) + delta_stats = data_models.ListenerStatistics( + listener_id=self.listener.id, + amphora_id=self.amphora.id, + bytes_in=bytes_in, + bytes_out=bytes_out, + active_connections=active_conns, + total_connections=total_conns, + request_errors=request_errors + ) + self.listener_stats_repo.increment(self.session, delta_stats) + self.session.commit() + obj = self.listener_stats_repo.get(self.session, + listener_id=self.listener.id) + self.assertIsNotNone(obj) + self.assertEqual(self.listener.id, obj.listener_id) + self.assertEqual(self.amphora.id, obj.amphora_id) + self.assertEqual(bytes_in, obj.bytes_in) + self.assertEqual(bytes_out, obj.bytes_out) + self.assertEqual(active_conns, obj.active_connections) + self.assertEqual(total_conns, obj.total_connections) + self.assertEqual(request_errors, obj.request_errors) + + # Test the update path + bytes_in_2 = random.randrange(1000000000) + bytes_out_2 = random.randrange(1000000000) + active_conns_2 = random.randrange(1000000000) + total_conns_2 = random.randrange(1000000000) + request_errors_2 = random.randrange(1000000000) + delta_stats_2 = data_models.ListenerStatistics( + listener_id=self.listener.id, + amphora_id=self.amphora.id, + bytes_in=bytes_in_2, + bytes_out=bytes_out_2, + active_connections=active_conns_2, + total_connections=total_conns_2, + request_errors=request_errors_2 + ) + self.listener_stats_repo.increment(self.session, delta_stats_2) + self.session.commit() + obj = self.listener_stats_repo.get(self.session, + listener_id=self.listener.id) + self.assertIsNotNone(obj) + self.assertEqual(self.listener.id, obj.listener_id) + self.assertEqual(self.amphora.id, obj.amphora_id) + self.assertEqual(bytes_in + bytes_in_2, obj.bytes_in) + self.assertEqual(bytes_out + bytes_out_2, obj.bytes_out) + self.assertEqual(active_conns_2, obj.active_connections) # not a delta + self.assertEqual(total_conns + total_conns_2, obj.total_connections) + self.assertEqual(request_errors + request_errors_2, obj.request_errors) + + # Test uses listener_id as amphora_id if not passed + stats_obj = data_models.ListenerStatistics( + listener_id=self.listener.id, + bytes_in=bytes_in, + bytes_out=bytes_out, + active_connections=active_conns, + total_connections=total_conns, + request_errors=request_errors + ) + self.listener_stats_repo.increment(self.session, stats_obj) + self.session.commit() + obj = self.listener_stats_repo.get(self.session, + listener_id=self.listener.id, + amphora_id=self.listener.id) + self.assertIsNotNone(obj) + self.assertEqual(self.listener.id, obj.listener_id) + self.assertEqual(self.listener.id, obj.amphora_id) + self.assertEqual(bytes_in, obj.bytes_in) + self.assertEqual(bytes_out, obj.bytes_out) + self.assertEqual(active_conns, obj.active_connections) + self.assertEqual(total_conns, obj.total_connections) + self.assertEqual(request_errors, obj.request_errors) + + +class HealthMonitorRepositoryTest(BaseRepositoryTest): + + def setUp(self): + super().setUp() + self.pool = self.pool_repo.create( + self.session, id=self.FAKE_UUID_1, 
project_id=self.FAKE_UUID_2, + name="pool_test", description="pool_description", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.pool2 = self.pool_repo.create( + self.session, id=self.FAKE_UUID_2, project_id=self.FAKE_UUID_2, + name="pool2_test", description="pool2_description", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + + def create_health_monitor(self, hm_id, pool_id): + health_monitor = self.hm_repo.create( + self.session, type=constants.HEALTH_MONITOR_HTTP, id=hm_id, + pool_id=pool_id, delay=1, timeout=1, fall_threshold=1, + rise_threshold=1, http_method="POST", + url_path="http://localhost/index.php", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + expected_codes="200", enabled=True, tags=['test_tag']) + self.session.commit() + self.assertEqual(hm_id, health_monitor.id) + return health_monitor + + def test_get(self): + hm = self.create_health_monitor(self.FAKE_UUID_3, self.pool.id) + new_hm = self.hm_repo.get(self.session, id=hm.id) + self.assertIsInstance(new_hm, data_models.HealthMonitor) + self.assertEqual(hm.id, new_hm.id) + self.assertEqual(hm.pool_id, new_hm.pool_id) + self.assertEqual(hm.type, new_hm.type) + + def test_create(self): + hm = self.create_health_monitor(self.FAKE_UUID_3, self.pool.id) + new_hm = self.hm_repo.get(self.session, id=hm.id) + self.assertEqual(constants.HEALTH_MONITOR_HTTP, new_hm.type) + self.assertEqual(self.pool.id, new_hm.pool_id) + self.assertEqual(1, new_hm.delay) + self.assertEqual(1, new_hm.timeout) + self.assertEqual(1, new_hm.fall_threshold) + self.assertEqual(1, new_hm.rise_threshold) + self.assertEqual("POST", new_hm.http_method) + self.assertEqual("http://localhost/index.php", new_hm.url_path) + self.assertEqual("200", new_hm.expected_codes) + self.assertTrue(new_hm.enabled) + + def test_update(self): + delay_change = 2 + hm = self.create_health_monitor(self.FAKE_UUID_3, self.pool.id) + self.hm_repo.update( + self.session, hm.id, delay=delay_change) + self.session.commit() + new_hm = self.hm_repo.get(self.session, id=hm.id) + self.assertEqual(delay_change, new_hm.delay) + + def test_delete(self): + hm = self.create_health_monitor(self.FAKE_UUID_3, self.pool.id) + self.hm_repo.delete(self.session, id=hm.id) + self.session.commit() + self.assertIsNone(self.hm_repo.get(self.session, id=hm.id)) + new_pool = self.pool_repo.get(self.session, id=self.pool.id) + self.assertIsNotNone(new_pool) + self.assertIsNone(new_pool.health_monitor) + + +class LoadBalancerRepositoryTest(BaseRepositoryTest): + + def create_loadbalancer(self, lb_id, **overrides): + settings = dict( + id=lb_id, + project_id=self.FAKE_UUID_2, name="lb_name", + description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + enabled=True, tags=['test_tag'], + ) + settings.update(**overrides) + lb = self.lb_repo.create(self.session, **settings) + self.session.commit() + return lb + + def test_get(self): + lb = self.create_loadbalancer(self.FAKE_UUID_1) + new_lb = self.lb_repo.get(self.session, id=lb.id) + self.assertIsInstance(new_lb, data_models.LoadBalancer) + self.assertEqual(lb.id, new_lb.id) + self.assertEqual(lb.project_id, new_lb.project_id) + + def test_get_all(self): + lb_one = 
self.create_loadbalancer(self.FAKE_UUID_1) + lb_two = self.create_loadbalancer(self.FAKE_UUID_3) + lb_list, _ = self.lb_repo.get_all(self.session, + project_id=self.FAKE_UUID_2) + self.assertEqual(2, len(lb_list)) + self.assertEqual(lb_one.id, lb_list[0].id) + self.assertEqual(lb_one.project_id, lb_list[0].project_id) + self.assertEqual(lb_two.id, lb_list[1].id) + self.assertEqual(lb_two.project_id, lb_list[1].project_id) + + def test_create(self): + lb = self.create_loadbalancer(self.FAKE_UUID_1) + self.assertEqual(self.FAKE_UUID_1, lb.id) + self.assertEqual(self.FAKE_UUID_2, lb.project_id) + self.assertEqual("lb_name", lb.name) + self.assertEqual("lb_description", lb.description) + self.assertEqual(constants.ACTIVE, lb.provisioning_status) + self.assertEqual(constants.ONLINE, lb.operating_status) + self.assertTrue(lb.enabled) + + def test_update(self): + name_change = "load_balancer_name" + lb = self.create_loadbalancer(self.FAKE_UUID_1) + self.lb_repo.update(self.session, lb.id, name=name_change) + self.session.commit() + new_lb = self.lb_repo.get(self.session, id=lb.id) + self.assertEqual(name_change, new_lb.name) + + def test_delete(self): + lb = self.create_loadbalancer(self.FAKE_UUID_1) + self.lb_repo.delete(self.session, id=lb.id) + self.session.commit() + self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) + + def test_delete_with_amphora(self): + lb = self.create_loadbalancer(self.FAKE_UUID_1) + amphora = self.amphora_repo.create(self.session, id=self.FAKE_UUID_1, + load_balancer_id=lb.id, + compute_id=self.FAKE_UUID_3, + status=constants.ACTIVE, + vrrp_ip=self.FAKE_IP, + lb_network_ip=self.FAKE_IP) + new_lb = self.lb_repo.get(self.session, id=lb.id) + self.assertIsNotNone(new_lb) + self.assertEqual(1, len(new_lb.amphorae)) + self.assertEqual(amphora.id, new_lb.amphorae[0].id) + self.assertEqual(amphora.load_balancer_id, + new_lb.amphorae[0].load_balancer_id) + self.assertEqual(amphora.compute_id, new_lb.amphorae[0].compute_id) + self.lb_repo.delete(self.session, id=new_lb.id) + self.session.commit() + self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) + new_amphora = self.amphora_repo.get(self.session, id=amphora.id) + self.assertIsNotNone(new_amphora) + self.assertIsNone(new_amphora.load_balancer_id) + + def test_delete_with_many_amphora(self): + lb = self.create_loadbalancer(self.FAKE_UUID_1) + amphora_1 = self.amphora_repo.create(self.session, id=self.FAKE_UUID_1, + load_balancer_id=lb.id, + compute_id=self.FAKE_UUID_3, + status=constants.ACTIVE) + self.session.commit() + amphora_2 = self.amphora_repo.create(self.session, id=self.FAKE_UUID_3, + load_balancer_id=lb.id, + compute_id=self.FAKE_UUID_3, + lb_network_ip=self.FAKE_IP, + vrrp_ip=self.FAKE_IP, + status=constants.ACTIVE) + self.session.commit() + new_lb = self.lb_repo.get(self.session, id=lb.id) + self.assertIsNotNone(new_lb) + self.assertEqual(2, len(new_lb.amphorae)) + amphora_ids = [amp.id for amp in new_lb.amphorae] + self.assertIn(amphora_1.id, amphora_ids) + self.assertIn(amphora_2.id, amphora_ids) + self.lb_repo.delete(self.session, id=new_lb.id) + self.session.commit() + self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) + new_amphora_1 = self.amphora_repo.get(self.session, id=amphora_1.id) + new_amphora_2 = self.amphora_repo.get(self.session, id=amphora_2.id) + self.assertIsNotNone(new_amphora_1) + self.assertIsNotNone(new_amphora_2) + self.assertIsNone(new_amphora_1.load_balancer_id) + self.assertIsNone(new_amphora_2.load_balancer_id) + + def test_delete_with_vip(self): + lb = 
self.create_loadbalancer(self.FAKE_UUID_1) + vip = self.vip_repo.create(self.session, load_balancer_id=lb.id, + ip_address="192.0.2.1") + self.session.commit() + new_lb = self.lb_repo.get(self.session, id=lb.id) + self.assertIsNotNone(new_lb) + self.assertIsNotNone(new_lb.vip) + self.assertEqual(vip, new_lb.vip) + self.lb_repo.delete(self.session, id=new_lb.id) + self.session.commit() + self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) + self.assertIsNone(self.vip_repo.get(self.session, + load_balancer_id=lb.id)) + + def test_delete_with_listener(self): + lb = self.create_loadbalancer(self.FAKE_UUID_1) + listener = self.listener_repo.create( + self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, + name="listener_name", description="listener_description", + load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, + protocol_port=80, connection_limit=1, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + new_lb = self.lb_repo.get(self.session, id=lb.id) + self.assertIsNotNone(new_lb) + self.assertEqual(1, len(new_lb.listeners)) + self.assertEqual(listener.id, new_lb.listeners[0].id) + self.assertEqual(listener.load_balancer_id, + new_lb.listeners[0].load_balancer_id) + self.assertEqual(listener.project_id, new_lb.listeners[0].project_id) + self.lb_repo.delete(self.session, id=new_lb.id) + self.session.commit() + self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) + self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) + + def test_delete_with_many_listeners(self): + lb = self.create_loadbalancer(self.FAKE_UUID_1) + listener_1 = self.listener_repo.create( + self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, + name="listener_name", description="listener_description", + load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, + protocol_port=80, connection_limit=1, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + listener_2 = self.listener_repo.create( + self.session, id=self.FAKE_UUID_3, project_id=self.FAKE_UUID_2, + name="listener_name", description="listener_description", + load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTPS, + protocol_port=443, connection_limit=1, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + new_lb = self.lb_repo.get(self.session, id=lb.id) + self.assertIsNotNone(new_lb) + self.assertEqual(2, len(new_lb.listeners)) + listener_ids = [lstnr.id for lstnr in new_lb.listeners] + self.assertIn(listener_1.id, listener_ids) + self.assertIn(listener_2.id, listener_ids) + self.lb_repo.delete(self.session, id=new_lb.id) + self.session.commit() + self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) + self.assertIsNone(self.listener_repo.get(self.session, + id=listener_1.id)) + self.assertIsNone(self.listener_repo.get(self.session, + id=listener_2.id)) + + def test_delete_with_all_children(self): + lb = self.create_loadbalancer(self.FAKE_UUID_1) + self.session.commit() + amphora = self.amphora_repo.create(self.session, id=self.FAKE_UUID_1, + load_balancer_id=lb.id, + compute_id=self.FAKE_UUID_3, + lb_network_ip=self.FAKE_IP, + status=constants.ACTIVE) + self.session.commit() + vip = self.vip_repo.create(self.session, load_balancer_id=lb.id, + ip_address="192.0.2.1") + self.session.commit() + listener = self.listener_repo.create( + self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, + 
name="listener_name", description="listener_description", + load_balancer_id=lb.id, protocol=constants.PROTOCOL_HTTP, + protocol_port=80, connection_limit=1, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + new_lb = self.lb_repo.get(self.session, id=lb.id) + self.assertIsNotNone(new_lb) + self.assertIsNotNone(new_lb.vip) + self.assertEqual(vip, new_lb.vip) + self.assertEqual(1, len(new_lb.amphorae)) + self.assertEqual(1, len(new_lb.listeners)) + self.assertEqual(amphora.id, new_lb.amphorae[0].id) + self.assertEqual(amphora.load_balancer_id, + new_lb.amphorae[0].load_balancer_id) + self.assertEqual(amphora.compute_id, new_lb.amphorae[0].compute_id) + self.assertEqual(listener.id, new_lb.listeners[0].id) + self.assertEqual(listener.name, new_lb.listeners[0].name) + self.assertEqual(listener.protocol, new_lb.listeners[0].protocol) + self.assertEqual(listener.protocol_port, + new_lb.listeners[0].protocol_port) + self.lb_repo.delete(self.session, id=new_lb.id) + self.session.commit() + self.assertIsNone(self.lb_repo.get(self.session, id=lb.id)) + new_amphora = self.amphora_repo.get(self.session, id=amphora.id) + self.assertIsNotNone(new_amphora) + self.assertIsNone(new_amphora.load_balancer_id) + self.assertIsNone(self.vip_repo.get(self.session, + load_balancer_id=lb.id)) + self.assertIsNone(self.listener_repo.get(self.session, id=listener.id)) + + def test_test_and_set_provisioning_status_immutable(self): + lb_id = uuidutils.generate_uuid() + self.lb_repo.create(self.session, id=lb_id, + provisioning_status=constants.PENDING_CREATE, + operating_status=constants.OFFLINE, + enabled=True) + self.session.commit() + self.assertFalse(self.lb_repo.test_and_set_provisioning_status( + self.session, lb_id, constants.PENDING_UPDATE)) + lb = self.lb_repo.get(self.session, id=lb_id) + self.assertEqual(constants.PENDING_CREATE, lb.provisioning_status) + + def test_test_and_set_provisioning_status_immutable_raise(self): + lb_id = uuidutils.generate_uuid() + self.lb_repo.create(self.session, id=lb_id, + provisioning_status=constants.PENDING_CREATE, + operating_status=constants.OFFLINE, + enabled=True) + self.session.commit() + self.assertRaises(exceptions.ImmutableObject, + self.lb_repo.test_and_set_provisioning_status, + self.session, lb_id, + status=constants.PENDING_UPDATE, + raise_exception=True) + lb = self.lb_repo.get(self.session, id=lb_id) + self.assertEqual(constants.PENDING_CREATE, lb.provisioning_status) + + def test_test_and_set_provisioning_status_mutable(self): + lb_id = uuidutils.generate_uuid() + self.lb_repo.create(self.session, id=lb_id, + provisioning_status=constants.ACTIVE, + operating_status=constants.OFFLINE, + enabled=True) + self.session.commit() + self.lb_repo.test_and_set_provisioning_status( + self.session, lb_id, constants.PENDING_UPDATE) + self.session.commit() + lb = self.lb_repo.get(self.session, id=lb_id) + self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) + + def test_test_and_set_provisioning_status_error_on_delete(self): + lb_id = uuidutils.generate_uuid() + self.lb_repo.create(self.session, id=lb_id, + provisioning_status=constants.ERROR, + operating_status=constants.OFFLINE, + enabled=True) + self.session.commit() + self.lb_repo.test_and_set_provisioning_status( + self.session, lb_id, constants.PENDING_DELETE) + self.session.commit() + lb = self.lb_repo.get(self.session, id=lb_id) + self.assertEqual(constants.PENDING_DELETE, lb.provisioning_status) + + def 
test_test_and_set_provisioning_status_concurrent(self):
+        lb_id = uuidutils.generate_uuid()
+        self.lb_repo.create(self.session, id=lb_id,
+                            provisioning_status=constants.ACTIVE,
+                            operating_status=constants.ONLINE,
+                            enabled=True)
+        self.session.commit()
+
+        # Create a concurrent session
+        session2 = self._get_db_engine_session()[1]
+
+        # Load LB into session2's identity map
+        session2.query(db_models.LoadBalancer).filter_by(
+            id=lb_id).one()
+
+        # Update the provisioning status through the main session
+        self.lb_repo.test_and_set_provisioning_status(
+            self.session, lb_id, constants.PENDING_UPDATE)
+        self.session.commit()
+
+        # Assert concurrent updates are rejected
+        self.assertFalse(self.lb_repo.test_and_set_provisioning_status(
+            self.session, lb_id, constants.PENDING_UPDATE))
+
+    def test_set_status_for_failover_immutable(self):
+        lb_id = uuidutils.generate_uuid()
+        self.lb_repo.create(self.session, id=lb_id,
+                            provisioning_status=constants.PENDING_CREATE,
+                            operating_status=constants.OFFLINE,
+                            enabled=True)
+        self.session.commit()
+        self.assertFalse(self.lb_repo.set_status_for_failover(
+            self.session, lb_id, constants.PENDING_UPDATE))
+        lb = self.lb_repo.get(self.session, id=lb_id)
+        self.assertEqual(constants.PENDING_CREATE, lb.provisioning_status)
+
+    def test_set_status_for_failover_immutable_raise(self):
+        lb_id = uuidutils.generate_uuid()
+        self.lb_repo.create(self.session, id=lb_id,
+                            provisioning_status=constants.PENDING_CREATE,
+                            operating_status=constants.OFFLINE,
+                            enabled=True)
+        self.session.commit()
+        self.assertRaises(exceptions.ImmutableObject,
+                          self.lb_repo.set_status_for_failover,
+                          self.session, lb_id,
+                          status=constants.PENDING_UPDATE,
+                          raise_exception=True)
+        lb = self.lb_repo.get(self.session, id=lb_id)
+        self.assertEqual(constants.PENDING_CREATE, lb.provisioning_status)
+
+    def test_set_status_for_failover_mutable(self):
+        lb_id = uuidutils.generate_uuid()
+        self.lb_repo.create(self.session, id=lb_id,
+                            provisioning_status=constants.ACTIVE,
+                            operating_status=constants.OFFLINE,
+                            enabled=True)
+        self.session.commit()
+        self.lb_repo.set_status_for_failover(
+            self.session, lb_id, constants.PENDING_UPDATE)
+        self.session.commit()
+        lb = self.lb_repo.get(self.session, id=lb_id)
+        self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status)
+
+    def test_set_status_for_failover_error(self):
+        lb_id = uuidutils.generate_uuid()
+        self.lb_repo.create(self.session, id=lb_id,
+                            provisioning_status=constants.ERROR,
+                            operating_status=constants.OFFLINE,
+                            enabled=True)
+        self.session.commit()
+        self.lb_repo.set_status_for_failover(
+            self.session, lb_id, constants.PENDING_UPDATE)
+        self.session.commit()
+        lb = self.lb_repo.get(self.session, id=lb_id)
+        self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status)
+
+    def test_get_all_deleted_expiring_load_balancer(self):
+        exp_age = datetime.timedelta(seconds=self.FAKE_EXP_AGE)
+        updated_at = timeutils.utcnow() - exp_age
+        lb1 = self.create_loadbalancer(
+            self.FAKE_UUID_1, updated_at=updated_at,
+            provisioning_status=constants.DELETED)
+        lb2 = self.create_loadbalancer(
+            self.FAKE_UUID_2, provisioning_status=constants.DELETED)
+
+        expiring_ids = self.lb_repo.get_all_deleted_expiring(
+            self.session, exp_age=exp_age)
+        self.assertIn(lb1.id, expiring_ids)
+        self.assertNotIn(lb2.id, expiring_ids)
+
+
+class VipRepositoryTest(BaseRepositoryTest):
+
+    def setUp(self):
+        super().setUp()
+        self.lb = self.lb_repo.create(
+            self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2,
+            name="lb_name", description="lb_description",
+            provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE, enabled=True)
+
+    def create_vip(self, lb_id):
+        vip = self.vip_repo.create(self.session, load_balancer_id=lb_id,
+                                   ip_address="192.0.2.1")
+        return vip
+
+    def test_get(self):
+        vip = self.create_vip(self.lb.id)
+        new_vip = self.vip_repo.get(self.session,
+                                    load_balancer_id=vip.load_balancer_id)
+        self.assertIsInstance(new_vip, data_models.Vip)
+        self.assertEqual(vip, new_vip)
+
+    def test_create(self):
+        vip = self.create_vip(self.lb.id)
+        self.assertEqual(self.lb.id, vip.load_balancer_id)
+        self.assertEqual("192.0.2.1", vip.ip_address)
+
+    def test_update(self):
+        address_change = "192.0.2.2"
+        vip = self.create_vip(self.lb.id)
+        self.vip_repo.update(self.session, vip.load_balancer_id,
+                             ip_address=address_change)
+        new_vip = self.vip_repo.get(self.session,
+                                    load_balancer_id=vip.load_balancer_id)
+        self.assertEqual(address_change, new_vip.ip_address)
+
+    def test_update_sg_ids(self):
+        sg1_id = uuidutils.generate_uuid()
+        sg2_id = uuidutils.generate_uuid()
+        vip = self.create_vip(self.lb.id)
+        self.vip_repo.update(self.session, vip.load_balancer_id,
+                             sg_ids=[sg1_id, sg2_id])
+        new_vip = self.vip_repo.get(self.session,
+                                    load_balancer_id=vip.load_balancer_id)
+        self.assertIn(sg1_id, new_vip.sg_ids)
+        self.assertIn(sg2_id, new_vip.sg_ids)
+
+        self.vip_repo.update(self.session, vip.load_balancer_id,
+                             sg_ids=[sg1_id])
+        new_vip = self.vip_repo.get(self.session,
+                                    load_balancer_id=vip.load_balancer_id)
+        self.assertIn(sg1_id, new_vip.sg_ids)
+        self.assertNotIn(sg2_id, new_vip.sg_ids)
+
+        self.vip_repo.update(self.session, vip.load_balancer_id,
+                             sg_ids=[])
+        new_vip = self.vip_repo.get(self.session,
+                                    load_balancer_id=vip.load_balancer_id)
+        self.assertNotIn(sg1_id, new_vip.sg_ids)
+        self.assertNotIn(sg2_id, new_vip.sg_ids)
+
+    def test_delete(self):
+        vip = self.create_vip(self.lb.id)
+        self.vip_repo.delete(self.session,
+                             load_balancer_id=vip.load_balancer_id)
+        self.assertIsNone(self.vip_repo.get(
+            self.session, load_balancer_id=vip.load_balancer_id))
+        new_lb = self.lb_repo.get(self.session, id=self.lb.id)
+        self.assertIsNotNone(new_lb)
+        self.assertIsNone(new_lb.vip)
+
+    def test_create_ipv6(self):
+        vip = self.vip_repo.create(self.session, load_balancer_id=self.lb.id,
+                                   ip_address="2001:DB8::10")
+        self.assertEqual(self.lb.id, vip.load_balancer_id)
+        self.assertEqual("2001:DB8::10", vip.ip_address)
+
+    # Note: This test uses the unique local address range to validate
+    # that we handle a fully expanded IP address properly.
+    # This is not possible with the documentation/testnet range.
+ def test_create_ipv6_full(self): + vip = self.vip_repo.create( + self.session, load_balancer_id=self.lb.id, + ip_address="fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") + self.assertEqual(self.lb.id, vip.load_balancer_id) + self.assertEqual("fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + vip.ip_address) + + +class SNIRepositoryTest(BaseRepositoryTest): + + def setUp(self): + super().setUp() + self.listener = self.listener_repo.create( + self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, + name="listener_name", description="listener_description", + protocol=constants.PROTOCOL_HTTP, protocol_port=80, + connection_limit=1, provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True, peer_port=1025) + + def create_sni(self, listener_id): + sni = self.sni_repo.create(self.session, + listener_id=listener_id, + tls_container_id=self.FAKE_UUID_3, + position=0) + return sni + + def test_get(self): + sni = self.create_sni(self.listener.id) + new_sni = self.sni_repo.get(self.session, listener_id=sni.listener_id) + self.assertIsInstance(new_sni, data_models.SNI) + self.assertEqual(sni, new_sni) + + def test_create(self): + sni = self.create_sni(self.listener.id) + new_sni = self.sni_repo.get(self.session, listener_id=sni.listener_id) + self.assertEqual(self.listener.id, new_sni.listener_id) + self.assertEqual(self.FAKE_UUID_3, new_sni.tls_container_id) + self.assertEqual(0, new_sni.position) + + def test_update(self): + position_change = 10 + sni = self.create_sni(self.listener.id) + self.sni_repo.update(self.session, listener_id=sni.listener_id, + position=position_change) + new_sni = self.sni_repo.get(self.session, listener_id=sni.listener_id) + self.assertEqual(position_change, new_sni.position) + + def test_delete(self): + sni = self.create_sni(self.listener.id) + self.sni_repo.delete(self.session, listener_id=sni.listener_id) + self.assertIsNone(self.sni_repo.get(self.session, + listener_id=sni.listener_id)) + new_listener = self.listener_repo.get(self.session, + id=self.listener.id) + self.assertIsNotNone(new_listener) + self.assertEqual(0, len(new_listener.sni_containers)) + + +class AmphoraRepositoryTest(BaseRepositoryTest): + + def setUp(self): + super().setUp() + self.lb = self.lb_repo.create( + self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, + name="lb_name", description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + + def create_amphora(self, amphora_id, **overrides): + settings = { + 'id': amphora_id, + 'compute_id': self.FAKE_UUID_3, + 'status': constants.ACTIVE, + 'lb_network_ip': self.FAKE_IP, + 'vrrp_ip': self.FAKE_IP, + 'ha_ip': self.FAKE_IP, + 'role': constants.ROLE_MASTER, + 'cert_expiration': timeutils.utcnow(), + 'cert_busy': False + } + settings.update(overrides) + amphora = self.amphora_repo.create(self.session, **settings) + self.session.commit() + return amphora + + def test_get(self): + amphora = self.create_amphora(self.FAKE_UUID_1) + new_amphora = self.amphora_repo.get(self.session, id=amphora.id) + self.assertIsInstance(new_amphora, data_models.Amphora) + self.assertEqual(amphora.id, new_amphora.id) + self.assertEqual(amphora.load_balancer_id, + new_amphora.load_balancer_id) + self.assertEqual(amphora.compute_id, new_amphora.compute_id) + + def test_count(self): + comp_id = uuidutils.generate_uuid() + self.create_amphora(self.FAKE_UUID_1, compute_id=comp_id) + self.create_amphora(self.FAKE_UUID_2, compute_id=comp_id, + 
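+                            # Created in DELETED status; count() still
+                            # includes it unless show_deleted=False.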
status=constants.DELETED) + amp_count = self.amphora_repo.count(self.session, compute_id=comp_id) + self.assertEqual(2, amp_count) + + def test_count_not_deleted(self): + comp_id = uuidutils.generate_uuid() + self.create_amphora(self.FAKE_UUID_1, compute_id=comp_id) + self.create_amphora(self.FAKE_UUID_2, compute_id=comp_id, + status=constants.DELETED) + amp_count = self.amphora_repo.count(self.session, compute_id=comp_id, + show_deleted=False) + self.assertEqual(1, amp_count) + + def test_create(self): + amphora = self.create_amphora(self.FAKE_UUID_1) + self.assertEqual(self.FAKE_UUID_1, amphora.id) + self.assertEqual(self.FAKE_UUID_3, amphora.compute_id) + self.assertEqual(constants.ACTIVE, amphora.status) + self.assertEqual(constants.ROLE_MASTER, amphora.role) + + def test_exists_true(self): + amphora = self.create_amphora(self.FAKE_UUID_1) + exist = self.amphora_repo.exists(self.session, id=amphora.id) + self.assertTrue(exist) + + def test_exists_false(self): + self.create_amphora(self.FAKE_UUID_1) + exist = self.amphora_repo.exists(self.session, id='test') + self.assertFalse(exist) + + def test_update(self): + status_change = constants.PENDING_UPDATE + amphora = self.create_amphora(self.FAKE_UUID_1) + self.amphora_repo.update(self.session, amphora.id, + status=status_change) + new_amphora = self.amphora_repo.get(self.session, id=amphora.id) + self.assertEqual(status_change, new_amphora.status) + + def test_delete(self): + amphora = self.create_amphora(self.FAKE_UUID_1) + self.amphora_repo.delete(self.session, id=amphora.id) + self.assertIsNone(self.amphora_repo.get(self.session, id=amphora.id)) + + def test_associate_amphora_load_balancer(self): + amphora = self.create_amphora(self.FAKE_UUID_1) + self.amphora_repo.associate(self.session, self.lb.id, amphora.id) + new_amphora = self.amphora_repo.get(self.session, + id=amphora.id) + self.assertIsNotNone(new_amphora.load_balancer) + self.assertIsInstance(new_amphora.load_balancer, + data_models.LoadBalancer) + + def test_delete_amphora_with_load_balancer(self): + amphora = self.create_amphora(self.FAKE_UUID_1) + self.amphora_repo.associate(self.session, self.lb.id, amphora.id) + self.session.commit() + self.amphora_repo.delete(self.session, id=amphora.id) + self.session.commit() + self.assertIsNone(self.amphora_repo.get(self.session, id=amphora.id)) + new_lb = self.lb_repo.get(self.session, id=self.lb.id) + self.assertEqual(0, len(new_lb.amphorae)) + + def test_allocate_and_associate(self): + new_amphora = self.amphora_repo.allocate_and_associate(self.session, + self.lb.id) + self.assertIsNone(new_amphora) + + amphora = self.create_amphora(self.FAKE_UUID_1) + self.amphora_repo.update(self.session, amphora.id, + status='READY') + new_amphora = self.amphora_repo.allocate_and_associate(self.session, + self.lb.id) + self.assertIsNotNone(new_amphora) + self.assertIsInstance(new_amphora, data_models.Amphora) + + def test_get_lb_for_amphora(self): + amphora = self.create_amphora(self.FAKE_UUID_1) + self.amphora_repo.associate(self.session, self.lb.id, amphora.id) + self.session.commit() + lb = self.amphora_repo.get_lb_for_amphora(self.session, amphora.id) + self.assertIsNotNone(lb) + self.assertEqual(self.lb.id, lb.id) + + def test_get_all_deleted_expiring_amphora(self): + exp_age = datetime.timedelta(seconds=self.FAKE_EXP_AGE) + updated_at = timeutils.utcnow() - exp_age + amphora1 = self.create_amphora( + self.FAKE_UUID_1, updated_at=updated_at, status=constants.DELETED) + amphora2 = self.create_amphora( + self.FAKE_UUID_2, 
status=constants.DELETED) + + expiring_ids = self.amphora_repo.get_all_deleted_expiring( + self.session, exp_age=exp_age) + self.assertIn(amphora1.id, expiring_ids) + self.assertNotIn(amphora2.id, expiring_ids) + + def test_get_none_cert_expired_amphora(self): + # test with no expired amphora + amp = self.amphora_repo.get_cert_expiring_amphora(self.session) + self.assertIsNone(amp) + + amphora = self.create_amphora(self.FAKE_UUID_1) + + expired_interval = CONF.house_keeping.cert_expiry_buffer + expiration = timeutils.utcnow() + datetime.timedelta( + seconds=2 * expired_interval) + + self.amphora_repo.update(self.session, amphora.id, + cert_expiration=expiration) + amp = self.amphora_repo.get_cert_expiring_amphora(self.session) + self.assertIsNone(amp) + + def test_get_cert_expired_amphora(self): + # test with expired amphora + amphora2 = self.create_amphora(self.FAKE_UUID_2) + + expiration = timeutils.utcnow() + datetime.timedelta( + seconds=1) + self.amphora_repo.update(self.session, amphora2.id, + cert_expiration=expiration) + + cert_expired_amphora = self.amphora_repo.get_cert_expiring_amphora( + self.session) + + self.assertEqual(cert_expired_amphora.cert_expiration, expiration) + self.assertEqual(cert_expired_amphora.id, amphora2.id) + + def test_get_cert_expired_amphora_deleted(self): + amphora = self.create_amphora(self.FAKE_UUID_3) + expiration = timeutils.utcnow() + datetime.timedelta(seconds=1) + self.amphora_repo.update(self.session, amphora.id, + status=constants.DELETED, + cert_expiration=expiration) + + cert_expired_amphora = self.amphora_repo.get_cert_expiring_amphora( + self.session) + + self.assertIsNone(cert_expired_amphora) + + def test_get_lb_for_health_update(self): + amphora1 = self.create_amphora(self.FAKE_UUID_1) + amphora2 = self.create_amphora(self.FAKE_UUID_3) + self.amphora_repo.associate(self.session, self.lb.id, amphora1.id) + self.amphora_repo.associate(self.session, self.lb.id, amphora2.id) + self.session.commit() + + lb_ref = {'enabled': True, 'id': self.lb.id, + 'operating_status': constants.ONLINE, + 'provisioning_status': constants.ACTIVE} + + # Test with just a load balancer + lb = self.amphora_repo.get_lb_for_health_update(self.session, + self.FAKE_UUID_1) + self.assertEqual(lb_ref, lb) + + pool = self.pool_repo.create( + self.session, id=self.FAKE_UUID_4, project_id=self.FAKE_UUID_2, + name="pool_test", description="pool_description", + protocol=constants.PROTOCOL_HTTP, load_balancer_id=self.lb.id, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + self.session.commit() + + pool_ref = {pool.id: {'members': {}, + 'operating_status': constants.ONLINE}} + lb_ref['pools'] = pool_ref + + # Test with an LB and a pool + lb = self.amphora_repo.get_lb_for_health_update(self.session, + self.FAKE_UUID_1) + self.assertEqual(lb_ref, lb) + + listener = self.listener_repo.create( + self.session, id=self.FAKE_UUID_5, project_id=self.FAKE_UUID_2, + name="listener_name", description="listener_description", + protocol=constants.PROTOCOL_HTTP, protocol_port=80, + connection_limit=1, operating_status=constants.ONLINE, + load_balancer_id=self.lb.id, provisioning_status=constants.ACTIVE, + enabled=True, peer_port=1025, default_pool_id=pool.id) + + listener_ref = {listener.id: {'operating_status': constants.ONLINE, + 'protocol': constants.PROTOCOL_HTTP, + 'enabled': 1}} + lb_ref['listeners'] = listener_ref + + # Test with an LB, pool, and listener (no members) + lb = 
self.amphora_repo.get_lb_for_health_update(self.session, + self.FAKE_UUID_1) + self.assertEqual(lb_ref, lb) + + member1 = self.member_repo.create(self.session, id=self.FAKE_UUID_6, + project_id=self.FAKE_UUID_2, + pool_id=pool.id, + ip_address="192.0.2.1", + protocol_port=80, enabled=True, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, + backup=False) + self.session.commit() + + member2 = self.member_repo.create(self.session, id=self.FAKE_UUID_7, + project_id=self.FAKE_UUID_2, + pool_id=pool.id, + ip_address="192.0.2.21", + protocol_port=80, enabled=True, + provisioning_status=constants.ACTIVE, + operating_status=constants.OFFLINE, + backup=False) + self.session.commit() + + member_ref = {member1.id: {'operating_status': constants.ONLINE}, + member2.id: {'operating_status': constants.OFFLINE}} + lb_ref['pools'][pool.id]['members'] = member_ref + + # Test with an LB, pool, listener, and members + lb = self.amphora_repo.get_lb_for_health_update(self.session, + self.FAKE_UUID_1) + self.assertEqual(lb_ref, lb) + + def test_and_set_status_for_delete(self): + # Normal path + amphora = self.create_amphora(self.FAKE_UUID_1, + status=constants.ERROR) + self.amphora_repo.test_and_set_status_for_delete(self.session, + amphora.id) + new_amphora = self.amphora_repo.get(self.session, id=amphora.id) + self.assertEqual(constants.PENDING_DELETE, new_amphora.status) + + # Test deleted path + amphora = self.create_amphora(self.FAKE_UUID_2, + status=constants.DELETED) + self.assertRaises(sa_exception.NoResultFound, + self.amphora_repo.test_and_set_status_for_delete, + self.session, amphora.id) + + # Test in use path + amphora = self.create_amphora(self.FAKE_UUID_3, + status=constants.AMPHORA_ALLOCATED) + self.assertRaises(exceptions.ImmutableObject, + self.amphora_repo.test_and_set_status_for_delete, + self.session, amphora.id) + + +class AmphoraHealthRepositoryTest(BaseRepositoryTest): + def setUp(self): + super().setUp() + self._fake_ip_gen = (self.FAKE_IP + str(ip_end) for ip_end in + range(100)) + self.amphora = self.amphora_repo.create(self.session, + id=self.FAKE_UUID_1, + compute_id=self.FAKE_UUID_3, + status=constants.ACTIVE, + lb_network_ip=self.FAKE_IP) + + def create_amphora(self, amphora_id, **overrides): + fake_ip = next(self._fake_ip_gen) + settings = { + 'id': amphora_id, + 'compute_id': uuidutils.generate_uuid(), + 'status': constants.ACTIVE, + 'lb_network_ip': fake_ip, + 'vrrp_ip': fake_ip, + 'ha_ip': fake_ip, + 'role': constants.ROLE_MASTER, + 'cert_expiration': timeutils.utcnow(), + 'cert_busy': False + } + settings.update(overrides) + amphora = self.amphora_repo.create(self.session, **settings) + return amphora + + def create_amphora_health(self, amphora_id): + newdate = timeutils.utcnow() - datetime.timedelta(minutes=10) + + amphora_health = self.amphora_health_repo.create( + self.session, amphora_id=amphora_id, + last_update=newdate, + busy=False) + return amphora_health + + def test_replace(self): + amphora_id = uuidutils.generate_uuid() + now = timeutils.utcnow() + self.assertIsNone(self.amphora_health_repo.get( + self.session, amphora_id=amphora_id)) + self.amphora_health_repo.replace(self.session, amphora_id, + last_update=now) + obj = self.amphora_health_repo.get(self.session, amphora_id=amphora_id) + self.assertIsNotNone(obj) + self.assertEqual(amphora_id, obj.amphora_id) + self.assertEqual(now, obj.last_update) + + now += datetime.timedelta(seconds=69) + self.amphora_health_repo.replace(self.session, amphora_id, + last_update=now) + obj = 
self.amphora_health_repo.get(self.session, amphora_id=amphora_id) + self.assertIsNotNone(obj) + self.assertEqual(amphora_id, obj.amphora_id) + self.assertEqual(now, obj.last_update) + + def test_get(self): + amphora_health = self.create_amphora_health(self.amphora.id) + new_amphora_health = self.amphora_health_repo.get( + self.session, amphora_id=amphora_health.amphora_id) + + self.assertIsInstance(new_amphora_health, data_models.AmphoraHealth) + self.assertEqual(amphora_health, new_amphora_health) + + def test_check_amphora_expired_default_exp_age(self): + """When exp_age defaults to CONF.house_keeping.amphora_expiry_age.""" + self.create_amphora_health(self.amphora.id) + checkres = self.amphora_health_repo.check_amphora_health_expired( + self.session, self.amphora.id) + # Default amphora_expiry_age value is 1 week so amphora shouldn't be + # considered expired. + self.assertFalse(checkres) + + def test_check_amphora_expired_with_exp_age(self): + """When exp_age is passed as an argument.""" + exp_age = datetime.timedelta( + seconds=self.FAKE_EXP_AGE) + self.create_amphora_health(self.amphora.id) + checkres = self.amphora_health_repo.check_amphora_health_expired( + self.session, self.amphora.id, exp_age) + self.assertTrue(checkres) + + def test_check_amphora_expired_with_no_age(self): + """When the amphora_health entry is missing in the DB.""" + checkres = self.amphora_health_repo.check_amphora_health_expired( + self.session, self.amphora.id) + self.assertTrue(checkres) + + def test_get_stale_amphora(self): + stale_amphora = self.amphora_health_repo.get_stale_amphora( + self.session) + self.assertIsNone(stale_amphora) + + uuid = uuidutils.generate_uuid() + self.create_amphora(uuid) + self.amphora_repo.update(self.session, uuid, + status=constants.AMPHORA_ALLOCATED) + self.create_amphora_health(uuid) + stale_amphora = self.amphora_health_repo.get_stale_amphora( + self.session) + self.assertEqual(uuid, stale_amphora.amphora_id) + + def test_get_stale_amphora_past_threshold(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='health_manager', failover_threshold=3) + + stale_amphora = self.amphora_health_repo.get_stale_amphora( + self.session) + self.assertIsNone(stale_amphora) + + # Two stale amphora expected, should return that amp + # These will go into failover and be marked "busy" + uuids = [] + for _ in range(2): + uuid = uuidutils.generate_uuid() + uuids.append(uuid) + self.create_amphora(uuid) + self.amphora_repo.update(self.session, uuid, + status=constants.AMPHORA_ALLOCATED) + self.create_amphora_health(uuid) + stale_amphora = self.amphora_health_repo.get_stale_amphora( + self.session) + self.assertIn(stale_amphora.amphora_id, uuids) + + # Creating more stale amphorae should return no amps (past threshold) + stale_uuids = [] + for _ in range(4): + uuid = uuidutils.generate_uuid() + stale_uuids.append(uuid) + self.create_amphora(uuid) + self.amphora_repo.update(self.session, uuid, + status=constants.AMPHORA_ALLOCATED) + self.create_amphora_health(uuid) + stale_amphora = self.amphora_health_repo.get_stale_amphora( + self.session) + self.assertIsNone(stale_amphora) + num_fo_stopped = self.session.query(db_models.Amphora).filter( + db_models.Amphora.status == constants.AMPHORA_FAILOVER_STOPPED + ).count() + # Note that the two amphora started failover, so are "busy" and + # should not be marked FAILOVER_STOPPED. 
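+        # (Added note: six amphorae are stale at this point; excluding
+        # the two busy ones leaves four to be marked FAILOVER_STOPPED,
+        # hence the expected count of 4.)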
+ self.assertEqual(4, num_fo_stopped) + + # One recovered, but still over threshold + # Two "busy", One fully healthy, three in FAILOVER_STOPPED + amp = self.session.query(db_models.AmphoraHealth).filter_by( + amphora_id=stale_uuids[2]).first() + amp.last_update = timeutils.utcnow() + self.session.flush() + stale_amphora = self.amphora_health_repo.get_stale_amphora( + self.session) + self.assertIsNone(stale_amphora) + num_fo_stopped = self.session.query(db_models.Amphora).filter( + db_models.Amphora.status == constants.AMPHORA_FAILOVER_STOPPED + ).count() + self.assertEqual(3, num_fo_stopped) + + # Another one recovered, now below threshold + # Two are "busy", Two are fully healthy, Two are in FAILOVER_STOPPED + amp = self.session.query(db_models.AmphoraHealth).filter_by( + amphora_id=stale_uuids[3]).first() + amp.last_update = timeutils.utcnow() + stale_amphora = self.amphora_health_repo.get_stale_amphora( + self.session) + self.assertIsNotNone(stale_amphora) + num_fo_stopped = self.session.query(db_models.Amphora).filter( + db_models.Amphora.status == constants.AMPHORA_FAILOVER_STOPPED + ).count() + self.assertEqual(2, num_fo_stopped) + + # After error recovery all amps should be allocated again + now = timeutils.utcnow() + for amp in self.session.query(db_models.AmphoraHealth).all(): + amp.last_update = now + stale_amphora = self.amphora_health_repo.get_stale_amphora( + self.session) + self.assertIsNone(stale_amphora) + num_allocated = self.session.query(db_models.Amphora).filter( + db_models.Amphora.status == constants.AMPHORA_ALLOCATED + ).count() + self.assertEqual(5, num_allocated) + + def test_create(self): + amphora_health = self.create_amphora_health(self.FAKE_UUID_1) + self.assertEqual(self.FAKE_UUID_1, amphora_health.amphora_id) + newcreatedtime = timeutils.utcnow() + oldcreatetime = amphora_health.last_update + + diff = newcreatedtime - oldcreatetime + self.assertEqual(600, diff.seconds) + + def test_update(self): + d = datetime.datetime.today() + amphora_health = self.create_amphora_health(self.FAKE_UUID_1) + self.amphora_health_repo.update(self.session, + amphora_health.amphora_id, + last_update=d) + new_amphora_health = self.amphora_health_repo.get( + self.session, amphora_id=amphora_health.amphora_id) + self.assertEqual(d, new_amphora_health.last_update) + + def test_delete(self): + amphora_health = self.create_amphora_health(self.FAKE_UUID_1) + self.amphora_health_repo.delete( + self.session, amphora_id=amphora_health.amphora_id) + self.assertIsNone(self.amphora_health_repo.get( + self.session, amphora_id=amphora_health.amphora_id)) + + +class VRRPGroupRepositoryTest(BaseRepositoryTest): + def setUp(self): + super().setUp() + self.lb = self.lb_repo.create( + self.session, id=self.FAKE_UUID_1, project_id=self.FAKE_UUID_2, + name="lb_name", description="lb_description", + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True) + + def test_update(self): + self.vrrpgroup = self.vrrp_group_repo.create( + self.session, + load_balancer_id=self.lb.id, + vrrp_group_name='TESTVRRPGROUP', + vrrp_auth_type=constants.VRRP_AUTH_DEFAULT, + vrrp_auth_pass='TESTPASS', + advert_int=1) + + # Validate baseline + old_vrrp_group = self.vrrp_group_repo.get(self.session, + load_balancer_id=self.lb.id) + + self.assertEqual('TESTVRRPGROUP', old_vrrp_group.vrrp_group_name) + self.assertEqual(constants.VRRP_AUTH_DEFAULT, + old_vrrp_group.vrrp_auth_type) + self.assertEqual('TESTPASS', old_vrrp_group.vrrp_auth_pass) + self.assertEqual(1, old_vrrp_group.advert_int) + + 
# Test update + self.vrrp_group_repo.update(self.session, + load_balancer_id=self.lb.id, + vrrp_group_name='TESTVRRPGROUP2', + vrrp_auth_type='AH', + vrrp_auth_pass='TESTPASS2', + advert_int=2) + + new_vrrp_group = self.vrrp_group_repo.get(self.session, + load_balancer_id=self.lb.id) + + self.assertEqual('TESTVRRPGROUP2', new_vrrp_group.vrrp_group_name) + self.assertEqual('AH', new_vrrp_group.vrrp_auth_type) + self.assertEqual('TESTPASS2', new_vrrp_group.vrrp_auth_pass) + self.assertEqual(2, new_vrrp_group.advert_int) + + +class L7PolicyRepositoryTest(BaseRepositoryTest): + + def setUp(self): + super().setUp() + self.pool = self.create_pool(self.FAKE_UUID_1) + self.listener = self.create_listener(self.FAKE_UUID_1, 80) + + def create_listener(self, listener_id, port): + listener = self.listener_repo.create( + self.session, id=listener_id, project_id=self.FAKE_UUID_2, + name="listener_name", description="listener_description", + protocol=constants.PROTOCOL_HTTP, protocol_port=port, + connection_limit=1, operating_status=constants.ONLINE, + provisioning_status=constants.ACTIVE, enabled=True, peer_port=1025) + return listener + + def create_pool(self, pool_id): + pool = self.pool_repo.create( + self.session, id=pool_id, project_id=self.FAKE_UUID_2, + name="pool_test", description="pool_description", + protocol=constants.PROTOCOL_HTTP, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE, enabled=True, tags=['test_tag']) + return pool + + def create_l7policy(self, l7policy_id, listener_id, position, + action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + redirect_pool_id=None, redirect_url=None): + l7policy = self.l7policy_repo.create( + self.session, id=l7policy_id, name='l7policy_test', + description='l7policy_description', listener_id=listener_id, + position=position, action=action, + redirect_pool_id=redirect_pool_id, redirect_url=redirect_url, + operating_status=constants.ONLINE, + provisioning_status=constants.ACTIVE, enabled=True) + return l7policy + + def create_l7rule(self, l7rule_id, l7policy_id, + type=constants.L7RULE_TYPE_PATH, + compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + key=None, value="/api", enabled=True): + l7rule = self.l7rule_repo.create( + self.session, id=l7rule_id, l7policy_id=l7policy_id, + type=type, compare_type=compare_type, key=key, value=value, + operating_status=constants.ONLINE, enabled=enabled, + provisioning_status=constants.ACTIVE) + return l7rule + + def test_get(self): + listener = self.create_listener(uuidutils.generate_uuid(), 80) + pool = self.create_pool(uuidutils.generate_uuid()) + l7policy = self.create_l7policy(uuidutils.generate_uuid(), + listener.id, 999, + redirect_pool_id=pool.id) + new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id) + self.assertIsInstance(new_l7policy, data_models.L7Policy) + self.assertEqual(l7policy, new_l7policy) + self.assertEqual(1, new_l7policy.position) + + def test_get_all(self): + listener = self.create_listener(uuidutils.generate_uuid(), 80) + pool = self.create_pool(uuidutils.generate_uuid()) + l7policy_a = self.create_l7policy(uuidutils.generate_uuid(), + listener.id, + 1, redirect_pool_id=pool.id) + l7policy_c = self.create_l7policy(uuidutils.generate_uuid(), + listener.id, + 2, redirect_pool_id=pool.id) + l7policy_b = self.create_l7policy(uuidutils.generate_uuid(), + listener.id, + 2, redirect_pool_id=pool.id) + new_l7policy_a = self.l7policy_repo.get(self.session, + id=l7policy_a.id) + new_l7policy_b = 
self.l7policy_repo.get(self.session,
+                                                id=l7policy_b.id)
+        new_l7policy_c = self.l7policy_repo.get(self.session,
+                                                id=l7policy_c.id)
+
+        self.assertEqual(1, new_l7policy_a.position)
+        self.assertEqual(2, new_l7policy_b.position)
+        self.assertEqual(3, new_l7policy_c.position)
+        l7policy_list, _ = self.l7policy_repo.get_all(
+            self.session, listener_id=listener.id)
+        self.assertIsInstance(l7policy_list, list)
+        self.assertEqual(3, len(l7policy_list))
+        self.assertEqual(l7policy_a.id, l7policy_list[0].id)
+        self.assertEqual(l7policy_b.id, l7policy_list[1].id)
+        self.assertEqual(l7policy_c.id, l7policy_list[2].id)
+
+    def test_create(self):
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        pool = self.create_pool(uuidutils.generate_uuid())
+        l7policy = self.create_l7policy(self.FAKE_UUID_1, listener.id, 1,
+                                        redirect_pool_id=pool.id)
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertEqual(self.FAKE_UUID_1, new_l7policy.id)
+        self.assertEqual(listener.id, new_l7policy.listener_id)
+        self.assertEqual(pool.id, new_l7policy.redirect_pool_id)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+                         new_l7policy.action)
+        self.assertEqual(1, new_l7policy.position)
+        self.assertIsNone(new_l7policy.redirect_url)
+
+    def test_create_no_id(self):
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        l7policy = self.l7policy_repo.create(
+            self.session, listener_id=listener.id,
+            action=constants.L7POLICY_ACTION_REJECT,
+            operating_status=constants.ONLINE,
+            provisioning_status=constants.ACTIVE,
+            enabled=True)
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertEqual(listener.id, new_l7policy.listener_id)
+        self.assertIsNone(new_l7policy.redirect_pool_id)
+        self.assertIsNone(new_l7policy.redirect_url)
+        self.assertEqual(constants.L7POLICY_ACTION_REJECT,
+                         new_l7policy.action)
+        self.assertEqual(1, new_l7policy.position)
+
+    def test_l7policy_create_no_listener_id(self):
+        self.assertRaises(
+            db_exception.DBError, self.l7policy_repo.create,
+            self.session, action=constants.L7POLICY_ACTION_REJECT,
+            operating_status=constants.ONLINE,
+            provisioning_status=constants.ACTIVE,
+            enabled=True)
+
+    def test_update(self):
+        new_url = 'http://www.example.com/'
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        pool = self.create_pool(uuidutils.generate_uuid())
+        l7policy = self.create_l7policy(uuidutils.generate_uuid(),
+                                        listener.id, 1,
+                                        redirect_pool_id=pool.id)
+        self.l7policy_repo.update(
+            self.session, id=l7policy.id,
+            action=constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url=new_url, position=l7policy.position)
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        new_pool = self.pool_repo.get(self.session, id=pool.id)
+        self.assertEqual(new_url, new_l7policy.redirect_url)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                         new_l7policy.action)
+        self.assertIsNone(new_l7policy.redirect_pool_id)
+        self.assertNotIn(new_l7policy.id, new_pool.l7policies)
+
+    def test_update_bad_id(self):
+        self.assertRaises(exceptions.NotFound, self.l7policy_repo.update,
+                          self.session, id=uuidutils.generate_uuid())
+
+    def test_delete(self):
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        pool = self.create_pool(uuidutils.generate_uuid())
+        l7policy = self.create_l7policy(uuidutils.generate_uuid(),
+                                        listener.id, 1,
+                                        redirect_pool_id=pool.id)
+        self.l7policy_repo.delete(self.session, id=l7policy.id)
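+        # The deleted policy should also disappear from the listener's
+        # l7policies collection, which the checks below verify.
+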
self.assertIsNone(self.l7policy_repo.get(self.session, id=l7policy.id)) + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertIsNotNone(new_listener) + self.assertEqual(0, len(new_listener.l7policies)) + + def test_delete_bad_id(self): + self.assertRaises(exceptions.NotFound, self.l7policy_repo.delete, + self.session, id=uuidutils.generate_uuid()) + + def test_reorder_policies(self): + listener = self.create_listener(uuidutils.generate_uuid(), 80) + pool = self.create_pool(uuidutils.generate_uuid()) + l7policy_a = self.create_l7policy(uuidutils.generate_uuid(), + listener.id, + 1, redirect_pool_id=pool.id) + l7policy_b = self.create_l7policy(uuidutils.generate_uuid(), + listener.id, + 2, redirect_pool_id=pool.id) + l7policy_c = self.create_l7policy(uuidutils.generate_uuid(), + listener.id, + 3, redirect_pool_id=pool.id) + new_l7policy_a = self.l7policy_repo.get(self.session, + id=l7policy_a.id) + new_l7policy_b = self.l7policy_repo.get(self.session, + id=l7policy_b.id) + new_l7policy_c = self.l7policy_repo.get(self.session, + id=l7policy_c.id) + self.assertEqual(1, new_l7policy_a.position) + self.assertEqual(2, new_l7policy_b.position) + self.assertEqual(3, new_l7policy_c.position) + self.l7policy_repo.update(self.session, id=l7policy_a.id, position=2) + new_l7policy_a = self.l7policy_repo.get(self.session, + id=l7policy_a.id) + new_l7policy_b = self.l7policy_repo.get(self.session, + id=l7policy_b.id) + new_l7policy_c = self.l7policy_repo.get(self.session, + id=l7policy_c.id) + self.assertEqual(2, new_l7policy_a.position) + self.assertEqual(1, new_l7policy_b.position) + self.assertEqual(3, new_l7policy_c.position) + self.l7policy_repo.update(self.session, id=l7policy_c.id, position=1) + new_l7policy_a = self.l7policy_repo.get(self.session, + id=l7policy_a.id) + new_l7policy_b = self.l7policy_repo.get(self.session, + id=l7policy_b.id) + new_l7policy_c = self.l7policy_repo.get(self.session, + id=l7policy_c.id) + self.assertEqual(3, new_l7policy_a.position) + self.assertEqual(2, new_l7policy_b.position) + self.assertEqual(1, new_l7policy_c.position) + self.l7policy_repo.update(self.session, id=l7policy_c.id, position=1) + new_l7policy_a = self.l7policy_repo.get(self.session, + id=l7policy_a.id) + new_l7policy_b = self.l7policy_repo.get(self.session, + id=l7policy_b.id) + new_l7policy_c = self.l7policy_repo.get(self.session, + id=l7policy_c.id) + self.assertEqual(3, new_l7policy_a.position) + self.assertEqual(2, new_l7policy_b.position) + self.assertEqual(1, new_l7policy_c.position) + + def test_delete_forcing_reorder(self): + listener = self.create_listener(uuidutils.generate_uuid(), 80) + pool = self.create_pool(uuidutils.generate_uuid()) + l7policy_a = self.create_l7policy(uuidutils.generate_uuid(), + listener.id, + 1, redirect_pool_id=pool.id) + l7policy_b = self.create_l7policy(uuidutils.generate_uuid(), + listener.id, + 2, redirect_pool_id=pool.id) + l7policy_c = self.create_l7policy(uuidutils.generate_uuid(), + listener.id, + 999, redirect_pool_id=pool.id) + new_l7policy_a = self.l7policy_repo.get(self.session, + id=l7policy_a.id) + new_l7policy_b = self.l7policy_repo.get(self.session, + id=l7policy_b.id) + new_l7policy_c = self.l7policy_repo.get(self.session, + id=l7policy_c.id) + self.assertEqual(1, new_l7policy_a.position) + self.assertEqual(2, new_l7policy_b.position) + self.assertEqual(3, new_l7policy_c.position) + self.l7policy_repo.delete(self.session, id=l7policy_b.id) + l7policy_list, _ = self.l7policy_repo.get_all( + self.session, 
listener_id=listener.id)
+        self.assertIsInstance(l7policy_list, list)
+        self.assertEqual(2, len(l7policy_list))
+        new_l7policy_a = self.l7policy_repo.get(self.session,
+                                                id=l7policy_a.id)
+        new_l7policy_c = self.l7policy_repo.get(self.session,
+                                                id=l7policy_c.id)
+        self.assertEqual(1, new_l7policy_a.position)
+        self.assertEqual(2, new_l7policy_c.position)
+
+    def test_delete_with_rule(self):
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        pool = self.create_pool(uuidutils.generate_uuid())
+        l7policy = self.create_l7policy(uuidutils.generate_uuid(),
+                                        listener.id, 1,
+                                        redirect_pool_id=pool.id,)
+        l7rule = self.create_l7rule(uuidutils.generate_uuid(), l7policy.id)
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
+        self.assertEqual(l7policy.id, new_l7policy.id)
+        self.assertEqual(l7rule.id, new_l7rule.id)
+        self.l7policy_repo.delete(self.session, id=l7policy.id)
+        self.assertIsNone(self.l7policy_repo.get(self.session, id=l7policy.id))
+        self.assertIsNone(self.l7rule_repo.get(self.session, id=l7rule.id))
+
+    def test_update_action_rdr_url_to_redirect_pool(self):
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        pool = self.create_pool(uuidutils.generate_uuid())
+        l7policy = self.create_l7policy(
+            uuidutils.generate_uuid(), listener.id, 1,
+            action=constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url="http://www.example.com/")
+        self.session.commit()
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertIsNone(new_l7policy.redirect_pool_id)
+        self.l7policy_repo.update(
+            self.session, id=l7policy.id,
+            redirect_pool_id=pool.id)
+        self.session.commit()
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertEqual(pool.id, new_l7policy.redirect_pool.id)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+                         new_l7policy.action)
+        self.assertIsNone(new_l7policy.redirect_url)
+
+    def test_update_action_rdr_url_to_reject(self):
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        l7policy = self.create_l7policy(
+            uuidutils.generate_uuid(), listener.id, 1,
+            action=constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+            redirect_url="http://www.example.com/")
+        self.session.commit()
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertIsNone(new_l7policy.redirect_pool_id)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                         new_l7policy.action)
+        self.l7policy_repo.update(
+            self.session, id=l7policy.id,
+            action=constants.L7POLICY_ACTION_REJECT)
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertEqual(constants.L7POLICY_ACTION_REJECT,
+                         new_l7policy.action)
+        self.assertIsNone(new_l7policy.redirect_url)
+        self.assertIsNone(new_l7policy.redirect_pool_id)
+
+    def test_update_action_rdr_pool_to_reject(self):
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        pool = self.create_pool(uuidutils.generate_uuid())
+        l7policy = self.create_l7policy(
+            uuidutils.generate_uuid(), listener.id, 1,
+            action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+            redirect_pool_id=pool.id)
+        self.session.commit()
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertIsNone(new_l7policy.redirect_url)
+        self.l7policy_repo.update(
+            self.session, id=l7policy.id,
+            action=constants.L7POLICY_ACTION_REJECT)
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
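+        # Changing the action to REJECT should clear both the redirect_url
+        # and redirect_pool_id fields, as asserted below.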
+        self.assertEqual(constants.L7POLICY_ACTION_REJECT,
+                         new_l7policy.action)
+        self.assertIsNone(new_l7policy.redirect_url)
+        self.assertIsNone(new_l7policy.redirect_pool_id)
+
+    def test_update_reject_to_rdr_pool(self):
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        pool = self.create_pool(uuidutils.generate_uuid())
+        l7policy = self.create_l7policy(
+            uuidutils.generate_uuid(), listener.id, 1,
+            action=constants.L7POLICY_ACTION_REJECT)
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertIsNone(new_l7policy.redirect_url)
+        self.assertIsNone(new_l7policy.redirect_pool_id)
+        self.l7policy_repo.update(
+            self.session, id=l7policy.id,
+            redirect_pool_id=pool.id)
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertEqual(pool.id, new_l7policy.redirect_pool_id)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+                         new_l7policy.action)
+        self.assertIsNone(new_l7policy.redirect_url)
+
+    def test_update_reject_to_rdr_url(self):
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        l7policy = self.create_l7policy(
+            uuidutils.generate_uuid(), listener.id, 1,
+            action=constants.L7POLICY_ACTION_REJECT)
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertIsNone(new_l7policy.redirect_url)
+        self.assertIsNone(new_l7policy.redirect_pool_id)
+        self.l7policy_repo.update(
+            self.session, id=l7policy.id,
+            redirect_url='http://www.example.com/')
+        new_l7policy = self.l7policy_repo.get(self.session, id=l7policy.id)
+        self.assertEqual('http://www.example.com/', new_l7policy.redirect_url)
+        self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                         new_l7policy.action)
+        self.assertIsNone(new_l7policy.redirect_pool_id)
+
+    def test_update_position_only(self):
+        listener = self.create_listener(uuidutils.generate_uuid(), 80)
+        l7policy_a = self.create_l7policy(
+            uuidutils.generate_uuid(), listener.id, 1,
+            action=constants.L7POLICY_ACTION_REJECT)
+        l7policy_b = self.create_l7policy(
+            uuidutils.generate_uuid(), listener.id, 2,
+            action=constants.L7POLICY_ACTION_REJECT)
+        new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id)
+        new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id)
+        self.assertEqual(1, new_l7policy_a.position)
+        self.assertEqual(2, new_l7policy_b.position)
+        self.l7policy_repo.update(
+            self.session, id=l7policy_a.id,
+            position=999)
+        new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id)
+        new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id)
+        self.assertEqual(2, new_l7policy_a.position)
+        self.assertEqual(1, new_l7policy_b.position)
+        self.l7policy_repo.update(
+            self.session, id=l7policy_a.id,
+            position=1)
+        new_l7policy_a = self.l7policy_repo.get(self.session, id=l7policy_a.id)
+        new_l7policy_b = self.l7policy_repo.get(self.session, id=l7policy_b.id)
+        self.assertEqual(1, new_l7policy_a.position)
+        self.assertEqual(2, new_l7policy_b.position)
+
+    def test_create_with_invalid_redirect_pool_id(self):
+        bad_lb = self.lb_repo.create(
+            self.session, id=uuidutils.generate_uuid(),
+            project_id=uuidutils.generate_uuid(),
+            provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE, enabled=True)
+        bad_pool = self.pool_repo.create(
+            self.session, id=uuidutils.generate_uuid(),
+            project_id=bad_lb.project_id,
+            protocol=constants.PROTOCOL_HTTP,
+            lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN,
+            provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE, enabled=True)
+        self.assertRaises(exceptions.NotFound, self.create_l7policy,
+                          uuidutils.generate_uuid(), self.listener.id, 1,
+                          action=constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
+                          redirect_pool_id=bad_pool.id)
+
+    def test_create_with_invalid_redirect_url(self):
+        self.assertRaises(exceptions.InvalidURL, self.create_l7policy,
+                          uuidutils.generate_uuid(), self.listener.id, 1,
+                          action=constants.L7POLICY_ACTION_REDIRECT_TO_URL,
+                          redirect_url="This is not a URL.")
+
+
+class L7RuleRepositoryTest(BaseRepositoryTest):
+
+    def setUp(self):
+        super().setUp()
+        self.listener = self.listener_repo.create(
+            self.session, id=uuidutils.generate_uuid(),
+            project_id=self.FAKE_UUID_2,
+            protocol=constants.PROTOCOL_HTTP, protocol_port=80,
+            connection_limit=1, operating_status=constants.ONLINE,
+            provisioning_status=constants.ACTIVE, enabled=True, peer_port=1025)
+        self.l7policy = self.l7policy_repo.create(
+            self.session, id=self.FAKE_UUID_1, name='l7policy_test',
+            description='l7policy_description', listener_id=self.listener.id,
+            position=1, action=constants.L7POLICY_ACTION_REJECT,
+            operating_status=constants.ONLINE,
+            provisioning_status=constants.ACTIVE, enabled=True)
+
+    def create_l7rule(self, l7rule_id, l7policy_id,
+                      type=constants.L7RULE_TYPE_PATH,
+                      compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+                      key=None, value="/api", invert=False, enabled=True):
+        l7rule = self.l7rule_repo.create(
+            self.session, id=l7rule_id, l7policy_id=l7policy_id,
+            type=type, compare_type=compare_type, key=key, value=value,
+            invert=invert, provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE, enabled=enabled,
+            tags=['test_tag'])
+        return l7rule
+
+    def test_get(self):
+        l7rule = self.create_l7rule(uuidutils.generate_uuid(),
+                                    self.l7policy.id)
+        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
+        self.assertIsInstance(new_l7rule, data_models.L7Rule)
+        self.assertEqual(l7rule, new_l7rule)
+
+    def test_get_all(self):
+        l7policy = self.l7policy_repo.create(
+            self.session, id=uuidutils.generate_uuid(), name='l7policy_test',
+            description='l7policy_description', listener_id=self.listener.id,
+            position=1, action=constants.L7POLICY_ACTION_REJECT,
+            operating_status=constants.ONLINE,
+            provisioning_status=constants.ACTIVE, enabled=True)
+        l7rule_a = self.create_l7rule(uuidutils.generate_uuid(), l7policy.id)
+        l7rule_b = self.create_l7rule(uuidutils.generate_uuid(), l7policy.id)
+        new_l7rule_a = self.l7rule_repo.get(self.session,
+                                            id=l7rule_a.id)
+        new_l7rule_b = self.l7rule_repo.get(self.session,
+                                            id=l7rule_b.id)
+        l7rule_list, _ = self.l7rule_repo.get_all(
+            self.session, l7policy_id=l7policy.id)
+        self.assertIsInstance(l7rule_list, list)
+        self.assertEqual(2, len(l7rule_list))
+        self.assertIn(new_l7rule_a.id, [r.id for r in l7rule_list])
+        self.assertIn(new_l7rule_b.id, [r.id for r in l7rule_list])
+
+    def test_create(self):
+        l7rule = self.create_l7rule(self.FAKE_UUID_1,
+                                    self.l7policy.id)
+        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
+        self.assertEqual(self.FAKE_UUID_1, new_l7rule.id)
+        self.assertEqual(self.l7policy.id, new_l7rule.l7policy_id)
+        self.assertEqual(constants.L7RULE_TYPE_PATH, new_l7rule.type)
+        self.assertEqual(constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+                         new_l7rule.compare_type)
+        self.assertIsNone(new_l7rule.key)
+        self.assertEqual('/api', new_l7rule.value)
+        self.assertFalse(new_l7rule.invert)
+
+    def test_create_without_id(self):
+        l7rule = self.l7rule_repo.create(
+            self.session, id=None, l7policy_id=self.l7policy.id,
+            type=constants.L7RULE_TYPE_PATH,
+            compare_type=constants.L7RULE_COMPARE_TYPE_CONTAINS,
+            provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE,
+            value='something',
+            enabled=True)
+        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
+        self.assertIsNotNone(l7rule.id)
+        self.assertEqual(self.l7policy.id, new_l7rule.l7policy_id)
+        self.assertEqual(constants.L7RULE_TYPE_PATH, new_l7rule.type)
+        self.assertEqual(constants.L7RULE_COMPARE_TYPE_CONTAINS,
+                         new_l7rule.compare_type)
+        self.assertIsNone(new_l7rule.key)
+        self.assertEqual('something', new_l7rule.value)
+        self.assertFalse(new_l7rule.invert)
+
+    def test_l7rule_create_without_l7policy_id(self):
+        self.assertRaises(
+            db_exception.DBError, self.l7rule_repo.create,
+            self.session, id=None, type=constants.L7RULE_TYPE_PATH,
+            compare_type=constants.L7RULE_COMPARE_TYPE_CONTAINS,
+            provisioning_status=constants.ACTIVE,
+            operating_status=constants.ONLINE,
+            value='something',
+            enabled=True)
+
+    def test_update(self):
+        l7rule = self.create_l7rule(uuidutils.generate_uuid(),
+                                    self.l7policy.id,
+                                    type=constants.L7RULE_TYPE_HEADER,
+                                    key="My-Header")
+        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
+        self.assertEqual('/api', new_l7rule.value)
+        self.assertFalse(new_l7rule.invert)
+        update_dict = {'type': constants.L7RULE_TYPE_PATH,
+                       'value': '/images',
+                       'invert': True}
+        self.l7rule_repo.update(self.session, id=l7rule.id, **update_dict)
+        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
+        self.assertEqual(constants.L7RULE_TYPE_PATH, new_l7rule.type)
+        self.assertEqual('/images', new_l7rule.value)
+        self.assertIsNone(new_l7rule.key)
+        self.assertTrue(new_l7rule.invert)
+
+    def test_update_bad_id(self):
+        self.assertRaises(exceptions.NotFound,
+                          self.l7rule_repo.update, self.session,
+                          id='bad id', value='/some/path')
+
+    def test_bad_update(self):
+        l7rule = self.create_l7rule(uuidutils.generate_uuid(),
+                                    self.l7policy.id)
+        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
+        self.assertEqual('/api', new_l7rule.value)
+        self.assertRaises(exceptions.InvalidString,
+                          self.l7rule_repo.update, self.session,
+                          id=l7rule.id, value='bad path')
+
+    def test_delete(self):
+        l7rule = self.create_l7rule(uuidutils.generate_uuid(),
+                                    self.l7policy.id)
+        self.l7rule_repo.delete(self.session, id=l7rule.id)
+        self.assertIsNone(self.l7rule_repo.get(self.session, id=l7rule.id))
+
+    def test_create_bad_rule_type(self):
+        self.assertRaises(exceptions.InvalidL7Rule, self.create_l7rule,
+                          self.FAKE_UUID_1, self.l7policy.id,
+                          type="not valid")
+
+    def test_create_header_rule(self):
+        l7rule = self.create_l7rule(
+            uuidutils.generate_uuid(),
+            self.l7policy.id,
+            type=constants.L7RULE_TYPE_HEADER,
+            compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
+            key="Some-header",
+            value='"some value"')
+        new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id)
+        self.assertEqual(constants.L7RULE_TYPE_HEADER, new_l7rule.type)
+        self.assertEqual(constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
+                         new_l7rule.compare_type)
+        self.assertEqual('Some-header', new_l7rule.key)
+        self.assertEqual('"some value"', new_l7rule.value)
+
+    def test_create_header_rule_no_key(self):
+        self.assertRaises(
+            exceptions.InvalidL7Rule, self.create_l7rule,
+            self.FAKE_UUID_1, self.l7policy.id,
+            type=constants.L7RULE_TYPE_HEADER,
+            compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
+            value='"some value"')
+
+    def test_create_header_rule_invalid_key(self):
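+        # A key containing a space and a ';' is not a valid header name,
+        # so this create is expected to fail string validation.
+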
self.assertRaises( + exceptions.InvalidString, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_HEADER, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + key='bad key;', + value='"some value"') + + def test_create_header_rule_invalid_value_string(self): + self.assertRaises( + exceptions.InvalidString, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_HEADER, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + key='Some-header', + value='\x18') + + def test_create_header_rule_invalid_value_regex(self): + self.assertRaises( + exceptions.InvalidRegex, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_HEADER, + compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, + key='Some-header', + value='bad regex\\') + + def test_create_header_rule_bad_compare_type(self): + self.assertRaises( + exceptions.InvalidL7Rule, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_HEADER, + compare_type="bad compare", + key="Some-header", + value='"some value"') + + def test_create_cookie_rule(self): + l7rule = self.create_l7rule( + uuidutils.generate_uuid(), + self.l7policy.id, + type=constants.L7RULE_TYPE_COOKIE, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + key="some_cookie", + value='some-value') + new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) + self.assertEqual(constants.L7RULE_TYPE_COOKIE, new_l7rule.type) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + new_l7rule.compare_type) + self.assertEqual('some_cookie', new_l7rule.key) + self.assertEqual('some-value', new_l7rule.value) + + def test_create_cookie_rule_no_key(self): + self.assertRaises( + exceptions.InvalidL7Rule, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_COOKIE, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + value='some-value') + + def test_create_cookie_rule_invalid_key(self): + self.assertRaises( + exceptions.InvalidString, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_COOKIE, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + key='bad key;', + value='some-value') + + def test_create_cookie_rule_invalid_value_string(self): + self.assertRaises( + exceptions.InvalidString, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_COOKIE, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + key='some_cookie', + value='bad value;') + + def test_create_cookie_rule_invalid_value_regex(self): + self.assertRaises( + exceptions.InvalidRegex, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_COOKIE, + compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, + key='some_cookie', + value='bad regex\\') + + def test_create_cookie_rule_bad_compare_type(self): + self.assertRaises( + exceptions.InvalidL7Rule, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_COOKIE, + compare_type="bad compare", + key="some_cookie", + value='some-value') + + def test_create_path_rule(self): + l7rule = self.create_l7rule( + uuidutils.generate_uuid(), + self.l7policy.id, + type=constants.L7RULE_TYPE_PATH, + compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + value='/some/path') + new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) + self.assertEqual(constants.L7RULE_TYPE_PATH, new_l7rule.type) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + 
new_l7rule.compare_type) + self.assertEqual('/some/path', new_l7rule.value) + + def test_create_path_rule_invalid_value_string(self): + self.assertRaises( + exceptions.InvalidString, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_PATH, + compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + value='bad path') + + def test_create_path_rule_invalid_value_regex(self): + self.assertRaises( + exceptions.InvalidRegex, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_PATH, + compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, + value='bad regex\\') + + def test_create_path_rule_bad_compare_type(self): + self.assertRaises( + exceptions.InvalidL7Rule, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_PATH, + compare_type="bad compare", + value='/some/path') + + def test_create_host_name_rule(self): + l7rule = self.create_l7rule( + uuidutils.generate_uuid(), + self.l7policy.id, + type=constants.L7RULE_TYPE_HOST_NAME, + compare_type=constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + value='.example.com') + new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) + self.assertEqual(constants.L7RULE_TYPE_HOST_NAME, new_l7rule.type) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + new_l7rule.compare_type) + self.assertEqual('.example.com', new_l7rule.value) + + def test_create_host_name_rule_invalid_value_string(self): + self.assertRaises( + exceptions.InvalidString, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_HOST_NAME, + compare_type=constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + value='bad hostname') + + def test_create_host_name_rule_invalid_value_regex(self): + self.assertRaises( + exceptions.InvalidRegex, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_HOST_NAME, + compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, + value='bad regex\\') + + def test_create_host_name_rule_bad_compare_type(self): + self.assertRaises( + exceptions.InvalidL7Rule, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_HOST_NAME, + compare_type="bad compare", + value='.example.com') + + def test_create_file_type_rule(self): + l7rule = self.create_l7rule( + uuidutils.generate_uuid(), + self.l7policy.id, + type=constants.L7RULE_TYPE_FILE_TYPE, + compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, + value='png|jpg') + new_l7rule = self.l7rule_repo.get(self.session, id=l7rule.id) + self.assertEqual(constants.L7RULE_TYPE_FILE_TYPE, new_l7rule.type) + self.assertEqual(constants.L7RULE_COMPARE_TYPE_REGEX, + new_l7rule.compare_type) + self.assertEqual('png|jpg', new_l7rule.value) + + def test_create_file_type_rule_invalid_value_string(self): + self.assertRaises( + exceptions.InvalidString, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_FILE_TYPE, + compare_type=constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + value='bad file type') + + def test_create_file_type_rule_invalid_value_regex(self): + self.assertRaises( + exceptions.InvalidRegex, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_FILE_TYPE, + compare_type=constants.L7RULE_COMPARE_TYPE_REGEX, + value='bad regex\\') + + def test_create_file_type_rule_bad_compare_type(self): + self.assertRaises( + exceptions.InvalidL7Rule, self.create_l7rule, + self.FAKE_UUID_1, self.l7policy.id, + type=constants.L7RULE_TYPE_FILE_TYPE, + 
compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
+            value='png|jpg')
+
+
+class TestQuotasRepository(BaseRepositoryTest):
+
+    def setUp(self):
+        super().setUp()
+
+    def update_quotas(self, project_id, load_balancer=20, listener=20, pool=20,
+                      health_monitor=20, member=20, l7policy=20, l7rule=20):
+        quota = {'load_balancer': load_balancer,
+                 'listener': listener,
+                 'pool': pool,
+                 'health_monitor': health_monitor,
+                 'member': member,
+                 'l7policy': l7policy,
+                 'l7rule': l7rule}
+        quotas = self.quota_repo.update(self.session, project_id, quota=quota)
+        return quotas
+
+    def _compare(self, expected, observed):
+        self.assertEqual(expected.project_id, observed.project_id)
+        self.assertEqual(expected.load_balancer,
+                         observed.load_balancer)
+        self.assertEqual(expected.listener,
+                         observed.listener)
+        self.assertEqual(expected.pool,
+                         observed.pool)
+        self.assertEqual(expected.health_monitor,
+                         observed.health_monitor)
+        self.assertEqual(expected.member,
+                         observed.member)
+        self.assertEqual(expected.l7policy,
+                         observed.l7policy)
+        self.assertEqual(expected.l7rule,
+                         observed.l7rule)
+
+    def test_get(self):
+        expected = self.update_quotas(self.FAKE_UUID_1)
+        observed = self.quota_repo.get(self.session,
+                                       project_id=self.FAKE_UUID_1)
+        self.assertIsInstance(observed, data_models.Quotas)
+        self._compare(expected, observed)
+
+    def test_update(self):
+        first_expected = self.update_quotas(self.FAKE_UUID_1)
+        first_observed = self.quota_repo.get(self.session,
+                                             project_id=self.FAKE_UUID_1)
+        second_expected = self.update_quotas(self.FAKE_UUID_1, load_balancer=1)
+        second_observed = self.quota_repo.get(self.session,
+                                              project_id=self.FAKE_UUID_1)
+        self.assertIsInstance(first_expected, data_models.Quotas)
+        self._compare(first_expected, first_observed)
+        self.assertIsInstance(second_expected, data_models.Quotas)
+        self._compare(second_expected, second_observed)
+        self.assertIsNot(first_expected.load_balancer,
+                         second_expected.load_balancer)
+
+    def test_delete(self):
+        expected = self.update_quotas(self.FAKE_UUID_1)
+        observed = self.quota_repo.get(self.session,
+                                       project_id=self.FAKE_UUID_1)
+        self.assertIsInstance(observed, data_models.Quotas)
+        self._compare(expected, observed)
+        self.quota_repo.delete(self.session, self.FAKE_UUID_1)
+        observed = self.quota_repo.get(self.session,
+                                       project_id=self.FAKE_UUID_1)
+        self.assertIsNone(observed.health_monitor)
+        self.assertIsNone(observed.load_balancer)
+        self.assertIsNone(observed.listener)
+        self.assertIsNone(observed.member)
+        self.assertIsNone(observed.pool)
+        self.assertIsNone(observed.l7policy)
+        self.assertIsNone(observed.l7rule)
+
+    def test_delete_non_existent(self):
+        self.assertRaises(exceptions.NotFound,
+                          self.quota_repo.delete,
+                          self.session, 'bogus')
+
+
+class FlavorProfileRepositoryTest(BaseRepositoryTest):
+
+    def create_flavor_profile(self, fp_id):
+        fp = self.flavor_profile_repo.create(
+            self.session, id=fp_id, name="fp1", provider_name='pr1',
+            flavor_data="{'image': 'ubuntu'}")
+        return fp
+
+    def test_get(self):
+        fp = self.create_flavor_profile(fp_id=self.FAKE_UUID_1)
+        new_fp = self.flavor_profile_repo.get(self.session, id=fp.id)
+        self.assertIsInstance(new_fp, data_models.FlavorProfile)
+        self.assertEqual(fp, new_fp)
+
+    def test_get_all(self):
+        fp1 = self.create_flavor_profile(fp_id=self.FAKE_UUID_1)
+        fp2 = self.create_flavor_profile(fp_id=self.FAKE_UUID_2)
+        fp_list, _ = self.flavor_profile_repo.get_all(
+            self.session,
+            query_options=defer(db_models.FlavorProfile.name))
+        self.assertIsInstance(fp_list, list)
+
self.assertEqual(2, len(fp_list)) + self.assertEqual(fp1, fp_list[0]) + self.assertEqual(fp2, fp_list[1]) + + def test_create(self): + fp = self.create_flavor_profile(fp_id=self.FAKE_UUID_1) + self.assertIsInstance(fp, data_models.FlavorProfile) + self.assertEqual(self.FAKE_UUID_1, fp.id) + self.assertEqual("fp1", fp.name) + + def test_delete(self): + fp = self.create_flavor_profile(fp_id=self.FAKE_UUID_1) + self.flavor_profile_repo.delete(self.session, id=fp.id) + self.assertIsNone(self.flavor_profile_repo.get( + self.session, id=fp.id)) + + +class FlavorRepositoryTest(BaseRepositoryTest): + + PROVIDER_NAME = 'provider1' + + def create_flavor_profile(self): + fp = self.flavor_profile_repo.create( + self.session, id=uuidutils.generate_uuid(), + name="fp1", provider_name=self.PROVIDER_NAME, + flavor_data='{"image": "ubuntu"}') + self.session.commit() + return fp + + def create_flavor(self, flavor_id, name): + fp = self.create_flavor_profile() + flavor = self.flavor_repo.create( + self.session, id=flavor_id, name=name, + flavor_profile_id=fp.id, description='test', + enabled=True) + self.session.commit() + return flavor + + def test_get(self): + flavor = self.create_flavor(flavor_id=self.FAKE_UUID_2, name='flavor') + new_flavor = self.flavor_repo.get(self.session, id=flavor.id) + self.assertIsInstance(new_flavor, data_models.Flavor) + self.assertEqual(flavor.id, new_flavor.id) + self.assertEqual(flavor.name, new_flavor.name) + + def test_get_all(self): + fl1 = self.create_flavor(flavor_id=self.FAKE_UUID_2, name='flavor1') + fl2 = self.create_flavor(flavor_id=self.FAKE_UUID_3, name='flavor2') + fl_list, _ = self.flavor_repo.get_all( + self.session, + query_options=defer(db_models.Flavor.enabled)) + self.assertIsInstance(fl_list, list) + self.assertEqual(2, len(fl_list)) + self.assertEqual(fl1.id, fl_list[0].id) + self.assertEqual(fl1.name, fl_list[0].name) + self.assertEqual(fl2.id, fl_list[1].id) + self.assertEqual(fl2.name, fl_list[1].name) + + def test_create(self): + fl = self.create_flavor(flavor_id=self.FAKE_UUID_2, name='fl1') + self.assertIsInstance(fl, data_models.Flavor) + self.assertEqual(self.FAKE_UUID_2, fl.id) + self.assertEqual("fl1", fl.name) + + def test_delete(self): + fl = self.create_flavor(flavor_id=self.FAKE_UUID_2, name='fl1') + self.flavor_repo.delete(self.session, id=fl.id) + self.assertIsNone(self.flavor_repo.get( + self.session, id=fl.id)) + + def test_get_flavor_metadata_dict(self): + ref_dict = {'image': 'ubuntu'} + self.create_flavor(flavor_id=self.FAKE_UUID_2, name='fl1') + flavor_metadata_dict = self.flavor_repo.get_flavor_metadata_dict( + self.session, self.FAKE_UUID_2) + self.assertEqual(ref_dict, flavor_metadata_dict) + + # Test missing flavor + self.assertRaises(sa_exception.NoResultFound, + self.flavor_repo.get_flavor_metadata_dict, + self.session, self.FAKE_UUID_1) + + def test_get_flavor_provider(self): + self.create_flavor(flavor_id=self.FAKE_UUID_2, name='fl1') + provider_name = self.flavor_repo.get_flavor_provider(self.session, + self.FAKE_UUID_2) + self.assertEqual(self.PROVIDER_NAME, provider_name) + + # Test missing flavor + self.assertRaises(sa_exception.NoResultFound, + self.flavor_repo.get_flavor_provider, + self.session, self.FAKE_UUID_1) diff --git a/octavia/tests/unit/__init__.py b/octavia/tests/unit/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance 
with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/__init__.py b/octavia/tests/unit/amphorae/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/backends/__init__.py b/octavia/tests/unit/amphorae/backends/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/backends/agent/__init__.py b/octavia/tests/unit/amphorae/backends/agent/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/agent/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/backends/agent/api_server/__init__.py b/octavia/tests/unit/amphorae/backends/agent/api_server/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/agent/api_server/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py b/octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py
new file mode 100644
index 0000000000..3c494276b8
--- /dev/null
+++ b/octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py
@@ -0,0 +1,450 @@
+# Copyright 2017 Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+import random
+from unittest import mock
+
+from oslo_utils import uuidutils
+
+from octavia.amphorae.backends.agent import api_server
+from octavia.amphorae.backends.agent.api_server import amphora_info
+from octavia.amphorae.backends.agent.api_server import util
+from octavia.common.jinja.haproxy.combined_listeners import jinja_cfg
+from octavia.tests.common import utils as test_utils
+import octavia.tests.unit.base as base
+from octavia.tests.unit.common.sample_configs import sample_configs_combined
+
+
+class TestAmphoraInfo(base.TestCase):
+
+    API_VERSION = random.randrange(0, 10000)
+    BASE_AMP_PATH = '/var/lib/octavia'
+    BASE_CRT_PATH = BASE_AMP_PATH + '/certs'
+    HAPROXY_VERSION = random.randrange(0, 10000)
+    KEEPALIVED_VERSION = random.randrange(0, 10000)
+    IPVSADM_VERSION = random.randrange(0, 10000)
+    FAKE_LISTENER_ID_1 = uuidutils.generate_uuid()
+    FAKE_LISTENER_ID_2 = uuidutils.generate_uuid()
+    FAKE_LISTENER_ID_3 = uuidutils.generate_uuid()
+    FAKE_LISTENER_ID_4 = uuidutils.generate_uuid()
+    LB_ID_1 = uuidutils.generate_uuid()
+    LB_ID_2 = uuidutils.generate_uuid()
+
+    def setUp(self):
+        super().setUp()
+        self.osutils_mock = mock.MagicMock()
+        self.amp_info = amphora_info.AmphoraInfo(self.osutils_mock)
+        self.lvs_driver = mock.MagicMock()
+
+        # setup a fake haproxy config file
+        templater = jinja_cfg.JinjaTemplater(
+            base_amp_path=self.BASE_AMP_PATH,
+            base_crt_dir=self.BASE_CRT_PATH)
+        tls_tuple = {'cont_id_1':
+                     sample_configs_combined.sample_tls_container_tuple(
+                         id='tls_container_id',
+                         certificate='imaCert1', private_key='imaPrivateKey1',
+                         primary_cn='FakeCN')}
+        self.rendered_haproxy_cfg = templater.render_loadbalancer_obj(
+            sample_configs_combined.sample_amphora_tuple(),
+            [sample_configs_combined.sample_listener_tuple(
+                proto='TERMINATED_HTTPS', tls=True, sni=True)],
+            tls_tuple)
+        path = util.config_path(self.LB_ID_1)
+        self.useFixture(test_utils.OpenFixture(path,
+                                               self.rendered_haproxy_cfg))
+
+    def _return_version(self, package_name):
+        if package_name == 'ipvsadm':
+            return self.IPVSADM_VERSION
+        elif package_name == 'keepalived':
+            return self.KEEPALIVED_VERSION
+        else:
+            return self.HAPROXY_VERSION
+
+    @mock.patch.object(amphora_info,
"webob") + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._get_version_of_installed_package', + return_value=HAPROXY_VERSION) + @mock.patch('socket.gethostname', return_value='FAKE_HOST') + def test_compile_amphora_info(self, mock_gethostname, mock_pkg_version, + mock_webob): + original_version = api_server.VERSION + api_server.VERSION = self.API_VERSION + expected_dict = {'api_version': self.API_VERSION, + 'hostname': 'FAKE_HOST', + 'haproxy_version': self.HAPROXY_VERSION} + self.amp_info.compile_amphora_info() + mock_webob.Response.assert_called_once_with(json=expected_dict) + api_server.VERSION = original_version + + @mock.patch.object(amphora_info, "webob") + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._get_version_of_installed_package') + @mock.patch('socket.gethostname', return_value='FAKE_HOST') + def test_compile_amphora_info_for_udp(self, mock_gethostname, + mock_pkg_version, mock_webob): + + mock_pkg_version.side_effect = self._return_version + self.lvs_driver.get_subscribed_amp_compile_info.side_effect = [ + ['keepalived', 'ipvsadm']] + original_version = api_server.VERSION + api_server.VERSION = self.API_VERSION + expected_dict = {'api_version': self.API_VERSION, + 'hostname': 'FAKE_HOST', + 'haproxy_version': self.HAPROXY_VERSION, + 'keepalived_version': self.KEEPALIVED_VERSION, + 'ipvsadm_version': self.IPVSADM_VERSION + } + self.amp_info.compile_amphora_info(extend_lvs_driver=self.lvs_driver) + mock_webob.Response.assert_called_once_with(json=expected_dict) + api_server.VERSION = original_version + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_listeners', return_value=[FAKE_LISTENER_ID_1, + FAKE_LISTENER_ID_2]) + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_loadbalancers', return_value=[LB_ID_1, LB_ID_2]) + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._get_meminfo') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._cpu') + @mock.patch('os.statvfs') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._get_networks') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._load') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._get_version_of_installed_package') + @mock.patch('octavia.amphorae.backends.agent.api_server.' 
+ 'amphora_info.AmphoraInfo._count_haproxy_processes') + @mock.patch('socket.gethostname', return_value='FAKE_HOST') + def test_compile_amphora_details(self, mhostname, m_count, m_pkg_version, + m_load, m_get_nets, m_os, m_cpu, + mget_mem, mget_loadbalancers, + mget_listeners): + mget_mem.return_value = {'SwapCached': 0, 'Buffers': 344792, + 'MemTotal': 21692784, 'Cached': 4271856, + 'Slab': 534384, 'MemFree': 12685624, + 'Shmem': 9520} + m_cpu.return_value = {'user': '252551', 'softirq': '8336', + 'system': '52554', 'total': 7503411} + m_pkg_version.side_effect = self._return_version + mdisk_info = mock.MagicMock() + m_os.return_value = mdisk_info + mdisk_info.f_blocks = 34676992 + mdisk_info.f_bfree = 28398016 + mdisk_info.f_frsize = 4096 + mdisk_info.f_bavail = 26630646 + m_get_nets.return_value = {'eth1': {'network_rx': 996, + 'network_tx': 418}, + 'eth2': {'network_rx': 848, + 'network_tx': 578}} + m_load.return_value = ['0.09', '0.11', '0.10'] + m_count.return_value = 5 + original_version = api_server.VERSION + api_server.VERSION = self.API_VERSION + expected_dict = {'active': True, + 'active_tuned_profiles': '', + 'api_version': self.API_VERSION, + 'cpu': {'soft_irq': '8336', + 'system': '52554', + 'total': 7503411, + 'user': '252551'}, + 'cpu_count': os.cpu_count(), + 'disk': {'available': 109079126016, + 'used': 25718685696}, + 'haproxy_count': 5, + 'haproxy_version': self.HAPROXY_VERSION, + 'hostname': 'FAKE_HOST', + 'listeners': sorted([self.FAKE_LISTENER_ID_1, + self.FAKE_LISTENER_ID_2]), + 'load': ['0.09', '0.11', '0.10'], + 'memory': {'buffers': 344792, + 'cached': 4271856, + 'free': 12685624, + 'shared': 9520, + 'slab': 534384, + 'swap_used': 0, + 'total': 21692784}, + 'networks': {'eth1': {'network_rx': 996, + 'network_tx': 418}, + 'eth2': {'network_rx': 848, + 'network_tx': 578}}, + 'packages': {}, + 'topology': 'SINGLE', + 'topology_status': 'OK'} + self.useFixture(test_utils.OpenFixture('/etc/tuned/active_profile')) + actual = self.amp_info.compile_amphora_details() + self.assertEqual(expected_dict, actual.json) + m_count.assert_called_once_with(sorted(mget_loadbalancers())) + api_server.VERSION = original_version + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'is_lvs_listener_running') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_lvs_listeners', + return_value=[FAKE_LISTENER_ID_3, FAKE_LISTENER_ID_4]) + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_loadbalancers') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._get_meminfo') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._cpu') + @mock.patch('os.statvfs') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._get_networks') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._load') + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'amphora_info.AmphoraInfo._get_version_of_installed_package') + @mock.patch('octavia.amphorae.backends.agent.api_server.' 
+ 'amphora_info.AmphoraInfo._count_haproxy_processes') + @mock.patch('socket.gethostname', return_value='FAKE_HOST') + def test_compile_amphora_details_for_ipvs(self, mhostname, m_count, + m_pkg_version, m_load, + m_get_nets, + m_os, m_cpu, mget_mem, + mock_get_lb, mget_lvs_listener, + mock_is_lvs_listener_running): + mget_mem.return_value = {'SwapCached': 0, 'Buffers': 344792, + 'MemTotal': 21692784, 'Cached': 4271856, + 'Slab': 534384, 'MemFree': 12685624, + 'Shmem': 9520} + m_cpu.return_value = {'user': '252551', 'softirq': '8336', + 'system': '52554', 'total': 7503411} + m_pkg_version.side_effect = self._return_version + mdisk_info = mock.MagicMock() + m_os.return_value = mdisk_info + mdisk_info.f_blocks = 34676992 + mdisk_info.f_bfree = 28398016 + mdisk_info.f_frsize = 4096 + mdisk_info.f_bavail = 26630646 + m_get_nets.return_value = {'eth1': {'network_rx': 996, + 'network_tx': 418}, + 'eth2': {'network_rx': 848, + 'network_tx': 578}} + m_load.return_value = ['0.09', '0.11', '0.10'] + m_count.return_value = 5 + self.lvs_driver.get_subscribed_amp_compile_info.return_value = [ + 'keepalived', 'ipvsadm'] + mock_is_lvs_listener_running.side_effect = [True, False] + mock_get_lb.return_value = [self.LB_ID_1] + original_version = api_server.VERSION + api_server.VERSION = self.API_VERSION + expected_dict = {'active': True, + 'active_tuned_profiles': '', + 'api_version': self.API_VERSION, + 'cpu': {'soft_irq': '8336', + 'system': '52554', + 'total': 7503411, + 'user': '252551'}, + 'cpu_count': os.cpu_count(), + 'disk': {'available': 109079126016, + 'used': 25718685696}, + 'haproxy_count': 5, + 'haproxy_version': self.HAPROXY_VERSION, + 'keepalived_version': self.KEEPALIVED_VERSION, + 'ipvsadm_version': self.IPVSADM_VERSION, + 'lvs_listener_process_count': 1, + 'hostname': 'FAKE_HOST', + 'listeners': sorted(list({self.FAKE_LISTENER_ID_3, + self.FAKE_LISTENER_ID_4, + 'sample_listener_id_1'})), + 'load': ['0.09', '0.11', '0.10'], + 'memory': {'buffers': 344792, + 'cached': 4271856, + 'free': 12685624, + 'shared': 9520, + 'slab': 534384, + 'swap_used': 0, + 'total': 21692784}, + 'networks': {'eth1': {'network_rx': 996, + 'network_tx': 418}, + 'eth2': {'network_rx': 848, + 'network_tx': 578}}, + 'packages': {}, + 'topology': 'SINGLE', + 'topology_status': 'OK'} + self.useFixture(test_utils.OpenFixture('/etc/tuned/active_profile')) + actual = self.amp_info.compile_amphora_details(self.lvs_driver) + self.assertEqual(expected_dict, actual.json) + api_server.VERSION = original_version + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'is_lb_running') + def test__count_haproxy_process(self, mock_is_running): + + # Test no listeners passed in + result = self.amp_info._count_haproxy_processes([]) + self.assertEqual(0, result) + + # Test with a listener specified + mock_is_running.side_effect = [True, False] + result = self.amp_info._count_haproxy_processes( + [uuidutils.generate_uuid(), uuidutils.generate_uuid()]) + self.assertEqual(1, result) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'is_lvs_listener_running') + def test__count_lvs_listener_processes(self, mock_is_lvs_listener_running): + mock_is_lvs_listener_running.side_effect = [True, False, True] + expected = 2 + actual = self.amp_info._count_lvs_listener_processes( + self.lvs_driver, [self.FAKE_LISTENER_ID_1, + self.FAKE_LISTENER_ID_2, + self.FAKE_LISTENER_ID_3]) + self.assertEqual(expected, actual) + + @mock.patch('octavia.amphorae.backends.agent.api_server.' 
+ 'amphora_info.AmphoraInfo._get_version_of_installed_package') + def test__get_extend_body_from_lvs_driver(self, m_get_version): + self.lvs_driver.get_subscribed_amp_compile_info.return_value = [ + 'keepalived', 'ipvsadm'] + m_get_version.side_effect = self._return_version + expected = { + "keepalived_version": self.KEEPALIVED_VERSION, + "ipvsadm_version": self.IPVSADM_VERSION + } + actual = self.amp_info._get_extend_body_from_lvs_driver( + self.lvs_driver) + self.assertEqual(expected, actual) + + def test__get_meminfo(self): + # Known data test + meminfo = ('MemTotal: 21692784 kB\n' + 'MemFree: 12685624 kB\n' + 'MemAvailable: 17384072 kB\n' + 'Buffers: 344792 kB\n' + 'Cached: 4271856 kB\n' + 'SwapCached: 0 kB\n' + 'Active: 5808816 kB\n' + 'Inactive: 2445236 kB\n' + 'Active(anon): 3646184 kB\n' + 'Inactive(anon): 8672 kB\n' + 'Active(file): 2162632 kB\n' + 'Inactive(file): 2436564 kB\n' + 'Unevictable: 52664 kB\n' + 'Mlocked: 52664 kB\n' + 'SwapTotal: 20476924 kB\n' + 'SwapFree: 20476924 kB\n' + 'Dirty: 92 kB\n' + 'Writeback: 0 kB\n' + 'AnonPages: 3690088 kB\n' + 'Mapped: 108520 kB\n' + 'Shmem: 9520 kB\n' + 'Slab: 534384 kB\n' + 'SReclaimable: 458160 kB\n' + 'SUnreclaim: 76224 kB\n' + 'KernelStack: 11776 kB\n' + 'PageTables: 33088 kB\n' + 'NFS_Unstable: 0 kB\n' + 'Bounce: 0 kB\n' + 'WritebackTmp: 0 kB\n' + 'CommitLimit: 31323316 kB\n' + 'Committed_AS: 6930732 kB\n' + 'VmallocTotal: 34359738367 kB\n' + 'VmallocUsed: 0 kB\n' + 'VmallocChunk: 0 kB\n' + 'HardwareCorrupted: 0 kB\n' + 'AnonHugePages: 1400832 kB\n' + 'CmaTotal: 0 kB\n' + 'CmaFree: 0 kB\n' + 'HugePages_Total: 0\n' + 'HugePages_Free: 0\n' + 'HugePages_Rsvd: 0\n' + 'HugePages_Surp: 0\n' + 'Hugepagesize: 2048 kB\n' + 'DirectMap4k: 130880 kB\n' + 'DirectMap2M: 8376320 kB\n' + 'DirectMap1G: 14680064 kB\n') + + self.useFixture(test_utils.OpenFixture('/proc/meminfo', + contents=meminfo)) + + expected_result = {'SwapCached': 0, 'DirectMap2M': 8376320, + 'CmaTotal': 0, 'Inactive': 2445236, + 'KernelStack': 11776, 'SwapTotal': 20476924, + 'VmallocUsed': 0, 'Buffers': 344792, + 'MemTotal': 21692784, 'Mlocked': 52664, + 'Cached': 4271856, 'AnonPages': 3690088, + 'Unevictable': 52664, 'SUnreclaim': 76224, + 'MemFree': 12685624, 'Writeback': 0, + 'NFS_Unstable': 0, 'VmallocTotal': 34359738367, + 'MemAvailable': 17384072, 'CmaFree': 0, + 'SwapFree': 20476924, 'AnonHugePages': 1400832, + 'DirectMap1G': 14680064, 'Hugepagesize': 2048, + 'Dirty': 92, 'Bounce': 0, 'PageTables': 33088, + 'SReclaimable': 458160, 'Active': 5808816, + 'Mapped': 108520, 'Slab': 534384, + 'Active(anon)': 3646184, 'VmallocChunk': 0, + 'Inactive(file)': 2436564, 'WritebackTmp': 0, + 'Shmem': 9520, 'Inactive(anon)': 8672, + 'HardwareCorrupted': 0, 'Active(file)': 2162632, + 'DirectMap4k': 130880, 'Committed_AS': 6930732, + 'CommitLimit': 31323316} + + result = self.amp_info._get_meminfo() + self.assertEqual(expected_result, result) + + def test__cpu(self): + + sample_stat = 'cpu 252551 802 52554 7181757 7411 0 8336 0 0 0' + + expected_result = {'user': '252551', 'iowait': '7411', 'nice': '802', + 'softirq': '8336', 'idle': '7181757', + 'system': '52554', 'total': 7503411, 'irq': '0'} + + self.useFixture(test_utils.OpenFixture('/proc/stat', + contents=sample_stat)) + + result = self.amp_info._cpu() + + self.assertEqual(expected_result, result) + + def test__load(self): + + sample_loadavg = '0.09 0.11 0.10 2/630 15346' + + expected_result = ['0.09', '0.11', '0.10'] + + self.useFixture(test_utils.OpenFixture('/proc/loadavg', + contents=sample_loadavg)) + + result = 
self.amp_info._load() + + self.assertEqual(expected_result, result) + + @mock.patch('pyroute2.NetNS', create=True) + def test__get_networks(self, mock_netns): + + # The output of get_links is huge, just pulling out the parts we + # care about for this test. + sample_get_links_minimal = [ + {'attrs': [('IFLA_IFNAME', 'lo')]}, + {'attrs': [('IFLA_IFNAME', 'eth1'), + ('IFLA_STATS64', {'tx_bytes': 418, 'rx_bytes': 996})]}, + {'attrs': [('IFLA_IFNAME', 'eth2'), + ('IFLA_STATS64', {'tx_bytes': 578, 'rx_bytes': 848})]}, + {'attrs': [('IFLA_IFNAME', 'eth3')]}] + + netns_handle = mock_netns.return_value.__enter__.return_value + netns_handle.get_links.return_value = sample_get_links_minimal + + expected_result = {'eth1': {'network_rx': 996, 'network_tx': 418}, + 'eth2': {'network_rx': 848, 'network_tx': 578}} + + result = self.amp_info._get_networks() + + self.assertEqual(expected_result, result) diff --git a/octavia/tests/unit/amphorae/backends/agent/api_server/test_haproxy_compatibility.py b/octavia/tests/unit/amphorae/backends/agent/api_server/test_haproxy_compatibility.py new file mode 100644 index 0000000000..d92355ee1f --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/agent/api_server/test_haproxy_compatibility.py @@ -0,0 +1,129 @@ +# Copyright 2017 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
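For context on the file being added here: test_haproxy_compatibility.py pins two behaviours of the compatibility layer, parsing `haproxy -v` style output into a (major, minor) tuple, and rewriting a rendered config for pre-1.6 haproxy (dropping the external-check directives and swapping in the older global section). A minimal sketch of the version parsing these tests exercise; the helper name and regex are illustrative assumptions, not the module's actual code:

    import re
    import subprocess

    def haproxy_versions_sketch():
        # The fixtures assert on outputs like "THIS-App version 1.6.3 ..."
        # and "HA-Proxy version 2.3-dev0 ...": both carry a
        # "version <major>.<minor>" token, so two numeric groups suffice.
        output = subprocess.check_output(['haproxy', '-v'])
        match = re.search(rb'version (\d+)\.(\d+)', output)
        if match is None:
            raise RuntimeError('could not parse haproxy -v output')
        return int(match.group(1)), int(match.group(2))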
+from unittest import mock + +from octavia.amphorae.backends.agent.api_server import haproxy_compatibility +from octavia.common import constants +import octavia.tests.unit.base as base +from octavia.tests.unit.common.sample_configs import sample_configs_combined + + +class HAProxyCompatTestCase(base.TestCase): + def setUp(self): + super().setUp() + self.old_haproxy_global = ( + "# Configuration for loadbalancer sample_loadbalancer_id_1\n" + "global\n" + " daemon\n" + " user nobody\n" + " log /run/rsyslog/octavia/log local0\n" + " log /run/rsyslog/octavia/log local1 notice\n" + " stats socket /var/lib/octavia/sample_loadbalancer_id_1.sock" + " mode 0666 level user\n" + " maxconn {maxconn}\n\n" + "defaults\n" + " log global\n" + " retries 3\n" + " option redispatch\n" + " option splice-request\n" + " option splice-response\n" + " option http-keep-alive\n\n\n\n\n" + "frontend sample_listener_id_1\n" + " maxconn {maxconn}\n" + " bind 10.0.0.2:80\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n" + " log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " + "%ci\\ %cp\\ %t\\ %{{+Q}}r\\ %ST\\ %B\\ %U\\ " + "%[ssl_c_verify]\\ %{{+Q}}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " + "%tsc\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + self.backend_without_external = ( + "backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31\n" + " option httpchk GET /index.html\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie " + "sample_member_id_2\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + self.backend_with_external = ( + "backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31\n" + " option httpchk GET /index.html\n" + " http-check expect rstatus 418\n" + " option external-check\n" + " external-check command /var/lib/octavia/ping-wrapper.sh\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie " + "sample_member_id_2\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + + @mock.patch('subprocess.check_output') + def test_get_haproxy_versions(self, mock_process): + mock_process.return_value = ( + b"THIS-App version 1.6.3 2099/10/12\n" + b"Some other data here \n") + major, minor = haproxy_compatibility.get_haproxy_versions() + self.assertEqual(1, major) + self.assertEqual(6, minor) + + @mock.patch('subprocess.check_output') + def test_get_haproxy_versions_devel(self, mock_process): + mock_process.return_value = ( + b"HA-Proxy version 2.3-dev0 2019/11/25 - https://haproxy.org/\n" + b"Some other data here \n") + major, minor = haproxy_compatibility.get_haproxy_versions() + self.assertEqual(2, major) + self.assertEqual(3, minor) + + @mock.patch('octavia.amphorae.backends.agent.api_server.' 
+ 'haproxy_compatibility.get_haproxy_versions') + def test_process_cfg_for_version_compat(self, mock_get_version): + # Test 1.6 version path, no change to config expected + mock_get_version.return_value = [1, 6] + test_config = sample_configs_combined.sample_base_expected_config( + backend=self.backend_with_external) + result_config = haproxy_compatibility.process_cfg_for_version_compat( + test_config) + self.assertEqual(test_config, result_config) + + # Test 1.5 version path, external-check should be removed + mock_get_version.return_value = [1, 5] + test_config = sample_configs_combined.sample_base_expected_config( + backend=self.backend_with_external) + result_config = haproxy_compatibility.process_cfg_for_version_compat( + test_config) + expected_config = (self.old_haproxy_global + + self.backend_without_external) + self.assertEqual(expected_config, result_config) diff --git a/octavia/tests/unit/amphorae/backends/agent/api_server/test_keepalived.py b/octavia/tests/unit/amphorae/backends/agent/api_server/test_keepalived.py new file mode 100644 index 0000000000..76f3923b06 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/agent/api_server/test_keepalived.py @@ -0,0 +1,48 @@ +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import subprocess +from unittest import mock + +import flask + +from octavia.amphorae.backends.agent.api_server import keepalived +import octavia.tests.unit.base as base + + +class KeepalivedTestCase(base.TestCase): + def setUp(self): + super().setUp() + self.app = flask.Flask(__name__) + self.client = self.app.test_client() + self._ctx = self.app.test_request_context() + self._ctx.push() + self.test_keepalived = keepalived.Keepalived() + + @mock.patch('subprocess.check_output') + def test_manager_keepalived_service(self, mock_check_output): + res = self.test_keepalived.manager_keepalived_service('start') + cmd = 'systemctl start octavia-keepalived.service' + mock_check_output.assert_called_once_with(cmd.split(), + stderr=subprocess.STDOUT, + encoding='utf-8') + self.assertEqual(202, res.status_code) + + res = self.test_keepalived.manager_keepalived_service('restart') + self.assertEqual(400, res.status_code) + + mock_check_output.side_effect = subprocess.CalledProcessError(1, + 'blah!') + + res = self.test_keepalived.manager_keepalived_service('start') + self.assertEqual(500, res.status_code) diff --git a/octavia/tests/unit/amphorae/backends/agent/api_server/test_keepalivedlvs.py b/octavia/tests/unit/amphorae/backends/agent/api_server/test_keepalivedlvs.py new file mode 100644 index 0000000000..16573eca96 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/agent/api_server/test_keepalivedlvs.py @@ -0,0 +1,41 @@ +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_utils import uuidutils + +from octavia.amphorae.backends.agent.api_server import keepalivedlvs +from octavia.tests.unit import base + + +class KeepalivedLvsTestCase(base.TestCase): + FAKE_ID = uuidutils.generate_uuid() + + def setUp(self): + super().setUp() + self.test_keepalivedlvs = keepalivedlvs.KeepalivedLvs() + + @mock.patch.object(keepalivedlvs, "webob") + @mock.patch('os.path.exists') + def test_delete_lvs_listener_not_exist(self, m_exist, m_webob): + m_exist.return_value = False + self.test_keepalivedlvs.delete_lvs_listener(self.FAKE_ID) + calls = [ + mock.call( + json=dict(message='UDP Listener Not Found', + details="No UDP listener with UUID: " + "{}".format(self.FAKE_ID)), status=404), + mock.call(json={'message': 'OK'}) + ] + m_webob.Response.assert_has_calls(calls) diff --git a/octavia/tests/unit/amphorae/backends/agent/api_server/test_loadbalancer.py b/octavia/tests/unit/amphorae/backends/agent/api_server/test_loadbalancer.py new file mode 100644 index 0000000000..e949372bf0 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/agent/api_server/test_loadbalancer.py @@ -0,0 +1,317 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
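For context on the file being added here: test_loadbalancer.py drives Loadbalancer.start_stop_lb() through the systemctl success, failure, and "Job is already running" paths, and pins down the contract of _get_listeners_on_lb(): frontend names are reported only when the PID file exists and /proc/<pid> shows haproxy still running. A minimal sketch of that contract, with the agent's util helpers injected as parameters (the body is reconstructed from the mocks, not copied from the agent):

    import os

    def listeners_on_lb_sketch(lb_id, pid_path, get_haproxy_pid, config_path):
        """Return frontend names only when haproxy is actually running."""
        if not os.path.exists(pid_path(lb_id)):
            return []  # no PID file: haproxy never started for this LB
        pid = get_haproxy_pid(lb_id)
        if not os.path.exists(os.path.join('/proc', pid)):
            return []  # stale PID file: the process has exited
        with open(config_path(lb_id), encoding='utf-8') as cfg:
            # 'frontend list1' yields 'list1'; backend lines are skipped
            return [line.split()[1] for line in cfg
                    if line.startswith('frontend ')]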
+import subprocess +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.amphorae.backends.agent.api_server import loadbalancer +from octavia.amphorae.backends.agent.api_server import util as agent_util +from octavia.common import constants as consts +from octavia.tests.common import utils as test_utils +import octavia.tests.unit.base as base + +CONF = cfg.CONF +LISTENER_ID1 = uuidutils.generate_uuid() +LB_ID1 = uuidutils.generate_uuid() + + +class ListenerTestCase(base.TestCase): + def setUp(self): + super().setUp() + self.mock_platform = mock.patch("distro.id").start() + self.mock_platform.return_value = "ubuntu" + self.test_loadbalancer = loadbalancer.Loadbalancer() + + @mock.patch('os.path.exists') + @mock.patch('octavia.amphorae.backends.agent.api_server' + + '.util.get_haproxy_pid') + def test_check_haproxy_status(self, mock_pid, mock_exists): + mock_pid.return_value = '1245' + mock_exists.side_effect = [True, True] + self.assertEqual( + consts.ACTIVE, + self.test_loadbalancer._check_haproxy_status(LISTENER_ID1)) + + mock_exists.side_effect = [True, False] + self.assertEqual( + consts.OFFLINE, + self.test_loadbalancer._check_haproxy_status(LISTENER_ID1)) + + mock_exists.side_effect = [False] + self.assertEqual( + consts.OFFLINE, + self.test_loadbalancer._check_haproxy_status(LISTENER_ID1)) + + @mock.patch('time.sleep') + @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.LOG') + @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' + 'Loadbalancer._check_haproxy_status') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'vrrp_check_script_update') + @mock.patch('os.path.exists') + @mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.' 
+ 'Loadbalancer._check_lb_exists') + @mock.patch('subprocess.check_output') + @mock.patch('octavia.amphorae.backends.utils.haproxy_query.HAProxyQuery') + def test_start_stop_lb(self, mock_haproxy_query, mock_check_output, + mock_lb_exists, mock_path_exists, mock_vrrp_update, + mock_check_status, mock_LOG, mock_time_sleep): + listener_id = uuidutils.generate_uuid() + + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + + mock_path_exists.side_effect = [False, True, True, False, False] + mock_check_status.side_effect = ['bogus', consts.OFFLINE] + + # Happy path - No VRRP + cmd = "systemctl {action} haproxy-{listener_id}.service".format( + action=consts.AMP_ACTION_START, listener_id=listener_id) + + result = self.test_loadbalancer.start_stop_lb( + listener_id, consts.AMP_ACTION_START) + + mock_check_output.assert_called_once_with(cmd.split(), + stderr=subprocess.STDOUT, + encoding='utf-8') + mock_lb_exists.assert_called_once_with(listener_id) + mock_vrrp_update.assert_not_called() + self.assertEqual(202, result.status_code) + self.assertEqual('OK', result.json['message']) + ref_details = ('Configuration file is valid\n' + 'haproxy daemon for {} started'.format(listener_id)) + self.assertEqual(ref_details, result.json['details']) + + # Happy path - VRRP - RELOAD + conf.config(group="controller_worker", + loadbalancer_topology=consts.TOPOLOGY_ACTIVE_STANDBY) + + mock_lb_exists.reset_mock() + mock_vrrp_update.reset_mock() + mock_check_output.reset_mock() + + cmd = "systemctl {action} haproxy-{listener_id}.service".format( + action=consts.AMP_ACTION_RELOAD, listener_id=listener_id) + + result = self.test_loadbalancer.start_stop_lb( + listener_id, consts.AMP_ACTION_RELOAD) + + mock_check_output.assert_called_once_with(cmd.split(), + stderr=subprocess.STDOUT, + encoding='utf-8') + mock_lb_exists.assert_called_once_with(listener_id) + mock_vrrp_update.assert_called_once_with(listener_id, + consts.AMP_ACTION_RELOAD) + self.assertEqual(202, result.status_code) + self.assertEqual('OK', result.json['message']) + ref_details = f'Listener {listener_id} {consts.AMP_ACTION_RELOAD}ed' + self.assertEqual(ref_details, result.json['details']) + + # Happy path - VRRP - RELOAD - OFFLINE + mock_lb_exists.reset_mock() + mock_vrrp_update.reset_mock() + mock_check_output.reset_mock() + + cmd = "systemctl {action} haproxy-{listener_id}.service".format( + action=consts.AMP_ACTION_START, listener_id=listener_id) + + result = self.test_loadbalancer.start_stop_lb( + listener_id, consts.AMP_ACTION_RELOAD) + + mock_check_output.assert_called_once_with(cmd.split(), + stderr=subprocess.STDOUT, + encoding='utf-8') + mock_lb_exists.assert_called_once_with(listener_id) + mock_vrrp_update.assert_called_once_with(listener_id, + consts.AMP_ACTION_RELOAD) + self.assertEqual(202, result.status_code) + self.assertEqual('OK', result.json['message']) + ref_details = ('Configuration file is valid\n' + 'haproxy daemon for {} started'.format(listener_id)) + self.assertEqual(ref_details, result.json['details']) + + # Unhappy path - Not already running + conf.config(group="controller_worker", + loadbalancer_topology=consts.TOPOLOGY_SINGLE) + + mock_lb_exists.reset_mock() + mock_vrrp_update.reset_mock() + mock_check_output.reset_mock() + + cmd = "systemctl {action} haproxy-{listener_id}.service".format( + action=consts.AMP_ACTION_START, listener_id=listener_id) + + mock_check_output.side_effect = subprocess.CalledProcessError( + output='bogus', returncode=-2, cmd='sit') + + result = self.test_loadbalancer.start_stop_lb( + listener_id, 
consts.AMP_ACTION_START)
+
+ mock_check_output.assert_called_once_with(cmd.split(),
+ stderr=subprocess.STDOUT,
+ encoding='utf-8')
+ mock_lb_exists.assert_called_once_with(listener_id)
+ mock_vrrp_update.assert_not_called()
+ self.assertEqual(500, result.status_code)
+ self.assertEqual(f'Error {consts.AMP_ACTION_START}ing haproxy',
+ result.json['message'])
+ self.assertEqual('bogus', result.json['details'])
+
+ # Unhappy path - Already running
+ mock_lb_exists.reset_mock()
+ mock_vrrp_update.reset_mock()
+ mock_check_output.reset_mock()
+
+ cmd = "systemctl {action} haproxy-{listener_id}.service".format(
+ action=consts.AMP_ACTION_START, listener_id=listener_id)
+
+ mock_check_output.side_effect = subprocess.CalledProcessError(
+ output='Job is already running', returncode=-2, cmd='sit')
+
+ result = self.test_loadbalancer.start_stop_lb(
+ listener_id, consts.AMP_ACTION_START)
+
+ mock_check_output.assert_called_once_with(cmd.split(),
+ stderr=subprocess.STDOUT,
+ encoding='utf-8')
+ mock_lb_exists.assert_called_once_with(listener_id)
+ mock_vrrp_update.assert_not_called()
+ self.assertEqual(202, result.status_code)
+ self.assertEqual('OK', result.json['message'])
+ ref_details = ('Configuration file is valid\n'
+ 'haproxy daemon for {} started'.format(listener_id))
+ self.assertEqual(ref_details, result.json['details'])
+
+ # Invalid action
+ mock_check_output.reset_mock()
+ mock_lb_exists.reset_mock()
+ mock_path_exists.reset_mock()
+ mock_vrrp_update.reset_mock()
+ result = self.test_loadbalancer.start_stop_lb(listener_id, 'bogus')
+ self.assertEqual(400, result.status_code)
+ self.assertEqual('Invalid Request', result.json['message'])
+ self.assertEqual('Unknown action: bogus', result.json['details'])
+ mock_lb_exists.assert_not_called()
+ mock_path_exists.assert_not_called()
+ mock_vrrp_update.assert_not_called()
+ mock_check_output.assert_not_called()
+
+ # haproxy error on reload
+ mock_check_output.reset_mock()
+ mock_lb_exists.reset_mock()
+ mock_path_exists.reset_mock()
+ mock_vrrp_update.reset_mock()
+ mock_check_status.reset_mock()
+ mock_LOG.reset_mock()
+
+ mock_check_output.side_effect = [
+ subprocess.CalledProcessError(
+ output='haproxy.service is not active, cannot reload.',
+ returncode=-2, cmd='service'),
+ None]
+ mock_check_status.return_value = 'ACTIVE'
+ mock_check_status.side_effect = None
+
+ mock_query = mock.Mock()
+ mock_haproxy_query.return_value = mock_query
+ mock_query.show_info.side_effect = [Exception("error"),
+ {'Uptime_sec': 5}]
+
+ result = self.test_loadbalancer.start_stop_lb(listener_id, 'reload')
+ self.assertEqual(202, result.status_code)
+
+ LOG_last_call = mock_LOG.mock_calls[-1]
+ self.assertIn('An error occured with haproxy', LOG_last_call[1][0])
+
+ # haproxy error on reload - retry limit
+ mock_check_output.reset_mock()
+ mock_lb_exists.reset_mock()
+ mock_path_exists.reset_mock()
+ mock_vrrp_update.reset_mock()
+ mock_check_status.reset_mock()
+ mock_LOG.reset_mock()
+
+ mock_check_output.side_effect = [
+ subprocess.CalledProcessError(
+ output='haproxy.service is not active, cannot reload.',
+ returncode=-2, cmd='service'),
+ subprocess.CalledProcessError(
+ output='haproxy.service is not active, cannot reload.',
+ returncode=-2, cmd='service'),
+ subprocess.CalledProcessError(
+ output='haproxy.service is not active, cannot reload.',
+ returncode=-2, cmd='service')]
+ mock_check_status.return_value = 'ACTIVE'
+ mock_check_status.side_effect = None
+
+ mock_query = mock.Mock()
+ mock_haproxy_query.return_value =
mock_query + mock_query.show_info.side_effect = Exception("error") + + result = self.test_loadbalancer.start_stop_lb(listener_id, 'reload') + self.assertEqual(500, result.status_code) + self.assertEqual('Error reloading haproxy', result.json['message']) + self.assertEqual('haproxy.service is not active, cannot reload.', + result.json['details']) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'config_path') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_haproxy_pid') + @mock.patch('os.path.exists') + def test_get_listeners_on_lb(self, mock_exists, mock_get_haproxy_pid, + mock_config_path): + + fake_cfg_path = '/some/fake/cfg/file.cfg' + mock_config_path.return_value = fake_cfg_path + mock_get_haproxy_pid.return_value = 'fake_pid' + + # Finds two listeners + mock_exists.side_effect = [True, True] + fake_cfg_data = 'frontend list1\nbackend foo\nfrontend list2' + self.useFixture( + test_utils.OpenFixture(fake_cfg_path, fake_cfg_data)).mock_open + result = self.test_loadbalancer._get_listeners_on_lb(LB_ID1) + self.assertEqual(['list1', 'list2'], result) + mock_exists.assert_has_calls([mock.call(agent_util.pid_path(LB_ID1)), + mock.call('/proc/fake_pid')]) + + # No PID file, no listeners + mock_exists.reset_mock() + mock_exists.side_effect = [False] + result = self.test_loadbalancer._get_listeners_on_lb(LB_ID1) + self.assertEqual([], result) + mock_exists.assert_called_once_with(agent_util.pid_path(LB_ID1)) + + # PID file, no running process, no listeners + mock_exists.reset_mock() + mock_exists.side_effect = [True, False] + result = self.test_loadbalancer._get_listeners_on_lb(LB_ID1) + self.assertEqual([], result) + mock_exists.assert_has_calls([mock.call(agent_util.pid_path(LB_ID1)), + mock.call('/proc/fake_pid')]) + + # PID file, running process, no listeners + mock_exists.reset_mock() + mock_exists.side_effect = [True, True] + fake_cfg_data = 'backend only' + self.useFixture( + test_utils.OpenFixture(fake_cfg_path, fake_cfg_data)).mock_open + result = self.test_loadbalancer._get_listeners_on_lb(LB_ID1) + self.assertEqual([], result) + mock_exists.assert_has_calls([mock.call(agent_util.pid_path(LB_ID1)), + mock.call('/proc/fake_pid')]) diff --git a/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py b/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py new file mode 100644 index 0000000000..44f5b76c3b --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py @@ -0,0 +1,231 @@ +# Copyright 2017 Redhat. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
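For context on the file being added here: test_osutils.py locks in the distro dispatch (a distro.id() of ubuntu, fedora, rhel, or centos selects the matching OS utility class, anything else raises InvalidAmphoraOperatingSystem) and the package-version query commands. A flat sketch of the command selection implied by the assertions; the shipped module organises this across per-OS classes, so treat this as illustration only:

    import distro

    def version_query_cmd_sketch(package_name):
        """Build the command that reports an installed package's version."""
        if distro.id() == 'ubuntu':
            return f'dpkg-query -W -f=${{Version}} {package_name}'
        if (distro.id() == 'centos' and distro.version() == '7'
                and package_name == 'haproxy'):
            # CentOS 7 ships haproxy under a renamed package
            package_name = 'haproxy18'
        return f'rpm -q --queryformat %{{VERSION}} {package_name}'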
+import ipaddress +from unittest import mock + +from octavia.amphorae.backends.agent.api_server import osutils +from octavia.common import exceptions as octavia_exceptions +from octavia.tests.unit import base + + +class TestOSUtils(base.TestCase): + + def setUp(self): + super().setUp() + + self.base_os_util = osutils.BaseOS('unknown') + + with mock.patch('distro.id', + return_value='ubuntu'): + self.ubuntu_os_util = osutils.BaseOS.get_os_util() + + with mock.patch('distro.id', + return_value='rhel'): + self.rh_os_util = osutils.BaseOS.get_os_util() + + with mock.patch('distro.id', return_value='centos'): + with mock.patch('distro.version', return_value='8'): + self.centos_os_util = osutils.BaseOS.get_os_util() + + with mock.patch('distro.id', return_value='centos'): + with mock.patch('distro.version', return_value='7'): + self.centos7_os_util = osutils.BaseOS.get_os_util() + + def test_get_os_util(self): + with mock.patch('distro.id', + return_value='ubuntu'): + returned_cls = osutils.BaseOS.get_os_util() + self.assertIsInstance(returned_cls, osutils.Ubuntu) + with mock.patch('distro.id', + return_value='fedora'): + returned_cls = osutils.BaseOS.get_os_util() + self.assertIsInstance(returned_cls, osutils.RH) + with mock.patch('distro.id', + return_value='rhel'): + returned_cls = osutils.BaseOS.get_os_util() + self.assertIsInstance(returned_cls, osutils.RH) + with mock.patch('distro.id', + return_value='centos'): + returned_cls = osutils.BaseOS.get_os_util() + self.assertIsInstance(returned_cls, osutils.CentOS) + with mock.patch('distro.id', + return_value='FakeOS'): + self.assertRaises( + octavia_exceptions.InvalidAmphoraOperatingSystem, + osutils.BaseOS.get_os_util) + + def test_cmd_get_version_of_installed_package(self): + package_name = 'foo' + ubuntu_cmd = f"dpkg-query -W -f=${{Version}} {package_name}" + rh_cmd = f"rpm -q --queryformat %{{VERSION}} {package_name}" + + returned_ubuntu_cmd = ( + self.ubuntu_os_util.cmd_get_version_of_installed_package( + package_name)) + self.assertEqual(ubuntu_cmd, returned_ubuntu_cmd) + + returned_rh_cmd = (self.rh_os_util. + cmd_get_version_of_installed_package(package_name)) + self.assertEqual(rh_cmd, returned_rh_cmd) + + def test_cmd_get_version_of_installed_package_mapped(self): + package_name = 'haproxy' + centos7_cmd = "rpm -q --queryformat %{VERSION} haproxy18" + + returned_centos7_cmd = ( + self.centos7_os_util.cmd_get_version_of_installed_package( + package_name)) + self.assertEqual(centos7_cmd, returned_centos7_cmd) + + centos_cmd = "rpm -q --queryformat %{VERSION} haproxy" + returned_centos_cmd = ( + self.centos_os_util.cmd_get_version_of_installed_package( + package_name)) + self.assertEqual(centos_cmd, returned_centos_cmd) + + @mock.patch('octavia.amphorae.backends.utils.interface_file.' + 'InterfaceFile') + def test_write_interface_file(self, mock_interface_file): + mock_interface = mock.MagicMock() + mock_interface_file.return_value = mock_interface + + self.ubuntu_os_util.write_interface_file('eth1', + '192.0.2.2', 16) + + mock_interface_file.assert_called_once_with( + name='eth1', if_type="lo", + addresses=[{"address": "192.0.2.2", "prefixlen": 16}]) + mock_interface.write.assert_called_once() + + @mock.patch('octavia.amphorae.backends.utils.interface_file.' 
+ 'VIPInterfaceFile') + def test_write_vip_interface_file(self, mock_vip_interface_file): + netns_interface = 'eth1234' + FIXED_IP = '192.0.2.2' + SUBNET_CIDR = '192.0.2.0/24' + GATEWAY = '192.51.100.1' + DEST1 = '198.51.100.0/24' + DEST2 = '203.0.113.0/24' + NEXTHOP = '192.0.2.1' + MTU = 1450 + FIXED_IP_IPV6 = '2001:0db8:0000:0000:0000:0000:0000:000a' + # Subnet prefix is purposefully not 32, because that coincidentally + # matches the result of any arbitrary IPv4->prefixlen conversion + SUBNET_CIDR_IPV6 = '2001:db8::/70' + GATEWAY_IPV6 = '2001:0db8:0000:0000:0000:0000:0000:0001' + + ip = ipaddress.ip_address(FIXED_IP) + network = ipaddress.ip_network(SUBNET_CIDR) + + ipv6 = ipaddress.ip_address(FIXED_IP_IPV6) + networkv6 = ipaddress.ip_network(SUBNET_CIDR_IPV6) + + host_routes = [ + {'nexthop': NEXTHOP, 'destination': DEST1}, + {'nexthop': NEXTHOP, 'destination': DEST2} + ] + + self.ubuntu_os_util.write_vip_interface_file( + interface=netns_interface, + vips={ + 'address': FIXED_IP, + 'ip_version': ip.version, + 'prefixlen': network.prefixlen, + 'gateway': GATEWAY, + 'host_routes': host_routes + }, + mtu=MTU, + vrrp_info=None + ) + + mock_vip_interface_file.assert_called_once_with( + name=netns_interface, + vips={ + 'address': FIXED_IP, + 'ip_version': ip.version, + 'prefixlen': network.prefixlen, + 'gateway': GATEWAY, + 'host_routes': host_routes + }, + mtu=MTU, + vrrp_info=None, + fixed_ips=None, + topology="SINGLE", + is_sriov=False) + mock_vip_interface_file.return_value.write.assert_called_once() + + # Now test with an IPv6 VIP + mock_vip_interface_file.reset_mock() + + self.ubuntu_os_util.write_vip_interface_file( + interface=netns_interface, + vips={ + 'address': FIXED_IP_IPV6, + 'ip_version': ipv6.version, + 'prefixlen': networkv6.prefixlen, + 'gateway': GATEWAY_IPV6, + 'host_routes': host_routes + }, + mtu=MTU, + vrrp_info=None) + + mock_vip_interface_file.assert_called_once_with( + name=netns_interface, + vips={ + 'address': FIXED_IP_IPV6, + 'ip_version': ipv6.version, + 'prefixlen': networkv6.prefixlen, + 'gateway': GATEWAY_IPV6, + 'host_routes': host_routes + }, + mtu=MTU, + vrrp_info=None, + fixed_ips=None, + topology="SINGLE", + is_sriov=False) + + @mock.patch('octavia.amphorae.backends.utils.interface_file.' 
+ 'PortInterfaceFile') + def test_write_port_interface_file(self, mock_port_interface_file): + FIXED_IP = '192.0.2.2' + NEXTHOP = '192.0.2.1' + DEST = '198.51.100.0/24' + host_routes = [ + {'nexthop': NEXTHOP, 'destination': ipaddress.ip_network(DEST)} + ] + FIXED_IP_IPV6 = '2001:db8::2' + NEXTHOP_IPV6 = '2001:db8::1' + DEST_IPV6 = '2001:db8:51:100::/64' + host_routes_ipv6 = [ + {'nexthop': NEXTHOP_IPV6, + 'destination': ipaddress.ip_network(DEST_IPV6)} + ] + ip_addr = {'ip_address': FIXED_IP, 'host_routes': host_routes} + ipv6_addr = {'ip_address': FIXED_IP_IPV6, + 'host_routes': host_routes_ipv6} + + netns_interface = 'eth1234' + MTU = 1450 + fixed_ips = [ip_addr, ipv6_addr] + + self.base_os_util.write_port_interface_file( + interface=netns_interface, + fixed_ips=fixed_ips, + mtu=MTU) + + mock_port_interface_file.assert_called_once_with( + name=netns_interface, + fixed_ips=fixed_ips, + mtu=MTU, is_sriov=False) + mock_port_interface_file.return_value.write.assert_called_once() diff --git a/octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py b/octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py new file mode 100644 index 0000000000..fb9fc094af --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py @@ -0,0 +1,448 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
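For context on the file being added here: test_plug.py covers the hot-plug workflow, starting with resolving a port's MAC address to an interface name inside the amphora. A condensed sketch of the lookup those first tests encode (attribute handling mirrors the mocked pyroute2 messages; the real agent raises a werkzeug HTTP error where this sketch raises RuntimeError):

    import os
    import pyroute2

    def interface_by_mac_sketch(mac):
        """Resolve a MAC address to an interface name, case-insensitively."""
        with pyroute2.IPRoute() as ipr:
            idx = ipr.link_lookup(address=mac.lower())
            if idx:
                link = ipr.get_links(idx[0])[0]
                for name, value in link['attrs']:
                    if name == 'IFLA_IFNAME':
                        return value
        # Nothing matched: ask the kernel to re-enumerate PCI devices, as
        # the not-found test expects, before giving up.
        fd = os.open('/sys/bus/pci/rescan', os.O_WRONLY)
        with os.fdopen(fd, 'w') as rescan:
            rescan.write('1')
        raise RuntimeError(f'no interface found for MAC {mac}')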
+import os +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from werkzeug import exceptions as wz_exceptions + +from octavia.amphorae.backends.agent.api_server import osutils +from octavia.amphorae.backends.agent.api_server import plug +from octavia.common import constants +import octavia.tests.unit.base as base + +FAKE_CIDR_IPV4 = '10.0.0.0/24' +FAKE_GATEWAY_IPV4 = '10.0.0.1' +FAKE_IP_IPV4 = '10.0.0.2' +FAKE_CIDR_IPV6 = '2001:db8::/32' +FAKE_GATEWAY_IPV6 = '2001:db8::1' +FAKE_IP_IPV6 = '2001:db8::2' +FAKE_IP_IPV6_EXPANDED = '2001:0db8:0000:0000:0000:0000:0000:0002' +FAKE_MAC_ADDRESS = 'ab:cd:ef:00:ff:22' +FAKE_INTERFACE = 'eth33' + + +class TestPlug(base.TestCase): + def setUp(self): + super().setUp() + self.mock_platform = mock.patch("distro.id").start() + self.mock_platform.return_value = "ubuntu" + self.osutil = osutils.BaseOS.get_os_util() + self.test_plug = plug.Plug(self.osutil) + self.addCleanup(self.mock_platform.stop) + + @mock.patch('pyroute2.IPRoute', create=True) + def test__interface_by_mac_case_insensitive_ubuntu(self, mock_ipr): + mock_ipr_instance = mock.MagicMock() + mock_ipr_instance.link_lookup.return_value = [33] + mock_ipr_instance.get_links.return_value = ({ + 'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},) + mock_ipr().__enter__.return_value = mock_ipr_instance + + interface = self.test_plug._interface_by_mac(FAKE_MAC_ADDRESS.upper()) + self.assertEqual(FAKE_INTERFACE, interface) + mock_ipr_instance.get_links.assert_called_once_with(33) + + @mock.patch('pyroute2.IPRoute', create=True) + def test__interface_by_mac_not_found(self, mock_ipr): + mock_ipr_instance = mock.MagicMock() + mock_ipr_instance.link_lookup.return_value = [] + mock_ipr().__enter__.return_value = mock_ipr_instance + + fd_mock = mock.mock_open() + open_mock = mock.Mock() + isfile_mock = mock.Mock() + with mock.patch('os.open', open_mock), mock.patch.object( + os, 'fdopen', fd_mock), mock.patch.object( + os.path, 'isfile', isfile_mock): + self.assertRaises(wz_exceptions.HTTPException, + self.test_plug._interface_by_mac, + FAKE_MAC_ADDRESS.upper()) + open_mock.assert_called_once_with('/sys/bus/pci/rescan', os.O_WRONLY) + fd_mock().write.assert_called_once_with('1') + + @mock.patch('pyroute2.IPRoute', create=True) + def test__interface_by_mac_case_insensitive_rh(self, mock_ipr): + mock_ipr_instance = mock.MagicMock() + mock_ipr_instance.link_lookup.return_value = [33] + mock_ipr_instance.get_links.return_value = ({ + 'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},) + mock_ipr().__enter__.return_value = mock_ipr_instance + + with mock.patch('distro.id', return_value='centos'): + osutil = osutils.BaseOS.get_os_util() + self.test_plug = plug.Plug(osutil) + interface = self.test_plug._interface_by_mac( + FAKE_MAC_ADDRESS.upper()) + self.assertEqual(FAKE_INTERFACE, interface) + mock_ipr_instance.get_links.assert_called_once_with(33) + + @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' 
+ '_interface_by_mac', return_value=FAKE_INTERFACE) + @mock.patch('pyroute2.NSPopen', create=True) + @mock.patch.object(plug, "webob") + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + @mock.patch('shutil.copytree') + @mock.patch('os.makedirs') + def test_plug_vip_ipv4(self, mock_makedirs, mock_copytree, + mock_check_output, mock_netns, mock_pyroute2, + mock_webob, mock_nspopen, mock_by_mac): + m = mock.mock_open() + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + self.test_plug.plug_vip( + vip=FAKE_IP_IPV4, + subnet_cidr=FAKE_CIDR_IPV4, + gateway=FAKE_GATEWAY_IPV4, + mac_address=FAKE_MAC_ADDRESS + ) + mock_webob.Response.assert_any_call(json={ + 'message': 'OK', + 'details': f'VIPs plugged on interface eth1: {FAKE_IP_IPV4}' + }, status=202) + + @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' + '_interface_by_mac', return_value=FAKE_INTERFACE) + @mock.patch('pyroute2.NSPopen', create=True) + @mock.patch.object(plug, "webob") + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + @mock.patch('shutil.copytree') + @mock.patch('os.makedirs') + def test_plug_vip_ipv6(self, mock_makedirs, mock_copytree, + mock_check_output, mock_netns, mock_pyroute2, + mock_webob, mock_nspopen, mock_by_mac): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='controller_worker', + loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) + m = mock.mock_open() + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + self.test_plug.plug_vip( + vip=FAKE_IP_IPV6, + subnet_cidr=FAKE_CIDR_IPV6, + gateway=FAKE_GATEWAY_IPV6, + mac_address=FAKE_MAC_ADDRESS + ) + mock_webob.Response.assert_any_call(json={ + 'message': 'OK', + 'details': f'VIPs plugged on interface eth1: ' + f'{FAKE_IP_IPV6_EXPANDED}' + }, status=202) + + @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' 
+ '_interface_by_mac', return_value=FAKE_INTERFACE) + @mock.patch('pyroute2.NSPopen', create=True) + @mock.patch.object(plug, "webob") + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + @mock.patch('shutil.copytree') + @mock.patch('os.makedirs') + def test_plug_vip_ipv4_and_ipv6( + self, mock_makedirs, mock_copytree, + mock_check_output, mock_netns, + mock_pyroute2, mock_webob, mock_nspopen, mock_by_mac): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='controller_worker', + loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) + additional_vips = [ + {'ip_address': FAKE_IP_IPV4, 'subnet_cidr': FAKE_CIDR_IPV4, + 'host_routes': [], 'gateway': FAKE_GATEWAY_IPV4} + ] + m = mock.mock_open() + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + self.test_plug.plug_vip( + vip=FAKE_IP_IPV6, + subnet_cidr=FAKE_CIDR_IPV6, + gateway=FAKE_GATEWAY_IPV6, + mac_address=FAKE_MAC_ADDRESS, + additional_vips=additional_vips + ) + mock_webob.Response.assert_any_call(json={ + 'message': 'OK', + 'details': 'VIPs plugged on interface {interface}: {vips}'.format( + vips=", ".join([FAKE_IP_IPV6_EXPANDED, FAKE_IP_IPV4]), + interface='eth1') + }, status=202) + + @mock.patch.object(plug, "webob") + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + @mock.patch('shutil.copytree') + @mock.patch('os.makedirs') + def test_plug_vip_bad_ip(self, mock_makedirs, mock_copytree, + mock_check_output, mock_netns, mock_pyroute2, + mock_webob): + m = mock.mock_open() + BAD_IP_ADDRESS = "error" + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + self.test_plug.plug_vip( + vip=BAD_IP_ADDRESS, + subnet_cidr=FAKE_CIDR_IPV4, + gateway=FAKE_GATEWAY_IPV4, + mac_address=FAKE_MAC_ADDRESS + ) + mock_webob.Response.assert_any_call( + json={'message': ("Invalid VIP: '{ip}' does not appear to be an " + "IPv4 or IPv6 address").format( + ip=BAD_IP_ADDRESS)}, + status=400) + + @mock.patch.object(plug, "webob") + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch('subprocess.check_output') + @mock.patch('shutil.copytree') + @mock.patch('os.makedirs') + def test_plug_vip_bad_vrrp_ip(self, mock_makedirs, mock_copytree, + mock_check_output, mock_netns, mock_pyroute2, + mock_webob): + m = mock.mock_open() + BAD_IP_ADDRESS = "error" + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + self.test_plug.plug_vip( + vip=FAKE_IP_IPV4, + subnet_cidr=FAKE_CIDR_IPV4, + gateway=FAKE_GATEWAY_IPV4, + mac_address=FAKE_MAC_ADDRESS, + vrrp_ip=BAD_IP_ADDRESS + ) + mock_webob.Response.assert_any_call( + json={'message': ("Invalid VRRP Address: '{ip}' does not appear " + "to be an IPv4 or IPv6 address").format( + ip=BAD_IP_ADDRESS)}, + status=400) + + @mock.patch("octavia.amphorae.backends.agent.api_server.osutils." 
+ "BaseOS.write_interface_file") + def test_plug_lo(self, mock_write_interface): + m = mock.mock_open() + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + self.test_plug.plug_lo() + mock_write_interface.assert_called_once_with(interface='lo', + ip_address='127.0.0.1', + prefixlen=8) + + @mock.patch('pyroute2.NetNS', create=True) + def test__netns_interface_exists(self, mock_netns): + + netns_handle = mock_netns.return_value.__enter__.return_value + + netns_handle.get_links.return_value = [{ + 'attrs': [['IFLA_ADDRESS', '123'], + ['IFLA_IFNAME', 'eth0']]}] + + # Interface is found in netns + self.assertTrue(self.test_plug._netns_interface_exists('123')) + + # Interface is not found in netns + self.assertFalse(self.test_plug._netns_interface_exists('321')) + + @mock.patch.object(plug, "webob") + @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' + '_netns_interface_exists', return_value=False) + @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' + '_interface_by_mac', return_value=FAKE_INTERFACE) + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch("octavia.amphorae.backends.agent.api_server.osutils." + "BaseOS.write_port_interface_file") + @mock.patch("octavia.amphorae.backends.agent.api_server.osutils." + "BaseOS.bring_interface_up") + @mock.patch("octavia.amphorae.backends.agent.api_server.util." + "send_member_advertisements") + def test_plug_network(self, mock_send_member_adv, + mock_if_up, mock_write_port_interface, + mock_netns, mock_iproute, + mock_by_mac, mock_interface_exists, mock_webob): + fixed_ips = [ + {'ip_address': FAKE_IP_IPV4, + 'subnet_cidr': FAKE_CIDR_IPV4, + 'gateway': FAKE_GATEWAY_IPV4, + 'host_routes': [ + {'destination': '192.0.2.0/24', + 'nexthop': '192.0.2.254'}] + }] + mtu = 1400 + m = mock.mock_open() + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + self.test_plug.plug_network(FAKE_MAC_ADDRESS, fixed_ips, 1400) + + mock_write_port_interface.assert_called_once_with( + interface='eth2', fixed_ips=fixed_ips, mtu=mtu, is_sriov=False) + mock_if_up.assert_called_once_with('eth2', 'network') + mock_send_member_adv.assert_called_once_with(fixed_ips) + + mock_webob.Response.assert_any_call( + json={'message': 'OK', + 'details': 'Plugged on interface eth2'}, + status=202) + + @mock.patch.object(plug, "webob") + @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' + '_netns_interface_exists', return_value=True) + @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' + '_netns_interface_by_mac', return_value=FAKE_INTERFACE) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch("octavia.amphorae.backends.agent.api_server.osutils." + "BaseOS.write_port_interface_file") + @mock.patch("octavia.amphorae.backends.agent.api_server.osutils." + "BaseOS.bring_interface_up") + @mock.patch("octavia.amphorae.backends.agent.api_server.util." 
+ "send_member_advertisements") + def test_plug_network_existing_interface(self, mock_send_member_adv, + mock_if_up, + mock_write_port_interface, + mock_netns, mock_by_mac, + mock_interface_exists, + mock_webob): + fixed_ips = [ + {'ip_address': FAKE_IP_IPV4, + 'subnet_cidr': FAKE_CIDR_IPV4, + 'gateway': FAKE_GATEWAY_IPV4, + 'host_routes': [ + {'destination': '192.0.2.0/24', + 'nexthop': '192.0.2.254'}] + }, {'ip_address': FAKE_IP_IPV6, + 'subnet_cidr': FAKE_CIDR_IPV6, + 'gateway': FAKE_GATEWAY_IPV6, + 'host_routes': [ + {'destination': '2001:db8::/64', + 'nexthop': '2001:db8::ffff'}] + }] + mtu = 1400 + m = mock.mock_open() + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + self.test_plug.plug_network(FAKE_MAC_ADDRESS, fixed_ips, 1400) + + mock_write_port_interface.assert_called_once_with( + interface=FAKE_INTERFACE, fixed_ips=fixed_ips, mtu=mtu, + is_sriov=False) + mock_if_up.assert_called_once_with(FAKE_INTERFACE, 'network') + mock_send_member_adv.assert_called_once_with(fixed_ips) + + mock_webob.Response.assert_any_call( + json={'message': 'OK', + 'details': f'Updated existing interface {FAKE_INTERFACE}'}, + status=202) + + @mock.patch.object(plug, "webob") + @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' + '_netns_interface_exists', return_value=True) + @mock.patch('octavia.amphorae.backends.agent.api_server.plug.Plug.' + '_netns_interface_by_mac', return_value=FAKE_INTERFACE) + @mock.patch('pyroute2.NetNS', create=True) + @mock.patch("octavia.amphorae.backends.agent.api_server.osutils." + "BaseOS.write_vip_interface_file") + @mock.patch("octavia.amphorae.backends.agent.api_server.osutils." + "BaseOS.bring_interface_up") + @mock.patch("octavia.amphorae.backends.agent.api_server.util." + "send_member_advertisements") + def test_plug_network_on_vip( + self, mock_send_member_adv, mock_if_up, mock_write_vip_interface, + mock_netns, mock_by_mac, mock_interface_exists, mock_webob): + fixed_ips = [ + {'ip_address': FAKE_IP_IPV4, + 'subnet_cidr': FAKE_CIDR_IPV4, + 'gateway': FAKE_GATEWAY_IPV4, + 'host_routes': [ + {'destination': '192.0.2.128/25', + 'nexthop': '192.0.2.100'}] + }, {'ip_address': FAKE_IP_IPV6, + 'subnet_cidr': FAKE_CIDR_IPV6, + 'gateway': FAKE_GATEWAY_IPV6, + 'host_routes': [ + {'destination': '2001:db8::/64', + 'nexthop': '2001:db8::ffff'}] + }] + mtu = 1400 + vip_net_info = { + 'vip': '192.0.2.10', + 'subnet_cidr': '192.0.2.0/25', + 'vrrp_ip': '192.0.2.11', + 'gateway': '192.0.2.1', + 'host_routes': [] + } + + m = mock.mock_open() + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + self.test_plug.plug_network(FAKE_MAC_ADDRESS, fixed_ips, mtu=1400, + vip_net_info=vip_net_info) + + mock_write_vip_interface.assert_called_once_with( + interface=FAKE_INTERFACE, + vips=[{ + 'ip_address': vip_net_info['vip'], + 'ip_version': 4, + 'prefixlen': 25, + 'gateway': vip_net_info['gateway'], + 'host_routes': [], + }], + vrrp_info={ + 'ip': vip_net_info['vrrp_ip'], + 'ip_version': 4, + 'prefixlen': 25, + 'gateway': vip_net_info['gateway'], + 'host_routes': [], + }, + fixed_ips=fixed_ips, mtu=mtu, is_sriov=False) + + mock_if_up.assert_called_once_with(FAKE_INTERFACE, 'vip') + mock_send_member_adv.assert_called_once_with(fixed_ips) + + mock_webob.Response.assert_any_call( + json={'message': 'OK', + 'details': f'Updated existing interface {FAKE_INTERFACE}'}, + status=202) + + @mock.patch('pyroute2.NetNS', create=True) + def test__netns_get_next_interface(self, mock_netns): + netns_handle = mock_netns.return_value.__enter__.return_value + + 
netns_handle.get_links.return_value = [ + {'attrs': [['IFLA_IFNAME', 'lo']]}, + ] + + ifname = self.test_plug._netns_get_next_interface() + self.assertEqual('eth2', ifname) + + netns_handle.get_links.return_value = [ + {'attrs': [['IFLA_IFNAME', 'lo']]}, + {'attrs': [['IFLA_IFNAME', 'eth1']]}, + {'attrs': [['IFLA_IFNAME', 'eth2']]}, + {'attrs': [['IFLA_IFNAME', 'eth3']]}, + ] + + ifname = self.test_plug._netns_get_next_interface() + self.assertEqual('eth4', ifname) + + netns_handle.get_links.return_value = [ + {'attrs': [['IFLA_IFNAME', 'lo']]}, + {'attrs': [['IFLA_IFNAME', 'eth1']]}, + {'attrs': [['IFLA_IFNAME', 'eth3']]}, + ] + + ifname = self.test_plug._netns_get_next_interface() + self.assertEqual('eth2', ifname) + + netns_handle.get_links.return_value = [ + {'attrs': [['IFLA_IFNAME', f'eth{idx}']]} + for idx in range(2, 1000)] + + ifname = self.test_plug._netns_get_next_interface() + self.assertEqual('eth1000', ifname) diff --git a/octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py b/octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py new file mode 100644 index 0000000000..aa5335e03c --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py @@ -0,0 +1,517 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import os +import subprocess +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.amphorae.backends.agent.api_server import util +from octavia.common import constants as consts +from octavia.common.jinja.haproxy.combined_listeners import jinja_cfg +from octavia.tests.common import utils as test_utils +import octavia.tests.unit.base as base +from octavia.tests.unit.common.sample_configs import sample_configs_combined + +BASE_AMP_PATH = '/var/lib/octavia' +BASE_CRT_PATH = BASE_AMP_PATH + '/certs' +CONF = cfg.CONF +LISTENER_ID1 = uuidutils.generate_uuid() +LB_ID1 = uuidutils.generate_uuid() + + +class TestUtil(base.TestCase): + def setUp(self): + super().setUp() + self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.listener_id = uuidutils.generate_uuid() + self.jinja_cfg = jinja_cfg.JinjaTemplater( + base_amp_path=BASE_AMP_PATH, + base_crt_dir=BASE_CRT_PATH) + + def test_keepalived_lvs_dir(self): + fake_path = '/fake/path' + self.CONF.config(group="haproxy_amphora", base_path=fake_path) + + result = util.keepalived_lvs_dir() + fake_path = fake_path + '/lvs' + self.assertEqual(fake_path, result) + + def test_keepalived_lvs_init_path(self): + ref_path = (consts.SYSTEMD_DIR + '/' + + consts.KEEPALIVEDLVS_SYSTEMD % self.listener_id) + result = util.keepalived_lvs_init_path(self.listener_id) + self.assertEqual(ref_path, result) + + def test_keepalived_lvs_pids_path(self): + fake_path = '/fake/path' + self.CONF.config(group="haproxy_amphora", base_path=fake_path) + + pid_path = (fake_path + '/' + 'lvs/octavia-keepalivedlvs-' + + self.listener_id + '.' 
+ 'pid') + vrrp_pid_path = (fake_path + '/' + 'lvs/octavia-keepalivedlvs-' + + self.listener_id + '.' + 'vrrp.pid') + check_pid_path = (fake_path + '/' + 'lvs/octavia-keepalivedlvs-' + + self.listener_id + '.' + 'check.pid') + + result1, result2, result3 = util.keepalived_lvs_pids_path( + self.listener_id) + + self.assertEqual(pid_path, result1) + self.assertEqual(vrrp_pid_path, result2) + self.assertEqual(check_pid_path, result3) + + def test_keepalived_lvs_cfg_path(self): + fake_path = '/fake/path' + self.CONF.config(group="haproxy_amphora", base_path=fake_path) + + ref_path = (fake_path + '/lvs/octavia-keepalivedlvs-' + + self.listener_id + '.conf') + result = util.keepalived_lvs_cfg_path(self.listener_id) + + self.assertEqual(ref_path, result) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'keepalived_lvs_pids_path') + def test_get_keepalivedlvs_pid(self, mock_path): + fake_path = '/fake/path' + mock_path.return_value = [fake_path] + self.useFixture(test_utils.OpenFixture( + fake_path, ' space data ')).mock_open + result = util.get_keepalivedlvs_pid(self.listener_id) + self.assertEqual(' space data', result) + + @mock.patch('jinja2.FileSystemLoader') + @mock.patch('jinja2.Environment') + @mock.patch('os.path') + @mock.patch('octavia.amphorae.backends.agent.api_server.osutils.' + 'BaseOS.get_os_util') + def test_install_netns_systemd_service(self, mock_get_os_util, + mock_os_path, mock_jinja2_env, + mock_fsloader): + mock_os_util = mock.MagicMock() + mock_os_util.has_ifup_all.return_value = True + mock_get_os_util.return_value = mock_os_util + + mock_os_path.realpath.return_value = '/dir/file' + mock_os_path.dirname.return_value = '/dir/' + mock_os_path.exists.return_value = False + mock_fsloader.return_value = 'fake_loader' + mock_jinja_env = mock.MagicMock() + mock_jinja2_env.return_value = mock_jinja_env + mock_template = mock.MagicMock() + mock_template.render.return_value = 'script' + mock_jinja_env.get_template.return_value = mock_template + + m = mock.mock_open() + with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m): + + util.install_netns_systemd_service() + + mock_jinja2_env.assert_called_with(autoescape=True, + loader='fake_loader') + + mock_jinja_env.get_template.assert_called_once_with( + consts.AMP_NETNS_SVC_PREFIX + '.systemd.j2') + mock_template.render.assert_called_once_with( + amphora_nsname=consts.AMPHORA_NAMESPACE) + handle = m() + handle.write.assert_called_with('script') + + # Test file exists path we don't over write + mock_jinja_env.get_template.reset_mock() + mock_os_path.exists.return_value = True + util.install_netns_systemd_service() + self.assertFalse(mock_jinja_env.get_template.called) + + @mock.patch('subprocess.check_output') + def test_run_systemctl_command(self, mock_check_output): + + util.run_systemctl_command('test', 'world') + mock_check_output.assert_called_once_with( + ['systemctl', 'test', 'world'], stderr=subprocess.STDOUT, + encoding='utf-8') + + mock_check_output.side_effect = subprocess.CalledProcessError(1, + 'boom') + self.assertRaises(subprocess.CalledProcessError, + util.run_systemctl_command, 'test', 'world') + + mock_check_output.side_effect = subprocess.CalledProcessError(1, + 'boom') + util.run_systemctl_command('test', 'world', False) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.config_path') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
+ 'keepalived_lvs_cfg_path') + @mock.patch('os.path.exists') + def test_get_listener_protocol(self, mock_path_exists, mock_lvs_path, + mock_cfg_path): + mock_lvs_path.return_value = '/here' + mock_cfg_path.return_value = '/there' + mock_path_exists.side_effect = [True, False, True, False, False] + + result = util.get_backend_for_lb_object('1') + + mock_cfg_path.assert_called_once_with('1') + mock_path_exists.assert_called_once_with('/there') + self.assertFalse(mock_lvs_path.called) + self.assertEqual(consts.HAPROXY_BACKEND, result) + + mock_cfg_path.reset_mock() + + result = util.get_backend_for_lb_object('2') + + mock_cfg_path.assert_called_once_with('2') + mock_lvs_path.assert_called_once_with('2') + self.assertEqual(consts.LVS_BACKEND, result) + + mock_cfg_path.reset_mock() + mock_lvs_path.reset_mock() + + result = util.get_backend_for_lb_object('3') + + mock_cfg_path.assert_called_once_with('3') + mock_lvs_path.assert_called_once_with('3') + self.assertIsNone(result) + + def test_parse_haproxy_config(self): + self.CONF.config(group="haproxy_amphora", + base_cert_dir='/fake_cert_dir') + FAKE_CRT_LIST_FILENAME = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + 'sample_loadbalancer_id_1/sample_listener_id_1.pem') + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True, sni=True)]) + + path = util.config_path(LISTENER_ID1) + self.useFixture(test_utils.OpenFixture(path, rendered_obj)) + + res = util.parse_haproxy_file(LISTENER_ID1) + listener_dict = res[1]['sample_listener_id_1'] + # NOTE: parse_haproxy_file makes mode TERMINATED_HTTPS even though + # the haproxy.cfg needs mode HTTP + self.assertEqual('TERMINATED_HTTPS', listener_dict['mode']) + self.assertEqual('/var/lib/octavia/sample_loadbalancer_id_1.sock', + res[0]) + self.assertEqual(FAKE_CRT_LIST_FILENAME, listener_dict['ssl_crt']) + + # render_template_tls_no_sni + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True)]) + self.useFixture(test_utils.OpenFixture(path, rendered_obj)) + + res = util.parse_haproxy_file(LISTENER_ID1) + listener_dict = res[1]['sample_listener_id_1'] + self.assertEqual('TERMINATED_HTTPS', listener_dict['mode']) + self.assertEqual(BASE_AMP_PATH + '/sample_loadbalancer_id_1.sock', + res[0]) + self.assertEqual(FAKE_CRT_LIST_FILENAME, listener_dict['ssl_crt']) + + # render_template_http + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple()]) + + self.useFixture(test_utils.OpenFixture(path, rendered_obj)) + + res = util.parse_haproxy_file(LISTENER_ID1) + listener_dict = res[1]['sample_listener_id_1'] + self.assertEqual('HTTP', listener_dict['mode']) + self.assertEqual(BASE_AMP_PATH + '/sample_loadbalancer_id_1.sock', + res[0]) + self.assertIsNone(listener_dict.get('ssl_crt', None)) + + # template_https + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple(proto='HTTPS')]) + self.useFixture(test_utils.OpenFixture(path, rendered_obj)) + + res = util.parse_haproxy_file(LISTENER_ID1) + listener_dict = res[1]['sample_listener_id_1'] + self.assertEqual('TCP', listener_dict['mode']) + self.assertEqual(BASE_AMP_PATH + 
'/sample_loadbalancer_id_1.sock', + res[0]) + self.assertIsNone(listener_dict.get('ssl_crt', None)) + + # Bogus format + self.useFixture(test_utils.OpenFixture(path, 'Bogus')) + try: + res = util.parse_haproxy_file(LISTENER_ID1) + self.fail("No Exception?") + except util.ParsingError: + pass + + # Bad listener mode + fake_cfg = f'stats socket foo\nfrontend {LISTENER_ID1}\nmode\n' + self.useFixture(test_utils.OpenFixture(path, fake_cfg)) + self.assertRaises(util.ParsingError, util.parse_haproxy_file, + LISTENER_ID1) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_lvs_listeners') + @mock.patch('os.makedirs') + @mock.patch('os.path.exists') + @mock.patch('os.listdir') + @mock.patch('os.path.join') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_loadbalancers') + @mock.patch('octavia.amphorae.backends.agent.api_server.util' + '.haproxy_sock_path') + def test_vrrp_check_script_update(self, mock_sock_path, mock_get_lbs, + mock_join, mock_listdir, mock_exists, + mock_makedirs, mock_get_listeners): + mock_get_lbs.return_value = ['abc', LB_ID1] + mock_sock_path.return_value = 'listener.sock' + mock_exists.side_effect = [False, False, True] + mock_get_lbs.side_effect = [['abc', LB_ID1], ['abc', LB_ID1], []] + mock_get_listeners.return_value = [] + + # Test the stop action path + cmd = 'haproxy-vrrp-check ' + ' '.join(['listener.sock']) + '; exit $?' + path = util.keepalived_dir() + m = self.useFixture(test_utils.OpenFixture(path)).mock_open + + util.vrrp_check_script_update(LB_ID1, 'stop') + + handle = m() + handle.write.assert_called_once_with(cmd) + + # Test the start action path + cmd = ('haproxy-vrrp-check ' + ' '.join(['listener.sock', + 'listener.sock']) + '; exit ' + '$?') + m = self.useFixture(test_utils.OpenFixture(path)).mock_open + util.vrrp_check_script_update(LB_ID1, 'start') + handle = m() + handle.write.assert_called_once_with(cmd) + + # Test the path with existing keepalived directory and no LBs + mock_makedirs.reset_mock() + cmd = 'exit 1' + m = self.useFixture(test_utils.OpenFixture(path)).mock_open + + util.vrrp_check_script_update(LB_ID1, 'start') + + handle = m() + handle.write.assert_called_once_with(cmd) + mock_makedirs.assert_has_calls( + [mock.call(util.keepalived_dir(), exist_ok=True), + mock.call(util.keepalived_check_scripts_dir(), exist_ok=True)]) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.config_path') + def test_get_haproxy_vip_addresses(self, mock_cfg_path): + FAKE_PATH = 'fake_path' + mock_cfg_path.return_value = FAKE_PATH + self.useFixture( + test_utils.OpenFixture(FAKE_PATH, 'no match')).mock_open() + + # Test with no matching lines in the config file + self.assertEqual([], util.get_haproxy_vip_addresses(LB_ID1)) + mock_cfg_path.assert_called_once_with(LB_ID1) + + # Test with a matching bind line + mock_cfg_path.reset_mock() + test_data = 'no match\nbind 203.0.113.43:1\nbogus line' + self.useFixture( + test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() + expected_result = ['203.0.113.43'] + self.assertEqual(expected_result, + util.get_haproxy_vip_addresses(LB_ID1)) + mock_cfg_path.assert_called_once_with(LB_ID1) + + # Test with a matching bind line multiple binds + mock_cfg_path.reset_mock() + test_data = 'no match\nbind 203.0.113.44:1234, 203.0.113.45:4321' + self.useFixture( + test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() + expected_result = ['203.0.113.44', '203.0.113.45'] + self.assertEqual(expected_result, + util.get_haproxy_vip_addresses(LB_ID1)) + 
mock_cfg_path.assert_called_once_with(LB_ID1) + + # Test with a bogus bind line + mock_cfg_path.reset_mock() + test_data = 'no match\nbind\nbogus line' + self.useFixture( + test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() + self.assertEqual([], util.get_haproxy_vip_addresses(LB_ID1)) + mock_cfg_path.assert_called_once_with(LB_ID1) + + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'keepalived_lvs_cfg_path') + def test_get_lvs_vip_addresses(self, mock_cfg_path): + FAKE_PATH = 'fake_path' + mock_cfg_path.return_value = FAKE_PATH + self.useFixture( + test_utils.OpenFixture(FAKE_PATH, 'no match')).mock_open() + + # Test with no matching lines in the config file + self.assertEqual([], util.get_lvs_vip_addresses(LB_ID1)) + mock_cfg_path.assert_called_once_with(LB_ID1) + + # Test with 2 matching lines + mock_cfg_path.reset_mock() + test_data = ('virtual_server_group ipv4-group {\n' + ' 203.0.113.43 1\n' + ' 203.0.113.44 1\n' + '}\n') + self.useFixture( + test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() + expected_result = ['203.0.113.43', '203.0.113.44'] + self.assertEqual(expected_result, + util.get_lvs_vip_addresses(LB_ID1)) + mock_cfg_path.assert_called_once_with(LB_ID1) + + # Test with 2 groups + mock_cfg_path.reset_mock() + test_data = ('virtual_server_group ipv4-group {\n' + ' 203.0.113.43 1\n' + '}\n' + 'virtual_server_group ipv6-group {\n' + ' 2d01:27::1 2\n' + ' 2d01:27::2 2\n' + '}\n') + self.useFixture( + test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open() + expected_result = ['203.0.113.43', '2d01:27::1', '2d01:27::2'] + self.assertEqual(expected_result, + util.get_lvs_vip_addresses(LB_ID1)) + mock_cfg_path.assert_called_once_with(LB_ID1) + + @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.' + 'send_ip_advertisement') + @mock.patch('octavia.amphorae.backends.utils.network_utils.' + 'get_interface_name') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_haproxy_vip_addresses') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_lvs_vip_addresses') + def test_send_vip_advertisements(self, mock_get_lvs_vip_addrs, + mock_get_vip_addrs, + mock_get_int_name, mock_send_advert): + mock_get_vip_addrs.side_effect = [[], ['203.0.113.46'], + Exception('boom')] + mock_get_int_name.return_value = 'fake0' + + # Test no VIPs + util.send_vip_advertisements(LB_ID1) + mock_get_vip_addrs.assert_called_once_with(LB_ID1) + mock_get_lvs_vip_addrs.assert_not_called() + mock_get_int_name.assert_not_called() + mock_send_advert.assert_not_called() + + # Test with a VIP + mock_get_vip_addrs.reset_mock() + mock_get_int_name.reset_mock() + mock_send_advert.reset_mock() + util.send_vip_advertisements(LB_ID1) + mock_get_vip_addrs.assert_called_once_with(LB_ID1) + mock_get_lvs_vip_addrs.assert_not_called() + mock_get_int_name.assert_called_once_with( + '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE) + mock_send_advert.assert_called_once_with( + 'fake0', '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE) + + # Test with an exception (should not raise) + mock_get_vip_addrs.reset_mock() + mock_get_int_name.reset_mock() + mock_send_advert.reset_mock() + util.send_vip_advertisements(LB_ID1) + mock_get_int_name.assert_not_called() + mock_send_advert.assert_not_called() + + @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.' + 'send_ip_advertisement') + @mock.patch('octavia.amphorae.backends.utils.network_utils.' + 'get_interface_name') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' 
+ 'get_haproxy_vip_addresses') + @mock.patch('octavia.amphorae.backends.agent.api_server.util.' + 'get_lvs_vip_addresses') + def test_send_vip_advertisements_udp(self, mock_get_lvs_vip_addrs, + mock_get_vip_addrs, + mock_get_int_name, mock_send_advert): + mock_get_lvs_vip_addrs.side_effect = [[], ['203.0.113.46'], + Exception('boom')] + mock_get_int_name.return_value = 'fake0' + + # Test no VIPs + util.send_vip_advertisements(listener_id=LISTENER_ID1) + mock_get_lvs_vip_addrs.assert_called_once_with(LISTENER_ID1) + mock_get_vip_addrs.assert_not_called() + mock_get_int_name.assert_not_called() + mock_send_advert.assert_not_called() + + # Test with a VIP + mock_get_lvs_vip_addrs.reset_mock() + mock_get_int_name.reset_mock() + mock_send_advert.reset_mock() + util.send_vip_advertisements(listener_id=LISTENER_ID1) + mock_get_lvs_vip_addrs.assert_called_once_with(LISTENER_ID1) + mock_get_vip_addrs.assert_not_called() + mock_get_int_name.assert_called_once_with( + '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE) + mock_send_advert.assert_called_once_with( + 'fake0', '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE) + + # Test with an exception (should not raise) + mock_get_lvs_vip_addrs.reset_mock() + mock_get_int_name.reset_mock() + mock_send_advert.reset_mock() + util.send_vip_advertisements(listener_id=LISTENER_ID1) + mock_get_int_name.assert_not_called() + mock_send_advert.assert_not_called() + + @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.' + 'send_ip_advertisement') + @mock.patch('octavia.amphorae.backends.utils.network_utils.' + 'get_interface_name') + def test_send_member_advertisements(self, mock_get_int_name, + mock_send_advert): + # IPv4 fixed_ips + mock_get_int_name.side_effect = ['fake0', 'fake1'] + fixed_ips = [{'ip_address': '192.0.2.1'}, + {'ip_address': '192.2.0.2'}] + util.send_member_advertisements(fixed_ips) + mock_send_advert.assert_has_calls( + [mock.call('fake0', fixed_ips[0]['ip_address'], + net_ns=consts.AMPHORA_NAMESPACE), + mock.call('fake1', fixed_ips[1]['ip_address'], + net_ns=consts.AMPHORA_NAMESPACE)]) + + # Mixed IPv4/IPv6 + mock_send_advert.reset_mock() + mock_get_int_name.side_effect = ['fake0', 'fake1'] + fixed_ips = [{'ip_address': '192.0.2.1'}, + {'ip_address': '2001:db8::2'}] + util.send_member_advertisements(fixed_ips) + mock_send_advert.assert_has_calls( + [mock.call('fake0', fixed_ips[0]['ip_address'], + net_ns=consts.AMPHORA_NAMESPACE), + mock.call('fake1', fixed_ips[1]['ip_address'], + net_ns=consts.AMPHORA_NAMESPACE)]) + + # Exception + mock_send_advert.reset_mock() + mock_get_int_name.side_effect = Exception('ERROR') + util.send_member_advertisements(fixed_ips) + mock_send_advert.assert_not_called() diff --git a/octavia/tests/unit/amphorae/backends/agent/test_agent_jinja_cfg.py b/octavia/tests/unit/amphorae/backends/agent/test_agent_jinja_cfg.py new file mode 100644 index 0000000000..065abf41ed --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/agent/test_agent_jinja_cfg.py @@ -0,0 +1,168 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_config import fixture as oslo_fixture
+from oslo_utils import uuidutils
+
+from octavia.amphorae.backends.agent import agent_jinja_cfg
+from octavia.common import constants
+import octavia.tests.unit.base as base
+
+AMP_ID = uuidutils.generate_uuid()
+
+
+class AgentJinjaTestCase(base.TestCase):
+    def setUp(self):
+        super().setUp()
+
+        self.conf = oslo_fixture.Config(cfg.CONF)
+        self.conf.config(debug=False)
+        self.conf.config(group="amphora_agent",
+                         agent_server_ca='/etc/octavia/certs/client_ca.pem')
+        self.conf.config(group="amphora_agent",
+                         agent_server_cert='/etc/octavia/certs/server.pem')
+        self.conf.config(group="amphora_agent",
+                         agent_server_network_dir='/etc/network/interfaces.d/')
+        self.conf.config(group='amphora_agent',
+                         amphora_udp_driver='keepalived_lvs')
+        self.conf.config(group="haproxy_amphora",
+                         base_cert_dir='/var/lib/octavia/certs')
+        self.conf.config(group="haproxy_amphora", base_path='/var/lib/octavia')
+        self.conf.config(group="haproxy_amphora", bind_host='0.0.0.0')
+        self.conf.config(group="haproxy_amphora", bind_port=9443)
+        self.conf.config(group="haproxy_amphora",
+                         haproxy_cmd='/usr/sbin/haproxy')
+        self.conf.config(group="health_manager",
+                         controller_ip_port_list=['192.0.2.10:5555'])
+        self.conf.config(group="health_manager", heartbeat_interval=10)
+        self.conf.config(group="health_manager", heartbeat_key='TEST')
+
+    def test_build_agent_config(self):
+        ajc = agent_jinja_cfg.AgentJinjaTemplater()
+        # Set the log facilities explicitly; test execution order could
+        # otherwise influence this test and the one below
+        self.conf.config(group="amphora_agent",
+                         administrative_log_facility=1)
+        self.conf.config(group="amphora_agent", user_log_facility=0)
+        expected_config = ('\n[DEFAULT]\n'
+                           'debug = False\n'
+                           'use_syslog = True\n'
+                           'syslog_log_facility = LOG_LOCAL1\n\n'
+                           '[haproxy_amphora]\n'
+                           'base_cert_dir = /var/lib/octavia/certs\n'
+                           'base_path = /var/lib/octavia\n'
+                           'bind_host = 0.0.0.0\n'
+                           'bind_port = 9443\n'
+                           'haproxy_cmd = /usr/sbin/haproxy\n'
+                           'user_log_facility = 0\n'
+                           'administrative_log_facility = 1\n\n'
+                           '[health_manager]\n'
+                           'controller_ip_port_list = 192.0.2.10:5555\n'
+                           'heartbeat_interval = 10\n'
+                           'heartbeat_key = TEST\n\n'
+                           '[amphora_agent]\n'
+                           'agent_server_ca = '
+                           '/etc/octavia/certs/client_ca.pem\n'
+                           'agent_server_cert = '
+                           '/etc/octavia/certs/server.pem\n'
+                           'agent_server_network_dir = '
+                           '/etc/network/interfaces.d/\n'
+                           'agent_request_read_timeout = 180\n'
+                           'amphora_id = ' + AMP_ID + '\n'
+                           'amphora_udp_driver = keepalived_lvs\n'
+                           'agent_tls_protocol = TLSv1.2\n\n'
+                           '[controller_worker]\n'
+                           'loadbalancer_topology = ' +
+                           constants.TOPOLOGY_SINGLE)
+        agent_cfg = ajc.build_agent_config(AMP_ID, constants.TOPOLOGY_SINGLE)
+        self.assertEqual(expected_config, agent_cfg)
+
+    def test_build_agent_config_with_interfaces_file(self):
+        ajc = agent_jinja_cfg.AgentJinjaTemplater()
+        self.conf.config(group="amphora_agent",
+                         administrative_log_facility=1)
+        self.conf.config(group="amphora_agent", user_log_facility=0)
+        expected_config = ('\n[DEFAULT]\n'
+                           'debug = False\n'
+                           'use_syslog = True\n'
+                           'syslog_log_facility = LOG_LOCAL1\n\n'
+                           '[haproxy_amphora]\n'
+                           'base_cert_dir = /var/lib/octavia/certs\n'
+                           'base_path = /var/lib/octavia\n'
+                           'bind_host = 0.0.0.0\n'
+                           'bind_port = 9443\n'
+                           'haproxy_cmd = /usr/sbin/haproxy\n'
+                           'user_log_facility = 0\n'
+                           'administrative_log_facility = 1\n\n'
+                           '[health_manager]\n'
+
'controller_ip_port_list = 192.0.2.10:5555\n' + 'heartbeat_interval = 10\n' + 'heartbeat_key = TEST\n\n' + '[amphora_agent]\n' + 'agent_server_ca = ' + '/etc/octavia/certs/client_ca.pem\n' + 'agent_server_cert = ' + '/etc/octavia/certs/server.pem\n' + 'agent_server_network_dir = ' + '/etc/network/interfaces.d/\n' + 'agent_request_read_timeout = 180\n' + 'amphora_id = ' + AMP_ID + '\n' + 'amphora_udp_driver = keepalived_lvs\n' + 'agent_tls_protocol = TLSv1.2\n\n' + '[controller_worker]\n' + 'loadbalancer_topology = ' + + constants.TOPOLOGY_ACTIVE_STANDBY) + agent_cfg = ajc.build_agent_config(AMP_ID, + constants.TOPOLOGY_ACTIVE_STANDBY) + self.assertEqual(expected_config, agent_cfg) + + def test_build_agent_config_with_new_udp_driver(self): + ajc = agent_jinja_cfg.AgentJinjaTemplater() + self.conf.config(group="amphora_agent", + amphora_udp_driver='new_udp_driver') + self.conf.config(group="amphora_agent", + administrative_log_facility=1) + self.conf.config(group="amphora_agent", user_log_facility=0) + expected_config = ('\n[DEFAULT]\n' + 'debug = False\n' + 'use_syslog = True\n' + 'syslog_log_facility = LOG_LOCAL1\n\n' + '[haproxy_amphora]\n' + 'base_cert_dir = /var/lib/octavia/certs\n' + 'base_path = /var/lib/octavia\n' + 'bind_host = 0.0.0.0\n' + 'bind_port = 9443\n' + 'haproxy_cmd = /usr/sbin/haproxy\n' + 'user_log_facility = 0\n' + 'administrative_log_facility = 1\n\n' + '[health_manager]\n' + 'controller_ip_port_list = 192.0.2.10:5555\n' + 'heartbeat_interval = 10\n' + 'heartbeat_key = TEST\n\n' + '[amphora_agent]\n' + 'agent_server_ca = ' + '/etc/octavia/certs/client_ca.pem\n' + 'agent_server_cert = ' + '/etc/octavia/certs/server.pem\n' + 'agent_server_network_dir = ' + '/etc/network/interfaces.d/\n' + 'agent_request_read_timeout = 180\n' + 'amphora_id = ' + AMP_ID + '\n' + 'amphora_udp_driver = new_udp_driver\n' + 'agent_tls_protocol = TLSv1.2\n\n' + '[controller_worker]\n' + 'loadbalancer_topology = ' + + constants.TOPOLOGY_SINGLE) + agent_cfg = ajc.build_agent_config(AMP_ID, constants.TOPOLOGY_SINGLE) + self.assertEqual(expected_config, agent_cfg) diff --git a/octavia/tests/unit/amphorae/backends/health_daemon/__init__.py b/octavia/tests/unit/amphorae/backends/health_daemon/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/health_daemon/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py b/octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py new file mode 100644 index 0000000000..8397c16a9d --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py @@ -0,0 +1,507 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +import json +import os +import queue +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.amphorae.backends.health_daemon import health_daemon +from octavia.common import constants +from octavia.tests.common import utils as test_utils +import octavia.tests.unit.base as base + + +LB_ID1 = uuidutils.generate_uuid() +LISTENER_ID1 = uuidutils.generate_uuid() +LISTENER_ID2 = uuidutils.generate_uuid() +LISTENER_IDS = [LISTENER_ID1, LISTENER_ID2] +AMPHORA_ID = uuidutils.generate_uuid() +BASE_PATH = '/tmp/test' +SAMPLE_POOL_STATUS = { + '432fc8b3-d446-48d4-bb64-13beb90e22bc': { + 'status': 'UP', + 'uuid': '432fc8b3-d446-48d4-bb64-13beb90e22bc', + 'members': {'302e33d9-dee1-4de9-98d5-36329a06fb58': 'DOWN'}}, + '3661ed10-99db-4d2c-bffb-99b60eb876ff': { + 'status': 'UP', + 'uuid': '3661ed10-99db-4d2c-bffb-99b60eb876ff', + 'members': {'e657f950-a6a2-4d28-bffa-0c8a8c05f815': 'DOWN'}}} + +SAMPLE_BOGUS_POOL_STATUS = {LISTENER_ID1: { + 'status': 'UP', + 'uuid': LISTENER_ID1, + 'members': { + '302e33d9-dee1-4de9-98d5-36329a06fb58': + 'DOWN'}}} + +FRONTEND_STATS = {'': '', 'status': 'OPEN', 'lastchg': '', + 'weight': '', 'slim': '2000', 'pid': '1', 'comp_byp': '0', + 'lastsess': '', 'rate_lim': '0', 'check_duration': '', + 'rate': '0', 'req_rate': '0', 'check_status': '', + 'econ': '', 'comp_out': '0', 'wredis': '', 'dresp': '0', + 'ereq': '5', 'tracked': '', 'comp_in': '0', + 'pxname': LISTENER_ID1, + 'dreq': '0', 'hrsp_5xx': '0', 'last_chk': '', + 'check_code': '', 'sid': '0', 'bout': '10', 'hrsp_1xx': '0', + 'qlimit': '', 'hrsp_other': '0', 'bin': '5', 'rtime': '', + 'smax': '0', 'req_tot': '0', 'lbtot': '', 'stot': '0', + 'wretr': '', 'req_rate_max': '0', 'ttime': '', 'iid': '2', + 'hrsp_4xx': '0', 'chkfail': '', 'hanafail': '', + 'downtime': '', 'qcur': '', 'eresp': '', 'comp_rsp': '0', + 'cli_abrt': '', 'ctime': '', 'qtime': '', 'srv_abrt': '', + 'throttle': '', 'last_agt': '', 'scur': '0', 'type': '0', + 'bck': '', 'qmax': '', 'rate_max': '0', 'hrsp_2xx': '0', + 'act': '', 'chkdown': '', 'svname': 'FRONTEND', + 'hrsp_3xx': '0'} +MEMBER_STATS = {'': '', 'status': 'no check', 'lastchg': '', 'weight': '1', + 'slim': '', 'pid': '1', 'comp_byp': '', 'lastsess': '-1', + 'rate_lim': '', 'check_duration': '', 'rate': '0', + 'req_rate': '', 'check_status': '', 'econ': '0', + 'comp_out': '', 'wredis': '0', 'dresp': '0', 'ereq': '', + 'tracked': '', 'comp_in': '', + 'pxname': '432fc8b3-d446-48d4-bb64-13beb90e22bc', + 'dreq': '', 'hrsp_5xx': '0', 'last_chk': '', + 'check_code': '', 'sid': '1', 'bout': '0', 'hrsp_1xx': '0', + 'qlimit': '', 'hrsp_other': '0', 'bin': '0', 'rtime': '0', + 'smax': '0', 'req_tot': '', 'lbtot': '0', 'stot': '0', + 'wretr': '0', 'req_rate_max': '', 'ttime': '0', 'iid': '3', + 'hrsp_4xx': '0', 'chkfail': '', 'hanafail': '0', + 'downtime': '', 'qcur': '0', 'eresp': '0', 'comp_rsp': '', + 'cli_abrt': '0', 'ctime': '0', 'qtime': '0', 'srv_abrt': '0', + 'throttle': '', 'last_agt': '', 'scur': '0', 'type': '2', + 'bck': '0', 'qmax': '0', 'rate_max': '0', 'hrsp_2xx': '0', + 
'act': '1', 'chkdown': '', + 'svname': '302e33d9-dee1-4de9-98d5-36329a06fb58', + 'hrsp_3xx': '0'} +BACKEND_STATS = {'': '', 'status': 'UP', 'lastchg': '122', 'weight': '1', + 'slim': '200', 'pid': '1', 'comp_byp': '0', 'lastsess': '-1', + 'rate_lim': '', 'check_duration': '', 'rate': '0', + 'req_rate': '', 'check_status': '', 'econ': '0', + 'comp_out': '0', 'wredis': '0', 'dresp': '0', 'ereq': '', + 'tracked': '', 'comp_in': '0', + 'pxname': '432fc8b3-d446-48d4-bb64-13beb90e22bc', 'dreq': '0', + 'hrsp_5xx': '0', 'last_chk': '', 'check_code': '', 'sid': '0', + 'bout': '0', 'hrsp_1xx': '0', 'qlimit': '', 'hrsp_other': '0', + 'bin': '0', 'rtime': '0', 'smax': '0', 'req_tot': '', + 'lbtot': '0', 'stot': '0', 'wretr': '0', 'req_rate_max': '', + 'ttime': '0', 'iid': '3', 'hrsp_4xx': '0', 'chkfail': '', + 'hanafail': '', 'downtime': '0', 'qcur': '0', 'eresp': '0', + 'comp_rsp': '0', 'cli_abrt': '0', 'ctime': '0', 'qtime': '0', + 'srv_abrt': '0', 'throttle': '', 'last_agt': '', 'scur': '0', + 'type': '1', 'bck': '0', 'qmax': '0', 'rate_max': '0', + 'hrsp_2xx': '0', 'act': '1', 'chkdown': '0', + 'svname': 'BACKEND', 'hrsp_3xx': '0'} +SAMPLE_STATS = (FRONTEND_STATS, MEMBER_STATS, BACKEND_STATS) + +SAMPLE_STATS_MSG = { + 'listeners': { + LISTENER_ID1: { + 'stats': { + 'totconns': 0, 'conns': 0, + 'tx': 8, 'rx': 4, 'ereq': 5}, + 'status': 'OPEN'}, + }, + 'pools': { + '432fc8b3-d446-48d4-bb64-13beb90e22bc': { + 'members': {'302e33d9-dee1-4de9-98d5-36329a06fb58': 'DOWN'}, + 'status': 'UP'}, + '3661ed10-99db-4d2c-bffb-99b60eb876ff': { + 'members': {'e657f950-a6a2-4d28-bffa-0c8a8c05f815': 'DOWN'}, + 'status': 'UP'}, + }, + 'id': AMPHORA_ID, + 'seq': mock.ANY, + 'ver': health_daemon.MSG_VER +} + +SAMPLE_MSG_HAPROXY_RESTART = { + 'listeners': { + LISTENER_ID1: { + 'stats': { + 'totconns': 0, 'conns': 0, + 'tx': 10, 'rx': 5, 'ereq': 5}, + 'status': 'OPEN'}, + }, + 'pools': { + '432fc8b3-d446-48d4-bb64-13beb90e22bc': { + 'members': {'302e33d9-dee1-4de9-98d5-36329a06fb58': 'DOWN'}, + 'status': 'UP'}, + '3661ed10-99db-4d2c-bffb-99b60eb876ff': { + 'members': {'e657f950-a6a2-4d28-bffa-0c8a8c05f815': 'DOWN'}, + 'status': 'UP'}, + }, + 'id': AMPHORA_ID, + 'seq': mock.ANY, + 'ver': health_daemon.MSG_VER +} + + +class TestHealthDaemon(base.TestCase): + + def setUp(self): + super().setUp() + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="haproxy_amphora", base_path=BASE_PATH) + conf.config(group="amphora_agent", amphora_id=AMPHORA_ID) + file_name = os.path.join(BASE_PATH, "stats_counters") + self.mock_open = self.useFixture( + test_utils.OpenFixture(file_name)).mock_open + + @mock.patch('octavia.amphorae.backends.agent.' + 'api_server.util.get_loadbalancers') + def test_list_sock_stat_files(self, mock_get_listener): + mock_get_listener.return_value = LISTENER_IDS + + health_daemon.list_sock_stat_files() + + files = health_daemon.list_sock_stat_files(BASE_PATH) + + expected_files = {LISTENER_ID1: BASE_PATH + '/' + + LISTENER_ID1 + '.sock', + LISTENER_ID2: BASE_PATH + '/' + + LISTENER_ID2 + '.sock'} + self.assertEqual(expected_files, files) + + @mock.patch('os.kill') + @mock.patch('os.path.isfile') + @mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.time.sleep') + @mock.patch('oslo_config.cfg.CONF.reload_config_files') + @mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.build_stats_message') + @mock.patch('octavia.amphorae.backends.health_daemon.' 
+ 'health_sender.UDPStatusSender') + def test_run_sender(self, mock_UDPStatusSender, mock_build_msg, + mock_reload_cfg, mock_sleep, mock_isfile, mock_kill): + sender_mock = mock.MagicMock() + dosend_mock = mock.MagicMock() + sender_mock.dosend = dosend_mock + mock_UDPStatusSender.return_value = sender_mock + mock_build_msg.side_effect = ['TEST'] + + mock_isfile.return_value = False + + test_queue = queue.Queue() + with mock.patch('time.sleep') as mock_sleep: + mock_sleep.side_effect = Exception('break') + self.assertRaisesRegex(Exception, 'break', + health_daemon.run_sender, test_queue) + + sender_mock.dosend.assert_called_once_with('TEST') + + # Test a reload event + mock_build_msg.reset_mock() + mock_build_msg.side_effect = ['TEST'] + test_queue.put('reload') + with mock.patch('time.sleep') as mock_sleep: + mock_sleep.side_effect = Exception('break') + self.assertRaisesRegex(Exception, 'break', + health_daemon.run_sender, test_queue) + mock_reload_cfg.assert_called_once_with() + + # Test the shutdown path + sender_mock.reset_mock() + dosend_mock.reset_mock() + mock_build_msg.reset_mock() + mock_build_msg.side_effect = ['TEST', 'TEST'] + test_queue.put('shutdown') + health_daemon.run_sender(test_queue) + sender_mock.dosend.assert_called_once_with('TEST') + + # Test an unknown command + mock_build_msg.reset_mock() + mock_build_msg.side_effect = ['TEST'] + test_queue.put('bogus') + with mock.patch('time.sleep') as mock_sleep: + mock_sleep.side_effect = Exception('break') + self.assertRaisesRegex(Exception, 'break', + health_daemon.run_sender, test_queue) + + # Test keepalived config, but no PID + mock_build_msg.reset_mock() + dosend_mock.reset_mock() + mock_isfile.return_value = True + with mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.open', mock.mock_open()) as mock_open: + mock_open.side_effect = FileNotFoundError + test_queue.put('shutdown') + health_daemon.run_sender(test_queue) + mock_build_msg.assert_not_called() + dosend_mock.assert_not_called() + + # Test keepalived config, but PID file error + mock_build_msg.reset_mock() + dosend_mock.reset_mock() + mock_isfile.return_value = True + with mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.open', mock.mock_open()) as mock_open: + mock_open.side_effect = IOError + test_queue.put('shutdown') + health_daemon.run_sender(test_queue) + mock_build_msg.assert_not_called() + dosend_mock.assert_not_called() + + # Test keepalived config, but bogus PID + mock_build_msg.reset_mock() + dosend_mock.reset_mock() + mock_isfile.return_value = True + with mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.open', + mock.mock_open(read_data='foo')) as mock_open: + test_queue.put('shutdown') + health_daemon.run_sender(test_queue) + mock_build_msg.assert_not_called() + dosend_mock.assert_not_called() + + # Test keepalived config, but not running + mock_build_msg.reset_mock() + dosend_mock.reset_mock() + mock_isfile.return_value = True + with mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.open', + mock.mock_open(read_data='999999')) as mock_open: + mock_kill.side_effect = ProccessNotFoundError + test_queue.put('shutdown') + health_daemon.run_sender(test_queue) + mock_build_msg.assert_not_called() + dosend_mock.assert_not_called() + + # Test keepalived config, but process error + mock_build_msg.reset_mock() + dosend_mock.reset_mock() + mock_isfile.return_value = True + with mock.patch('octavia.amphorae.backends.health_daemon.' 
+ 'health_daemon.open', + mock.mock_open(read_data='999999')) as mock_open: + mock_kill.side_effect = OSError + test_queue.put('shutdown') + health_daemon.run_sender(test_queue) + mock_build_msg.assert_not_called() + dosend_mock.assert_not_called() + + # Test with happy keepalive + sender_mock.reset_mock() + dosend_mock.reset_mock() + mock_kill.side_effect = [True] + mock_build_msg.reset_mock() + mock_build_msg.side_effect = ['TEST', 'TEST'] + mock_isfile.return_value = True + test_queue.put('shutdown') + with mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.open', + mock.mock_open(read_data='999999')) as mock_open: + health_daemon.run_sender(test_queue) + sender_mock.dosend.assert_called_once_with('TEST') + + @mock.patch('octavia.amphorae.backends.utils.haproxy_query.HAProxyQuery') + def test_get_stats(self, mock_query): + stats_query_mock = mock.MagicMock() + mock_query.return_value = stats_query_mock + + health_daemon.get_stats('TEST') + + stats_query_mock.show_stat.assert_called_once_with() + stats_query_mock.get_pool_status.assert_called_once_with() + + @mock.patch('octavia.amphorae.backends.utils.haproxy_query.HAProxyQuery') + def test_get_stats_exception(self, mock_query): + mock_query.side_effect = Exception('Boom') + + stats, pool_status = health_daemon.get_stats('TEST') + self.assertEqual([], stats) + self.assertEqual({}, pool_status) + + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'util.is_lb_running') + @mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.get_stats') + @mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.list_sock_stat_files') + def test_build_stats_message(self, mock_list_files, + mock_get_stats, mock_is_running): + health_daemon.COUNTERS = None + health_daemon.COUNTERS_FILE = None + lb1_stats_socket = f'/var/lib/octavia/{LB_ID1}/haproxy.sock' + mock_list_files.return_value = {LB_ID1: lb1_stats_socket} + + mock_is_running.return_value = True + mock_get_stats.return_value = SAMPLE_STATS, SAMPLE_POOL_STATUS + + with mock.patch('os.open'), mock.patch.object( + os, 'fdopen', self.mock_open) as mock_fdopen: + mock_fdopen().read.return_value = json.dumps({ + LISTENER_ID1: {'bin': 1, 'bout': 2}, + }) + msg = health_daemon.build_stats_message() + + self.assertEqual(SAMPLE_STATS_MSG, msg) + + mock_get_stats.assert_any_call(lb1_stats_socket) + mock_fdopen().write.assert_called_once_with(json.dumps({ + LISTENER_ID1: { + 'bin': int(FRONTEND_STATS['bin']), + 'bout': int(FRONTEND_STATS['bout']), + 'ereq': int(FRONTEND_STATS['ereq']), + 'stot': int(FRONTEND_STATS['stot']) + + } + })) + + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'util.is_lb_running') + @mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.get_stats') + @mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.list_sock_stat_files') + def test_build_stats_message_no_listener(self, mock_list_files, + mock_get_stats, + mock_is_running): + health_daemon.COUNTERS = None + health_daemon.COUNTERS_FILE = None + lb1_stats_socket = f'/var/lib/octavia/{LB_ID1}/haproxy.sock' + mock_list_files.return_value = {LB_ID1: lb1_stats_socket} + + mock_is_running.return_value = False + + with mock.patch('os.open'), mock.patch.object( + os, 'fdopen', self.mock_open) as mock_fdopen: + health_daemon.build_stats_message() + + self.assertEqual(0, mock_get_stats.call_count) + self.assertEqual(0, mock_fdopen().read.call_count) + + @mock.patch("octavia.amphorae.backends.utils.keepalivedlvs_query." 
+ "get_lvs_listener_pool_status") + @mock.patch("octavia.amphorae.backends.utils.keepalivedlvs_query." + "get_lvs_listeners_stats") + @mock.patch("octavia.amphorae.backends.agent.api_server.util." + "get_lvs_listeners") + def test_build_stats_message_with_lvs_listener( + self, mock_get_lvs_listeners, + mock_get_listener_stats, mock_get_pool_status): + health_daemon.COUNTERS = None + health_daemon.COUNTERS_FILE = None + udp_listener_id1 = uuidutils.generate_uuid() + udp_listener_id2 = uuidutils.generate_uuid() + udp_listener_id3 = uuidutils.generate_uuid() + pool_id = uuidutils.generate_uuid() + member_id1 = uuidutils.generate_uuid() + member_id2 = uuidutils.generate_uuid() + mock_get_lvs_listeners.return_value = [udp_listener_id1, + udp_listener_id2, + udp_listener_id3] + + mock_get_listener_stats.return_value = { + udp_listener_id1: { + 'status': constants.OPEN, + 'stats': {'bin': 5, 'stot': 5, 'bout': 10, + 'ereq': 0, 'scur': 0}}, + udp_listener_id3: { + 'status': constants.DOWN, + 'stats': {'bin': 0, 'stot': 0, 'bout': 0, + 'ereq': 0, 'scur': 0}} + } + udp_pool_status = { + 'lvs': { + 'uuid': pool_id, + 'status': constants.UP, + 'members': {member_id1: constants.UP, + member_id2: constants.UP}}} + mock_get_pool_status.side_effect = ( + lambda x: udp_pool_status if x == udp_listener_id1 else {}) + # the first listener can get all necessary info. + # the second listener can not get listener stats, so we won't report it + # the third listener can get listener stats, but can not get pool + # status, so the result will just contain the listener status for it. + expected = { + 'listeners': { + udp_listener_id1: { + 'status': constants.OPEN, + 'stats': {'conns': 0, 'totconns': 5, 'ereq': 0, + 'rx': 4, 'tx': 8}}, + udp_listener_id3: { + 'status': constants.DOWN, + 'stats': {'conns': 0, 'totconns': 0, 'ereq': 0, + 'rx': 0, 'tx': 0}}}, + 'pools': { + pool_id: { + 'status': constants.UP, + 'members': { + member_id1: constants.UP, + member_id2: constants.UP}}}, + 'id': AMPHORA_ID, + 'seq': mock.ANY, 'ver': health_daemon.MSG_VER} + + with mock.patch('os.open'), mock.patch.object( + os, 'fdopen', self.mock_open) as mock_fdopen: + mock_fdopen().read.return_value = json.dumps({ + udp_listener_id1: { + 'bin': 1, 'bout': 2, "ereq": 0, "stot": 0} + }) + msg = health_daemon.build_stats_message() + + self.assertEqual(expected, msg) + mock_fdopen().write.assert_called_once_with(json.dumps({ + udp_listener_id1: {'bin': 5, 'bout': 10, 'ereq': 0, 'stot': 5}, + udp_listener_id3: {'bin': 0, 'bout': 0, 'ereq': 0, 'stot': 0}, + })) + + @mock.patch('octavia.amphorae.backends.agent.api_server.' + 'util.is_lb_running') + @mock.patch('octavia.amphorae.backends.health_daemon.' + 'health_daemon.get_stats') + @mock.patch('octavia.amphorae.backends.health_daemon.' 
+ 'health_daemon.list_sock_stat_files') + def test_haproxy_restart(self, mock_list_files, + mock_get_stats, mock_is_running): + health_daemon.COUNTERS = None + health_daemon.COUNTERS_FILE = None + lb1_stats_socket = f'/var/lib/octavia/{LB_ID1}/haproxy.sock' + mock_list_files.return_value = {LB_ID1: lb1_stats_socket} + + mock_is_running.return_value = True + mock_get_stats.return_value = SAMPLE_STATS, SAMPLE_POOL_STATUS + + with mock.patch('os.open'), mock.patch.object( + os, 'fdopen', self.mock_open) as mock_fdopen: + mock_fdopen().read.return_value = json.dumps({ + LISTENER_ID1: {'bin': 15, 'bout': 20}, + }) + msg = health_daemon.build_stats_message() + + self.assertEqual(SAMPLE_MSG_HAPROXY_RESTART, msg) + + mock_get_stats.assert_any_call(lb1_stats_socket) + mock_fdopen().write.assert_called_once_with(json.dumps({ + LISTENER_ID1: { + 'bin': int(FRONTEND_STATS['bin']), + 'bout': int(FRONTEND_STATS['bout']), + 'ereq': int(FRONTEND_STATS['ereq']), + 'stot': int(FRONTEND_STATS['stot']) + + } + })) + + +class FileNotFoundError(IOError): + errno = 2 + + +class ProccessNotFoundError(OSError): + errno = 3 diff --git a/octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py b/octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py new file mode 100644 index 0000000000..aa732b5181 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py @@ -0,0 +1,180 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
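+
+# NOTE: SAMPLE_MSG_BIN (defined below) is the wrapped form of SAMPLE_MSG.
+# As a rough sketch (assuming the sender zlib-compresses the JSON payload
+# and appends a hex-encoded HMAC digest keyed with KEY; the leading '78da'
+# in the sample is the zlib header), an equivalent envelope could be built
+# with:
+#
+#     import hashlib, hmac, json, zlib
+#     payload = zlib.compress(json.dumps({'testkey': 'TEST'}).encode())
+#     digest = hmac.new(b'TEST', payload, hashlib.sha256).hexdigest()
+#     envelope = payload + digest.encode()
+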
+import binascii +import random +import socket +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +from octavia.amphorae.backends.health_daemon import health_sender +from octavia.tests.unit import base + + +IP_PORT = ['192.0.2.10:5555', '192.0.2.10:5555'] +KEY = 'TEST' +PORT = random.randrange(1, 9000) +SAMPLE_MSG = {'testkey': 'TEST'} +SAMPLE_MSG_BIN = binascii.unhexlify('78daab562a492d2ec94ead54b252500a710d0e51a' + 'a050041b506243538303665356331393731653739' + '39353138313833393465613665373161643938396' + '66639353039343566393537336634616236663833' + '653235646238656437') + + +class TestHealthSender(base.TestCase): + + def setUp(self): + super().setUp() + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group="health_manager", + controller_ip_port_list=IP_PORT) + self.conf.config(group="health_manager", + heartbeat_key=KEY) + + @mock.patch('socket.getaddrinfo') + @mock.patch('socket.socket') + def test_sender(self, mock_socket, mock_getaddrinfo): + socket_mock = mock.MagicMock() + mock_socket.return_value = socket_mock + sendto_mock = mock.MagicMock() + socket_mock.sendto = sendto_mock + + # Test when no addresses are returned + self.conf.config(group="health_manager", + controller_ip_port_list='') + sender = health_sender.UDPStatusSender() + sender.dosend(SAMPLE_MSG) + sendto_mock.reset_mock() + + # Test IPv4 path + self.conf.config(group="health_manager", + controller_ip_port_list=['192.0.2.20:80']) + mock_getaddrinfo.return_value = [(socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_UDP, + '', + ('192.0.2.20', 80))] + + sender = health_sender.UDPStatusSender() + sender.dosend(SAMPLE_MSG) + + sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN, + ('192.0.2.20', 80)) + sendto_mock.reset_mock() + + # Test IPv6 path + self.conf.config(group="health_manager", + controller_ip_port_list=['2001:0db8::f00d:80']) + mock_getaddrinfo.return_value = [(socket.AF_INET6, + socket.SOCK_DGRAM, + socket.IPPROTO_UDP, + '', + ('2001:db8::f00d', 80, 0, 0))] + + sender = health_sender.UDPStatusSender() + + sender.dosend(SAMPLE_MSG) + + sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN, + ('2001:db8::f00d', 80, 0, 0)) + + sendto_mock.reset_mock() + + # Test IPv6 path enclosed within square brackets ("[" and "]"). 
+        self.conf.config(group="health_manager",
+                         controller_ip_port_list=['[2001:0db8::f00d]:80'])
+        mock_getaddrinfo.return_value = [(socket.AF_INET6,
+                                          socket.SOCK_DGRAM,
+                                          socket.IPPROTO_UDP,
+                                          '',
+                                          ('2001:db8::f00d', 80, 0, 0))]
+
+        sender = health_sender.UDPStatusSender()
+
+        sender.dosend(SAMPLE_MSG)
+
+        sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN,
+                                            ('2001:db8::f00d', 80, 0, 0))
+
+        sendto_mock.reset_mock()
+
+        # Test IPv6 link-local address path
+        self.conf.config(
+            group="health_manager",
+            controller_ip_port_list=['fe80::00ff:fe00:cafe%eth0:80'])
+        mock_getaddrinfo.return_value = [(socket.AF_INET6,
+                                          socket.SOCK_DGRAM,
+                                          socket.IPPROTO_UDP,
+                                          '',
+                                          ('fe80::ff:fe00:cafe', 80, 0, 2))]
+
+        sender = health_sender.UDPStatusSender()
+
+        sender.dosend(SAMPLE_MSG)
+
+        sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN,
+                                            ('fe80::ff:fe00:cafe', 80, 0, 2))
+
+        sendto_mock.reset_mock()
+
+        # Test socket error
+        self.conf.config(group="health_manager",
+                         controller_ip_port_list=['2001:0db8::f00d:80'])
+        mock_getaddrinfo.return_value = [(socket.AF_INET6,
+                                          socket.SOCK_DGRAM,
+                                          socket.IPPROTO_UDP,
+                                          '',
+                                          ('2001:db8::f00d', 80, 0, 0))]
+        socket_mock.sendto.side_effect = socket.error
+
+        sender = health_sender.UDPStatusSender()
+
+        # Should not raise an exception
+        sender.dosend(SAMPLE_MSG)
+
+        # Test a controller_ip_port_list update
+        sendto_mock.reset_mock()
+        mock_getaddrinfo.reset_mock()
+        self.conf.config(group="health_manager",
+                         controller_ip_port_list=['192.0.2.20:80'])
+        mock_getaddrinfo.return_value = [(socket.AF_INET,
+                                          socket.SOCK_DGRAM,
+                                          socket.IPPROTO_UDP,
+                                          '',
+                                          ('192.0.2.20', 80))]
+        sender = health_sender.UDPStatusSender()
+        sender.dosend(SAMPLE_MSG)
+        sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN,
+                                            ('192.0.2.20', 80))
+        mock_getaddrinfo.assert_called_once_with('192.0.2.20', '80',
+                                                 0, socket.SOCK_DGRAM)
+        sendto_mock.reset_mock()
+        mock_getaddrinfo.reset_mock()
+
+        self.conf.config(group="health_manager",
+                         controller_ip_port_list=['192.0.2.21:81'])
+        mock_getaddrinfo.return_value = [(socket.AF_INET,
+                                          socket.SOCK_DGRAM,
+                                          socket.IPPROTO_UDP,
+                                          '',
+                                          ('192.0.2.21', 81))]
+        sender.dosend(SAMPLE_MSG)
+        mock_getaddrinfo.assert_called_once_with('192.0.2.21', '81',
+                                                 0, socket.SOCK_DGRAM)
+        sendto_mock.assert_called_once_with(SAMPLE_MSG_BIN,
+                                            ('192.0.2.21', 81))
+        sendto_mock.reset_mock()
+        mock_getaddrinfo.reset_mock()
diff --git a/octavia/tests/unit/amphorae/backends/health_daemon/test_status_message.py b/octavia/tests/unit/amphorae/backends/health_daemon/test_status_message.py
new file mode 100644
index 0000000000..37362d8a4c
--- /dev/null
+++ b/octavia/tests/unit/amphorae/backends/health_daemon/test_status_message.py
@@ -0,0 +1,56 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +import uuid + +from octavia.amphorae.backends.health_daemon import status_message +from octavia.common import exceptions +from octavia.tests.unit import base + + +class TestEnvelope(base.TestCase): + def setUp(self): + super().setUp() + + def test_message_hmac(self): + seq = 42 + for i in range(0, 16): + statusMsg = {'seq': seq, + 'status': 'OK', + 'id': str(uuid.uuid4())} + envelope = status_message.wrap_envelope(statusMsg, 'samplekey1') + obj = status_message.unwrap_envelope(envelope, 'samplekey1') + self.assertEqual('OK', obj['status']) + self.assertEqual(seq, obj['seq']) + seq += 1 + args = (envelope, 'samplekey?') + self.assertRaises(exceptions.InvalidHMACException, + status_message.unwrap_envelope, *args) + + def test_message_hmac_compatibility(self): + seq = 42 + statusMsg = {'seq': seq, + 'status': 'OK', + 'id': str(uuid.uuid4())} + + envelope = status_message.wrap_envelope(statusMsg, 'samplekey1', + hex=False) + obj = status_message.unwrap_envelope(envelope, 'samplekey1') + + self.assertEqual('OK', obj['status']) + self.assertEqual(seq, obj['seq']) + + args = (envelope, 'samplekey?') + self.assertRaises(exceptions.InvalidHMACException, + status_message.unwrap_envelope, *args) diff --git a/octavia/tests/unit/amphorae/backends/utils/__init__.py b/octavia/tests/unit/amphorae/backends/utils/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/utils/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/backends/utils/test_haproxy_query.py b/octavia/tests/unit/amphorae/backends/utils/test_haproxy_query.py new file mode 100644 index 0000000000..4650a15828 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/utils/test_haproxy_query.py @@ -0,0 +1,173 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
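+
+# NOTE: STATS_SOCKET_SAMPLE (defined below) mimics the CSV that HAProxy
+# returns for a "show stat" command on its stats socket: a comment header
+# row of column names (pxname, svname, ...) followed by one row per
+# frontend, server and backend. As an illustrative sketch only (the socket
+# path is hypothetical), the same data could be pulled from a live socket
+# with:
+#
+#     import socket
+#     sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+#     sock.connect('/var/lib/octavia/sample_loadbalancer_id_1.sock')
+#     sock.send(b'show stat\n')
+#     print(sock.recv(65536).decode('utf-8'))
+#     sock.close()
+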
+import socket +from unittest import mock + +from octavia.amphorae.backends.utils import haproxy_query as query +from octavia.common import constants +from octavia.common import utils as octavia_utils +import octavia.tests.unit.base as base + +STATS_SOCKET_SAMPLE = ( + "# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq," + "econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg," + "downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim," + "rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp" + "_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot" + ",cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk," + "last_agt,qtime,ctime,rtime,ttime,\n" + "http-servers:listener-id,id-34821,0,0,0,0,,0,0,0,,0,,0,0,0,0,DOWN,1,1,0," + "1,1,575,575,,1,3,1,,0,,2,0,,0,L4TOUT,,30001,0,0,0,0,0,0,0,,,,0,0,,,,,-1,," + ",0,0,0,0,\n" + "http-servers:listener-id,id-34824,0,0,0,0,,0,0,0,,0,,0,0,0,0,DOWN,1,1,0," + "1,1,567,567,,1,3,2,,0,,2,0,,0,L4TOUT,,30001,0,0,0,0,0,0,0,,,,0,0,,,,,-1,," + ",0,0,0,0,\n" + "http-servers:listener-id,BACKEND,0,0,0,0,200,0,0,0,0,0,,0,0,0,0,DOWN,0,0," + "0,,1,567,567,,1,3,0,,0,,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,0,0,0,0,-1,,,0,0,0," + "0,\n" + "tcp-servers:listener-id,id-34833,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,1,1," + "560,560,,1,5,1,,0,,2,0,,0,L4TOUT,,30000,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0," + "\n" + "tcp-servers:listener-id,id-34836,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,0,1,0,1,1," + "552,552,,1,5,2,,0,,2,0,,0,L4TOUT,,30001,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0," + "\n" + "tcp-servers:listener-id,id-34839,0,0,0,0,,0,0,0,,0,,0,0,0,0,DRAIN,0,1,0," + "0,0,552,0,,1,5,2,,0,,2,0,,0,L7OK,,30001,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0," + "\n" + "tcp-servers:listener-id,id-34842,0,0,0,0,,0,0,0,,0,,0,0,0,0,MAINT,0,1,0," + "0,0,552,0,,1,5,2,,0,,2,0,,0,L7OK,,30001,,,,,,,0,,,,0,0,,,,,-1,,,0,0,0,0," + "\n" + "tcp-servers:listener-id,BACKEND,0,0,0,0,200,0,0,0,0,0,,0,0,0,0,UP,1,0,0,," + "1,552,552,,1,5,0,,0,,1,0,,0,,,,,,,,,,,,,,0,0,0,0,0,0,-1,,,0,0,0,0," +) + +INFO_SOCKET_SAMPLE = ( + 'Name: HAProxy\nVersion: 1.5.3\nRelease_date: 2014/07/25\nNbproc: 1\n' + 'Process_num: 1\nPid: 2238\nUptime: 0d 2h22m17s\nUptime_sec: 8537\n' + 'Memmax_MB: 0\nUlimit-n: 4031\nMaxsock: 4031\nMaxconn: 2000\n' + 'Hard_maxconn: 2000\nCurrConns: 0\nCumConns: 32\nCumReq: 32\n' + 'MaxSslConns: 0\nCurrSslConns: 0\nCumSslConns: 0\nMaxpipes: 0\n' + 'PipesUsed: 0\nPipesFree: 0\nConnRate: 0\nConnRateLimit: 0\n' + 'MaxConnRate: 0\nSessRate: 0\nSessRateLimit: 0\nMaxSessRate: 0\n' + 'SslRate:0\nSslRateLimit: 0\nMaxSslRate: 0\nSslFrontendKeyRate: 0\n' + 'SslFrontendMaxKeyRate: 0\nSslFrontendSessionReuse_pct: 0\n' + 'SslBackendKeyRate: 0\nSslBackendMaxKeyRate: 0\nSslCacheLookups: 0\n' + 'SslCacheMisses: 0\nCompressBpsIn: 0\nCompressBpsOut: 0\n' + 'CompressBpsRateLim: 0\nZlibMemUsage: 0\nMaxZlibMemUsage: 0\nTasks: 4\n' + 'Run_queue: 1\nIdle_pct: 100\nnode: amphora-abd35de5-e377-49c5-be32\n' + 'description:' +) + + +class QueryTestCase(base.TestCase): + def setUp(self): + self.q = query.HAProxyQuery('') + super().setUp() + + @mock.patch('socket.socket') + def test_query(self, mock_socket): + + sock = mock.MagicMock() + sock.connect.side_effect = [None, socket.error] + sock.recv.side_effect = ['testdata', None] + mock_socket.return_value = sock + + self.q._query('test') + + sock.connect.assert_called_once_with('') + sock.send.assert_called_once_with(octavia_utils.b('test\n')) + sock.recv.assert_called_with(1024) + self.assertTrue(sock.close.called) + + 
self.assertRaisesRegex(Exception, + 'HAProxy \'test\' query failed.', + self.q._query, 'test') + + def test_get_pool_status(self): + query_mock = mock.Mock() + self.q._query = query_mock + query_mock.return_value = STATS_SOCKET_SAMPLE + self.assertEqual( + {'tcp-servers:listener-id': { + 'status': constants.UP, + 'listener_uuid': 'listener-id', + 'pool_uuid': 'tcp-servers', + 'members': + {'id-34833': constants.UP, + 'id-34836': constants.DRAIN, + 'id-34839': constants.DRAIN, + 'id-34842': constants.MAINT}}, + 'http-servers:listener-id': { + 'status': constants.DOWN, + 'listener_uuid': 'listener-id', + 'pool_uuid': 'http-servers', + 'members': + {'id-34821': constants.DOWN, + 'id-34824': constants.DOWN}}}, + self.q.get_pool_status() + ) + + def test_show_info(self): + query_mock = mock.Mock() + self.q._query = query_mock + query_mock.return_value = INFO_SOCKET_SAMPLE + self.assertEqual( + {'SslRateLimit': '0', 'SessRateLimit': '0', 'Version': '1.5.3', + 'Hard_maxconn': '2000', 'Ulimit-n': '4031', 'PipesFree': '0', + 'SslRate': '0', 'ZlibMemUsage': '0', 'CumConns': '32', + 'ConnRate': '0', 'Memmax_MB': '0', 'CompressBpsOut': '0', + 'MaxConnRate': '0', 'Uptime_sec': '8537', 'SslCacheMisses': '0', + 'MaxZlibMemUsage': '0', 'SslCacheLookups': '0', + 'CurrSslConns': '0', 'SslBackendKeyRate': '0', + 'CompressBpsRateLim': '0', 'Run_queue': '1', 'CumReq': '32', + 'SslBackendMaxKeyRate': '0', 'SslFrontendSessionReuse_pct': '0', + 'Nbproc': '1', 'Tasks': '4', 'Maxpipes': '0', 'Maxconn': '2000', + 'Pid': '2238', 'Maxsock': '4031', 'CurrConns': '0', + 'Idle_pct': '100', 'CompressBpsIn': '0', + 'SslFrontendKeyRate': '0', 'MaxSessRate': '0', 'Process_num': '1', + 'Uptime': '0d 2h22m17s', 'PipesUsed': '0', 'SessRate': '0', + 'MaxSslRate': '0', 'ConnRateLimit': '0', 'CumSslConns': '0', + 'Name': 'HAProxy', 'SslFrontendMaxKeyRate': '0', + 'MaxSslConns': '0', 'node': 'amphora-abd35de5-e377-49c5-be32', + 'description': '', 'Release_date': '2014/07/25'}, + self.q.show_info() + ) + + def test_save_state(self): + filename = 'state_file' + + query_mock = mock.Mock() + query_mock.return_value = 'DATA' + + self.q._query = query_mock + + with mock.patch('builtins.open') as mock_open: + mock_fh = mock.MagicMock() + mock_open().__enter__.return_value = mock_fh + + self.q.save_state(filename) + + mock_fh.write.assert_called_once_with('DATA\n') + + def test_save_state_error(self): + """save_state() should swallow exceptions""" + filename = 'state_file' + + query_mock = mock.Mock(side_effect=OSError()) + self.q._query = query_mock + + try: + self.q.save_state(filename) + except Exception as ex: + self.fail(f"save_state() raised {ex!r} unexpectedly!") diff --git a/octavia/tests/unit/amphorae/backends/utils/test_interface.py b/octavia/tests/unit/amphorae/backends/utils/test_interface.py new file mode 100644 index 0000000000..e0cbccdf46 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/utils/test_interface.py @@ -0,0 +1,1437 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import os
+import socket
+import subprocess
+from unittest import mock
+
+import pyroute2
+
+from octavia.amphorae.backends.utils import interface
+from octavia.amphorae.backends.utils import interface_file
+from octavia.common import constants as consts
+from octavia.common import exceptions
+from octavia.tests.common import utils as test_utils
+import octavia.tests.unit.base as base
+
+
+class TestInterface(base.TestCase):
+    @mock.patch('os.listdir')
+    @mock.patch('octavia.amphorae.backends.utils.interface_file.'
+                'InterfaceFile.get_directory')
+    def test_interface_file_list(self, mock_get_directory, mock_listdir):
+        mock_get_directory.return_value = consts.AMP_NET_DIR_TEMPLATE
+
+        ifaces = ('eth0', 'eth7', 'eth8')
+        mock_listdir.return_value = [
+            f"{iface}.json"
+            for iface in ifaces
+        ]
+        mock_listdir.return_value.extend(["invalidfile"])
+
+        controller = interface.InterfaceController()
+        r = controller.interface_file_list()
+        config_file_list = list(r)
+
+        for iface in ifaces:
+            f = os.path.join(consts.AMP_NET_DIR_TEMPLATE,
+                             f"{iface}.json")
+            self.assertIn(f, config_file_list)
+
+        # unsupported file
+        f = os.path.join(consts.AMP_NET_DIR_TEMPLATE,
+                         "invalidfile")
+        self.assertNotIn(f, config_file_list)
+
+        # non-existent file
+        f = os.path.join(consts.AMP_NET_DIR_TEMPLATE,
+                         "eth2.json")
+        self.assertNotIn(f, config_file_list)
+
+    @mock.patch('os.listdir')
+    @mock.patch('octavia.amphorae.backends.utils.interface_file.'
+                'InterfaceFile.get_directory')
+    def test_list(self, mock_get_directory, mock_listdir):
+        mock_get_directory.return_value = consts.AMP_NET_DIR_TEMPLATE
+        mock_listdir.return_value = ["fakeiface.json"]
+
+        content = ('{\n'
+                   '"addresses": [\n'
+                   '{"address": "10.0.0.2",\n'
+                   '"prefixlen": 24}\n'
+                   '],\n'
+                   '"mtu": 1450,\n'
+                   '"name": "eth1",\n'
+                   '"if_type": "mytype",\n'
+                   '"routes": [\n'
+                   '{"dst": "0.0.0.0/0",\n'
+                   '"gateway": "10.0.0.1"},\n'
+                   '{"dst": "10.11.0.0/16",\n'
+                   '"gateway": "10.0.0.24"}\n'
+                   '],\n'
+                   '"rules": [\n'
+                   '{"src": "10.0.0.2",\n'
+                   '"src_len": 32,\n'
+                   '"table": 100}\n'
+                   '],\n'
+                   '"scripts": {\n'
+                   '"up": [\n'
+                   '{"command": "up-script"}],\n'
+                   '"down": [\n'
+                   '{"command": "down-script"}]\n'
+                   '}}\n')
+
+        filename = os.path.join(consts.AMP_NET_DIR_TEMPLATE,
+                                "fakeiface.json")
+
+        self.useFixture(
+            test_utils.OpenFixture(filename,
+                                   contents=content))
+
+        controller = interface.InterfaceController()
+        ifaces = controller.list()
+
+        self.assertIn("eth1", ifaces)
+        iface = ifaces["eth1"]
+
+        expected_dict = {
+            consts.NAME: "eth1",
+            consts.IF_TYPE: "mytype",
+            consts.MTU: 1450,
+            consts.ADDRESSES: [{
+                consts.ADDRESS: "10.0.0.2",
+                consts.PREFIXLEN: 24
+            }],
+            consts.ROUTES: [{
+                consts.DST: "0.0.0.0/0",
+                consts.GATEWAY: "10.0.0.1"
+            }, {
+                consts.DST: "10.11.0.0/16",
+                consts.GATEWAY: "10.0.0.24"
+            }],
+            consts.RULES: [{
+                consts.SRC: "10.0.0.2",
+                consts.SRC_LEN: 32,
+                consts.TABLE: 100
+            }],
+            consts.SCRIPTS: {
+                consts.IFACE_UP: [{
+                    consts.COMMAND: "up-script"
+                }],
+                consts.IFACE_DOWN: [{
+                    consts.COMMAND: "down-script"
+                }]
+            }
+        }
+
+        self.assertEqual(expected_dict[consts.NAME], iface.name)
+        self.assertEqual(expected_dict[consts.MTU], iface.mtu)
+        test_utils.assert_address_lists_equal(
+            self, expected_dict[consts.ADDRESSES], iface.addresses)
+        test_utils.assert_rule_lists_equal(
+            self, expected_dict[consts.RULES], iface.rules)
+        test_utils.assert_script_lists_equal(
+            self, expected_dict[consts.SCRIPTS], 
iface.scripts) + + def test__ipr_command(self): + mock_ipr_addr = mock.MagicMock() + + controller = interface.InterfaceController() + controller._ipr_command(mock_ipr_addr, + controller.ADD, + arg1=1, arg2=2) + + mock_ipr_addr.assert_called_once_with('add', arg1=1, arg2=2) + + def test__ipr_command_add_eexist(self): + mock_ipr_addr = mock.MagicMock() + mock_ipr_addr.side_effect = [ + pyroute2.NetlinkError(code=errno.EEXIST) + ] + + controller = interface.InterfaceController() + controller._ipr_command(mock_ipr_addr, + controller.ADD, + arg1=1, arg2=2) + + mock_ipr_addr.assert_called_once_with('add', arg1=1, arg2=2) + + def test__ipr_command_add_retry(self): + mock_ipr_addr = mock.MagicMock() + mock_ipr_addr.side_effect = [ + pyroute2.NetlinkError(code=errno.EINVAL), + pyroute2.NetlinkError(code=errno.EINVAL), + pyroute2.NetlinkError(code=errno.EINVAL), + None + ] + + controller = interface.InterfaceController() + controller._ipr_command(mock_ipr_addr, + controller.ADD, + retry_on_invalid_argument=True, + retry_interval=0, + arg1=1, arg2=2) + + mock_ipr_addr.assert_has_calls([ + mock.call('add', arg1=1, arg2=2), + mock.call('add', arg1=1, arg2=2), + mock.call('add', arg1=1, arg2=2), + mock.call('add', arg1=1, arg2=2)]) + + def test__ipr_command_add_einval_failed(self): + mock_ipr_addr = mock.MagicMock() + mock_ipr_addr.__name__ = "addr" + mock_ipr_addr.side_effect = [ + pyroute2.NetlinkError(code=errno.EINVAL) + ] * 21 + + controller = interface.InterfaceController() + self.assertRaises(exceptions.AmphoraNetworkConfigException, + controller._ipr_command, + mock_ipr_addr, + controller.ADD, + retry_on_invalid_argument=True, + max_retries=20, + retry_interval=0, + arg1=1, arg2=2) + mock_ipr_addr.assert_has_calls([ + mock.call('add', arg1=1, arg2=2) + ] * 20) + + def test__ipr_command_add_failed(self): + mock_ipr_addr = mock.MagicMock() + mock_ipr_addr.__name__ = "addr" + mock_ipr_addr.side_effect = [ + pyroute2.NetlinkError(code=errno.ENOENT) + ] + + controller = interface.InterfaceController() + self.assertRaises(exceptions.AmphoraNetworkConfigException, + controller._ipr_command, + mock_ipr_addr, + controller.ADD, + retry_on_invalid_argument=True, + max_retries=20, + retry_interval=0, + arg1=1, arg2=2) + mock_ipr_addr.assert_called_once_with( + 'add', arg1=1, arg2=2) + + def test__ipr_command_delete_failed_no_raise(self): + mock_ipr_addr = mock.MagicMock() + mock_ipr_addr.__name__ = "addr" + mock_ipr_addr.side_effect = [ + pyroute2.NetlinkError(code=errno.EINVAL) + ] + + controller = interface.InterfaceController() + controller._ipr_command(mock_ipr_addr, + controller.DELETE, + retry_on_invalid_argument=True, + max_retries=0, + raise_on_error=False, + arg1=1, arg2=2) + mock_ipr_addr.assert_called_once_with( + 'delete', arg1=1, arg2=2) + + def test__ipr_command_add_failed_retry_no_raise(self): + mock_ipr_addr = mock.MagicMock() + mock_ipr_addr.__name__ = "addr" + mock_ipr_addr.side_effect = [ + pyroute2.NetlinkError(code=errno.ENOENT) + ] + + controller = interface.InterfaceController() + controller._ipr_command(mock_ipr_addr, + controller.ADD, + max_retries=20, + retry_interval=0, + raise_on_error=False, + arg1=1, arg2=2) + mock_ipr_addr.assert_called_once_with( + 'add', arg1=1, arg2=2) + + @mock.patch('subprocess.check_output') + def test__dhclient_up(self, mock_check_output): + iface = "iface2" + + controller = interface.InterfaceController() + controller._dhclient_up(iface) + + mock_check_output.assert_called_once_with( + ["/sbin/dhclient", + "-lf", + 
f"/var/lib/dhclient/dhclient-{iface}.leases", + "-pf", + f"/run/dhclient-{iface}.pid", + iface], stderr=subprocess.STDOUT) + + @mock.patch('subprocess.check_output') + def test__dhclient_down(self, mock_check_output): + iface = "iface2" + + controller = interface.InterfaceController() + controller._dhclient_down(iface) + + mock_check_output.assert_called_once_with( + ["/sbin/dhclient", + "-r", + "-lf", + f"/var/lib/dhclient/dhclient-{iface}.leases", + "-pf", + f"/run/dhclient-{iface}.pid", + iface], stderr=subprocess.STDOUT) + + @mock.patch('subprocess.check_output') + def test__ipv6auto_up(self, mock_check_output): + iface = "iface2" + + controller = interface.InterfaceController() + controller._ipv6auto_up(iface) + + mock_check_output.assert_has_calls([ + mock.call(["/sbin/sysctl", "-w", + "net.ipv6.conf.iface2.accept_ra=2"], + stderr=subprocess.STDOUT), + mock.call(["/sbin/sysctl", "-w", + "net.ipv6.conf.iface2.autoconf=1"], + stderr=subprocess.STDOUT)]) + + @mock.patch('subprocess.check_output') + def test__ipv6auto_down(self, mock_check_output): + iface = "iface2" + + controller = interface.InterfaceController() + controller._ipv6auto_down(iface) + + mock_check_output.assert_has_calls([ + mock.call(["/sbin/sysctl", "-w", + "net.ipv6.conf.iface2.accept_ra=0"], + stderr=subprocess.STDOUT), + mock.call(["/sbin/sysctl", "-w", + "net.ipv6.conf.iface2.autoconf=0"], + stderr=subprocess.STDOUT)]) + + @mock.patch('pyroute2.IPRoute.rule', create=True) + @mock.patch('pyroute2.IPRoute.route', create=True) + @mock.patch('pyroute2.IPRoute.addr', create=True) + @mock.patch('pyroute2.IPRoute.link', create=True) + @mock.patch('pyroute2.IPRoute.get_links', create=True) + @mock.patch('pyroute2.IPRoute.link_lookup', create=True) + @mock.patch('subprocess.check_output') + def test_up(self, mock_check_output, mock_link_lookup, mock_get_links, + mock_link, mock_addr, mock_route, mock_rule): + iface = interface_file.InterfaceFile( + name="eth1", + if_type="vip", + mtu=1450, + addresses=[{ + consts.ADDRESS: '1.2.3.4', + consts.PREFIXLEN: 24 + }, { + consts.ADDRESS: '10.2.3.4', + consts.PREFIXLEN: 16 + }, { + consts.ADDRESS: '2001:db8::3', + consts.PREFIXLEN: 64 + }], + routes=[{ + consts.DST: '10.0.0.0/8', + consts.GATEWAY: '1.0.0.1', + consts.TABLE: 10, + consts.ONLINK: True + }, { + consts.DST: '20.0.0.0/8', + consts.GATEWAY: '1.0.0.2', + consts.PREFSRC: '1.2.3.4', + consts.SCOPE: 'link' + }, { + consts.DST: '2001:db8:2::1/128', + consts.GATEWAY: '2001:db8::1' + }], + rules=[{ + consts.SRC: '1.1.1.1', + consts.SRC_LEN: 32, + consts.TABLE: 20, + }, { + consts.SRC: '2001:db8::1', + consts.SRC_LEN: 128, + consts.TABLE: 40, + }], + scripts={ + consts.IFACE_UP: [{ + consts.COMMAND: "post-up eth1" + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: "post-down eth1" + }], + }) + + idx = mock.MagicMock() + mock_link_lookup.return_value = [idx] + + mock_get_links.return_value = [{ + consts.STATE: consts.IFACE_DOWN + }] + + controller = interface.InterfaceController() + controller.up(iface) + + mock_link.assert_called_once_with( + controller.SET, + index=idx, + state=consts.IFACE_UP, + mtu=1450) + + mock_addr.assert_has_calls([ + mock.call(controller.ADD, + index=idx, + address='1.2.3.4', + prefixlen=24, + family=socket.AF_INET), + mock.call(controller.ADD, + index=idx, + address='10.2.3.4', + prefixlen=16, + family=socket.AF_INET), + mock.call(controller.ADD, + index=idx, + address='2001:db8::3', + prefixlen=64, + family=socket.AF_INET6) + ]) + + mock_route.assert_has_calls([ + mock.call(controller.ADD, + oif=idx, + 
dst='10.0.0.0/8',
+                      gateway='1.0.0.1',
+                      table=10,
+                      onlink=True,
+                      family=socket.AF_INET),
+            mock.call(controller.ADD,
+                      oif=idx,
+                      dst='20.0.0.0/8',
+                      gateway='1.0.0.2',
+                      prefsrc='1.2.3.4',
+                      scope='link',
+                      family=socket.AF_INET),
+            mock.call(controller.ADD,
+                      oif=idx,
+                      dst='2001:db8:2::1/128',
+                      gateway='2001:db8::1',
+                      family=socket.AF_INET6)])
+
+        mock_rule.assert_has_calls([
+            mock.call(controller.ADD,
+                      src="1.1.1.1",
+                      src_len=32,
+                      table=20,
+                      family=socket.AF_INET),
+            mock.call(controller.ADD,
+                      src="2001:db8::1",
+                      src_len=128,
+                      table=40,
+                      family=socket.AF_INET6)])
+
+        mock_check_output.assert_has_calls([
+            mock.call(["post-up", "eth1"])
+        ])
+
+    @mock.patch('octavia.amphorae.backends.utils.network_namespace.'
+                'NetworkNamespace')
+    @mock.patch('octavia.amphorae.backends.utils.nftable_utils.'
+                'write_nftable_rules_file')
+    @mock.patch('pyroute2.IPRoute.rule', create=True)
+    @mock.patch('pyroute2.IPRoute.route', create=True)
+    @mock.patch('pyroute2.IPRoute.addr', create=True)
+    @mock.patch('pyroute2.IPRoute.link', create=True)
+    @mock.patch('pyroute2.IPRoute.get_links', create=True)
+    @mock.patch('pyroute2.IPRoute.link_lookup', create=True)
+    @mock.patch('subprocess.check_output')
+    def test_up_sriov(self, mock_check_output, mock_link_lookup,
+                      mock_get_links, mock_link, mock_addr, mock_route,
+                      mock_rule, mock_nftable, mock_netns):
+        iface = interface_file.InterfaceFile(
+            name="fake-eth1",
+            if_type="vip",
+            mtu=1450,
+            addresses=[{
+                consts.ADDRESS: '192.0.2.4',
+                consts.PREFIXLEN: 24
+            }, {
+                consts.ADDRESS: '198.51.100.4',
+                consts.PREFIXLEN: 16
+            }, {
+                consts.ADDRESS: '2001:db8::3',
+                consts.PREFIXLEN: 64
+            }],
+            routes=[{
+                consts.DST: '203.0.113.0/24',
+                consts.GATEWAY: '192.0.2.1',
+                consts.TABLE: 10,
+                consts.ONLINK: True
+            }, {
+                consts.DST: '198.51.100.0/24',
+                consts.GATEWAY: '192.0.2.2',
+                consts.PREFSRC: '192.0.2.4',
+                consts.SCOPE: 'link'
+            }, {
+                consts.DST: '2001:db8:2::1/128',
+                consts.GATEWAY: '2001:db8::1'
+            }],
+            rules=[{
+                consts.SRC: '203.0.113.1',
+                consts.SRC_LEN: 32,
+                consts.TABLE: 20,
+            }, {
+                consts.SRC: '2001:db8::1',
+                consts.SRC_LEN: 128,
+                consts.TABLE: 40,
+            }],
+            scripts={
+                consts.IFACE_UP: [{
+                    consts.COMMAND: "post-up fake-eth1"
+                }],
+                consts.IFACE_DOWN: [{
+                    consts.COMMAND: "post-down fake-eth1"
+                }],
+            },
+            is_sriov=True)
+
+        idx = mock.MagicMock()
+        mock_link_lookup.return_value = [idx]
+
+        mock_get_links.return_value = [{
+            consts.STATE: consts.IFACE_DOWN
+        }]
+
+        controller = interface.InterfaceController()
+        controller.up(iface)
+
+        mock_link.assert_called_once_with(
+            controller.SET,
+            index=idx,
+            state=consts.IFACE_UP,
+            mtu=1450)
+
+        mock_addr.assert_has_calls([
+            mock.call(controller.ADD,
+                      index=idx,
+                      address='192.0.2.4',
+                      prefixlen=24,
+                      family=socket.AF_INET),
+            mock.call(controller.ADD,
+                      index=idx,
+                      address='198.51.100.4',
+                      prefixlen=16,
+                      family=socket.AF_INET),
+            mock.call(controller.ADD,
+                      index=idx,
+                      address='2001:db8::3',
+                      prefixlen=64,
+                      family=socket.AF_INET6)
+        ])
+
+        mock_route.assert_has_calls([
+            mock.call(controller.ADD,
+                      oif=idx,
+                      dst='203.0.113.0/24',
+                      gateway='192.0.2.1',
+                      table=10,
+                      onlink=True,
+                      family=socket.AF_INET),
+            mock.call(controller.ADD,
+                      oif=idx,
+                      dst='198.51.100.0/24',
+                      gateway='192.0.2.2',
+                      prefsrc='192.0.2.4',
+                      scope='link',
+                      family=socket.AF_INET),
+            mock.call(controller.ADD,
+                      oif=idx,
+                      dst='2001:db8:2::1/128',
+                      gateway='2001:db8::1',
+                      
family=socket.AF_INET6)])
+
+        mock_rule.assert_has_calls([
+            mock.call(controller.ADD,
+                      src="203.0.113.1",
+                      src_len=32,
+                      table=20,
+                      family=socket.AF_INET),
+            mock.call(controller.ADD,
+                      src="2001:db8::1",
+                      src_len=128,
+                      table=40,
+                      family=socket.AF_INET6)])
+
+        mock_check_output.assert_has_calls([
+            mock.call([consts.NFT_CMD, '-o', '-f', consts.NFT_RULES_FILE],
+                      stderr=subprocess.STDOUT),
+            mock.call(["post-up", "fake-eth1"])
+        ])
+
+        mock_nftable.assert_called_once_with('fake-eth1', [])
+
+    @mock.patch('pyroute2.IPRoute.rule', create=True)
+    @mock.patch('pyroute2.IPRoute.get_routes', create=True)
+    @mock.patch('pyroute2.IPRoute.addr', create=True)
+    @mock.patch('pyroute2.IPRoute.link', create=True)
+    @mock.patch('pyroute2.IPRoute.get_links', create=True)
+    @mock.patch('pyroute2.IPRoute.link_lookup', create=True)
+    @mock.patch('pyroute2.IPRoute.get_rules', create=True)
+    @mock.patch('subprocess.check_output')
+    def test_up_backend(self, mock_check_output, mock_get_rules,
+                        mock_link_lookup, mock_get_links, mock_link, mock_addr,
+                        mock_get_routes, mock_rule):
+        iface = interface_file.InterfaceFile(
+            name="eth1",
+            if_type="backend",
+            mtu=1450,
+            addresses=[{
+                consts.ADDRESS: '1.2.3.4',
+                consts.PREFIXLEN: 24
+            }],
+            routes=[],
+            rules=[],
+            scripts={
+                consts.IFACE_UP: [{
+                    consts.COMMAND: "post-up eth1"
+                }],
+                consts.IFACE_DOWN: [{
+                    consts.COMMAND: "post-down eth1"
+                }],
+            })
+
+        idx = mock.MagicMock()
+        mock_link_lookup.return_value = [idx]
+
+        mock_get_links.return_value = [{
+            consts.STATE: consts.IFACE_DOWN
+        }]
+        mock_get_rules.return_value = [{
+            'src_len': 32,
+            'attrs': {
+                'FRA_SRC': '1.1.1.1',
+                'FRA_TABLE': 20,
+                'FRA_PROTOCOL': 0
+            }
+        }]
+
+        controller = interface.InterfaceController()
+        controller.up(iface)
+
+        mock_link.assert_called_once_with(
+            controller.SET,
+            index=idx,
+            state=consts.IFACE_UP,
+            mtu=1450)
+
+        mock_addr.assert_has_calls([
+            mock.call(controller.ADD,
+                      index=idx,
+                      address='1.2.3.4',
+                      prefixlen=24,
+                      family=socket.AF_INET),
+        ])
+
+        mock_get_routes.assert_called_once_with(oif=idx)
+
+        # for a 'backend' interface, the rules are not updated
+        mock_rule.assert_not_called()
+
+        mock_check_output.assert_has_calls([
+            mock.call(["post-up", "eth1"])
+        ])
+
+    @mock.patch('pyroute2.IPRoute.rule', create=True)
+    @mock.patch('pyroute2.IPRoute.route', create=True)
+    @mock.patch('pyroute2.IPRoute.addr', create=True)
+    @mock.patch('pyroute2.IPRoute.link', create=True)
+    @mock.patch('pyroute2.IPRoute.get_links', create=True)
+    @mock.patch('pyroute2.IPRoute.get_rules', create=True)
+    @mock.patch('pyroute2.IPRoute.get_routes', create=True)
+    @mock.patch('pyroute2.IPRoute.get_addr', create=True)
+    @mock.patch('pyroute2.IPRoute.link_lookup', create=True)
+    @mock.patch('subprocess.check_output')
+    @mock.patch('octavia.amphorae.backends.utils.interface.'
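+                # NOTE: _wait_tentative normally polls until IPv6 addresses
+                # have left the "tentative" (duplicate address detection)
+                # state; it is stubbed out so this test does not sleep.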
+                'InterfaceController._wait_tentative')
+    def test_up_update(self, mock_wait_tentative, mock_check_output,
+                       mock_link_lookup, mock_get_addr, mock_get_routes,
+                       mock_get_rules, mock_get_links, mock_link, mock_addr,
+                       mock_route, mock_rule):
+        iface = interface_file.InterfaceFile(
+            name="eth1",
+            if_type="vip",
+            mtu=1450,
+            addresses=[{
+                consts.ADDRESS: '1.2.3.4',
+                consts.PREFIXLEN: 24
+            }, {
+                consts.ADDRESS: '10.2.3.4',
+                consts.PREFIXLEN: 16
+            }, {
+                consts.ADDRESS: '10.4.3.2',
+                consts.PREFIXLEN: 16,
+                consts.OCTAVIA_OWNED: False
+            }, {
+                consts.ADDRESS: '2001:db8::3',
+                consts.PREFIXLEN: 64
+            }],
+            routes=[{
+                consts.DST: '10.0.0.0/8',
+                consts.GATEWAY: '1.0.0.1',
+                consts.TABLE: 10,
+                consts.ONLINK: True
+            }, {
+                consts.DST: '20.0.0.0/8',
+                consts.GATEWAY: '1.0.0.2',
+                consts.PREFSRC: '1.2.3.4',
+                consts.SCOPE: 'link'
+            }, {
+                consts.DST: '2001:db8:2::1/128',
+                consts.GATEWAY: '2001:db8::1'
+            }],
+            rules=[{
+                consts.SRC: '1.1.1.1',
+                consts.SRC_LEN: 32,
+                consts.TABLE: 20,
+            }, {
+                consts.SRC: '2001:db8::1',
+                consts.SRC_LEN: 128,
+                consts.TABLE: 40,
+            }],
+            scripts={
+                consts.IFACE_UP: [{
+                    consts.COMMAND: "post-up eth1"
+                }],
+                consts.IFACE_DOWN: [{
+                    consts.COMMAND: "post-down eth1"
+                }],
+            })
+
+        idx = mock.MagicMock()
+        mock_link_lookup.return_value = [idx]
+
+        mock_get_links.return_value = [{
+            consts.STATE: consts.IFACE_UP
+        }]
+
+        mock_get_addr.return_value = [{
+            'prefixlen': 24,
+            'attrs': {
+                'IFA_ADDRESS': '2.0.0.1',
+                'IFA_FLAGS': 0x80  # IFA_F_PERMANENT
+            }
+        }, {
+            'prefixlen': 16,
+            'attrs': {
+                'IFA_ADDRESS': '10.2.3.4',
+                'IFA_FLAGS': 0x80  # IFA_F_PERMANENT
+            }
+        }, {
+            'prefixlen': 16,
+            'attrs': {
+                'IFA_ADDRESS': '10.2.3.5',
+                'IFA_FLAGS': 0x0  # not IFA_F_PERMANENT
+            }
+        }]
+
+        mock_get_routes.return_value = [{
+            'dst_len': 16,
+            'family': 2,
+            'proto': 4,  # STATIC
+            'attrs': {
+                'RTA_DST': '24.24.0.0',
+                'RTA_GATEWAY': '2.0.0.254',
+                'RTA_PREFSRC': '2.0.0.1',
+                'RTA_TABLE': 254
+            }
+        }, {
+            'dst_len': 8,
+            'family': 2,
+            'proto': 4,  # STATIC
+            'attrs': {
+                'RTA_DST': '20.0.0.0',
+                'RTA_GATEWAY': '1.0.0.2',
+                'RTA_PREFSRC': '1.2.3.4',
+                'RTA_TABLE': 254
+            }
+        }]
+
+        mock_get_rules.return_value = [{
+            'src_len': 32,
+            'attrs': {
+                'FRA_SRC': '1.1.1.1',
+                'FRA_TABLE': 20,
+                'FRA_PROTOCOL': 0
+            }
+        }, {
+            'src_len': 32,
+            'attrs': {
+                'FRA_SRC': '2.2.2.2',
+                'FRA_TABLE': 254,
+                'FRA_PROTOCOL': 18  # Keepalived
+            }
+        }, {
+            'src_len': 32,
+            'attrs': {
+                'FRA_SRC': '3.3.3.3',
+                'FRA_TABLE': 254,
+                'FRA_PROTOCOL': 0
+            }
+        }]
+
+        controller = interface.InterfaceController()
+        controller.up(iface)
+
+        mock_link.assert_not_called()
+
+        mock_addr.assert_has_calls([
+            mock.call(controller.ADD,
+                      index=idx,
+                      address='1.2.3.4',
+                      prefixlen=24,
+                      family=socket.AF_INET),
+            mock.call(controller.ADD,
+                      index=idx,
+                      address='2001:db8::3',
+                      prefixlen=64,
+                      family=socket.AF_INET6),
+            mock.call(controller.DELETE,
+                      index=idx,
+                      address='2.0.0.1',
+                      prefixlen=24,
+                      family=socket.AF_INET)
+        ])
+
+        mock_route.assert_has_calls([
+            mock.call(controller.ADD,
+                      oif=idx,
+                      dst='10.0.0.0/8',
+                      gateway='1.0.0.1',
+                      table=10,
+                      onlink=True,
+                      family=socket.AF_INET),
+            mock.call(controller.ADD,
+                      oif=idx,
+                      dst='2001:db8:2::1/128',
+                      gateway='2001:db8::1',
+                      family=socket.AF_INET6),
+            mock.call(controller.DELETE,
+                      oif=idx,
+                      dst='24.24.0.0/16',
+                      gateway='2.0.0.254',
+                      prefsrc='2.0.0.1',
+                      table=254,
+                      family=socket.AF_INET)])
+
+        mock_rule.assert_has_calls([
+            mock.call(controller.ADD,
+                      src="2001:db8::1",
+                      src_len=128,
+                      table=40,
+                      
family=socket.AF_INET6),
+            mock.call(controller.DELETE,
+                      src='3.3.3.3',
+                      src_len=32,
+                      table=254,
+                      family=socket.AF_INET)])
+
+        mock_check_output.assert_has_calls([
+            mock.call(["post-up", "eth1"])
+        ])
+
+    @mock.patch('pyroute2.IPRoute.rule', create=True)
+    @mock.patch('pyroute2.IPRoute.route', create=True)
+    @mock.patch('pyroute2.IPRoute.addr', create=True)
+    @mock.patch('pyroute2.IPRoute.link', create=True)
+    @mock.patch('pyroute2.IPRoute.get_links', create=True)
+    @mock.patch('pyroute2.IPRoute.get_rules', create=True)
+    @mock.patch('pyroute2.IPRoute.get_routes', create=True)
+    @mock.patch('pyroute2.IPRoute.get_addr', create=True)
+    @mock.patch('pyroute2.IPRoute.link_lookup', create=True)
+    @mock.patch('subprocess.check_output')
+    @mock.patch('octavia.amphorae.backends.utils.interface.'
+                'InterfaceController._wait_tentative')
+    def test_up_auto(self, mock_wait_tentative, mock_check_output,
+                     mock_link_lookup, mock_get_addr, mock_get_routes,
+                     mock_get_rules, mock_get_links, mock_link, mock_addr,
+                     mock_route, mock_rule):
+        iface = interface_file.InterfaceFile(
+            name="eth1",
+            if_type="vip",
+            mtu=1450,
+            addresses=[{
+                consts.DHCP: True,
+                consts.IPV6AUTO: True
+            }],
+            routes=[],
+            rules=[],
+            scripts={
+                consts.IFACE_UP: [{
+                    consts.COMMAND: "post-up eth1"
+                }],
+                consts.IFACE_DOWN: [{
+                    consts.COMMAND: "post-down eth1"
+                }],
+            })
+
+        idx = mock.MagicMock()
+        mock_link_lookup.return_value = [idx]
+
+        mock_get_links.return_value = [{
+            consts.STATE: consts.IFACE_DOWN
+        }]
+
+        controller = interface.InterfaceController()
+        controller.up(iface)
+
+        mock_link.assert_called_once_with(
+            controller.SET,
+            index=idx,
+            state=consts.IFACE_UP,
+            mtu=1450)
+
+        mock_addr.assert_not_called()
+        mock_route.assert_not_called()
+        mock_rule.assert_not_called()
+
+        mock_check_output.assert_has_calls([
+            mock.call(["/sbin/dhclient",
+                       "-lf",
+                       f"/var/lib/dhclient/dhclient-{iface.name}.leases",
+                       "-pf",
+                       f"/run/dhclient-{iface.name}.pid",
+                       iface.name], stderr=subprocess.STDOUT),
+            mock.call(["/sbin/sysctl", "-w",
+                       f"net.ipv6.conf.{iface.name}.accept_ra=2"],
+                      stderr=subprocess.STDOUT),
+            mock.call(["/sbin/sysctl", "-w",
+                       f"net.ipv6.conf.{iface.name}.autoconf=1"],
+                      stderr=subprocess.STDOUT),
+            mock.call(["post-up", iface.name])
+        ])
+
+    @mock.patch('pyroute2.IPRoute.rule', create=True)
+    @mock.patch('pyroute2.IPRoute.route', create=True)
+    @mock.patch('pyroute2.IPRoute.addr', create=True)
+    @mock.patch('pyroute2.IPRoute.link', create=True)
+    @mock.patch('pyroute2.IPRoute.get_links', create=True)
+    @mock.patch('pyroute2.IPRoute.link_lookup', create=True)
+    @mock.patch('subprocess.check_output')
+    def test_down(self, mock_check_output, mock_link_lookup, mock_get_links,
+                  mock_link, mock_addr, mock_route, mock_rule):
+        iface = interface_file.InterfaceFile(
+            name="eth1",
+            if_type="vip",
+            mtu=1450,
+            addresses=[{
+                consts.ADDRESS: '1.2.3.4',
+                consts.PREFIXLEN: 24
+            }, {
+                consts.ADDRESS: '10.2.3.4',
+                consts.PREFIXLEN: 16
+            }, {
+                consts.ADDRESS: '2001:db8::3',
+                consts.PREFIXLEN: 64
+            }],
+            routes=[{
+                consts.DST: '10.0.0.0/8',
+                consts.GATEWAY: '1.0.0.1',
+                consts.TABLE: 10,
+                consts.ONLINK: True
+            }, {
+                consts.DST: '20.0.0.0/8',
+                consts.GATEWAY: '1.0.0.2',
+                consts.PREFSRC: '1.2.3.4',
+                consts.SCOPE: 'link'
+            }, {
+                consts.DST: '2001:db8:2::1/128',
+                consts.GATEWAY: '2001:db8::1'
+            }],
+            rules=[{
+                consts.SRC: '1.1.1.1',
+                consts.SRC_LEN: 32,
+                consts.TABLE: 20,
+            }, {
+                consts.SRC: '2001:db8::1',
+                consts.SRC_LEN: 128,
+                consts.TABLE: 40,
+            }],
+            scripts={
+                
consts.IFACE_UP: [{
+                    consts.COMMAND: "post-up eth1"
+                }],
+                consts.IFACE_DOWN: [{
+                    consts.COMMAND: "post-down eth1"
+                }],
+            })
+
+        idx = mock.MagicMock()
+        mock_link_lookup.return_value = [idx]
+
+        mock_get_links.return_value = [{
+            consts.STATE: consts.IFACE_UP
+        }]
+
+        controller = interface.InterfaceController()
+        controller.down(iface)
+
+        mock_link.assert_called_once_with(
+            controller.SET,
+            index=idx,
+            state=consts.IFACE_DOWN)
+
+        mock_addr.assert_has_calls([
+            mock.call(controller.DELETE,
+                      index=idx,
+                      address='1.2.3.4',
+                      prefixlen=24,
+                      family=socket.AF_INET),
+            mock.call(controller.DELETE,
+                      index=idx,
+                      address='10.2.3.4',
+                      prefixlen=16,
+                      family=socket.AF_INET),
+            mock.call(controller.DELETE,
+                      index=idx,
+                      address='2001:db8::3',
+                      prefixlen=64,
+                      family=socket.AF_INET6)
+        ])
+
+        mock_route.assert_has_calls([
+            mock.call(controller.DELETE,
+                      oif=idx,
+                      dst='10.0.0.0/8',
+                      gateway='1.0.0.1',
+                      table=10,
+                      onlink=True,
+                      family=socket.AF_INET),
+            mock.call(controller.DELETE,
+                      oif=idx,
+                      dst='20.0.0.0/8',
+                      gateway='1.0.0.2',
+                      prefsrc='1.2.3.4',
+                      scope='link',
+                      family=socket.AF_INET),
+            mock.call(controller.DELETE,
+                      oif=idx,
+                      dst='2001:db8:2::1/128',
+                      gateway='2001:db8::1',
+                      family=socket.AF_INET6)])
+
+        mock_rule.assert_has_calls([
+            mock.call(controller.DELETE,
+                      src="1.1.1.1",
+                      src_len=32,
+                      table=20,
+                      family=socket.AF_INET),
+            mock.call(controller.DELETE,
+                      src="2001:db8::1",
+                      src_len=128,
+                      table=40,
+                      family=socket.AF_INET6)])
+
+        mock_check_output.assert_has_calls([
+            mock.call(["post-down", "eth1"])
+        ])
+
+    @mock.patch('pyroute2.IPRoute.rule', create=True)
+    @mock.patch('pyroute2.IPRoute.route', create=True)
+    @mock.patch('pyroute2.IPRoute.addr', create=True)
+    @mock.patch('pyroute2.IPRoute.flush_addr', create=True)
+    @mock.patch('pyroute2.IPRoute.link', create=True)
+    @mock.patch('pyroute2.IPRoute.get_links', create=True)
+    @mock.patch('pyroute2.IPRoute.link_lookup', create=True)
+    @mock.patch('subprocess.check_output')
+    def test_down_with_errors(self, mock_check_output, mock_link_lookup,
+                              mock_get_links, mock_link, mock_flush_addr,
+                              mock_addr, mock_route, mock_rule):
+        iface = interface_file.InterfaceFile(
+            name="eth1",
+            if_type="vip",
+            mtu=1450,
+            addresses=[{
+                consts.ADDRESS: '1.2.3.4',
+                consts.PREFIXLEN: 24
+            }, {
+                consts.ADDRESS: '10.2.3.4',
+                consts.PREFIXLEN: 16
+            }, {
+                consts.ADDRESS: '2001:db8::3',
+                consts.PREFIXLEN: 64
+            }],
+            routes=[{
+                consts.DST: '10.0.0.0/8',
+                consts.GATEWAY: '1.0.0.1',
+                consts.TABLE: 10,
+                consts.ONLINK: True
+            }, {
+                consts.DST: '20.0.0.0/8',
+                consts.GATEWAY: '1.0.0.2',
+                consts.PREFSRC: '1.2.3.4',
+                consts.SCOPE: 'link'
+            }, {
+                consts.DST: '2001:db8:2::1/128',
+                consts.GATEWAY: '2001:db8::1'
+            }],
+            rules=[{
+                consts.SRC: '1.1.1.1',
+                consts.SRC_LEN: 32,
+                consts.TABLE: 20,
+            }, {
+                consts.SRC: '2001:db8::1',
+                consts.SRC_LEN: 128,
+                consts.TABLE: 40,
+            }],
+            scripts={
+                consts.IFACE_UP: [{
+                    consts.COMMAND: "post-up eth1"
+                }],
+                consts.IFACE_DOWN: [{
+                    consts.COMMAND: "post-down eth1"
+                }],
+            })
+
+        idx = mock.MagicMock()
+        mock_link_lookup.return_value = [idx]
+
+        mock_get_links.return_value = [{
+            consts.STATE: consts.IFACE_UP
+        }]
+        mock_addr.side_effect = [
+            pyroute2.NetlinkError(123),
+            pyroute2.NetlinkError(123),
+            pyroute2.NetlinkError(123),
+        ]
+        mock_flush_addr.side_effect = [
+            pyroute2.NetlinkError(123)
+        ]
+        mock_route.side_effect = [
+            pyroute2.NetlinkError(123),
+            pyroute2.NetlinkError(123),
+            
pyroute2.NetlinkError(123)
+        ]
+        mock_rule.side_effect = [
+            pyroute2.NetlinkError(123),
+            pyroute2.NetlinkError(123),
+        ]
+        mock_check_output.side_effect = [
+            Exception()
+        ]
+
+        controller = interface.InterfaceController()
+        controller.down(iface)
+
+        mock_link.assert_called_once_with(
+            controller.SET,
+            index=idx,
+            state=consts.IFACE_DOWN)
+
+        mock_addr.assert_has_calls([
+            mock.call(controller.DELETE,
+                      index=idx,
+                      address='1.2.3.4',
+                      prefixlen=24,
+                      family=socket.AF_INET),
+            mock.call(controller.DELETE,
+                      index=idx,
+                      address='10.2.3.4',
+                      prefixlen=16,
+                      family=socket.AF_INET),
+            mock.call(controller.DELETE,
+                      index=idx,
+                      address='2001:db8::3',
+                      prefixlen=64,
+                      family=socket.AF_INET6)
+        ])
+
+        mock_flush_addr.assert_has_calls([
+            mock.call(index=idx)
+        ])
+
+        mock_route.assert_has_calls([
+            mock.call(controller.DELETE,
+                      oif=idx,
+                      dst='10.0.0.0/8',
+                      gateway='1.0.0.1',
+                      table=10,
+                      onlink=True,
+                      family=socket.AF_INET),
+            mock.call(controller.DELETE,
+                      oif=idx,
+                      dst='20.0.0.0/8',
+                      gateway='1.0.0.2',
+                      prefsrc='1.2.3.4',
+                      scope='link',
+                      family=socket.AF_INET),
+            mock.call(controller.DELETE,
+                      oif=idx,
+                      dst='2001:db8:2::1/128',
+                      gateway='2001:db8::1',
+                      family=socket.AF_INET6)])
+
+        mock_rule.assert_has_calls([
+            mock.call(controller.DELETE,
+                      src="1.1.1.1",
+                      src_len=32,
+                      table=20,
+                      family=socket.AF_INET),
+            mock.call(controller.DELETE,
+                      src="2001:db8::1",
+                      src_len=128,
+                      table=40,
+                      family=socket.AF_INET6)])
+
+        mock_check_output.assert_has_calls([
+            mock.call(["post-down", "eth1"])
+        ])
+
+    @mock.patch('pyroute2.IPRoute.rule', create=True)
+    @mock.patch('pyroute2.IPRoute.route', create=True)
+    @mock.patch('pyroute2.IPRoute.addr', create=True)
+    @mock.patch('pyroute2.IPRoute.link', create=True)
+    @mock.patch('pyroute2.IPRoute.get_links', create=True)
+    @mock.patch('pyroute2.IPRoute.link_lookup', create=True)
+    @mock.patch('subprocess.check_output')
+    def test_down_already_down(self, mock_check_output, mock_link_lookup,
+                               mock_get_links, mock_link, mock_addr,
+                               mock_route, mock_rule):
+        iface = interface_file.InterfaceFile(
+            name="eth1",
+            if_type="vip",
+            mtu=1450,
+            addresses=[{
+                consts.ADDRESS: '1.2.3.4',
+                consts.PREFIXLEN: 24
+            }, {
+                consts.ADDRESS: '10.2.3.4',
+                consts.PREFIXLEN: 16
+            }, {
+                consts.ADDRESS: '2001:db8::3',
+                consts.PREFIXLEN: 64
+            }],
+            routes=[{
+                consts.DST: '10.0.0.0/8',
+                consts.GATEWAY: '1.0.0.1',
+                consts.TABLE: 10,
+                consts.ONLINK: True
+            }, {
+                consts.DST: '20.0.0.0/8',
+                consts.GATEWAY: '1.0.0.2',
+                consts.PREFSRC: '1.2.3.4',
+                consts.SCOPE: 'link'
+            }, {
+                consts.DST: '2001:db8:2::1/128',
+                consts.GATEWAY: '2001:db8::1'
+            }],
+            rules=[{
+                consts.SRC: '1.1.1.1',
+                consts.SRC_LEN: 32,
+                consts.TABLE: 20,
+            }, {
+                consts.SRC: '2001:db8::1',
+                consts.SRC_LEN: 128,
+                consts.TABLE: 40,
+            }],
+            scripts={
+                consts.IFACE_UP: [{
+                    consts.COMMAND: "post-up eth1"
+                }],
+                consts.IFACE_DOWN: [{
+                    consts.COMMAND: "post-down eth1"
+                }],
+            })
+
+        idx = mock.MagicMock()
+        mock_link_lookup.return_value = [idx]
+
+        mock_get_links.return_value = [{
+            consts.STATE: consts.IFACE_DOWN
+        }]
+
+        controller = interface.InterfaceController()
+        controller.down(iface)
+
+        mock_link.assert_not_called()
+        mock_addr.assert_not_called()
+        mock_route.assert_not_called()
+        mock_rule.assert_not_called()
+        mock_check_output.assert_not_called()
+
+    @mock.patch('pyroute2.IPRoute.rule', create=True)
+    @mock.patch('pyroute2.IPRoute.route', create=True)
+    @mock.patch('pyroute2.IPRoute.addr', 
create=True) + @mock.patch('pyroute2.IPRoute.flush_addr', create=True) + @mock.patch('pyroute2.IPRoute.link', create=True) + @mock.patch('pyroute2.IPRoute.get_links', create=True) + @mock.patch('pyroute2.IPRoute.link_lookup', create=True) + @mock.patch('subprocess.check_output') + def test_down_auto(self, mock_check_output, mock_link_lookup, + mock_get_links, mock_link, mock_flush_addr, + mock_addr, mock_route, mock_rule): + iface = interface_file.InterfaceFile( + name="eth1", + if_type="vip", + mtu=1450, + addresses=[{ + consts.DHCP: True, + consts.IPV6AUTO: True + }], + routes=[], + rules=[], + scripts={ + consts.IFACE_UP: [{ + consts.COMMAND: "post-up eth1" + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: "post-down eth1" + }], + }) + + idx = mock.MagicMock() + mock_link_lookup.return_value = [idx] + + mock_get_links.return_value = [{ + consts.STATE: consts.IFACE_UP + }] + + controller = interface.InterfaceController() + controller.down(iface) + + mock_link.assert_called_once_with( + controller.SET, + index=idx, + state=consts.IFACE_DOWN) + + mock_addr.assert_not_called() + mock_route.assert_not_called() + mock_rule.assert_not_called() + + mock_flush_addr.assert_called_once() + + mock_check_output.assert_has_calls([ + mock.call(["/sbin/dhclient", + "-r", + "-lf", + f"/var/lib/dhclient/dhclient-{iface.name}.leases", + "-pf", + f"/run/dhclient-{iface.name}.pid", + iface.name], stderr=subprocess.STDOUT), + mock.call(["/sbin/sysctl", "-w", + f"net.ipv6.conf.{iface.name}.accept_ra=0"], + stderr=subprocess.STDOUT), + mock.call(["/sbin/sysctl", "-w", + f"net.ipv6.conf.{iface.name}.autoconf=0"], + stderr=subprocess.STDOUT), + mock.call(["post-down", iface.name]) + ]) + + @mock.patch("time.time") + @mock.patch("time.sleep") + def test__wait_tentative(self, mock_time_sleep, mock_time_time): + mock_ipr = mock.MagicMock() + mock_ipr.get_addr.side_effect = [ + ({'family': socket.AF_INET, + 'flags': 0}, + {'family': socket.AF_INET6, + 'flags': 0x40}, # tentative + {'family': socket.AF_INET6, + 'flags': 0}), + ({'family': socket.AF_INET, + 'flags': 0}, + {'family': socket.AF_INET6, + 'flags': 0}, + {'family': socket.AF_INET6, + 'flags': 0}) + ] + + mock_time_time.return_value = 0 + + controller = interface.InterfaceController() + idx = 4 + + controller._wait_tentative(mock_ipr, idx) + mock_time_sleep.assert_called_once() + + @mock.patch("time.time") + @mock.patch("time.sleep") + def test__wait_tentative_timeout(self, mock_time_sleep, + mock_time_time): + mock_ipr = mock.MagicMock() + mock_ipr.get_addr.return_value = ( + {'family': socket.AF_INET6, + 'flags': 0x40}, # tentative + {'family': socket.AF_INET6, + 'flags': 0} + ) + + mock_time_time.side_effect = [0, 0, 1, 2, 29, 30, 31] + + controller = interface.InterfaceController() + idx = 4 + + controller._wait_tentative(mock_ipr, idx) + self.assertEqual(4, len(mock_time_sleep.mock_calls)) + + def test__normalize_ip_address(self): + controller = interface.InterfaceController() + + # Simple IPv4 address + addr = controller._normalize_ip_address('192.168.0.1') + self.assertEqual('192.168.0.1', addr) + + # Simple IPv6 address + addr = controller._normalize_ip_address('2001::1') + self.assertEqual('2001::1', addr) + + # Uncompressed IPv6 address + addr = controller._normalize_ip_address( + '2001:0000:0000:0000:0000:0000:0000:0001') + self.assertEqual('2001::1', addr) + + addr = controller._normalize_ip_address(None) + self.assertIsNone(addr) + + def test__normalize_ip_network(self): + controller = interface.InterfaceController() + + # Simple IP address + 
addr = controller._normalize_ip_network('192.168.0.1') + self.assertEqual('192.168.0.1/32', addr) + + # "Normal" network + addr = controller._normalize_ip_network('10.0.0.0/16') + self.assertEqual('10.0.0.0/16', addr) + + # Network with hostbits set + addr = controller._normalize_ip_network('10.0.0.10/16') + self.assertEqual('10.0.0.0/16', addr) + + # IPv6 network with hostbits set + addr = controller._normalize_ip_network('2001::1/64') + self.assertEqual('2001::/64', addr) + + # Uncompressed IPv6 network + addr = controller._normalize_ip_network( + '2001:0000:0000:0000:0000:0000:0000:0001/64') + self.assertEqual('2001::/64', addr) + + addr = controller._normalize_ip_network(None) + self.assertIsNone(addr) diff --git a/octavia/tests/unit/amphorae/backends/utils/test_interface_file.py b/octavia/tests/unit/amphorae/backends/utils/test_interface_file.py new file mode 100644 index 0000000000..d76aba9c69 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/utils/test_interface_file.py @@ -0,0 +1,754 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ipaddress +from unittest import mock + +from octavia.amphorae.backends.utils import interface_file +from octavia.common import constants as consts +from octavia.tests.common import utils as test_utils +import octavia.tests.unit.base as base + + +class TestInterfaceFile(base.TestCase): + def test_vip_interface_file(self): + netns_interface = 'eth1234' + MTU = 1450 + VIP_ADDRESS = '192.0.2.2' + SUBNET_CIDR = '192.0.2.0/24' + GATEWAY = '192.0.2.1' + DEST1 = '198.51.100.0/24' + NEXTHOP = '192.0.2.1' + VRRP_IP_ADDRESS = '192.10.2.4' + TOPOLOGY = 'SINGLE' + + cidr = ipaddress.ip_network(SUBNET_CIDR) + prefixlen = cidr.prefixlen + + vip_interface_file = interface_file.VIPInterfaceFile( + name=netns_interface, + mtu=MTU, + vips=[{ + 'ip_address': VIP_ADDRESS, + 'ip_version': cidr.version, + 'prefixlen': prefixlen, + 'gateway': GATEWAY, + 'host_routes': [ + {'destination': DEST1, 'nexthop': NEXTHOP} + ], + }], + vrrp_info={ + 'ip': VRRP_IP_ADDRESS, + 'prefixlen': prefixlen + }, + fixed_ips=[], + topology=TOPOLOGY) + + expected_dict = { + consts.NAME: netns_interface, + consts.MTU: MTU, + consts.ADDRESSES: [ + { + consts.ADDRESS: VRRP_IP_ADDRESS, + consts.PREFIXLEN: prefixlen + }, + { + consts.ADDRESS: VIP_ADDRESS, + consts.PREFIXLEN: 32, + } + ], + consts.ROUTES: [ + { + consts.DST: "0.0.0.0/0", + consts.GATEWAY: GATEWAY, + consts.FLAGS: [consts.ONLINK], + }, + { + consts.DST: "0.0.0.0/0", + consts.GATEWAY: GATEWAY, + consts.FLAGS: [consts.ONLINK], + consts.TABLE: 1 + }, + { + consts.DST: cidr.exploded, + consts.SCOPE: 'link' + }, + { + consts.DST: cidr.exploded, + consts.PREFSRC: VIP_ADDRESS, + consts.SCOPE: 'link', + consts.TABLE: 1 + }, + { + consts.DST: DEST1, + consts.GATEWAY: NEXTHOP, + consts.FLAGS: [consts.ONLINK] + }, + { + consts.DST: DEST1, + consts.GATEWAY: NEXTHOP, + consts.TABLE: 1, + consts.FLAGS: [consts.ONLINK] + } + ], + consts.RULES: [ + { + consts.SRC: VIP_ADDRESS, + 
consts.SRC_LEN: 32, + consts.TABLE: 1 + } + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh add ipv4 " + f"{netns_interface}") + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv4 " + f"{netns_interface}") + }] + } + } + + with mock.patch('os.open'), mock.patch('os.fdopen'), mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' + 'InterfaceFile.dump') as mock_dump: + vip_interface_file.write() + + mock_dump.assert_called_once() + + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, expected_dict, args[0]) + + def test_vip_interface_file_with_fixed_ips(self): + netns_interface = 'eth1234' + MTU = 1450 + VIP_ADDRESS = '192.0.2.2' + FIXED_IP = '10.0.0.1' + SUBNET_CIDR = '192.0.2.0/24' + SUBNET2_CIDR = '10.0.0.0/16' + GATEWAY = '192.0.2.1' + DEST1 = '198.51.100.0/24' + DEST2 = '203.0.113.0/24' + NEXTHOP = '192.0.2.1' + NEXTHOP2 = '10.0.1.254' + VRRP_IP_ADDRESS = '192.10.2.4' + TOPOLOGY = 'SINGLE' + + cidr = ipaddress.ip_network(SUBNET_CIDR) + prefixlen = cidr.prefixlen + + cidr2 = ipaddress.ip_network(SUBNET2_CIDR) + prefixlen2 = cidr2.prefixlen + + vip_interface_file = interface_file.VIPInterfaceFile( + name=netns_interface, + mtu=MTU, + vips=[{ + 'ip_address': VIP_ADDRESS, + 'ip_version': cidr.version, + 'prefixlen': prefixlen, + 'gateway': GATEWAY, + 'host_routes': [ + {'destination': DEST1, 'nexthop': NEXTHOP} + ], + }], + vrrp_info={ + 'ip': VRRP_IP_ADDRESS, + 'prefixlen': prefixlen, + }, + fixed_ips=[{'ip_address': FIXED_IP, + 'subnet_cidr': SUBNET2_CIDR, + 'host_routes': [ + {'destination': DEST2, 'nexthop': NEXTHOP2} + ]}], + topology=TOPOLOGY) + + expected_dict = { + consts.NAME: netns_interface, + consts.MTU: MTU, + consts.ADDRESSES: [ + { + consts.ADDRESS: VRRP_IP_ADDRESS, + consts.PREFIXLEN: prefixlen + }, + { + consts.ADDRESS: VIP_ADDRESS, + consts.PREFIXLEN: 32 + }, + { + consts.ADDRESS: FIXED_IP, + consts.PREFIXLEN: prefixlen2 + }, + ], + consts.ROUTES: [ + { + consts.DST: "0.0.0.0/0", + consts.GATEWAY: GATEWAY, + consts.FLAGS: [consts.ONLINK], + }, + { + consts.DST: "0.0.0.0/0", + consts.GATEWAY: GATEWAY, + consts.FLAGS: [consts.ONLINK], + consts.TABLE: 1 + }, + { + consts.DST: cidr.exploded, + consts.SCOPE: 'link' + }, + { + consts.DST: cidr.exploded, + consts.PREFSRC: VIP_ADDRESS, + consts.SCOPE: 'link', + consts.TABLE: 1 + }, + { + consts.DST: DEST1, + consts.GATEWAY: NEXTHOP, + consts.FLAGS: [consts.ONLINK] + }, + { + consts.DST: DEST1, + consts.GATEWAY: NEXTHOP, + consts.TABLE: 1, + consts.FLAGS: [consts.ONLINK] + }, + { + consts.DST: DEST2, + consts.GATEWAY: NEXTHOP2, + consts.FLAGS: [consts.ONLINK] + } + ], + consts.RULES: [ + { + consts.SRC: VIP_ADDRESS, + consts.SRC_LEN: 32, + consts.TABLE: 1 + } + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh add ipv4 " + f"{netns_interface}") + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv4 " + f"{netns_interface}") + }] + } + } + + with mock.patch('os.open'), mock.patch('os.fdopen'), mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' 
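+                # dump() is mocked so the test can capture the generated
+                # interface dict through mock_dump instead of writing a
+                # JSON file to disk (os.open/os.fdopen are patched for the
+                # same reason).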
+ 'InterfaceFile.dump') as mock_dump: + vip_interface_file.write() + + mock_dump.assert_called_once() + + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, expected_dict, args[0]) + + def test_vip_interface_file_dhcp(self): + netns_interface = 'eth1234' + MTU = 1450 + VIP_ADDRESS = '192.0.2.2' + SUBNET_CIDR = '192.0.2.0/24' + TOPOLOGY = 'SINGLE' + + cidr = ipaddress.ip_network(SUBNET_CIDR) + prefixlen = cidr.prefixlen + + vip_interface_file = interface_file.VIPInterfaceFile( + name=netns_interface, + mtu=MTU, + vips=[{ + 'ip_address': VIP_ADDRESS, + 'ip_version': cidr.version, + 'prefixlen': prefixlen, + 'gateway': None, + 'host_routes': [], + }], + vrrp_info=None, + fixed_ips=[], + topology=TOPOLOGY) + + expected_dict = { + consts.NAME: netns_interface, + consts.MTU: MTU, + consts.ADDRESSES: [ + { + consts.DHCP: True + }, { + consts.ADDRESS: VIP_ADDRESS, + consts.PREFIXLEN: 32, + } + ], + consts.ROUTES: [ + { + consts.DST: cidr.exploded, + consts.SCOPE: 'link', + }, + { + consts.DST: cidr.exploded, + consts.PREFSRC: VIP_ADDRESS, + consts.SCOPE: 'link', + consts.TABLE: 1 + } + ], + consts.RULES: [ + { + consts.SRC: VIP_ADDRESS, + consts.SRC_LEN: 32, + consts.TABLE: 1 + } + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh add ipv4 " + f"{netns_interface}") + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh delete ipv4 " + f"{netns_interface}") + }] + } + } + + with mock.patch('os.open'), mock.patch('os.fdopen'), mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' + 'InterfaceFile.dump') as mock_dump: + vip_interface_file.write() + + mock_dump.assert_called_once() + + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, expected_dict, args[0]) + + def test_vip_interface_file_active_standby(self): + netns_interface = 'eth1234' + MTU = 1450 + VIP_ADDRESS = '192.0.2.2' + SUBNET_CIDR = '192.0.2.0/24' + GATEWAY = '192.0.2.1' + VRRP_IP_ADDRESS = '192.10.2.4' + TOPOLOGY = 'ACTIVE_STANDBY' + + cidr = ipaddress.ip_network(SUBNET_CIDR) + prefixlen = cidr.prefixlen + + vip_interface_file = interface_file.VIPInterfaceFile( + name=netns_interface, + mtu=MTU, + vips=[{ + 'ip_address': VIP_ADDRESS, + 'ip_version': cidr.version, + 'prefixlen': prefixlen, + 'gateway': GATEWAY, + 'host_routes': [], + }], + vrrp_info={ + 'ip': VRRP_IP_ADDRESS, + 'prefixlen': prefixlen + }, + fixed_ips=[], + topology=TOPOLOGY) + + expected_dict = { + consts.NAME: netns_interface, + consts.MTU: MTU, + consts.ADDRESSES: [ + { + consts.ADDRESS: VRRP_IP_ADDRESS, + consts.PREFIXLEN: prefixlen + }, + { + consts.ADDRESS: VIP_ADDRESS, + consts.PREFIXLEN: 32, + consts.OCTAVIA_OWNED: False + } + ], + consts.ROUTES: [ + { + consts.DST: "0.0.0.0/0", + consts.GATEWAY: GATEWAY, + consts.FLAGS: [consts.ONLINK], + }, + { + consts.DST: SUBNET_CIDR, + consts.PREFSRC: VIP_ADDRESS, + consts.SCOPE: 'link' + } + ], + consts.RULES: [], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh add ipv4 " + f"{netns_interface}") + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh delete ipv4 " + f"{netns_interface}") + }] + } + } + + with mock.patch('os.open'), mock.patch('os.fdopen'), mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' 
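+                # note that expected_dict above marks the VIP address with
+                # OCTAVIA_OWNED: False in ACTIVE_STANDBY, i.e. the address
+                # is not added or removed by the amphora agent itself.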
+ 'InterfaceFile.dump') as mock_dump: + vip_interface_file.write() + + mock_dump.assert_called_once() + + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, expected_dict, args[0]) + + def test_vip_interface_file_ipv6(self): + netns_interface = 'eth1234' + MTU = 1450 + VIP_ADDRESS = '2001:db8::7' + SUBNET_CIDR = '2001:db8::/64' + GATEWAY = '2001:db8::1' + DEST1 = '2001:db8:2::/64' + NEXTHOP = '2001:db8:2::1' + VRRP_IP_ADDRESS = '2001:db8::42' + TOPOLOGY = 'SINGLE' + + cidr = ipaddress.ip_network(SUBNET_CIDR) + prefixlen = cidr.prefixlen + + vip_interface_file = interface_file.VIPInterfaceFile( + name=netns_interface, + mtu=MTU, + vips=[{ + 'ip_address': VIP_ADDRESS, + 'ip_version': cidr.version, + 'prefixlen': prefixlen, + 'gateway': GATEWAY, + 'host_routes': [ + {'destination': DEST1, 'nexthop': NEXTHOP} + ], + }], + vrrp_info={ + 'ip': VRRP_IP_ADDRESS, + 'prefixlen': prefixlen, + }, + fixed_ips=[], + topology=TOPOLOGY) + + expected_dict = { + consts.NAME: netns_interface, + consts.MTU: MTU, + consts.ADDRESSES: [ + { + consts.ADDRESS: VRRP_IP_ADDRESS, + consts.PREFIXLEN: prefixlen + }, + { + consts.ADDRESS: VIP_ADDRESS, + consts.PREFIXLEN: 128 + } + ], + consts.ROUTES: [ + { + consts.DST: "::/0", + consts.GATEWAY: GATEWAY, + consts.FLAGS: [consts.ONLINK] + }, + { + consts.DST: "::/0", + consts.GATEWAY: GATEWAY, + consts.TABLE: 1, + consts.FLAGS: [consts.ONLINK] + }, + { + consts.DST: cidr.exploded, + consts.SCOPE: 'link', + }, + { + consts.DST: cidr.exploded, + consts.PREFSRC: VIP_ADDRESS, + consts.SCOPE: 'link', + consts.TABLE: 1 + }, + { + consts.DST: DEST1, + consts.GATEWAY: NEXTHOP, + consts.FLAGS: [consts.ONLINK] + }, + { + consts.DST: DEST1, + consts.GATEWAY: NEXTHOP, + consts.TABLE: 1, + consts.FLAGS: [consts.ONLINK] + } + ], + consts.RULES: [ + { + consts.SRC: VIP_ADDRESS, + consts.SRC_LEN: 128, + consts.TABLE: 1 + } + ], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh add ipv6 " + f"{netns_interface}") + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh delete ipv6 " + f"{netns_interface}") + }] + } + } + + with mock.patch('os.open'), mock.patch('os.fdopen'), mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' 
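+                # same capture pattern as the IPv4 cases; here the
+                # expectations exercise the IPv6 path (::/0 default routes
+                # and a /128 VIP rule).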
+ 'InterfaceFile.dump') as mock_dump: + vip_interface_file.write() + + mock_dump.assert_called_once() + + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, expected_dict, args[0]) + + def test_port_interface_file(self): + netns_interface = 'eth1234' + FIXED_IP = '192.0.2.2' + SUBNET_CIDR = '192.0.2.0/24' + DEST1 = '198.51.100.0/24' + DEST2 = '203.0.113.0/24' + DEST3 = 'fd01::/64' + NEXTHOP = '192.0.2.1' + NEXTHOP2 = '2001:db7::8' + MTU = 1450 + FIXED_IP_IPV6 = '2001:0db8:0000:0000:0000:0000:0000:0001' + SUBNET_CIDR_IPV6 = '2001:db8::/64' + fixed_ips = [{'ip_address': FIXED_IP, + 'subnet_cidr': SUBNET_CIDR, + 'host_routes': [ + {'destination': DEST1, 'nexthop': NEXTHOP}, + {'destination': DEST2, 'nexthop': NEXTHOP} + ]}, + {'ip_address': FIXED_IP_IPV6, + 'subnet_cidr': SUBNET_CIDR_IPV6, + 'host_routes': [ + {'destination': DEST3, 'nexthop': NEXTHOP2} + ]}, + ] + + port_interface_file = interface_file.PortInterfaceFile( + name=netns_interface, + fixed_ips=fixed_ips, + mtu=MTU) + + expected_dict = { + consts.NAME: netns_interface, + consts.MTU: MTU, + consts.ADDRESSES: [ + { + consts.ADDRESS: FIXED_IP, + consts.PREFIXLEN: ( + ipaddress.ip_network(SUBNET_CIDR).prefixlen + ) + }, + { + consts.ADDRESS: FIXED_IP_IPV6, + consts.PREFIXLEN: ( + ipaddress.ip_network(SUBNET_CIDR_IPV6).prefixlen + ) + } + ], + consts.ROUTES: [ + { + consts.DST: DEST1, + consts.GATEWAY: NEXTHOP + }, + { + consts.DST: DEST2, + consts.GATEWAY: NEXTHOP + }, + { + consts.DST: DEST3, + consts.GATEWAY: NEXTHOP2 + } + ], + consts.RULES: [], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh add ipv4 " + f"{netns_interface}") + }, { + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh add ipv6 " + f"{netns_interface}") + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh delete ipv4 " + f"{netns_interface}") + }, { + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh delete ipv6 " + f"{netns_interface}") + }] + } + } + + with mock.patch('os.open'), mock.patch('os.fdopen'), mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' + 'InterfaceFile.dump') as mock_dump: + port_interface_file.write() + + mock_dump.assert_called_once() + + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, expected_dict, args[0]) + + def test_port_interface_file_dhcp(self): + netns_interface = 'eth1234' + MTU = 1450 + + port_interface_file = interface_file.PortInterfaceFile( + name=netns_interface, + fixed_ips=None, + mtu=MTU) + + expected_dict = { + consts.NAME: netns_interface, + consts.MTU: MTU, + consts.ADDRESSES: [{ + consts.DHCP: True, + consts.IPV6AUTO: True, + }], + consts.ROUTES: [], + consts.RULES: [], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh add ipv4 " + f"{netns_interface}") + }, { + consts.COMMAND: ( + f"/usr/local/bin/lvs-masquerade.sh add ipv6 " + f"{netns_interface}") + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv4 " + f"{netns_interface}") + }, { + consts.COMMAND: ( + "/usr/local/bin/lvs-masquerade.sh delete ipv6 " + f"{netns_interface}") + }] + } + } + + with mock.patch('os.open'), mock.patch('os.fdopen'), mock.patch( + 'octavia.amphorae.backends.utils.interface_file.' 
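+                # with fixed_ips=None the generated file is expected to
+                # fall back to DHCP plus IPv6 autoconfiguration.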
+ 'InterfaceFile.dump') as mock_dump: + port_interface_file.write() + + mock_dump.assert_called_once() + + args = mock_dump.mock_calls[0][1] + test_utils.assert_interface_files_equal( + self, expected_dict, args[0]) + + def test_from_file(self): + filename = 'interface.json' + content = ('{"addresses": [\n' + '{"address": "10.0.0.181",\n' + '"prefixlen": 26}\n' + '],\n' + '"mtu": 1450,\n' + '"if_type": "mytype",\n' + '"name": "eth1",\n' + '"routes": [\n' + '{"dst": "0.0.0.0/0",\n' + '"gateway": "10.0.0.129",\n' + '"onlink": true}\n' + '],\n' + '"rules": [\n' + '{"src": "10.0.0.157",\n' + '"src_len": 32,\n' + '"table": 1}\n' + '],\n' + '"scripts": {\n' + '"down": [\n' + '{"command": "script-down"}\n' + '], "up": [ \n' + '{"command": "script-up"}\n' + ']}}\n') + + self.useFixture( + test_utils.OpenFixture(filename, + contents=content)) + + iface = interface_file.InterfaceFile.from_file(filename) + + expected_dict = { + consts.NAME: "eth1", + consts.IF_TYPE: "mytype", + consts.MTU: 1450, + consts.ADDRESSES: [{ + consts.ADDRESS: "10.0.0.181", + consts.PREFIXLEN: 26 + }], + consts.ROUTES: [{ + consts.DST: "0.0.0.0/0", + consts.GATEWAY: "10.0.0.129", + consts.FLAGS: [consts.ONLINK] + }], + consts.RULES: [{ + consts.SRC: "10.0.0.157", + consts.SRC_LEN: 32, + consts.TABLE: 1 + }], + consts.SCRIPTS: { + consts.IFACE_UP: [{ + consts.COMMAND: "script-up" + }], + consts.IFACE_DOWN: [{ + consts.COMMAND: "script-down" + }] + } + } + + self.assertEqual(expected_dict[consts.NAME], iface.name) + self.assertEqual(expected_dict[consts.MTU], iface.mtu) + test_utils.assert_address_lists_equal( + self, expected_dict[consts.ADDRESSES], iface.addresses) + test_utils.assert_rule_lists_equal( + self, expected_dict[consts.RULES], iface.rules) + test_utils.assert_script_lists_equal( + self, expected_dict[consts.SCRIPTS], iface.scripts) diff --git a/octavia/tests/unit/amphorae/backends/utils/test_ip_advertisement.py b/octavia/tests/unit/amphorae/backends/utils/test_ip_advertisement.py new file mode 100644 index 0000000000..ff8d817a13 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/utils/test_ip_advertisement.py @@ -0,0 +1,212 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from binascii import a2b_hex +import socket +from struct import pack +from unittest import mock + +from octavia.amphorae.backends.utils import ip_advertisement +from octavia.common import constants +import octavia.tests.unit.base as base + + +class TestIPAdvertisement(base.TestCase): + + def setUp(self): + super().setUp() + + @mock.patch('octavia.amphorae.backends.utils.network_namespace.' 
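+                # patching NetworkNamespace keeps the test from entering a
+                # real network namespace; the mock only records the call.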
+ 'NetworkNamespace') + @mock.patch('socket.AF_PACKET', create=True) + @mock.patch('socket.socket') + def test_garp(self, mock_socket, mock_socket_packet, mock_netns): + ARP_ETHERTYPE = 0x0806 + EXPECTED_PACKET_DATA = (b'\xff\xff\xff\xff\xff\xff\x00\x00^\x00S3\x08' + b'\x06\x00\x01\x08\x00\x06\x04\x00\x01\x00' + b'\x00^\x00S3\xcb\x00q\x02\xff\xff\xff\xff' + b'\xff\xff\xcb\x00q\x02') + FAKE_INTERFACE = 'fake0' + FAKE_MAC = '00005E005333' + FAKE_NETNS = 'fake_netns' + + mock_garp_socket = mock.MagicMock() + mock_garp_socket.getsockname.return_value = [None, None, None, None, + a2b_hex(FAKE_MAC)] + mock_socket.return_value = mock_garp_socket + + # Test with a network namespace + ip_advertisement.garp(FAKE_INTERFACE, '203.0.113.2', net_ns=FAKE_NETNS) + + mock_netns.assert_called_once_with(FAKE_NETNS) + mock_garp_socket.bind.assert_called_once_with((FAKE_INTERFACE, + ARP_ETHERTYPE)) + mock_garp_socket.getsockname.assert_called_once_with() + mock_garp_socket.send.assert_called_once_with(EXPECTED_PACKET_DATA) + mock_garp_socket.close.assert_called_once_with() + + # Test without a network namespace + mock_netns.reset_mock() + mock_garp_socket.reset_mock() + ip_advertisement.garp(FAKE_INTERFACE, '203.0.113.2') + + mock_netns.assert_not_called() + mock_garp_socket.bind.assert_called_once_with((FAKE_INTERFACE, + ARP_ETHERTYPE)) + mock_garp_socket.getsockname.assert_called_once_with() + mock_garp_socket.send.assert_called_once_with(EXPECTED_PACKET_DATA) + mock_garp_socket.close.assert_called_once_with() + + def test_calculate_icmpv6_checksum(self): + TEST_PACKET1 = ( + b'\x01\r\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003\xff\x02' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00' + b'\x00\x00:\x00 \x88\x00\x00\x00 \x01\r\xb8\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x003\xff\x02\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00:\x00') + TEST_PACKET2 = ( + b'\x01\r\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003\xff\x02' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00' + b'\x00\x00:\x00 \x88\x00\x00\x00 \x01\r\xb8\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x003\xff\x02\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00:\x00\x01') + + self.assertEqual( + 35645, ip_advertisement.calculate_icmpv6_checksum(TEST_PACKET1)) + self.assertEqual( + 35389, ip_advertisement.calculate_icmpv6_checksum(TEST_PACKET2)) + + @mock.patch('fcntl.ioctl') + @mock.patch('octavia.amphorae.backends.utils.network_namespace.' 
+ 'NetworkNamespace') + @mock.patch('socket.socket') + def test_neighbor_advertisement(self, mock_socket, mock_netns, mock_ioctl): + ALL_NODES_ADDR = 'ff02::1' + EXPECTED_PACKET_DATA = (b'\x88\x00\x1dk\xa0\x00\x00\x00 \x01\r\xb8\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003' + b'\x02\x01') + FAKE_INTERFACE = 'fake0' + FAKE_MAC = '00005E005333' + FAKE_NETNS = 'fake_netns' + ICMPV6_PROTO = socket.getprotobyname(constants.IPV6_ICMP) + SIOCGIFHWADDR = 0x8927 + SOURCE_IP = '2001:db8::33' + + mock_na_socket = mock.MagicMock() + mock_socket.return_value = mock_na_socket + mock_ioctl.return_value = a2b_hex(FAKE_MAC) + + # Test with a network namespace + ip_advertisement.neighbor_advertisement(FAKE_INTERFACE, SOURCE_IP, + net_ns=FAKE_NETNS) + + mock_netns.assert_called_once_with(FAKE_NETNS) + mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_RAW, + ICMPV6_PROTO) + mock_na_socket.setsockopt.assert_called_once_with( + socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255) + mock_na_socket.bind.assert_called_once_with((SOURCE_IP, 0)) + mock_ioctl.assert_called_once_with( + mock_na_socket.fileno(), SIOCGIFHWADDR, + pack('256s', bytes(FAKE_INTERFACE, 'utf-8'))) + mock_na_socket.sendto.assert_called_once_with( + EXPECTED_PACKET_DATA, (ALL_NODES_ADDR, 0, 0, 0)) + mock_na_socket.close.assert_called_once_with() + + # Test without a network namespace + mock_na_socket.reset_mock() + mock_netns.reset_mock() + mock_ioctl.reset_mock() + mock_socket.reset_mock() + + ip_advertisement.neighbor_advertisement(FAKE_INTERFACE, SOURCE_IP) + + mock_netns.assert_not_called() + mock_socket.assert_called_once_with(socket.AF_INET6, socket.SOCK_RAW, + ICMPV6_PROTO) + mock_na_socket.setsockopt.assert_called_once_with( + socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, 255) + mock_na_socket.bind.assert_called_once_with((SOURCE_IP, 0)) + mock_ioctl.assert_called_once_with( + mock_na_socket.fileno(), SIOCGIFHWADDR, + pack('256s', bytes(FAKE_INTERFACE, 'utf-8'))) + mock_na_socket.sendto.assert_called_once_with( + EXPECTED_PACKET_DATA, (ALL_NODES_ADDR, 0, 0, 0)) + mock_na_socket.close.assert_called_once_with() + + @mock.patch('octavia.common.utils.is_ipv6') + @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.garp') + @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.' 
+ 'neighbor_advertisement')
+ def test_send_ip_advertisement(self, mock_na, mock_garp, mock_is_ipv6):
+ FAKE_INTERFACE = 'fake0'
+ FAKE_NETNS = 'fake_netns'
+ IPV4_ADDRESS = '203.0.113.9'
+ IPV6_ADDRESS = '2001:db8::33'
+
+ mock_is_ipv6.side_effect = [mock.DEFAULT, mock.DEFAULT, False]
+
+ # Test IPv4 advertisement
+ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV4_ADDRESS)
+
+ mock_garp.assert_called_once_with(FAKE_INTERFACE, IPV4_ADDRESS, None)
+ mock_na.assert_not_called()
+
+ # Test IPv4 advertisement with a network namespace
+ mock_garp.reset_mock()
+ mock_na.reset_mock()
+
+ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV4_ADDRESS,
+ net_ns=FAKE_NETNS)
+
+ mock_garp.assert_called_once_with(FAKE_INTERFACE, IPV4_ADDRESS,
+ FAKE_NETNS)
+ mock_na.assert_not_called()
+
+ # Test IPv6 advertisement
+ mock_garp.reset_mock()
+ mock_na.reset_mock()
+
+ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS)
+
+ mock_garp.assert_not_called()
+ mock_na.assert_called_once_with(FAKE_INTERFACE, IPV6_ADDRESS, None)
+
+ # Test IPv6 advertisement with a network namespace
+ mock_garp.reset_mock()
+ mock_na.reset_mock()
+
+ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS,
+ net_ns=FAKE_NETNS)
+
+ mock_garp.assert_not_called()
+ mock_na.assert_called_once_with(FAKE_INTERFACE, IPV6_ADDRESS,
+ FAKE_NETNS)
+
+ # Test bogus IP
+ mock_garp.reset_mock()
+ mock_na.reset_mock()
+
+ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, 'not an IP')
+
+ mock_garp.assert_not_called()
+ mock_na.assert_not_called()
+
+ # Test unknown IP version
+ mock_garp.reset_mock()
+ mock_na.reset_mock()
+
+ ip_advertisement.send_ip_advertisement(FAKE_INTERFACE, IPV6_ADDRESS)
+
+ mock_garp.assert_not_called()
+ mock_na.assert_not_called()
diff --git a/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py b/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
new file mode 100644
index 0000000000..f3abe725f7
--- /dev/null
+++ b/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py
@@ -0,0 +1,791 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from unittest import mock
+
+from oslo_utils import uuidutils
+
+from octavia.amphorae.backends.agent.api_server import util
+from octavia.amphorae.backends.utils import keepalivedlvs_query as lvs_query
+from octavia.common import constants
+from octavia.tests.common import utils as test_utils
+from octavia.tests.unit import base
+
+# Kernel file sample matching the content of /proc/net/ip_vs.
+# The real servers and their listening ports are
+# 10.0.0.25:2222 and 10.0.0.35:3333.
+# Real server 10.0.0.45:4444 is not listed because its health check failed.
+# The virtual server and its listening port is
+# 10.0.0.37:7777.
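+# As an illustration of the kernel's encoding (not used by the tests
+# themselves): /proc/net/ip_vs reports IPv4 endpoints as eight hex digits
+# for the address and four for the port, so "0A000025:1E61" below decodes
+# to 10.0.0.37:7777 (0x0A=10, 0x00=0, 0x00=0, 0x25=37, 0x1E61=7777).
+# A minimal decoding sketch, assuming the plain "ADDR:PORT" entry format:
+#
+#     def decode_v4_endpoint(entry):
+#         addr, port = entry.split(':')
+#         # Split the eight hex digits into four one-byte octets
+#         octets = [str(int(addr[i:i + 2], 16)) for i in range(0, 8, 2)]
+#         return '.'.join(octets) + ':' + str(int(port, 16))
+#
+#     decode_v4_endpoint('0A000025:1E61')  # -> '10.0.0.37:7777'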
+KERNAL_FILE_SAMPLE_V4 = (
+ "IP Virtual Server version 1.2.1 (size=4096)\n"
+ "Prot LocalAddress:Port Scheduler Flags\n"
+ " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n"
+ "UDP 0A000025:1E61 rr\n"
+ " -> 0A000023:0D05 Masq 2 0 0\n"
+ " -> 0A000019:08AE Masq 3 0 0")
+# Kernel file sample matching the content of /proc/net/ip_vs.
+# The real servers and their listening ports are
+# [fd79:35e2:9963:0:f816:3eff:feca:b7bf]:2222 and
+# [fd79:35e2:9963:0:f816:3eff:fe9d:94df]:3333.
+# Real server [fd79:35e2:9963:0:f816:3eff:fe9d:8f3f]:4444 is not listed
+# because its health check failed.
+# The virtual server and its listening port is
+# [fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7777.
+KERNAL_FILE_SAMPLE_V6 = (
+ "IP Virtual Server version 1.2.1 (size=4096)\n"
+ "Prot LocalAddress:Port Scheduler Flags\n"
+ " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n"
+ "UDP [fd79:35e2:9963:0000:f816:3eff:fe6d:7a2a]:1E61 rr\n"
+ " -> [fd79:35e2:9963:0000:f816:3eff:feca:b7bf]:08AE "
+ "Masq 3 0 0\n"
+ " -> [fd79:35e2:9963:0000:f816:3eff:fe9d:94df]:0D05 "
+ "Masq 2 0 0\n"
+ " -> [fd79:35e2::8f3f]:115C "
+ "Masq 2 0 0")
+
+KERNEL_FILE_SAMPLE_MIXED = (
+ "IP Virtual Server version 1.2.1 (size=4096)\n"
+ "Prot LocalAddress:Port Scheduler Flags\n"
+ " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n"
+ "UDP [fd79:35e2:9963:0000:f816:3eff:fe6d:7a2a]:1E61 rr\n"
+ " -> [fd79:35e2:9963:0000:f816:3eff:feca:b7bf]:08AE "
+ "Masq 3 0 0\n"
+ " -> [fd79:35e2:9963:0000:f816:3eff:fe9d:94df]:0D05 "
+ "Masq 2 0 0\n"
+ " -> [fd79:35e2::8f3f]:115C "
+ "Masq 2 0 0\n"
+ "UDP 0A000025:1E61 rr\n"
+ " -> 0A000023:0D05 Masq 2 0 0\n"
+ " -> 0A000019:08AE Masq 3 0 0\n"
+ "UDP 0A000025:FF12 rr\n"
+ " -> 0A000023:12AB Masq 2 0 0\n"
+ " -> 0A000019:BF2E Masq 3 0 0")
+
+CFG_FILE_TEMPLATE_v4 = (
+ "# Configuration for Listener %(listener_id)s\n\n"
+ "net_namespace %(ns_name)s\n\n"
+ "virtual_server_group ipv4-group {\n"
+ " 10.0.0.37 7777\n"
+ "}\n\n"
+ "virtual_server group ipv4-group {\n"
+ " lb_algo rr\n"
+ " lb_kind NAT\n"
+ " protocol udp\n\n\n"
+ " # Configuration for Pool %(pool_id)s\n"
+ " # Configuration for Member %(member_id1)s\n"
+ " real_server 10.0.0.25 2222 {\n"
+ " weight 3\n"
+ " persistence_timeout 5\n"
+ " persistence_granularity 255.0.0.0\n\n"
+ " MISC_CHECK {\n\n"
+ " misc_path \"/usr/bin/check_script.sh\"\n\n"
+ " misc_timeout 5\n\n"
+ " }\n\n"
+ " }\n\n"
+ " # Configuration for Member %(member_id2)s\n"
+ " real_server 10.0.0.35 3333 {\n"
+ " weight 2\n"
+ " persistence_timeout 5\n"
+ " persistence_granularity 255.0.0.0\n\n"
+ " MISC_CHECK {\n\n"
+ " misc_path \"/usr/bin/check_script.sh\"\n\n"
+ " misc_timeout 5\n\n"
+ " }\n\n"
+ " }\n\n"
+ " # Configuration for Member %(member_id3)s\n"
+ " real_server 10.0.0.45 4444 {\n"
+ " weight 2\n"
+ " persistence_timeout 5\n"
+ " persistence_granularity 255.0.0.0\n\n"
+ " MISC_CHECK {\n\n"
+ " misc_path \"/usr/bin/check_script.sh\"\n\n"
+ " misc_timeout 5\n\n"
+ " }\n\n"
+ " }\n\n"
+ " # Member %(member_id4)s is disabled\n\n"
+ "}")
+
+CFG_FILE_TEMPLATE_v6 = (
+ "# Configuration for Listener %(listener_id)s\n\n"
+ "net_namespace %(ns_name)s\n\n"
+ "virtual_server_group ipv6-group {\n"
+ " fd79:35e2:9963:0:f816:3eff:fe6d:7a2a 7777\n"
+ "}\n\n"
+ "virtual_server group ipv6-group {\n"
+ " lb_algo rr\n"
+ " lb_kind NAT\n"
+ " protocol udp\n\n\n"
+ " # Configuration for Pool %(pool_id)s\n"
+ " # Configuration for Member %(member_id1)s\n"
+ " real_server fd79:35e2:9963:0:f816:3eff:feca:b7bf 2222 {\n"
+ " weight 3\n"
+ " MISC_CHECK {\n\n"
+ " misc_path
\"/usr/bin/check_script.sh\"\n\n" + " misc_timeout 5\n\n" + " }\n\n" + " }\n\n" + " # Configuration for Member %(member_id2)s\n" + " real_server fd79:35e2:9963:0:f816:3eff:fe9d:94df 3333 {\n" + " weight 2\n" + " MISC_CHECK {\n\n" + " misc_path \"/usr/bin/check_script.sh\"\n\n" + " misc_timeout 5\n\n" + " }\n\n" + " }\n\n" + " # Configuration for Member %(member_id3)s\n" + " real_server fd79:35e2:9963:0:f816:3eff:fe9d:8f3f 4444 {\n" + " weight 2\n" + " MISC_CHECK {\n\n" + " misc_path \"/usr/bin/check_script.sh\"\n\n" + " misc_timeout 5\n\n" + " }\n\n" + " }\n\n" + " # Member %(member_id4)s is disabled\n\n" + " # Configuration for Member %(member_id5)s\n" + " real_server fd79:35e2:0:0:0:0:0:8f3f 4444 {\n" + " weight 2\n" + " MISC_CHECK {\n\n" + " misc_path \"/usr/bin/check_script.sh\"\n\n" + " misc_timeout 5\n\n" + " }\n\n" + " }\n\n" + "}") + +CFG_FILE_TEMPLATE_mixed = ( + "# Configuration for Listener %(listener_id)s\n\n" + "net_namespace %(ns_name)s\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.37 7777\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo rr\n" + " lb_kind NAT\n" + " protocol udp\n\n\n" + " # Configuration for Pool %(pool_id)s\n" + " # Configuration for Member %(member_id1)s\n" + " real_server 10.0.0.25 2222 {\n" + " weight 3\n" + " MISC_CHECK {\n\n" + " misc_path \"/usr/bin/check_script.sh\"\n\n" + " misc_timeout 5\n\n" + " }\n\n" + " }\n\n" + " # Member %(member_id2)s is disabled\n\n" + "}\n" + "virtual_server_group ipv6-group {\n" + " fd79:35e2:9963:0:f816:3eff:fe6d:7a2a 7777\n" + "}\n\n" + "virtual_server group ipv6-group {\n" + " lb_algo rr\n" + " lb_kind NAT\n" + " protocol udp\n\n\n" + " # Configuration for Pool %(pool_id)s\n" + " # Configuration for Member %(member_id3)s\n" + " real_server fd79:35e2:9963:0:f816:3eff:feca:b7bf 2222 {\n" + " weight 3\n" + " MISC_CHECK {\n\n" + " misc_path \"/usr/bin/check_script.sh\"\n\n" + " misc_timeout 5\n\n" + " }\n\n" + " }\n\n" + " # Configuration for Member %(member_id4)s\n" + " real_server fd79:35e2:9963:0:f816:3eff:fe9d:94df 3333 {\n" + " weight 2\n" + " MISC_CHECK {\n\n" + " misc_path \"/usr/bin/check_script.sh\"\n\n" + " misc_timeout 5\n\n" + " }\n\n" + " }\n\n" + " # Member %(member_id5)s is disabled\n\n" + "}") + +CFG_FILE_TEMPLATE_mixed_no_ipv6_member = ( + "# Configuration for Listener %(listener_id)s\n\n" + "net_namespace %(ns_name)s\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.37 7777\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo rr\n" + " lvs_method NAT\n" + " protocol udp\n\n\n" + " # Configuration for Pool %(pool_id)s\n" + " # Configuration for Member %(member_id1)s\n" + " real_server 10.0.0.25 2222 {\n" + " weight 3\n" + " MISC_CHECK {\n\n" + " misc_path \"/usr/bin/check_script.sh\"\n\n" + " misc_timeout 5\n\n" + " }\n\n" + " }\n\n" + " # Member %(member_id2)s is disabled\n\n" + "}\n" + "virtual_server_group ipv6-group {\n" + " fd79:35e2:9963:0:f816:3eff:fe6d:7a2a 7777\n" + "}\n\n" + "virtual_server group ipv6-group {\n" + " lb_algo rr\n" + " lvs_method NAT\n" + " protocol udp\n\n\n" + " # Configuration for Pool %(pool_id)s\n" + "}") + +CFG_FILE_TEMPLATE_DISABLED_LISTENER = ( + "# Listener %(listener_id)s is disabled \n\n" + "net_namespace %(ns_name)s\n\n" +) + +IPVSADM_OUTPUT_TEMPLATE = ( + "IP Virtual Server version 1.2.1 (size=4096)\n" + "Prot LocalAddress:Port Scheduler Flags\n" + " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n" + "UDP %(listener_ipport)s rr\n" + " -> %(member1_ipport)s Masq 3 0 0\n" + " -> %(member2_ipport)s Masq 2 0 0") + 
+IPVSADM_STATS_OUTPUT_TEMPLATE = ( + "IP Virtual Server version 1.2.1 (size=4096)\n" + "Prot LocalAddress:Port Conns InPkts OutPkts " + "InBytes OutBytes\n" + " -> RemoteAddress:Port\n" + "UDP %(listener_ipport)s 5 4264 5" + " 6387472 7490\n" + " -> %(member1_ipport)s 2 1706 2" + " 2555588 2996\n" + " -> %(member2_ipport)s 3 2558 3" + " 3831884 4494") + + +class LvsQueryTestCase(base.TestCase): + def setUp(self): + super().setUp() + self.listener_id_v4 = uuidutils.generate_uuid() + self.pool_id_v4 = uuidutils.generate_uuid() + self.member_id1_v4 = uuidutils.generate_uuid() + self.member_id2_v4 = uuidutils.generate_uuid() + self.member_id3_v4 = uuidutils.generate_uuid() + self.member_id4_v4 = uuidutils.generate_uuid() + self.listener_id_v6 = uuidutils.generate_uuid() + self.pool_id_v6 = uuidutils.generate_uuid() + self.member_id1_v6 = uuidutils.generate_uuid() + self.member_id2_v6 = uuidutils.generate_uuid() + self.member_id3_v6 = uuidutils.generate_uuid() + self.member_id4_v6 = uuidutils.generate_uuid() + self.member_id5_v6 = uuidutils.generate_uuid() + self.listener_id_mixed = uuidutils.generate_uuid() + self.listener_id_mixed_no_ipv6_member = uuidutils.generate_uuid() + self.pool_id_mixed = uuidutils.generate_uuid() + self.disabled_listener_id = uuidutils.generate_uuid() + cfg_content_v4 = CFG_FILE_TEMPLATE_v4 % { + 'listener_id': self.listener_id_v4, + 'ns_name': constants.AMPHORA_NAMESPACE, + 'pool_id': self.pool_id_v4, + 'member_id1': self.member_id1_v4, + 'member_id2': self.member_id2_v4, + 'member_id3': self.member_id3_v4, + 'member_id4': self.member_id4_v4, + } + cfg_content_v6 = CFG_FILE_TEMPLATE_v6 % { + 'listener_id': self.listener_id_v6, + 'ns_name': constants.AMPHORA_NAMESPACE, + 'pool_id': self.pool_id_v6, + 'member_id1': self.member_id1_v6, + 'member_id2': self.member_id2_v6, + 'member_id3': self.member_id3_v6, + 'member_id4': self.member_id4_v6, + 'member_id5': self.member_id5_v6 + } + cfg_content_mixed = CFG_FILE_TEMPLATE_mixed % { + 'listener_id': self.listener_id_mixed, + 'ns_name': constants.AMPHORA_NAMESPACE, + 'pool_id': self.pool_id_mixed, + 'member_id1': self.member_id1_v4, + 'member_id2': self.member_id2_v4, + 'member_id3': self.member_id3_v6, + 'member_id4': self.member_id4_v6, + 'member_id5': self.member_id5_v6 + } + cfg_content_mixed_no_ipv6_member = ( + CFG_FILE_TEMPLATE_mixed_no_ipv6_member % { + 'listener_id': self.listener_id_mixed, + 'ns_name': constants.AMPHORA_NAMESPACE, + 'pool_id': self.pool_id_mixed, + 'member_id1': self.member_id1_v4, + 'member_id2': self.member_id2_v4 + } + ) + cfg_content_disabled_listener = ( + CFG_FILE_TEMPLATE_DISABLED_LISTENER % { + 'listener_id': self.listener_id_v6, + 'ns_name': constants.AMPHORA_NAMESPACE, + } + ) + self.useFixture(test_utils.OpenFixture( + util.keepalived_lvs_cfg_path(self.listener_id_v4), cfg_content_v4)) + self.useFixture(test_utils.OpenFixture( + util.keepalived_lvs_cfg_path(self.listener_id_v6), cfg_content_v6)) + self.useFixture(test_utils.OpenFixture( + util.keepalived_lvs_cfg_path(self.listener_id_mixed), + cfg_content_mixed)) + self.useFixture(test_utils.OpenFixture( + util.keepalived_lvs_cfg_path( + self.listener_id_mixed_no_ipv6_member), + cfg_content_mixed_no_ipv6_member)) + self.useFixture(test_utils.OpenFixture( + util.keepalived_lvs_cfg_path(self.disabled_listener_id), + cfg_content_disabled_listener)) + + @mock.patch('subprocess.check_output') + def test_get_listener_realserver_mapping(self, mock_check_output): + # Ipv4 resolver + input_listener_ip_port = ['10.0.0.37:7777'] + target_ns = 
constants.AMPHORA_NAMESPACE
+ mock_check_output.return_value = KERNAL_FILE_SAMPLE_V4
+ result = lvs_query.get_listener_realserver_mapping(
+ target_ns, input_listener_ip_port,
+ health_monitor_enabled=True)
+ expected = {'10.0.0.25:2222': {'status': 'UP',
+ 'Forward': 'Masq',
+ 'Weight': '3',
+ 'ActiveConn': '0',
+ 'InActConn': '0'},
+ '10.0.0.35:3333': {'status': 'UP',
+ 'Forward': 'Masq',
+ 'Weight': '2',
+ 'ActiveConn': '0',
+ 'InActConn': '0'}}
+ self.assertEqual(expected, result)
+
+ # Ipv6 resolver
+ input_listener_ip_port = [
+ '[fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7777']
+ mock_check_output.return_value = KERNAL_FILE_SAMPLE_V6
+ result = lvs_query.get_listener_realserver_mapping(
+ target_ns, input_listener_ip_port,
+ health_monitor_enabled=True)
+ expected = {'[fd79:35e2:9963:0:f816:3eff:feca:b7bf]:2222':
+ {'status': constants.UP,
+ 'Forward': 'Masq',
+ 'Weight': '3',
+ 'ActiveConn': '0',
+ 'InActConn': '0'},
+ '[fd79:35e2:9963:0:f816:3eff:fe9d:94df]:3333':
+ {'status': constants.UP,
+ 'Forward': 'Masq',
+ 'Weight': '2',
+ 'ActiveConn': '0',
+ 'InActConn': '0'},
+ '[fd79:35e2::8f3f]:4444':
+ {'status': constants.UP,
+ 'Forward': 'Masq',
+ 'Weight': '2',
+ 'ActiveConn': '0',
+ 'InActConn': '0'}}
+ self.assertEqual(expected, result)
+
+ # mixed resolver
+ input_listener_ip_port = [
+ '[fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7777',
+ '10.0.0.37:7777']
+ mock_check_output.return_value = KERNEL_FILE_SAMPLE_MIXED
+ result = lvs_query.get_listener_realserver_mapping(
+ target_ns, input_listener_ip_port,
+ health_monitor_enabled=True)
+ expected = {'[fd79:35e2:9963:0:f816:3eff:feca:b7bf]:2222':
+ {'status': constants.UP,
+ 'Forward': 'Masq',
+ 'Weight': '3',
+ 'ActiveConn': '0',
+ 'InActConn': '0'},
+ '[fd79:35e2:9963:0:f816:3eff:fe9d:94df]:3333':
+ {'status': constants.UP,
+ 'Forward': 'Masq',
+ 'Weight': '2',
+ 'ActiveConn': '0',
+ 'InActConn': '0'},
+ '[fd79:35e2::8f3f]:4444':
+ {'status': constants.UP,
+ 'Forward': 'Masq',
+ 'Weight': '2',
+ 'ActiveConn': '0',
+ 'InActConn': '0'},
+ '10.0.0.25:2222': {'status': 'UP',
+ 'Forward': 'Masq',
+ 'Weight': '3',
+ 'ActiveConn': '0',
+ 'InActConn': '0'},
+ '10.0.0.35:3333': {'status': 'UP',
+ 'Forward': 'Masq',
+ 'Weight': '2',
+ 'ActiveConn': '0',
+ 'InActConn': '0'}}
+ self.assertEqual(expected, result)
+
+ # negative cases
+ mock_check_output.return_value = KERNAL_FILE_SAMPLE_V4
+ for listener_ip_port in ['10.0.0.37:7776', '10.0.0.31:7777']:
+ result = lvs_query.get_listener_realserver_mapping(
+ target_ns, [listener_ip_port],
+ health_monitor_enabled=True)
+ self.assertEqual({}, result)
+
+ mock_check_output.return_value = KERNAL_FILE_SAMPLE_V6
+ for listener_ip_port in [
+ '[fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7776',
+ '[fd79:35e2:9973:0:f816:3eff:fe6d:7a2a]:7777']:
+ result = lvs_query.get_listener_realserver_mapping(
+ target_ns, [listener_ip_port],
+ health_monitor_enabled=True)
+ self.assertEqual({}, result)
+
+ def test_get_lvs_listener_resource_ipports_nsname(self):
+ # ipv4
+ res = lvs_query.get_lvs_listener_resource_ipports_nsname(
+ self.listener_id_v4)
+ expected = {'Listener': {'id': self.listener_id_v4,
+ 'ipports': ['10.0.0.37:7777']},
+ 'Pool': {'id': self.pool_id_v4},
+ 'Members': [{'id': self.member_id1_v4,
+ 'ipport': '10.0.0.25:2222'},
+ {'id': self.member_id2_v4,
+ 'ipport': '10.0.0.35:3333'},
+ {'id': self.member_id3_v4,
+ 'ipport': '10.0.0.45:4444'},
+ {'id': self.member_id4_v4,
+ 'ipport': None}]}
+ self.assertEqual((expected, constants.AMPHORA_NAMESPACE), res)
+
+ # ipv6
+ res =
lvs_query.get_lvs_listener_resource_ipports_nsname( + self.listener_id_v6) + expected = {'Listener': { + 'id': self.listener_id_v6, + 'ipports': ['[fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7777']}, + 'Pool': {'id': self.pool_id_v6}, + 'Members': [ + {'id': self.member_id1_v6, + 'ipport': '[fd79:35e2:9963:0:f816:3eff:feca:b7bf]:2222'}, + {'id': self.member_id2_v6, + 'ipport': '[fd79:35e2:9963:0:f816:3eff:fe9d:94df]:3333'}, + {'id': self.member_id3_v6, + 'ipport': '[fd79:35e2:9963:0:f816:3eff:fe9d:8f3f]:4444'}, + {'id': self.member_id5_v6, + 'ipport': '[fd79:35e2::8f3f]:4444'}, + {'id': self.member_id4_v6, + 'ipport': None}]} + self.assertEqual((expected, constants.AMPHORA_NAMESPACE), res) + + # disabled + res = lvs_query.get_lvs_listener_resource_ipports_nsname( + self.disabled_listener_id) + self.assertEqual((None, constants.AMPHORA_NAMESPACE), res) + + # multi-vip/mixed + res = lvs_query.get_lvs_listener_resource_ipports_nsname( + self.listener_id_mixed) + expected = {'Listener': { + 'id': self.listener_id_mixed, + 'ipports': [ + '10.0.0.37:7777', + '[fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7777']}, + 'Pool': {'id': self.pool_id_mixed}, + 'Members': [ + {'id': self.member_id1_v4, + 'ipport': '10.0.0.25:2222'}, + {'id': self.member_id3_v6, + 'ipport': '[fd79:35e2:9963:0:f816:3eff:feca:b7bf]:2222'}, + {'id': self.member_id4_v6, + 'ipport': '[fd79:35e2:9963:0:f816:3eff:fe9d:94df]:3333'}, + {'id': self.member_id2_v4, + 'ipport': None}, + {'id': self.member_id5_v6, + 'ipport': None}]} + self.assertEqual((expected, constants.AMPHORA_NAMESPACE), res) + + @mock.patch('os.stat') + @mock.patch('subprocess.check_output') + def test_get_lvs_listener_pool_status(self, mock_check_output, + mock_os_stat): + mock_os_stat.side_effect = ( + mock.Mock(st_mtime=1234), + mock.Mock(st_mtime=1234), + ) + + # test with ipv4 and ipv6 + mock_check_output.return_value = KERNAL_FILE_SAMPLE_V4 + res = lvs_query.get_lvs_listener_pool_status(self.listener_id_v4) + expected = { + 'lvs': + {'uuid': self.pool_id_v4, + 'status': constants.UP, + 'members': {self.member_id1_v4: constants.UP, + self.member_id2_v4: constants.UP, + self.member_id3_v4: constants.DOWN, + self.member_id4_v4: constants.MAINT}}} + self.assertEqual(expected, res) + + mock_os_stat.side_effect = ( + mock.Mock(st_mtime=1234), + mock.Mock(st_mtime=1234), + ) + + mock_check_output.return_value = KERNAL_FILE_SAMPLE_V6 + res = lvs_query.get_lvs_listener_pool_status(self.listener_id_v6) + expected = { + 'lvs': + {'uuid': self.pool_id_v6, + 'status': constants.UP, + 'members': {self.member_id1_v6: constants.UP, + self.member_id2_v6: constants.UP, + self.member_id3_v6: constants.DOWN, + self.member_id4_v6: constants.MAINT, + self.member_id5_v6: constants.UP}}} + self.assertEqual(expected, res) + + @mock.patch('os.stat') + @mock.patch('subprocess.check_output') + def test_get_lvs_listener_pool_status_restarting(self, mock_check_output, + mock_os_stat): + mock_os_stat.side_effect = ( + mock.Mock(st_mtime=1234), # config file + mock.Mock(st_mtime=1220), # pid file + ) + + # test with ipv4 and ipv6 + mock_check_output.return_value = KERNAL_FILE_SAMPLE_V4 + res = lvs_query.get_lvs_listener_pool_status(self.listener_id_v4) + expected = { + 'lvs': + {'uuid': self.pool_id_v4, + 'status': constants.UP, + 'members': {self.member_id1_v4: constants.UP, + self.member_id2_v4: constants.UP, + self.member_id3_v4: constants.RESTARTING, + self.member_id4_v4: constants.MAINT}}} + self.assertEqual(expected, res) + + @mock.patch('octavia.amphorae.backends.utils.keepalivedlvs_query.' 
+ 'get_lvs_listener_resource_ipports_nsname')
+ def test_get_lvs_listener_pool_status_when_no_pool(
+ self, mock_get_resource_ipports):
+ # Just test with IPv4; the IPv6 test is the same.
+ # The returned resource_ipport_mapping doesn't contain a 'Pool'
+ # resource, which means the listener has no pool and is not usable
+ # at this moment, so the pool status query returns nothing.
+ mock_get_resource_ipports.return_value = (
+ {
+ 'Listener': {
+ 'id': self.listener_id_v4,
+ 'ipports': ['10.0.0.37:7777']}},
+ constants.AMPHORA_NAMESPACE)
+ res = lvs_query.get_lvs_listener_pool_status(self.listener_id_v4)
+ self.assertEqual({}, res)
+
+ @mock.patch('octavia.amphorae.backends.utils.keepalivedlvs_query.'
+ 'get_lvs_listener_resource_ipports_nsname')
+ def test_get_lvs_listener_pool_status_when_no_members(
+ self, mock_get_resource_ipports):
+ # Just test with IPv4; the IPv6 test is the same.
+ # The returned resource_ipport_mapping doesn't contain any 'Members'
+ # resources, which means the listener's pool has no enabled members,
+ # so the pool status is reported as UP with an empty member list.
+ mock_get_resource_ipports.return_value = (
+ {
+ 'Listener': {'id': self.listener_id_v4,
+ 'ipports': ['10.0.0.37:7777']},
+ 'Pool': {'id': self.pool_id_v4}},
+ constants.AMPHORA_NAMESPACE)
+ res = lvs_query.get_lvs_listener_pool_status(self.listener_id_v4)
+ expected = {'lvs': {
+ 'uuid': self.pool_id_v4,
+ 'status': constants.UP,
+ 'members': {}
+ }}
+ self.assertEqual(expected, res)
+
+ @mock.patch('os.stat')
+ @mock.patch('octavia.amphorae.backends.utils.keepalivedlvs_query.'
+ 'get_listener_realserver_mapping')
+ def test_get_lvs_listener_pool_status_when_not_get_realserver_result(
+ self, mock_get_mapping, mock_os_stat):
+ # This is hit if the kernel LVS file (/proc/net/ip_vs) has lost its
+ # content. In that case, even though the pool and members are
+ # configured in the UDP keepalived config file, the status of the
+ # pool and its members has to be set to DOWN.
+ mock_os_stat.side_effect = (
+ mock.Mock(st_mtime=1234),
+ mock.Mock(st_mtime=1234),
+ )
+ mock_get_mapping.return_value = {}
+ res = lvs_query.get_lvs_listener_pool_status(self.listener_id_v4)
+ expected = {
+ 'lvs':
+ {'uuid': self.pool_id_v4,
+ 'status': constants.DOWN,
+ 'members': {self.member_id1_v4: constants.DOWN,
+ self.member_id2_v4: constants.DOWN,
+ self.member_id3_v4: constants.DOWN,
+ self.member_id4_v4: constants.MAINT}}}
+ self.assertEqual(expected, res)
+
+ @mock.patch('subprocess.check_output')
+ def test_get_ipvsadm_info(self, mock_check_output):
+ for ip_list in [["10.0.0.37:7777", "10.0.0.25:2222", "10.0.0.35:3333"],
+ ["[fd79:35e2:9963:0:f816:3eff:fe6d:7a2a]:7777",
+ "[fd79:35e2:9963:0:f816:3eff:feca:b7bf]:2222",
+ "[fd79:35e2:9963:0:f816:3eff:fe9d:94df]:3333"]]:
+ mock_check_output.return_value = IPVSADM_OUTPUT_TEMPLATE % {
+ "listener_ipport": ip_list[0],
+ "member1_ipport": ip_list[1],
+ "member2_ipport": ip_list[2]}
+ res = lvs_query.get_ipvsadm_info(constants.AMPHORA_NAMESPACE)
+ # The expected result can be checked against
+ # IPVSADM_OUTPUT_TEMPLATE; it shows that the function extracts
+ # every element of the virtual server and the real servers.
+ expected = {
+ ip_list[0]: {
+ 'Listener': [('Prot', 'UDP'),
+ ('LocalAddress:Port', ip_list[0]),
+ ('Scheduler', 'rr')],
+ 'Members': [[('RemoteAddress:Port', ip_list[1]),
+ ('Forward', 'Masq'), ('Weight', '3'),
+ ('ActiveConn', '0'), ('InActConn', '0')],
+ [('RemoteAddress:Port', ip_list[2]),
+ ('Forward', 'Masq'), ('Weight', '2'),
+ ('ActiveConn', '0'), ('InActConn', '0')]]}}
+ self.assertEqual(expected, res)
+
+ # ipvsadm stats
+ mock_check_output.return_value = IPVSADM_STATS_OUTPUT_TEMPLATE % {
+ "listener_ipport": ip_list[0],
+ "member1_ipport": ip_list[1],
+ "member2_ipport": ip_list[2]}
+ res = lvs_query.get_ipvsadm_info(constants.AMPHORA_NAMESPACE,
+ is_stats_cmd=True)
+ expected = {
+ ip_list[0]:
+ {'Listener': [('Prot', 'UDP'),
+ ('LocalAddress:Port', ip_list[0]),
+ ('Conns', '5'),
+ ('InPkts', '4264'),
+ ('OutPkts', '5'),
+ ('InBytes', '6387472'),
+ ('OutBytes', '7490')],
+ 'Members': [[('RemoteAddress:Port', ip_list[1]),
+ ('Conns', '2'),
+ ('InPkts', '1706'),
+ ('OutPkts', '2'),
+ ('InBytes', '2555588'),
+ ('OutBytes', '2996')],
+ [('RemoteAddress:Port', ip_list[2]),
+ ('Conns', '3'),
+ ('InPkts', '2558'),
+ ('OutPkts', '3'),
+ ('InBytes', '3831884'),
+ ('OutBytes', '4494')]]}}
+ self.assertEqual(expected, res)
+
+ @mock.patch('subprocess.check_output')
+ @mock.patch("octavia.amphorae.backends.agent.api_server.util."
+ "is_lvs_listener_running", return_value=True)
+ @mock.patch("octavia.amphorae.backends.agent.api_server.util."
+ "get_lvs_listeners")
+ def test_get_lvs_listeners_stats(
+ self, mock_get_listener, mock_is_running, mock_check_output):
+ # The IPv6 test is the same as IPv4, so just test IPv4 here
+ mock_get_listener.return_value = [self.listener_id_v4]
+ output_list = list()
+ output_list.append(IPVSADM_OUTPUT_TEMPLATE % {
+ "listener_ipport": "10.0.0.37:7777",
+ "member1_ipport": "10.0.0.25:2222",
+ "member2_ipport": "10.0.0.35:3333"})
+ output_list.append(IPVSADM_STATS_OUTPUT_TEMPLATE % {
+ "listener_ipport": "10.0.0.37:7777",
+ "member1_ipport": "10.0.0.25:2222",
+ "member2_ipport": "10.0.0.35:3333"})
+ mock_check_output.side_effect = output_list
+ res = lvs_query.get_lvs_listeners_stats()
+ # The expected result can be checked against the stats sample; it
+ # shows that this function computes the stats of a single listener.
+ expected = {self.listener_id_v4: {
+ 'status': constants.OPEN,
+ 'stats': {'bin': 6387472, 'stot': 5, 'bout': 7490,
+ 'ereq': 0, 'scur': 0}}}
+ self.assertEqual(expected, res)
+
+ # If there are no UDP listeners to collect,
+ # this function returns nothing.
+ mock_is_running.return_value = False
+ res = lvs_query.get_lvs_listeners_stats()
+ self.assertEqual({}, res)
+
+ # Listener with both IPv4 and IPv6, but no member in the IPv6 pool
+ mock_is_running.return_value = True
+ mock_get_listener.return_value = [
+ self.listener_id_mixed_no_ipv6_member]
+ output_list = list()
+ output_list.append(IPVSADM_OUTPUT_TEMPLATE % {
+ "listener_ipport": "10.0.0.37:7777",
+ "member1_ipport": "10.0.0.25:2222",
+ "member2_ipport": "10.0.0.35:3333"})
+ output_list.append(IPVSADM_STATS_OUTPUT_TEMPLATE % {
+ "listener_ipport": "10.0.0.37:7777",
+ "member1_ipport": "10.0.0.25:2222",
+ "member2_ipport": "10.0.0.35:3333"})
+ mock_check_output.side_effect = output_list
+ res = lvs_query.get_lvs_listeners_stats()
+ # The expected result can be checked against the stats sample; it
+ # shows that this function computes the stats of a single listener.
+ expected = {self.listener_id_mixed_no_ipv6_member: {
+ 'status': constants.OPEN,
+ 'stats': {'bin': 6387472, 'stot': 5, 'bout': 7490,
+ 'ereq': 0, 'scur': 0}}}
+ self.assertEqual(expected, res)
+
+ @mock.patch('subprocess.check_output')
+ @mock.patch("octavia.amphorae.backends.agent.api_server.util."
+ "is_lvs_listener_running", return_value=True)
+ @mock.patch("octavia.amphorae.backends.agent.api_server.util."
+ "get_lvs_listeners")
+ def test_get_lvs_listeners_stats_missing_listener(
+ self, mock_get_listener, mock_is_running, mock_check_output):
+ # The IPv6 test is the same as IPv4, so just test IPv4 here
+ mock_get_listener.return_value = [self.listener_id_v4]
+ output_list = list()
+ output_list.append(IPVSADM_OUTPUT_TEMPLATE % {
+ "listener_ipport": "10.0.0.37:7778",
+ "member1_ipport": "10.0.0.25:2222",
+ "member2_ipport": "10.0.0.35:3333"})
+ output_list.append(IPVSADM_STATS_OUTPUT_TEMPLATE % {
+ "listener_ipport": "10.0.0.37:7778",
+ "member1_ipport": "10.0.0.25:2222",
+ "member2_ipport": "10.0.0.35:3333"})
+ mock_check_output.side_effect = output_list
+ res = lvs_query.get_lvs_listeners_stats()
+ expected = {self.listener_id_v4: {
+ 'status': constants.OPEN,
+ 'stats': {'bin': 0, 'stot': 0, 'bout': 0,
+ 'ereq': 0, 'scur': 0}}}
+ self.assertEqual(expected, res)
+
+ @mock.patch('subprocess.check_output')
+ @mock.patch("octavia.amphorae.backends.agent.api_server.util."
+ "is_lvs_listener_running", return_value=True)
+ @mock.patch("octavia.amphorae.backends.agent.api_server.util."
+ "get_lvs_listeners")
+ def test_get_lvs_listeners_stats_disabled_listener(
+ self, mock_get_listener, mock_is_running, mock_check_output):
+ mock_get_listener.return_value = [self.disabled_listener_id]
+ res = lvs_query.get_lvs_listeners_stats()
+ self.assertEqual({}, res)
diff --git a/octavia/tests/unit/amphorae/backends/utils/test_network_namespace.py b/octavia/tests/unit/amphorae/backends/utils/test_network_namespace.py
new file mode 100644
index 0000000000..d1cb97ae5e
--- /dev/null
+++ b/octavia/tests/unit/amphorae/backends/utils/test_network_namespace.py
@@ -0,0 +1,114 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
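+# The tests below exercise the NetworkNamespace context manager. From the
+# assertions, its expected contract is roughly the following (a sketch of
+# the behavior under test, not the implementation itself):
+#
+#     CLONE_NEWNET = 0x40000000
+#     with NetworkNamespace('fake-netns'):
+#         # __enter__ saves a fd for /proc/<pid>/ns/net, then calls
+#         # libc setns(fd_of('/var/run/netns/fake-netns'), CLONE_NEWNET)
+#         ...
+#     # __exit__ calls setns() with the saved fd to restore the original
+#     # namespace, then closes that fd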
+import random +from unittest import mock + +from octavia.amphorae.backends.utils import network_namespace +from octavia.tests.common import utils as test_utils +import octavia.tests.unit.base as base + + +class TestNetworkNamespace(base.TestCase): + + def setUp(self): + super().setUp() + + @mock.patch('ctypes.get_errno') + @mock.patch('ctypes.CDLL') + def test_error_handler(self, mock_cdll, mock_get_errno): + FAKE_NETNS = 'fake-netns' + netns = network_namespace.NetworkNamespace(FAKE_NETNS) + + # Test result 0 + netns._error_handler(0, None, None) + + mock_get_errno.assert_not_called() + + # Test result -1 + mock_get_errno.reset_mock() + + self.assertRaises(OSError, netns._error_handler, -1, None, None) + + mock_get_errno.assert_called_once_with() + + @mock.patch('os.getpid') + @mock.patch('ctypes.CDLL') + def test_init(self, mock_cdll, mock_getpid): + FAKE_NETNS = 'fake-netns' + FAKE_PID = random.randrange(100000) + mock_cdll_obj = mock.MagicMock() + mock_cdll.return_value = mock_cdll_obj + mock_getpid.return_value = FAKE_PID + expected_current_netns = f'/proc/{FAKE_PID}/ns/net' + expected_target_netns = f'/var/run/netns/{FAKE_NETNS}' + + netns = network_namespace.NetworkNamespace(FAKE_NETNS) + + self.assertEqual(expected_current_netns, netns.current_netns) + self.assertEqual(expected_target_netns, netns.target_netns) + self.assertEqual(mock_cdll_obj.setns, netns.set_netns) + self.assertEqual(netns.set_netns.errcheck, netns._error_handler) + + @mock.patch('os.getpid') + @mock.patch('ctypes.CDLL') + def test_enter(self, mock_cdll, mock_getpid): + CLONE_NEWNET = 0x40000000 + FAKE_NETNS = 'fake-netns' + FAKE_PID = random.randrange(100000) + current_netns_fd = random.randrange(100000) + target_netns_fd = random.randrange(100000) + mock_getpid.return_value = FAKE_PID + mock_cdll_obj = mock.MagicMock() + mock_cdll.return_value = mock_cdll_obj + expected_current_netns = f'/proc/{FAKE_PID}/ns/net' + expected_target_netns = f'/var/run/netns/{FAKE_NETNS}' + + netns = network_namespace.NetworkNamespace(FAKE_NETNS) + + current_mock_open = self.useFixture( + test_utils.OpenFixture(expected_current_netns)).mock_open + current_mock_open.return_value = current_netns_fd + + target_mock_open = self.useFixture( + test_utils.OpenFixture(expected_target_netns)).mock_open + handle = target_mock_open() + handle.fileno.return_value = target_netns_fd + + netns.__enter__() + + self.assertEqual(current_netns_fd, netns.current_netns_fd) + netns.set_netns.assert_called_once_with(target_netns_fd, CLONE_NEWNET) + + @mock.patch('os.getpid') + @mock.patch('ctypes.CDLL') + def test_exit(self, mock_cdll, mock_getpid): + CLONE_NEWNET = 0x40000000 + FAKE_NETNS = 'fake-netns' + FAKE_PID = random.randrange(100000) + current_netns_fileno = random.randrange(100000) + mock_getpid.return_value = FAKE_PID + mock_cdll_obj = mock.MagicMock() + mock_cdll.return_value = mock_cdll_obj + mock_current_netns_fd = mock.MagicMock() + mock_current_netns_fd.fileno.return_value = current_netns_fileno + + netns = network_namespace.NetworkNamespace(FAKE_NETNS) + + netns.current_netns_fd = mock_current_netns_fd + + netns.__exit__() + + netns.set_netns.assert_called_once_with(current_netns_fileno, + CLONE_NEWNET) + mock_current_netns_fd.close.assert_called_once_with() diff --git a/octavia/tests/unit/amphorae/backends/utils/test_network_utils.py b/octavia/tests/unit/amphorae/backends/utils/test_network_utils.py new file mode 100644 index 0000000000..6b575798d1 --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/utils/test_network_utils.py @@ 
-0,0 +1,140 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from octavia.amphorae.backends.utils import network_utils +from octavia.common import exceptions +from octavia.tests.common import sample_network_data +import octavia.tests.unit.base as base + + +class TestNetworkUtils(base.TestCase): + + def setUp(self): + super().setUp() + + def test_find_interface(self): + FAKE_INTERFACE = 'fake0' + IPV4_ADDRESS = '203.0.113.55' + BROADCAST_ADDRESS = '203.0.113.55' + IPV6_ADDRESS = '2001:db8::55' + SAMPLE_IPV4_ADDR = sample_network_data.create_iproute_ipv4_address( + IPV4_ADDRESS, BROADCAST_ADDRESS, FAKE_INTERFACE) + SAMPLE_IPV6_ADDR = sample_network_data.create_iproute_ipv6_address( + IPV6_ADDRESS, FAKE_INTERFACE) + SAMPLE_INTERFACE = sample_network_data.create_iproute_interface( + FAKE_INTERFACE) + BROKEN_INTERFACE = [{'attrs': []}] + + mock_ip_addr = mock.MagicMock() + mock_rtnl_api = mock.MagicMock() + mock_rtnl_api.get_addr.side_effect = [[], SAMPLE_IPV4_ADDR, + SAMPLE_IPV6_ADDR, + SAMPLE_IPV6_ADDR] + mock_rtnl_api.get_links.side_effect = [SAMPLE_INTERFACE, + SAMPLE_INTERFACE, + BROKEN_INTERFACE] + + # Test no match + IPV4_ADDRESS = '203.0.113.55' + mock_ip_addr.version = 4 + + self.assertIsNone(network_utils._find_interface(IPV4_ADDRESS, + mock_rtnl_api, + IPV4_ADDRESS)) + + # Test with IPv4 address + mock_rtnl_api.reset_mock() + mock_ip_addr.version = 4 + + result = network_utils._find_interface(IPV4_ADDRESS, mock_rtnl_api, + IPV4_ADDRESS) + + self.assertEqual(FAKE_INTERFACE, result) + mock_rtnl_api.get_addr.assert_called_once_with(address=IPV4_ADDRESS) + mock_rtnl_api.get_links.assert_called_once_with(2) + + # Test with IPv6 address + mock_rtnl_api.reset_mock() + mock_ip_addr.version = 6 + + result = network_utils._find_interface(IPV6_ADDRESS, mock_rtnl_api, + IPV6_ADDRESS) + + self.assertEqual(FAKE_INTERFACE, result) + mock_rtnl_api.get_addr.assert_called_once_with(address=IPV6_ADDRESS) + mock_rtnl_api.get_links.assert_called_once_with(2) + + # Test with a broken interface + mock_rtnl_api.reset_mock() + mock_ip_addr.version = 6 + + self.assertIsNone(network_utils._find_interface(IPV6_ADDRESS, + mock_rtnl_api, + IPV6_ADDRESS)) + mock_rtnl_api.get_addr.assert_called_once_with(address=IPV6_ADDRESS) + mock_rtnl_api.get_links.assert_called_once_with(2) + + @mock.patch('octavia.amphorae.backends.utils.network_utils.' 
+ '_find_interface') + @mock.patch('pyroute2.IPRoute', create=True) + @mock.patch('pyroute2.NetNS', create=True) + def test_get_interface_name(self, mock_netns, mock_ipr, mock_find_int): + FAKE_INTERFACE = 'fake0' + FAKE_NETNS = 'fake-ns' + IPV4_ADDRESS = '203.0.113.64' + + mock_ipr_enter_obj = mock.MagicMock() + mock_ipr_obj = mock.MagicMock() + mock_ipr_obj.__enter__.return_value = mock_ipr_enter_obj + mock_ipr.return_value = mock_ipr_obj + + mock_netns_enter_obj = mock.MagicMock() + mock_netns_obj = mock.MagicMock() + mock_netns_obj.__enter__.return_value = mock_netns_enter_obj + mock_netns.return_value = mock_netns_obj + + mock_find_int.side_effect = [FAKE_INTERFACE, FAKE_INTERFACE, None] + + # Test a bogus IP address + self.assertRaises(exceptions.InvalidIPAddress, + network_utils.get_interface_name, 'not an IP', None) + + # Test with no network namespace + result = network_utils.get_interface_name(IPV4_ADDRESS) + + self.assertEqual(FAKE_INTERFACE, result) + mock_ipr.assert_called_once_with() + mock_find_int.assert_called_once_with(IPV4_ADDRESS, mock_ipr_enter_obj, + IPV4_ADDRESS) + + # Test with network namespace + mock_ipr.reset_mock() + mock_find_int.reset_mock() + + result = network_utils.get_interface_name(IPV4_ADDRESS, + net_ns=FAKE_NETNS) + self.assertEqual(FAKE_INTERFACE, result) + mock_netns.assert_called_once_with(FAKE_NETNS) + mock_find_int.assert_called_once_with(IPV4_ADDRESS, + mock_netns_enter_obj, + IPV4_ADDRESS) + + # Test no interface found + mock_ipr.reset_mock() + mock_find_int.reset_mock() + + self.assertRaises( + exceptions.NotFound, network_utils.get_interface_name, + IPV4_ADDRESS, net_ns=FAKE_NETNS) diff --git a/octavia/tests/unit/amphorae/backends/utils/test_nftable_utils.py b/octavia/tests/unit/amphorae/backends/utils/test_nftable_utils.py new file mode 100644 index 0000000000..f6a7dc83fa --- /dev/null +++ b/octavia/tests/unit/amphorae/backends/utils/test_nftable_utils.py @@ -0,0 +1,200 @@ +# Copyright 2024 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import os +import stat +import subprocess +from unittest import mock + +from octavia_lib.common import constants as lib_consts +from webob import exc + +from octavia.amphorae.backends.utils import nftable_utils +from octavia.common import constants as consts +from octavia.common import exceptions +import octavia.tests.unit.base as base + + +class TestNFTableUtils(base.TestCase): + @mock.patch('os.open') + @mock.patch('os.path.isfile') + def test_write_nftable_rules_file_exists(self, mock_isfile, mock_open): + """Test when a rules file exists and no new rules + + When an existing rules file is present and we call + write_nftable_rules_file with no rules, the method should not + overwrite the existing rules. 
+ """ + mock_isfile.return_value = True + + nftable_utils.write_nftable_rules_file('fake-eth2', []) + + mock_open.assert_not_called() + + @mock.patch('os.open') + @mock.patch('os.path.isfile') + def test_write_nftable_rules_file_rules(self, mock_isfile, mock_open): + """Test when a rules file exists and rules are passed in + + This should create a simple rules file with the base chain and rules. + """ + mock_isfile.return_value = True + mock_open.return_value = 'fake-fd' + + test_rule_1 = {consts.CIDR: None, + consts.PROTOCOL: lib_consts.PROTOCOL_TCP, + consts.PORT: 1234} + test_rule_2 = {consts.CIDR: '192.0.2.0/24', + consts.PROTOCOL: consts.VRRP, + consts.PORT: 4321} + + mocked_open = mock.mock_open() + with mock.patch.object(os, 'fdopen', mocked_open): + nftable_utils.write_nftable_rules_file( + 'fake-eth2', [test_rule_1, test_rule_2]) + + mocked_open.assert_called_once_with('fake-fd', 'w') + mock_open.assert_called_once_with( + consts.NFT_RULES_FILE, + (os.O_WRONLY | os.O_CREAT | os.O_TRUNC), + (stat.S_IRUSR | stat.S_IWUSR)) + + handle = mocked_open() + handle.write.assert_has_calls([ + mock.call(f'table {consts.NFT_FAMILY} {consts.NFT_TABLE} ' + '{}\n'), + mock.call(f'delete table {consts.NFT_FAMILY} ' + f'{consts.NFT_TABLE}\n'), + mock.call(f'table {consts.NFT_FAMILY} {consts.NFT_TABLE} ' + '{\n'), + mock.call(f' chain {consts.NFT_CHAIN} {{\n'), + mock.call(' type filter hook input priority filter; ' + 'policy drop;\n'), + mock.call(' ct state vmap { established : accept, related : ' + 'accept, invalid : drop }\n'), + mock.call(' iif lo accept\n'), + mock.call(' ip saddr 127.0.0.0/8 drop\n'), + mock.call(' ip6 saddr ::1 drop\n'), + mock.call(' icmp type destination-unreachable accept\n'), + mock.call(' icmpv6 type { nd-neighbor-solicit, ' + 'nd-router-advert, nd-neighbor-advert, packet-too-big, ' + 'destination-unreachable } accept\n'), + mock.call(' udp sport 67 udp dport 68 accept\n'), + mock.call(' udp sport 547 udp dport 546 accept\n'), + mock.call(' iifname eth1 goto amphora_vip_chain\n'), + mock.call(' }\n'), + mock.call(' chain amphora_vip_chain {\n'), + mock.call(' tcp dport 1234 accept\n'), + mock.call(' ip saddr 192.0.2.0/24 ip protocol 112 accept\n'), + mock.call(' }\n'), + mock.call('}\n') + ]) + + @mock.patch('os.open') + @mock.patch('os.path.isfile') + def test_write_nftable_rules_file_missing(self, mock_isfile, mock_open): + """Test when a rules file does not exist and no new rules + + This should create a simple rules file with the base chain. 
+ """ + mock_isfile.return_value = False + mock_open.return_value = 'fake-fd' + + mocked_open = mock.mock_open() + with mock.patch.object(os, 'fdopen', mocked_open): + nftable_utils.write_nftable_rules_file('fake-eth2', []) + + mocked_open.assert_called_once_with('fake-fd', 'w') + mock_open.assert_called_once_with( + consts.NFT_RULES_FILE, + (os.O_WRONLY | os.O_CREAT | os.O_TRUNC), + (stat.S_IRUSR | stat.S_IWUSR)) + + handle = mocked_open() + handle.write.assert_has_calls([ + mock.call(f'table {consts.NFT_FAMILY} {consts.NFT_TABLE} ' + '{\n'), + mock.call(f' chain {consts.NFT_CHAIN} {{\n'), + mock.call(' type filter hook input priority filter; ' + 'policy drop;\n'), + mock.call(' icmp type destination-unreachable accept\n'), + mock.call(' icmpv6 type { nd-neighbor-solicit, ' + 'nd-router-advert, nd-neighbor-advert, packet-too-big, ' + 'destination-unreachable } accept\n'), + mock.call(' udp sport 67 udp dport 68 accept\n'), + mock.call(' udp sport 547 udp dport 546 accept\n'), + mock.call(' }\n'), + mock.call('}\n') + ]) + + @mock.patch('octavia.common.utils.ip_version') + def test__build_rule_cmd(self, mock_ip_version): + + mock_ip_version.side_effect = [4, 6, 99] + + cmd = nftable_utils._build_rule_cmd({ + consts.CIDR: '192.0.2.0/24', + consts.PROTOCOL: lib_consts.PROTOCOL_SCTP, + consts.PORT: 1234}) + self.assertEqual('ip saddr 192.0.2.0/24 sctp dport 1234 accept', cmd) + + cmd = nftable_utils._build_rule_cmd({ + consts.CIDR: '2001:db8::/32', + consts.PROTOCOL: lib_consts.PROTOCOL_TCP, + consts.PORT: 1235}) + self.assertEqual('ip6 saddr 2001:db8::/32 tcp dport 1235 accept', cmd) + + self.assertRaises(exc.HTTPBadRequest, nftable_utils._build_rule_cmd, + {consts.CIDR: '192/32', + consts.PROTOCOL: lib_consts.PROTOCOL_TCP, + consts.PORT: 1237}) + + cmd = nftable_utils._build_rule_cmd({ + consts.CIDR: None, + consts.PROTOCOL: lib_consts.PROTOCOL_UDP, + consts.PORT: 1236}) + self.assertEqual('udp dport 1236 accept', cmd) + + cmd = nftable_utils._build_rule_cmd({ + consts.CIDR: None, + consts.PROTOCOL: consts.VRRP, + consts.PORT: 1237}) + self.assertEqual('ip protocol 112 accept', cmd) + + self.assertRaises(exc.HTTPBadRequest, nftable_utils._build_rule_cmd, + {consts.CIDR: None, + consts.PROTOCOL: 'bad-protocol', + consts.PORT: 1237}) + + @mock.patch('octavia.amphorae.backends.utils.network_namespace.' + 'NetworkNamespace') + @mock.patch('subprocess.check_output') + def test_load_nftables_file(self, mock_check_output, mock_netns): + + mock_netns.side_effect = [ + mock.DEFAULT, + subprocess.CalledProcessError(cmd=consts.NFT_CMD, returncode=-1), + exceptions.AmphoraNetworkConfigException] + + nftable_utils.load_nftables_file() + + mock_netns.assert_called_once_with(consts.AMPHORA_NAMESPACE) + mock_check_output.assert_called_once_with([ + consts.NFT_CMD, '-o', '-f', consts.NFT_RULES_FILE], + stderr=subprocess.STDOUT) + + self.assertRaises(subprocess.CalledProcessError, + nftable_utils.load_nftables_file) + + self.assertRaises(exceptions.AmphoraNetworkConfigException, + nftable_utils.load_nftables_file) diff --git a/octavia/tests/unit/amphorae/drivers/__init__.py b/octavia/tests/unit/amphorae/drivers/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/drivers/haproxy/__init__.py b/octavia/tests/unit/amphorae/drivers/haproxy/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/haproxy/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/drivers/haproxy/test_exceptions.py b/octavia/tests/unit/amphorae/drivers/haproxy/test_exceptions.py new file mode 100644 index 0000000000..f98f67cba1 --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/haproxy/test_exceptions.py @@ -0,0 +1,52 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
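+# These tests pin down the check_exception() contract visible in the
+# assertions below: a 404 response raises NotFound and logs an error,
+# log_error=False suppresses the log while still raising, and status codes
+# listed in `ignore` return the response untouched. A hedged usage sketch
+# (`amp_response` is a hypothetical requests-style response object):
+#
+#     exceptions.check_exception(amp_response, ignore=[404],
+#                                log_error=False)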
+from unittest import mock + +from octavia.amphorae.drivers.haproxy import exceptions +import octavia.tests.unit.base as base + + +class TestHAProxyExceptions(base.TestCase): + + def setUp(self): + super().setUp() + + @mock.patch('octavia.amphorae.drivers.haproxy.exceptions.LOG') + def test_check_exception(self, mock_logger): + + response_mock = mock.MagicMock() + + # Test exception that should raise and log + response_mock.status_code = 404 + + self.assertRaises(exceptions.NotFound, exceptions.check_exception, + response_mock) + mock_logger.error.assert_called_once() + + # Test exception that should raise but not log + mock_logger.reset_mock() + response_mock.status_code = 403 + + self.assertRaises(exceptions.Forbidden, exceptions.check_exception, + response_mock, log_error=False) + mock_logger.error.assert_not_called() + + # Test exception that should be ignored + mock_logger.reset_mock() + response_mock.status_code = 401 + + result = exceptions.check_exception(response_mock, ignore=[401]) + + mock_logger.error.assert_not_called() + self.assertEqual(response_mock, result) diff --git a/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py b/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py new file mode 100644 index 0000000000..681fb5db01 --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py @@ -0,0 +1,114 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from octavia.amphorae.driver_exceptions import exceptions as driver_except +from octavia.amphorae.drivers.haproxy import exceptions as exc +from octavia.amphorae.drivers.haproxy import rest_api_driver +import octavia.tests.unit.base as base + + +class TestHAProxyAmphoraDriver(base.TestCase): + + def setUp(self): + super().setUp() + self.driver = rest_api_driver.HaproxyAmphoraLoadBalancerDriver() + + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' + 'HaproxyAmphoraLoadBalancerDriver.' 
+ '_populate_amphora_api_version') + def test_get_interface_from_ip(self, mock_api_version): + FAKE_INTERFACE = 'fake0' + IP_ADDRESS = '203.0.113.42' + TIMEOUT_DICT = {'outa': 'time'} + amphora_mock = mock.MagicMock() + amphora_mock.api_version = '0' + client_mock = mock.MagicMock() + client_mock.get_interface.side_effect = [ + {'interface': FAKE_INTERFACE}, {'interface': FAKE_INTERFACE}, + {}, exc.NotFound] + self.driver.clients['0'] = client_mock + + # Test interface found no timeout + + result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS) + + self.assertEqual(FAKE_INTERFACE, result) + mock_api_version.assert_called_once_with(amphora_mock, None) + client_mock.get_interface.assert_called_once_with( + amphora_mock, IP_ADDRESS, None, log_error=False) + + # Test interface found with timeout + mock_api_version.reset_mock() + client_mock.reset_mock() + + result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS, + timeout_dict=TIMEOUT_DICT) + + self.assertEqual(FAKE_INTERFACE, result) + mock_api_version.assert_called_once_with(amphora_mock, TIMEOUT_DICT) + client_mock.get_interface.assert_called_once_with( + amphora_mock, IP_ADDRESS, TIMEOUT_DICT, log_error=False) + + # Test no interface data + mock_api_version.reset_mock() + client_mock.reset_mock() + + result = self.driver.get_interface_from_ip(amphora_mock, IP_ADDRESS) + + self.assertIsNone(result) + mock_api_version.assert_called_once_with(amphora_mock, None) + client_mock.get_interface.assert_called_once_with( + amphora_mock, IP_ADDRESS, None, log_error=False) + + # Test NotFound + mock_api_version.reset_mock() + client_mock.reset_mock() + + self.assertRaises( + exc.NotFound, + self.driver.get_interface_from_ip, amphora_mock, IP_ADDRESS) + mock_api_version.assert_called_once_with(amphora_mock, None) + client_mock.get_interface.assert_called_once_with( + amphora_mock, IP_ADDRESS, None, log_error=False) + + def test_unsupported_api_version(self): + mock_amp = mock.MagicMock() + mock_amp.api_version = "0.5" + + self.assertRaises(driver_except.AmpVersionUnsupported, + self.driver._populate_amphora_api_version, + mock_amp) + + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' + 'HaproxyAmphoraLoadBalancerDriver.' + '_populate_amphora_api_version') + def test_set_interface_rules(self, mock_api_version): + + IP_ADDRESS = '203.0.113.44' + amphora_mock = mock.MagicMock() + amphora_mock.api_version = '0' + client_mock = mock.MagicMock() + client_mock.set_interface_rules.side_effect = [mock.DEFAULT, + exc.NotFound] + self.driver.clients['0'] = client_mock + + self.driver.set_interface_rules(amphora_mock, IP_ADDRESS, 'fake_rules') + mock_api_version.assert_called_once_with(amphora_mock, None) + client_mock.set_interface_rules.assert_called_once_with( + amphora_mock, IP_ADDRESS, 'fake_rules', timeout_dict=None) + + self.assertRaises(driver_except.AmpDriverNotImplementedError, + self.driver.set_interface_rules, amphora_mock, + IP_ADDRESS, 'fake_rules') diff --git a/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py b/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py new file mode 100644 index 0000000000..73e0e2153d --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py @@ -0,0 +1,1522 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import hashlib +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +import requests +import requests_mock + +from octavia.amphorae.driver_exceptions import exceptions as driver_except +from octavia.amphorae.drivers.haproxy import exceptions as exc +from octavia.amphorae.drivers.haproxy import rest_api_driver as driver +from octavia.common import constants +from octavia.common import data_models +from octavia.common import utils as octavia_utils +from octavia.db import models +from octavia.network import data_models as network_models +from octavia.tests.common import sample_certs +from octavia.tests.unit import base +from octavia.tests.unit.common.sample_configs import sample_configs_combined + +API_VERSION = '1.0' +FAKE_CIDR = '198.51.100.0/24' +FAKE_GATEWAY = '192.51.100.1' +FAKE_IP = '192.0.2.10' +FAKE_IPV6 = '2001:db8::cafe' +FAKE_IPV6_LLA = 'fe80::00ff:fe00:cafe' +FAKE_PEM_FILENAME = "file_name" +FAKE_UUID_1 = uuidutils.generate_uuid() +FAKE_VRRP_IP = '192.0.2.5' +FAKE_VIP_SUBNET = '192.0.2.0/24' +FAKE_MAC_ADDRESS = '123' +FAKE_MTU = 1450 +FAKE_MEMBER_IP_PORT_NAME_1 = "10.0.0.10:1003" +FAKE_MEMBER_IP_PORT_NAME_2 = "10.0.0.11:1004" + + +class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase): + + def setUp(self): + super().setUp() + + DEST1 = '198.51.100.0/24' + DEST2 = '203.0.113.0/24' + NEXTHOP = '192.0.2.1' + + self.driver = driver.HaproxyAmphoraLoadBalancerDriver() + + self.driver.cert_manager = mock.MagicMock() + self.driver.cert_parser = mock.MagicMock() + self.driver.clients = { + 'base': mock.MagicMock(), + API_VERSION: mock.MagicMock()} + self.driver.clients['base'].get_api_version.return_value = { + 'api_version': API_VERSION} + self.driver.clients[ + API_VERSION].get_info.return_value = { + 'haproxy_version': '1.6.3-1ubuntu0.1', + 'api_version': API_VERSION} + self.driver.jinja_combo = mock.MagicMock() + self.driver.lvs_jinja = mock.MagicMock() + + # Build sample Listener and VIP configs + self.sl = sample_configs_combined.sample_listener_tuple( + tls=True, sni=True, client_ca_cert=True, client_crl_cert=True, + recursive_nest=True) + self.sl_udp = sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.255.0.0', + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT) + self.pool_has_cert = sample_configs_combined.sample_pool_tuple( + pool_cert=True, pool_ca_cert=True, pool_crl=True) + self.amp = self.sl.load_balancer.amphorae[0] + self.sv = sample_configs_combined.sample_vip_tuple() + self.lb = self.sl.load_balancer + self.lb_udp = ( + sample_configs_combined.sample_lb_with_udp_listener_tuple()) + self.fixed_ip = mock.MagicMock() + self.fixed_ip.ip_address = '198.51.100.5' + self.fixed_ip.subnet.cidr = '198.51.100.0/24' + self.fixed_ip.subnet.gateway_ip = FAKE_GATEWAY + self.network = network_models.Network(mtu=FAKE_MTU) + self.port = network_models.Port(mac_address=FAKE_MAC_ADDRESS, + fixed_ips=[self.fixed_ip], + network=self.network) + 
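+        # The same host routes feed both the VIP subnet fixtures and the
+        # expected subnet_info payload below, so the plug_vip assertions
+        # can compare them directly.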
+        self.host_routes = [network_models.HostRoute(destination=DEST1,
+                                                     nexthop=NEXTHOP),
+                            network_models.HostRoute(destination=DEST2,
+                                                     nexthop=NEXTHOP)]
+        host_routes_data = [{'destination': DEST1, 'nexthop': NEXTHOP},
+                            {'destination': DEST2, 'nexthop': NEXTHOP}]
+        self.subnet_info = {'subnet_cidr': FAKE_CIDR,
+                            'gateway': FAKE_GATEWAY,
+                            'mac_address': FAKE_MAC_ADDRESS,
+                            'vrrp_ip': self.amp.vrrp_ip,
+                            'mtu': FAKE_MTU,
+                            'host_routes': host_routes_data,
+                            'additional_vips': [],
+                            'is_sriov': False}
+
+        self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
+                             constants.REQ_READ_TIMEOUT: 2,
+                             constants.CONN_MAX_RETRIES: 3,
+                             constants.CONN_RETRY_INTERVAL: 4}
+        self.amp_net_config = network_models.AmphoraNetworkConfig(
+            vip_subnet=network_models.Subnet(
+                id=self.lb.vip.subnet_id,
+                cidr=FAKE_VIP_SUBNET,
+                host_routes=[])).to_dict(recurse=True)
+
+    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.'
+                'HaproxyAmphoraLoadBalancerDriver._process_secret')
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_amphora_listeners(self, mock_load_cert, mock_secret):
+        mock_amphora = mock.MagicMock()
+        mock_amphora.id = 'mock_amphora_id'
+        mock_amphora.api_version = API_VERSION
+        mock_secret.return_value = 'filename.pem'
+        mock_load_cert.return_value = {
+            'tls_cert': self.sl.default_tls_container, 'sni_certs': [],
+            'client_ca_cert': None}
+        self.driver.jinja_combo.build_config.return_value = 'the_config'
+
+        mock_empty_lb = mock.MagicMock()
+        mock_empty_lb.listeners = []
+        self.driver.update_amphora_listeners(mock_empty_lb, mock_amphora,
+                                             self.timeout_dict)
+        mock_load_cert.assert_not_called()
+        self.driver.jinja_combo.build_config.assert_not_called()
+        self.driver.clients[API_VERSION].upload_config.assert_not_called()
+        self.driver.clients[API_VERSION].reload_listener.assert_not_called()
+
+        self.driver.update_amphora_listeners(self.lb,
+                                             mock_amphora, self.timeout_dict)
+        self.driver.clients[API_VERSION].upload_config.assert_called_once_with(
+            mock_amphora, self.lb.id, 'the_config',
+            timeout_dict=self.timeout_dict)
+        # The driver should have reloaded the listener exactly once
+        self.driver.clients[
+            API_VERSION].reload_listener.assert_called_once_with(
+            mock_amphora, self.lb.id, timeout_dict=self.timeout_dict)
+
+        mock_load_cert.reset_mock()
+        self.driver.jinja_combo.build_config.reset_mock()
+        self.driver.clients[API_VERSION].upload_config.reset_mock()
+        self.driver.clients[API_VERSION].reload_listener.reset_mock()
+        mock_amphora.status = constants.DELETED
+        self.driver.update_amphora_listeners(self.lb,
+                                             mock_amphora, self.timeout_dict)
+        mock_load_cert.assert_not_called()
+        self.driver.jinja_combo.build_config.assert_not_called()
+        self.driver.clients[API_VERSION].upload_config.assert_not_called()
+        self.driver.clients[API_VERSION].reload_listener.assert_not_called()
+
+    @mock.patch('octavia.db.api.session')
+    @mock.patch('octavia.db.repositories.ListenerRepository.update')
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_update_amphora_listeners_bad_cert(
+            self, mock_load_cert, mock_list_update, mock_get_session):
+        mock_amphora = mock.MagicMock()
+        mock_amphora.id = 'mock_amphora_id'
+        mock_amphora.api_version = API_VERSION
+
+        mock_session = mock_get_session().begin().__enter__()
+
+        mock_load_cert.side_effect = [Exception]
+        self.driver.update_amphora_listeners(self.lb,
+                                             mock_amphora, self.timeout_dict)
+        mock_list_update.assert_called_once_with(
+            mock_session, self.lb.listeners[0].id,
+            provisioning_status=constants.ERROR,
+            operating_status=constants.ERROR)
+        
self.driver.jinja_combo.build_config.assert_not_called() + (self.driver.clients[API_VERSION].delete_listener. + assert_called_once_with)(mock_amphora, self.lb.id) + + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' + 'HaproxyAmphoraLoadBalancerDriver._process_secret') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + @mock.patch('octavia.common.tls_utils.cert_parser.get_host_names') + def test_update(self, mock_cert, mock_load_crt, mock_secret): + mock_cert.return_value = {'cn': sample_certs.X509_CERT_CN} + mock_secret.side_effect = ['filename.pem', 'crl-filename.pem'] + sconts = [] + for sni_container in self.sl.sni_containers: + sconts.append(sni_container.tls_container) + mock_load_crt.side_effect = [{ + 'tls_cert': self.sl.default_tls_container, 'sni_certs': sconts}, + {'tls_cert': None, 'sni_certs': []}] + self.driver.clients[API_VERSION].get_cert_md5sum.side_effect = [ + exc.NotFound, 'Fake_MD5', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + 'CA_CERT_MD5'] + self.driver.jinja_combo.build_config.side_effect = ['fake_config'] + + # Execute driver method + self.driver.update(self.lb) + + # verify result + gcm_calls = [ + mock.call(self.amp, self.lb.id, + self.sl.default_tls_container.id + '.pem', + ignore=(404,)), + mock.call(self.amp, self.lb.id, + sconts[0].id + '.pem', ignore=(404,)), + mock.call(self.amp, self.lb.id, + sconts[1].id + '.pem', ignore=(404,)), + ] + + self.driver.clients[API_VERSION].get_cert_md5sum.assert_has_calls( + gcm_calls, any_order=True) + + # this is called three times (last MD5 matches) + fp1 = b'\n'.join([sample_certs.X509_CERT, + sample_certs.X509_CERT_KEY, + sample_certs.X509_IMDS]) + b'\n' + fp2 = b'\n'.join([sample_certs.X509_CERT_2, + sample_certs.X509_CERT_KEY_2, + sample_certs.X509_IMDS]) + b'\n' + fp3 = b'\n'.join([sample_certs.X509_CERT_3, + sample_certs.X509_CERT_KEY_3, + sample_certs.X509_IMDS]) + b'\n' + + ucp_calls = [ + mock.call(self.amp, self.lb.id, + self.sl.default_tls_container.id + '.pem', fp1), + mock.call(self.amp, self.lb.id, + sconts[0].id + '.pem', fp2), + mock.call(self.amp, self.lb.id, + sconts[1].id + '.pem', fp3), + ] + + self.driver.clients[API_VERSION].upload_cert_pem.assert_has_calls( + ucp_calls, any_order=True) + + # upload only one config file + self.driver.clients[API_VERSION].upload_config.assert_called_once_with( + self.amp, self.lb.id, 'fake_config', timeout_dict=None) + # start should be called once + self.driver.clients[ + API_VERSION].reload_listener.assert_called_once_with( + self.amp, self.lb.id, timeout_dict=None) + secret_calls = [ + mock.call(self.sl, self.sl.client_ca_tls_certificate_id, self.amp, + self.lb.id), + mock.call(self.sl, self.sl.client_crl_container_id, self.amp, + self.lb.id) + ] + mock_secret.assert_has_calls(secret_calls) + + def test_udp_update(self): + self.driver.lvs_jinja.build_config.side_effect = ['fake_udp_config'] + + # Execute driver method + self.driver.update(self.lb_udp) + + # upload only one config file + self.driver.clients[ + API_VERSION].upload_udp_config.assert_called_once_with( + self.amp, self.sl_udp.id, 'fake_udp_config', timeout_dict=None) + + # start should be called once + self.driver.clients[ + API_VERSION].reload_listener.assert_called_once_with( + self.amp, self.sl_udp.id, timeout_dict=None) + + def test_upload_cert_amp(self): + self.driver.upload_cert_amp(self.amp, octavia_utils.b('test')) + self.driver.clients[ + API_VERSION].update_cert_for_rotation.assert_called_once_with( + self.amp, octavia_utils.b('test')) + + 
@mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test__process_tls_certificates_no_ca_cert(self, mock_load_crt): + sample_listener = sample_configs_combined.sample_listener_tuple( + tls=True, sni=True) + sconts = [] + for sni_container in sample_listener.sni_containers: + sconts.append(sni_container.tls_container) + mock_load_crt.return_value = { + 'tls_cert': self.sl.default_tls_container, + 'sni_certs': sconts + } + self.driver.clients[API_VERSION].get_cert_md5sum.side_effect = [ + exc.NotFound, 'Fake_MD5', 'aaaaa', 'aaaaa'] + self.driver._process_tls_certificates( + sample_listener, self.amp, sample_listener.load_balancer.id) + gcm_calls = [ + mock.call(self.amp, self.lb.id, + self.sl.default_tls_container.id + '.pem', + ignore=(404,)), + mock.call(self.amp, self.lb.id, + sconts[0].id + '.pem', ignore=(404,)), + mock.call(self.amp, self.lb.id, + sconts[1].id + '.pem', ignore=(404,)) + ] + self.driver.clients[API_VERSION].get_cert_md5sum.assert_has_calls( + gcm_calls, any_order=True) + fp1 = b'\n'.join([sample_certs.X509_CERT, + sample_certs.X509_CERT_KEY, + sample_certs.X509_IMDS]) + b'\n' + fp2 = b'\n'.join([sample_certs.X509_CERT_2, + sample_certs.X509_CERT_KEY_2, + sample_certs.X509_IMDS]) + b'\n' + fp3 = b'\n'.join([sample_certs.X509_CERT_3, + sample_certs.X509_CERT_KEY_3, + sample_certs.X509_IMDS]) + b'\n' + ucp_calls = [ + mock.call(self.amp, self.lb.id, + self.sl.default_tls_container.id + '.pem', fp1), + mock.call(self.amp, self.lb.id, + sconts[0].id + '.pem', fp2), + mock.call(self.amp, self.lb.id, + sconts[1].id + '.pem', fp3) + ] + self.driver.clients[API_VERSION].upload_cert_pem.assert_has_calls( + ucp_calls, any_order=True) + self.assertEqual( + 4, self.driver.clients[API_VERSION].upload_cert_pem.call_count) + + @mock.patch('oslo_context.context.RequestContext') + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' + 'HaproxyAmphoraLoadBalancerDriver._upload_cert') + def test_process_secret(self, mock_upload_cert, mock_oslo): + # Test bypass if no secret_ref + sample_listener = sample_configs_combined.sample_listener_tuple( + tls=True, sni=True) + + result = self.driver._process_secret(sample_listener, None) + + self.assertIsNone(result) + self.driver.cert_manager.get_secret.assert_not_called() + + # Test the secret process + sample_listener = sample_configs_combined.sample_listener_tuple( + tls=True, sni=True, client_ca_cert=True) + fake_context = 'fake context' + fake_secret = b'fake cert' + mock_oslo.return_value = fake_context + self.driver.cert_manager.get_secret.reset_mock() + self.driver.cert_manager.get_secret.return_value = fake_secret + ref_md5 = hashlib.md5( + fake_secret, usedforsecurity=False).hexdigest() # nosec + ref_id = hashlib.sha1(fake_secret).hexdigest() # nosec + ref_name = f'{ref_id}.pem' + + result = self.driver._process_secret( + sample_listener, sample_listener.client_ca_tls_certificate_id, + self.amp, sample_listener.id) + + mock_oslo.assert_called_once_with( + project_id=sample_listener.project_id) + self.driver.cert_manager.get_secret.assert_called_once_with( + fake_context, sample_listener.client_ca_tls_certificate_id) + mock_upload_cert.assert_called_once_with( + self.amp, sample_listener.id, pem=fake_secret, + md5sum=ref_md5, name=ref_name) + self.assertEqual(ref_name, result) + + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 
+ 'HaproxyAmphoraLoadBalancerDriver._process_pool_certs') + def test__process_listener_pool_certs(self, mock_pool_cert): + sample_listener = sample_configs_combined.sample_listener_tuple( + l7=True) + + ref_pool_cert_1 = {'client_cert': '/some/fake/cert-1.pem'} + ref_pool_cert_2 = {'client_cert': '/some/fake/cert-2.pem'} + + mock_pool_cert.side_effect = [ref_pool_cert_1, ref_pool_cert_2] + + ref_cert_dict = {'sample_pool_id_1': ref_pool_cert_1, + 'sample_pool_id_2': ref_pool_cert_2} + + result = self.driver._process_listener_pool_certs( + sample_listener, self.amp, sample_listener.load_balancer.id) + + pool_certs_calls = [ + mock.call(sample_listener, sample_listener.default_pool, + self.amp, sample_listener.load_balancer.id), + mock.call(sample_listener, sample_listener.pools[1], + self.amp, sample_listener.load_balancer.id) + ] + + mock_pool_cert.assert_has_calls(pool_certs_calls, any_order=True) + + self.assertEqual(ref_cert_dict, result) + + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' + 'HaproxyAmphoraLoadBalancerDriver._process_secret') + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' + 'HaproxyAmphoraLoadBalancerDriver._upload_cert') + @mock.patch('octavia.common.tls_utils.cert_parser.build_pem') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test__process_pool_certs(self, mock_load_certs, mock_build_pem, + mock_upload_cert, mock_secret): + fake_cert_dir = '/fake/cert/dir' + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="haproxy_amphora", base_cert_dir=fake_cert_dir) + sample_listener = sample_configs_combined.sample_listener_tuple( + pool_cert=True, pool_ca_cert=True, pool_crl=True) + pool_cert = data_models.TLSContainer( + id=uuidutils.generate_uuid(), certificate='pool cert') + pool_data = {'tls_cert': pool_cert, 'sni_certs': []} + mock_load_certs.return_value = pool_data + fake_pem = b'fake pem' + mock_build_pem.return_value = fake_pem + ref_md5 = hashlib.md5( + fake_pem, usedforsecurity=False).hexdigest() # nosec + ref_name = f'{pool_cert.id}.pem' + ref_path = (f'{fake_cert_dir}/{sample_listener.load_balancer.id}/' + f'{ref_name}') + ref_ca_name = 'fake_ca.pem' + ref_ca_path = '{cert_dir}/{lb_id}/{name}'.format( + cert_dir=fake_cert_dir, lb_id=sample_listener.load_balancer.id, + name=ref_ca_name) + ref_crl_name = 'fake_crl.pem' + ref_crl_path = '{cert_dir}/{lb_id}/{name}'.format( + cert_dir=fake_cert_dir, lb_id=sample_listener.load_balancer.id, + name=ref_crl_name) + ref_result = {'client_cert': ref_path, 'ca_cert': ref_ca_path, + 'crl': ref_crl_path} + mock_secret.side_effect = [ref_ca_name, ref_crl_name] + + result = self.driver._process_pool_certs( + sample_listener, sample_listener.default_pool, self.amp, + sample_listener.load_balancer.id) + + secret_calls = [ + mock.call(sample_listener, + sample_listener.default_pool.ca_tls_certificate_id, + self.amp, sample_listener.load_balancer.id), + mock.call(sample_listener, + sample_listener.default_pool.crl_container_id, + self.amp, sample_listener.load_balancer.id)] + + mock_build_pem.assert_called_once_with(pool_cert) + mock_upload_cert.assert_called_once_with( + self.amp, sample_listener.load_balancer.id, pem=fake_pem, + md5sum=ref_md5, name=ref_name) + mock_secret.assert_has_calls(secret_calls) + self.assertEqual(ref_result, result) + + def test_start(self): + amp1 = mock.MagicMock() + amp1.api_version = API_VERSION + amp2 = mock.MagicMock() + amp2.api_version = API_VERSION + amp2.status = constants.DELETED + loadbalancer = mock.MagicMock() + 
loadbalancer.id = uuidutils.generate_uuid() + loadbalancer.amphorae = [amp1, amp2] + loadbalancer.vip = self.sv + listener = mock.MagicMock() + listener.id = uuidutils.generate_uuid() + listener.protocol = constants.PROTOCOL_HTTP + loadbalancer.listeners = [listener] + listener.load_balancer = loadbalancer + self.driver.clients[ + API_VERSION].start_listener.__name__ = 'start_listener' + # Execute driver method + self.driver.start(loadbalancer) + self.driver.clients[ + API_VERSION].start_listener.assert_called_once_with( + amp1, loadbalancer.id, None) + + def test_reload(self): + amp1 = mock.MagicMock() + amp1.api_version = API_VERSION + amp2 = mock.MagicMock() + amp2.api_version = API_VERSION + amp2.status = constants.DELETED + loadbalancer = mock.MagicMock() + loadbalancer.id = uuidutils.generate_uuid() + loadbalancer.amphorae = [amp1, amp2] + loadbalancer.vip = self.sv + listener = mock.MagicMock() + listener.id = uuidutils.generate_uuid() + listener.protocol = constants.PROTOCOL_HTTP + loadbalancer.listeners = [listener] + listener.load_balancer = loadbalancer + self.driver.clients[ + API_VERSION].reload_listener.__name__ = 'reload_listener' + # Execute driver method + self.driver.reload(loadbalancer) + self.driver.clients[ + API_VERSION].reload_listener.assert_called_once_with( + amp1, loadbalancer.id, None) + + self.driver.clients[ + API_VERSION].reload_listener.reset_mock() + timeout_dict = { + 'elem1': 1000 + } + self.driver.reload(loadbalancer, timeout_dict=timeout_dict) + self.driver.clients[ + API_VERSION].reload_listener.assert_called_once_with( + amp1, loadbalancer.id, timeout_dict) + + def test_start_with_amphora(self): + # Execute driver method + amp = mock.MagicMock() + self.driver.clients[ + API_VERSION].start_listener.__name__ = 'start_listener' + self.driver.start(self.lb, self.amp) + self.driver.clients[ + API_VERSION].start_listener.assert_called_once_with( + self.amp, self.lb.id, None) + + self.driver.clients[API_VERSION].start_listener.reset_mock() + amp.status = constants.DELETED + self.driver.start(self.lb, amp) + self.driver.clients[API_VERSION].start_listener.assert_not_called() + + def test_udp_start(self): + self.driver.clients[ + API_VERSION].start_listener.__name__ = 'start_listener' + # Execute driver method + self.driver.start(self.lb_udp) + self.driver.clients[ + API_VERSION].start_listener.assert_called_once_with( + self.amp, self.sl_udp.id, None) + + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 
+ 'HaproxyAmphoraLoadBalancerDriver._process_secret') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + @mock.patch('octavia.common.tls_utils.cert_parser.get_host_names') + def test_delete_second_listener(self, mock_cert, mock_load_crt, + mock_secret): + self.driver.clients[ + API_VERSION].delete_listener.__name__ = 'delete_listener' + sl = sample_configs_combined.sample_listener_tuple( + tls=True, sni=True, client_ca_cert=True, client_crl_cert=True, + recursive_nest=True) + sl2 = sample_configs_combined.sample_listener_tuple( + id='sample_listener_id_2') + sl.load_balancer.listeners.append(sl2) + mock_cert.return_value = {'cn': sample_certs.X509_CERT_CN} + mock_secret.side_effect = ['filename.pem', 'crl-filename.pem'] + sconts = [] + for sni_container in self.sl.sni_containers: + sconts.append(sni_container.tls_container) + mock_load_crt.side_effect = [{ + 'tls_cert': self.sl.default_tls_container, 'sni_certs': sconts}, + {'tls_cert': None, 'sni_certs': []}] + self.driver.jinja_combo.build_config.side_effect = ['fake_config'] + # Execute driver method + self.driver.delete(sl) + + # All of the pem files should be removed + dcp_calls = [ + mock.call(self.amp, sl.load_balancer.id, + self.sl.default_tls_container.id + '.pem'), + mock.call(self.amp, sl.load_balancer.id, sconts[0].id + '.pem'), + mock.call(self.amp, sl.load_balancer.id, sconts[1].id + '.pem'), + ] + self.driver.clients[API_VERSION].delete_cert_pem.assert_has_calls( + dcp_calls, any_order=True) + + # Now just make sure we did an update and not a delete + self.driver.clients[API_VERSION].delete_listener.assert_not_called() + self.driver.clients[API_VERSION].upload_config.assert_called_once_with( + self.amp, sl.load_balancer.id, 'fake_config', timeout_dict=None) + # start should be called once + self.driver.clients[ + API_VERSION].reload_listener.assert_called_once_with( + self.amp, sl.load_balancer.id, timeout_dict=None) + + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 
+ 'HaproxyAmphoraLoadBalancerDriver._process_secret') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + @mock.patch('octavia.common.tls_utils.cert_parser.get_host_names') + def test_delete_second_listener_active_standby(self, mock_cert, + mock_load_crt, + mock_secret): + self.driver.clients[ + API_VERSION].delete_listener.__name__ = 'delete_listener' + sl = sample_configs_combined.sample_listener_tuple( + tls=True, sni=True, client_ca_cert=True, client_crl_cert=True, + recursive_nest=True, topology=constants.TOPOLOGY_ACTIVE_STANDBY) + sl2 = sample_configs_combined.sample_listener_tuple( + id='sample_listener_id_2', + topology=constants.TOPOLOGY_ACTIVE_STANDBY) + sl.load_balancer.listeners.append(sl2) + mock_cert.return_value = {'cn': sample_certs.X509_CERT_CN} + mock_secret.side_effect = ['filename.pem', 'crl-filename.pem', + 'filename.pem', 'crl-filename.pem'] + sconts = [] + for sni_container in self.sl.sni_containers: + sconts.append(sni_container.tls_container) + mock_load_crt.side_effect = [{ + 'tls_cert': self.sl.default_tls_container, 'sni_certs': sconts}, + {'tls_cert': None, 'sni_certs': []}, + {'tls_cert': None, 'sni_certs': []}, + {'tls_cert': None, 'sni_certs': []}] + self.driver.jinja_combo.build_config.side_effect = [ + 'fake_config', 'fake_config'] + # Execute driver method + self.driver.delete(sl) + + amp1 = sl.load_balancer.amphorae[0] + amp2 = sl.load_balancer.amphorae[1] + + # All of the pem files should be removed (using amp1 or amp2) + dcp_calls_list = [ + [ + mock.call(amp1, sl.load_balancer.id, + sl.default_tls_container.id + '.pem'), + mock.call(amp2, sl.load_balancer.id, + sl.default_tls_container.id + '.pem') + ], + [ + mock.call(amp1, sl.load_balancer.id, sconts[0].id + '.pem'), + mock.call(amp2, sl.load_balancer.id, sconts[0].id + '.pem') + ], + [ + mock.call(amp1, sl.load_balancer.id, sconts[1].id + '.pem'), + mock.call(amp2, sl.load_balancer.id, sconts[1].id + '.pem') + ] + ] + mock_calls = ( + self.driver.clients[API_VERSION].delete_cert_pem.mock_calls) + for dcp_calls in dcp_calls_list: + # Ensure that at least one call in each pair has been seen + if (dcp_calls[0] not in mock_calls and + dcp_calls[1] not in mock_calls): + raise Exception(f"{dcp_calls} not found in {mock_calls}") + + # Now just make sure we did an update and not a delete + self.driver.clients[API_VERSION].delete_listener.assert_not_called() + upload_config_calls = [ + mock.call(amp1, sl.load_balancer.id, 'fake_config', + timeout_dict=None), + mock.call(amp2, sl.load_balancer.id, 'fake_config', + timeout_dict=None) + ] + self.driver.clients[API_VERSION].upload_config.assert_has_calls( + upload_config_calls, any_order=True) + + # start should be called once per amp + reload_listener_calls = [ + mock.call(amp1, sl.load_balancer.id, timeout_dict=None), + mock.call(amp2, sl.load_balancer.id, timeout_dict=None) + ] + self.driver.clients[ + API_VERSION].reload_listener.assert_has_calls( + reload_listener_calls, any_order=True) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_delete_last_listener(self, mock_load_crt): + self.driver.clients[ + API_VERSION].delete_listener.__name__ = 'delete_listener' + sl = sample_configs_combined.sample_listener_tuple( + tls=True, sni=True, client_ca_cert=True, client_crl_cert=True, + recursive_nest=True) + mock_load_crt.side_effect = [{ + 'tls_cert': sl.default_tls_container, 'sni_certs': None}] + # Execute driver method + self.driver.delete(sl) + self.driver.clients[ + 
API_VERSION].delete_listener.assert_called_once_with( + self.amp, sl.load_balancer.id) + + def test_udp_delete(self): + self.driver.clients[ + API_VERSION].delete_listener.__name__ = 'delete_listener' + # Execute driver method + self.driver.delete(self.sl_udp) + self.driver.clients[ + API_VERSION].delete_listener.assert_called_once_with( + self.amp, self.sl_udp.id) + + def test_get_info(self): + expected_info = {'haproxy_version': '1.6.3-1ubuntu0.1', + 'api_version': '1.0'} + result = self.driver.get_info(self.amp) + self.assertEqual(expected_info, result) + + def test_get_diagnostics(self): + # TODO(johnsom) Implement once this exists on the amphora agent. + result = self.driver.get_diagnostics(self.amp) + self.assertIsNone(result) + + def test_finalize_amphora(self): + # TODO(johnsom) Implement once this exists on the amphora agent. + result = self.driver.finalize_amphora(self.amp) + self.assertIsNone(result) + + def test_post_vip_plug(self): + vip_subnet = mock.MagicMock() + vip_subnet.cidr = FAKE_CIDR + vip_subnet.gateway_ip = FAKE_GATEWAY + vip_subnet.host_routes = self.host_routes + vip_subnet.to_dict.return_value = { + 'cidr': FAKE_CIDR, + 'gateway_ip': FAKE_GATEWAY, + 'host_routes': [ + hr.to_dict(recurse=True) + for hr in self.host_routes] + } + + amphorae_network_config = mock.MagicMock() + amphorae_network_config.get().vip_subnet = vip_subnet + amphorae_network_config.get().vrrp_port = self.port + self.driver.post_vip_plug(self.amp, self.lb, amphorae_network_config, + self.port, vip_subnet, + additional_vip_data=[]) + self.driver.clients[API_VERSION].plug_vip.assert_called_once_with( + self.amp, self.lb.vip.ip_address, self.subnet_info) + + def test_post_vip_plug_additional_vips(self): + vip_subnet = mock.MagicMock() + vip_subnet.cidr = FAKE_CIDR + vip_subnet.gateway_ip = FAKE_GATEWAY + vip_subnet.host_routes = self.host_routes + vip_subnet.to_dict.return_value = { + 'cidr': FAKE_CIDR, + 'gateway_ip': FAKE_GATEWAY, + 'host_routes': [ + hr.to_dict(recurse=True) + for hr in self.host_routes] + } + + amphorae_network_config = mock.MagicMock() + amphorae_network_config.get().vip_subnet = vip_subnet + amphorae_network_config.get().vrrp_port = self.port + + vip1_subnet = mock.MagicMock() + vip1_subnet.cidr = mock.Mock() + vip1_subnet.gateway_ip = mock.Mock() + vip1_subnet.host_routes = self.host_routes + vip1_subnet.to_dict.return_value = { + 'cidr': vip1_subnet.cidr, + 'gateway_ip': vip1_subnet.gateway_ip, + 'host_routes': [ + hr.to_dict(recurse=True) + for hr in self.host_routes] + } + additional_vip1 = mock.MagicMock() + additional_vip1.ip_address = mock.Mock() + additional_vip1.subnet = vip1_subnet + additional_vip_data = [additional_vip1] + self.driver.post_vip_plug( + self.amp, self.lb, + amphorae_network_config, + self.port, vip_subnet, + additional_vip_data=additional_vip_data) + netinfo = self.subnet_info.copy() + netinfo['additional_vips'] = [ + { + 'ip_address': additional_vip1.ip_address, + 'subnet_cidr': additional_vip1.subnet.cidr, + 'gateway': additional_vip1.subnet.gateway_ip, + 'host_routes': netinfo['host_routes'] + } + ] + netinfo['is_sriov'] = False + self.driver.clients[API_VERSION].plug_vip.assert_called_once_with( + self.amp, self.lb.vip.ip_address, netinfo) + + def test_post_network_plug(self): + # Test dhcp path + port = network_models.Port(mac_address=FAKE_MAC_ADDRESS, + fixed_ips=[], + network=self.network) + self.driver.post_network_plug(self.amp, port, self.amp_net_config) + self.driver.clients[API_VERSION].plug_network.assert_called_once_with( + self.amp, 
dict(mac_address=FAKE_MAC_ADDRESS, + fixed_ips=[], + mtu=FAKE_MTU, + is_sriov=False)) + + self.driver.clients[API_VERSION].plug_network.reset_mock() + + # Test fixed IP path + self.driver.post_network_plug(self.amp, self.port, self.amp_net_config) + self.driver.clients[API_VERSION].plug_network.assert_called_once_with( + self.amp, dict(mac_address=FAKE_MAC_ADDRESS, + fixed_ips=[dict(ip_address='198.51.100.5', + subnet_cidr='198.51.100.0/24', + host_routes=[], + gateway=FAKE_GATEWAY)], + mtu=FAKE_MTU, is_sriov=False)) + + self.driver.clients[API_VERSION].plug_network.reset_mock() + + # Test member network on vip port + port = network_models.Port(id=self.amp.vrrp_port_id, + mac_address=FAKE_MAC_ADDRESS, + fixed_ips=[self.fixed_ip], + network=self.network) + self.driver.post_network_plug(self.amp, port, self.amp_net_config) + self.driver.clients[API_VERSION].plug_network.assert_called_once_with( + self.amp, dict(mac_address=FAKE_MAC_ADDRESS, + fixed_ips=[dict(ip_address='198.51.100.5', + subnet_cidr='198.51.100.0/24', + host_routes=[], + gateway=FAKE_GATEWAY)], + mtu=FAKE_MTU, + vip_net_info=dict( + vip=self.amp.ha_ip, + subnet_cidr=FAKE_VIP_SUBNET, + mac_address=FAKE_MAC_ADDRESS, + gateway=None, + vrrp_ip=self.amp.vrrp_ip, + host_routes=[], + additional_vips=[], + mtu=FAKE_MTU, + is_sriov=False + ), is_sriov=False)) + + def test_post_network_plug_with_host_routes(self): + SUBNET_ID = 'SUBNET_ID' + FIXED_IP1 = '192.0.2.2' + FIXED_IP2 = '192.0.2.3' + SUBNET_CIDR = '192.0.2.0/24' + DEST1 = '198.51.100.0/24' + DEST2 = '203.0.113.0/24' + NEXTHOP = '192.0.2.1' + host_routes = [network_models.HostRoute(destination=DEST1, + nexthop=NEXTHOP), + network_models.HostRoute(destination=DEST2, + nexthop=NEXTHOP)] + subnet = network_models.Subnet(id=SUBNET_ID, cidr=SUBNET_CIDR, + gateway_ip=FAKE_GATEWAY, + ip_version=4, host_routes=host_routes) + fixed_ips = [ + network_models.FixedIP(subnet_id=subnet.id, ip_address=FIXED_IP1, + subnet=subnet), + network_models.FixedIP(subnet_id=subnet.id, ip_address=FIXED_IP2, + subnet=subnet) + ] + port = network_models.Port(mac_address=FAKE_MAC_ADDRESS, + fixed_ips=fixed_ips, + network=self.network) + self.driver.post_network_plug(self.amp, port, self.amp_net_config) + expected_fixed_ips = [ + {'ip_address': FIXED_IP1, 'subnet_cidr': SUBNET_CIDR, + 'gateway': FAKE_GATEWAY, + 'host_routes': [{'destination': DEST1, 'nexthop': NEXTHOP}, + {'destination': DEST2, 'nexthop': NEXTHOP}]}, + {'ip_address': FIXED_IP2, 'subnet_cidr': SUBNET_CIDR, + 'gateway': FAKE_GATEWAY, + 'host_routes': [{'destination': DEST1, 'nexthop': NEXTHOP}, + {'destination': DEST2, 'nexthop': NEXTHOP}]} + ] + self.driver.clients[API_VERSION].plug_network.assert_called_once_with( + self.amp, dict(mac_address=FAKE_MAC_ADDRESS, + fixed_ips=expected_fixed_ips, + mtu=FAKE_MTU, + is_sriov=False)) + + def test_get_haproxy_versions(self): + ref_haproxy_versions = ['1', '6'] + result = self.driver._get_haproxy_versions(self.amp) + self.driver.clients[API_VERSION].get_info.assert_called_once_with( + self.amp, timeout_dict=None) + self.assertEqual(ref_haproxy_versions, result) + + def test_get_haproxy_versions_with_timeout_dict(self): + ref_haproxy_versions = ['1', '6'] + timeout_dict = { + constants.CONN_MAX_RETRIES: 100, + constants.CONN_RETRY_INTERVAL: 1 + } + result = self.driver._get_haproxy_versions(self.amp, + timeout_dict=timeout_dict) + self.driver.clients[API_VERSION].get_info.assert_called_once_with( + self.amp, timeout_dict=timeout_dict) + self.assertEqual(ref_haproxy_versions, result) + + def 
test_populate_amphora_api_version(self):
+
+        # Normal path: no version set yet, so it is discovered and populated
+        ref_haproxy_version = list(map(int, API_VERSION.split('.')))
+        mock_amp = mock.MagicMock()
+        mock_amp.api_version = None
+        result = self.driver._populate_amphora_api_version(mock_amp)
+        self.assertEqual(API_VERSION, mock_amp.api_version)
+        self.assertEqual(ref_haproxy_version, result)
+
+        # Existing version passed in
+        fake_version = '9999.9999'
+        ref_haproxy_version = list(map(int, fake_version.split('.')))
+        mock_amp = mock.MagicMock()
+        mock_amp.api_version = fake_version
+        result = self.driver._populate_amphora_api_version(mock_amp)
+        self.assertEqual(fake_version, mock_amp.api_version)
+        self.assertEqual(ref_haproxy_version, result)
+
+    def test_update_amphora_agent_config(self):
+        self.driver.update_amphora_agent_config(
+            self.amp, octavia_utils.b('test'))
+        self.driver.clients[
+            API_VERSION].update_agent_config.assert_called_once_with(
+            self.amp, octavia_utils.b('test'), timeout_dict=None)
+
+
+class TestAmphoraAPIClientTest(base.TestCase):
+
+    def setUp(self):
+        super().setUp()
+        self.driver = driver.AmphoraAPIClient1_0()
+        self.base_url = "https://192.0.2.77:9443/"
+        self.base_url_ver = self.base_url + "1.0"
+        self.amp = models.Amphora(lb_network_ip='192.0.2.77', compute_id='123')
+        self.amp.api_version = API_VERSION
+        self.port_info = dict(mac_address=FAKE_MAC_ADDRESS)
+        # Override with much lower values for testing purposes.
+        conf = oslo_fixture.Config(cfg.CONF)
+        conf.config(group="haproxy_amphora", connection_max_retries=2)
+
+        self.subnet_info = {'subnet_cidr': FAKE_CIDR,
+                            'gateway': FAKE_GATEWAY,
+                            'mac_address': FAKE_MAC_ADDRESS,
+                            'vrrp_ip': self.amp.vrrp_ip}
+        # Patch out time.sleep so the retry loops do not slow the tests,
+        # and keep a reference to the patcher itself (start() returns the
+        # mock, not the patcher) so the cleanup really stops the patch.
+        patcher = mock.patch('time.sleep')
+        patcher.start()
+        self.addCleanup(patcher.stop)
+        self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
+                             constants.REQ_READ_TIMEOUT: 2,
+                             constants.CONN_MAX_RETRIES: 3,
+                             constants.CONN_RETRY_INTERVAL: 4}
+
+    def test_base_url(http://github.com/self):
+        url = self.driver._base_url(http://github.com/FAKE_IP)
+        self.assertEqual('https://192.0.2.10:9443/', url)
+        url = self.driver._base_url(http://github.com/FAKE_IPV6,%20self.amp.api_version)
+        self.assertEqual('https://[2001:db8::cafe]:9443/1.0/', url)
+        url = self.driver._base_url(http://github.com/FAKE_IPV6_LLA,%20self.amp.api_version)
+        self.assertEqual('https://[fe80::00ff:fe00:cafe%o-hm0]:9443/1.0/', url)
+
+    @mock.patch('requests.Session.get', side_effect=requests.ConnectionError)
+    @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.time.sleep')
+    def test_request(self, mock_sleep, mock_get):
+        self.assertRaises(driver_except.TimeOutException,
+                          self.driver.request, 'get', self.amp,
+                          'unavailableURL', self.timeout_dict)
+
+    @requests_mock.mock()
+    def test_get_api_version(self, mock_requests):
+        ref_api_version = {'api_version': '0.1'}
+        mock_requests.get(f'{self.base_url}/',
+                          json=ref_api_version)
+        result = self.driver.get_api_version(self.amp)
+        self.assertEqual(ref_api_version, result)
+
+    @requests_mock.mock()
+    def test_get_info(self, m):
+        info = {"hostname": "some_hostname", "version": "some_version",
+                "api_version": "1.0", "uuid": FAKE_UUID_1}
+        m.get(f"{self.base_url_ver}/info",
+              json=info)
+        information = self.driver.get_info(self.amp)
+        self.assertEqual(info, information)
+
+    @requests_mock.mock()
+    def test_get_info_with_timeout_dict(self, m):
+        info = {"hostname": "some_hostname", "version": "some_version",
+                "api_version": 
"1.0", "uuid": FAKE_UUID_1} + m.get(f"{self.base_url_ver}/info", + json=info) + timeout_dict = { + constants.CONN_MAX_RETRIES: 100, + constants.CONN_RETRY_INTERVAL: 1 + } + information = self.driver.get_info(self.amp, timeout_dict=timeout_dict) + self.assertEqual(info, information) + + @requests_mock.mock() + def test_get_info_unauthorized(self, m): + m.get(f"{self.base_url_ver}/info", + status_code=401) + self.assertRaises(exc.Unauthorized, self.driver.get_info, self.amp) + + @requests_mock.mock() + def test_get_info_missing(self, m): + m.get(f"{self.base_url_ver}/info", + status_code=404, + headers={'content-type': 'application/json'}) + self.assertRaises(exc.NotFound, self.driver.get_info, self.amp) + + @requests_mock.mock() + def test_get_info_server_error(self, m): + m.get(f"{self.base_url_ver}/info", + status_code=500) + self.assertRaises(exc.InternalServerError, self.driver.get_info, + self.amp) + + @requests_mock.mock() + def test_get_info_service_unavailable(self, m): + m.get(f"{self.base_url_ver}/info", + status_code=503) + self.assertRaises(exc.ServiceUnavailable, self.driver.get_info, + self.amp) + + @requests_mock.mock() + def test_get_details(self, m): + details = {"hostname": "some_hostname", "version": "some_version", + "api_version": "1.0", "uuid": FAKE_UUID_1, + "network_tx": "some_tx", "network_rx": "some_rx", + "active": True, "haproxy_count": 10} + m.get(f"{self.base_url_ver}/details", + json=details) + amp_details = self.driver.get_details(self.amp) + self.assertEqual(details, amp_details) + + @requests_mock.mock() + def test_get_details_unauthorized(self, m): + m.get(f"{self.base_url_ver}/details", + status_code=401) + self.assertRaises(exc.Unauthorized, self.driver.get_details, self.amp) + + @requests_mock.mock() + def test_get_details_missing(self, m): + m.get(f"{self.base_url_ver}/details", + status_code=404, + headers={'content-type': 'application/json'}) + self.assertRaises(exc.NotFound, self.driver.get_details, self.amp) + + @requests_mock.mock() + def test_get_details_server_error(self, m): + m.get(f"{self.base_url_ver}/details", + status_code=500) + self.assertRaises(exc.InternalServerError, self.driver.get_details, + self.amp) + + @requests_mock.mock() + def test_get_details_service_unavailable(self, m): + m.get(f"{self.base_url_ver}/details", + status_code=503) + self.assertRaises(exc.ServiceUnavailable, self.driver.get_details, + self.amp) + + @requests_mock.mock() + def test_get_all_listeners(self, m): + listeners = [{"status": "ONLINE", "provisioning_status": "ACTIVE", + "type": "PASSIVE", "uuid": FAKE_UUID_1}] + m.get(f"{self.base_url_ver}/listeners", + json=listeners) + all_listeners = self.driver.get_all_listeners(self.amp) + self.assertEqual(listeners, all_listeners) + + @requests_mock.mock() + def test_get_all_listeners_unauthorized(self, m): + m.get(f"{self.base_url_ver}/listeners", + status_code=401) + self.assertRaises(exc.Unauthorized, self.driver.get_all_listeners, + self.amp) + + @requests_mock.mock() + def test_get_all_listeners_missing(self, m): + m.get(f"{self.base_url_ver}/listeners", + status_code=404, + headers={'content-type': 'application/json'}) + self.assertRaises(exc.NotFound, self.driver.get_all_listeners, + self.amp) + + @requests_mock.mock() + def test_get_all_listeners_server_error(self, m): + m.get(f"{self.base_url_ver}/listeners", + status_code=500) + self.assertRaises(exc.InternalServerError, + self.driver.get_all_listeners, self.amp) + + @requests_mock.mock() + def test_get_all_listeners_service_unavailable(self, m): + 
m.get(f"{self.base_url_ver}/listeners", + status_code=503) + self.assertRaises(exc.ServiceUnavailable, + self.driver.get_all_listeners, self.amp) + + @requests_mock.mock() + def test_start_loadbalancer(self, m): + m.put(f"{self.base_url_ver}/loadbalancer/{FAKE_UUID_1}/start") + self.driver.start_listener(self.amp, FAKE_UUID_1) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_start_loadbalancer_missing(self, m): + m.put(f"{self.base_url_ver}/loadbalancer/{FAKE_UUID_1}/start", + status_code=404, + headers={'content-type': 'application/json'}) + self.assertRaises(exc.NotFound, self.driver.start_listener, + self.amp, FAKE_UUID_1) + + @requests_mock.mock() + def test_start_loadbalancer_unauthorized(self, m): + m.put(f"{self.base_url_ver}/loadbalancer/{FAKE_UUID_1}/start", + status_code=401) + self.assertRaises(exc.Unauthorized, self.driver.start_listener, + self.amp, FAKE_UUID_1) + + @requests_mock.mock() + def test_start_loadbalancer_server_error(self, m): + m.put(f"{self.base_url_ver}/loadbalancer/{FAKE_UUID_1}/start", + status_code=500) + self.assertRaises(exc.InternalServerError, self.driver.start_listener, + self.amp, FAKE_UUID_1) + + @requests_mock.mock() + def test_start_loadbalancer_service_unavailable(self, m): + m.put(f"{self.base_url_ver}/loadbalancer/{FAKE_UUID_1}/start", + status_code=503) + self.assertRaises(exc.ServiceUnavailable, self.driver.start_listener, + self.amp, FAKE_UUID_1) + + @requests_mock.mock() + def test_delete_listener(self, m): + m.delete(f"{self.base_url_ver}/listeners/{FAKE_UUID_1}", json={}) + self.driver.delete_listener(self.amp, FAKE_UUID_1) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_delete_listener_missing(self, m): + m.delete(f"{self.base_url_ver}/listeners/{FAKE_UUID_1}", + status_code=404, + headers={'content-type': 'application/json'}) + self.driver.delete_listener(self.amp, FAKE_UUID_1) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_delete_listener_unauthorized(self, m): + m.delete(f"{self.base_url_ver}/listeners/{FAKE_UUID_1}", + status_code=401) + self.assertRaises(exc.Unauthorized, self.driver.delete_listener, + self.amp, FAKE_UUID_1) + + @requests_mock.mock() + def test_delete_listener_server_error(self, m): + m.delete(f"{self.base_url_ver}/listeners/{FAKE_UUID_1}", + status_code=500) + self.assertRaises(exc.InternalServerError, self.driver.delete_listener, + self.amp, FAKE_UUID_1) + + @requests_mock.mock() + def test_delete_listener_service_unavailable(self, m): + m.delete(f"{self.base_url_ver}/listeners/{FAKE_UUID_1}", + status_code=503) + self.assertRaises(exc.ServiceUnavailable, self.driver.delete_listener, + self.amp, FAKE_UUID_1) + + @requests_mock.mock() + def test_upload_cert_pem(self, m): + m.put("{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME)) + self.driver.upload_cert_pem(self.amp, FAKE_UUID_1, + FAKE_PEM_FILENAME, + "some_file") + self.assertTrue(m.called) + + @requests_mock.mock() + def test_upload_invalid_cert_pem(self, m): + m.put("{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=400) + self.assertRaises(exc.InvalidRequest, self.driver.upload_cert_pem, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME, + "some_file") + + @requests_mock.mock() + def test_upload_cert_pem_unauthorized(self, m): + 
m.put("{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=401) + self.assertRaises(exc.Unauthorized, self.driver.upload_cert_pem, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME, + "some_file") + + @requests_mock.mock() + def test_upload_cert_pem_server_error(self, m): + m.put("{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=500) + self.assertRaises(exc.InternalServerError, self.driver.upload_cert_pem, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME, + "some_file") + + @requests_mock.mock() + def test_upload_cert_pem_service_unavailable(self, m): + m.put("{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=503) + self.assertRaises(exc.ServiceUnavailable, self.driver.upload_cert_pem, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME, + "some_file") + + @requests_mock.mock() + def test_update_cert_for_rotation(self, m): + m.put(f"{self.base_url_ver}/certificate") + resp_body = self.driver.update_cert_for_rotation(self.amp, + "some_file") + self.assertEqual(200, resp_body.status_code) + + @requests_mock.mock() + def test_update_invalid_cert_for_rotation(self, m): + m.put(f"{self.base_url_ver}/certificate", + status_code=400) + self.assertRaises(exc.InvalidRequest, + self.driver.update_cert_for_rotation, self.amp, + "some_file") + + @requests_mock.mock() + def test_update_cert_for_rotation_unauthorized(self, m): + m.put(f"{self.base_url_ver}/certificate", + status_code=401) + self.assertRaises(exc.Unauthorized, + self.driver.update_cert_for_rotation, self.amp, + "some_file") + + @requests_mock.mock() + def test_update_cert_for_rotation_error(self, m): + m.put(f"{self.base_url_ver}/certificate", + status_code=500) + self.assertRaises(exc.InternalServerError, + self.driver.update_cert_for_rotation, self.amp, + "some_file") + + @requests_mock.mock() + def test_update_cert_for_rotation_unavailable(self, m): + m.put(f"{self.base_url_ver}/certificate", + status_code=503) + self.assertRaises(exc.ServiceUnavailable, + self.driver.update_cert_for_rotation, self.amp, + "some_file") + + @requests_mock.mock() + def test_get_cert_5sum(self, m): + md5sum = {"md5sum": "some_real_sum"} + m.get("{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), json=md5sum) + sum_test = self.driver.get_cert_md5sum(self.amp, FAKE_UUID_1, + FAKE_PEM_FILENAME) + self.assertIsNotNone(sum_test) + + @requests_mock.mock() + def test_get_cert_5sum_missing(self, m): + m.get("{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=404, + headers={'content-type': 'application/json'}) + self.assertRaises(exc.NotFound, self.driver.get_cert_md5sum, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) + + @requests_mock.mock() + def test_get_cert_5sum_unauthorized(self, m): + m.get("{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=401) + self.assertRaises(exc.Unauthorized, self.driver.get_cert_md5sum, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) + + 
@requests_mock.mock() + def test_get_cert_5sum_server_error(self, m): + m.get("{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=500) + self.assertRaises(exc.InternalServerError, self.driver.get_cert_md5sum, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) + + @requests_mock.mock() + def test_get_cert_5sum_service_unavailable(self, m): + m.get("{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=503) + self.assertRaises(exc.ServiceUnavailable, self.driver.get_cert_md5sum, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) + + @requests_mock.mock() + def test_delete_cert_pem(self, m): + m.delete( + "{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME)) + self.driver.delete_cert_pem(self.amp, FAKE_UUID_1, + FAKE_PEM_FILENAME) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_delete_cert_pem_missing(self, m): + m.delete( + "{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=404, + headers={'content-type': 'application/json'}) + self.driver.delete_cert_pem(self.amp, FAKE_UUID_1, + FAKE_PEM_FILENAME) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_delete_cert_pem_unauthorized(self, m): + m.delete( + "{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=401) + self.assertRaises(exc.Unauthorized, self.driver.delete_cert_pem, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) + + @requests_mock.mock() + def test_delete_cert_pem_server_error(self, m): + m.delete( + "{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=500) + self.assertRaises(exc.InternalServerError, self.driver.delete_cert_pem, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) + + @requests_mock.mock() + def test_delete_cert_pem_service_unavailable(self, m): + m.delete( + "{base}/loadbalancer/{loadbalancer_id}/certificates/" + "{filename}".format( + base=self.base_url_ver, loadbalancer_id=FAKE_UUID_1, + filename=FAKE_PEM_FILENAME), status_code=503) + self.assertRaises(exc.ServiceUnavailable, self.driver.delete_cert_pem, + self.amp, FAKE_UUID_1, FAKE_PEM_FILENAME) + + @requests_mock.mock() + def test_upload_config(self, m): + config = {"name": "fake_config"} + m.put( + f"{self.base_url_ver}/loadbalancer/{self.amp.id}/" + f"{FAKE_UUID_1}/haproxy", + json=config) + self.driver.upload_config(self.amp, FAKE_UUID_1, + config) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_upload_invalid_config(self, m): + config = '{"name": "bad_config"}' + m.put( + f"{self.base_url_ver}/loadbalancer/{self.amp.id}/" + f"{FAKE_UUID_1}/haproxy", + status_code=400) + self.assertRaises(exc.InvalidRequest, self.driver.upload_config, + self.amp, FAKE_UUID_1, config) + + @requests_mock.mock() + def test_upload_config_unauthorized(self, m): + config = '{"name": "bad_config"}' + m.put( + f"{self.base_url_ver}/loadbalancer/{self.amp.id}/" + f"{FAKE_UUID_1}/haproxy", + status_code=401) + self.assertRaises(exc.Unauthorized, 
self.driver.upload_config, + self.amp, FAKE_UUID_1, config) + + @requests_mock.mock() + def test_upload_config_server_error(self, m): + config = '{"name": "bad_config"}' + m.put( + f"{self.base_url_ver}/loadbalancer/{self.amp.id}/" + f"{FAKE_UUID_1}/haproxy", + status_code=500) + self.assertRaises(exc.InternalServerError, self.driver.upload_config, + self.amp, FAKE_UUID_1, config) + + @requests_mock.mock() + def test_upload_config_service_unavailable(self, m): + config = '{"name": "bad_config"}' + m.put( + f"{self.base_url_ver}/loadbalancer/{self.amp.id}/" + f"{FAKE_UUID_1}/haproxy", + status_code=503) + self.assertRaises(exc.ServiceUnavailable, self.driver.upload_config, + self.amp, FAKE_UUID_1, config) + + @requests_mock.mock() + def test_upload_udp_config(self, m): + config = {"name": "fake_config"} + m.put( + f"{self.base_url_ver}/listeners/{self.amp.id}/" + f"{FAKE_UUID_1}/udp_listener", + json=config) + self.driver.upload_udp_config(self.amp, FAKE_UUID_1, config) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_upload_udp_invalid_config(self, m): + config = '{"name": "bad_config"}' + m.put( + f"{self.base_url_ver}/listeners/{self.amp.id}/" + f"{FAKE_UUID_1}/udp_listener", + status_code=400) + self.assertRaises(exc.InvalidRequest, self.driver.upload_udp_config, + self.amp, FAKE_UUID_1, config) + + @requests_mock.mock() + def test_upload_udp_config_unauthorized(self, m): + config = '{"name": "bad_config"}' + m.put( + f"{self.base_url_ver}/listeners/{self.amp.id}/" + f"{FAKE_UUID_1}/udp_listener", + status_code=401) + self.assertRaises(exc.Unauthorized, self.driver.upload_udp_config, + self.amp, FAKE_UUID_1, config) + + @requests_mock.mock() + def test_upload_udp_config_server_error(self, m): + config = '{"name": "bad_config"}' + m.put( + f"{self.base_url_ver}/listeners/{self.amp.id}/" + f"{FAKE_UUID_1}/udp_listener", + status_code=500) + self.assertRaises(exc.InternalServerError, + self.driver.upload_udp_config, + self.amp, FAKE_UUID_1, config) + + @requests_mock.mock() + def test_upload_udp_config_service_unavailable(self, m): + config = '{"name": "bad_config"}' + m.put( + f"{self.base_url_ver}/listeners/{self.amp.id}/" + f"{FAKE_UUID_1}/udp_listener", + status_code=503) + self.assertRaises(exc.ServiceUnavailable, + self.driver.upload_udp_config, + self.amp, FAKE_UUID_1, config) + + @requests_mock.mock() + def test_plug_vip(self, m): + m.post(f"{self.base_url_ver}/plug/vip/{FAKE_IP}") + self.driver.plug_vip(self.amp, FAKE_IP, self.subnet_info) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_plug_vip_api_not_ready(self, m): + m.post(f"{self.base_url_ver}/plug/vip/{FAKE_IP}", + status_code=404, headers={'content-type': 'text/html'}) + self.assertRaises(driver_except.TimeOutException, + self.driver.plug_vip, + self.amp, FAKE_IP, self.subnet_info) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_plug_network(self, m): + m.post(f"{self.base_url_ver}/plug/network") + self.driver.plug_network(self.amp, self.port_info) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_upload_vrrp_config(self, m): + config = '{"name": "bad_config"}' + m.put(f"{self.base_url_ver}/vrrp/upload") + self.driver.upload_vrrp_config(self.amp, config) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_vrrp_action(self, m): + action = 'start' + m.put(f"{self.base_url_ver}/vrrp/{action}") + self.driver._vrrp_action(action, self.amp) + self.assertTrue(m.called) + + @requests_mock.mock() + def test_get_interface(self, m): + interface = 
[{"interface": "eth1"}] + ip_addr = '192.51.100.1' + m.get(f"{self.base_url_ver}/interface/{ip_addr}", + json=interface) + self.driver.get_interface(self.amp, ip_addr) + self.assertTrue(m.called) + + m.register_uri('GET', + self.base_url_ver + '/interface/' + ip_addr, + status_code=500, reason='FAIL', json='FAIL') + self.assertRaises(exc.InternalServerError, + self.driver.get_interface, + self.amp, ip_addr) + + @requests_mock.mock() + def test_update_agent_config(self, m): + m.put(f"{self.base_url_ver}/config") + resp_body = self.driver.update_agent_config(self.amp, "some_file") + self.assertEqual(200, resp_body.status_code) + + @requests_mock.mock() + def test_update_agent_config_error(self, m): + m.put(f"{self.base_url_ver}/config", status_code=500) + self.assertRaises(exc.InternalServerError, + self.driver.update_agent_config, self.amp, + "some_file") + + @requests_mock.mock() + def test_set_interface_rules(self, m): + ip_addr = '192.0.2.44' + rules = ('[{"protocol":"TCP","cidr":"192.0.2.0/24","port":8080},' + '{"protocol":"UDP","cidr":null,"port":80}]') + m.put(f'{self.base_url_ver}/interface/{ip_addr}/rules') + + self.driver.set_interface_rules(self.amp, ip_addr, rules) + self.assertTrue(m.called) diff --git a/octavia/tests/unit/amphorae/drivers/health/__init__.py b/octavia/tests/unit/amphorae/drivers/health/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/health/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py b/octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py new file mode 100644 index 0000000000..1b9cda9e95 --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py @@ -0,0 +1,1599 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import binascii +import random +import socket +import time +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +import sqlalchemy + +from octavia.amphorae.drivers.health import heartbeat_udp +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.tests.unit import base + + +FAKE_ID = 1 +KEY = 'TEST' +IP = '192.0.2.10' +PORT = random.randrange(1, 9000) +RLIMIT = random.randrange(1, 100) +FAKE_ADDRINFO = ( + socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_UDP, + '', + (IP, PORT) +) + + +class TestException(Exception): + + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) + + +class TestHeartbeatUDP(base.TestCase): + + def setUp(self): + super().setUp() + self.conf = oslo_fixture.Config(cfg.CONF) + self.conf.config(group="health_manager", heartbeat_key=KEY) + self.conf.config(group="health_manager", bind_ip=IP) + self.conf.config(group="health_manager", bind_port=PORT) + self.conf.config(group="health_manager", sock_rlimit=0) + self.amphora_id = uuidutils.generate_uuid() + self.listener_id = uuidutils.generate_uuid() + self.listener_stats = data_models.ListenerStatistics( + listener_id=self.listener_id, + amphora_id=self.amphora_id, + bytes_in=random.randrange(1000000000), + bytes_out=random.randrange(1000000000), + active_connections=random.randrange(1000000000), + total_connections=random.randrange(1000000000), + request_errors=random.randrange(1000000000), + received_time=float(random.randrange(1000000000))) + + @mock.patch('octavia.statistics.stats_base.update_stats_via_driver') + def test_update_stats_v1(self, mock_stats_base): + health = { + "id": self.amphora_id, + "ver": 1, + "listeners": { + self.listener_id: { + "status": constants.OPEN, + "stats": { + "ereq": self.listener_stats.request_errors, + "conns": self.listener_stats.active_connections, + "totconns": self.listener_stats.total_connections, + "rx": self.listener_stats.bytes_in, + "tx": self.listener_stats.bytes_out, + }, + "pools": { + "pool-id-1": { + "status": constants.UP, + "members": {"member-id-1": constants.ONLINE} + } + } + } + }, + 'recv_time': self.listener_stats.received_time + } + + heartbeat_udp.update_stats(health) + + mock_stats_base.assert_called_once_with( + [self.listener_stats], deltas=False) + + @mock.patch('octavia.statistics.stats_base.update_stats_via_driver') + def test_update_stats_v2(self, mock_stats_base): + health = { + "id": self.amphora_id, + "ver": 2, + "seq": 5, + "listeners": { + self.listener_id: { + "status": constants.OPEN, + "stats": { + "ereq": self.listener_stats.request_errors, + "conns": self.listener_stats.active_connections, + "totconns": self.listener_stats.total_connections, + "rx": self.listener_stats.bytes_in, + "tx": self.listener_stats.bytes_out, + } + } + }, + "pools": { + f"pool-id-1:{self.listener_id}": { + "status": constants.UP, + "members": { + "member-id-1": { + "status": constants.ONLINE + } + } + } + }, + 'recv_time': self.listener_stats.received_time + } + + heartbeat_udp.update_stats(health) + + mock_stats_base.assert_called_once_with( + [self.listener_stats], deltas=False) + + @mock.patch('octavia.statistics.stats_base.update_stats_via_driver') + def test_update_stats_v3(self, mock_stats_base): + health = { + "id": self.amphora_id, + "ver": 3, + "seq": 6, + "listeners": { + self.listener_id: { + "status": constants.OPEN, + "stats": { + "ereq": 
self.listener_stats.request_errors, + "conns": self.listener_stats.active_connections, + "totconns": self.listener_stats.total_connections, + "rx": self.listener_stats.bytes_in, + "tx": self.listener_stats.bytes_out, + } + } + }, + "pools": { + f"pool-id-1:{self.listener_id}": { + "status": constants.UP, + "members": { + "member-id-1": { + "status": constants.ONLINE + } + } + } + }, + 'recv_time': self.listener_stats.received_time + } + + heartbeat_udp.update_stats(health) + + mock_stats_base.assert_called_once_with( + [self.listener_stats], deltas=True) + + @mock.patch('socket.getaddrinfo') + @mock.patch('socket.socket') + def test_update(self, mock_socket, mock_getaddrinfo): + socket_mock = mock.MagicMock() + mock_socket.return_value = socket_mock + mock_getaddrinfo.return_value = [FAKE_ADDRINFO] + bind_mock = mock.MagicMock() + socket_mock.bind = bind_mock + + getter = heartbeat_udp.UDPStatusGetter() + + mock_getaddrinfo.assert_called_with(IP, PORT, 0, socket.SOCK_DGRAM) + self.assertEqual((IP, PORT), getter.sockaddr) + mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_DGRAM) + bind_mock.assert_called_once_with((IP, PORT)) + + self.conf.config(group="health_manager", sock_rlimit=RLIMIT) + mock_getaddrinfo.return_value = [FAKE_ADDRINFO, FAKE_ADDRINFO] + getter.update(KEY, IP, PORT) + + @mock.patch('socket.getaddrinfo') + @mock.patch('socket.socket') + def test_dorecv(self, mock_socket, mock_getaddrinfo): + socket_mock = mock.MagicMock() + mock_socket.return_value = socket_mock + mock_getaddrinfo.return_value = [range(1, 6)] + recvfrom = mock.MagicMock() + socket_mock.recvfrom = recvfrom + + getter = heartbeat_udp.UDPStatusGetter() + + # key = 'TEST' msg = {"testkey": "TEST"} + sample_msg = ('78daab562a492d2ec94ead54b252500a710d0e5' + '1aa050041b506245806e5c1971e79951818394e' + 'a6e71ad989ff950945f9573f4ab6f83e25db8ed7') + bin_msg = binascii.unhexlify(sample_msg) + recvfrom.return_value = bin_msg, ('192.0.2.1', 2) + (obj, srcaddr) = getter.dorecv() + self.assertEqual('192.0.2.1', srcaddr) + self.assertIsNotNone(obj.pop('recv_time')) + self.assertEqual({"testkey": "TEST"}, obj) + + @mock.patch('octavia.amphorae.backends.health_daemon.status_message.' 
+ 'unwrap_envelope') + @mock.patch('socket.getaddrinfo') + @mock.patch('socket.socket') + def test_dorecv_bad_packet(self, mock_socket, mock_getaddrinfo, + mock_unwrap): + socket_mock = mock.MagicMock() + mock_socket.return_value = socket_mock + mock_unwrap.side_effect = Exception('boom') + mock_getaddrinfo.return_value = [range(1, 6)] + recvfrom = mock.MagicMock() + socket_mock.recvfrom = recvfrom + + getter = heartbeat_udp.UDPStatusGetter() + + # key = 'TEST' msg = {"testkey": "TEST"} + sample_msg = ('78daab562a492d2ec94ead54b252500a710d0e5' + '1aa050041b506245806e5c1971e79951818394e' + 'a6e71ad989ff950945f9573f4ab6f83e25db8ed7') + bin_msg = binascii.unhexlify(sample_msg) + recvfrom.return_value = bin_msg, 2 + self.assertRaises(exceptions.InvalidHMACException, getter.dorecv) + + @mock.patch('socket.getaddrinfo') + @mock.patch('socket.socket') + def test_check(self, mock_socket, mock_getaddrinfo): + socket_mock = mock.MagicMock() + mock_socket.return_value = socket_mock + mock_getaddrinfo.return_value = [range(1, 6)] + mock_dorecv = mock.Mock() + mock_health_executor = mock.Mock() + mock_stats_executor = mock.Mock() + mock_health_updater = mock.Mock() + + getter = heartbeat_udp.UDPStatusGetter() + getter.dorecv = mock_dorecv + mock_dorecv.side_effect = [(dict(id=FAKE_ID), 2)] + getter.health_executor = mock_health_executor + getter.stats_executor = mock_stats_executor + getter.health_updater = mock_health_updater + + getter.check() + getter.health_executor.shutdown() + getter.stats_executor.shutdown() + mock_health_executor.submit.assert_has_calls( + [mock.call(getter.health_updater.update_health, {'id': 1}, 2)]) + mock_stats_executor.submit.assert_has_calls( + [mock.call(heartbeat_udp.update_stats, {'id': 1})]) + + @mock.patch('socket.getaddrinfo') + @mock.patch('socket.socket') + def test_socket_except(self, mock_socket, mock_getaddrinfo): + self.assertRaises(exceptions.NetworkConfig, + heartbeat_udp.UDPStatusGetter) + + @mock.patch('concurrent.futures.ThreadPoolExecutor.submit') + @mock.patch('socket.getaddrinfo') + @mock.patch('socket.socket') + def test_check_exception(self, mock_socket, mock_getaddrinfo, mock_submit): + self.mock_socket = mock_socket + self.mock_getaddrinfo = mock_getaddrinfo + self.mock_getaddrinfo.return_value = [range(1, 6)] + + mock_dorecv = mock.Mock() + getter = heartbeat_udp.UDPStatusGetter() + + getter.dorecv = mock_dorecv + mock_dorecv.side_effect = exceptions.InvalidHMACException + + getter.check() + self.assertFalse(mock_submit.called) + + +class TestUpdateHealthDb(base.TestCase): + FAKE_UUID_1 = uuidutils.generate_uuid() + + def setUp(self): + super().setUp() + + session_patch = mock.patch('octavia.db.api.get_session') + self.addCleanup(session_patch.stop) + self.mock_session = session_patch.start() + self.session_mock = mock.MagicMock() + self.mock_session.return_value = self.session_mock + + self.hm = heartbeat_udp.UpdateHealthDb() + self.amphora_repo = mock.MagicMock() + self.amphora_health_repo = mock.MagicMock() + self.listener_repo = mock.MagicMock() + self.loadbalancer_repo = mock.MagicMock() + self.member_repo = mock.MagicMock() + self.pool_repo = mock.MagicMock() + + self.hm.amphora_repo = self.amphora_repo + self.hm.amphora_health_repo = self.amphora_health_repo + self.hm.listener_repo = self.listener_repo + self.hm.listener_repo.count.return_value = 1 + self.hm.loadbalancer_repo = self.loadbalancer_repo + self.hm.member_repo = self.member_repo + self.hm.pool_repo = self.pool_repo + + def _make_mock_lb_tree(self, listener=True, pool=True, 
health_monitor=True, + members=1, lb_prov_status=constants.ACTIVE): + mock_lb = mock.Mock() + mock_lb.id = self.FAKE_UUID_1 + mock_lb.pools = [] + mock_lb.listeners = [] + mock_lb.provisioning_status = lb_prov_status + mock_lb.operating_status = 'blah' + + mock_listener1 = None + mock_pool1 = None + mock_members = None + + if listener: + mock_listener1 = mock.Mock() + mock_listener1.id = 'listener-id-1' + mock_lb.listeners = [mock_listener1] + + if pool: + mock_pool1 = mock.Mock() + mock_pool1.id = "pool-id-1" + mock_pool1.members = [] + if health_monitor: + mock_hm1 = mock.Mock() + mock_pool1.health_monitor = mock_hm1 + else: + mock_pool1.health_monitor = None + mock_lb.pools = [mock_pool1] + if mock_listener1: + mock_listener1.pools = [mock_pool1] + mock_listener1.default_pool = mock_pool1 + for i in range(members): + mock_member_x = mock.Mock() + mock_member_x.id = f'member-id-{i + 1}' + if health_monitor: + mock_member_x.operating_status = 'NOTHING_MATCHABLE' + else: + mock_member_x.operating_status = constants.NO_MONITOR + mock_pool1.members.append(mock_member_x) + mock_members = mock_pool1.members + + return mock_lb, mock_listener1, mock_pool1, mock_members + + def _make_fake_lb_health_dict(self, listener=True, pool=True, + health_monitor=True, members=1, + lb_prov_status=constants.ACTIVE, + listener_protocol=constants.PROTOCOL_TCP, + enabled=True): + + lb_ref = {'enabled': enabled, 'id': self.FAKE_UUID_1, + constants.OPERATING_STATUS: 'bogus', + constants.PROVISIONING_STATUS: lb_prov_status} + + if pool: + members_dict = {} + if health_monitor: + member_operating_status = 'NOTHING_MATCHABLE' + else: + member_operating_status = constants.NO_MONITOR + + for i in range(members): + member_id = f'member-id-{i + 1}' + members_dict[member_id] = { + constants.OPERATING_STATUS: member_operating_status} + + pool_ref = {'pool-id-1': {'members': members_dict, + constants.OPERATING_STATUS: 'bogus'}} + lb_ref['pools'] = pool_ref + + if listener: + listener_ref = {'listener-id-1': { + constants.OPERATING_STATUS: 'bogus', + 'protocol': listener_protocol, + 'enabled': True}} + lb_ref['listeners'] = listener_ref + + return lb_ref + + def test_update_health_no_listener(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": {}, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict(listener=False, pool=False) + self.hm.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + self.assertTrue(self.loadbalancer_repo.update.called) + self.assertTrue(self.amphora_health_repo.replace.called) + + def test_update_health_lb_disabled(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": {}, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict( + listener=True, pool=True, enabled=False) + self.hm.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + self.assertTrue(self.loadbalancer_repo.update.called) + self.assertTrue(self.amphora_health_repo.replace.called) + + def test_update_health_lb_pending_no_listener(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": {}, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict( + listener=True, pool=False, lb_prov_status=constants.PENDING_UPDATE) + self.hm.amphora_repo.get_lb_for_health_update.return_value = 
lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + self.assertTrue(self.loadbalancer_repo.update.called) + self.assertTrue(self.amphora_health_repo.replace.called) + + def test_update_health_missing_listener(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": {}, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict(listener=True, pool=False) + self.hm.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + self.assertTrue(self.loadbalancer_repo.update.called) + self.assertFalse(self.amphora_health_repo.replace.called) + + def test_update_health_recv_time_stale(self): + hb_interval = cfg.CONF.health_manager.heartbeat_interval + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": {}, + "recv_time": time.time() - hb_interval - 1 # extra -1 for buffer + } + + lb_ref = self._make_fake_lb_health_dict(listener=False, pool=False) + + self.hm.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + # Receive time is stale, so we shouldn't see this called + self.assertFalse(self.loadbalancer_repo.update.called) + + def test_update_health_replace_error(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.UP, + "members": {"member-id-1": constants.UP} + } + } + } + }, + "recv_time": time.time() + } + + self.session_mock.commit.side_effect = TestException('boom') + + lb_ref = self._make_fake_lb_health_dict() + + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + self.session_mock.rollback.assert_called_once() + + def test_update_health_online(self): + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.UP, + "members": {"member-id-1": constants.UP} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + + for member_id, member in pool.get('members', {}).items(): + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.ONLINE) + + # If the listener count is wrong, make sure we don't update + lb_ref['listeners']['listener-id-2'] = { + constants.OPERATING_STATUS: 'bogus'} + + self.amphora_health_repo.replace.reset_mock() + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(not self.amphora_health_repo.replace.called) + + def test_update_health_listener_disabled(self): + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": 
{"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.UP, + "members": {"member-id-1": constants.UP} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + lb_ref['listeners']['listener-id-2'] = { + 'enabled': False, constants.OPERATING_STATUS: constants.OFFLINE} + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + + for member_id, member in pool.get('members', {}).items(): + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.ONLINE) + + def test_update_lb_pool_health_offline(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": {}} + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + self.pool_repo.update.assert_any_call( + self.session_mock, 'pool-id-1', + operating_status=constants.OFFLINE + ) + + def test_update_lb_multiple_listeners_one_error_pool(self): + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.DOWN, + "members": {"member-id-1": constants.ERROR}} + }}, + "listener-id-2": {"status": constants.OPEN, "pools": { + "pool-id-2": {"status": constants.UP, + "members": {"member-id-2": constants.UP}} + }} + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + + lb_ref['pools']['pool-id-2'] = { + constants.OPERATING_STATUS: 'bogus', + 'members': {'member-id-2': {constants.OPERATING_STATUS: 'bogus'}}} + + lb_ref['listeners']['listener-id-2'] = { + constants.OPERATING_STATUS: 'bogus', + 'protocol': constants.PROTOCOL_TCP, + 'enabled': True} + + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners').items(): + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + # Call count should be exactly 2, as each pool should be processed once + self.assertEqual(2, self.pool_repo.update.call_count) + self.pool_repo.update.assert_has_calls([ + mock.call(self.session_mock, 'pool-id-1', + operating_status=constants.ERROR), + mock.call(self.session_mock, 'pool-id-2', + operating_status=constants.ONLINE) + ], any_order=True) + + def test_update_lb_and_list_pool_health_online(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + 
"pool-id-1": {"status": constants.UP, + "members": {"member-id-1": constants.UP} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + # We should not double process a shared pool + self.hm.pool_repo.update.assert_called_once_with( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + + for member_id, member in pool.get('members', {}).items(): + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.ONLINE) + + def test_update_v2_lb_and_list_pool_health_online(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 2, + "listeners": { + "listener-id-1": {"status": constants.OPEN} + }, + "pools": { + "pool-id-1:listener-id-1": { + "status": constants.UP, + "members": {"member-id-1": constants.UP}}, + "pool-id-1:listener-id-2": { + "status": constants.UP, + "members": {"member-id-1": constants.UP}}}, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in health.get('pools', {}).items(): + # We should not double process a shared pool + self.hm.pool_repo.update.assert_called_once_with( + self.session_mock, 'pool-id-1', + operating_status=constants.ONLINE) + + for member_id, member in pool.get('members', {}).items(): + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.ONLINE) + + def test_update_pool_offline(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-5": {"status": constants.UP, + "members": {"member-id-1": constants.UP} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + + lb_ref['pools']['pool-id-2'] = { + constants.OPERATING_STATUS: constants.OFFLINE} + + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, "pool-id-1", + operating_status=constants.OFFLINE) + + def test_update_health_member_drain(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": { + "status": constants.OPEN, + "pools": { + "pool-id-1": { + "status": constants.UP, + "members": {"member-id-1": constants.DRAIN}}}}}, + "recv_time": time.time()} + + lb_ref = self._make_fake_lb_health_dict() + 
self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + + for member_id, member in pool.get('members', {}).items(): + + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.DRAINING) + + def test_update_health_member_maint(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": { + "status": constants.OPEN, + "pools": { + "pool-id-1": { + "status": constants.UP, + "members": {"member-id-1": constants.MAINT}}}}}, + "recv_time": time.time()} + + lb_ref = self._make_fake_lb_health_dict() + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + + for member_id, member in pool.get('members', {}).items(): + + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.OFFLINE) + + def test_update_health_member_unknown(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": { + "status": constants.OPEN, + "pools": { + "pool-id-1": { + "status": constants.UP, + "members": {"member-id-1": "blah"}}}}}, + "recv_time": time.time()} + + lb_ref = self._make_fake_lb_health_dict() + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + self.assertTrue(not self.member_repo.update.called) + + def test_update_health_member_down(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.UP, + "members": {"member-id-1": constants.DOWN} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', 
{}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.DEGRADED) + + for member_id, member in pool.get('members', {}).items(): + + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.ERROR) + + def test_update_health_member_missing_no_hm(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.UP, + "members": {} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict(health_monitor=False) + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + + self.member_repo.update.assert_not_called() + + def test_update_health_member_down_no_hm(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.UP, + "members": {"member-id-1": constants.MAINT} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict(health_monitor=False) + member1 = lb_ref['pools']['pool-id-1']['members']['member-id-1'] + member1[constants.OPERATING_STATUS] = constants.NO_MONITOR + + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + + self.member_repo.update.assert_any_call( + self.session_mock, 'member-id-1', + operating_status=constants.OFFLINE) + + def test_update_health_member_no_check(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.UP, + "members": {"member-id-1": + constants.NO_CHECK} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + + for member_id, member in pool.get('members', {}).items(): + + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.NO_MONITOR) + + 
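# The tests above pin the mapping from heartbeat member statuses to + # Octavia operating statuses: UP -> ONLINE, DOWN -> ERROR, DRAIN -> + # DRAINING, MAINT -> OFFLINE and NO_CHECK -> NO_MONITOR; an unrecognized + # status triggers no member update. The test below covers a member that is + # absent from the heartbeat because it is administratively disabled: such + # members are reported OFFLINE. + + 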
def test_update_health_member_admin_down(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": { + "status": constants.OPEN, + "pools": { + "pool-id-1": { + "status": constants.UP, + "members": { + "member-id-1": constants.UP}}}}}, + "recv_time": time.time()} + + lb_ref = self._make_fake_lb_health_dict(members=2) + + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + + for member_id, member in pool.get('members', {}).items(): + + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.ONLINE) + self.member_repo.update.assert_any_call( + self.session_mock, 'member-id-2', + operating_status=constants.OFFLINE) + + def test_update_health_list_full_member_down(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.FULL, "pools": { + "pool-id-1": {"status": constants.UP, + "members": {"member-id-1": constants.DOWN} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.DEGRADED) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.DEGRADED) + + for member_id, member in pool.get('members', {}).items(): + + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.ERROR) + + lb_ref['listeners']['listener-id-2'] = { + constants.OPERATING_STATUS: 'bogus'} + + self.amphora_health_repo.replace.reset_mock() + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(not self.amphora_health_repo.replace.called) + + def test_update_health_error(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.DOWN, + "members": {"member-id-1": constants.DOWN} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ERROR) + + for member_id, member in pool.get('members', {}).items(): + + 
self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.ERROR) + + # Test the logic code paths + def test_update_health_full(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.FULL, "pools": { + "pool-id-1": {"status": constants.DOWN, + "members": {"member-id-1": constants.DOWN} + } + } + }, + "listener-id-2": {"status": constants.FULL, "pools": { + "pool-id-2": {"status": constants.UP, + "members": {"member-id-2": constants.UP} + } + } + }, + "listener-id-3": {"status": constants.OPEN, "pools": { + "pool-id-3": {"status": constants.UP, + "members": {"member-id-3": constants.UP, + "member-id-31": constants.DOWN} + } + } + }, + "listener-id-4": { + "status": constants.OPEN, + "pools": { + "pool-id-4": { + "status": constants.UP, + "members": {"member-id-4": constants.DRAINING} + } + } + }, + "listener-id-5": { + "status": "bogus", + "pools": { + "pool-id-5": { + "status": "bogus", + "members": {"member-id-5": "bogus"} + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + # Build our own custom listeners/pools/members + for i in [1, 2, 3, 4, 5]: + + lb_ref['listeners'][f'listener-id-{i}'] = { + constants.OPERATING_STATUS: 'bogus', + 'protocol': constants.PROTOCOL_TCP, + 'enabled': True} + + if i == 3: + members_dict = {'member-id-3': { + constants.OPERATING_STATUS: 'bogus'}, 'member-id-31': { + constants.OPERATING_STATUS: 'bogus'}} + else: + members_dict = {f'member-id-{i}': { + constants.OPERATING_STATUS: 'bogus'}} + lb_ref['pools'][f'pool-id-{i}'] = { + 'members': members_dict, constants.OPERATING_STATUS: 'bogus'} + + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + + # test listener + self.listener_repo.update.assert_any_call( + self.session_mock, "listener-id-1", + operating_status=constants.DEGRADED) + self.listener_repo.update.assert_any_call( + self.session_mock, "listener-id-2", + operating_status=constants.DEGRADED) + self.pool_repo.update.assert_any_call( + self.session_mock, "pool-id-1", + operating_status=constants.ERROR) + self.pool_repo.update.assert_any_call( + self.session_mock, "pool-id-2", + operating_status=constants.ONLINE) + self.pool_repo.update.assert_any_call( + self.session_mock, "pool-id-3", + operating_status=constants.DEGRADED) + self.pool_repo.update.assert_any_call( + self.session_mock, "pool-id-4", + operating_status=constants.ONLINE) + + # Test code paths where objects are not found in the database + def test_update_health_not_found(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.UP, + "members": {"member-id-1": constants.UP} + } + } + } + }, + "recv_time": time.time() + } + + self.hm.listener_repo.update.side_effect = ( + [sqlalchemy.orm.exc.NoResultFound]) + self.hm.member_repo.update.side_effect = ( + [sqlalchemy.orm.exc.NoResultFound]) + self.hm.pool_repo.update.side_effect = ( + sqlalchemy.orm.exc.NoResultFound) + self.hm.loadbalancer_repo.update.side_effect = ( + [sqlalchemy.orm.exc.NoResultFound]) + + lb_ref = self._make_fake_lb_health_dict() + + lb_ref['pools']['pool-id-2'] = {constants.OPERATING_STATUS: 'bogus'} + + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_health_repo.replace.called) + + # test listener, member + for 
listener_id, listener in health.get('listeners', {}).items(): + + self.listener_repo.update.assert_any_call( + self.session_mock, listener_id, + operating_status=constants.ONLINE) + + for pool_id, pool in listener.get('pools', {}).items(): + + self.hm.pool_repo.update.assert_any_call( + self.session_mock, pool_id, + operating_status=constants.ONLINE) + + for member_id, member in pool.get('members', {}).items(): + + self.member_repo.update.assert_any_call( + self.session_mock, member_id, + operating_status=constants.ONLINE) + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_update_health_zombie(self, mock_driver): + health = {"id": self.FAKE_UUID_1, "listeners": {}} + + self.amphora_repo.get_lb_for_health_update.return_value = None + amp_mock = mock.MagicMock() + self.amphora_repo.get.return_value = amp_mock + self.hm.update_health(health, '192.0.2.1') + mock_driver.delete.assert_called_once_with( + amp_mock.compute_id) + + def test_update_health_no_status_change(self): + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": { + "listener-id-1": { + "status": constants.OPEN, "pools": { + "pool-id-1": { + "status": constants.UP, "members": { + "member-id-1": constants.UP + } + } + } + } + }, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict() + + # Start everything ONLINE + lb_ref[constants.OPERATING_STATUS] = constants.ONLINE + listener1 = lb_ref['listeners']['listener-id-1'] + listener1[constants.OPERATING_STATUS] = constants.ONLINE + pool1 = lb_ref['pools']['pool-id-1'] + pool1[constants.OPERATING_STATUS] = constants.ONLINE + member1 = pool1['members']['member-id-1'] + member1[constants.OPERATING_STATUS] = constants.ONLINE + + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.loadbalancer_repo.update.assert_not_called() + self.listener_repo.update.assert_not_called() + self.pool_repo.update.assert_not_called() + self.member_repo.update.assert_not_called() + + def test_update_health_lb_admin_down(self): + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": {}, + "recv_time": time.time()} + + lb_ref = self._make_fake_lb_health_dict(listener=False, pool=False) + lb_ref['enabled'] = False + self.hm.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + self.assertTrue(self.loadbalancer_repo.update.called) + self.loadbalancer_repo.update.assert_called_with( + self.mock_session(), self.FAKE_UUID_1, + operating_status='OFFLINE') + + def test_update_health_lb_admin_up(self): + health = { + "id": self.FAKE_UUID_1, + "listeners": {}, + "recv_time": time.time(), + "ver": 1} + + lb_ref = self._make_fake_lb_health_dict(listener=False, pool=False) + self.hm.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + self.assertTrue(self.loadbalancer_repo.update.called) + self.loadbalancer_repo.update.assert_called_with( + self.mock_session(), self.FAKE_UUID_1, + operating_status='ONLINE') + + def test_update_health_forbid_to_stale_udp_listener_amphora(self): + health = { + "id": self.FAKE_UUID_1, + "listeners": {}, + "recv_time": time.time() + } + + mock_lb = mock.Mock() + mock_lb.id = self.FAKE_UUID_1 + mock_lb.pools = [] + mock_lb.listeners = [] + mock_lb.provisioning_status = constants.ACTIVE + mock_lb.operating_status = 'blah' + + 
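# Amphorae with only UDP listeners whose pools have no usable members do + # not report those listeners in their heartbeat, so update_health adjusts + # the expected listener count; the amphora health record must still be + # refreshed so the amphora is not failed over as stale. + + 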
# The default pool of udp listener1 has no enabled member + mock_member1 = mock.Mock() + mock_member1.id = 'member-id-1' + mock_member1.enabled = False + mock_pool1 = mock.Mock() + mock_pool1.id = "pool-id-1" + mock_pool1.members = [mock_member1] + mock_listener1 = mock.Mock() + mock_listener1.id = 'listener-id-1' + mock_listener1.default_pool = mock_pool1 + mock_listener1.protocol = constants.PROTOCOL_UDP + + # The default pool of udp listener2 has no member + mock_pool2 = mock.Mock() + mock_pool2.id = "pool-id-2" + mock_pool2.members = [] + mock_listener2 = mock.Mock() + mock_listener2.id = 'listener-id-2' + mock_listener2.default_pool = mock_pool2 + mock_listener2.protocol = constants.PROTOCOL_UDP + + # The udp listener3 has no default_pool + mock_listener3 = mock.Mock() + mock_listener3.id = 'listener-id-3' + mock_listener3.default_pool = None + mock_listener3.protocol = constants.PROTOCOL_UDP + + mock_lb.listeners.extend([mock_listener1, mock_listener2, + mock_listener3]) + mock_lb.pools.extend([mock_pool1, mock_pool2]) + + self.loadbalancer_repo.get.return_value = mock_lb + + lb_ref = self._make_fake_lb_health_dict( + listener_protocol=constants.PROTOCOL_UDP) + lb_ref['listeners']['listener-id-2'] = { + constants.OPERATING_STATUS: 'bogus', + 'protocol': constants.PROTOCOL_UDP, + 'enabled': True} + lb_ref['listeners']['listener-id-3'] = { + constants.OPERATING_STATUS: 'bogus', + 'protocol': constants.PROTOCOL_UDP, + 'enabled': True} + + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + self.assertTrue(self.loadbalancer_repo.update.called) + self.assertTrue(self.amphora_health_repo.replace.called) + + def test_update_health_no_db_lb(self): + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": {}, + "recv_time": time.time() + } + self.hm.amphora_repo.get_lb_for_health_update.return_value = {} + + with mock.patch('stevedore.driver.DriverManager.driver') as m_driver: + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(m_driver.delete.called) + + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + self.assertFalse(self.amphora_health_repo.replace.called) + + # Test missing amp in addition to missing lb DB record + self.amphora_repo.get_lb_for_health_update.reset_mock() + self.amphora_health_repo.replace.reset_mock() + + mock_amphora = mock.MagicMock() + mock_amphora.load_balancer_id = None + self.amphora_repo.get.return_value = mock_amphora + + self.hm.update_health(health, '192.0.2.1') + + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + self.assertTrue(self.amphora_repo.get.called) + self.assertTrue(self.amphora_health_repo.replace.called) + + def test_update_health_with_without_udp_listeners(self): + health = { + "id": self.FAKE_UUID_1, + "listeners": { + "listener-id-1": {"status": constants.OPEN, "pools": { + "pool-id-1": {"status": constants.UP, + "members": {"member-id-1": constants.DOWN} + } + }}}, + "recv_time": time.time() + } + + # Test with a TCP listener + lb_ref = self._make_fake_lb_health_dict( + listener_protocol=constants.PROTOCOL_TCP) + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + # We should have no calls to listener_repo.get, because we skip + # running the extra UDP function + self.assertFalse(self.listener_repo.get.called) + + # Reset the mocks to try again + self.listener_repo.reset_mock() + + # Test with a UDP 
listener + lb_ref = self._make_fake_lb_health_dict( + listener_protocol=constants.PROTOCOL_UDP) + self.amphora_repo.get_lb_for_health_update.return_value = lb_ref + + self.hm.update_health(health, '192.0.2.1') + # This time we should have a call to listener_repo.get because the + # UDP helper function is triggered + self.assertTrue(self.listener_repo.get.called) + + def test_update_listener_count_for_UDP(self): + mock_lb, mock_listener1, mock_pool1, mock_members = ( + self._make_mock_lb_tree()) + + mock_listener1.protocol = constants.PROTOCOL_TCP + + self.hm.listener_repo.get.return_value = mock_listener1 + + # Test only TCP listeners + lb_ref = self._make_fake_lb_health_dict( + listener_protocol=constants.PROTOCOL_TCP) + result = self.hm._update_listener_count_for_UDP( + 'bogus_session', lb_ref, 0) + self.assertEqual(0, result) + + # Test with a valid member + lb_ref = self._make_fake_lb_health_dict( + listener_protocol=constants.PROTOCOL_UDP) + mock_listener1.protocol = constants.PROTOCOL_UDP + + result = self.hm._update_listener_count_for_UDP( + 'bogus_session', lb_ref, 1) + self.assertEqual(1, result) + + # Test with a disabled member + mock_listener1.protocol = constants.PROTOCOL_UDP + mock_members[0].enabled = False + + result = self.hm._update_listener_count_for_UDP( + 'bogus_session', lb_ref, 1) + self.assertEqual(0, result) + + def test_update_status(self): + + # Test update with the same operating status + self.hm._update_status( + 'fake_session', self.loadbalancer_repo, constants.LOADBALANCER, + 1, 'ONLINE', 'ONLINE') + self.assertFalse(self.loadbalancer_repo.update.called) + + self.loadbalancer_repo.update.reset_mock() + + # Test stream with provisioning sync + self.hm._update_status( + 'fake_session', self.loadbalancer_repo, constants.LOADBALANCER, + 1, 'ONLINE', 'OFFLINE') + self.assertTrue(self.loadbalancer_repo.update.called) diff --git a/octavia/tests/unit/amphorae/drivers/keepalived/__init__.py b/octavia/tests/unit/amphorae/drivers/keepalived/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/keepalived/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/drivers/keepalived/jinja/__init__.py b/octavia/tests/unit/amphorae/drivers/keepalived/jinja/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/keepalived/jinja/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
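The next file pins the exact keepalived VRRP configuration text the jinja templater renders for a MASTER/BACKUP amphora pair. For orientation, the templater under test is driven roughly as follows (a minimal sketch based only on the calls these tests make; lb and amphora are assumed to carry the vrrp_* fields populated in the test's setUp):

    from octavia.amphorae.drivers.keepalived.jinja import jinja_cfg

    templater = jinja_cfg.KeepalivedJinjaTemplater()
    # amp_net_config describes the VIP subnet (cidr, gateway_ip, host_routes)
    # plus any additional VIPs, as a data-model dict or a plain dict.
    config_text = templater.build_keepalived_config(lb, amphora,
                                                    amp_net_config)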
diff --git a/octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py b/octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py new file mode 100644 index 0000000000..870a6d9a6d --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py @@ -0,0 +1,358 @@ +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +import copy +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +from octavia.amphorae.drivers.keepalived.jinja import jinja_cfg +from octavia.common import constants +from octavia.network import data_models as n_data_models +import octavia.tests.unit.base as base + + +class TestVRRPRestDriver(base.TestCase): + + def setUp(self): + super().setUp() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="haproxy_amphora", base_path='/tmp/test') + conf.config(group="keepalived_vrrp", vrrp_garp_refresh_interval=5) + conf.config(group="keepalived_vrrp", vrrp_garp_refresh_count=2) + conf.config(group="keepalived_vrrp", vrrp_check_interval=5) + conf.config(group="keepalived_vrrp", vrrp_fail_count=2) + conf.config(group="keepalived_vrrp", vrrp_success_count=2) + + self.templater = jinja_cfg.KeepalivedJinjaTemplater() + + self.amphora1 = mock.MagicMock() + self.amphora1.status = constants.AMPHORA_ALLOCATED + self.amphora1.vrrp_ip = '10.0.0.1' + self.amphora1.role = constants.ROLE_MASTER + self.amphora1.vrrp_interface = 'eth1' + self.amphora1.vrrp_id = 1 + self.amphora1.vrrp_priority = 100 + + self.amphora2 = mock.MagicMock() + self.amphora2.status = constants.AMPHORA_ALLOCATED + self.amphora2.vrrp_ip = '10.0.0.2' + self.amphora2.role = constants.ROLE_BACKUP + self.amphora2.vrrp_interface = 'eth1' + self.amphora2.vrrp_id = 1 + self.amphora2.vrrp_priority = 90 + + self.lb = mock.MagicMock() + self.lb.amphorae = [self.amphora1, self.amphora2] + self.lb.vrrp_group.vrrp_group_name = 'TESTGROUP' + self.lb.vrrp_group.vrrp_auth_type = constants.VRRP_AUTH_DEFAULT + self.lb.vrrp_group.vrrp_auth_pass = 'TESTPASSWORD' + self.lb.vip.ip_address = '10.1.0.5' + self.lb.vrrp_group.advert_int = 10 + + self.ref_conf = ( + "vrrp_script check_script {\n" + " script /tmp/test/vrrp/check_script.sh\n" + " interval 5\n" + " fall 2\n" + " rise 2\n" + "}\n" + "\n" + "vrrp_instance TESTGROUP {\n" + " interface eth1\n" + " virtual_router_id 1\n" + " priority 100\n" + " nopreempt\n" + " accept\n" + " garp_master_refresh 5\n" + " garp_master_refresh_repeat 2\n" + " advert_int 10\n" + " authentication {\n" + " auth_type PASS\n" + " auth_pass TESTPASSWORD\n" + " }\n" + "\n" + " unicast_src_ip 10.0.0.1\n" + " unicast_peer {\n" + " 10.0.0.2\n" + " }\n" + "\n" + " virtual_ipaddress {\n" + " 10.1.0.5\n" + " }\n\n" + " virtual_ipaddress_excluded {\n" + " }\n\n" + " virtual_routes {\n" + " 10.1.0.0/24 dev eth1 src 10.1.0.5 scope link table 1\n" + " default via 10.1.0.1 dev eth1 onlink table 1\n" + " }\n\n" + " virtual_rules {\n" + " 
from 10.1.0.5/32 table 1 priority 100\n" + " }\n\n" + " track_script {\n" + " check_script\n" + " }\n" + "}") + + self.amphora1v6 = copy.deepcopy(self.amphora1) + self.amphora1v6.vrrp_ip = '2001:db8::10' + self.amphora2v6 = copy.deepcopy(self.amphora2) + self.amphora2v6.vrrp_ip = '2001:db8::11' + self.lbv6 = copy.deepcopy(self.lb) + self.lbv6.amphorae = [self.amphora1v6, self.amphora2v6] + self.lbv6.vip.ip_address = '2001:db8::15' + + self.ref_v6_conf = ( + "vrrp_script check_script {\n" + " script /tmp/test/vrrp/check_script.sh\n" + " interval 5\n" + " fall 2\n" + " rise 2\n" + "}\n" + "\n" + "vrrp_instance TESTGROUP {\n" + " interface eth1\n" + " virtual_router_id 1\n" + " priority 100\n" + " nopreempt\n" + " accept\n" + " garp_master_refresh 5\n" + " garp_master_refresh_repeat 2\n" + " advert_int 10\n" + " authentication {\n" + " auth_type PASS\n" + " auth_pass TESTPASSWORD\n" + " }\n" + "\n" + " unicast_src_ip 2001:db8::10\n" + " unicast_peer {\n" + " 2001:db8::11\n" + " }\n" + "\n" + " virtual_ipaddress {\n" + " 2001:db8::15\n" + " }\n\n" + " virtual_ipaddress_excluded {\n" + " }\n\n" + " virtual_routes {\n" + " 2001:db8::/64 dev eth1 src " + "2001:db8::15 scope link table 1\n" + " default via 2001:db8::ff dev eth1 onlink table 1\n" + " }\n\n" + " virtual_rules {\n" + " from 2001:db8::15/128 table 1 priority 100\n" + " }\n\n" + " track_script {\n" + " check_script\n" + " }\n" + "}") + + self.ref_v4_v6_conf = ( + "vrrp_script check_script {\n" + " script /tmp/test/vrrp/check_script.sh\n" + " interval 5\n" + " fall 2\n" + " rise 2\n" + "}\n" + "\n" + "vrrp_instance TESTGROUP {\n" + " interface eth1\n" + " virtual_router_id 1\n" + " priority 100\n" + " nopreempt\n" + " accept\n" + " garp_master_refresh 5\n" + " garp_master_refresh_repeat 2\n" + " advert_int 10\n" + " authentication {\n" + " auth_type PASS\n" + " auth_pass TESTPASSWORD\n" + " }\n" + "\n" + " unicast_src_ip 10.0.0.1\n" + " unicast_peer {\n" + " 10.0.0.2\n" + " }\n" + "\n" + " virtual_ipaddress {\n" + " 10.1.0.5\n" + " }\n\n" + " virtual_ipaddress_excluded {\n" + " 2001:db8::15\n" + " }\n\n" + " virtual_routes {\n" + " 10.1.0.0/24 dev eth1 src 10.1.0.5 scope link table 1\n" + " default via 10.1.0.1 dev eth1 onlink table 1\n" + " 2001:db8::/64 dev eth1 src " + "2001:db8::15 scope link table 1\n" + " default via 2001:db8::ff dev eth1 onlink table 1\n" + " }\n\n" + " virtual_rules {\n" + " from 10.1.0.5/32 table 1 priority 100\n" + " from 2001:db8::15/128 table 1 priority 100\n" + " }\n\n" + " track_script {\n" + " check_script\n" + " }\n" + "}") + + self.ref_v6_v4_conf = ( + "vrrp_script check_script {\n" + " script /tmp/test/vrrp/check_script.sh\n" + " interval 5\n" + " fall 2\n" + " rise 2\n" + "}\n" + "\n" + "vrrp_instance TESTGROUP {\n" + " interface eth1\n" + " virtual_router_id 1\n" + " priority 100\n" + " nopreempt\n" + " accept\n" + " garp_master_refresh 5\n" + " garp_master_refresh_repeat 2\n" + " advert_int 10\n" + " authentication {\n" + " auth_type PASS\n" + " auth_pass TESTPASSWORD\n" + " }\n" + "\n" + " unicast_src_ip 2001:db8::10\n" + " unicast_peer {\n" + " 2001:db8::11\n" + " }\n" + "\n" + " virtual_ipaddress {\n" + " 2001:db8::15\n" + " }\n\n" + " virtual_ipaddress_excluded {\n" + " 10.1.0.5\n" + " }\n\n" + " virtual_routes {\n" + " 2001:db8::/64 dev eth1 src " + "2001:db8::15 scope link table 1\n" + " default via 2001:db8::ff dev eth1 onlink table 1\n" + " 10.1.0.0/24 dev eth1 src 10.1.0.5 scope link table 1\n" + " default via 10.1.0.1 dev eth1 onlink table 1\n" + " }\n\n" + " virtual_rules {\n" + " from 
2001:db8::15/128 table 1 priority 100\n" + " from 10.1.0.5/32 table 1 priority 100\n" + " }\n\n" + " track_script {\n" + " check_script\n" + " }\n" + "}") + + def test_build_keepalived_config(self): + mock_subnet = n_data_models.Subnet() + mock_subnet.cidr = '10.1.0.0/24' + mock_subnet.gateway_ip = '10.1.0.1' + mock_subnet.host_routes = [] + amp_net_config = n_data_models.AmphoraNetworkConfig( + vip_subnet=mock_subnet).to_dict(recurse=True) + + config = self.templater.build_keepalived_config( + self.lb, self.amphora1, amp_net_config) + self.assertEqual(self.ref_conf, config) + + def test_build_keepalived_ipv6_config(self): + mock_subnet = n_data_models.Subnet() + mock_subnet.cidr = '2001:db8::/64' + mock_subnet.gateway_ip = '2001:db8::ff' + mock_subnet.host_routes = [] + amp_net_config = n_data_models.AmphoraNetworkConfig( + vip_subnet=mock_subnet).to_dict(recurse=True) + + config = self.templater.build_keepalived_config( + self.lbv6, self.amphora1v6, amp_net_config) + self.assertEqual(self.ref_v6_conf, config) + + def test_build_keepalived_config_with_additional_vips(self): + mock_subnet1 = n_data_models.Subnet() + mock_subnet1.cidr = '10.1.0.0/24' + mock_subnet1.gateway_ip = '10.1.0.1' + mock_subnet1.host_routes = [] + mock_subnet2 = n_data_models.Subnet() + mock_subnet2.cidr = '2001:db8::/64' + mock_subnet2.gateway_ip = '2001:db8::ff' + mock_subnet2.host_routes = [] + + # Use IPv4 as the primary VIP, IPv6 as secondary + additional_vip = n_data_models.AdditionalVipData( + ip_address=self.lbv6.vip.ip_address, + subnet=mock_subnet2 + ) + amp_net_config = n_data_models.AmphoraNetworkConfig( + vip_subnet=mock_subnet1, + additional_vip_data=[additional_vip]).to_dict(recurse=True) + + config = self.templater.build_keepalived_config( + self.lb, self.amphora1, amp_net_config) + self.assertEqual(self.ref_v4_v6_conf, config) + + # Use IPv6 as the primary VIP, IPv4 as secondary + additional_vip = n_data_models.AdditionalVipData( + ip_address=self.lb.vip.ip_address, + subnet=mock_subnet1 + ) + amp_net_config = n_data_models.AmphoraNetworkConfig( + vip_subnet=mock_subnet2, + additional_vip_data=[additional_vip]).to_dict(recurse=True) + + config = self.templater.build_keepalived_config( + self.lbv6, self.amphora1v6, amp_net_config) + self.assertEqual(self.ref_v6_v4_conf, config) + + def test_build_keepalived_config_with_additional_vips_v2(self): + subnet1 = { + "cidr": '10.1.0.0/24', + "gateway_ip": '10.1.0.1', + "host_routes": [] + } + subnet2 = { + "cidr": '2001:db8::/64', + "gateway_ip": '2001:db8::ff', + "host_routes": [] + } + + # Use IPv4 as the primary VIP, IPv6 as secondary + additional_vip = { + "ip_address": self.lbv6.vip.ip_address, + "subnet": subnet2 + } + amp_net_config = { + "vip_subnet": subnet1, + "additional_vip_data": [additional_vip] + } + + config = self.templater.build_keepalived_config( + self.lb, self.amphora1, amp_net_config) + self.assertEqual(self.ref_v4_v6_conf, config) + + # Use IPv6 as the primary VIP, IPv4 as secondary + additional_vip = { + "ip_address": self.lb.vip.ip_address, + "subnet": subnet1 + } + amp_net_config = { + "vip_subnet": subnet2, + "additional_vip_data": [additional_vip] + } + + config = self.templater.build_keepalived_config( + self.lbv6, self.amphora1v6, amp_net_config) + self.assertEqual(self.ref_v6_v4_conf, config) diff --git a/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py b/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py new file mode 100644 index 0000000000..f626042829 --- /dev/null +++ 
b/octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py @@ -0,0 +1,146 @@ +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +from unittest import mock + +from oslo_utils import uuidutils + +from octavia.amphorae.drivers.keepalived import vrrp_rest_driver +from octavia.common import constants +from octavia.network import data_models as n_data_models +import octavia.tests.unit.base as base + +# Version 1.0 is functionally identical to all versions before it +API_VERSION = '1.0' + + +class TestVRRPRestDriver(base.TestCase): + + def setUp(self): + self.keepalived_mixin = vrrp_rest_driver.KeepalivedAmphoraDriverMixin() + self.keepalived_mixin.clients = { + 'base': mock.MagicMock(), + API_VERSION: mock.MagicMock()} + self.keepalived_mixin._populate_amphora_api_version = mock.MagicMock() + self.clients = self.keepalived_mixin.clients + self.FAKE_CONFIG = 'FAKE CONFIG' + self.lb_mock = mock.MagicMock() + self.amphora_mock = mock.MagicMock() + self.amphora_mock.id = uuidutils.generate_uuid() + self.amphora_mock.status = constants.AMPHORA_ALLOCATED + self.amphora_mock.api_version = API_VERSION + self.lb_mock.amphorae = [self.amphora_mock] + self.amphorae_network_config = {} + vip_subnet = mock.MagicMock() + self.vip_cidr = vip_subnet.cidr = '192.0.2.0/24' + one_amp_net_config = n_data_models.AmphoraNetworkConfig( + vip_subnet=vip_subnet + ) + self.amphorae_network_config[self.amphora_mock.id] = one_amp_net_config + + super().setUp() + + @mock.patch('octavia.amphorae.drivers.keepalived.jinja.' 
+ 'jinja_cfg.KeepalivedJinjaTemplater.build_keepalived_config') + def test_update_vrrp_conf(self, mock_templater): + + mock_templater.return_value = self.FAKE_CONFIG + + self.keepalived_mixin.update_vrrp_conf( + self.lb_mock, self.amphorae_network_config, self.amphora_mock) + + mock_templater.assert_called_with( + self.lb_mock, self.amphora_mock, + self.amphorae_network_config[self.amphora_mock.id]) + self.clients[API_VERSION].upload_vrrp_config.assert_called_once_with( + self.amphora_mock, + self.FAKE_CONFIG) + + # Test with amphorav2 amphorae_network_config list of dicts + mock_templater.reset_mock() + self.clients[API_VERSION].upload_vrrp_config.reset_mock() + v2_amphorae_network_config = {} + vip_subnet_dict = { + constants.VIP_SUBNET: {constants.CIDR: '192.0.2.0/24'}} + v2_amphorae_network_config[self.amphora_mock.id] = vip_subnet_dict + + self.keepalived_mixin.update_vrrp_conf( + self.lb_mock, v2_amphorae_network_config, self.amphora_mock) + + self.clients[API_VERSION].upload_vrrp_config.assert_called_once_with( + self.amphora_mock, + self.FAKE_CONFIG) + + # Test amphora not in AMPHORA_ALLOCATED state + mock_templater.reset_mock() + self.clients[API_VERSION].upload_vrrp_config.reset_mock() + ready_amphora_mock = mock.MagicMock() + ready_amphora_mock.id = uuidutils.generate_uuid() + ready_amphora_mock.status = constants.ERROR + ready_amphora_mock.api_version = API_VERSION + + self.keepalived_mixin.update_vrrp_conf( + self.lb_mock, self.amphorae_network_config, ready_amphora_mock) + + mock_templater.assert_not_called() + self.clients[API_VERSION].upload_vrrp_config.assert_not_called() + + def test_stop_vrrp_service(self): + + self.keepalived_mixin.stop_vrrp_service(self.lb_mock) + + self.clients[API_VERSION].stop_vrrp.assert_called_once_with( + self.amphora_mock) + + def test_start_vrrp_service(self): + + self.keepalived_mixin.start_vrrp_service(self.amphora_mock) + + populate_mock = self.keepalived_mixin._populate_amphora_api_version + populate_mock.assert_called_once_with(self.amphora_mock, + timeout_dict=None) + self.clients[API_VERSION].start_vrrp.assert_called_once_with( + self.amphora_mock, timeout_dict=None) + + # Test amphora not in AMPHORA_ALLOCATED state + self.clients[API_VERSION].start_vrrp.reset_mock() + ready_amphora_mock = mock.MagicMock() + ready_amphora_mock.id = uuidutils.generate_uuid() + ready_amphora_mock.status = constants.ERROR + ready_amphora_mock.api_version = API_VERSION + + self.keepalived_mixin.start_vrrp_service(ready_amphora_mock) + + self.clients[API_VERSION].start_vrrp.assert_not_called() + + # With timeout_dict + self.clients[API_VERSION].start_vrrp.reset_mock() + populate_mock.reset_mock() + + timeout_dict = mock.Mock() + self.keepalived_mixin.start_vrrp_service(self.amphora_mock, + timeout_dict=timeout_dict) + + populate_mock = self.keepalived_mixin._populate_amphora_api_version + populate_mock.assert_called_once_with(self.amphora_mock, + timeout_dict=timeout_dict) + self.clients[API_VERSION].start_vrrp.assert_called_once_with( + self.amphora_mock, timeout_dict=timeout_dict) + + def test_reload_vrrp_service(self): + + self.keepalived_mixin.reload_vrrp_service(self.lb_mock) + + self.clients[API_VERSION].reload_vrrp.assert_called_once_with( + self.amphora_mock) diff --git a/octavia/tests/unit/amphorae/drivers/noop_driver/__init__.py b/octavia/tests/unit/amphorae/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/noop_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed 
under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py b/octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py new file mode 100644 index 0000000000..548224bf9b --- /dev/null +++ b/octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py @@ -0,0 +1,163 @@ +# Copyright 2014, Author: Min Wang,German Eichberger +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_utils import uuidutils + +from octavia.amphorae.drivers.noop_driver import driver +from octavia.common import constants +from octavia.common import data_models +from octavia.network import data_models as network_models +from octavia.tests.unit import base + + +FAKE_UUID_1 = uuidutils.generate_uuid() + + +class TestNoopAmphoraLoadBalancerDriver(base.TestCase): + FAKE_UUID_1 = uuidutils.generate_uuid() + + def setUp(self): + super().setUp() + self.driver = driver.NoopAmphoraLoadBalancerDriver() + self.listener = data_models.Listener() + self.listener.id = uuidutils.generate_uuid() + self.listener.protocol_port = 80 + self.vip = data_models.Vip() + self.vip.ip_address = "192.51.100.1" + self.amphora = data_models.Amphora() + self.amphora.id = self.FAKE_UUID_1 + self.load_balancer = data_models.LoadBalancer( + id=FAKE_UUID_1, amphorae=[self.amphora], vip=self.vip, + listeners=[self.listener]) + self.listener.load_balancer = self.load_balancer + self.network = network_models.Network(id=self.FAKE_UUID_1) + self.port = network_models.Port(id=uuidutils.generate_uuid()) + self.amphorae_net_configs = { + self.amphora.id: + network_models.AmphoraNetworkConfig( + amphora=self.amphora, + vip_subnet=network_models.Subnet(id=self.FAKE_UUID_1)) + } + self.pem_file = 'test_pem_file' + self.agent_config = 'test agent config' + self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1, + constants.REQ_READ_TIMEOUT: 2, + constants.CONN_MAX_RETRIES: 3, + constants.CONN_RETRY_INTERVAL: 4} + + @mock.patch('octavia.db.api.get_session') + def test_update_amphora_listeners(self, mock_session): + self.driver.update_amphora_listeners(self.load_balancer, self.amphora, + self.timeout_dict) + self.assertEqual((self.listener, self.amphora.id, self.timeout_dict, + 'update_amp'), + self.driver.driver.amphoraconfig[( + self.listener.id, + self.amphora.id)]) + + def test_update(self): + self.driver.update(self.load_balancer) + self.assertEqual(([self.listener], self.vip, 'active'), + self.driver.driver.amphoraconfig[( + (self.listener.protocol_port,), + self.vip.ip_address)]) + + 
def test_start(self): + mock_amphora = mock.MagicMock() + mock_amphora.id = '321' + self.driver.start(self.load_balancer, amphora=mock_amphora) + self.assertEqual((self.load_balancer, mock_amphora, 'start'), + self.driver.driver.amphoraconfig[( + self.load_balancer.id, '321')]) + + def test_reload(self): + mock_amphora = mock.MagicMock() + mock_amphora.id = '321' + self.driver.reload(self.load_balancer, amphora=mock_amphora) + self.assertEqual((self.load_balancer, mock_amphora, 'reload'), + self.driver.driver.amphoraconfig[( + self.load_balancer.id, '321')]) + + def test_delete(self): + self.driver.delete(self.listener) + self.assertEqual((self.listener, self.vip, 'delete'), + self.driver.driver.amphoraconfig[( + self.listener.protocol_port, + self.vip.ip_address)]) + + def test_get_info(self): + self.driver.get_info(self.amphora) + self.assertEqual((self.amphora.id, 'get_info'), + self.driver.driver.amphoraconfig[ + self.amphora.id]) + + def test_get_diagnostics(self): + self.driver.get_diagnostics(self.amphora) + self.assertEqual((self.amphora.id, 'get_diagnostics'), + self.driver.driver.amphoraconfig[ + self.amphora.id]) + + def test_finalize_amphora(self): + self.driver.finalize_amphora(self.amphora) + self.assertEqual((self.amphora.id, 'finalize amphora'), + self.driver.driver.amphoraconfig[ + self.amphora.id]) + + def test_post_network_plug(self): + self.driver.post_network_plug( + self.amphora, self.port, + self.amphorae_net_configs[self.amphora.id]) + self.assertEqual((self.amphora.id, self.port.id, 'post_network_plug'), + self.driver.driver.amphoraconfig[( + self.amphora.id, self.port.id)]) + + def test_post_vip_plug(self): + port = network_models.Port(id=uuidutils.generate_uuid()) + subnet = network_models.Subnet(id=uuidutils.generate_uuid()) + self.driver.post_vip_plug(self.amphora, self.load_balancer, + self.amphorae_net_configs, + port, subnet) + expected_method_and_args = (self.load_balancer.id, + self.amphorae_net_configs, + 'post_vip_plug') + actual_method_and_args = self.driver.driver.amphoraconfig[( + self.load_balancer.id, id(self.amphorae_net_configs) + )] + self.assertEqual(expected_method_and_args, actual_method_and_args) + + def test_upload_cert_amp(self): + self.driver.upload_cert_amp(self.amphora, self.pem_file) + self.assertEqual( + (self.amphora.id, self.pem_file, 'update_amp_cert_file'), + self.driver.driver.amphoraconfig[( + self.amphora.id, self.pem_file)]) + + def test_update_agent_config(self): + self.driver.update_amphora_agent_config(self.amphora, + self.agent_config) + self.assertEqual( + (self.amphora.id, self.agent_config, + 'update_amphora_agent_config'), + self.driver.driver.amphoraconfig[( + self.amphora.id, self.agent_config)]) + + def test_get_interface_from_ip(self): + result = self.driver.get_interface_from_ip(self.amphora, + '198.51.100.99') + self.assertEqual('noop0', result) + + result = self.driver.get_interface_from_ip(self.amphora, + '198.51.100.9') + self.assertIsNone(result) diff --git a/octavia/tests/unit/api/__init__.py b/octavia/tests/unit/api/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/common/__init__.py b/octavia/tests/unit/api/common/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/common/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/common/base.py b/octavia/tests/unit/api/common/base.py new file mode 100644 index 0000000000..d077ede4e5 --- /dev/null +++ b/octavia/tests/unit/api/common/base.py @@ -0,0 +1,225 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
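The new octavia/tests/unit/api/common/base.py below supplies reusable assertion helpers plus per-attribute mixins (id, project_id, name, description, enabled, statuses); a concrete API type test composes the mixins and only provides _type and _mandatory_fields. A minimal usage sketch, in which the concrete wsme type lb_type.LoadBalancerPOST and its mandatory field are assumptions for illustration, not part of this patch:

    from oslo_utils import uuidutils

    # Hypothetical composition of the mixins defined below; the concrete
    # type class and mandatory field are assumed, not upstream code.
    class TestLoadBalancerPOST(BaseTypesTest, TestIdMixin,
                               TestProjectIdMixin, TestNameMixin,
                               TestEnabledMixin):
        _type = lb_type.LoadBalancerPOST  # assumed concrete type
        _mandatory_fields = {'vip_subnet_id': uuidutils.generate_uuid()}

Each inherited test_* method then runs against the concrete type with no further boilerplate.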
+ +from oslo_utils import uuidutils +from wsme import exc +from wsme.rest import json as wsme_json + +from octavia.api.common import types as base_type +from octavia.common import constants +from octavia.tests.unit import base + + +def build_body(mandatory_fields, extra_attributes): + body = {} + for key in mandatory_fields: + body[key] = mandatory_fields[key] + for key in extra_attributes: + body[key] = extra_attributes[key] + return body + + +class BaseTypesTest(base.TestCase): + _type = base_type.BaseType + _mandatory_fields = {} + + +class BaseTestUuid(base.TestCase): + + def assert_uuid_attr(self, attr): + kwargs = {attr: uuidutils.generate_uuid()} + self._type(**kwargs) + + def assert_uuid_attr_fail_with_integer(self, attr): + kwargs = {attr: 1} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + def assert_uuid_attr_fail_with_short_str(self, attr): + kwargs = {attr: '12345'} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + def assert_uuid_attr_fail_with_shorter_than_uuid(self, attr): + kwargs = {attr: uuidutils.generate_uuid()[1:]} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + def assert_uuid_attr_fail_with_longer_than_uuid(self, attr): + kwargs = {attr: uuidutils.generate_uuid() + "0"} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + +class BaseTestString(base.TestCase): + + def _default_min_max_lengths(self, min_length=None, max_length=None): + if max_length is None: + if min_length is None: + max_length = 255 + min_length = 2 + else: + max_length = min_length + 1 + else: + if min_length is None: + min_length = max_length - 1 + return min_length, max_length + + def assert_string_attr(self, attr, min_length=None, max_length=None): + min_length, max_length = self._default_min_max_lengths(min_length, + max_length) + string_val = 'a' * (max_length - 1) + kwargs = {attr: string_val} + self._type(**kwargs) + + def assert_string_attr_min_length(self, attr, min_length): + min_length, max_length = self._default_min_max_lengths(min_length) + string_val = 'a' * (min_length - 1) + kwargs = {attr: string_val} + # No point in testing if min_length is <= 0 + if min_length > 0: + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + def assert_string_attr_max_length(self, attr, max_length=None): + min_length, max_length = self._default_min_max_lengths( + max_length=max_length) + string_val = 'a' * (max_length + 1) + kwargs = {attr: string_val} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + +class BaseTestBool(base.TestCase): + + def assert_bool_attr(self, attr): + kwargs = {attr: True} + self.assertIsNotNone(self._type(**kwargs)) + kwargs = {attr: False} + self.assertIsNotNone(self._type(**kwargs)) + + def assert_bool_attr_non_bool(self, attr): + kwargs = {attr: 'test'} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + +class TestIdMixin(BaseTestUuid): + id_attr = 'id' + + def test_id(self): + self.assert_uuid_attr(self.id_attr) + self.assert_uuid_attr_fail_with_integer(self.id_attr) + self.assert_uuid_attr_fail_with_short_str(self.id_attr) + self.assert_uuid_attr_fail_with_shorter_than_uuid(self.id_attr) + self.assert_uuid_attr_fail_with_longer_than_uuid(self.id_attr) + + def test_id_readonly(self): + body = build_body(self._mandatory_fields, + {self.id_attr: uuidutils.generate_uuid()}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + +class TestProjectIdMixin(BaseTestUuid): + project_id_attr = 'project_id' + + def test_project_id(self): + 
self.assert_uuid_attr(self.project_id_attr) + self.assert_uuid_attr_fail_with_integer(self.project_id_attr) + self.assert_uuid_attr_fail_with_short_str(self.project_id_attr) + self.assert_uuid_attr_fail_with_shorter_than_uuid(self.project_id_attr) + self.assert_uuid_attr_fail_with_longer_than_uuid(self.project_id_attr) + + def test_project_id_readonly(self): + body = build_body(self._mandatory_fields, + {self.project_id_attr: uuidutils.generate_uuid()}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + +class TestNameMixin(BaseTestString): + name_attr = 'name' + + def test_name(self): + self.assert_string_attr(self.name_attr, min_length=0, max_length=255) + self.assert_string_attr_min_length(self.name_attr, 0) + self.assert_string_attr_max_length(self.name_attr, 255) + + def test_editable_name(self): + name = "Name" + body = build_body(self._mandatory_fields, {self.name_attr: name}) + type_instance = wsme_json.fromjson(self._type, body) + self.assertEqual(name, type_instance.name) + + +class TestDescriptionMixin(BaseTestString): + description_attr = 'description' + + def test_description(self): + self.assert_string_attr(self.description_attr, min_length=0, + max_length=255) + self.assert_string_attr_min_length(self.description_attr, 0) + self.assert_string_attr_max_length(self.description_attr, 255) + + def test_editable_description(self): + description = "Description" + body = build_body(self._mandatory_fields, + {self.description_attr: description}) + type_instance = wsme_json.fromjson(self._type, body) + self.assertEqual(description, type_instance.description) + + +class TestEnabledMixin(BaseTestBool): + enabled_attr = 'enabled' + + def test_enabled(self): + self.assert_bool_attr(self.enabled_attr) + self.assert_bool_attr_non_bool(self.enabled_attr) + + def test_default_enabled_true(self): + body = build_body(self._mandatory_fields, {}) + type_instance = wsme_json.fromjson(self._type, body) + self.assertTrue(type_instance.enabled) + + def test_editable_enabled(self): + body = build_body(self._mandatory_fields, {"enabled": False}) + type_instance = wsme_json.fromjson(self._type, body) + self.assertFalse(type_instance.enabled) + + +class TestProvisioningStatusMixin(BaseTestString): + provisioning_attr = 'provisioning_status' + + def test_provisioning_status(self): + self.assert_string_attr(self.provisioning_attr, min_length=0, + max_length=16) + self.assert_string_attr_min_length(self.provisioning_attr, 0) + self.assert_string_attr_max_length(self.provisioning_attr, 16) + + def test_provisioning_status_readonly(self): + status = constants.ACTIVE + body = build_body(self._mandatory_fields, + {self.provisioning_attr: status}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + +class TestOperatingStatusMixin(BaseTestString): + operating_attr = 'operating_status' + + def test_operating_status(self): + self.assert_string_attr(self.operating_attr, min_length=0, + max_length=16) + self.assert_string_attr_min_length(self.operating_attr, 0) + self.assert_string_attr_max_length(self.operating_attr, 16) + + def test_operating_status_readonly(self): + status = constants.ONLINE + body = build_body(self._mandatory_fields, + {self.operating_attr: status}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) diff --git a/octavia/tests/unit/api/common/test_pagination.py b/octavia/tests/unit/api/common/test_pagination.py new file mode 100644 index 0000000000..58480f8f50 --- /dev/null +++ 
b/octavia/tests/unit/api/common/test_pagination.py @@ -0,0 +1,315 @@ +# Copyright 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.api.common import pagination +from octavia.common import exceptions +from octavia.db import models +from octavia.tests.unit import base + +DEFAULT_SORTS = [('created_at', 'asc'), ('id', 'asc')] + + +class TestPaginationHelper(base.TestCase): + + @mock.patch('octavia.api.common.pagination.request') + def test_no_params(self, request_mock): + params = {} + helper = pagination.PaginationHelper(params) + query_mock = mock.MagicMock() + + helper.apply(query_mock, models.LoadBalancer) + self.assertEqual(DEFAULT_SORTS, helper.sort_keys) + self.assertIsNone(helper.marker) + self.assertEqual(1000, helper.limit) + query_mock.order_by().order_by().limit.assert_called_with( + 1000) + + def test_sort_empty(self): + sort_params = "" + params = {'sort': sort_params} + act_params = pagination.PaginationHelper( + params).sort_keys + self.assertEqual([], act_params) + + def test_sort_none(self): + sort_params = None + params = {'sort': sort_params} + act_params = pagination.PaginationHelper( + params).sort_keys + self.assertEqual([], act_params) + + def test_sort_key_dir(self): + sort_keys = "key1,key2,key3" + sort_dirs = "asc,desc" + ref_sort_keys = [('key1', 'asc'), ('key2', 'desc'), ('key3', 'asc')] + params = {'sort_key': sort_keys, 'sort_dir': sort_dirs} + helper = pagination.PaginationHelper(params) + self.assertEqual(ref_sort_keys, helper.sort_keys) + + def test_invalid_sorts(self): + sort_params = "should_fail_exception:cause:of:this" + params = {'sort': sort_params} + self.assertRaises(exceptions.InvalidSortKey, + pagination.PaginationHelper, + params) + + sort_params = "key1:asc,key2:InvalidDir,key3" + params = {'sort': sort_params} + self.assertRaises(exceptions.InvalidSortDirection, + pagination.PaginationHelper, + params) + + def test_marker(self): + marker = 'random_uuid' + params = {'marker': marker} + helper = pagination.PaginationHelper(params) + + self.assertEqual(marker, helper.marker) + + @mock.patch('octavia.api.common.pagination.request') + def test_limit(self, request_mock): + limit = 100 + params = {'limit': limit} + helper = pagination.PaginationHelper(params) + query_mock = mock.MagicMock() + + helper.apply(query_mock, models.LoadBalancer) + query_mock.order_by().order_by().limit.assert_called_with( + limit) + + @mock.patch('octavia.api.common.pagination.request') + def test_filter_correct_params(self, request_mock): + params = {'id': 'fake_id'} + helper = pagination.PaginationHelper(params) + query_mock = mock.MagicMock() + + helper.apply(query_mock, models.LoadBalancer) + self.assertEqual(params, helper.filters) + + @mock.patch('octavia.api.common.pagination.request') + def test_filter_with_booleans(self, request_mock): + params = {'backup': 'True', 'admin_state_up': 'false'} + 
expected_params = {'backup': True, 'enabled': False} + helper = pagination.PaginationHelper(params) + query_mock = mock.MagicMock() + + helper.apply(query_mock, models.Member) + self.assertEqual(expected_params, helper.filters) + + @mock.patch('octavia.api.common.pagination.request') + def test_filter_mismatched_params(self, request_mock): + params = { + 'id': 'fake_id', + 'fields': 'field', + 'limit': '10', + 'sort': None, + } + + filters = {'id': 'fake_id'} + + helper = pagination.PaginationHelper(params) + query_mock = mock.MagicMock() + + helper.apply(query_mock, models.LoadBalancer) + self.assertEqual(filters, helper.filters) + helper.apply(query_mock, models.LoadBalancer, + enforce_valid_params=True) + self.assertEqual(filters, helper.filters) + + @mock.patch('octavia.api.common.pagination.request') + def test_filter_with_invalid_params(self, request_mock): + params = {'id': 'fake_id', 'no_such_param': 'id'} + filters = {'id': 'fake_id'} + helper = pagination.PaginationHelper(params) + query_mock = mock.MagicMock() + + helper.apply(query_mock, models.LoadBalancer, + # silently ignore invalid parameter + enforce_valid_params=False) + self.assertEqual(filters, helper.filters) + + self.assertRaises( + exceptions.InvalidFilterArgument, + pagination.PaginationHelper.apply, + helper, + query_mock, + models.Amphora, + ) + + @mock.patch('octavia.api.common.pagination.request') + def test_duplicate_argument(self, request_mock): + params = {'loadbalancer_id': 'id1', 'load_balancer_id': 'id2'} + query_mock = mock.MagicMock() + helper = pagination.PaginationHelper(params) + + self.assertRaises( + exceptions.InvalidFilterArgument, + pagination.PaginationHelper.apply, + helper, + query_mock, + models.Amphora, + ) + + @mock.patch('octavia.api.common.pagination.request') + def test_fields_not_passed(self, request_mock): + params = {'fields': 'id'} + helper = pagination.PaginationHelper(params) + query_mock = mock.MagicMock() + + helper.apply(query_mock, models.LoadBalancer) + self.assertEqual({}, helper.filters) + + @mock.patch('octavia.api.common.pagination.request') + def test_make_links_next(self, request_mock): + request_mock.path = "/lbaas/v2/pools/1/members" + request_mock.path_url = "http://localhost/" + request_mock.path + member1 = models.Member() + member1.id = uuidutils.generate_uuid() + model_list = [member1] + + params = {'limit': 1} + helper = pagination.PaginationHelper(params) + links = helper._make_links(model_list) + self.assertEqual(links[0].rel, "next") + self.assertEqual( + links[0].href, + f"{request_mock.path_url}?limit={params['limit']}&" + f"marker={member1.id}") + + @mock.patch('octavia.api.common.pagination.request') + def test_make_links_prev(self, request_mock): + request_mock.path = "/lbaas/v2/pools/1/members" + request_mock.path_url = "http://localhost/" + request_mock.path + member1 = models.Member() + member1.id = uuidutils.generate_uuid() + model_list = [member1] + + params = {'limit': 1, 'marker': member1.id} + helper = pagination.PaginationHelper(params) + links = helper._make_links(model_list) + self.assertEqual(links[0].rel, "previous") + self.assertEqual( + links[0].href, + f"{request_mock.path_url}?limit={params['limit']}&" + f"marker={member1.id}&page_reverse=True") + self.assertEqual(links[1].rel, "next") + self.assertEqual( + links[1].href, + f"{request_mock.path_url}?limit={params['limit']}&" + f"marker={member1.id}") + + @mock.patch('octavia.api.common.pagination.request') + def 
test_make_links_with_configured_url(http://github.com/self,%20request_mock): + request_mock.path = "/lbaas/v2/pools/1/members" + request_mock.path_url = "http://localhost/" + request_mock.path + api_base_uri = "https://127.0.0.1/" + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', api_base_uri=api_base_uri) + member1 = models.Member() + member1.id = uuidutils.generate_uuid() + model_list = [member1] + + params = {'limit': 1, 'marker': member1.id} + helper = pagination.PaginationHelper(params) + links = helper._make_links(model_list) + self.assertEqual(links[0].rel, "previous") + self.assertEqual( + links[0].href, + ("{base_uri}{path}?limit={limit}&marker={marker}" + "&page_reverse=True").format( + base_uri=api_base_uri, + path=request_mock.path, + limit=params['limit'], + marker=member1.id + )) + self.assertEqual(links[1].rel, "next") + self.assertEqual( + links[1].href, + "{base_uri}{path}?limit={limit}&marker={marker}".format( + base_uri=api_base_uri, + path=request_mock.path, + limit=params['limit'], + marker=member1.id)) + + @mock.patch('octavia.api.common.pagination.request') + def test_make_links_with_zero_limit(self, request_mock): + request_mock.path = "/lbaas/v2/pools/1/members" + request_mock.path_url = "http://localhost/" + request_mock.path + api_base_uri = "https://127.0.0.1/" + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', api_base_uri=api_base_uri) + member1 = models.Member() + member1.id = uuidutils.generate_uuid() + model_list = [member1] + + params = {'limit': 0, 'marker': member1.id} + helper = pagination.PaginationHelper(params) + links = helper._make_links(model_list) + self.assertEqual(links[0].rel, "previous") + self.assertEqual( + links[0].href, + ("{base_uri}{path}?limit={limit}&marker={marker}" + "&page_reverse=True").format( + base_uri=api_base_uri, + path=request_mock.path, + limit=None, + marker=member1.id + )) + self.assertEqual(links[1].rel, "next") + self.assertEqual( + links[1].href, + "{base_uri}{path}?limit={limit}&marker={marker}".format( + base_uri=api_base_uri, + path=request_mock.path, + limit=None, + marker=member1.id)) + + @mock.patch('octavia.api.common.pagination.request') + def test_make_links_with_negative_limit(self, request_mock): + request_mock.path = "/lbaas/v2/pools/1/members" + request_mock.path_url = "http://localhost/" + request_mock.path + api_base_uri = "https://127.0.0.1/" + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group='api_settings', api_base_uri=api_base_uri) + member1 = models.Member() + member1.id = uuidutils.generate_uuid() + model_list = [member1] + + params = {'limit': -1, 'marker': member1.id} + helper = pagination.PaginationHelper(params) + links = helper._make_links(model_list) + self.assertEqual(links[0].rel, "previous") + self.assertEqual( + links[0].href, + ("{base_uri}{path}?limit={limit}&marker={marker}" + "&page_reverse=True").format( + base_uri=api_base_uri, + path=request_mock.path, + limit=None, + marker=member1.id + )) + self.assertEqual(links[1].rel, "next") + self.assertEqual( + links[1].href, + "{base_uri}{path}?limit={limit}&marker={marker}".format( + base_uri=api_base_uri, + path=request_mock.path, + limit=None, + marker=member1.id)) diff --git a/octavia/tests/unit/api/common/test_types.py b/octavia/tests/unit/api/common/test_types.py new file mode 100644 index 0000000000..ea89d9cbc4 --- /dev/null +++ 
b/octavia/tests/unit/api/common/test_types.py @@ -0,0 +1,183 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import types as wtypes + +from octavia.api.common import types +from octavia.common import data_models +from octavia.tests.unit import base + + +class TestTypeRename(types.BaseType): + _type_to_model_map = {'renamed': 'original', + 'child_one': 'child.one', + 'child_two': 'child.two', + 'admin_state_up': 'enabled'} + id = wtypes.wsattr(wtypes.StringType()) + renamed = wtypes.wsattr(wtypes.StringType()) + child_one = wtypes.wsattr(wtypes.StringType()) + child_two = wtypes.wsattr(wtypes.StringType()) + admin_state_up = wtypes.wsattr(bool) + + +class TestTypeRenameSubset(types.BaseType): + _type_to_model_map = {'renamed': 'original', + 'child_one': 'child.one', + 'child_two': 'child.two'} + id = wtypes.wsattr(wtypes.StringType()) + renamed = wtypes.wsattr(wtypes.StringType()) + + +class TestTypeTenantProject(types.BaseType): + tenant_id = wtypes.wsattr(wtypes.StringType()) + project_id = wtypes.wsattr(wtypes.StringType()) + + +class ChildTestModel(data_models.BaseDataModel): + + def __init__(self, one=None, two=None): + self.one = one + self.two = two + + +class TestModel(data_models.BaseDataModel): + + def __init__(self, id=None, original=None, child=None, enabled=None): + self.id = id + self.original = original + self.child = child + self.enabled = enabled + + def to_dict(self): + result = super().to_dict() + result['child'] = self.child.to_dict() + return result + + +class TestTypeDataModelRenames(base.TestCase): + + def setUp(self): + super().setUp() + child_model = ChildTestModel(one='baby_turtle_one', + two='baby_turtle_two') + self.model = TestModel(id='1234', original='turtles', + child=child_model) + + def test_model_to_type(self): + new_type = TestTypeRename.from_data_model(self.model) + self.assertEqual(self.model.original, new_type.renamed) + self.assertEqual(self.model.child.one, new_type.child_one) + self.assertEqual(self.model.child.two, new_type.child_two) + self.assertEqual(self.model.id, new_type.id) + + def test_model_to_type_with_subset_of_fields(self): + new_type = TestTypeRenameSubset.from_data_model(self.model) + self.assertEqual(self.model.original, new_type.renamed) + self.assertEqual(self.model.id, new_type.id) + self.assertFalse(hasattr(new_type, 'child_one')) + self.assertFalse(hasattr(new_type, 'child_two')) + + def test_type_to_dict(self): + new_type = TestTypeRename(id='1234', renamed='turtles', + child_one='baby_turtle_one', + child_two='baby_turtle_two') + type_dict = new_type.to_dict() + self.assertEqual(new_type.id, type_dict.get('id')) + self.assertEqual(new_type.renamed, type_dict.get('original')) + self.assertIn('child', type_dict) + child_dict = type_dict.pop('child') + self.assertEqual(new_type.child_one, child_dict.get('one')) + self.assertEqual(new_type.child_two, child_dict.get('two')) + + def test_translate_dict_keys_to_data_model(self): + new_type = TestTypeRename.from_data_model(self.model) + new_type_vars = { + k: 
getattr(new_type, k) for k in dir(new_type) if not ( + callable(getattr(new_type, k)) or k.startswith('_')) + } + self.assertEqual( + set(vars(self.model)), + set(new_type.translate_dict_keys_to_data_model(new_type_vars)), + ) + + def test_type_to_dict_with_tenant_id(self): + type_dict = TestTypeTenantProject(tenant_id='1234').to_dict() + self.assertEqual('1234', type_dict['project_id']) + self.assertNotIn('tenant_id', type_dict) + + def test_type_to_dict_when_admin_state_up_is_null(self): + rtype = TestTypeRename(id='1234', renamed='turtles', + child_one='baby_turtle_one', + child_two='baby_turtle_two', + admin_state_up=None) + rtype_dict = rtype.to_dict() + self.assertFalse(rtype_dict['enabled']) + + +class TestToDictModel(data_models.BaseDataModel): + def __init__(self, text, parent=None): + self.parent = parent + self.child = None + self.children = None + self.text = text + + def set_children(self, children): + self.children = children + + def set_child(self, child): + self.child = child + + def set_parent(self, parent): + self.parent = parent + + +class TestDataModelToDict(base.TestCase): + RECURSED_RESULT = {'parent': None, + 'text': 'parent_text', + 'child': {'parent': None, + 'text': 'child_text', + 'child': None, + 'children': None}, + 'children': [ + {'parent': None, + 'text': 'child1_text', + 'child': None, + 'children': None}, + {'parent': None, + 'text': 'child2_text', + 'child': None, + 'children': None}]} + + NO_RECURSE_RESULT = {'parent': None, + 'text': 'parent_text', + 'child': None, + 'children': []} + + def setUp(self): + super().setUp() + self.model = TestToDictModel('parent_text') + self.model.set_child(TestToDictModel('child_text', self.model)) + self.model.set_children([TestToDictModel('child1_text', self.model), + TestToDictModel('child2_text', self.model)]) + + def test_to_dict_no_recurse(self): + self.assertEqual(self.model.to_dict(), + self.NO_RECURSE_RESULT) + + def test_to_dict_recurse(self): + self.assertEqual(self.model.to_dict(recurse=True), + self.RECURSED_RESULT) + + def test_type_to_dict_with_project_id(self): + type_dict = TestTypeTenantProject(project_id='1234').to_dict() + self.assertEqual('1234', type_dict['project_id']) + self.assertNotIn('tenant_id', type_dict) diff --git a/octavia/tests/unit/api/drivers/__init__.py b/octavia/tests/unit/api/drivers/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/drivers/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/drivers/amphora_driver/__init__.py b/octavia/tests/unit/api/drivers/amphora_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/drivers/amphora_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/drivers/amphora_driver/v2/__init__.py b/octavia/tests/unit/api/drivers/amphora_driver/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/drivers/amphora_driver/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/drivers/amphora_driver/v2/test_driver.py b/octavia/tests/unit/api/drivers/amphora_driver/v2/test_driver.py new file mode 100644 index 0000000000..f6929dcf21 --- /dev/null +++ b/octavia/tests/unit/api/drivers/amphora_driver/v2/test_driver.py @@ -0,0 +1,938 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
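The amphora v2 provider driver tests below all follow one pattern: call a driver method and assert that it casts the matching RPC to the controller worker with a payload serialized from the octavia-lib data model. A minimal sketch of that cast pattern, assuming the driver holds an oslo.messaging RPCClient as self.client (an illustrative shape, not the driver's actual implementation):

    class SketchAmphoraDriver:
        # Illustrative only; attribute names here are assumptions.
        def __init__(self, client):
            self.client = client  # an oslo_messaging RPCClient

        def loadbalancer_create(self, loadbalancer):
            payload = {'loadbalancer': loadbalancer.to_dict(),
                       'flavor': None,
                       'availability_zone': None}
            # Fire-and-forget cast; the controller worker consumes it,
            # which is why the tests can assert on RPCClient.cast alone.
            self.client.cast({}, 'create_load_balancer', **payload)

The unsupported-option tests additionally verify that validation failures raise before any cast happens (mock_cast.assert_not_called()).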
+from unittest import mock + +from octavia_lib.api.drivers import data_models as driver_dm +from octavia_lib.api.drivers import exceptions +from octavia_lib.common import constants as lib_consts +from oslo_utils import uuidutils + +from octavia.api.drivers.amphora_driver.v2 import driver +from octavia.common import constants as consts +from octavia.network import base as network_base +from octavia.tests.common import sample_data_models +from octavia.tests.unit import base + + +class TestAmphoraDriver(base.TestRpc): + def setUp(self): + super().setUp() + self.amp_driver = driver.AmphoraProviderDriver() + self.sample_data = sample_data_models.SampleDriverDataModels() + + @mock.patch('octavia.common.utils.get_network_driver') + def test_create_vip_port(self, mock_get_net_driver): + mock_net_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_net_driver + mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip, [] + + provider_vip_dict, add_vip_dicts = self.amp_driver.create_vip_port( + self.sample_data.lb_id, self.sample_data.project_id, + self.sample_data.provider_vip_dict, + [self.sample_data.provider_additional_vip_dict]) + + self.assertEqual(self.sample_data.provider_vip_dict, provider_vip_dict) + self.assertFalse(add_vip_dicts) + + @mock.patch('octavia.common.utils.get_network_driver') + def test_create_vip_port_without_port_security_enabled( + self, mock_get_net_driver): + mock_net_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_net_driver + network = mock.MagicMock() + network.port_security_enabled = False + mock_net_driver.get_network.return_value = network + mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip + + self.assertRaises(exceptions.DriverError, + self.amp_driver.create_vip_port, + self.sample_data.lb_id, self.sample_data.project_id, + self.sample_data.provider_vip_dict, + [self.sample_data.provider_additional_vip_dict]) + + @mock.patch('octavia.common.utils.get_network_driver') + def test_create_vip_port_failed(self, mock_get_net_driver): + mock_net_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_net_driver + mock_net_driver.allocate_vip.side_effect = ( + network_base.AllocateVIPException()) + + self.assertRaises(exceptions.DriverError, + self.amp_driver.create_vip_port, + self.sample_data.lb_id, self.sample_data.project_id, + self.sample_data.provider_vip_dict, + [self.sample_data.provider_additional_vip_dict]) + + @mock.patch('octavia.common.utils.get_network_driver') + def test_create_vip_port_conflict(self, mock_get_net_driver): + mock_net_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_net_driver + mock_net_driver.allocate_vip.side_effect = ( + network_base.VIPInUseException()) + + self.assertRaises(exceptions.Conflict, + self.amp_driver.create_vip_port, + self.sample_data.lb_id, self.sample_data.project_id, + self.sample_data.provider_vip_dict, + [self.sample_data.provider_additional_vip_dict]) + + # Load Balancer + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_create(self, mock_cast): + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id) + self.amp_driver.loadbalancer_create(provider_lb) + payload = {consts.LOADBALANCER: provider_lb.to_dict(), + consts.FLAVOR: None, + consts.AVAILABILITY_ZONE: None} + mock_cast.assert_called_with({}, 'create_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_delete(self, mock_cast): + provider_lb = driver_dm.LoadBalancer( + 
loadbalancer_id=self.sample_data.lb_id) + self.amp_driver.loadbalancer_delete(provider_lb) + payload = {consts.LOADBALANCER: provider_lb.to_dict(), + 'cascade': False} + mock_cast.assert_called_with({}, 'delete_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_failover(self, mock_cast): + self.amp_driver.loadbalancer_failover(self.sample_data.lb_id) + payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id} + mock_cast.assert_called_with({}, 'failover_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_update(self, mock_cast): + old_provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id) + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id, admin_state_up=True) + lb_dict = {'enabled': True} + self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) + payload = {consts.ORIGINAL_LOADBALANCER: old_provider_lb.to_dict(), + consts.LOAD_BALANCER_UPDATES: lb_dict} + mock_cast.assert_called_with({}, 'update_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_update_name(self, mock_cast): + old_provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id) + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id, name='Great LB') + lb_dict = {'name': 'Great LB'} + self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) + payload = {consts.ORIGINAL_LOADBALANCER: old_provider_lb.to_dict(), + consts.LOAD_BALANCER_UPDATES: lb_dict} + mock_cast.assert_called_with({}, 'update_load_balancer', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_loadbalancer_update_qos(self, mock_cast): + qos_policy_id = uuidutils.generate_uuid() + old_provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id) + provider_lb = driver_dm.LoadBalancer( + loadbalancer_id=self.sample_data.lb_id, + vip_qos_policy_id=qos_policy_id) + lb_dict = {'vip': {'qos_policy_id': qos_policy_id}} + self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) + payload = {consts.ORIGINAL_LOADBALANCER: old_provider_lb.to_dict(), + consts.LOAD_BALANCER_UPDATES: lb_dict} + mock_cast.assert_called_with({}, 'update_load_balancer', **payload) + + # Listener + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_create(self, mock_cast): + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id, + protocol=consts.PROTOCOL_HTTPS, + alpn_protocols=consts.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + self.amp_driver.listener_create(provider_listener) + payload = {consts.LISTENER: provider_listener.to_dict()} + mock_cast.assert_called_with({}, 'create_listener', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_create_unsupported_alpn(self, mock_cast): + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id, + protocol=consts.PROTOCOL_HTTPS) + provider_listener.alpn_protocols = ['http/1.1', 'eureka'] + self.assertRaises( + exceptions.UnsupportedOptionError, + self.amp_driver.listener_create, + provider_listener) + mock_cast.assert_not_called() + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_create_unsupported_protocol(self, mock_cast): + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id, + protocol='UNSUPPORTED_PROTO') + self.assertRaises( + exceptions.UnsupportedOptionError, + self.amp_driver.listener_create, + 
provider_listener) + mock_cast.assert_not_called() + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_delete(self, mock_cast): + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id) + self.amp_driver.listener_delete(provider_listener) + payload = {consts.LISTENER: provider_listener.to_dict()} + mock_cast.assert_called_with({}, 'delete_listener', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_update(self, mock_cast): + old_provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id) + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id, admin_state_up=False) + listener_dict = provider_listener.to_dict() + listener_dict['admin_state_up'] = False + self.amp_driver.listener_update(old_provider_listener, + provider_listener) + payload = {consts.ORIGINAL_LISTENER: old_provider_listener.to_dict(), + consts.LISTENER_UPDATES: listener_dict} + mock_cast.assert_called_with({}, 'update_listener', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_update_name(self, mock_cast): + old_provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id) + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id, name='Great Listener') + listener_dict = provider_listener.to_dict() + listener_dict['name'] = 'Great Listener' + self.amp_driver.listener_update(old_provider_listener, + provider_listener) + payload = {consts.ORIGINAL_LISTENER: old_provider_listener.to_dict(), + consts.LISTENER_UPDATES: listener_dict} + mock_cast.assert_called_with({}, 'update_listener', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_listener_update_unsupported_alpn(self, mock_cast): + old_provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id) + provider_listener = driver_dm.Listener( + listener_id=self.sample_data.listener1_id, + alpn_protocols=['http/1.1', 'eureka']) + self.assertRaises( + exceptions.UnsupportedOptionError, + self.amp_driver.listener_update, + old_provider_listener, + provider_listener) + + # Pool + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_create(self, mock_cast): + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id, + lb_algorithm=consts.LB_ALGORITHM_ROUND_ROBIN, + alpn_protocols=consts.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + self.amp_driver.pool_create(provider_pool) + payload = {consts.POOL: provider_pool.to_dict(recurse=True)} + mock_cast.assert_called_with({}, 'create_pool', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_create_unsupported_algorithm(self, mock_cast): + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + provider_pool.lb_algorithm = 'foo' + self.assertRaises( + exceptions.UnsupportedOptionError, + self.amp_driver.pool_create, + provider_pool) + mock_cast.assert_not_called() + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_create_unsupported_alpn(self, mock_cast): + provider_pool = driver_dm.Pool(pool_id=self.sample_data.pool1_id) + provider_pool.alpn_protocols = ['http/1.1', 'eureka'] + self.assertRaises( + exceptions.UnsupportedOptionError, + self.amp_driver.pool_create, + provider_pool) + mock_cast.assert_not_called() + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_delete(self, mock_cast): + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + self.amp_driver.pool_delete(provider_pool) + payload = 
{consts.POOL: provider_pool.to_dict()} + mock_cast.assert_called_with({}, 'delete_pool', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_update(self, mock_cast): + old_provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id, admin_state_up=True, + ca_tls_container_data='CA DATA', ca_tls_container_ref='CA REF', + crl_container_data='CRL DATA', crl_container_ref='CRL REF', + description='TEST DESCRIPTION', name='TEST NAME', + lb_algorithm=consts.LB_ALGORITHM_SOURCE_IP, + session_persistence='FAKE SP', tls_container_data='TLS DATA', + tls_container_ref='TLS REF', tls_enabled=False) + pool_dict = {'description': 'TEST DESCRIPTION', + 'lb_algorithm': 'SOURCE_IP', 'name': 'TEST NAME', + 'session_persistence': 'FAKE SP', 'tls_enabled': False, + 'enabled': True, 'tls_certificate_id': 'TLS REF', + 'ca_tls_certificate_id': 'CA REF', + 'crl_container_id': 'CRL REF'} + self.amp_driver.pool_update(old_provider_pool, provider_pool) + payload = {consts.ORIGINAL_POOL: old_provider_pool.to_dict(), + consts.POOL_UPDATES: pool_dict} + mock_cast.assert_called_with({}, 'update_pool', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_update_name(self, mock_cast): + old_provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id, name='Great pool', + admin_state_up=True, tls_enabled=True) + pool_dict = {'name': 'Great pool', + 'enabled': True, + 'tls_enabled': True} + self.amp_driver.pool_update(old_provider_pool, provider_pool) + payload = {consts.ORIGINAL_POOL: old_provider_pool.to_dict(), + consts.POOL_UPDATES: pool_dict} + mock_cast.assert_called_with({}, 'update_pool', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_update_unsupported_algorithm(self, mock_cast): + old_provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id) + provider_pool.lb_algorithm = 'foo' + self.assertRaises( + exceptions.UnsupportedOptionError, + self.amp_driver.pool_update, + old_provider_pool, + provider_pool) + mock_cast.assert_not_called() + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_pool_update_unsupported_alpn(self, mock_cast): + old_provider_pool = driver_dm.Pool(pool_id=self.sample_data.pool1_id) + provider_pool = driver_dm.Pool( + pool_id=self.sample_data.pool1_id, + alpn_protocols=['http/1.1', 'eureka']) + self.assertRaises( + exceptions.UnsupportedOptionError, + self.amp_driver.pool_update, + old_provider_pool, + provider_pool) + + # Member + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_create(self, mock_cast, mock_pool_get, mock_session): + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id) + self.amp_driver.member_create(provider_member) + payload = {consts.MEMBER: provider_member.to_dict()} + mock_cast.assert_called_with({}, 'create_member', **payload) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_create_udp_ipv4(self, mock_cast, mock_pool_get, + mock_session): + mock_lb = mock.MagicMock() + mock_lb.vip = mock.MagicMock() + mock_lb.vip.ip_address = "192.0.1.1" + mock_listener = mock.MagicMock() + mock_listener.load_balancer
= mock_lb + mock_pool = mock.MagicMock() + mock_pool.protocol = consts.PROTOCOL_UDP + mock_pool.listeners = [mock_listener] + mock_pool_get.return_value = mock_pool + + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id, + address="192.0.2.1") + self.amp_driver.member_create(provider_member) + payload = {consts.MEMBER: provider_member.to_dict()} + mock_cast.assert_called_with({}, 'create_member', **payload) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_create_udp_ipv4_ipv6(self, mock_cast, mock_pool_get, + mock_session): + mock_lb = mock.MagicMock() + mock_lb.vip = mock.MagicMock() + mock_lb.vip.ip_address = "fe80::1" + mock_listener = mock.MagicMock() + mock_listener.load_balancer = mock_lb + mock_pool = mock.MagicMock() + mock_pool.protocol = consts.PROTOCOL_UDP + mock_pool.listeners = [mock_listener] + mock_pool_get.return_value = mock_pool + + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id, + address="192.0.2.1") + self.assertRaises(exceptions.UnsupportedOptionError, + self.amp_driver.member_create, + provider_member) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_create_sctp_ipv4(self, mock_cast, mock_pool_get, + mock_session): + mock_lb = mock.MagicMock() + mock_lb.vip = mock.MagicMock() + mock_lb.vip.ip_address = "192.0.1.1" + mock_listener = mock.MagicMock() + mock_listener.load_balancer = mock_lb + mock_pool = mock.MagicMock() + mock_pool.protocol = lib_consts.PROTOCOL_SCTP + mock_pool.listeners = [mock_listener] + mock_pool_get.return_value = mock_pool + + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id, + address="192.0.2.1") + self.amp_driver.member_create(provider_member) + payload = {consts.MEMBER: provider_member.to_dict()} + mock_cast.assert_called_with({}, 'create_member', **payload) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_create_sctp_ipv4_ipv6(self, mock_cast, mock_pool_get, + mock_session): + mock_lb = mock.MagicMock() + mock_lb.vip = mock.MagicMock() + mock_lb.vip.ip_address = "fe80::1" + mock_listener = mock.MagicMock() + mock_listener.load_balancer = mock_lb + mock_pool = mock.MagicMock() + mock_pool.protocol = lib_consts.PROTOCOL_SCTP + mock_pool.listeners = [mock_listener] + mock_pool_get.return_value = mock_pool + + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id, + address="192.0.2.1") + self.assertRaises(exceptions.UnsupportedOptionError, + self.amp_driver.member_create, + provider_member) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_delete(self, mock_cast): + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id) + self.amp_driver.member_delete(provider_member) + payload = {consts.MEMBER: provider_member.to_dict()} + mock_cast.assert_called_with({}, 'delete_member', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_update(self, mock_cast): + old_provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id) + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id, admin_state_up=True) + member_dict = provider_member.to_dict() + member_dict.pop(consts.MEMBER_ID) + 
member_dict['enabled'] = member_dict.pop('admin_state_up') + self.amp_driver.member_update(old_provider_member, + provider_member) + payload = {consts.ORIGINAL_MEMBER: old_provider_member.to_dict(), + consts.MEMBER_UPDATES: member_dict} + mock_cast.assert_called_with({}, 'update_member', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_update_name(self, mock_cast): + old_provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id) + provider_member = driver_dm.Member( + member_id=self.sample_data.member1_id, name='Great member') + member_dict = provider_member.to_dict() + member_dict.pop(consts.MEMBER_ID) + self.amp_driver.member_update(old_provider_member, + provider_member) + payload = {consts.ORIGINAL_MEMBER: old_provider_member.to_dict(), + consts.MEMBER_UPDATES: member_dict} + mock_cast.assert_called_with({}, 'update_member', **payload) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_batch_update(self, mock_cast, mock_pool_get, mock_session): + mock_pool = mock.MagicMock() + mock_pool.members = self.sample_data.db_pool1_members + mock_pool_get.return_value = mock_pool + + prov_mem_update = driver_dm.Member( + member_id=self.sample_data.member2_id, + pool_id=self.sample_data.pool1_id, admin_state_up=False, + address='192.0.2.17', monitor_address='192.0.2.77', + protocol_port=80, name='updated-member2') + prov_new_member = driver_dm.Member( + member_id=self.sample_data.member3_id, + pool_id=self.sample_data.pool1_id, + address='192.0.2.18', monitor_address='192.0.2.28', + protocol_port=80, name='member3') + prov_members = [prov_mem_update, prov_new_member] + + update_mem_dict = {'ip_address': '192.0.2.17', + 'name': 'updated-member2', + 'monitor_address': '192.0.2.77', + 'id': self.sample_data.member2_id, + 'enabled': False, + 'protocol_port': 80, + 'pool_id': self.sample_data.pool1_id} + + self.amp_driver.member_batch_update( + self.sample_data.pool1_id, prov_members) + + payload = { + 'old_members': [self.sample_data.db_pool1_members[0].to_dict()], + 'new_members': [prov_new_member.to_dict()], + 'updated_members': [update_mem_dict]} + mock_cast.assert_called_with({}, 'batch_update_members', **payload) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.PoolRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_member_batch_update_no_admin_addr(self, mock_cast, + mock_pool_get, mock_session): + mock_pool = mock.MagicMock() + mock_pool.members = self.sample_data.db_pool1_members + mock_pool_get.return_value = mock_pool + + prov_mem_update = driver_dm.Member( + member_id=self.sample_data.member2_id, + pool_id=self.sample_data.pool1_id, + monitor_address='192.0.2.77', + protocol_port=80, name='updated-member2') + prov_new_member = driver_dm.Member( + member_id=self.sample_data.member3_id, + pool_id=self.sample_data.pool1_id, + address='192.0.2.18', monitor_address='192.0.2.28', + protocol_port=80, name='member3') + prov_members = [prov_mem_update, prov_new_member] + + update_mem_dict = {'name': 'updated-member2', + 'monitor_address': '192.0.2.77', + 'id': self.sample_data.member2_id, + 'protocol_port': 80, + 'pool_id': self.sample_data.pool1_id} + + self.amp_driver.member_batch_update( + self.sample_data.pool1_id, prov_members) + + payload = { + 'old_members': [self.sample_data.db_pool1_members[0].to_dict()], + 'new_members': [prov_new_member.to_dict()], + 
'updated_members': [update_mem_dict]}
+        mock_cast.assert_called_with({}, 'batch_update_members', **payload)
+
+    @mock.patch('octavia.db.api.get_session')
+    @mock.patch('octavia.db.repositories.PoolRepository.get')
+    @mock.patch('oslo_messaging.RPCClient.cast')
+    def test_member_batch_update_clear_already_empty(
+            self, mock_cast, mock_pool_get, mock_session):
+        """Expect that we will pass an empty payload if directed.
+
+        The logic for whether to attempt this lives above the driver
+        layer, so the driver is responsible for forwarding the request
+        even if it is a perceived no-op.
+        """
+        mock_pool = mock.MagicMock()
+        mock_pool_get.return_value = mock_pool
+
+        self.amp_driver.member_batch_update(
+            self.sample_data.pool1_id, [])
+
+        payload = {'old_members': [],
+                   'new_members': [],
+                   'updated_members': []}
+        mock_cast.assert_called_with({}, 'batch_update_members', **payload)
+
+    # Health Monitor
+    @mock.patch('oslo_messaging.RPCClient.cast')
+    def test_health_monitor_create(self, mock_cast):
+        provider_HM = driver_dm.HealthMonitor(
+            healthmonitor_id=self.sample_data.hm1_id)
+        self.amp_driver.health_monitor_create(provider_HM)
+        payload = {consts.HEALTH_MONITOR: provider_HM.to_dict()}
+        mock_cast.assert_called_with({}, 'create_health_monitor', **payload)
+
+    @mock.patch('oslo_messaging.RPCClient.cast')
+    def test_health_monitor_delete(self, mock_cast):
+        provider_HM = driver_dm.HealthMonitor(
+            healthmonitor_id=self.sample_data.hm1_id)
+        self.amp_driver.health_monitor_delete(provider_HM)
+        payload = {consts.HEALTH_MONITOR: provider_HM.to_dict()}
+        mock_cast.assert_called_with({}, 'delete_health_monitor', **payload)
+
+    @mock.patch('octavia.db.api.get_session')
+    @mock.patch('octavia.db.repositories.PoolRepository.get')
+    @mock.patch('oslo_messaging.RPCClient.cast')
+    def test_member_batch_update_udp_ipv4(self, mock_cast, mock_pool_get,
+                                          mock_session):
+
+        mock_lb = mock.MagicMock()
+        mock_lb.vip = mock.MagicMock()
+        mock_lb.vip.ip_address = "192.0.1.1"
+        mock_listener = mock.MagicMock()
+        mock_listener.load_balancer = mock_lb
+        mock_pool = mock.MagicMock()
+        mock_pool.protocol = consts.PROTOCOL_UDP
+        mock_pool.listeners = [mock_listener]
+        mock_pool.members = self.sample_data.db_pool1_members
+        mock_pool_get.return_value = mock_pool
+
+        prov_mem_update = driver_dm.Member(
+            member_id=self.sample_data.member2_id,
+            pool_id=self.sample_data.pool1_id, admin_state_up=False,
+            address='192.0.2.17', monitor_address='192.0.2.77',
+            protocol_port=80, name='updated-member2')
+        prov_new_member = driver_dm.Member(
+            member_id=self.sample_data.member3_id,
+            pool_id=self.sample_data.pool1_id,
+            address='192.0.2.18', monitor_address='192.0.2.28',
+            protocol_port=80, name='member3')
+        prov_members = [prov_mem_update, prov_new_member]
+
+        update_mem_dict = {'ip_address': '192.0.2.17',
+                           'name': 'updated-member2',
+                           'monitor_address': '192.0.2.77',
+                           'id': self.sample_data.member2_id,
+                           'enabled': False,
+                           'protocol_port': 80,
+                           'pool_id': self.sample_data.pool1_id}
+
+        self.amp_driver.member_batch_update(
+            self.sample_data.pool1_id, prov_members)
+
+        payload = {'old_members':
+                   [self.sample_data.db_pool1_members[0].to_dict()],
+                   'new_members': [prov_new_member.to_dict()],
+                   'updated_members': [update_mem_dict]}
+        mock_cast.assert_called_with({}, 'batch_update_members', **payload)
+
+    @mock.patch('octavia.db.api.get_session')
+    @mock.patch('octavia.db.repositories.PoolRepository.get')
+    @mock.patch('oslo_messaging.RPCClient.cast')
+    def test_member_batch_update_udp_ipv4_ipv6(self, mock_cast,
mock_pool_get, + mock_session): + + mock_lb = mock.MagicMock() + mock_lb.vip = mock.MagicMock() + mock_lb.vip.ip_address = "192.0.1.1" + mock_listener = mock.MagicMock() + mock_listener.load_balancer = mock_lb + mock_pool = mock.MagicMock() + mock_pool.protocol = consts.PROTOCOL_UDP + mock_pool.listeners = [mock_listener] + mock_pool.members = self.sample_data.db_pool1_members + mock_pool_get.return_value = mock_pool + + prov_mem_update = driver_dm.Member( + member_id=self.sample_data.member2_id, + pool_id=self.sample_data.pool1_id, admin_state_up=False, + address='fe80::1', monitor_address='fe80::2', + protocol_port=80, name='updated-member2') + prov_new_member = driver_dm.Member( + member_id=self.sample_data.member3_id, + pool_id=self.sample_data.pool1_id, + address='192.0.2.18', monitor_address='192.0.2.28', + protocol_port=80, name='member3') + prov_members = [prov_mem_update, prov_new_member] + + self.assertRaises(exceptions.UnsupportedOptionError, + self.amp_driver.member_batch_update, + self.sample_data.pool1_id, prov_members) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_health_monitor_update(self, mock_cast): + old_provider_hm = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id) + provider_hm = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True, + max_retries=1, max_retries_down=2) + hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2} + self.amp_driver.health_monitor_update(old_provider_hm, provider_hm) + payload = {consts.ORIGINAL_HEALTH_MONITOR: old_provider_hm.to_dict(), + consts.HEALTH_MONITOR_UPDATES: hm_dict} + mock_cast.assert_called_with({}, 'update_health_monitor', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_health_monitor_update_name(self, mock_cast): + old_provider_hm = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id) + provider_hm = driver_dm.HealthMonitor( + healthmonitor_id=self.sample_data.hm1_id, name='Great HM') + hm_dict = {'name': 'Great HM'} + self.amp_driver.health_monitor_update(old_provider_hm, provider_hm) + payload = {consts.ORIGINAL_HEALTH_MONITOR: old_provider_hm.to_dict(), + consts.HEALTH_MONITOR_UPDATES: hm_dict} + mock_cast.assert_called_with({}, 'update_health_monitor', **payload) + + # L7 Policy + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.ListenerRepository.get') + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_create(self, mock_cast, mock_listener_get, mock_session): + mock_listener = mock.MagicMock() + mock_listener.protocol = consts.PROTOCOL_HTTP + mock_listener_get.return_value = mock_listener + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id) + self.amp_driver.l7policy_create(provider_l7policy) + payload = {consts.L7POLICY: provider_l7policy.to_dict()} + mock_cast.assert_called_with({}, 'create_l7policy', **payload) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.ListenerRepository.get') + def test_l7policy_create_invalid_listener_protocol(self, mock_listener_get, + mock_session): + mock_listener = mock.MagicMock() + mock_listener.protocol = consts.PROTOCOL_UDP + mock_listener_get.return_value = mock_listener + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id) + self.assertRaises(exceptions.UnsupportedOptionError, + self.amp_driver.l7policy_create, + provider_l7policy) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_delete(self, 
mock_cast): + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id) + self.amp_driver.l7policy_delete(provider_l7policy) + payload = {consts.L7POLICY: provider_l7policy.to_dict()} + mock_cast.assert_called_with({}, 'delete_l7policy', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_update(self, mock_cast): + old_provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id) + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True) + l7policy_dict = {'enabled': True} + self.amp_driver.l7policy_update(old_provider_l7policy, + provider_l7policy) + payload = {consts.ORIGINAL_L7POLICY: old_provider_l7policy.to_dict(), + consts.L7POLICY_UPDATES: l7policy_dict} + mock_cast.assert_called_with({}, 'update_l7policy', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7policy_update_name(self, mock_cast): + old_provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id) + provider_l7policy = driver_dm.L7Policy( + l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy') + l7policy_dict = {'name': 'Great L7Policy'} + self.amp_driver.l7policy_update(old_provider_l7policy, + provider_l7policy) + payload = {consts.ORIGINAL_L7POLICY: old_provider_l7policy.to_dict(), + consts.L7POLICY_UPDATES: l7policy_dict} + mock_cast.assert_called_with({}, 'update_l7policy', **payload) + + # L7 Rules + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_create(self, mock_cast): + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id) + self.amp_driver.l7rule_create(provider_l7rule) + payload = {consts.L7RULE: provider_l7rule.to_dict()} + mock_cast.assert_called_with({}, 'create_l7rule', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_delete(self, mock_cast): + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id) + self.amp_driver.l7rule_delete(provider_l7rule) + payload = {consts.L7RULE: provider_l7rule.to_dict()} + mock_cast.assert_called_with({}, 'delete_l7rule', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_update(self, mock_cast): + old_provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id) + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True) + l7rule_dict = {'enabled': True} + self.amp_driver.l7rule_update(old_provider_l7rule, + provider_l7rule) + payload = {consts.ORIGINAL_L7RULE: old_provider_l7rule.to_dict(), + consts.L7RULE_UPDATES: l7rule_dict} + mock_cast.assert_called_with({}, 'update_l7rule', **payload) + + @mock.patch('oslo_messaging.RPCClient.cast') + def test_l7rule_update_invert(self, mock_cast): + old_provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id) + provider_l7rule = driver_dm.L7Rule( + l7rule_id=self.sample_data.l7rule1_id, invert=True) + l7rule_dict = {'invert': True} + self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule) + payload = {consts.ORIGINAL_L7RULE: old_provider_l7rule.to_dict(), + consts.L7RULE_UPDATES: l7rule_dict} + mock_cast.assert_called_with({}, 'update_l7rule', **payload) + + # Flavor + def test_get_supported_flavor_metadata(self): + test_schema = { + "properties": { + "test_name": {"description": "Test description"}, + "test_name2": {"description": "Another description"}}} + ref_dict = {"test_name": "Test description", + "test_name2": "Another description"} + + # mock 
out the supported_flavor_metadata + with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' + 'SUPPORTED_FLAVOR_SCHEMA', test_schema): + result = self.amp_driver.get_supported_flavor_metadata() + self.assertEqual(ref_dict, result) + + # Test for bad schema + with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' + 'SUPPORTED_FLAVOR_SCHEMA', 'bogus'): + self.assertRaises(exceptions.DriverError, + self.amp_driver.get_supported_flavor_metadata) + + def test_validate_flavor(self): + ref_dict = {consts.LOADBALANCER_TOPOLOGY: consts.TOPOLOGY_SINGLE} + self.amp_driver.validate_flavor(ref_dict) + + # Test bad flavor metadata value is bad + ref_dict = {consts.LOADBALANCER_TOPOLOGY: 'bogus'} + self.assertRaises(exceptions.UnsupportedOptionError, + self.amp_driver.validate_flavor, + ref_dict) + + # Test bad flavor metadata key + ref_dict = {'bogus': 'bogus'} + self.assertRaises(exceptions.UnsupportedOptionError, + self.amp_driver.validate_flavor, + ref_dict) + + # Test for bad schema + with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' + 'SUPPORTED_FLAVOR_SCHEMA', 'bogus'): + self.assertRaises(exceptions.DriverError, + self.amp_driver.validate_flavor, 'bogus') + + # Availability Zone + def test_get_supported_availability_zone_metadata(self): + test_schema = { + "properties": { + "test_name": {"description": "Test description"}, + "test_name2": {"description": "Another description"}}} + ref_dict = {"test_name": "Test description", + "test_name2": "Another description"} + + # mock out the supported_availability_zone_metadata + with mock.patch('octavia.api.drivers.amphora_driver.' + 'availability_zone_schema.' + 'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', test_schema): + result = self.amp_driver.get_supported_availability_zone_metadata() + self.assertEqual(ref_dict, result) + + # Test for bad schema + with mock.patch('octavia.api.drivers.amphora_driver.' + 'availability_zone_schema.' + 'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'): + self.assertRaises( + exceptions.DriverError, + self.amp_driver.get_supported_availability_zone_metadata) + + def test_validate_availability_zone(self): + # Test compute zone + with mock.patch('stevedore.driver.DriverManager.driver') as m_driver: + m_driver.validate_availability_zone.return_value = None + ref_dict = {consts.COMPUTE_ZONE: 'my_compute_zone'} + self.amp_driver.validate_availability_zone(ref_dict) + m_driver.validate_availability_zone.assert_called_once_with( + 'my_compute_zone') + + # Test volume zone + with mock.patch('stevedore.driver.DriverManager.driver') as m_driver: + m_driver.validate_availability_zone.return_value = None + ref_dict = {consts.VOLUME_ZONE: 'my_volume_zone'} + self.amp_driver.validate_availability_zone(ref_dict) + m_driver.validate_availability_zone.assert_called_once_with( + 'my_volume_zone') + + with mock.patch('octavia.common.utils.get_network_driver') as m_driver: + # Test vip networks + m_driver.get_network.return_value = None + ref_dict = {consts.VALID_VIP_NETWORKS: ['my_vip_net']} + self.amp_driver.validate_availability_zone(ref_dict) + + # Test management network + ref_dict = {consts.MANAGEMENT_NETWORK: 'my_management_net'} + self.amp_driver.validate_availability_zone(ref_dict) + + # Test bad availability zone metadata key + ref_dict = {'bogus': 'bogus'} + self.assertRaises(exceptions.UnsupportedOptionError, + self.amp_driver.validate_availability_zone, + ref_dict) + + # Test for bad schema + with mock.patch('octavia.api.drivers.amphora_driver.' + 'availability_zone_schema.' 
+ 'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'): + self.assertRaises(exceptions.DriverError, + self.amp_driver.validate_availability_zone, + 'bogus') + + @mock.patch('cryptography.fernet.MultiFernet') + def test_encrypt_listener_dict(self, mock_fernet): + mock_fern = mock.MagicMock() + mock_fernet.return_value = mock_fern + TEST_DATA = {'cert': b'some data'} + TEST_DATA2 = {'test': 'more data'} + FAKE_ENCRYPTED_DATA = b'alqwkhjetrhth' + mock_fern.encrypt.return_value = FAKE_ENCRYPTED_DATA + + # We need a class instance with the mock + amp_driver = driver.AmphoraProviderDriver() + + # Test just default_tls_container_data + list_dict = {consts.DEFAULT_TLS_CONTAINER_DATA: TEST_DATA} + + amp_driver._encrypt_listener_dict(list_dict) + + mock_fern.encrypt.assert_called_once_with(b'some data') + + self.assertEqual({'cert': FAKE_ENCRYPTED_DATA}, + list_dict[consts.DEFAULT_TLS_CONTAINER_DATA]) + + mock_fern.reset_mock() + + # Test just sni_container_data + TEST_DATA = {'cert': b'some data'} + sni_dict = {consts.SNI_CONTAINER_DATA: [TEST_DATA, TEST_DATA2]} + + amp_driver._encrypt_listener_dict(sni_dict) + + mock_fern.encrypt.assert_called_once_with(b'some data') + + encrypted_sni = [{'cert': FAKE_ENCRYPTED_DATA}, + TEST_DATA2] + self.assertEqual(encrypted_sni, sni_dict[consts.SNI_CONTAINER_DATA]) diff --git a/octavia/tests/unit/api/drivers/driver_agent/__init__.py b/octavia/tests/unit/api/drivers/driver_agent/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/drivers/driver_agent/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py b/octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py new file mode 100644 index 0000000000..9f5452ee36 --- /dev/null +++ b/octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py @@ -0,0 +1,121 @@ +# Copyright 2019 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
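+
+# The tests in this module funnel every object type through one shared
+# helper, _test_process_get_object, and stack several mock.patch decorators
+# on each test. Stacked decorators apply bottom-up, so the patch written
+# closest to the function arrives as the *first* mock argument. A minimal
+# standalone sketch of that ordering (illustration only, not part of this
+# test suite):
+#
+#     import os.path
+#     from unittest import mock
+#
+#     @mock.patch('os.path.islink')   # outermost -> last mock argument
+#     @mock.patch('os.path.exists')   # innermost -> first mock argument
+#     def demo(mock_exists, mock_islink):
+#         mock_exists.return_value = True
+#         mock_islink.return_value = False
+#         assert os.path.exists('x') and not os.path.islink('x')
+#
+#     demo()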
+
+from unittest import mock
+
+from octavia_lib.common import constants as lib_consts
+from oslo_utils import uuidutils
+
+from octavia.api.drivers.driver_agent import driver_get
+from octavia.common import constants
+import octavia.tests.unit.base as base
+
+
+class TestDriverGet(base.TestCase):
+
+    @mock.patch('octavia.db.api.get_session')
+    def _test_process_get_object(self, object_name, mock_object_repo,
+                                 mock_object_to_provider, mock_get_session):
+        mock_get_session.return_value = mock.MagicMock()
+        object_repo_mock = mock.MagicMock()
+        mock_object_repo.return_value = object_repo_mock
+        db_object_mock = mock.MagicMock()
+        object_repo_mock.get.return_value = db_object_mock
+
+        mock_prov_object = mock.MagicMock()
+        mock_object_to_provider.return_value = mock_prov_object
+        ref_prov_dict = mock_prov_object.to_dict(recurse=True,
+                                                 render_unsets=True)
+
+        object_id = uuidutils.generate_uuid()
+
+        data = {constants.OBJECT: object_name, lib_consts.ID: object_id}
+
+        # Happy path
+        result = driver_get.process_get(data)
+
+        mock_object_repo.assert_called_once_with()
+        object_repo_mock.get.assert_called_once_with(
+            mock_get_session(), id=object_id, show_deleted=False)
+        mock_object_to_provider.assert_called_once_with(db_object_mock)
+        self.assertEqual(ref_prov_dict, result)
+
+        # No matching object
+        mock_object_repo.reset_mock()
+        mock_object_to_provider.reset_mock()
+
+        object_repo_mock.get.return_value = None
+
+        result = driver_get.process_get(data)
+
+        mock_object_repo.assert_called_once_with()
+        object_repo_mock.get.assert_called_once_with(
+            mock_get_session(), id=object_id, show_deleted=False)
+        mock_object_to_provider.assert_not_called()
+        self.assertEqual({}, result)
+
+    @mock.patch('octavia.api.drivers.utils.'
+                'db_loadbalancer_to_provider_loadbalancer')
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository')
+    def test_process_get_loadbalancer(self, mock_lb_repo, mock_lb_to_provider):
+        self._test_process_get_object(
+            lib_consts.LOADBALANCERS, mock_lb_repo, mock_lb_to_provider)
+
+    @mock.patch('octavia.api.drivers.utils.db_listener_to_provider_listener')
+    @mock.patch('octavia.db.repositories.ListenerRepository')
+    def test_process_get_listener(self, mock_listener_repo,
+                                  mock_listener_to_provider):
+        self._test_process_get_object(lib_consts.LISTENERS, mock_listener_repo,
+                                      mock_listener_to_provider)
+
+    @mock.patch('octavia.api.drivers.utils.db_pool_to_provider_pool')
+    @mock.patch('octavia.db.repositories.PoolRepository')
+    def test_process_get_pool(self, mock_pool_repo, mock_pool_to_provider):
+        self._test_process_get_object(lib_consts.POOLS, mock_pool_repo,
+                                      mock_pool_to_provider)
+
+    @mock.patch('octavia.api.drivers.utils.db_member_to_provider_member')
+    @mock.patch('octavia.db.repositories.MemberRepository')
+    def test_process_get_member(self, mock_member_repo,
+                                mock_member_to_provider):
+        self._test_process_get_object(lib_consts.MEMBERS, mock_member_repo,
+                                      mock_member_to_provider)
+
+    @mock.patch('octavia.api.drivers.utils.db_HM_to_provider_HM')
+    @mock.patch('octavia.db.repositories.HealthMonitorRepository')
+    def test_process_get_healthmonitor(self, mock_hm_repo,
+                                       mock_hm_to_provider):
+        self._test_process_get_object(lib_consts.HEALTHMONITORS, mock_hm_repo,
+                                      mock_hm_to_provider)
+
+    @mock.patch('octavia.api.drivers.utils.db_l7policy_to_provider_l7policy')
+    @mock.patch('octavia.db.repositories.L7PolicyRepository')
+    def test_process_get_l7policy(self, mock_l7policy_repo,
+                                  mock_l7policy_to_provider):
+        self._test_process_get_object(lib_consts.L7POLICIES,
+ mock_l7policy_repo, + mock_l7policy_to_provider) + + @mock.patch('octavia.api.drivers.utils.db_l7rule_to_provider_l7rule') + @mock.patch('octavia.db.repositories.L7RuleRepository') + def test_process_get_l7rule(self, mock_l7rule_repo, + mock_l7rule_to_provider): + self._test_process_get_object(lib_consts.L7RULES, mock_l7rule_repo, + mock_l7rule_to_provider) + + @mock.patch('octavia.db.api.get_session') + def test_process_get_bogus_object(self, mock_get_session): + data = {constants.OBJECT: 'bogus', lib_consts.ID: 'bad ID'} + result = driver_get.process_get(data) + self.assertEqual({}, result) diff --git a/octavia/tests/unit/api/drivers/driver_agent/test_driver_listener.py b/octavia/tests/unit/api/drivers/driver_agent/test_driver_listener.py new file mode 100644 index 0000000000..3514d0fb2b --- /dev/null +++ b/octavia/tests/unit/api/drivers/driver_agent/test_driver_listener.py @@ -0,0 +1,359 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import errno +import socket +import time +from unittest import mock + +from oslo_config import cfg +from oslo_serialization import jsonutils + +from octavia.api.drivers.driver_agent import driver_listener +import octavia.tests.unit.base as base + +CONF = cfg.CONF + + +class TestDriverListener(base.TestCase): + + def setUp(self): + super().setUp() + + def _wait_until_called(self, mock_to_be_called): + """Wait for up to 1 sec for the given mock to be called.""" + for _ in range(10): + if mock_to_be_called.call_count > 0: + break + time.sleep(.1) + + @mock.patch('octavia.api.drivers.driver_agent.driver_listener.memoryview') + def test_recv(self, mock_memoryview): + # TEST_STRING len() is 15 + TEST_STRING = '{"test": "msg"}' + ref_object = jsonutils.loads(TEST_STRING) + + mock_recv_socket = mock.MagicMock() + mock_recv = mock.MagicMock() + mock_recv.side_effect = [b'1', b'5', b'\n'] + mock_recv_socket.recv = mock_recv + mock_recv_socket.recv_into.return_value = 15 + mock_mv_buffer = mock.MagicMock() + mock_tobytes = mock.MagicMock() + mock_tobytes.return_value = TEST_STRING + mock_mv_buffer.tobytes = mock_tobytes + mock_memoryview.return_value = mock_mv_buffer + + result = driver_listener._recv(mock_recv_socket) + + self.assertEqual(ref_object, result) + calls = [mock.call(1), mock.call(1), mock.call(1)] + mock_recv.assert_has_calls(calls) + mock_memoryview.assert_called_once_with(bytearray(15)) + mock_recv_socket.recv_into.assert_called_once_with(mock_mv_buffer[0:], + 15) + + @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' 
+ 'DriverUpdater') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') + def test_StatusRequestHandler_handle(self, mock_recv, mock_driverupdater): + TEST_OBJECT = {"test": "msg"} + mock_recv.return_value = 'bogus' + mock_updater = mock.MagicMock() + mock_update_loadbalancer_status = mock.MagicMock() + mock_update_loadbalancer_status.return_value = TEST_OBJECT + mock_updater.update_loadbalancer_status = ( + mock_update_loadbalancer_status) + mock_driverupdater.return_value = mock_updater + mock_request = mock.MagicMock() + mock_send = mock.MagicMock() + mock_sendall = mock.MagicMock() + mock_request.send = mock_send + mock_request.sendall = mock_sendall + + StatusRequestHandler = driver_listener.StatusRequestHandler( + mock_request, 'bogus', 'bogus') + StatusRequestHandler.handle() + + mock_recv.assert_called_with(mock_request) + mock_update_loadbalancer_status.assert_called_with('bogus') + mock_send.assert_called_with(b'15\n') + mock_sendall.assert_called_with( + jsonutils.dumps(TEST_OBJECT).encode('utf-8')) + + @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' + 'DriverUpdater') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') + def test_StatusRequestHandler_handle_recv_timeout(self, mock_recv, + mock_driverupdater): + TEST_OBJECT = {"test": "msg"} + mock_recv.side_effect = socket.timeout + mock_updater = mock.MagicMock() + mock_update_loadbalancer_status = mock.MagicMock() + mock_update_loadbalancer_status.return_value = TEST_OBJECT + mock_updater.update_loadbalancer_status = ( + mock_update_loadbalancer_status) + mock_driverupdater.return_value = mock_updater + mock_request = mock.MagicMock() + + StatusRequestHandler = driver_listener.StatusRequestHandler( + mock_request, 'bogus', 'bogus') + StatusRequestHandler.handle() + + mock_recv.assert_called_with(mock_request) + mock_update_loadbalancer_status.assert_not_called() + + @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' + 'DriverUpdater') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') + def test_StatusRequestHandler_handle_send_timeout(self, mock_recv, + mock_driverupdater): + TEST_OBJECT = {"test": "msg"} + mock_recv.return_value = 'bogus' + mock_updater = mock.MagicMock() + mock_update_loadbalancer_status = mock.MagicMock() + mock_update_loadbalancer_status.return_value = TEST_OBJECT + mock_updater.update_loadbalancer_status = ( + mock_update_loadbalancer_status) + mock_driverupdater.return_value = mock_updater + mock_request = mock.MagicMock() + mock_send = mock.MagicMock() + mock_send.side_effect = socket.timeout + mock_sendall = mock.MagicMock() + mock_request.send = mock_send + mock_request.sendall = mock_sendall + + StatusRequestHandler = driver_listener.StatusRequestHandler( + mock_request, 'bogus', 'bogus') + StatusRequestHandler.handle() + + mock_recv.assert_called_with(mock_request) + mock_update_loadbalancer_status.assert_called_with('bogus') + mock_send.assert_called_with(b'15\n') + mock_sendall.assert_not_called() + + @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' 
+ 'DriverUpdater') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') + def test_StatsRequestHandler_handle(self, mock_recv, mock_driverupdater): + TEST_OBJECT = {"test": "msg"} + mock_recv.return_value = 'bogus' + mock_updater = mock.MagicMock() + mock_update_listener_stats = mock.MagicMock() + mock_update_listener_stats.return_value = TEST_OBJECT + mock_updater.update_listener_statistics = (mock_update_listener_stats) + mock_driverupdater.return_value = mock_updater + mock_request = mock.MagicMock() + mock_send = mock.MagicMock() + mock_sendall = mock.MagicMock() + mock_request.send = mock_send + mock_request.sendall = mock_sendall + + StatsRequestHandler = driver_listener.StatsRequestHandler( + mock_request, 'bogus', 'bogus') + StatsRequestHandler.handle() + + mock_recv.assert_called_with(mock_request) + mock_update_listener_stats.assert_called_with('bogus') + mock_send.assert_called_with(b'15\n') + mock_sendall.assert_called_with(jsonutils.dump_as_bytes(TEST_OBJECT)) + + @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' + 'DriverUpdater') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') + def test_StatsRequestHandler_handle_recv_timeout(self, mock_recv, + mock_driverupdater): + TEST_OBJECT = {"test": "msg"} + mock_recv.side_effect = socket.timeout + mock_updater = mock.MagicMock() + mock_update_listener_stats = mock.MagicMock() + mock_update_listener_stats.return_value = TEST_OBJECT + mock_updater.update_listener_statistics = (mock_update_listener_stats) + mock_driverupdater.return_value = mock_updater + mock_request = mock.MagicMock() + + StatsRequestHandler = driver_listener.StatsRequestHandler( + mock_request, 'bogus', 'bogus') + StatsRequestHandler.handle() + + mock_recv.assert_called_with(mock_request) + mock_update_listener_stats.assert_not_called() + + @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' + 'DriverUpdater') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') + def test_StatsRequestHandler_handle_send_timeout(self, mock_recv, + mock_driverupdater): + TEST_OBJECT = {"test": "msg"} + mock_recv.return_value = 'bogus' + mock_updater = mock.MagicMock() + mock_update_listener_stats = mock.MagicMock() + mock_update_listener_stats.return_value = TEST_OBJECT + mock_updater.update_listener_statistics = (mock_update_listener_stats) + mock_driverupdater.return_value = mock_updater + mock_request = mock.MagicMock() + mock_send = mock.MagicMock() + mock_send.side_effect = socket.timeout + mock_sendall = mock.MagicMock() + mock_request.send = mock_send + mock_request.sendall = mock_sendall + + StatsRequestHandler = driver_listener.StatsRequestHandler( + mock_request, 'bogus', 'bogus') + StatsRequestHandler.handle() + + mock_recv.assert_called_with(mock_request) + mock_update_listener_stats.assert_called_with('bogus') + mock_send.assert_called_with(b'15\n') + mock_sendall.assert_not_called() + + @mock.patch('octavia.api.drivers.driver_agent.driver_get.' 
+ 'process_get') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') + def test_GetRequestHandler_handle(self, mock_recv, mock_process_get): + TEST_OBJECT = {"test": "msg"} + + mock_recv.return_value = 'bogus' + + mock_process_get.return_value = TEST_OBJECT + mock_request = mock.MagicMock() + mock_send = mock.MagicMock() + mock_sendall = mock.MagicMock() + mock_request.send = mock_send + mock_request.sendall = mock_sendall + + GetRequestHandler = driver_listener.GetRequestHandler( + mock_request, 'bogus', 'bogus') + GetRequestHandler.handle() + + mock_recv.assert_called_with(mock_request) + mock_process_get.assert_called_with('bogus') + + mock_send.assert_called_with(b'15\n') + mock_sendall.assert_called_with(jsonutils.dump_as_bytes(TEST_OBJECT)) + + @mock.patch('octavia.api.drivers.driver_agent.driver_get.' + 'process_get') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') + def test_GetRequestHandler_handle_recv_timeout(self, mock_recv, + mock_process_get): + TEST_OBJECT = {"test": "msg"} + + mock_recv.side_effect = socket.timeout + + mock_process_get.return_value = TEST_OBJECT + mock_request = mock.MagicMock() + + GetRequestHandler = driver_listener.GetRequestHandler( + mock_request, 'bogus', 'bogus') + GetRequestHandler.handle() + + mock_recv.assert_called_with(mock_request) + mock_process_get.assert_not_called() + + @mock.patch('octavia.api.drivers.driver_agent.driver_get.' + 'process_get') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener._recv') + def test_GetRequestHandler_handle_send_timeout(self, mock_recv, + mock_process_get): + TEST_OBJECT = {"test": "msg"} + + mock_recv.return_value = 'bogus' + + mock_process_get.return_value = TEST_OBJECT + mock_request = mock.MagicMock() + mock_send = mock.MagicMock() + mock_send.side_effect = socket.timeout + mock_sendall = mock.MagicMock() + mock_request.send = mock_send + mock_request.sendall = mock_sendall + + GetRequestHandler = driver_listener.GetRequestHandler( + mock_request, 'bogus', 'bogus') + GetRequestHandler.handle() + + mock_recv.assert_called_with(mock_request) + mock_process_get.assert_called_with('bogus') + + mock_send.assert_called_with(b'15\n') + mock_sendall.assert_not_called() + + @mock.patch('os.remove') + def test_cleanup_socket_file(self, mock_remove): + mock_remove.side_effect = [mock.DEFAULT, OSError, + OSError(errno.ENOENT, 'no_file')] + driver_listener._cleanup_socket_file('fake_filename') + mock_remove.assert_called_once_with('fake_filename') + + self.assertRaises(OSError, driver_listener._cleanup_socket_file, + 'fake_filename') + # Make sure we just pass if the file was not found + driver_listener._cleanup_socket_file('fake_filename') + + @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' + '_cleanup_socket_file') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' 
+ 'ForkingUDSServer') + def test_status_listener(self, mock_forking_server, mock_cleanup): + mock_server = mock.MagicMock() + mock_active_children = mock.PropertyMock( + side_effect=['a', 'a', 'a', + 'a' * CONF.driver_agent.status_max_processes, 'a', + 'a' * 1000, '']) + type(mock_server).active_children = mock_active_children + mock_forking_server.return_value.__enter__.return_value = mock_server + mock_exit_event = mock.MagicMock() + mock_exit_event.is_set.side_effect = [False, False, False, False, True] + + driver_listener.status_listener(mock_exit_event) + self._wait_until_called(mock_server.serve_forever) + mock_server.serve_forever.assert_called() + self.assertEqual(2, mock_cleanup.call_count) + + @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' + '_cleanup_socket_file') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' + 'ForkingUDSServer') + def test_stats_listener(self, mock_forking_server, mock_cleanup): + mock_server = mock.MagicMock() + mock_active_children = mock.PropertyMock( + side_effect=['a', 'a', 'a', + 'a' * CONF.driver_agent.status_max_processes, 'a', + 'a' * 1000, '']) + type(mock_server).active_children = mock_active_children + mock_forking_server.return_value.__enter__.return_value = mock_server + mock_exit_event = mock.MagicMock() + mock_exit_event.is_set.side_effect = [False, False, False, False, True] + + driver_listener.stats_listener(mock_exit_event) + self._wait_until_called(mock_server.serve_forever) + mock_server.serve_forever.assert_called() + + @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' + '_cleanup_socket_file') + @mock.patch('octavia.api.drivers.driver_agent.driver_listener.' + 'ForkingUDSServer') + def test_get_listener(self, mock_forking_server, mock_cleanup): + mock_server = mock.MagicMock() + mock_active_children = mock.PropertyMock( + side_effect=['a', 'a', 'a', + 'a' * CONF.driver_agent.status_max_processes, 'a', + 'a' * 1000, '']) + type(mock_server).active_children = mock_active_children + mock_forking_server.return_value.__enter__.return_value = mock_server + mock_exit_event = mock.MagicMock() + mock_exit_event.is_set.side_effect = [False, False, False, False, True] + + driver_listener.get_listener(mock_exit_event) + self._wait_until_called(mock_server.serve_forever) + mock_server.serve_forever.assert_called() diff --git a/octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py b/octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py new file mode 100644 index 0000000000..471f73ef90 --- /dev/null +++ b/octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py @@ -0,0 +1,372 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
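+
+# Several tests below script a mock through a sequence of outcomes with a
+# side_effect list, where mock.DEFAULT means "behave normally and return
+# return_value" and an exception instance means "raise on that call". A
+# minimal standalone sketch of the idiom (illustration only, not part of
+# this test suite):
+#
+#     from unittest import mock
+#
+#     m = mock.MagicMock(return_value='ok')
+#     m.side_effect = [mock.DEFAULT, RuntimeError('boom')]
+#     assert m() == 'ok'   # first call falls back to return_value
+#     try:
+#         m()              # second call raises RuntimeError('boom')
+#     except RuntimeError:
+#         pass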
+import copy +from unittest import mock +from unittest.mock import call + +from octavia_lib.api.drivers import exceptions as driver_exceptions +from octavia_lib.common import constants as lib_consts +from oslo_utils import uuidutils + +from octavia.api.drivers.driver_agent import driver_updater +from octavia.common import data_models +from octavia.common import exceptions +import octavia.tests.unit.base as base + + +class TestDriverUpdater(base.TestCase): + + @mock.patch('octavia.db.repositories.LoadBalancerRepository') + @mock.patch('octavia.db.repositories.ListenerRepository') + @mock.patch('octavia.db.repositories.L7PolicyRepository') + @mock.patch('octavia.db.repositories.L7RuleRepository') + @mock.patch('octavia.db.repositories.PoolRepository') + @mock.patch('octavia.db.repositories.HealthMonitorRepository') + @mock.patch('octavia.db.repositories.MemberRepository') + @mock.patch('octavia.db.api.get_session') + def setUp(self, mock_get_session, mock_member_repo, mock_health_repo, + mock_pool_repo, mock_l7r_repo, mock_l7p_repo, mock_list_repo, + mock_lb_repo): + super().setUp() + self.mock_session = mock.MagicMock() + mock_get_session.return_value = self.mock_session + + member_mock = mock.MagicMock() + mock_member_repo.return_value = member_mock + self.mock_member_repo = member_mock + health_mock = mock.MagicMock() + mock_health_repo.return_value = health_mock + self.mock_health_repo = health_mock + pool_mock = mock.MagicMock() + mock_pool_repo.return_value = pool_mock + self.mock_pool_repo = pool_mock + l7r_mock = mock.MagicMock() + mock_l7r_repo.return_value = l7r_mock + self.mock_l7r_repo = l7r_mock + l7p_mock = mock.MagicMock() + mock_l7p_repo.return_value = l7p_mock + self.mock_l7p_repo = l7p_mock + list_mock = mock.MagicMock() + mock_list_repo.return_value = list_mock + self.mock_list_repo = list_mock + lb_mock = mock.MagicMock() + mock_lb_repo.return_value = lb_mock + self.mock_lb_repo = lb_mock + self.driver_updater = driver_updater.DriverUpdater() + self.ref_ok_response = {lib_consts.STATUS_CODE: + lib_consts.DRVR_STATUS_CODE_OK} + mock_lb = mock.MagicMock() + self.lb_id = uuidutils.generate_uuid() + self.lb_project_id = uuidutils.generate_uuid() + mock_lb.id = self.lb_id + mock_lb.project_id = self.lb_project_id + mock_lb.provisioning_status = lib_consts.ACTIVE + self.lb_data_model = 'FakeLBDataModel' + self.mock_lb_repo.model_class.__data_model__ = self.lb_data_model + self.mock_lb_repo.get.return_value = mock_lb + + @mock.patch('octavia.common.utils.get_network_driver') + def test_check_for_lb_vip_deallocate(self, mock_get_net_drvr): + mock_repo = mock.MagicMock() + mock_lb = mock.MagicMock() + mock_vip = mock.MagicMock() + mock_octavia_owned = mock.PropertyMock(side_effect=[True, False]) + type(mock_vip).octavia_owned = mock_octavia_owned + mock_lb.vip = mock_vip + mock_repo.get.return_value = mock_lb + mock_net_drvr = mock.MagicMock() + mock_get_net_drvr.return_value = mock_net_drvr + + self.driver_updater._check_for_lb_vip_deallocate(mock_repo, 'bogus_id') + mock_net_drvr.deallocate_vip.assert_called_once_with(mock_vip) + + mock_net_drvr.reset_mock() + self.driver_updater._check_for_lb_vip_deallocate(mock_repo, 'bogus_id') + mock_net_drvr.deallocate_vip.assert_not_called() + + @mock.patch('octavia.db.repositories.Repositories.decrement_quota') + @mock.patch('octavia.db.api.get_session') + def test_decrement_quota(self, mock_get_session, mock_dec_quota): + mock_dec_quota.side_effect = [mock.DEFAULT, + exceptions.OctaviaException('Boom')] + + 
self.driver_updater._decrement_quota(self.mock_lb_repo, + 'FakeName', self.lb_id) + mock_dec_quota.assert_called_once_with( + self.mock_session, self.mock_lb_repo.model_class.__data_model__, + self.lb_project_id) + self.mock_session.commit.assert_called_once() + self.mock_session.rollback.assert_not_called() + + # Test exception path + mock_dec_quota.reset_mock() + self.mock_session.reset_mock() + self.assertRaises(exceptions.OctaviaException, + self.driver_updater._decrement_quota, + self.mock_lb_repo, 'FakeName', self.lb_id) + mock_dec_quota.assert_called_once_with( + self.mock_session, self.mock_lb_repo.model_class.__data_model__, + self.lb_project_id) + self.mock_session.commit.assert_not_called() + self.mock_session.rollback.assert_called_once() + + # Test already deleted path + mock_dec_quota.reset_mock() + self.mock_session.reset_mock() + # Create a local mock LB and LB_repo for this test + mock_lb = mock.MagicMock() + mock_lb.id = self.lb_id + mock_lb.provisioning_status = lib_consts.DELETED + mock_lb_repo = mock.MagicMock() + mock_lb_repo.model_class.__data_model__ = self.lb_data_model + mock_lb_repo.get.return_value = mock_lb + self.driver_updater._decrement_quota(mock_lb_repo, + 'FakeName', self.lb_id) + mock_dec_quota.assert_not_called() + self.mock_session.commit.assert_not_called() + self.mock_session.rollback.assert_called_once() + + @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' + 'DriverUpdater._decrement_quota') + @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' + 'DriverUpdater._check_for_lb_vip_deallocate') + def test_process_status_update(self, mock_deallocate, + mock_decrement_quota): + mock_repo = mock.MagicMock() + list_dict = {"id": 2, + lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, + lib_consts.OPERATING_STATUS: lib_consts.ONLINE} + list_prov_dict = {"id": 2, + lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE} + list_oper_dict = {"id": 2, + lib_consts.OPERATING_STATUS: lib_consts.ONLINE} + list_deleted_dict = { + "id": 2, lib_consts.PROVISIONING_STATUS: lib_consts.DELETED, + lib_consts.OPERATING_STATUS: lib_consts.ONLINE} + + # Test with full record + self.driver_updater._process_status_update(mock_repo, 'FakeName', + list_dict) + mock_repo.update.assert_called_once_with( + self.mock_session, 2, provisioning_status=lib_consts.ACTIVE, + operating_status=lib_consts.ONLINE) + mock_repo.delete.assert_not_called() + + # Test with only provisioning status record + mock_repo.reset_mock() + self.driver_updater._process_status_update(mock_repo, 'FakeName', + list_prov_dict) + mock_repo.update.assert_called_once_with( + self.mock_session, 2, provisioning_status=lib_consts.ACTIVE) + mock_repo.delete.assert_not_called() + + # Test with only operating status record + mock_repo.reset_mock() + self.driver_updater._process_status_update(mock_repo, 'FakeName', + list_oper_dict) + mock_repo.update.assert_called_once_with( + self.mock_session, 2, operating_status=lib_consts.ONLINE) + mock_repo.delete.assert_not_called() + + # Test with deleted but delete_record False + mock_repo.reset_mock() + self.driver_updater._process_status_update(mock_repo, 'FakeName', + list_deleted_dict) + mock_repo.update.assert_called_once_with( + self.mock_session, 2, provisioning_status=lib_consts.DELETED, + operating_status=lib_consts.ONLINE) + mock_repo.delete.assert_not_called() + mock_decrement_quota.assert_called_once_with(mock_repo, 'FakeName', 2) + + # Test with an empty update + mock_repo.reset_mock() + self.driver_updater._process_status_update(mock_repo, 
'FakeName', + {"id": 2}) + mock_repo.update.assert_not_called() + mock_repo.delete.assert_not_called() + + # Test with deleted and delete_record True + mock_decrement_quota.reset_mock() + mock_repo.reset_mock() + self.driver_updater._process_status_update( + mock_repo, 'FakeName', list_deleted_dict, delete_record=True) + mock_repo.delete.assert_called_once_with(self.mock_session, id=2) + mock_repo.update.assert_not_called() + mock_decrement_quota.assert_called_once_with(mock_repo, 'FakeName', 2) + + # Test with LB Delete + mock_decrement_quota.reset_mock() + mock_repo.reset_mock() + self.driver_updater._process_status_update( + mock_repo, lib_consts.LOADBALANCERS, list_deleted_dict) + mock_deallocate.assert_called_once_with(mock_repo, 2) + mock_decrement_quota.assert_called_once_with( + mock_repo, lib_consts.LOADBALANCERS, 2) + + # Test with an exception + mock_repo.reset_mock() + mock_repo.update.side_effect = Exception('boom') + self.assertRaises(driver_exceptions.UpdateStatusError, + self.driver_updater._process_status_update, + mock_repo, 'FakeName', list_dict) + + # Test with no ID record + mock_repo.reset_mock() + self.assertRaises(driver_exceptions.UpdateStatusError, + self.driver_updater._process_status_update, + mock_repo, 'FakeName', {"fake": "data"}) + + @mock.patch('octavia.api.drivers.driver_agent.driver_updater.' + 'DriverUpdater._process_status_update') + def test_update_loadbalancer_status(self, mock_status_update): + mock_status_update.side_effect = [ + mock.DEFAULT, mock.DEFAULT, mock.DEFAULT, mock.DEFAULT, + mock.DEFAULT, mock.DEFAULT, mock.DEFAULT, + driver_exceptions.UpdateStatusError( + fault_string='boom', status_object='fruit', + status_object_id='1', status_record='grape'), + Exception('boom')] + lb_dict = {"id": 1, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, + lib_consts.OPERATING_STATUS: lib_consts.ONLINE} + list_dict = {"id": 2, + lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, + lib_consts.OPERATING_STATUS: lib_consts.ONLINE} + pool_dict = {"id": 3, + lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, + lib_consts.OPERATING_STATUS: lib_consts.ONLINE} + member_dict = {"id": 4, + lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, + lib_consts.OPERATING_STATUS: lib_consts.ONLINE} + hm_dict = {"id": 5, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, + lib_consts.OPERATING_STATUS: lib_consts.ONLINE} + l7p_dict = {"id": 6, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, + lib_consts.OPERATING_STATUS: lib_consts.ONLINE} + l7r_dict = {"id": 7, lib_consts.PROVISIONING_STATUS: lib_consts.ACTIVE, + lib_consts.OPERATING_STATUS: lib_consts.ONLINE} + status_dict = {lib_consts.LOADBALANCERS: [lb_dict], + lib_consts.LISTENERS: [list_dict], + lib_consts.POOLS: [pool_dict], + lib_consts.MEMBERS: [member_dict], + lib_consts.HEALTHMONITORS: [hm_dict], + lib_consts.L7POLICIES: [l7p_dict], + lib_consts.L7RULES: [l7r_dict]} + + result = self.driver_updater.update_loadbalancer_status( + copy.deepcopy(status_dict)) + + calls = [call(self.mock_member_repo, lib_consts.MEMBERS, member_dict, + delete_record=True), + call(self.mock_health_repo, lib_consts.HEALTHMONITORS, + hm_dict, delete_record=True), + call(self.mock_pool_repo, lib_consts.POOLS, pool_dict, + delete_record=True), + call(self.mock_l7r_repo, lib_consts.L7RULES, l7r_dict, + delete_record=True), + call(self.mock_l7p_repo, lib_consts.L7POLICIES, l7p_dict, + delete_record=True), + call(self.mock_list_repo, lib_consts.LISTENERS, list_dict, + delete_record=True), + call(self.mock_lb_repo, lib_consts.LOADBALANCERS, 
+ lb_dict)] + mock_status_update.assert_has_calls(calls) + self.assertEqual(self.ref_ok_response, result) + + # Test empty status updates + mock_status_update.reset_mock() + result = self.driver_updater.update_loadbalancer_status({}) + mock_status_update.assert_not_called() + self.assertEqual(self.ref_ok_response, result) + + # Test UpdateStatusError case + ref_update_status_error = { + lib_consts.FAULT_STRING: 'boom', + lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, + lib_consts.STATUS_OBJECT: 'fruit', + lib_consts.STATUS_OBJECT_ID: '1'} + result = self.driver_updater.update_loadbalancer_status( + copy.deepcopy(status_dict)) + self.assertEqual(ref_update_status_error, result) + + # Test general exceptions + result = self.driver_updater.update_loadbalancer_status( + copy.deepcopy(status_dict)) + self.assertEqual({ + lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, + lib_consts.FAULT_STRING: 'boom'}, result) + + @mock.patch('time.time') + @mock.patch('octavia.statistics.stats_base.update_stats_via_driver') + def test_update_listener_statistics(self, mock_stats_base, mock_time): + mock_time.return_value = 12345.6 + listener_stats_li = [ + {"id": 1, + "active_connections": 10, + "bytes_in": 20, + "bytes_out": 30, + "request_errors": 40, + "total_connections": 50}, + {"id": 2, + "active_connections": 60, + "bytes_in": 70, + "bytes_out": 80, + "request_errors": 90, + "total_connections": 100}] + listener_stats_dict = {"listeners": listener_stats_li} + + mock_stats_base.side_effect = [mock.DEFAULT, Exception('boom')] + result = self.driver_updater.update_listener_statistics( + listener_stats_dict) + listener_stats_objects = [ + data_models.ListenerStatistics( + listener_id=listener_stats_li[0]['id'], + active_connections=listener_stats_li[0]['active_connections'], + bytes_in=listener_stats_li[0]['bytes_in'], + bytes_out=listener_stats_li[0]['bytes_out'], + request_errors=listener_stats_li[0]['request_errors'], + total_connections=listener_stats_li[0]['total_connections'], + received_time=mock_time.return_value), + data_models.ListenerStatistics( + listener_id=listener_stats_li[1]['id'], + active_connections=listener_stats_li[1]['active_connections'], + bytes_in=listener_stats_li[1]['bytes_in'], + bytes_out=listener_stats_li[1]['bytes_out'], + request_errors=listener_stats_li[1]['request_errors'], + total_connections=listener_stats_li[1]['total_connections'], + received_time=mock_time.return_value)] + mock_stats_base.assert_called_once_with(listener_stats_objects) + self.assertEqual(self.ref_ok_response, result) + + # Test empty stats updates + mock_stats_base.reset_mock() + result = self.driver_updater.update_listener_statistics({}) + mock_stats_base.assert_not_called() + self.assertEqual(self.ref_ok_response, result) + + # Test missing ID + bad_id_dict = {"listeners": [{"notID": "one"}]} + result = self.driver_updater.update_listener_statistics(bad_id_dict) + ref_update_listener_stats_error = { + lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, + lib_consts.STATS_OBJECT: lib_consts.LISTENERS, + lib_consts.FAULT_STRING: "'id'"} + self.assertEqual(ref_update_listener_stats_error, result) + + # Test for replace exception + result = self.driver_updater.update_listener_statistics( + listener_stats_dict) + ref_update_listener_stats_error = { + lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED, + lib_consts.STATS_OBJECT: lib_consts.LISTENERS, + lib_consts.FAULT_STRING: 'boom'} + self.assertEqual(ref_update_listener_stats_error, result) diff --git 
a/octavia/tests/unit/api/drivers/noop_driver/__init__.py b/octavia/tests/unit/api/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/drivers/noop_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/drivers/noop_driver/test_agent.py b/octavia/tests/unit/api/drivers/noop_driver/test_agent.py new file mode 100644 index 0000000000..b32a0e7f97 --- /dev/null +++ b/octavia/tests/unit/api/drivers/noop_driver/test_agent.py @@ -0,0 +1,33 @@ +# Copyright 2019 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from octavia.api.drivers.noop_driver import agent +import octavia.tests.unit.base as base + + +class TestNoopProviderAgent(base.TestCase): + + def setUp(self): + super().setUp() + + @mock.patch('time.sleep') + def test_noop_provider_agent(self, mock_sleep): + mock_exit_event = mock.MagicMock() + mock_exit_event.is_set.side_effect = [False, True] + + agent.noop_provider_agent(mock_exit_event) + + mock_sleep.assert_called_once_with(1) diff --git a/octavia/tests/unit/api/drivers/noop_driver/test_driver.py b/octavia/tests/unit/api/drivers/noop_driver/test_driver.py new file mode 100644 index 0000000000..cc123be4df --- /dev/null +++ b/octavia/tests/unit/api/drivers/noop_driver/test_driver.py @@ -0,0 +1,327 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
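+
+# The no-op provider driver exercised below performs no real load balancing
+# work; it records each call as an (argument, method-name) tuple in a
+# driverconfig dict keyed by object ID, which is what the assertions in
+# this module check. A minimal standalone sketch of that recording pattern
+# (hypothetical class and names, illustration only):
+#
+#     class RecordingDriver:
+#         def __init__(self):
+#             self.driverconfig = {}
+#
+#         def loadbalancer_create(self, loadbalancer):
+#             self.driverconfig[loadbalancer.loadbalancer_id] = (
+#                 loadbalancer, 'loadbalancer_create')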
+ +from octavia_lib.api.drivers import data_models +from oslo_utils import uuidutils + +from octavia.api.drivers.noop_driver import driver +import octavia.tests.unit.base as base + + +class TestNoopProviderDriver(base.TestCase): + + def setUp(self): + super().setUp() + self.driver = driver.NoopProviderDriver() + + self.loadbalancer_id = uuidutils.generate_uuid() + self.vip_address = '192.0.2.10' + self.vip_network_id = uuidutils.generate_uuid() + self.vip_port_id = uuidutils.generate_uuid() + self.vip_subnet_id = uuidutils.generate_uuid() + self.listener_id = uuidutils.generate_uuid() + self.pool_id = uuidutils.generate_uuid() + self.member_id = uuidutils.generate_uuid() + self.member_subnet_id = uuidutils.generate_uuid() + self.healthmonitor_id = uuidutils.generate_uuid() + self.l7policy_id = uuidutils.generate_uuid() + self.l7rule_id = uuidutils.generate_uuid() + self.project_id = uuidutils.generate_uuid() + self.default_tls_container_ref = uuidutils.generate_uuid() + self.sni_container_ref_1 = uuidutils.generate_uuid() + self.sni_container_ref_2 = uuidutils.generate_uuid() + + self.ref_vip = data_models.VIP( + vip_address=self.vip_address, + vip_network_id=self.vip_network_id, + vip_port_id=self.vip_port_id, + vip_subnet_id=self.vip_subnet_id) + + self.ref_member = data_models.Member( + address='198.51.100.4', + admin_state_up=True, + member_id=self.member_id, + monitor_address='203.0.113.2', + monitor_port=66, + name='jacket', + pool_id=self.pool_id, + protocol_port=99, + subnet_id=self.member_subnet_id, + weight=55) + + self.ref_healthmonitor = data_models.HealthMonitor( + admin_state_up=False, + delay=2, + expected_codes="500", + healthmonitor_id=self.healthmonitor_id, + http_method='TRACE', + max_retries=1, + max_retries_down=0, + name='doc', + pool_id=self.pool_id, + timeout=3, + type='PHD', + url_path='/index.html') + + self.ref_pool = data_models.Pool( + admin_state_up=True, + description='Olympic swimming pool', + healthmonitor=self.ref_healthmonitor, + lb_algorithm='A_Fast_One', + loadbalancer_id=self.loadbalancer_id, + listener_id=self.listener_id, + members=[self.ref_member], + name='Osborn', + pool_id=self.pool_id, + protocol='avian', + session_persistence={'type': 'glue'}) + + self.ref_l7rule = data_models.L7Rule( + admin_state_up=True, + compare_type='store_brand', + invert=True, + key='board', + l7policy_id=self.l7policy_id, + l7rule_id=self.l7rule_id, + type='strict', + value='gold') + + self.ref_l7policy = data_models.L7Policy( + action='http://github.com/packed', + admin_state_up=False, + description='Corporate policy', + l7policy_id=self.l7policy_id, + listener_id=self.listener_id, + name='more_policy', + position=1, + redirect_pool_id=self.pool_id, + redirect_url='/hr', + rules=[self.ref_l7rule]) + + self.ref_listener = data_models.Listener( + admin_state_up=False, + connection_limit=5, + default_pool=self.ref_pool, + default_pool_id=self.pool_id, + default_tls_container_data='default_cert_data', + default_tls_container_ref=self.default_tls_container_ref, + description='The listener', + insert_headers={'X-Forwarded-For': 'true'}, + l7policies=[self.ref_l7policy], + listener_id=self.listener_id, + loadbalancer_id=self.loadbalancer_id, + name='super_listener', + protocol='avian', + protocol_port=42, + sni_container_data=['sni_cert_data_1', 'sni_cert_data_2'], + sni_container_refs=[self.sni_container_ref_1, + self.sni_container_ref_2]) + + self.ref_lb = data_models.LoadBalancer( + admin_state_up=False, + description='One great load balancer', + flavor={'cake': 
'chocolate'}, + listeners=[self.ref_listener], + loadbalancer_id=self.loadbalancer_id, + name='favorite_lb', + project_id=self.project_id, + vip_address=self.vip_address, + vip_network_id=self.vip_network_id, + vip_port_id=self.vip_port_id, + vip_subnet_id=self.vip_subnet_id) + + self.ref_flavor_metadata = {"amp_image_tag": "The glance image tag " + "to use for this load balancer."} + self.ref_availability_zone_metadata = { + "compute_zone": "The compute availability zone to use for this " + "loadbalancer.", + "volume_zone": "The volume availability zone to use for this " + "loadbalancer."} + + def test_create_vip_port(self): + vip_dict, additional_vip_dicts = self.driver.create_vip_port( + self.loadbalancer_id, + self.project_id, + self.ref_vip.to_dict(), + None) + + self.assertEqual(self.ref_vip.to_dict(), vip_dict) + + def test_loadbalancer_create(self): + self.driver.loadbalancer_create(self.ref_lb) + + self.assertEqual((self.ref_lb, 'loadbalancer_create'), + self.driver.driver.driverconfig[self.loadbalancer_id]) + + def test_loadbalancer_delete(self): + self.driver.loadbalancer_delete(self.ref_lb, cascade=True) + + self.assertEqual((self.loadbalancer_id, True, 'loadbalancer_delete'), + self.driver.driver.driverconfig[self.loadbalancer_id]) + + def test_loadbalancer_failover(self): + self.driver.loadbalancer_failover(self.loadbalancer_id) + + self.assertEqual((self.loadbalancer_id, 'loadbalancer_failover'), + self.driver.driver.driverconfig[self.loadbalancer_id]) + + def test_loadbalancer_update(self): + self.driver.loadbalancer_update(self.ref_lb, self.ref_lb) + + self.assertEqual((self.ref_lb, 'loadbalancer_update'), + self.driver.driver.driverconfig[self.loadbalancer_id]) + + def test_listener_create(self): + self.driver.listener_create(self.ref_listener) + + self.assertEqual((self.ref_listener, 'listener_create'), + self.driver.driver.driverconfig[self.listener_id]) + + def test_listener_delete(self): + self.driver.listener_delete(self.ref_listener) + + self.assertEqual((self.listener_id, 'listener_delete'), + self.driver.driver.driverconfig[self.listener_id]) + + def test_listener_update(self): + self.driver.listener_update(self.ref_listener, self.ref_listener) + + self.assertEqual((self.ref_listener, 'listener_update'), + self.driver.driver.driverconfig[self.listener_id]) + + def test_pool_create(self): + self.driver.pool_create(self.ref_pool) + + self.assertEqual((self.ref_pool, 'pool_create'), + self.driver.driver.driverconfig[self.pool_id]) + + def test_pool_delete(self): + self.driver.pool_delete(self.ref_pool) + + self.assertEqual((self.pool_id, 'pool_delete'), + self.driver.driver.driverconfig[self.pool_id]) + + def test_pool_update(self): + self.driver.pool_update(self.ref_pool, self.ref_pool) + + self.assertEqual((self.ref_pool, 'pool_update'), + self.driver.driver.driverconfig[self.pool_id]) + + def test_member_create(self): + self.driver.member_create(self.ref_member) + + self.assertEqual((self.ref_member, 'member_create'), + self.driver.driver.driverconfig[self.member_id]) + + def test_member_delete(self): + self.driver.member_delete(self.ref_member) + + self.assertEqual((self.member_id, 'member_delete'), + self.driver.driver.driverconfig[self.member_id]) + + def test_member_update(self): + self.driver.member_update(self.ref_member, self.ref_member) + + self.assertEqual((self.ref_member, 'member_update'), + self.driver.driver.driverconfig[self.member_id]) + + def test_member_batch_update(self): + self.driver.member_batch_update(self.pool_id, [self.ref_member]) + + 
self.assertEqual((self.ref_member, 'member_batch_update'), + self.driver.driver.driverconfig[self.member_id]) + + def test_health_monitor_create(self): + self.driver.health_monitor_create(self.ref_healthmonitor) + + self.assertEqual( + (self.ref_healthmonitor, 'health_monitor_create'), + self.driver.driver.driverconfig[self.healthmonitor_id]) + + def test_health_monitor_delete(self): + self.driver.health_monitor_delete(self.ref_healthmonitor) + + self.assertEqual( + (self.healthmonitor_id, 'health_monitor_delete'), + self.driver.driver.driverconfig[self.healthmonitor_id]) + + def test_health_monitor_update(self): + self.driver.health_monitor_update(self.ref_healthmonitor, + self.ref_healthmonitor) + + self.assertEqual( + (self.ref_healthmonitor, 'health_monitor_update'), + self.driver.driver.driverconfig[self.healthmonitor_id]) + + def test_l7policy_create(self): + self.driver.l7policy_create(self.ref_l7policy) + + self.assertEqual((self.ref_l7policy, 'l7policy_create'), + self.driver.driver.driverconfig[self.l7policy_id]) + + def test_l7policy_delete(self): + self.driver.l7policy_delete(self.ref_l7policy) + + self.assertEqual((self.l7policy_id, 'l7policy_delete'), + self.driver.driver.driverconfig[self.l7policy_id]) + + def test_l7policy_update(self): + self.driver.l7policy_update(self.ref_l7policy, self.ref_l7policy) + + self.assertEqual((self.ref_l7policy, 'l7policy_update'), + self.driver.driver.driverconfig[self.l7policy_id]) + + def test_l7rule_create(self): + self.driver.l7rule_create(self.ref_l7rule) + + self.assertEqual((self.ref_l7rule, 'l7rule_create'), + self.driver.driver.driverconfig[self.l7rule_id]) + + def test_l7rule_delete(self): + self.driver.l7rule_delete(self.ref_l7rule) + + self.assertEqual((self.l7rule_id, 'l7rule_delete'), + self.driver.driver.driverconfig[self.l7rule_id]) + + def test_l7rule_update(self): + self.driver.l7rule_update(self.ref_l7rule, self.ref_l7rule) + + self.assertEqual((self.ref_l7rule, 'l7rule_update'), + self.driver.driver.driverconfig[self.l7rule_id]) + + def test_get_supported_flavor_metadata(self): + metadata = self.driver.get_supported_flavor_metadata() + + self.assertEqual(self.ref_flavor_metadata, metadata) + + def test_validate_flavor(self): + self.driver.validate_flavor(self.ref_flavor_metadata) + + flavor_hash = hash(frozenset(self.ref_flavor_metadata)) + self.assertEqual((self.ref_flavor_metadata, 'validate_flavor'), + self.driver.driver.driverconfig[flavor_hash]) + + def test_get_supported_availability_zone_metadata(self): + metadata = self.driver.get_supported_availability_zone_metadata() + + self.assertEqual(self.ref_availability_zone_metadata, metadata) + + def test_validate_availability_zone(self): + self.driver.validate_availability_zone( + self.ref_availability_zone_metadata) + + az_hash = hash(frozenset(self.ref_availability_zone_metadata)) + self.assertEqual((self.ref_availability_zone_metadata, + 'validate_availability_zone'), + self.driver.driver.driverconfig[az_hash]) diff --git a/octavia/tests/unit/api/drivers/test_driver_factory.py b/octavia/tests/unit/api/drivers/test_driver_factory.py new file mode 100644 index 0000000000..f30f309929 --- /dev/null +++ b/octavia/tests/unit/api/drivers/test_driver_factory.py @@ -0,0 +1,48 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from octavia.api.drivers import driver_factory +from octavia.common import exceptions +import octavia.tests.unit.base as base + + +class TestDriverFactory(base.TestCase): + + def setUp(self): + super().setUp() + + @mock.patch('stevedore.driver.DriverManager') + def test_driver_factory_no_provider(self, mock_drivermgr): + mock_mgr = mock.MagicMock() + mock_drivermgr.return_value = mock_mgr + + driver = driver_factory.get_driver(None) + + self.assertEqual(mock_mgr.driver, driver) + + @mock.patch('stevedore.driver.DriverManager') + def test_driver_factory_failed_to_load_driver(self, mock_drivermgr): + mock_drivermgr.side_effect = Exception('boom') + + self.assertRaises(exceptions.ProviderNotFound, + driver_factory.get_driver, None) + + @mock.patch('stevedore.driver.DriverManager') + def test_driver_factory_not_enabled(self, mock_drivermgr): + + self.assertRaises(exceptions.ProviderNotEnabled, + driver_factory.get_driver, + 'dont-enable-this-fake-driver-name') diff --git a/octavia/tests/unit/api/drivers/test_utils.py b/octavia/tests/unit/api/drivers/test_utils.py new file mode 100644 index 0000000000..3917da44a5 --- /dev/null +++ b/octavia/tests/unit/api/drivers/test_utils.py @@ -0,0 +1,504 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
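+
+# These tests exercise the provider driver translation helpers in
+# octavia.api.drivers.utils, which convert Octavia DB models and API
+# dicts to and from the octavia-lib provider driver data models.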
+import copy +from unittest import mock + +from octavia_lib.api.drivers import data_models as driver_dm +from octavia_lib.api.drivers import exceptions as lib_exceptions +from oslo_utils import uuidutils + +from octavia.api.drivers import utils +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.tests.common import sample_data_models +from octavia.tests.unit import base + + +class TestUtils(base.TestCase): + def setUp(self): + super().setUp() + self.sample_data = sample_data_models.SampleDriverDataModels() + + def test_call_provider(self): + mock_driver_method = mock.MagicMock() + + # Test happy path + utils.call_provider("provider_name", mock_driver_method, + "arg1", foo="arg2") + mock_driver_method.assert_called_with("arg1", foo="arg2") + + # Test driver raising VIPAddressConflict + mock_driver_method.side_effect = lib_exceptions.Conflict + self.assertRaises(exceptions.VIPAddressConflict, + utils.call_provider, "provider_name", + mock_driver_method) + + # Test driver raising DriverError + mock_driver_method.side_effect = lib_exceptions.DriverError + self.assertRaises(exceptions.ProviderDriverError, + utils.call_provider, "provider_name", + mock_driver_method) + + # Test driver raising different types of NotImplementedError + mock_driver_method.side_effect = NotImplementedError + self.assertRaises(exceptions.ProviderNotImplementedError, + utils.call_provider, "provider_name", + mock_driver_method) + mock_driver_method.side_effect = lib_exceptions.NotImplementedError + self.assertRaises(exceptions.ProviderNotImplementedError, + utils.call_provider, "provider_name", + mock_driver_method) + + # Test driver raising UnsupportedOptionError + mock_driver_method.side_effect = ( + lib_exceptions.UnsupportedOptionError) + self.assertRaises(exceptions.ProviderUnsupportedOptionError, + utils.call_provider, "provider_name", + mock_driver_method) + + # Test driver raising ProviderDriverError + mock_driver_method.side_effect = Exception + self.assertRaises(exceptions.ProviderDriverError, + utils.call_provider, "provider_name", + mock_driver_method) + + def test_base_to_provider_dict(self): + + test_dict = {'provisioning_status': constants.ACTIVE, + 'operating_status': constants.ONLINE, + 'provider': 'octavia', + 'created_at': 'now', + 'updated_at': 'then', + 'enabled': True, + 'project_id': 1} + + result_dict = utils._base_to_provider_dict(test_dict, + include_project_id=True) + self.assertEqual({'admin_state_up': True, 'project_id': 1}, + result_dict) + + result_dict = utils._base_to_provider_dict(test_dict, + include_project_id=False) + self.assertEqual({'admin_state_up': True}, + result_dict) + + @mock.patch('octavia.db.repositories.FlavorRepository.' 
+ 'get_flavor_metadata_dict') + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.api.drivers.utils._get_secret_data') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_lb_dict_to_provider_dict(self, mock_load_cert, mock_secret, + mock_get_session, mock_get_flavor): + cert1 = data_models.TLSContainer(certificate='cert 1') + cert2 = data_models.TLSContainer(certificate='cert 2') + cert3 = data_models.TLSContainer(certificate='cert 3') + mock_secret.side_effect = ['X509 POOL CA CERT FILE', + 'X509 POOL CRL FILE', 'ca cert', + 'X509 CRL FILE', 'ca cert', 'X509 CRL FILE', + 'X509 POOL CA CERT FILE', + 'X509 CRL FILE'] + listener_certs = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]} + pool_cert = data_models.TLSContainer(certificate='pool cert') + pool_certs = {'tls_cert': pool_cert, 'sni_certs': []} + mock_load_cert.side_effect = [pool_certs, listener_certs, + listener_certs, listener_certs, + listener_certs] + mock_get_flavor.return_value = {'shaved_ice': 'cherry'} + sg_id = uuidutils.generate_uuid() + test_lb_dict = {'name': 'lb1', + 'project_id': self.sample_data.project_id, + 'vip_subnet_id': self.sample_data.subnet_id, + 'vip_port_id': self.sample_data.port_id, + 'vip_address': self.sample_data.ip_address, + 'vip_network_id': self.sample_data.network_id, + 'vip_qos_policy_id': self.sample_data.qos_policy_id, + 'vip_sg_ids': [sg_id], + 'id': self.sample_data.lb_id, + 'listeners': [], + 'pools': [], + 'description': '', 'admin_state_up': True, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE, + 'flavor_id': 'flavor_id', + 'provider': 'noop_driver'} + ref_listeners = copy.deepcopy(self.sample_data.provider_listeners) + ref_prov_lb_dict = { + 'vip_address': self.sample_data.ip_address, + 'admin_state_up': True, + 'loadbalancer_id': self.sample_data.lb_id, + 'vip_subnet_id': self.sample_data.subnet_id, + 'listeners': ref_listeners, + 'description': '', + 'project_id': self.sample_data.project_id, + 'vip_port_id': self.sample_data.port_id, + 'vip_qos_policy_id': self.sample_data.qos_policy_id, + 'vip_network_id': self.sample_data.network_id, + 'vip_sg_ids': [sg_id], + 'pools': self.sample_data.provider_pools, + 'flavor': {'shaved_ice': 'cherry'}, + 'name': 'lb1'} + vip = data_models.Vip(ip_address=self.sample_data.ip_address, + network_id=self.sample_data.network_id, + port_id=self.sample_data.port_id, + subnet_id=self.sample_data.subnet_id, + qos_policy_id=self.sample_data.qos_policy_id, + sg_ids=[sg_id]) + + provider_lb_dict = utils.lb_dict_to_provider_dict( + test_lb_dict, vip=vip, db_pools=self.sample_data.test_db_pools, + db_listeners=self.sample_data.test_db_listeners) + + self.assertEqual(ref_prov_lb_dict, provider_lb_dict) + + @mock.patch('octavia.db.repositories.FlavorRepository.' 
+                'get_flavor_metadata_dict')
+    @mock.patch('octavia.db.api.get_session')
+    def test_db_loadbalancer_to_provider_loadbalancer(self, mock_get_session,
+                                                      mock_get_flavor):
+        mock_get_flavor.return_value = {'shaved_ice': 'cherry'}
+        vip = data_models.Vip(ip_address=self.sample_data.ip_address,
+                              network_id=self.sample_data.network_id,
+                              port_id=self.sample_data.port_id,
+                              subnet_id=self.sample_data.subnet_id)
+        test_db_lb = data_models.LoadBalancer(id=1, flavor_id='2', vip=vip)
+        provider_lb = utils.db_loadbalancer_to_provider_loadbalancer(
+            test_db_lb)
+        ref_provider_lb = driver_dm.LoadBalancer(
+            loadbalancer_id=1,
+            flavor={'shaved_ice': 'cherry'},
+            vip_address=self.sample_data.ip_address,
+            vip_network_id=self.sample_data.network_id,
+            vip_port_id=self.sample_data.port_id,
+            vip_subnet_id=self.sample_data.subnet_id)
+        self.assertEqual(ref_provider_lb.to_dict(render_unsets=True),
+                         provider_lb.to_dict(render_unsets=True))
+
+    def test_db_listener_to_provider_listener(self):
+        test_db_list = data_models.Listener(id=1)
+        provider_list = utils.db_listener_to_provider_listener(test_db_list)
+        ref_provider_list = driver_dm.Listener(listener_id=1,
+                                               insert_headers={})
+        self.assertEqual(ref_provider_list.to_dict(render_unsets=True),
+                         provider_list.to_dict(render_unsets=True))
+
+    @mock.patch('octavia.api.drivers.utils._get_secret_data')
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_db_listeners_to_provider_listeners(self, mock_load_cert,
+                                                mock_secret):
+        mock_secret.side_effect = ['ca cert', 'X509 CRL FILE',
+                                   'ca cert', 'X509 CRL FILE',
+                                   'ca cert', 'X509 CRL FILE']
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        cert2 = data_models.TLSContainer(certificate='cert 2')
+        cert3 = data_models.TLSContainer(certificate='cert 3')
+        mock_load_cert.return_value = {'tls_cert': cert1,
+                                       'sni_certs': [cert2, cert3]}
+        provider_listeners = utils.db_listeners_to_provider_listeners(
+            self.sample_data.test_db_listeners)
+        ref_listeners = copy.deepcopy(self.sample_data.provider_listeners)
+        self.assertEqual(ref_listeners, provider_listeners)
+
+    @mock.patch('oslo_context.context.RequestContext', return_value=None)
+    def test_get_secret_data_errors(self, mock_context):
+        mock_cert_mngr = mock.MagicMock()
+
+        mock_cert_mngr.get_secret.side_effect = [Exception, Exception]
+
+        # Test for_delete == False path
+        self.assertRaises(exceptions.CertificateRetrievalException,
+                          utils._get_secret_data, mock_cert_mngr,
+                          'fake_project_id', 1)
+
+        # Test for_delete == True path
+        self.assertIsNone(
+            utils._get_secret_data(mock_cert_mngr, 'fake_project_id',
+                                   2, for_delete=True))
+
+    @mock.patch('octavia.api.drivers.utils._get_secret_data')
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_listener_dict_to_provider_dict(self, mock_load_cert,
+                                            mock_secret):
+        mock_secret.side_effect = ['ca cert', 'X509 CRL FILE',
+                                   'X509 POOL CA CERT FILE',
+                                   'X509 POOL CRL FILE']
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        cert2 = data_models.TLSContainer(certificate='cert 2')
+        cert3 = data_models.TLSContainer(certificate='cert 3')
+        listener_certs = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]}
+        pool_cert = data_models.TLSContainer(certificate='pool cert')
+        pool_certs = {'tls_cert': pool_cert, 'sni_certs': []}
+        mock_load_cert.side_effect = [listener_certs, pool_certs]
+        # Two data sources feed this path before it reaches the function
+        # under test: the db_dict from the database and the model_dict
+        # from the API layer. They contain different fields, which is why
+        # test_listener1_dict in the sample data carries only the
+        # client_ca_tls_certificate_id for the client certificate and
+        # none of the other related fields, so those have to be removed.
+        expect_prov = copy.deepcopy(self.sample_data.provider_listener1_dict)
+        expect_pool_prov = copy.deepcopy(self.sample_data.provider_pool1_dict)
+        expect_prov['default_pool'] = expect_pool_prov
+        provider_listener = utils.listener_dict_to_provider_dict(
+            self.sample_data.test_listener1_dict)
+        self.assertEqual(expect_prov, provider_listener)
+
+    @mock.patch('octavia.api.drivers.utils._get_secret_data')
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_listener_dict_to_provider_dict_load_cert_error(
+            self, mock_load_cert, mock_secret):
+        mock_secret.side_effect = ['ca cert', 'X509 CRL FILE',
+                                   'X509 POOL CA CERT FILE',
+                                   'X509 POOL CRL FILE']
+        mock_load_cert.side_effect = [exceptions.OctaviaException,
+                                      Exception]
+
+        # Test load_cert exception for_delete == False path
+        self.assertRaises(exceptions.OctaviaException,
+                          utils.listener_dict_to_provider_dict,
+                          self.sample_data.test_listener1_dict)
+
+    @mock.patch('octavia.api.drivers.utils._get_secret_data')
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_listener_dict_to_provider_dict_load_cert_error_for_delete(
+            self, mock_load_cert, mock_secret):
+        mock_secret.side_effect = ['ca cert', 'X509 CRL FILE',
+                                   'X509 POOL CA CERT FILE',
+                                   'X509 POOL CRL FILE']
+        mock_load_cert.side_effect = [Exception]
+
+        # Test load_cert exception for_delete == True path
+        expect_prov = copy.deepcopy(self.sample_data.provider_listener1_dict)
+        expect_pool_prov = copy.deepcopy(self.sample_data.provider_pool1_dict)
+        del expect_pool_prov['tls_container_data']
+        expect_prov['default_pool'] = expect_pool_prov
+        del expect_prov['default_tls_container_data']
+        del expect_prov['sni_container_data']
+        provider_listener = utils.listener_dict_to_provider_dict(
+            self.sample_data.test_listener1_dict, for_delete=True)
+        args, kwargs = mock_secret.call_args
+        self.assertEqual(kwargs['for_delete'], True)
+        self.assertEqual(expect_prov, provider_listener)
+
+    @mock.patch('octavia.api.drivers.utils._get_secret_data')
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_listener_dict_to_provider_dict_SNI(self, mock_load_cert,
+                                                mock_secret):
+        mock_secret.return_value = 'ca cert'
+        cert1 = data_models.TLSContainer(certificate='cert 1')
+        cert2 = data_models.TLSContainer(certificate='cert 2')
+        cert3 = data_models.TLSContainer(certificate='cert 3')
+        mock_load_cert.return_value = {'tls_cert': cert1,
+                                       'sni_certs': [cert2, cert3]}
+        # Test with bad SNI content
+        test_listener = copy.deepcopy(self.sample_data.test_listener1_dict)
+        test_listener['sni_containers'] = [()]
+        self.assertRaises(exceptions.ValidationException,
+                          utils.listener_dict_to_provider_dict,
+                          test_listener)
+
+    @mock.patch('octavia.api.drivers.utils._get_secret_data')
+    @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
+    def test_db_pool_to_provider_pool(self, mock_load_cert, mock_secret):
+        pool_cert = data_models.TLSContainer(certificate='pool cert')
+        mock_load_cert.return_value = {'tls_cert': pool_cert,
+                                       'sni_certs': None,
+                                       'client_ca_cert': None}
+        mock_secret.side_effect = ['X509 POOL CA CERT FILE',
+                                   'X509 POOL CRL FILE']
+        provider_pool = utils.db_pool_to_provider_pool(
+            self.sample_data.db_pool1)
+        
self.assertEqual(self.sample_data.provider_pool1, provider_pool) + + @mock.patch('octavia.api.drivers.utils._get_secret_data') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_db_pool_to_provider_pool_partial(self, mock_load_cert, + mock_secret): + pool_cert = data_models.TLSContainer(certificate='pool cert') + mock_load_cert.return_value = {'tls_cert': pool_cert, + 'sni_certs': None, + 'client_ca_cert': None} + mock_secret.side_effect = ['X509 POOL CA CERT FILE', + 'X509 POOL CRL FILE'] + test_db_pool = self.sample_data.db_pool1 + test_db_pool.members = [self.sample_data.db_member1] + provider_pool = utils.db_pool_to_provider_pool(test_db_pool) + self.assertEqual(self.sample_data.provider_pool1, provider_pool) + + @mock.patch('octavia.api.drivers.utils._get_secret_data') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_db_pools_to_provider_pools(self, mock_load_cert, mock_secret): + pool_cert = data_models.TLSContainer(certificate='pool cert') + mock_load_cert.return_value = {'tls_cert': pool_cert, + 'sni_certs': None, + 'client_ca_cert': None} + mock_secret.side_effect = ['X509 POOL CA CERT FILE', + 'X509 POOL CRL FILE'] + provider_pools = utils.db_pools_to_provider_pools( + self.sample_data.test_db_pools) + self.assertEqual(self.sample_data.provider_pools, provider_pools) + + @mock.patch('octavia.api.drivers.utils._get_secret_data') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_pool_dict_to_provider_dict(self, mock_load_cert, mock_secret): + pool_cert = data_models.TLSContainer(certificate='pool cert') + mock_load_cert.return_value = {'tls_cert': pool_cert, + 'sni_certs': None, + 'client_ca_cert': None} + mock_secret.side_effect = ['X509 POOL CA CERT FILE', + 'X509 POOL CRL FILE'] + expect_prov = copy.deepcopy(self.sample_data.provider_pool1_dict) + expect_prov.pop('crl_container_ref') + provider_pool_dict = utils.pool_dict_to_provider_dict( + self.sample_data.test_pool1_dict) + provider_pool_dict.pop('crl_container_ref') + self.assertEqual(expect_prov, provider_pool_dict) + + @mock.patch('octavia.api.drivers.utils._get_secret_data') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_pool_dict_to_provider_dict_load_cert_error( + self, mock_load_cert, mock_secret): + + mock_load_cert.side_effect = [exceptions.OctaviaException, + Exception] + + # Test load_cert exception for_delete == False path + self.assertRaises(exceptions.OctaviaException, + utils.pool_dict_to_provider_dict, + self.sample_data.test_pool1_dict) + + @mock.patch('octavia.api.drivers.utils._get_secret_data') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_pool_dict_to_provider_dict_load_cert_error_for_delete( + self, mock_load_cert, mock_secret): + + mock_load_cert.side_effect = [Exception] + + # Test load_cert exception for_delete == True path + mock_secret.side_effect = ['X509 POOL CA CERT FILE', + 'X509 POOL CRL FILE'] + expect_prov = copy.deepcopy(self.sample_data.provider_pool1_dict) + expect_prov.pop('crl_container_ref') + del expect_prov['tls_container_data'] + provider_pool_dict = utils.pool_dict_to_provider_dict( + self.sample_data.test_pool1_dict, for_delete=True) + provider_pool_dict.pop('crl_container_ref') + args, kwargs = mock_secret.call_args + self.assertEqual(kwargs['for_delete'], True) + self.assertEqual(expect_prov, provider_pool_dict) + + def test_db_HM_to_provider_HM(self): + provider_hm = 
utils.db_HM_to_provider_HM(self.sample_data.db_hm1) + self.assertEqual(self.sample_data.provider_hm1, provider_hm) + + def test_hm_dict_to_provider_dict(self): + provider_hm_dict = utils.hm_dict_to_provider_dict( + self.sample_data.test_hm1_dict) + self.assertEqual(self.sample_data.provider_hm1_dict, provider_hm_dict) + + def test_HM_to_provider_HM_with_http_version_and_domain_name(self): + provider_hm = utils.db_HM_to_provider_HM(self.sample_data.db_hm2) + self.assertEqual(self.sample_data.provider_hm2, provider_hm) + + provider_hm_dict = utils.hm_dict_to_provider_dict( + self.sample_data.test_hm2_dict) + self.assertEqual(self.sample_data.provider_hm2_dict, provider_hm_dict) + + def test_hm_dict_to_provider_dict_partial(self): + provider_hm_dict = utils.hm_dict_to_provider_dict({'id': 1}) + self.assertEqual({'healthmonitor_id': 1}, provider_hm_dict) + + def test_db_members_to_provider_members(self): + provider_members = utils.db_members_to_provider_members( + self.sample_data.db_pool1_members) + self.assertEqual(self.sample_data.provider_pool1_members, + provider_members) + + def test_member_dict_to_provider_dict(self): + provider_member_dict = utils.member_dict_to_provider_dict( + self.sample_data.test_member1_dict) + self.assertEqual(self.sample_data.provider_member1_dict, + provider_member_dict) + + def test_db_l7policies_to_provider_l7policies(self): + provider_rules = utils.db_l7policies_to_provider_l7policies( + self.sample_data.db_l7policies) + self.assertEqual(self.sample_data.provider_l7policies, provider_rules) + + def test_l7policy_dict_to_provider_dict(self): + provider_l7policy_dict = utils.l7policy_dict_to_provider_dict( + self.sample_data.test_l7policy1_dict) + self.assertEqual(self.sample_data.provider_l7policy1_dict, + provider_l7policy_dict) + + def test_db_l7rules_to_provider_l7rules(self): + provider_rules = utils.db_l7rules_to_provider_l7rules( + self.sample_data.db_l7Rules) + self.assertEqual(self.sample_data.provider_rules, provider_rules) + + def test_l7rule_dict_to_provider_dict(self): + provider_rules_dict = utils.l7rule_dict_to_provider_dict( + self.sample_data.test_l7rule1_dict) + self.assertEqual(self.sample_data.provider_l7rule1_dict, + provider_rules_dict) + + def test_vip_dict_to_provider_dict(self): + new_vip_dict = utils.vip_dict_to_provider_dict( + self.sample_data.test_vip_dict) + self.assertEqual(self.sample_data.provider_vip_dict, new_vip_dict) + + def test_vip_dict_to_provider_dict_partial(self): + new_vip_dict = utils.vip_dict_to_provider_dict( + {'ip_address': '192.0.2.44'}) + self.assertEqual({'vip_address': '192.0.2.44'}, new_vip_dict) + + def test_additional_vip_dict_to_provider_dict(self): + new_additional_vip_dict = ( + utils.additional_vip_dict_to_provider_dict( + self.sample_data.test_additional_vip_dict)) + self.assertEqual(self.sample_data.provider_additional_vip_dict, + new_additional_vip_dict) + + vip_dict = { + constants.IP_ADDRESS: mock.Mock()} + new_additional_vip_dict = ( + utils.additional_vip_dict_to_provider_dict(vip_dict)) + expected_dict = vip_dict + self.assertEqual(expected_dict, new_additional_vip_dict) + + def test_provider_vip_dict_to_vip_obj(self): + new_provider_vip = utils.provider_vip_dict_to_vip_obj( + self.sample_data.provider_vip_dict) + self.assertEqual(self.sample_data.db_vip, new_provider_vip) + + def test_provider_additional_vip_to_vip_obj(self): + new_additional_vip = ( + utils.provider_additional_vip_dict_to_additional_vip_obj( + self.sample_data.provider_additional_vip_dict)) + 
self.assertEqual(self.sample_data.db_additional_vip, + new_additional_vip) + + vip_dict = {constants.IP_ADDRESS: mock.Mock()} + new_additional_vip = ( + utils.provider_additional_vip_dict_to_additional_vip_obj( + vip_dict)) + expected_obj = data_models.AdditionalVip( + ip_address=vip_dict[constants.IP_ADDRESS]) + self.assertEqual(expected_obj, new_additional_vip) + + vip_dict = {constants.SUBNET_ID: mock.Mock()} + new_additional_vip = ( + utils.provider_additional_vip_dict_to_additional_vip_obj( + vip_dict)) + expected_obj = data_models.AdditionalVip( + subnet_id=vip_dict[constants.SUBNET_ID]) + self.assertEqual(expected_obj, new_additional_vip) diff --git a/octavia/tests/unit/api/test_config.py b/octavia/tests/unit/api/test_config.py new file mode 100644 index 0000000000..4258c5d32b --- /dev/null +++ b/octavia/tests/unit/api/test_config.py @@ -0,0 +1,35 @@ +# Copyright 2022 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from octavia.api.common import hooks +from octavia.api.config import app +from octavia.api.config import wsme +from octavia.tests.unit import base + + +class TestConfig(base.TestCase): + + def test_app_config(self): + self.assertEqual( + 'octavia.api.root_controller.RootController', app['root']) + self.assertEqual(['octavia.api'], app['modules']) + expected_hook_types = [ + hooks.ContentTypeHook, + hooks.ContextHook, + hooks.QueryParametersHook + ] + self.assertEqual(expected_hook_types, list(map(type, app['hooks']))) + self.assertFalse(app['debug']) + + def test_wsme_config(self): + self.assertFalse(wsme['debug']) diff --git a/octavia/tests/unit/api/v2/__init__.py b/octavia/tests/unit/api/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/v2/types/__init__.py b/octavia/tests/unit/api/v2/types/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/api/v2/types/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/api/v2/types/base.py b/octavia/tests/unit/api/v2/types/base.py new file mode 100644 index 0000000000..d077ede4e5 --- /dev/null +++ b/octavia/tests/unit/api/v2/types/base.py @@ -0,0 +1,225 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import uuidutils +from wsme import exc +from wsme.rest import json as wsme_json + +from octavia.api.common import types as base_type +from octavia.common import constants +from octavia.tests.unit import base + + +def build_body(mandatory_fields, extra_attributes): + body = {} + for key in mandatory_fields: + body[key] = mandatory_fields[key] + for key in extra_attributes: + body[key] = extra_attributes[key] + return body + + +class BaseTypesTest(base.TestCase): + _type = base_type.BaseType + _mandatory_fields = {} + + +class BaseTestUuid(base.TestCase): + + def assert_uuid_attr(self, attr): + kwargs = {attr: uuidutils.generate_uuid()} + self._type(**kwargs) + + def assert_uuid_attr_fail_with_integer(self, attr): + kwargs = {attr: 1} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + def assert_uuid_attr_fail_with_short_str(self, attr): + kwargs = {attr: '12345'} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + def assert_uuid_attr_fail_with_shorter_than_uuid(self, attr): + kwargs = {attr: uuidutils.generate_uuid()[1:]} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + def assert_uuid_attr_fail_with_longer_than_uuid(self, attr): + kwargs = {attr: uuidutils.generate_uuid() + "0"} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + +class BaseTestString(base.TestCase): + + def _default_min_max_lengths(self, min_length=None, max_length=None): + if max_length is None: + if min_length is None: + max_length = 255 + min_length = 2 + else: + max_length = min_length + 1 + else: + if min_length is None: + min_length = max_length - 1 + return min_length, max_length + + def assert_string_attr(self, attr, min_length=None, max_length=None): + min_length, max_length = self._default_min_max_lengths(min_length, + max_length) + string_val = 'a' * (max_length - 1) + kwargs = {attr: string_val} + self._type(**kwargs) + + def assert_string_attr_min_length(self, attr, min_length): + min_length, max_length = self._default_min_max_lengths(min_length) + string_val = 'a' * (min_length - 1) + kwargs = {attr: string_val} + # No point in testing if min_length is <= 0 + if min_length > 0: + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + def assert_string_attr_max_length(self, attr, max_length=None): + min_length, max_length = self._default_min_max_lengths(max_length) + string_val = 'a' * (max_length + 1) + kwargs = {attr: string_val} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + +class BaseTestBool(base.TestCase): + + def assert_bool_attr(self, attr): + kwargs = {attr: True} + self.assertIsNotNone(self._type(**kwargs)) + 
kwargs = {attr: False} + self.assertIsNotNone(self._type(**kwargs)) + + def assert_bool_attr_non_bool(self, attr): + kwargs = {attr: 'test'} + self.assertRaises(exc.InvalidInput, self._type, **kwargs) + + +class TestIdMixin(BaseTestUuid): + id_attr = 'id' + + def test_id(self): + self.assert_uuid_attr(self.id_attr) + self.assert_uuid_attr_fail_with_integer(self.id_attr) + self.assert_uuid_attr_fail_with_short_str(self.id_attr) + self.assert_uuid_attr_fail_with_shorter_than_uuid(self.id_attr) + self.assert_uuid_attr_fail_with_longer_than_uuid(self.id_attr) + + def test_id_readonly(self): + body = build_body(self._mandatory_fields, + {self.id_attr: uuidutils.generate_uuid()}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + +class TestProjectIdMixin(BaseTestUuid): + project_id_attr = 'project_id' + + def test_project_id(self): + self.assert_uuid_attr(self.project_id_attr) + self.assert_uuid_attr_fail_with_integer(self.project_id_attr) + self.assert_uuid_attr_fail_with_short_str(self.project_id_attr) + self.assert_uuid_attr_fail_with_shorter_than_uuid(self.project_id_attr) + self.assert_uuid_attr_fail_with_longer_than_uuid(self.project_id_attr) + + def test_project_id_readonly(self): + body = build_body(self._mandatory_fields, + {self.project_id_attr: uuidutils.generate_uuid()}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + +class TestNameMixin(BaseTestString): + name_attr = 'name' + + def test_name(self): + self.assert_string_attr(self.name_attr, min_length=0, max_length=255) + self.assert_string_attr_min_length(self.name_attr, 0) + self.assert_string_attr_max_length(self.name_attr, 255) + + def test_editable_name(self): + name = "Name" + body = build_body(self._mandatory_fields, {self.name_attr: name}) + type_instance = wsme_json.fromjson(self._type, body) + self.assertEqual(name, type_instance.name) + + +class TestDescriptionMixin(BaseTestString): + description_attr = 'description' + + def test_description(self): + self.assert_string_attr(self.description_attr, min_length=0, + max_length=255) + self.assert_string_attr_min_length(self.description_attr, 0) + self.assert_string_attr_max_length(self.description_attr, 255) + + def test_editable_description(self): + description = "Description" + body = build_body(self._mandatory_fields, + {self.description_attr: description}) + type_instance = wsme_json.fromjson(self._type, body) + self.assertEqual(description, type_instance.description) + + +class TestEnabledMixin(BaseTestBool): + enabled_attr = 'enabled' + + def test_enabled(self): + self.assert_bool_attr(self.enabled_attr) + self.assert_bool_attr_non_bool(self.enabled_attr) + + def test_default_enabled_true(self): + body = build_body(self._mandatory_fields, {}) + type_instance = wsme_json.fromjson(self._type, body) + self.assertTrue(type_instance.enabled) + + def test_editable_enabled(self): + body = build_body(self._mandatory_fields, {"enabled": False}) + type_instance = wsme_json.fromjson(self._type, body) + self.assertFalse(type_instance.enabled) + + +class TestProvisioningStatusMixin(BaseTestString): + provisioning_attr = 'provisioning_status' + + def test_provisioning_status(self): + self.assert_string_attr(self.provisioning_attr, min_length=0, + max_length=16) + self.assert_string_attr_min_length(self.provisioning_attr, 0) + self.assert_string_attr_max_length(self.provisioning_attr, 16) + + def test_provisioning_status_readonly(self): + status = constants.ACTIVE + body = build_body(self._mandatory_fields, + 
{self.provisioning_attr: status}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + +class TestOperatingStatusMixin(BaseTestString): + operating_attr = 'operating_status' + + def test_operating_status(self): + self.assert_string_attr(self.operating_attr, min_length=0, + max_length=16) + self.assert_string_attr_min_length(self.operating_attr, 0) + self.assert_string_attr_max_length(self.operating_attr, 16) + + def test_operating_status_readonly(self): + status = constants.ONLINE + body = build_body(self._mandatory_fields, + {self.operating_attr: status}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) diff --git a/octavia/tests/unit/api/v2/types/test_availability_zone_profile.py b/octavia/tests/unit/api/v2/types/test_availability_zone_profile.py new file mode 100644 index 0000000000..b27e0041dc --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_availability_zone_profile.py @@ -0,0 +1,70 @@ +# Copyright 2019 Verizon Media +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import exc +from wsme.rest import json as wsme_json + +from octavia.api.v2.types import availability_zone_profile as azp_type +from octavia.common import constants +from octavia.tests.unit.api.common import base + + +class TestAvailabilityZoneProfile: + + _type = None + + def test_availability_zone_profile(self): + body = {"name": "test_name", "provider_name": "test1", + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + availability_zone = wsme_json.fromjson(self._type, body) + self.assertEqual(availability_zone.name, body["name"]) + + def test_invalid_name(self): + body = {"name": 0} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_name_length(self): + body = {"name": "x" * 256} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_provider_name_length(self): + body = {"name": "x" * 250, + "provider_name": "X" * 256} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + def test_name_mandatory(self): + body = {"provider_name": "test1", + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_provider_name_mandatory(self): + body = {"name": "test_name", + constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_meta_mandatory(self): + body = {"name": "test_name", "provider_name": "test1"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestAvailabilityZoneProfilePOST(base.BaseTypesTest, + TestAvailabilityZoneProfile): + + _type = azp_type.AvailabilityZoneProfilePOST diff --git a/octavia/tests/unit/api/v2/types/test_availability_zones.py b/octavia/tests/unit/api/v2/types/test_availability_zones.py new file mode 100644 index 0000000000..b48d95698b --- /dev/null +++ 
b/octavia/tests/unit/api/v2/types/test_availability_zones.py @@ -0,0 +1,87 @@ +# Copyright 2017 Walmart Stores Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import uuidutils +from wsme import exc +from wsme.rest import json as wsme_json + +from octavia.api.v2.types import availability_zones as availability_zone_type +from octavia.tests.unit.api.common import base + + +class TestAvailabilityZone: + + _type = None + + def test_availability_zone(self): + body = {"name": "test_name", "description": "test_description", + "availability_zone_profile_id": uuidutils.generate_uuid()} + availability_zone = wsme_json.fromjson(self._type, body) + self.assertTrue(availability_zone.enabled) + + def test_invalid_name(self): + body = {"name": 0, + "availability_zone_profile_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_name_length(self): + body = {"name": "x" * 256, + "availability_zone_profile_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_description(self): + body = {"availability_zone_profile_id": uuidutils.generate_uuid(), + "description": 0, "name": "test"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_description_length(self): + body = {"name": "x" * 250, + "availability_zone_profile_id": uuidutils.generate_uuid(), + "description": "0" * 256} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_enabled(self): + body = {"name": "test_name", + "availability_zone_profile_id": uuidutils.generate_uuid(), + "enabled": "notvalid"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + + def test_name_mandatory(self): + body = {"description": "xyz", + "availability_zone_profile_id": uuidutils.generate_uuid(), + "enabled": True} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_availability_zone_profile_id_mandatory(self): + body = {"name": "test_name"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestAvailabilityZonePOST(base.BaseTypesTest, TestAvailabilityZone): + + _type = availability_zone_type.AvailabilityZonePOST + + def test_non_uuid_project_id(self): + body = {"name": "test_name", "description": "test_description", + "availability_zone_profile_id": uuidutils.generate_uuid()} + lb = wsme_json.fromjson(self._type, body) + self.assertEqual(lb.availability_zone_profile_id, + body['availability_zone_profile_id']) diff --git a/octavia/tests/unit/api/v2/types/test_flavor_profile.py b/octavia/tests/unit/api/v2/types/test_flavor_profile.py new file mode 100644 index 0000000000..5e34893337 --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_flavor_profile.py @@ -0,0 +1,69 @@ +# Copyright 2017 Walmart Stores Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import exc +from wsme.rest import json as wsme_json + +from octavia.api.v2.types import flavor_profile as fp_type +from octavia.common import constants +from octavia.tests.unit.api.common import base + + +class TestFlavorProfile: + + _type = None + + def test_flavor_profile(self): + body = {"name": "test_name", "provider_name": "test1", + constants.FLAVOR_DATA: '{"hello": "world"}'} + flavor = wsme_json.fromjson(self._type, body) + self.assertEqual(flavor.name, body["name"]) + + def test_invalid_name(self): + body = {"name": 0} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_name_length(self): + body = {"name": "x" * 256} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_provider_name_length(self): + body = {"name": "x" * 250, + "provider_name": "X" * 256} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + def test_name_mandatory(self): + body = {"provider_name": "test1", + constants.FLAVOR_DATA: '{"hello": "world"}'} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_provider_name_mandatory(self): + body = {"name": "test_name", + constants.FLAVOR_DATA: '{"hello": "world"}'} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_meta_mandatory(self): + body = {"name": "test_name", "provider_name": "test1"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestFlavorProfilePOST(base.BaseTypesTest, TestFlavorProfile): + + _type = fp_type.FlavorProfilePOST diff --git a/octavia/tests/unit/api/v2/types/test_flavors.py b/octavia/tests/unit/api/v2/types/test_flavors.py new file mode 100644 index 0000000000..9f10f92334 --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_flavors.py @@ -0,0 +1,85 @@ +# Copyright 2017 Walmart Stores Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
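+
+# Input validation tests for the flavor API types: mandatory fields,
+# string length limits, and boolean coercion, all exercised through
+# wsme JSON deserialization (wsme_json.fromjson).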
+ +from oslo_utils import uuidutils +from wsme import exc +from wsme.rest import json as wsme_json + +from octavia.api.v2.types import flavors as flavor_type +from octavia.tests.unit.api.common import base + + +class TestFlavor: + + _type = None + + def test_flavor(self): + body = {"name": "test_name", "description": "test_description", + "flavor_profile_id": uuidutils.generate_uuid()} + flavor = wsme_json.fromjson(self._type, body) + self.assertTrue(flavor.enabled) + + def test_invalid_name(self): + body = {"name": 0, "flavor_profile_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_name_length(self): + body = {"name": "x" * 256, + "flavor_profile_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_description(self): + body = {"flavor_profile_id": uuidutils.generate_uuid(), + "description": 0, "name": "test"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_description_length(self): + body = {"name": "x" * 250, + "flavor_profile_id": uuidutils.generate_uuid(), + "description": "0" * 256} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_enabled(self): + body = {"name": "test_name", + "flavor_profile_id": uuidutils.generate_uuid(), + "enabled": "notvalid"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + + def test_name_mandatory(self): + body = {"description": "xyz", + "flavor_profile_id": uuidutils.generate_uuid(), + "enabled": True} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_flavor_profile_id_mandatory(self): + body = {"name": "test_name"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestFlavorPOST(base.BaseTypesTest, TestFlavor): + + _type = flavor_type.FlavorPOST + + def test_non_uuid_project_id(self): + body = {"name": "test_name", "description": "test_description", + "flavor_profile_id": uuidutils.generate_uuid()} + lb = wsme_json.fromjson(self._type, body) + self.assertEqual(lb.flavor_profile_id, body['flavor_profile_id']) diff --git a/octavia/tests/unit/api/v2/types/test_health_monitor.py b/octavia/tests/unit/api/v2/types/test_health_monitor.py new file mode 100644 index 0000000000..81aa3b6aec --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_health_monitor.py @@ -0,0 +1,208 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
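+
+# Input validation tests for the health monitor API types. The shared
+# TestHealthMonitor mixin adds the POST-only mandatory fields when the
+# type under test is HealthMonitorPOST, so the same cases run for PUT.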
+
+from oslo_utils import uuidutils
+from wsme import exc
+from wsme.rest import json as wsme_json
+from wsme import types as wsme_types
+
+from octavia.api.v2.types import health_monitor as hm_type
+from octavia.common import constants
+from octavia.tests.unit.api.v2.types import base
+
+
+class TestHealthMonitor:
+
+    _type = None
+
+    def test_invalid_type(self):
+        body = {"delay": 1, "timeout": 1, "max_retries": 1}
+        if self._type is hm_type.HealthMonitorPOST:
+            body.update({"type": 1, "pool_id": uuidutils.generate_uuid()})
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_invalid_delay(self):
+        body = {"delay": "one", "timeout": 1, "max_retries": 1}
+        if self._type is hm_type.HealthMonitorPOST:
+            body.update({"type": constants.PROTOCOL_HTTP,
+                         "pool_id": uuidutils.generate_uuid()})
+        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
+
+    def test_invalid_timeout(self):
+        body = {"delay": 1, "timeout": "one", "max_retries": 1}
+        if self._type is hm_type.HealthMonitorPOST:
+            body.update({"type": constants.PROTOCOL_HTTP,
+                         "pool_id": uuidutils.generate_uuid()})
+        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
+
+    def test_invalid_max_retries_down(self):
+        body = {"delay": 1, "timeout": 1, "max_retries": 1,
+                "max_retries_down": "one"}
+        if self._type is hm_type.HealthMonitorPOST:
+            body.update({"type": constants.PROTOCOL_HTTP,
+                         "pool_id": uuidutils.generate_uuid()})
+        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
+
+    def test_invalid_max_retries(self):
+        body = {"delay": 1, "timeout": 1, "max_retries": "one"}
+        if self._type is hm_type.HealthMonitorPOST:
+            body.update({"type": constants.PROTOCOL_HTTP,
+                         "pool_id": uuidutils.generate_uuid()})
+        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
+
+    def test_invalid_http_method(self):
+        body = {"delay": 1, "timeout": 1, "max_retries": 1,
+                "http_method": 1}
+        if self._type is hm_type.HealthMonitorPOST:
+            body.update({"type": constants.PROTOCOL_HTTP,
+                         "pool_id": uuidutils.generate_uuid()})
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_invalid_url_path(self):
+        body = {"delay": 1, "timeout": 1, "max_retries": 1, "url_path": 1}
+        if self._type is hm_type.HealthMonitorPOST:
+            body.update({"type": constants.PROTOCOL_HTTP,
+                         "pool_id": uuidutils.generate_uuid()})
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson,
+                          self._type, body)
+
+    def test_invalid_url_path_with_url(self):
+        body = {"delay": 1, "timeout": 1, "max_retries": 1,
+                "url_path": 'https://www.openstack.org/'}
+        if self._type is hm_type.HealthMonitorPOST:
+            body.update({"type": constants.PROTOCOL_HTTP,
+                         "pool_id": uuidutils.generate_uuid()})
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson,
+                          self._type, body)
+
+    def test_invalid_url_path_no_leading_slash(self):
+        body = {"delay": 1, "timeout": 1, "max_retries": 1,
+                "url_path": 'blah'}
+        if self._type is hm_type.HealthMonitorPOST:
+            body.update({"type": constants.PROTOCOL_HTTP,
+                         "pool_id": uuidutils.generate_uuid()})
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson,
+                          self._type, body)
+
+    def test_invalid_expected_codes(self):
+        body = {"delay": 1, "timeout": 1, "max_retries": 1,
+                "expected_codes": "lol"}
+        if self._type is hm_type.HealthMonitorPOST:
+            body.update({"type": constants.PROTOCOL_HTTP,
+                         "pool_id": uuidutils.generate_uuid()})
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def 
test_invalid_tags(self): + body = {"tags": "invalid_tag"} + if self._type is hm_type.HealthMonitorPOST: + body.update({"type": constants.PROTOCOL_HTTP, + "pool_id": uuidutils.generate_uuid(), + "delay": 1, "timeout": 1, "max_retries": 1}) + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"tags": [1, 2]} + if self._type is hm_type.HealthMonitorPOST: + body.update({"type": constants.PROTOCOL_HTTP, + "pool_id": uuidutils.generate_uuid(), + "delay": 1, "timeout": 1, "max_retries": 1}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestHealthMonitorPOST(base.BaseTypesTest, TestHealthMonitor): + + _type = hm_type.HealthMonitorPOST + + def test_health_monitor(self): + body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1, + "timeout": 1, "max_retries_down": 1, "max_retries": 1, + "pool_id": uuidutils.generate_uuid(), + "tags": ['test_tag']} + hm = wsme_json.fromjson(self._type, body) + self.assertTrue(hm.admin_state_up) + + def test_type_mandatory(self): + body = {"delay": 80, "timeout": 1, "max_retries": 1, + "pool_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_delay_mandatory(self): + body = {"type": constants.HEALTH_MONITOR_HTTP, "timeout": 1, + "max_retries": 1, "pool_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_timeout_mandatory(self): + body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1, + "max_retries": 1, "pool_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_max_retries_mandatory(self): + body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1, + "timeout": 1, "pool_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_default_health_monitor_values(self): + # http_method = 'GET' + # url_path = '/' + # expected_codes = '200' + # max_retries_down = 3 + # admin_state_up = True + # The above are not required but should have the above example defaults + body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1, + "timeout": 1, "max_retries": 1, + "pool_id": uuidutils.generate_uuid()} + hmpost = wsme_json.fromjson(self._type, body) + self.assertEqual(wsme_types.Unset, hmpost.http_method) + self.assertEqual(wsme_types.Unset, hmpost.url_path) + self.assertEqual(wsme_types.Unset, hmpost.expected_codes) + self.assertEqual(3, hmpost.max_retries_down) + self.assertTrue(hmpost.admin_state_up) + + def test_url_path_with_query_and_fragment(self): + url_path = "/v2/index?a=12,b=34#123dd" + body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1, + "timeout": 1, "max_retries": 1, + "pool_id": uuidutils.generate_uuid(), + "url_path": url_path} + hmpost = wsme_json.fromjson(self._type, body) + self.assertEqual(wsme_types.Unset, hmpost.http_method) + self.assertEqual(url_path, hmpost.url_path) + self.assertEqual(wsme_types.Unset, hmpost.expected_codes) + self.assertEqual(3, hmpost.max_retries_down) + self.assertTrue(hmpost.admin_state_up) + + def test_non_uuid_project_id(self): + body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1, + "timeout": 1, "max_retries_down": 1, "max_retries": 1, + "project_id": "non-uuid", + "pool_id": uuidutils.generate_uuid()} + hm = wsme_json.fromjson(self._type, body) + self.assertEqual(hm.project_id, body['project_id']) + + +class TestHealthMonitorPUT(base.BaseTypesTest, TestHealthMonitor): + 
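+    # Unlike POST, the PUT type applies no defaults: fields omitted from
+    # the request body remain wsme_types.Unset, as test_health_monitor
+    # below checks for admin_state_up.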
+ + _type = hm_type.HealthMonitorPUT + + def test_health_monitor(self): + body = {"http_method": constants.HEALTH_MONITOR_HTTP_METHOD_HEAD, + "tags": ['test_tag']} + hm = wsme_json.fromjson(self._type, body) + self.assertEqual(wsme_types.Unset, hm.admin_state_up) diff --git a/octavia/tests/unit/api/v2/types/test_l7policy.py b/octavia/tests/unit/api/v2/types/test_l7policy.py new file mode 100644 index 0000000000..c0eddaf249 --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_l7policy.py @@ -0,0 +1,172 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import uuidutils +from wsme import exc +from wsme.rest import json as wsme_json +from wsme import types as wsme_types + +from octavia.api.v2.types import l7policy as l7policy_type +from octavia.common import constants +from octavia.tests.unit.api.common import base + + +class TestL7PolicyPOST(base.BaseTypesTest): + + _type = l7policy_type.L7PolicyPOST + + def setUp(self): + super().setUp() + self.listener_id = uuidutils.generate_uuid() + + def test_l7policy(self): + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REJECT, + "tags": ['test_tag']} + l7policy = wsme_json.fromjson(self._type, body) + self.assertEqual(self.listener_id, l7policy.listener_id) + self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position) + self.assertEqual(wsme_types.Unset, l7policy.redirect_url) + self.assertEqual(wsme_types.Unset, l7policy.redirect_pool_id) + self.assertTrue(l7policy.admin_state_up) + + def test_action_mandatory(self): + body = {"listener_id": self.listener_id} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_listener_id_mandatory(self): + body = {"action": constants.L7POLICY_ACTION_REJECT} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_action(self): + body = {"listener_id": self.listener_id, + "action": "test"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_with_redirect_url(self): + url = "http://www.example.com/" + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REDIRECT_TO_URL, + "redirect_url": url} + l7policy = wsme_json.fromjson(self._type, body) + self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position) + self.assertEqual(url, l7policy.redirect_url) + self.assertEqual(wsme_types.Unset, l7policy.redirect_pool_id) + + def test_invalid_position(self): + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REJECT, + "position": "notvalid"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + + def test_invalid_tags(self): + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REJECT, + "tags": "invalid_tag"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REJECT,
+ "tags": [1, 2]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_l7policy_min_position(self): + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REJECT, + "position": constants.MIN_POLICY_POSITION - 1} + self.assertRaises( + exc.InvalidInput, wsme_json.fromjson, self._type, body) + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REJECT, + "position": constants.MIN_POLICY_POSITION} + l7policy = wsme_json.fromjson(self._type, body) + self.assertEqual(constants.MIN_POLICY_POSITION, l7policy.position) + + def test_l7policy_max_position(self): + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REJECT, + "position": constants.MAX_POLICY_POSITION + 1} + self.assertRaises( + exc.InvalidInput, wsme_json.fromjson, self._type, body) + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REJECT, + "position": constants.MAX_POLICY_POSITION} + l7policy = wsme_json.fromjson(self._type, body) + self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position) + + def test_invalid_admin_state_up(self): + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REJECT, + "admin_state_up": "notvalid"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + + def test_invalid_url(http://github.com/self): + body = {"listener_id": self.listener_id, + "action": constants.L7POLICY_ACTION_REDIRECT_TO_URL, + "redirect_url": "notvalid"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestL7PolicyPUT(base.BaseTypesTest): + + _type = l7policy_type.L7PolicyPUT + + def test_l7policy(self): + body = {"action": constants.L7POLICY_ACTION_REJECT, + "position": constants.MIN_POLICY_POSITION, + "tags": ['test_tag']} + l7policy = wsme_json.fromjson(self._type, body) + self.assertEqual(constants.MIN_POLICY_POSITION, l7policy.position) + self.assertEqual(wsme_types.Unset, l7policy.redirect_url) + self.assertEqual(wsme_types.Unset, l7policy.redirect_pool_id) + + def test_l7policy_min_position(self): + body = {"position": constants.MIN_POLICY_POSITION - 1} + self.assertRaises( + exc.InvalidInput, wsme_json.fromjson, self._type, body) + body = {"position": constants.MIN_POLICY_POSITION} + l7policy = wsme_json.fromjson(self._type, body) + self.assertEqual(constants.MIN_POLICY_POSITION, l7policy.position) + + def test_l7policy_max_position(self): + body = {"position": constants.MAX_POLICY_POSITION + 1} + self.assertRaises( + exc.InvalidInput, wsme_json.fromjson, self._type, body) + body = {"position": constants.MAX_POLICY_POSITION} + l7policy = wsme_json.fromjson(self._type, body) + self.assertEqual(constants.MAX_POLICY_POSITION, l7policy.position) + + def test_invalid_position(self): + body = {"position": "test"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) + + def test_invalid_action(self): + body = {"action": "test"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_tags(self): + body = {"tags": "invalid_tag"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"tags": [1, 2]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) diff --git a/octavia/tests/unit/api/v2/types/test_l7rule.py b/octavia/tests/unit/api/v2/types/test_l7rule.py new file mode 100644 index 0000000000..1fecd2f0e2 --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_l7rule.py @@ 
-0,0 +1,187 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import exc +from wsme.rest import json as wsme_json +from wsme import types as wsme_types + +from octavia.api.v2.types import l7rule as l7rule_type +from octavia.common import constants +from octavia.tests.unit.api.common import base + + +class TestL7RulePOST(base.BaseTypesTest): + + _type = l7rule_type.L7RulePOST + + def test_l7rule(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": "/api", "tags": ['test_tag']} + l7rule = wsme_json.fromjson(self._type, body) + self.assertEqual(wsme_types.Unset, l7rule.key) + self.assertFalse(l7rule.invert) + self.assertTrue(l7rule.admin_state_up) + + def test_type_mandatory(self): + body = {"compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": "/api"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_compare_type_mandatory(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "value": "/api"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_value_mandatory(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_type(self): + body = {"type": "notvalid", + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": "/api"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_compare_type(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": "notvalid", + "value": "/api"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_value(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": 123} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_value_whitespace(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": "12\n3"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_key_whitespace(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "key": "12\n3"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_invert(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": "/api", + "invert": "notvalid"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + + def test_invalid_admin_state_up(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": "/api", + "admin_state_up": "notvalid"} + self.assertRaises(ValueError, 
wsme_json.fromjson, self._type, + body) + + def test_invalid_key(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": "/api", + "key": 123} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_tags(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": "/api", + "tags": "invalid_tag"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": "/api", + "tags": [1, 2]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestL7RulePUT(base.BaseTypesTest): + + _type = l7rule_type.L7RulePUT + + def test_l7rule(self): + body = {"type": constants.L7RULE_TYPE_PATH, + "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + "value": "/api", "tags": ['test_tag']} + l7rule = wsme_json.fromjson(self._type, body) + self.assertEqual(wsme_types.Unset, l7rule.key) + self.assertFalse(l7rule.invert) + + def test_invalid_type(self): + body = {"type": "notvalid"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_compare_type(self): + body = {"compare_type": "notvalid"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_value(self): + body = {"value": 123} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_value_linefeed(self): + body = {"value": "12\n3"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_key_linefeed(self): + body = {"key": "12\n3"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_invert(self): + body = {"invert": "notvalid"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + + def test_invalid_admin_state_up(self): + body = {"admin_state_up": "notvalid"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + + def test_invalid_key(self): + body = {"key": 123} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_tags(self): + body = {"tags": "invalid_tag"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"tags": [1, 2]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) diff --git a/octavia/tests/unit/api/v2/types/test_listener.py b/octavia/tests/unit/api/v2/types/test_listener.py new file mode 100644 index 0000000000..e8705b8ddd --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_listener.py @@ -0,0 +1,167 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
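# Editor's note: a minimal standalone sketch (not part of the diff) of the
# validation pattern the L7 rule suites above exercise. wsme_json.fromjson()
# coerces a JSON dict into the typed request object; per the tests, a value
# that violates a declared string type raises wsme.exc.InvalidInput, while
# an uncoercible primitive raises ValueError.
from wsme import exc
from wsme.rest import json as wsme_json

from octavia.api.v2.types import l7rule as l7rule_type
from octavia.common import constants

body = {"type": constants.L7RULE_TYPE_PATH,
        "compare_type": constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
        "value": "/api"}
rule = wsme_json.fromjson(l7rule_type.L7RulePOST, body)  # valid body parses
assert rule.value == "/api"
try:
    wsme_json.fromjson(l7rule_type.L7RulePOST, dict(body, value=123))
except exc.InvalidInput:
    pass  # non-string "value" is rejected, matching test_invalid_value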
+ +from oslo_utils import uuidutils +from wsme import exc +from wsme.rest import json as wsme_json +from wsme import types as wsme_types + +from octavia.api.v2.types import listener as lis_type +from octavia.common import constants +from octavia.tests.unit.api.common import base + + +class TestListener: + + _type = None + + def test_invalid_name(self): + body = {"name": 0} + if self._type is lis_type.ListenerPOST: + body.update({"protocol": constants.PROTOCOL_HTTP, + "protocol_port": 80, + "loadbalancer_id": uuidutils.generate_uuid()}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_description(self): + body = {"description": 0} + if self._type is lis_type.ListenerPOST: + body.update({"protocol": constants.PROTOCOL_HTTP, + "protocol_port": 80, + "loadbalancer_id": uuidutils.generate_uuid()}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_admin_state_up(self): + body = {"admin_state_up": "notvalid"} + if self._type is lis_type.ListenerPOST: + body.update({"protocol": constants.PROTOCOL_HTTP, + "protocol_port": 80, + "loadbalancer_id": uuidutils.generate_uuid()}) + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + + def test_invalid_connection_limit(self): + body = {"connection_limit": "test"} + if self._type is lis_type.ListenerPOST: + body.update({"protocol": constants.PROTOCOL_HTTP, + "protocol_port": 80, + "loadbalancer_id": uuidutils.generate_uuid()}) + self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) + + def test_invalid_tags(self): + body = {"tags": "invalid_tag"} + if self._type is lis_type.ListenerPOST: + body.update({"protocol": constants.PROTOCOL_HTTP, + "protocol_port": 80, + "loadbalancer_id": uuidutils.generate_uuid()}) + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"tags": [1, 2]} + if self._type is lis_type.ListenerPOST: + body.update({"protocol": constants.PROTOCOL_HTTP, + "protocol_port": 80, + "loadbalancer_id": uuidutils.generate_uuid()}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestListenerPOST(base.BaseTypesTest, TestListener): + + _type = lis_type.ListenerPOST + + def test_listener(self): + body = {"name": "test", "description": "test", "connection_limit": 10, + "protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, + "default_pool_id": uuidutils.generate_uuid(), + "loadbalancer_id": uuidutils.generate_uuid(), + "tags": ['test_tag']} + listener = wsme_json.fromjson(self._type, body) + self.assertTrue(listener.admin_state_up) + + def test_protocol_mandatory(self): + body = {"protocol_port": 80, + "loadbalancer_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_protocol_port_mandatory(self): + body = {"protocol": constants.PROTOCOL_HTTP, + "loadbalancer_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_protocol(self): + body = {"protocol": "http", "protocol_port": 80} + if self._type is lis_type.ListenerPOST: + body.update({"loadbalancer_id": uuidutils.generate_uuid()}) + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_protocol_port(self): + body = {"protocol": constants.PROTOCOL_HTTP, "protocol_port": "test"} + if self._type is lis_type.ListenerPOST: + body.update({"loadbalancer_id": uuidutils.generate_uuid()}) + self.assertRaises(ValueError, 
wsme_json.fromjson, self._type, body) + + def test_loadbalancer_id_mandatory(self): + body = {"protocol": constants.PROTOCOL_HTTP, + "protocol_port": 80} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_loadbalancer_id(self): + body = {"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, + "loadbalancer_id": "a"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_non_uuid_project_id(self): + body = {"name": "test", "description": "test", "connection_limit": 10, + "protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, + "default_pool_id": uuidutils.generate_uuid(), + "loadbalancer_id": uuidutils.generate_uuid(), + "project_id": "non-uuid"} + listener = wsme_json.fromjson(self._type, body) + self.assertEqual(listener.project_id, body['project_id']) + + def test_invalid_alpn_protocols(self): + body = {"protocol": constants.PROTOCOL_HTTP, "protocol_port": 80, + "loadbalancer_id": uuidutils.generate_uuid(), + "alpn_protocols": ["bad", "boy"]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestListenerPUT(base.BaseTypesTest, TestListener): + + _type = lis_type.ListenerPUT + + def test_listener(self): + body = {"name": "test", "description": "test", + "connection_limit": 10, + "default_tls_container_ref": uuidutils.generate_uuid(), + "sni_container_refs": [uuidutils.generate_uuid(), + uuidutils.generate_uuid()], + "default_pool_id": uuidutils.generate_uuid(), + "insert_headers": {"a": "1", "b": "2"}, + "tags": ['test_tag']} + listener = wsme_json.fromjson(self._type, body) + self.assertEqual(wsme_types.Unset, listener.admin_state_up) + + def test_invalid_alpn_protocols(self): + body = {"alpn_protocols": ["bad", "boy"]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) diff --git a/octavia/tests/unit/api/v2/types/test_load_balancer.py b/octavia/tests/unit/api/v2/types/test_load_balancer.py new file mode 100644 index 0000000000..376f1fba63 --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_load_balancer.py @@ -0,0 +1,126 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
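# Editor's note: a small sketch (not part of the diff) of the minimal
# ListenerPOST body implied by the mandatory-field tests above: protocol,
# protocol_port and loadbalancer_id are required; everything else defaults,
# including admin_state_up which test_listener shows defaulting to True.
from oslo_utils import uuidutils
from wsme.rest import json as wsme_json

from octavia.api.v2.types import listener as lis_type
from octavia.common import constants

minimal = {"protocol": constants.PROTOCOL_HTTP,
           "protocol_port": 80,
           "loadbalancer_id": uuidutils.generate_uuid()}
listener = wsme_json.fromjson(lis_type.ListenerPOST, minimal)
assert listener.admin_state_up  # defaults to True, per test_listener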
+ +from oslo_utils import uuidutils +from wsme import exc +from wsme.rest import json as wsme_json +from wsme import types as wsme_types + +from octavia.api.v2.types import load_balancer as lb_type +from octavia.tests.unit.api.common import base + + +class TestLoadBalancer: + + _type = None + + def test_load_balancer(self): + body = {"name": "test_name", "description": "test_description", + "vip_subnet_id": uuidutils.generate_uuid(), + "tags": ['test']} + lb = wsme_json.fromjson(self._type, body) + self.assertTrue(lb.admin_state_up) + + def test_invalid_name(self): + body = {"name": 0} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_name_length(self): + body = {"name": "x" * 256} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_description(self): + body = {"description": 0} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_description_length(self): + body = {"description": "x" * 256} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_enabled(self): + body = {"admin_state_up": "notvalid"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + + def test_invalid_qos_policy_id(self): + body = {"vip_qos_policy_id": "invalid_uuid"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_tags(self): + body = {"tags": "invalid_tag"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"tags": [1, 2]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestLoadBalancerPOST(base.BaseTypesTest, TestLoadBalancer): + + _type = lb_type.LoadBalancerPOST + + def test_non_uuid_project_id(self): + body = {"name": "test_name", "description": "test_description", + "vip_subnet_id": uuidutils.generate_uuid(), + "project_id": "non-uuid"} + lb = wsme_json.fromjson(self._type, body) + self.assertEqual(lb.project_id, body['project_id']) + + def test_vip(self): + body = {"vip_subnet_id": uuidutils.generate_uuid(), + "vip_port_id": uuidutils.generate_uuid(), + "vip_qos_policy_id": uuidutils.generate_uuid()} + wsme_json.fromjson(self._type, body) + + def test_invalid_ip_address(self): + body = {"vip_address": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_port_id(self): + body = {"vip_port_id": "invalid_uuid"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_subnet_id(self): + body = {"vip_subnet_id": "invalid_uuid"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_additional_vips(self): + body = {"additional_vips": [{"subnet_id": uuidutils.generate_uuid(), + "ip_address": "192.0.2.1"}]} + wsme_json.fromjson(self._type, body) + + def test_additional_vips_invalid_ip(self): + body = {"additional_vips": [{"subnet_id": uuidutils.generate_uuid(), + "ip_address": "foo.0.0.1"}]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestLoadBalancerPUT(base.BaseTypesTest, TestLoadBalancer): + + _type = lb_type.LoadBalancerPUT + + def test_load_balancer(self): + body = {"name": "test_name", "description": "test_description", + "tags": ['test_tag']} + lb = wsme_json.fromjson(self._type, body) + self.assertEqual(wsme_types.Unset, lb.admin_state_up) diff --git a/octavia/tests/unit/api/v2/types/test_member.py
b/octavia/tests/unit/api/v2/types/test_member.py new file mode 100644 index 0000000000..19ba30f50d --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_member.py @@ -0,0 +1,158 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from wsme import exc +from wsme.rest import json as wsme_json +from wsme import types as wsme_types + +from octavia.api.v2.types import member as member_type +from octavia.common import constants +from octavia.tests.unit.api.v2.types import base + + +class TestMemberPOST(base.BaseTypesTest): + + _type = member_type.MemberPOST + + def test_member(self): + body = {"name": "member1", "address": "10.0.0.1", + "protocol_port": 80, "tags": ['test_tag']} + member = wsme_json.fromjson(self._type, body) + self.assertTrue(member.admin_state_up) + self.assertEqual(1, member.weight) + self.assertEqual(wsme_types.Unset, member.subnet_id) + + def test_address_mandatory(self): + body = {} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_protocol_mandatory(self): + body = {} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_address(self): + body = {"address": "test", "protocol_port": 443} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_subnet_id(self): + body = {"address": "10.0.0.1", "protocol_port": 443, + "subnet_id": "invalid_uuid"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_admin_state_up(self): + body = {"address": "10.0.0.1", "protocol_port": 443, + "admin_state_up": "notvalid"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + + def test_invalid_protocol_port(self): + body = {"address": "10.0.0.1", "protocol_port": "test"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) + + def test_invalid_weight(self): + body = {"address": "10.0.0.1", "protocol_port": 443, + "weight": "test"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) + + def test_invalid_tags(self): + body = {"address": "10.0.0.1", "protocol_port": 443, + "tags": "invalid_tag"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"address": "10.0.0.1", "protocol_port": 443, "tags": [1, 2]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_min_weight(self): + body = {"address": "10.0.0.1", "protocol_port": 443, + "weight": constants.MIN_WEIGHT - 1} + self.assertRaises( + exc.InvalidInput, wsme_json.fromjson, self._type, body) + body = {"address": "10.0.0.1", "protocol_port": 443, + "weight": constants.MIN_WEIGHT} + member = wsme_json.fromjson(self._type, body) + self.assertEqual(constants.MIN_WEIGHT, member.weight) + + def test_max_weight(self): + body = {"address": "10.0.0.1", "protocol_port": 443, + "weight": constants.MAX_WEIGHT + 1} + self.assertRaises( + exc.InvalidInput, wsme_json.fromjson, self._type, body) + body = {"address": "10.0.0.1", 
"protocol_port": 443, + "weight": constants.MAX_WEIGHT} + member = wsme_json.fromjson(self._type, body) + self.assertEqual(constants.MAX_WEIGHT, member.weight) + + def test_non_uuid_project_id(self): + body = {"address": "10.0.0.1", "protocol_port": 80, + "project_id": "non-uuid"} + member = wsme_json.fromjson(self._type, body) + self.assertEqual(member.project_id, body['project_id']) + + +class TestMemberPUT(base.BaseTypesTest): + + _type = member_type.MemberPUT + + def test_member(self): + body = {"name": "new_name", "tags": ['new_tag']} + member = wsme_json.fromjson(self._type, body) + self.assertEqual(wsme_types.Unset, member.weight) + self.assertEqual(wsme_types.Unset, member.admin_state_up) + + def test_member_full(self): + name = "new_name" + weight = 1 + admin_state = True + body = {"name": name, "weight": weight, "admin_state_up": admin_state} + member = wsme_json.fromjson(self._type, body) + self.assertEqual(name, member.name) + self.assertEqual(weight, member.weight) + self.assertEqual(admin_state, member.admin_state_up) + + def test_invalid_admin_state(self): + body = {"admin_state_up": "test"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) + + def test_invalid_weight(self): + body = {"weight": "test"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, body) + + def test_invalid_tags(self): + body = {"tags": "invalid_tag"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"tags": [1, 2]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_min_weight(self): + body = {"weight": constants.MIN_WEIGHT - 1} + self.assertRaises( + exc.InvalidInput, wsme_json.fromjson, self._type, body) + body = {"weight": constants.MIN_WEIGHT} + member = wsme_json.fromjson(self._type, body) + self.assertEqual(constants.MIN_WEIGHT, member.weight) + + def test_max_weight(self): + body = {"weight": constants.MAX_WEIGHT + 1} + self.assertRaises( + exc.InvalidInput, wsme_json.fromjson, self._type, body) + body = {"weight": constants.MAX_WEIGHT} + member = wsme_json.fromjson(self._type, body) + self.assertEqual(constants.MAX_WEIGHT, member.weight) diff --git a/octavia/tests/unit/api/v2/types/test_pool.py b/octavia/tests/unit/api/v2/types/test_pool.py new file mode 100644 index 0000000000..1e4ed77dfe --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_pool.py @@ -0,0 +1,297 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_utils import uuidutils +from wsme import exc +from wsme.rest import json as wsme_json +from wsme import types as wsme_types + +from octavia.api.common import types +from octavia.api.v2.types import health_monitor as health_monitor_type +from octavia.api.v2.types import member as member_type +from octavia.api.v2.types import pool as pool_type +from octavia.common import constants +from octavia.common import data_models +from octavia.tests.unit.api.common import base + + +class TestSessionPersistence: + + _type = None + + def test_session_persistence(self): + body = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE} + sp = wsme_json.fromjson(self._type, body) + self.assertIsNotNone(sp.type) + + def test_invalid_type(self): + body = {"type": "source_ip"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_cookie_name(self): + body = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, + "cookie_name": 10} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestPoolPOST(base.BaseTypesTest): + + _type = pool_type.PoolPOST + + def test_pool(self): + body = { + "loadbalancer_id": uuidutils.generate_uuid(), + "listener_id": uuidutils.generate_uuid(), + "protocol": constants.PROTOCOL_HTTP, + "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN, + "tags": ['test_tag']} + pool = wsme_json.fromjson(self._type, body) + self.assertTrue(pool.admin_state_up) + + def test_load_balancer_mandatory(self): + body = {"loadbalancer_id": uuidutils.generate_uuid()} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_protocol_mandatory(self): + body = {"lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_lb_algorithm_mandatory(self): + body = {"protocol": constants.PROTOCOL_HTTP} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_name(self): + body = {"name": 10, + "loadbalancer_id": uuidutils.generate_uuid(), + "protocol": constants.PROTOCOL_HTTP, + "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_description(self): + body = {"description": 10, + "loadbalancer_id": uuidutils.generate_uuid(), + "protocol": constants.PROTOCOL_HTTP, + "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_too_long_name(self): + body = {"name": "n" * 256, + "loadbalancer_id": uuidutils.generate_uuid(), + "protocol": constants.PROTOCOL_HTTP, + "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_too_long_description(self): + body = {"description": "d" * 256, + "loadbalancer_id": uuidutils.generate_uuid(), + "protocol": constants.PROTOCOL_HTTP, + "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_load_balancer_id(self): + body = {"loadbalancer_id": 10, + "protocol": constants.PROTOCOL_HTTP, + "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_protocol(self): + body = {"loadbalancer_id": uuidutils.generate_uuid(), + "protocol": "http", + "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN} +
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_lb_algorithm(self): + body = {"loadbalancer_id": uuidutils.generate_uuid(), + "protocol": constants.PROTOCOL_HTTP, + "lb_algorithm": "source_ip"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_non_uuid_project_id(self): + body = {"loadbalancer_id": uuidutils.generate_uuid(), + "protocol": constants.PROTOCOL_HTTP, + "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN, + "project_id": "non-uuid"} + pool = wsme_json.fromjson(self._type, body) + self.assertEqual(pool.project_id, body['project_id']) + + def test_invalid_tags(self): + body = {"protocol": constants.PROTOCOL_HTTP, + "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN, + "tags": "invalid_tag"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"protocol": constants.PROTOCOL_HTTP, + "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN, + "tags": [1, 2]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_alpn_protocols(self): + body = {"protocol": constants.PROTOCOL_HTTP, + "loadbalancer_id": uuidutils.generate_uuid(), + "alpn_protocols": ["bad", "boy"]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestPoolPUT(base.BaseTypesTest): + + _type = pool_type.PoolPUT + + def test_pool(self): + body = {"name": "test_name", "tags": ['new_tag']} + pool = wsme_json.fromjson(self._type, body) + self.assertEqual(wsme_types.Unset, pool.admin_state_up) + + def test_invalid_name(self): + body = {"name": 10} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_too_long_name(self): + body = {"name": "n" * 256} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_too_long_description(self): + body = {"description": "d" * 256} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_description(self): + body = {"description": 10} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_lb_algorithm(self): + body = {"lb_algorithm": "source_ip"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_tags(self): + body = {"tags": "invalid_tag"} + self.assertRaises(ValueError, wsme_json.fromjson, self._type, + body) + body = {"tags": [1, 2]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_alpn_protocols(self): + body = {"alpn_protocols": ["bad", "boy"]} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestSessionPersistencePOST(base.BaseTypesTest, TestSessionPersistence): + + _type = pool_type.SessionPersistencePOST + + def test_type_mandatory(self): + body = {"cookie_name": "test_name"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + def test_invalid_app_cookie_name(self): + body = {"cookie_name": "cookie,monster"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestSessionPersistencePUT(base.BaseTypesTest, TestSessionPersistence): + + _type = pool_type.SessionPersistencePUT + + def test_invalid_app_cookie_name(self): + body = {"cookie_name": "cookie\nmonster"} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type, + body) + + +class TestPoolResponse(base.BaseTypesTest): + + _type = pool_type.PoolResponse + + def 
test_pool_response_with_health_monitor(self): + health_monitor_id = uuidutils.generate_uuid() + health_monitor_model = data_models.HealthMonitor(id=health_monitor_id) + pool_model = data_models.Pool(health_monitor=health_monitor_model) + pool = self._type.from_data_model(data_model=pool_model) + self.assertEqual(pool.healthmonitor_id, health_monitor_id) + + def test_pool_response_with_members(self): + member_id = uuidutils.generate_uuid() + members = [data_models.Member(id=member_id)] + pool_model = data_models.Pool(members=members) + pool = self._type.from_data_model(data_model=pool_model) + self.assertIsInstance(pool.members[0], types.IdOnlyType) + self.assertEqual(pool.members[0].id, member_id) + + def test_pool_response_with_load_balancer(self): + load_balancer_id = uuidutils.generate_uuid() + load_balancer = data_models.LoadBalancer(id=load_balancer_id) + pool_model = data_models.Pool(load_balancer=load_balancer) + pool = self._type.from_data_model(data_model=pool_model) + self.assertIsInstance(pool.loadbalancers[0], types.IdOnlyType) + self.assertEqual(pool.loadbalancers[0].id, load_balancer_id) + + def test_pool_response_with_session_persistence(self): + session_persistence = data_models.SessionPersistence( + cookie_name="test" + ) + pool_model = data_models.Pool(session_persistence=session_persistence) + pool = self._type.from_data_model(data_model=pool_model) + self.assertEqual(pool.session_persistence.cookie_name, "test") + + def test_pool_response_without_children(self): + pool = self._type.from_data_model(data_model=data_models.Pool()) + self.assertEqual(len(pool.loadbalancers), 0) + self.assertIsNone(pool.session_persistence) + self.assertEqual(len(pool.members), 0) + self.assertEqual(len(pool.listeners), 0) + self.assertEqual(pool.healthmonitor_id, wsme_types.Unset) + + +class TestPoolFullResponse(base.BaseTypesTest): + + _type = pool_type.PoolFullResponse + + def test_pool_full_response_with_health_monitor(self): + health_monitor_model = data_models.HealthMonitor() + pool_model = data_models.Pool(health_monitor=health_monitor_model) + pool = self._type.from_data_model(data_model=pool_model) + self.assertIsInstance( + pool.healthmonitor, health_monitor_type.HealthMonitorFullResponse + ) + + def test_pool_full_response_with_members(self): + members = [data_models.Member()] + pool_model = data_models.Pool(members=members) + pool = self._type.from_data_model(data_model=pool_model) + self.assertIsInstance(pool.members[0], member_type.MemberFullResponse) + + def test_pool_full_response_without_children(self): + pool = self._type.from_data_model(data_model=data_models.Pool()) + self.assertIsNone(pool.healthmonitor) diff --git a/octavia/tests/unit/api/v2/types/test_quotas.py b/octavia/tests/unit/api/v2/types/test_quotas.py new file mode 100644 index 0000000000..bdacfc2c97 --- /dev/null +++ b/octavia/tests/unit/api/v2/types/test_quotas.py @@ -0,0 +1,87 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
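# Editor's note: a sketch (not part of the diff) of the data-model round
# trip exercised by TestPoolResponse above: from_data_model() flattens
# related objects such as members into IdOnlyType references, so only the
# id survives the conversion.
from oslo_utils import uuidutils

from octavia.api.v2.types import pool as pool_type
from octavia.common import data_models

member_id = uuidutils.generate_uuid()
model = data_models.Pool(members=[data_models.Member(id=member_id)])
resp = pool_type.PoolResponse.from_data_model(data_model=model)
assert resp.members[0].id == member_id  # only the id is carried over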
+ +from wsme import exc +from wsme.rest import json as wsme_json +from wsme import types as wsme_types + +from octavia.api.v2.types import quotas as quota_type +from octavia.common import constants +from octavia.tests.unit.api.v2.types import base + + +class TestQuotaPut(base.BaseTypesTest): + + _type = quota_type.QuotaPUT + + def test_quota(self): + body = {'quota': {'loadbalancer': 5}} + quota = wsme_json.fromjson(self._type, body) + self.assertEqual(wsme_types.Unset, quota.quota.listener) + self.assertEqual(wsme_types.Unset, quota.quota.pool) + self.assertEqual(wsme_types.Unset, quota.quota.member) + self.assertEqual(wsme_types.Unset, quota.quota.healthmonitor) + self.assertEqual(wsme_types.Unset, quota.quota.l7policy) + self.assertEqual(wsme_types.Unset, quota.quota.l7rule) + + def test_invalid_quota(self): + body = {'quota': {'loadbalancer': constants.MAX_QUOTA + 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + body = {'quota': {'loadbalancer': constants.MIN_QUOTA - 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + body = {'quota': {'listener': constants.MAX_QUOTA + 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + body = {'quota': {'listener': constants.MIN_QUOTA - 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + body = {'quota': {'pool': constants.MAX_QUOTA + 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + body = {'quota': {'pool': constants.MIN_QUOTA - 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + body = {'quota': {'member': constants.MAX_QUOTA + 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + body = {'quota': {'member': constants.MIN_QUOTA - 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + body = {'quota': {'healthmonitor': constants.MAX_QUOTA + 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + body = {'quota': {'healthmonitor': constants.MIN_QUOTA - 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + body = {'quota': {'l7policy': constants.MAX_QUOTA + 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + body = {'quota': {'l7policy': constants.MIN_QUOTA - 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + + body = {'quota': {'l7rule': constants.MAX_QUOTA + 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) + body = {'quota': {'l7rule': constants.MIN_QUOTA - 1}} + self.assertRaises(exc.InvalidInput, wsme_json.fromjson, + self._type, body) diff --git a/octavia/tests/unit/base.py b/octavia/tests/unit/base.py new file mode 100644 index 0000000000..5b0052190a --- /dev/null +++ b/octavia/tests/unit/base.py @@ -0,0 +1,72 @@ +# Copyright 2014, Doug Wiegley, A10 Networks. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
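# Editor's note: the QuotaPUT cases in test_quotas.py above repeat one
# MIN/MAX boundary pattern per resource; a compact equivalent (not part of
# the diff) is sketched here.
from wsme import exc
from wsme.rest import json as wsme_json

from octavia.api.v2.types import quotas as quota_type
from octavia.common import constants

for resource in ("loadbalancer", "listener", "pool",
                 "member", "healthmonitor", "l7policy", "l7rule"):
    for bad in (constants.MAX_QUOTA + 1, constants.MIN_QUOTA - 1):
        try:
            wsme_json.fromjson(quota_type.QuotaPUT,
                               {"quota": {resource: bad}})
        except exc.InvalidInput:
            pass  # every resource quota shares the same bounds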
+from unittest import mock + +import fixtures +from oslo_config import cfg +import oslo_messaging as messaging +from oslo_messaging import conffixture as messaging_conffixture +import testtools + +from octavia.common import clients +from octavia.common import rpc + +# needed for tests to function when run independently: +from octavia.common import config # noqa: F401 + +from octavia.tests import fixtures as oc_fixtures + + +class TestCase(testtools.TestCase): + + def setUp(self): + super().setUp() + config.register_cli_opts() + self.addCleanup(mock.patch.stopall) + self.addCleanup(self.clean_caches) + self.addCleanup(cfg.CONF.reset) + + self.warning_fixture = self.useFixture(oc_fixtures.WarningsFixture()) + + def clean_caches(self): + clients.NovaAuth.nova_client = None + clients.NeutronAuth.neutron_client = None + + +class TestRpc(testtools.TestCase): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._buses = {} + + self.warning_fixture = self.useFixture(oc_fixtures.WarningsFixture()) + + def _fake_create_transport(self, url): + if url not in self._buses: + self._buses[url] = messaging.get_rpc_transport( + cfg.CONF, + url=url) + return self._buses[url] + + def setUp(self): + super().setUp() + self.addCleanup(rpc.cleanup) + self.messaging_conf = messaging_conffixture.ConfFixture(cfg.CONF) + self.messaging_conf.transport_url = 'fake:/' + self.useFixture(self.messaging_conf) + self.useFixture(fixtures.MonkeyPatch( + 'octavia.common.rpc.create_transport', + self._fake_create_transport)) + with mock.patch('octavia.common.rpc.get_transport_url') as mock_gtu: + mock_gtu.return_value = None + rpc.init() diff --git a/octavia/tests/unit/certificates/__init__.py b/octavia/tests/unit/certificates/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/certificates/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/certificates/common/__init__.py b/octavia/tests/unit/certificates/common/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/certificates/common/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
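# Editor's note: a sketch (not part of the diff) of how the unit test
# modules in this series consume the shared TestCase defined in
# octavia/tests/unit/base.py above; the class and patch target below are
# illustrative only. Patches started inside a test need no explicit stop,
# because setUp() registered addCleanup(mock.patch.stopall).
from unittest import mock

import octavia.tests.unit.base as base


class ExampleTest(base.TestCase):  # hypothetical test case

    def test_patch_cleanup_is_automatic(self):
        mock.patch('octavia.common.rpc.init').start()
        # no patcher.stop() here: mock.patch.stopall runs at cleanup,
        # and cfg.CONF.reset / clean_caches run as well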
diff --git a/octavia/tests/unit/certificates/common/auth/__init__.py b/octavia/tests/unit/certificates/common/auth/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/certificates/common/auth/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/certificates/common/auth/test_barbican_acl.py b/octavia/tests/unit/certificates/common/auth/test_barbican_acl.py new file mode 100644 index 0000000000..ed279ca45f --- /dev/null +++ b/octavia/tests/unit/certificates/common/auth/test_barbican_acl.py @@ -0,0 +1,120 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +import octavia.certificates.common.auth.barbican_acl as barbican_acl +import octavia.certificates.manager.barbican as barbican_cert_mgr +from octavia.common import keystone +import octavia.tests.unit.base as base + +CONF = cfg.CONF + + +class TestBarbicanACLAuth(base.TestCase): + + def setUp(self): + super().setUp() + # Reset the client + keystone._SESSION = None + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.region_name = 'RegionOne' + self.endpoint_type = 'publicURL' + self.endpoint = 'barbican' + self.conf.config(group="certificates", region_name=self.region_name) + self.conf.config(group="certificates", + endpoint_type=self.endpoint_type) + self.conf.config(group="certificates", endpoint=self.endpoint) + + @mock.patch('barbicanclient.client.Client') + @mock.patch('keystoneauth1.session.Session') + def test_get_barbican_client(self, mock_ksession, mock_client): + session_mock = mock.Mock() + mock_ksession.return_value = session_mock + mock_client.return_value = mock.MagicMock() + + # Mock out the keystone session and get the client + acl_auth_object = barbican_acl.BarbicanACLAuth() + bc1 = acl_auth_object.get_barbican_client() + + mock_client.assert_called_once_with(session=session_mock, + region_name=self.region_name, + interface=self.endpoint_type) + + mock_client.reset_mock() + # Getting the session again with new class should get the same object + acl_auth_object2 = barbican_acl.BarbicanACLAuth() + bc2 = acl_auth_object2.get_barbican_client() + self.assertIs(bc1, bc2) + + mock_client.assert_not_called() + + def test_load_auth_driver(self): + bcm = barbican_cert_mgr.BarbicanCertManager() + self.assertIsInstance(bcm.auth, barbican_acl.BarbicanACLAuth) + + 
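# Editor's note (annotation, not a diff line): test_get_barbican_client
# above verifies the class-level caching in BarbicanACLAuth from two sides:
# a second instance must return the identical client object (assertIs), and
# after reset_mock() the barbican Client constructor must not be invoked
# again (assert_not_called), proving the cached client was reused.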
@mock.patch('barbicanclient.client.Client') + @mock.patch('octavia.common.keystone.KeystoneSession') + def test_ensure_secret_access(self, mock_ksession, mock_client): + service_user_id = 'uuid1' + client_mock = mock.MagicMock() + mock_client.return_value = client_mock + mock_ksession().get_service_user_id.return_value = service_user_id + + mock_acl = mock.MagicMock() + client_mock.acls.get.return_value = mock_acl + + mock_read = mock.MagicMock() + mock_read.users = [] + mock_acl.get.return_value = mock_read + + acl_auth_object = barbican_acl.BarbicanACLAuth() + acl_auth_object.ensure_secret_access(mock.Mock(), mock.Mock()) + mock_acl.submit.assert_called_once() + self.assertEqual([service_user_id], mock_read.users) + + @mock.patch('barbicanclient.client.Client') + @mock.patch('octavia.common.keystone.KeystoneSession') + def test_revoke_secret_access(self, mock_ksession, mock_client): + service_user_id = 'uuid1' + + client_mock = mock.MagicMock() + mock_client.return_value = client_mock + mock_ksession().get_service_user_id.return_value = service_user_id + + mock_acl = mock.MagicMock() + client_mock.acls.get.return_value = mock_acl + + mock_read = mock.MagicMock() + mock_read.users = [service_user_id] + mock_acl.get.return_value = mock_read + + acl_auth_object = barbican_acl.BarbicanACLAuth() + acl_auth_object.revoke_secret_access(mock.Mock(), mock.Mock()) + mock_acl.submit.assert_called_once() + + @mock.patch('octavia.common.keystone.KeystoneSession') + @mock.patch('barbicanclient.client.Client') + @mock.patch('keystoneauth1.session.Session') + def test_get_barbican_client_user_auth(self, mock_ksession, mock_client, + mock_keystone): + session_mock = mock.MagicMock() + mock_ksession.return_value = session_mock + acl_auth_object = barbican_acl.BarbicanACLAuth() + acl_auth_object.get_barbican_client_user_auth(mock.Mock()) + + mock_client.assert_called_once_with(session=session_mock, + endpoint=self.endpoint) diff --git a/octavia/tests/unit/certificates/common/test_barbican.py b/octavia/tests/unit/certificates/common/test_barbican.py new file mode 100644 index 0000000000..e41fb99b0d --- /dev/null +++ b/octavia/tests/unit/certificates/common/test_barbican.py @@ -0,0 +1,100 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
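# Editor's note: a reminder sketch (not part of the diff) of the mock
# parameter ordering used throughout the ACL tests above: stacked
# @mock.patch decorators apply bottom-up, so the decorator nearest the
# method supplies the first mock argument.
from unittest import mock


@mock.patch('os.getcwd')   # outer decorator -> second argument
@mock.patch('os.getpid')   # inner decorator -> first argument
def demo(mock_getpid, mock_getcwd):
    mock_getpid.return_value = 42
    mock_getcwd.return_value = '/tmp'
    return mock_getpid(), mock_getcwd()


assert demo() == (42, '/tmp')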
+from unittest import mock + +from barbicanclient.v1 import containers +from barbicanclient.v1 import secrets + +import octavia.certificates.common.barbican as barbican_common +from octavia.common import utils as octavia_utils +import octavia.tests.common.sample_certs as sample +import octavia.tests.unit.base as base + + +class TestBarbicanCert(base.TestCase): + + def _prepare(self): + self.certificate_secret = secrets.Secret( + api=mock.MagicMock(), + payload=self.certificate + ) + self.intermediates_secret = secrets.Secret( + api=mock.MagicMock(), + payload=sample.X509_IMDS + ) + self.private_key_secret = secrets.Secret( + api=mock.MagicMock(), + payload=self.private_key + ) + self.private_key_passphrase_secret = secrets.Secret( + api=mock.MagicMock(), + payload=self.private_key_passphrase + ) + + def test_barbican_cert(self): + # Certificate data + self.certificate = bytes(sample.X509_CERT) + self.intermediates = sample.X509_IMDS_LIST + self.private_key = bytes(sample.X509_CERT_KEY_ENCRYPTED) + self.private_key_passphrase = sample.X509_CERT_KEY_PASSPHRASE + self._prepare() + + container = containers.CertificateContainer( + api=mock.MagicMock(), + certificate=self.certificate_secret, + intermediates=self.intermediates_secret, + private_key=self.private_key_secret, + private_key_passphrase=self.private_key_passphrase_secret + ) + # Create a cert + cert = barbican_common.BarbicanCert( + cert_container=container + ) + + # Validate the cert functions + self.assertEqual(cert.get_certificate(), sample.X509_CERT) + self.assertEqual(cert.get_intermediates(), sample.X509_IMDS_LIST) + self.assertEqual(cert.get_private_key(), + sample.X509_CERT_KEY_ENCRYPTED) + self.assertEqual(cert.get_private_key_passphrase(), + octavia_utils.b(sample.X509_CERT_KEY_PASSPHRASE)) + + def test_barbican_cert_text(self): + # Certificate data + self.certificate = str(sample.X509_CERT) + self.intermediates = str(sample.X509_IMDS_LIST) + self.private_key = str(sample.X509_CERT_KEY_ENCRYPTED) + self.private_key_passphrase = str(sample.X509_CERT_KEY_PASSPHRASE) + self._prepare() + + container = containers.CertificateContainer( + api=mock.MagicMock(), + certificate=self.certificate_secret, + intermediates=self.intermediates_secret, + private_key=self.private_key_secret, + private_key_passphrase=self.private_key_passphrase_secret + ) + # Create a cert + cert = barbican_common.BarbicanCert( + cert_container=container + ) + + # Validate the cert functions + self.assertEqual(cert.get_certificate(), + octavia_utils.b(str(sample.X509_CERT))) + self.assertEqual(cert.get_intermediates(), sample.X509_IMDS_LIST) + self.assertEqual(cert.get_private_key(), octavia_utils.b(str( + sample.X509_CERT_KEY_ENCRYPTED))) + self.assertEqual(cert.get_private_key_passphrase(), + octavia_utils.b(sample.X509_CERT_KEY_PASSPHRASE)) diff --git a/octavia/tests/unit/certificates/common/test_local.py b/octavia/tests/unit/certificates/common/test_local.py new file mode 100644 index 0000000000..8bb13bf2f3 --- /dev/null +++ b/octavia/tests/unit/certificates/common/test_local.py @@ -0,0 +1,42 @@ +# Copyright 2014 Rackspace US, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import octavia.certificates.common.local as local_cert +import octavia.tests.unit.base as base + + +class TestLocalCommon(base.TestCase): + + def setUp(self): + self.certificate = "My Certificate" + self.intermediates = "My Intermediates" + self.private_key = "My Private Key" + self.private_key_passphrase = "My Private Key Passphrase" + + super().setUp() + + def test_local_cert(self): + # Create a cert + cert = local_cert.LocalCert( + certificate=self.certificate, + intermediates=self.intermediates, + private_key=self.private_key, + private_key_passphrase=self.private_key_passphrase + ) + + # Validate the cert functions + self.assertEqual(self.certificate, cert.get_certificate()) + self.assertEqual(self.intermediates, cert.get_intermediates()) + self.assertEqual(self.private_key, cert.get_private_key()) + self.assertEqual(self.private_key_passphrase, + cert.get_private_key_passphrase()) diff --git a/octavia/tests/unit/certificates/generator/__init__.py b/octavia/tests/unit/certificates/generator/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/certificates/generator/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/certificates/generator/local_csr.py b/octavia/tests/unit/certificates/generator/local_csr.py new file mode 100644 index 0000000000..9f19d1e29b --- /dev/null +++ b/octavia/tests/unit/certificates/generator/local_csr.py @@ -0,0 +1,115 @@ +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
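+# NOTE: BaseLocalCSRTestCase is shared scaffolding for the generator tests: +# it only builds a CSR and an encrypted CA key; subclasses are expected to +# provide self.cert_generator before the tests below run.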
+from unittest import mock + +from cryptography.hazmat import backends +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives import serialization +from cryptography import x509 + +import octavia.tests.unit.base as base + + +class BaseLocalCSRTestCase(base.TestCase): + def setUp(self): + self.signing_digest = "sha256" + + # Set up CSR data + csr_key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + backend=backends.default_backend() + ) + csr = x509.CertificateSigningRequestBuilder().subject_name( + x509.Name([ + x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, "test"), + ])).sign(csr_key, hashes.SHA256(), backends.default_backend()) + self.certificate_signing_request = csr.public_bytes( + serialization.Encoding.PEM) + + # Set up keys + self.ca_key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + backend=backends.default_backend() + ) + + self.ca_private_key_passphrase = b"Testing" + self.ca_private_key = self.ca_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.BestAvailableEncryption( + self.ca_private_key_passphrase), + ) + + super().setUp() + + def test_generate_csr(self): + cn = 'testCN' + # Attempt to generate a CSR + csr = self.cert_generator._generate_csr( + cn=cn, + private_key=self.ca_private_key, + passphrase=self.ca_private_key_passphrase + ) + + # Attempt to load the generated CSR + csro = x509.load_pem_x509_csr(data=csr, + backend=backends.default_backend()) + + # Make sure the CN is correct + self.assertEqual(cn, csro.subject.get_attributes_for_oid( + x509.oid.NameOID.COMMON_NAME)[0].value) + + def test_generate_private_key(self): + bit_length = 1024 + # Attempt to generate a private key + pk = self.cert_generator._generate_private_key( + bit_length=bit_length + ) + + # Attempt to load the generated private key + pko = serialization.load_pem_private_key( + data=pk, password=None, backend=backends.default_backend()) + + # Make sure the bit_length is what we set + self.assertEqual(pko.key_size, bit_length) + + def test_generate_private_key_with_passphrase(self): + bit_length = 2048 + # Attempt to generate a private key + pk = self.cert_generator._generate_private_key( + bit_length=bit_length, + passphrase=self.ca_private_key_passphrase + ) + + # Attempt to load the generated private key + pko = serialization.load_pem_private_key( + data=pk, password=self.ca_private_key_passphrase, + backend=backends.default_backend()) + + # Make sure the bit_length is what we set + self.assertEqual(pko.key_size, bit_length) + + def test_generate_cert_key_pair_mock(self): + cn = 'testCN' + + with mock.patch.object(self.cert_generator, 'sign_cert') as m: + # Attempt to generate a cert/key pair + self.cert_generator.generate_cert_key_pair( + cn=cn, + validity=2 * 365 * 24 * 60 * 60, + ) + self.assertTrue(m.called) diff --git a/octavia/tests/unit/certificates/generator/test_local.py b/octavia/tests/unit/certificates/generator/test_local.py new file mode 100644 index 0000000000..e5dc37bec1 --- /dev/null +++ b/octavia/tests/unit/certificates/generator/test_local.py @@ -0,0 +1,184 @@ +# Copyright 2014 Rackspace US, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import datetime + +from cryptography import exceptions as crypto_exceptions +from cryptography.hazmat import backends +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives import serialization +from cryptography import x509 +from oslo_utils import timeutils + +import octavia.certificates.generator.local as local_cert_gen +from octavia.tests.unit.certificates.generator import local_csr + + +class TestLocalGenerator(local_csr.BaseLocalCSRTestCase): + def setUp(self): + super().setUp() + self.signing_digest = "sha256" + + # Setup CA data + + ca_cert = x509.CertificateBuilder() + valid_from_datetime = timeutils.utcnow() + valid_until_datetime = (timeutils.utcnow() + + datetime.timedelta( + seconds=2 * 365 * 24 * 60 * 60)) + ca_cert = ca_cert.not_valid_before(valid_from_datetime) + ca_cert = ca_cert.not_valid_after(valid_until_datetime) + ca_cert = ca_cert.serial_number(1) + subject_name = x509.Name([ + x509.NameAttribute(x509.oid.NameOID.COUNTRY_NAME, "US"), + x509.NameAttribute(x509.oid.NameOID.STATE_OR_PROVINCE_NAME, + "Oregon"), + x509.NameAttribute(x509.oid.NameOID.LOCALITY_NAME, "Springfield"), + x509.NameAttribute(x509.oid.NameOID.ORGANIZATION_NAME, + "Springfield Nuclear Power Plant"), + x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, "maggie1"), + ]) + ca_cert = ca_cert.subject_name(subject_name) + ca_cert = ca_cert.issuer_name(subject_name) + ca_cert = ca_cert.public_key(self.ca_key.public_key()) + signed_cert = ca_cert.sign(private_key=self.ca_key, + algorithm=hashes.SHA256(), + backend=backends.default_backend()) + + self.ca_certificate = signed_cert.public_bytes( + encoding=serialization.Encoding.PEM) + + self.cert_generator = local_cert_gen.LocalCertGenerator + + def test_sign_cert(self): + # Attempt sign a cert + signed_cert = self.cert_generator.sign_cert( + csr=self.certificate_signing_request, + validity=2 * 365 * 24 * 60 * 60, + ca_cert=self.ca_certificate, + ca_key=self.ca_private_key, + ca_key_pass=self.ca_private_key_passphrase, + ca_digest=self.signing_digest + ) + + self.assertIn("-----BEGIN CERTIFICATE-----", + signed_cert.decode('ascii')) + + # Load the cert for specific tests + cert = x509.load_pem_x509_certificate( + data=signed_cert, backend=backends.default_backend()) + + # Make sure expiry time is accurate + should_expire = (timeutils.utcnow() + + datetime.timedelta(seconds=2 * 365 * 24 * 60 * 60)) + diff = should_expire - cert.not_valid_after + self.assertLess(diff, datetime.timedelta(seconds=10)) + + # Make sure this is a version 3 X509. 
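+ # (RFC 5280 only allows extensions on v3 certificates)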
+ self.assertEqual('v3', cert.version.name) + + # Make sure this cert is marked as Server and Client Cert via the + # extended Key Usage extension + self.assertIn(x509.oid.ExtendedKeyUsageOID.SERVER_AUTH, + cert.extensions.get_extension_for_class( + x509.ExtendedKeyUsage).value._usages) + self.assertIn(x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH, + cert.extensions.get_extension_for_class( + x509.ExtendedKeyUsage).value._usages) + + # Make sure this cert can't sign other certs + self.assertFalse(cert.extensions.get_extension_for_class( + x509.BasicConstraints).value.ca) + + def test_sign_cert_passphrase_none(self): + # Attempt sign a cert + ca_private_key = self.ca_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption() + ) + signed_cert = self.cert_generator.sign_cert( + csr=self.certificate_signing_request, + validity=2 * 365 * 24 * 60 * 60, + ca_cert=self.ca_certificate, + ca_key=ca_private_key, + ca_key_pass=None, + ca_digest=self.signing_digest + ) + + self.assertIn("-----BEGIN CERTIFICATE-----", + signed_cert.decode('ascii')) + + # Load the cert for specific tests + cert = x509.load_pem_x509_certificate( + data=signed_cert, backend=backends.default_backend()) + + # Make sure expiry time is accurate + should_expire = (timeutils.utcnow() + + datetime.timedelta(seconds=2 * 365 * 24 * 60 * 60)) + diff = should_expire - cert.not_valid_after + self.assertLess(diff, datetime.timedelta(seconds=10)) + + # Make sure this is a version 3 X509. + self.assertEqual('v3', cert.version.name) + + # Make sure this cert is marked as Server and Client Cert via the + # extended Key Usage extension + self.assertIn(x509.oid.ExtendedKeyUsageOID.SERVER_AUTH, + cert.extensions.get_extension_for_class( + x509.ExtendedKeyUsage).value._usages) + self.assertIn(x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH, + cert.extensions.get_extension_for_class( + x509.ExtendedKeyUsage).value._usages) + + # Make sure this cert can't sign other certs + self.assertFalse(cert.extensions.get_extension_for_class( + x509.BasicConstraints).value.ca) + + def test_sign_cert_invalid_algorithm(self): + self.assertRaises( + crypto_exceptions.UnsupportedAlgorithm, + self.cert_generator.sign_cert, + csr=self.certificate_signing_request, + validity=2 * 365 * 24 * 60 * 60, + ca_cert=self.ca_certificate, + ca_key=self.ca_private_key, + ca_key_pass=self.ca_private_key_passphrase, + ca_digest='not_an_algorithm' + ) + + def test_generate_cert_key_pair(self): + cn = 'testCN' + bit_length = 1024 + + # Attempt to generate a cert/key pair + cert_object = self.cert_generator.generate_cert_key_pair( + cn=cn, + validity=2 * 365 * 24 * 60 * 60, + bit_length=bit_length, + passphrase=self.ca_private_key_passphrase, + ca_cert=self.ca_certificate, + ca_key=self.ca_private_key, + ca_key_pass=self.ca_private_key_passphrase + ) + + # Validate that the cert and key are loadable + cert = x509.load_pem_x509_certificate( + data=cert_object.certificate, backend=backends.default_backend()) + self.assertIsNotNone(cert) + + key = serialization.load_pem_private_key( + data=cert_object.private_key, + password=cert_object.private_key_passphrase, + backend=backends.default_backend()) + self.assertIsNotNone(key) diff --git a/octavia/tests/unit/certificates/manager/__init__.py b/octavia/tests/unit/certificates/manager/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/certificates/manager/__init__.py @@ -0,0 +1,11 @@ +# 
Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/certificates/manager/test_barbican.py b/octavia/tests/unit/certificates/manager/test_barbican.py new file mode 100644 index 0000000000..a2816e9766 --- /dev/null +++ b/octavia/tests/unit/certificates/manager/test_barbican.py @@ -0,0 +1,218 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock +import uuid + +from barbicanclient.v1 import secrets + +import octavia.certificates.common.barbican as barbican_common +import octavia.certificates.common.cert as cert +import octavia.certificates.manager.barbican as barbican_cert_mgr +from octavia.common import exceptions +import octavia.tests.common.sample_certs as sample +import octavia.tests.unit.base as base + + +PROJECT_ID = "12345" + + +class TestBarbicanManager(base.TestCase): + + def setUp(self): + # Make a fake Secret and contents + self.barbican_endpoint = 'http://localhost:9311/v1' + self.secret_uuid = uuid.uuid4() + + self.secret_ref = (f'{self.barbican_endpoint}/secrets/' + f'{self.secret_uuid}') + + self.name = 'My Fancy Cert' + self.secret_pkcs12 = secrets.Secret( + api=mock.MagicMock(), + payload=sample.PKCS12_BUNDLE + ) + + self.fake_secret = 'Fake secret' + self.secret = secrets.Secret(api=mock.MagicMock(), + payload=self.fake_secret) + self.empty_secret = mock.Mock(spec=secrets.Secret) + + # Mock out the client + self.bc = mock.Mock() + barbican_auth = mock.Mock(spec=barbican_common.BarbicanAuth) + barbican_auth.get_barbican_client.return_value = self.bc + + self.cert_manager = barbican_cert_mgr.BarbicanCertManager() + self.cert_manager.auth = barbican_auth + + self.context = mock.Mock() + self.context.project_id = PROJECT_ID + + super().setUp() + + def test_store_cert(self): + # Mock out the client + self.bc.secrets.create.return_value = ( + self.empty_secret) + + # Attempt to store a cert + secret_ref = self.cert_manager.store_cert( + context=self.context, + certificate=sample.X509_CERT, + private_key=sample.X509_CERT_KEY, + intermediates=sample.X509_IMDS, + name=self.name + ) + + self.assertEqual(secret_ref, self.empty_secret.secret_ref) + + # create_secret should be called once with our data + calls = [ + mock.call(payload=mock.ANY, expiration=None, + name=self.name) + ] + self.bc.secrets.create.assert_has_calls(calls) + + # Secret should be stored once + self.empty_secret.store.assert_called_once_with() + + def test_store_cert_failure(self): + # Mock out the client +
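+ # secrets.create succeeds here; Secret.store() is forced to raise below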
self.bc.secrets.create.return_value = ( + self.empty_secret) + + self.empty_secret.store.side_effect = ValueError() + + # Attempt to store a cert + self.assertRaises( + ValueError, + self.cert_manager.store_cert, + context=self.context, + certificate=sample.X509_CERT, + private_key=sample.X509_CERT_KEY, + intermediates=sample.X509_IMDS, + name=self.name + ) + + # create_secret should be called once + self.assertEqual(1, self.bc.secrets.create.call_count) + + # Secret should be stored once + self.empty_secret.store.assert_called_once_with() + + def test_get_cert(self): + # Mock out the client + self.bc.secrets.get.return_value = self.secret_pkcs12 + + # Get the secret data + data = self.cert_manager.get_cert( + context=self.context, + cert_ref=self.secret_ref, + resource_ref=self.secret_ref, + service_name='Octavia' + ) + + # 'get_secret' should be called once with the secret_ref + self.bc.secrets.get.assert_called_once_with( + secret_ref=self.secret_ref + ) + + # The returned data should be a Cert object with the correct values + self.assertIsInstance(data, cert.Cert) + self.assertEqual(sample.X509_CERT_KEY, data.get_private_key()) + self.assertEqual(sample.X509_CERT, data.get_certificate()) + self.assertEqual(sorted(sample.X509_IMDS_LIST), + sorted(data.get_intermediates())) + self.assertIsNone(data.get_private_key_passphrase()) + + @mock.patch('cryptography.hazmat.primitives.serialization.pkcs12.' + 'load_pkcs12') + def test_get_cert_bad_pkcs12(self, mock_load_pkcs12): + + mock_load_pkcs12.side_effect = [ValueError] + + # Mock out the client + self.bc.secrets.get.return_value = self.secret_pkcs12 + + # Test bad pkcs12 bundle re-raises UnreadablePKCS12 + self.assertRaises(exceptions.UnreadablePKCS12, + self.cert_manager.get_cert, + context=self.context, + cert_ref=self.secret_ref, + resource_ref=self.secret_ref, + service_name='Octavia') + + def test_delete_cert_legacy(self): + # Attempt to deregister as a consumer + self.cert_manager.delete_cert( + context=self.context, + cert_ref=self.secret_ref, + resource_ref=self.secret_ref, + service_name='Octavia' + ) + + # remove_consumer should be called once with the container_ref (legacy) + self.bc.containers.remove_consumer.assert_called_once_with( + container_ref=self.secret_ref, + url=self.secret_ref, + name='Octavia' + ) + + def test_set_acls(self): + # When a pkcs12 certificate is used, containers.get raises an exception + self.bc.containers.get.side_effect = Exception("container not found") + self.cert_manager.set_acls( + context=self.context, + cert_ref=self.secret_ref + ) + + # our mock_bc should have one call to ensure_secret_access + self.cert_manager.auth.ensure_secret_access.assert_called_once_with( + self.context, self.secret_ref + ) + + def test_unset_acls(self): + # When a pkcs12 certificate is used, containers.get raises an exception + self.bc.containers.get.side_effect = Exception("container not found") + self.cert_manager.unset_acls( + context=self.context, + cert_ref=self.secret_ref + ) + + # our mock_bc should have one call to revoke_secret_access + self.cert_manager.auth.revoke_secret_access.assert_called_once_with( + self.context, self.secret_ref + ) + + def test_get_secret(self): + # Mock out the client + self.bc.secrets.get.side_effect = [self.secret, Exception] + + # Get the secret data + data = self.cert_manager.get_secret( + context=self.context, + secret_ref=self.secret_ref, + ) + + # 'get_secret' should be called once with the secret_ref + self.bc.secrets.get.assert_called_once_with( + secret_ref=self.secret_ref + ) + 
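+ # The payload of the stored secret should be returned unchanged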
self.assertEqual(self.fake_secret, data) + + # Test with a failure + self.assertRaises(exceptions.CertificateRetrievalException, + self.cert_manager.get_secret, + context=self.context, secret_ref=self.secret_ref) diff --git a/octavia/tests/unit/certificates/manager/test_barbican_legacy.py b/octavia/tests/unit/certificates/manager/test_barbican_legacy.py new file mode 100644 index 0000000000..d0b1fb0d33 --- /dev/null +++ b/octavia/tests/unit/certificates/manager/test_barbican_legacy.py @@ -0,0 +1,305 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from barbicanclient.v1 import containers +from barbicanclient.v1 import secrets +from oslo_utils import uuidutils + +import octavia.certificates.common.barbican as barbican_common +import octavia.certificates.common.cert as cert +import octavia.certificates.manager.barbican_legacy as barbican_cert_mgr +from octavia.common import utils as octavia_utils +import octavia.tests.common.sample_certs as sample +import octavia.tests.unit.base as base + + +PROJECT_ID = "12345" + + +class TestBarbicanManager(base.TestCase): + + def setUp(self): + # Make a fake Container and contents + self.barbican_endpoint = 'http://localhost:9311/v1' + self.container_uuid = uuidutils.generate_uuid() + self.certificate_uuid = uuidutils.generate_uuid() + self.intermediates_uuid = uuidutils.generate_uuid() + self.private_key_uuid = uuidutils.generate_uuid() + self.private_key_passphrase_uuid = uuidutils.generate_uuid() + + self.container_ref = '{}/containers/{}'.format( + self.barbican_endpoint, self.container_uuid + ) + + self.barbican_api = mock.MagicMock() + + self.name = 'My Fancy Cert' + self.certificate = secrets.Secret( + api=self.barbican_api, + payload=sample.X509_CERT, + secret_ref=self.certificate_uuid + ) + self.intermediates = secrets.Secret( + api=self.barbican_api, + payload=sample.X509_IMDS, + secret_ref=self.intermediates_uuid + ) + self.private_key = secrets.Secret( + api=self.barbican_api, + payload=sample.X509_CERT_KEY_ENCRYPTED, + secret_ref=self.private_key_uuid + ) + self.private_key_passphrase = secrets.Secret( + api=self.barbican_api, + payload=sample.X509_CERT_KEY_PASSPHRASE, + secret_ref=self.private_key_passphrase_uuid + ) + + container = mock.Mock(spec=containers.CertificateContainer) + container.container_ref = self.container_ref + container.name = self.name + container.private_key = self.private_key + container.certificate = self.certificate + container.intermediates = self.intermediates + container.private_key_passphrase = self.private_key_passphrase + self.container = container + + self.empty_container = mock.Mock(spec=containers.CertificateContainer) + + self.secret1 = mock.Mock() + self.secret2 = mock.Mock() + self.secret3 = mock.Mock() + self.secret4 = mock.Mock() + + # Mock out the client + self.bc = mock.Mock() + self.bc.containers.get.return_value = self.container + barbican_auth = mock.Mock(spec=barbican_common.BarbicanAuth) + 
barbican_auth.get_barbican_client.return_value = self.bc + + self.cert_manager = barbican_cert_mgr.BarbicanCertManager() + self.cert_manager.auth = barbican_auth + + self.context = mock.Mock() + self.context.project_id = PROJECT_ID + + super().setUp() + + def test_store_cert(self): + # Mock out the client + self.bc.containers.create_certificate.return_value = ( + self.empty_container) + + # Attempt to store a cert + container_ref = self.cert_manager.store_cert( + context=self.context, + certificate=self.certificate, + private_key=self.private_key, + intermediates=self.intermediates, + private_key_passphrase=self.private_key_passphrase, + name=self.name + ) + + self.assertEqual(self.empty_container.container_ref, container_ref) + + # create_secret should be called four times with our data + calls = [ + mock.call(payload=self.certificate, expiration=None, + name=mock.ANY), + mock.call(payload=self.private_key, expiration=None, + name=mock.ANY), + mock.call(payload=self.intermediates, expiration=None, + name=mock.ANY), + mock.call(payload=self.private_key_passphrase, expiration=None, + name=mock.ANY) + ] + self.bc.secrets.create.assert_has_calls(calls, any_order=True) + + # create_certificate should be called once + self.assertEqual(1, self.bc.containers.create_certificate.call_count) + + # Container should be stored once + self.empty_container.store.assert_called_once_with() + + def test_store_cert_failure(self): + # Mock out the client + self.bc.containers.create_certificate.return_value = ( + self.empty_container) + test_secrets = [ + self.secret1, + self.secret2, + self.secret3, + self.secret4 + ] + self.bc.secrets.create.side_effect = test_secrets + self.empty_container.store.side_effect = ValueError() + + # Attempt to store a cert + self.assertRaises( + ValueError, + self.cert_manager.store_cert, + context=self.context, + certificate=self.certificate, + private_key=self.private_key, + intermediates=self.intermediates, + private_key_passphrase=self.private_key_passphrase, + name=self.name + ) + + # create_secret should be called four times with our data + calls = [ + mock.call(payload=self.certificate, expiration=None, + name=mock.ANY), + mock.call(payload=self.private_key, expiration=None, + name=mock.ANY), + mock.call(payload=self.intermediates, expiration=None, + name=mock.ANY), + mock.call(payload=self.private_key_passphrase, expiration=None, + name=mock.ANY) + ] + self.bc.secrets.create.assert_has_calls(calls, any_order=True) + + # create_certificate should be called once + self.assertEqual(1, self.bc.containers.create_certificate.call_count) + + # Container should be stored once + self.empty_container.store.assert_called_once_with() + + # All secrets should be deleted (or at least an attempt made) + for s in test_secrets: + s.delete.assert_called_once_with() + + def test_get_cert(self): + # Mock out the client + self.bc.containers.register_consumer.return_value = self.container + + # Get the container data + data = self.cert_manager.get_cert( + context=self.context, + cert_ref=self.container_ref, + resource_ref=self.container_ref, + service_name='Octavia' + ) + + # 'register_consumer' should be called once with the container_ref + self.bc.containers.register_consumer.assert_called_once_with( + container_ref=self.container_ref, + url=self.container_ref, + name='Octavia' + ) + + # The returned data should be a Cert object with the correct values + self.assertIsInstance(data, cert.Cert) + self.assertEqual(data.get_private_key(), + self.private_key.payload) + 
self.assertEqual(data.get_certificate(), + self.certificate.payload) + self.assertEqual(data.get_intermediates(), + sample.X509_IMDS_LIST) + self.assertEqual(data.get_private_key_passphrase(), + octavia_utils.b(self.private_key_passphrase.payload)) + + def test_get_cert_no_registration(self): + self.bc.containers.get.return_value = self.container + + # Get the container data + data = self.cert_manager.get_cert( + context=self.context, + cert_ref=self.container_ref, check_only=True + ) + + # 'get' should be called once with the container_ref + self.bc.containers.get.assert_called_once_with( + container_ref=self.container_ref + ) + + # The returned data should be a Cert object with the correct values + self.assertIsInstance(data, cert.Cert) + self.assertEqual(data.get_private_key(), + self.private_key.payload) + self.assertEqual(data.get_certificate(), + self.certificate.payload) + self.assertEqual(data.get_intermediates(), + sample.X509_IMDS_LIST) + self.assertEqual(data.get_private_key_passphrase(), + octavia_utils.b(self.private_key_passphrase.payload)) + + def test_get_cert_no_registration_raise_on_secret_access_failure(self): + self.bc.containers.get.return_value = self.container + with mock.patch('barbicanclient.v1.secrets.Secret.payload', + new_callable=mock.PropertyMock) as mock_payload: + mock_payload.side_effect = ValueError + + # Get the container data + self.assertRaises( + ValueError, self.cert_manager.get_cert, + context=self.context, + cert_ref=self.container_ref, check_only=True + ) + + # 'get' should be called once with the container_ref + self.bc.containers.get.assert_called_once_with( + container_ref=self.container_ref + ) + + def test_delete_cert(self): + # Attempt to deregister as a consumer + self.cert_manager.delete_cert( + context=self.context, + cert_ref=self.container_ref, + resource_ref=self.container_ref, + service_name='Octavia' + ) + + # remove_consumer should be called once with the container_ref + self.bc.containers.remove_consumer.assert_called_once_with( + container_ref=self.container_ref, + url=self.container_ref, + name='Octavia' + ) + + def test_set_acls(self): + self.cert_manager.set_acls( + context=self.context, + cert_ref=self.container_ref + ) + + # our mock_bc should have one call to ensure_secret_access for each + # of our secrets, and the container + self.cert_manager.auth.ensure_secret_access.assert_has_calls([ + mock.call(self.context, self.certificate_uuid), + mock.call(self.context, self.intermediates_uuid), + mock.call(self.context, self.private_key_uuid), + mock.call(self.context, self.private_key_passphrase_uuid) + ], any_order=True) + + def test_unset_acls(self): + self.cert_manager.unset_acls( + context=self.context, + cert_ref=self.container_ref + ) + + # our mock_bc should have one call to revoke_secret_access for each + # of our secrets, and the container + self.cert_manager.auth.revoke_secret_access.assert_has_calls([ + mock.call(self.context, self.certificate_uuid), + mock.call(self.context, self.intermediates_uuid), + mock.call(self.context, self.private_key_uuid), + mock.call(self.context, self.private_key_passphrase_uuid) + ], any_order=True) + + def test_get_secret(self): + self.assertIsNone(self.cert_manager.get_secret('fake context', + 'fake secret ref')) diff --git a/octavia/tests/unit/certificates/manager/test_castellan_mgr.py b/octavia/tests/unit/certificates/manager/test_castellan_mgr.py new file mode 100644 index 0000000000..5c97df0bb1 --- /dev/null +++ b/octavia/tests/unit/certificates/manager/test_castellan_mgr.py @@ -0,0 
+1,49 @@ +# Copyright 2019 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from octavia.certificates.manager import castellan_mgr +from octavia.common import exceptions +import octavia.tests.unit.base as base + + +class TestCastellanCertManager(base.TestCase): + + def setUp(self): + + self.fake_secret = 'Fake secret' + self.manager = mock.MagicMock() + self.certbag = mock.MagicMock() + self.manager.get.return_value = self.certbag + + super().setUp() + + @mock.patch('castellan.key_manager.API') + def test_get_secret(self, mock_api): + mock_api.return_value = self.manager + + castellan_mgr_obj = castellan_mgr.CastellanCertManager() + self.certbag.get_encoded.side_effect = [self.fake_secret, + Exception('boom')] + + result = castellan_mgr_obj.get_secret('context', 'secret_ref') + + self.assertEqual(self.fake_secret, result) + self.manager.get.assert_called_once_with('context', 'secret_ref') + self.certbag.get_encoded.assert_called_once() + + self.assertRaises(exceptions.CertificateRetrievalException, + castellan_mgr_obj.get_secret, 'context', + 'secret_ref') diff --git a/octavia/tests/unit/certificates/manager/test_local.py b/octavia/tests/unit/certificates/manager/test_local.py new file mode 100644 index 0000000000..99aab5749a --- /dev/null +++ b/octavia/tests/unit/certificates/manager/test_local.py @@ -0,0 +1,162 @@ +# Copyright 2014 Rackspace US, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
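+# NOTE: These tests never touch the real filesystem: os.open and os.fdopen +# are mocked so reads and writes can be asserted against the expected +# /tmp/<cert_id>.* paths without creating files.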
+ +import os +import stat +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +import octavia.certificates.common.cert as cert +import octavia.certificates.manager.local as local_cert_mgr +from octavia.common import exceptions +from octavia.tests.common import sample_certs +import octavia.tests.unit.base as base + + +class TestLocalManager(base.TestCase): + + def setUp(self): + self.certificate = sample_certs.X509_CERT.decode('utf-8') + self.intermediates = sample_certs.X509_IMDS.decode('utf-8') + self.private_key = sample_certs.X509_CERT_KEY.decode('utf-8') + self.private_key_passphrase = "My Private Key Passphrase" + + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="certificates", storage_path="/tmp/") + + super().setUp() + + def _store_cert(self): + fd_mock = mock.mock_open() + open_mock = mock.Mock() + # Attempt to store the cert + with mock.patch('os.open', open_mock), mock.patch.object( + os, 'fdopen', fd_mock): + cert_id = local_cert_mgr.LocalCertManager.store_cert( + context=None, + certificate=self.certificate, + intermediates=self.intermediates, + private_key=self.private_key, + private_key_passphrase=self.private_key_passphrase + ) + + # Check that something came back + self.assertIsNotNone(cert_id) + + # Verify the correct files were opened + flags = os.O_WRONLY | os.O_CREAT + mode = stat.S_IRUSR | stat.S_IWUSR # mode 0600 + open_mock.assert_has_calls([ + mock.call( + os.path.join(f'/tmp/{cert_id}.crt'), flags, mode), + mock.call( + os.path.join(f'/tmp/{cert_id}.key'), flags, mode), + mock.call( + os.path.join(f'/tmp/{cert_id}.int'), flags, mode), + mock.call( + os.path.join(f'/tmp/{cert_id}.pass'), flags, mode) + ], any_order=True) + + # Verify the writes were made + fd_mock().write.assert_has_calls([ + mock.call(self.certificate), + mock.call(self.intermediates), + mock.call(self.private_key), + mock.call(self.private_key_passphrase) + ], any_order=True) + + return cert_id + + def _get_cert(self, cert_id): + fd_mock = mock.mock_open() + fd_mock.side_effect = [ + mock.mock_open(read_data=self.certificate).return_value, + mock.mock_open(read_data=self.private_key).return_value, + mock.mock_open(read_data=self.intermediates).return_value, + mock.mock_open(read_data=self.private_key_passphrase).return_value + ] + open_mock = mock.Mock() + # Attempt to retrieve the cert + with mock.patch('os.open', open_mock), mock.patch.object( + os, 'fdopen', fd_mock): + data = local_cert_mgr.LocalCertManager.get_cert(None, cert_id) + + # Verify the correct files were opened + flags = os.O_RDONLY + open_mock.assert_has_calls([ + mock.call(os.path.join(f'/tmp/{cert_id}.crt'), flags), + mock.call(os.path.join(f'/tmp/{cert_id}.key'), flags), + mock.call(os.path.join(f'/tmp/{cert_id}.int'), flags), + mock.call(os.path.join(f'/tmp/{cert_id}.pass'), flags) + ], any_order=True) + + # The returned data should be a Cert object + self.assertIsInstance(data, cert.Cert) + + return data + + def _delete_cert(self, cert_id): + remove_mock = mock.Mock() + # Delete the cert + with mock.patch('os.remove', remove_mock): + local_cert_mgr.LocalCertManager.delete_cert(None, cert_id) + + # Verify the correct files were removed + remove_mock.assert_has_calls([ + mock.call(os.path.join(f'/tmp/{cert_id}.crt')), + mock.call(os.path.join(f'/tmp/{cert_id}.key')), + mock.call(os.path.join(f'/tmp/{cert_id}.int')), + mock.call(os.path.join(f'/tmp/{cert_id}.pass')) + ], any_order=True) + + def test_store_cert(self): + 
self._store_cert() + + def test_get_cert(self): + # Get the cert + self._get_cert("cert1") + + def test_delete_cert(self): + # Store a cert + cert_id = self._store_cert() + + # Verify the cert exists + self._get_cert(cert_id) + + # Delete the cert + self._delete_cert(cert_id) + + def test_get_secret(self): + fd_mock = mock.mock_open() + open_mock = mock.Mock() + secret_id = uuidutils.generate_uuid() + # Attempt to retrieve the secret + with mock.patch('os.open', open_mock), mock.patch.object( + os, 'fdopen', fd_mock): + local_cert_mgr.LocalCertManager.get_secret(None, secret_id) + + # Verify the correct files were opened + flags = os.O_RDONLY + open_mock.assert_called_once_with(f'/tmp/{secret_id}.crt', flags) + + # Test failure path + with mock.patch('os.open', open_mock), mock.patch.object( + os, 'fdopen', fd_mock) as mock_open: + mock_open.side_effect = IOError + self.assertRaises(exceptions.CertificateRetrievalException, + local_cert_mgr.LocalCertManager.get_secret, + None, secret_id) diff --git a/octavia/tests/unit/certificates/manager/test_noop.py b/octavia/tests/unit/certificates/manager/test_noop.py new file mode 100644 index 0000000000..29f77c0d38 --- /dev/null +++ b/octavia/tests/unit/certificates/manager/test_noop.py @@ -0,0 +1,53 @@ +# Copyright 2023 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import uuidutils + +from octavia.certificates.common import cert +from octavia.certificates.manager import noop as noop_cert_mgr +from octavia.tests.common import sample_certs +import octavia.tests.unit.base as base + + +class TestNoopManager(base.TestCase): + + def setUp(self): + super().setUp() + self.manager = noop_cert_mgr.NoopCertManager() + + def test_store_cert(self): + certificate = self.manager.store_cert( + None, + sample_certs.X509_CERT, + sample_certs.X509_CERT_KEY_ENCRYPTED, + sample_certs.X509_IMDS, + private_key_passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE) + self.assertIsNotNone(certificate) + self.assertIsInstance(certificate, cert.Cert) + + def test_get_cert(self): + cert_ref = uuidutils.generate_uuid() + certificate = self.manager.get_cert( + context=None, + cert_ref=cert_ref) + self.assertIsNotNone(certificate) + self.assertIsInstance(certificate, cert.Cert) + + def test_get_secret(self): + secret_ref = uuidutils.generate_uuid() + secret = self.manager.get_secret( + context=None, + secret_ref=secret_ref) + self.assertIsNotNone(secret) + self.assertIsInstance(secret, cert.Cert) diff --git a/octavia/tests/unit/cmd/__init__.py b/octavia/tests/unit/cmd/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/cmd/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/cmd/test_agent.py b/octavia/tests/unit/cmd/test_agent.py new file mode 100644 index 0000000000..9656733137 --- /dev/null +++ b/octavia/tests/unit/cmd/test_agent.py @@ -0,0 +1,46 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import ssl +from unittest import mock + +from octavia.cmd import agent +from octavia.tests.unit import base + + +class TestAmphoraAgentCMD(base.TestCase): + + def setUp(self): + super().setUp() + + @mock.patch('octavia.cmd.agent.AmphoraAgent') + @mock.patch('octavia.amphorae.backends.agent.api_server.server.Server') + @mock.patch('multiprocessing.Process') + @mock.patch('octavia.common.service.prepare_service') + def test_main(self, mock_service, mock_process, mock_server, mock_amp): + mock_health_proc = mock.MagicMock() + mock_server_instance = mock.MagicMock() + mock_amp_instance = mock.MagicMock() + + mock_process.return_value = mock_health_proc + mock_server.return_value = mock_server_instance + mock_amp.return_value = mock_amp_instance + + agent.main() + + # Ensure gunicorn is initialized with the correct cert_reqs option. + # This option is what enforces use of a valid client certificate. + self.assertEqual( + ssl.CERT_REQUIRED, + mock_amp.call_args[0][1]['cert_reqs']) + + mock_health_proc.start.assert_called_once_with() + mock_amp_instance.run.assert_called_once() diff --git a/octavia/tests/unit/cmd/test_driver_agent.py b/octavia/tests/unit/cmd/test_driver_agent.py new file mode 100644 index 0000000000..01ab8cffeb --- /dev/null +++ b/octavia/tests/unit/cmd/test_driver_agent.py @@ -0,0 +1,222 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
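+# NOTE: multiprocessing, signal handling and stevedore loading are mocked +# throughout, so main() and its helpers are exercised without spawning real +# listener or provider-agent processes.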
+import signal +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +import octavia.api.drivers.driver_agent.driver_listener +from octavia.cmd import driver_agent +from octavia.tests.unit import base + +CONF = cfg.CONF + + +class TestDriverAgentCMD(base.TestCase): + + def setUp(self): + super().setUp() + self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF)) + + @mock.patch('os.kill') + @mock.patch('octavia.cmd.driver_agent.CONF') + def test_handle_mutate_config(self, mock_conf, mock_os_kill): + driver_agent._handle_mutate_config(1, 2) + mock_conf.mutate_config_files.assert_called_once() + os_calls = [mock.call(1, signal.SIGHUP), mock.call(2, signal.SIGHUP)] + mock_os_kill.assert_has_calls(os_calls, any_order=True) + + def test_check_if_provider_agent_enabled(self): + mock_extension = mock.MagicMock() + self.CONF.config(group="driver_agent", + enabled_provider_agents=[ + 'spiffy_agent', 'super_agent']) + mock_extension.name = 'super_agent' + self.assertTrue( + driver_agent._check_if_provider_agent_enabled(mock_extension)) + mock_extension.name = 'bogus_agent' + self.assertFalse( + driver_agent._check_if_provider_agent_enabled(mock_extension)) + + @mock.patch('setproctitle.setproctitle') + @mock.patch('signal.signal') + def test_process_wrapper(self, mock_signal, mock_setproctitle): + mock_exit_event = mock.MagicMock() + mock_function = mock.MagicMock() + mock_function.side_effect = [ + mock.DEFAULT, Exception('boom'), mock.DEFAULT, Exception('boom'), + mock.DEFAULT] + mock_exit_event.is_set.side_effect = [False, False, True, + False, False, True] + + signal_calls = [mock.call(signal.SIGINT, signal.SIG_IGN), + mock.call(signal.SIGHUP, driver_agent._mutate_config)] + # With agent_name + driver_agent._process_wrapper( + mock_exit_event, 'test_proc_name', mock_function, + agent_name='test_agent_name') + mock_signal.assert_has_calls(signal_calls) + mock_setproctitle.assert_called_once_with( + 'octavia-driver-agent - test_proc_name -- test_agent_name') + mock_function.assert_called_once_with(mock_exit_event) + + # With agent_name - With function exception + mock_signal.reset_mock() + mock_setproctitle.reset_mock() + mock_function.reset_mock() + driver_agent._process_wrapper( + mock_exit_event, 'test_proc_name', mock_function, + agent_name='test_agent_name') + mock_signal.assert_has_calls(signal_calls) + mock_setproctitle.assert_called_once_with( + 'octavia-driver-agent - test_proc_name -- test_agent_name') + mock_function.assert_called_once_with(mock_exit_event) + + # Without agent_name + mock_signal.reset_mock() + mock_setproctitle.reset_mock() + mock_function.reset_mock() + driver_agent._process_wrapper( + mock_exit_event, 'test_proc_name', mock_function) + mock_signal.assert_has_calls(signal_calls) + mock_setproctitle.assert_called_once_with( + 'octavia-driver-agent - test_proc_name') + mock_function.assert_called_once_with(mock_exit_event) + + # Without agent_name - With function exception + mock_signal.reset_mock() + mock_setproctitle.reset_mock() + mock_function.reset_mock() + driver_agent._process_wrapper( + mock_exit_event, 'test_proc_name', mock_function) + mock_signal.assert_has_calls(signal_calls) + mock_setproctitle.assert_called_once_with( + 'octavia-driver-agent - test_proc_name') + mock_function.assert_called_once_with(mock_exit_event) + + @mock.patch('octavia.cmd.driver_agent.multiprocessing') + @mock.patch('stevedore.enabled.EnabledExtensionManager') + def test_start_provider_agents(self, mock_stevedore, 
mock_multiprocessing): + mock_extension = mock.MagicMock() + mock_extension.name = 'test_extension' + mock_exit_event = mock.MagicMock() + mock_stevedore.return_value = [mock_extension] + mock_ext_proc = mock.MagicMock() + mock_multiprocessing.Process.return_value = mock_ext_proc + + driver_agent._start_provider_agents(mock_exit_event) + + mock_stevedore.assert_called_once_with( + namespace='octavia.driver_agent.provider_agents', + check_func=driver_agent._check_if_provider_agent_enabled) + mock_multiprocessing.Process.assert_called_once_with( + name='test_extension', target=driver_agent._process_wrapper, + args=(mock_exit_event, 'provider_agent', mock_extension.plugin), + kwargs={'agent_name': 'test_extension'}) + mock_ext_proc.start.assert_called_once_with() + + @mock.patch('os.kill') + @mock.patch('octavia.cmd.driver_agent.multiprocessing') + @mock.patch('oslo_reports.guru_meditation_report.TextGuruMeditation.' + 'setup_autorun') + @mock.patch('octavia.common.service.prepare_service') + def test_main(self, mock_prep_srvc, mock_gmr, mock_multiprocessing, + mock_kill): + mock_exit_event = mock.MagicMock() + mock_multiprocessing.Event.return_value = mock_exit_event + mock_status_listener_proc = mock.MagicMock() + mock_stats_listener_proc = mock.MagicMock() + mock_get_listener_proc = mock.MagicMock() + mock_multiprocessing.Process.side_effect = [ + mock_status_listener_proc, mock_stats_listener_proc, + mock_get_listener_proc, + mock_status_listener_proc, mock_stats_listener_proc, + mock_get_listener_proc, + mock_status_listener_proc, mock_stats_listener_proc, + mock_get_listener_proc, + mock_status_listener_proc, mock_stats_listener_proc, + mock_get_listener_proc, + mock_status_listener_proc, mock_stats_listener_proc, + mock_get_listener_proc] + driver_agent.main() + mock_prep_srvc.assert_called_once() + mock_gmr.assert_called_once() + mock_status_listener_proc.start.assert_called_once() + mock_stats_listener_proc.start.assert_called_once() + mock_get_listener_proc.start.assert_called_once() + process_calls = [mock.call( + args=(mock_exit_event, + 'status_listener', + octavia.api.drivers.driver_agent.driver_listener. + status_listener), + name='status_listener', + target=driver_agent._process_wrapper), + mock.call( + args=(mock_exit_event, + 'stats_listener', + octavia.api.drivers.driver_agent.driver_listener. + stats_listener), + name='stats_listener', + target=driver_agent._process_wrapper), + mock.call( + args=(mock_exit_event, + 'get_listener', + octavia.api.drivers.driver_agent.driver_listener. 
+ get_listener), + name='get_listener', + target=driver_agent._process_wrapper)] + mock_multiprocessing.Process.assert_has_calls(process_calls) + + # Test keyboard interrupt path + mock_stats_listener_proc.join.side_effect = [KeyboardInterrupt, None] + driver_agent.main() + mock_exit_event.set.assert_called_once() + + # Test keyboard interrupt with provider agents + mock_exit_event.reset_mock() + mock_stats_listener_proc.join.side_effect = [KeyboardInterrupt, None] + mock_provider_proc = mock.MagicMock() + mock_provider_proc.pid = 'not-valid-pid' + mock_provider_proc.exitcode = 1 + driver_agent.PROVIDER_AGENT_PROCESSES = [mock_provider_proc] + driver_agent.main() + mock_exit_event.set.assert_called_once() + mock_provider_proc.join.assert_called_once_with( + CONF.driver_agent.provider_agent_shutdown_timeout) + + # Test keyboard interrupt with provider agents fails to stop + mock_exit_event.reset_mock() + mock_stats_listener_proc.join.side_effect = [KeyboardInterrupt, None] + mock_provider_proc = mock.MagicMock() + mock_provider_proc.pid = 'not-valid-pid' + mock_provider_proc.exitcode = None + driver_agent.PROVIDER_AGENT_PROCESSES = [mock_provider_proc] + driver_agent.main() + mock_exit_event.set.assert_called_once() + mock_provider_proc.join.assert_called_once_with( + CONF.driver_agent.provider_agent_shutdown_timeout) + mock_kill.assert_called_once_with('not-valid-pid', signal.SIGKILL) + + # Test keyboard interrupt with provider agents join exception + mock_exit_event.reset_mock() + mock_stats_listener_proc.join.side_effect = [KeyboardInterrupt, None] + mock_provider_proc = mock.MagicMock() + mock_provider_proc.pid = 'not-valid-pid' + mock_provider_proc.join.side_effect = Exception('boom') + driver_agent.PROVIDER_AGENT_PROCESSES = [mock_provider_proc] + driver_agent.main() + mock_exit_event.set.assert_called_once() + mock_provider_proc.join.assert_called_once_with( + CONF.driver_agent.provider_agent_shutdown_timeout) diff --git a/octavia/tests/unit/cmd/test_haproxy_vrrp_check.py b/octavia/tests/unit/cmd/test_haproxy_vrrp_check.py new file mode 100644 index 0000000000..a785b846e6 --- /dev/null +++ b/octavia/tests/unit/cmd/test_haproxy_vrrp_check.py @@ -0,0 +1,44 @@ +# Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
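+# NOTE: health_check() loops on socket.recv(), so the test ends the loop by +# raising from the mocked socket; main() is expected to pass a failed check +# result straight to sys.exit().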
+ +from unittest import mock + +from octavia.cmd import haproxy_vrrp_check +from octavia.tests.unit import base + + +class TestHAproxyVRRPCheckCMD(base.TestCase): + + def setUp(self): + super().setUp() + + @mock.patch('socket.socket') + def test_health_check(self, mock_socket): + socket_mock = mock.MagicMock() + mock_socket.return_value = socket_mock + recv_mock = mock.MagicMock() + recv_mock.side_effect = [b'1', Exception('BREAK')] + socket_mock.recv = recv_mock + + self.assertRaisesRegex(Exception, 'BREAK', + haproxy_vrrp_check.health_check, + '10.0.0.1') + + @mock.patch('octavia.cmd.haproxy_vrrp_check.health_check') + @mock.patch('sys.argv') + @mock.patch('sys.exit') + def test_main(self, mock_exit, mock_argv, mock_health_check): + mock_health_check.side_effect = [1, Exception('FAIL')] + haproxy_vrrp_check.main() + mock_exit.assert_called_once_with(1) diff --git a/octavia/tests/unit/cmd/test_health_checker.py b/octavia/tests/unit/cmd/test_health_checker.py new file mode 100644 index 0000000000..befb6864b2 --- /dev/null +++ b/octavia/tests/unit/cmd/test_health_checker.py @@ -0,0 +1,328 @@ +# Copyright 2020 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import socket +import struct +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +from octavia.cmd import health_checker +from octavia.tests.common import utils as test_utils +from octavia.tests.unit import base + +CONF = cfg.CONF + + +class TestHealthCheckerCMD(base.TestCase): + + def setUp(self): + super().setUp() + self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF)) + + def test_crc32c(self): + data = b'STRING1234' + + result = health_checker.crc32c(data) + + self.assertEqual(result, 0x30e0e107) + + @mock.patch('random.randint', return_value=42424242) + def test__sctp_build_init_packet(self, mock_randint): + expected_packet = bytearray( + b'\x04\xd2\x16.\x00\x00\x00\x00\x1d9\x96\r\x01\x00\x00\x14:\xde' + b'h\xb1\x00\x01\xa0\x00\x00\n\xff\xff\x02\x87W\xb2') + + src_port = 1234 + dest_port = 5678 + tag = 987654321 + + pkt = health_checker._sctp_build_init_packet( + src_port, dest_port, tag) + + self.assertEqual(pkt, expected_packet) + + decoded_src_port = struct.unpack_from('!H', pkt, 0)[0] + decoded_dest_port = struct.unpack_from('!H', pkt, 2)[0] + + self.assertEqual(src_port, decoded_src_port) + self.assertEqual(dest_port, decoded_dest_port) + + decoded_tag = struct.unpack_from('!L', pkt, 16)[0] + + self.assertEqual(tag, decoded_tag) + + decoded_checksum = struct.unpack_from('!L', pkt, 8)[0] + + # Reset and re-compute checksum + pkt[8] = pkt[9] = pkt[10] = pkt[11] = 0 + checksum = health_checker.crc32c(pkt) + + self.assertEqual(checksum, decoded_checksum) + + def test__sctp_build_abort_packet(self): + expected_packet = bytearray( + b'\x04\xd2\x16.\x02\x93wM3\x83\xbbN\x06\x01\x00\x04') + + src_port = 1234 + dest_port = 5678 + verification_tag = 43218765 + + pkt = health_checker._sctp_build_abort_packet( + src_port, dest_port, verification_tag) + + 
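+ # The ABORT packet must match byte-for-byte, CRC32C checksum included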
self.assertEqual(pkt, expected_packet) + + decoded_src_port = struct.unpack_from('!H', pkt, 0)[0] + decoded_dest_port = struct.unpack_from('!H', pkt, 2)[0] + + self.assertEqual(src_port, decoded_src_port) + self.assertEqual(dest_port, decoded_dest_port) + + decoded_tag = struct.unpack_from('!L', pkt, 4)[0] + + self.assertEqual(verification_tag, decoded_tag) + + decoded_checksum = struct.unpack_from('!L', pkt, 8)[0] + + # Reset and re-compute checksum + pkt[8] = pkt[9] = pkt[10] = pkt[11] = 0 + checksum = health_checker.crc32c(pkt) + + self.assertEqual(checksum, decoded_checksum) + + def test__sctp_decode_packet(self): + # IPv4 INIT ACK packet + data = (b'\x45\x00\x00\x00\x00\x01\x01\x01' + b'\x00\x00\xff\x06\x7f\x00\x00\x00' + b'\x7f\x00\x00\x02\x16.\x04\xd2' + b'\x02\x93\x77\x4d\x00\x00\x00\x32' + b'\x02\x00\x00\x16') + + family = socket.AF_INET + expected_tag = 43218765 + + ret = health_checker._sctp_decode_packet(data, family, expected_tag) + + self.assertEqual(ret, 2) # INIT ACK + + # IPv6 ABORT packet + data = (b'\x16.\x04\xd2\x02\x93\x77\x4d\x00\x00\x00\x32' + b'\x06\x00\x00\x16') + + family = socket.AF_INET6 + expected_tag = 43218765 + + ret = health_checker._sctp_decode_packet(data, family, expected_tag) + + self.assertEqual(ret, 6) # ABORT + + def test__sctp_decode_packet_too_short(self): + # Truncated IPv4 packet, too short to hold an SCTP header + data = (b'\x45\x00\x00\x00\x00\x01') + + family = socket.AF_INET + expected_tag = 43218765 + + ret = health_checker._sctp_decode_packet(data, family, expected_tag) + self.assertFalse(ret) + + def test__sctp_decode_packet_unexpected(self): + # IPv4 packet with different verification tag + data = (b'\x45\x00\x00\x00\x00\x01\x01\x01' + b'\x00\x00\xff\x06\x7f\x00\x00\x00' + b'\x7f\x00\x00\x02\x16.\x04\xd2' + b'\x02\x91\x17\x4d\x00\x00\x00\x32' + b'\x02\x00\x00\x16') + + family = socket.AF_INET + expected_tag = 43218765 + + ret = health_checker._sctp_decode_packet(data, family, expected_tag) + self.assertFalse(ret) + + @mock.patch("time.time") + @mock.patch("socket.socket") + @mock.patch("octavia.cmd.health_checker._sctp_decode_packet") + @mock.patch("octavia.cmd.health_checker._sctp_build_abort_packet") + def test_sctp_health_check(self, mock_build_abort_packet, + mock_decode_packet, mock_socket, + mock_time): + mock_time.side_effect = [1, 2, 3, 4] + socket_mock = mock.Mock() + socket_mock.recvfrom = mock.Mock() + socket_mock.recvfrom.side_effect = [ + socket.timeout(), + (None, None) + ] + mock_socket.return_value = socket_mock + 
mock_decode_packet.return_value = 2 # INIT ACK + + abrt_mock = mock.Mock() + mock_build_abort_packet.return_value = abrt_mock + + mock_open = self.useFixture( + test_utils.OpenFixture('/proc/net/protocols', + 'SCTP\n')).mock_open + + with mock.patch('builtins.open', mock_open): + ret = health_checker.sctp_health_check( + "192.168.0.27", 1234, timeout=3) + + self.assertEqual(0, ret) # Success + + mock_decode_packet.assert_called() + for call in socket_mock.send.mock_calls: + self.assertNotEqual(mock.call(abrt_mock), call) + + @mock.patch("time.time") + @mock.patch("socket.socket") + @mock.patch("octavia.cmd.health_checker._sctp_decode_packet") + @mock.patch("octavia.cmd.health_checker._sctp_build_abort_packet") + def test_sctp_health_check_fail(self, mock_build_abort_packet, + mock_decode_packet, mock_socket, + mock_time): + mock_time.side_effect = [1, 2, 3, 4] + socket_mock = mock.Mock() + socket_mock.recvfrom = mock.Mock() + socket_mock.recvfrom.side_effect = [ + socket.timeout(), + (None, None) + ] + mock_socket.return_value = socket_mock + + mock_decode_packet.return_value = 6 # ABRT + + abrt_mock = mock.Mock() + mock_build_abort_packet.return_value = abrt_mock + + mock_open = self.useFixture( + test_utils.OpenFixture('/proc/net/protocols', + 'bar\n')).mock_open + + with mock.patch('builtins.open', mock_open): + ret = health_checker.sctp_health_check( + "192.168.0.27", 1234, timeout=3) + + self.assertEqual(1, ret) # Error + + mock_decode_packet.assert_called() + for call in socket_mock.send.mock_calls: + self.assertNotEqual(mock.call(abrt_mock), call) + + @mock.patch("time.time") + @mock.patch("socket.socket") + @mock.patch("octavia.cmd.health_checker._sctp_decode_packet") + @mock.patch("octavia.cmd.health_checker._sctp_build_abort_packet") + def test_sctp_health_check_error(self, mock_build_abort_packet, + mock_decode_packet, mock_socket, + mock_time): + mock_time.side_effect = [1, 2, 3, 4] + socket_mock = mock.Mock() + socket_mock.recvfrom = mock.Mock() + socket_mock.recvfrom.side_effect = [ + socket.timeout(), + (None, None) + ] + mock_socket.return_value = socket_mock + + mock_decode_packet.return_value = 1234 # Unknown + + abrt_mock = mock.Mock() + mock_build_abort_packet.return_value = abrt_mock + + mock_open = self.useFixture( + test_utils.OpenFixture('/proc/net/protocols', + 'bar\n')).mock_open + + with mock.patch('builtins.open', mock_open): + ret = health_checker.sctp_health_check( + "192.168.0.27", 1234, timeout=3) + + self.assertEqual(3, ret) # Unknown error + + mock_decode_packet.assert_called() + socket_mock.send.assert_called_with(abrt_mock) + + @mock.patch("time.time") + @mock.patch("socket.socket") + @mock.patch("octavia.cmd.health_checker._sctp_decode_packet") + @mock.patch("octavia.cmd.health_checker._sctp_build_abort_packet") + def test_sctp_health_check_timeout(self, mock_build_abort_packet, + mock_decode_packet, mock_socket, + mock_time): + mock_time.side_effect = [1, 2, 3, 4] + socket_mock = mock.Mock() + socket_mock.recvfrom = mock.Mock() + socket_mock.recvfrom.side_effect = [ + socket.timeout(), + socket.timeout(), + socket.timeout(), + socket.timeout(), + ] + mock_socket.return_value = socket_mock + + abrt_mock = mock.Mock() + mock_build_abort_packet.return_value = abrt_mock + + mock_open = self.useFixture( + test_utils.OpenFixture('/proc/net/protocols', + 'bar\n')).mock_open + + with mock.patch('builtins.open', mock_open): + ret = health_checker.sctp_health_check( + "192.168.0.27", 1234, timeout=3) + + self.assertEqual(2, ret) # Timeout + + 
mock_decode_packet.assert_not_called() + for call in socket_mock.send.mock_calls: + self.assertNotEqual(mock.call(abrt_mock), call) diff --git a/octavia/tests/unit/cmd/test_health_manager.py b/octavia/tests/unit/cmd/test_health_manager.py new file mode 100644 index 0000000000..c6a38a61d2 --- /dev/null +++ b/octavia/tests/unit/cmd/test_health_manager.py @@ -0,0 +1,103 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import signal +from unittest import mock + +from octavia.cmd import health_manager +from octavia.tests.unit import base + + +class TestHealthManagerCMD(base.TestCase): + + def setUp(self): + super().setUp() + + @mock.patch('multiprocessing.Event') + @mock.patch('octavia.amphorae.drivers.health.' + 'heartbeat_udp.UDPStatusGetter') + def test_hm_listener(self, mock_getter, + mock_event): + mock_event.is_set.side_effect = [False, False, True] + getter_mock = mock.MagicMock() + check_mock = mock.MagicMock() + getter_mock.check = check_mock + getter_mock.check.side_effect = [None, Exception('break')] + mock_getter.return_value = getter_mock + health_manager.hm_listener(mock_event) + mock_getter.assert_called_once() + self.assertEqual(2, getter_mock.check.call_count) + + @mock.patch('multiprocessing.Event') + @mock.patch('futurist.periodics.PeriodicWorker.start') + @mock.patch('futurist.periodics.PeriodicWorker.__init__') + @mock.patch('signal.signal') + @mock.patch('octavia.controller.healthmanager.' 
+ 'health_manager.HealthManager') + def test_hm_health_check(self, mock_health, mock_signal, mock_worker, + mock_start, mock_event): + mock_event.is_set.side_effect = [False, True] + hm_mock = mock.MagicMock() + mock_worker.return_value = None + health_check_mock = mock.MagicMock() + hm_mock.health_check = health_check_mock + mock_health.return_value = hm_mock + health_manager.hm_health_check(mock_event) + mock_health.assert_called_once_with(mock_event) + + @mock.patch('multiprocessing.Process') + @mock.patch('octavia.common.service.prepare_service') + def test_main(self, mock_service, mock_process): + mock_listener_proc = mock.MagicMock() + mock_health_proc = mock.MagicMock() + + mock_process.side_effect = [mock_listener_proc, mock_health_proc] + + health_manager.main() + + mock_listener_proc.start.assert_called_once_with() + mock_health_proc.start.assert_called_once_with() + mock_listener_proc.join.assert_called_once_with() + mock_health_proc.join.assert_called_once_with() + + @mock.patch('os.kill') + @mock.patch('multiprocessing.Process') + @mock.patch('octavia.common.service.prepare_service') + def test_main_keyboard_interrupt(self, mock_service, mock_process, + mock_kill): + mock_listener_proc = mock.MagicMock() + mock_health_proc = mock.MagicMock() + mock_join = mock.MagicMock() + mock_join.side_effect = [KeyboardInterrupt, None] + mock_listener_proc.join = mock_join + + mock_process.side_effect = [mock_listener_proc, mock_health_proc] + + health_manager.main() + + mock_listener_proc.start.assert_called_once_with() + mock_health_proc.start.assert_called_once_with() + self.assertEqual(2, mock_listener_proc.join.call_count) + mock_health_proc.join.assert_called_once_with() + mock_kill.assert_called_once_with(mock_health_proc.pid, + signal.SIGINT) + + @mock.patch('os.kill') + @mock.patch('oslo_config.cfg.CONF.mutate_config_files') + def test_handle_mutate_config(self, mock_mutate, mock_kill): + health_manager._handle_mutate_config(1, 2) + + mock_mutate.assert_called_once() + + calls = [mock.call(1, signal.SIGHUP), mock.call(2, signal.SIGHUP)] + mock_kill.assert_has_calls(calls) diff --git a/octavia/tests/unit/cmd/test_house_keeping.py b/octavia/tests/unit/cmd/test_house_keeping.py new file mode 100644 index 0000000000..d0c64e9eea --- /dev/null +++ b/octavia/tests/unit/cmd/test_house_keeping.py @@ -0,0 +1,154 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from octavia.cmd import house_keeping +from octavia.tests.unit import base + + +class TestHouseKeepingCMD(base.TestCase): + def setUp(self): + super().setUp() + + @mock.patch('octavia.cmd.house_keeping.db_cleanup_thread_event') + @mock.patch('octavia.controller.housekeeping.' 
+ 'house_keeping.DatabaseCleanup') + def test_db_cleanup(self, mock_DatabaseCleanup, + db_cleanup_event_mock): + db_cleanup = mock.MagicMock() + delete_old_amphorae = mock.MagicMock() + db_cleanup.delete_old_amphorae = delete_old_amphorae + mock_DatabaseCleanup.return_value = db_cleanup + + # mock db_cleanup_thread_event.is_set() in the while loop + db_cleanup_event_mock.is_set = mock.MagicMock() + db_cleanup_event_mock.is_set.side_effect = [False, Exception('break')] + + self.assertRaisesRegex(Exception, 'break', house_keeping.db_cleanup) + + mock_DatabaseCleanup.assert_called_once_with() + self.assertEqual(1, db_cleanup.delete_old_amphorae.call_count) + + @mock.patch('octavia.cmd.house_keeping.cert_rotate_thread_event') + @mock.patch('octavia.controller.housekeeping.' + 'house_keeping.CertRotation') + def test_hk_cert_rotation_with_exception(self, mock_CertRotation, + cert_rotate_event_mock): + # mock cert_rotate object + cert_rotate_mock = mock.MagicMock() + # mock rotate() + rotate_mock = mock.MagicMock() + + cert_rotate_mock.rotate = rotate_mock + + mock_CertRotation.return_value = cert_rotate_mock + + # mock cert_rotate_thread_event.is_set() in the while loop + cert_rotate_event_mock.is_set = mock.MagicMock() + cert_rotate_event_mock.is_set.side_effect = [False, Exception('break')] + + self.assertRaisesRegex(Exception, 'break', + house_keeping.cert_rotation) + + mock_CertRotation.assert_called_once_with() + self.assertEqual(1, cert_rotate_mock.rotate.call_count) + + @mock.patch('octavia.cmd.house_keeping.cert_rotate_thread_event') + @mock.patch('octavia.controller.housekeeping.' + 'house_keeping.CertRotation') + def test_hk_cert_rotation_without_exception(self, mock_CertRotation, + cert_rotate_event_mock): + # mock cert_rotate object + cert_rotate_mock = mock.MagicMock() + # mock rotate() + rotate_mock = mock.MagicMock() + + cert_rotate_mock.rotate = rotate_mock + + mock_CertRotation.return_value = cert_rotate_mock + + # mock cert_rotate_thread_event.is_set() in the while loop + cert_rotate_event_mock.is_set = mock.MagicMock() + cert_rotate_event_mock.is_set.side_effect = [False, True] + + self.assertIsNone(house_keeping.cert_rotation()) + + mock_CertRotation.assert_called_once_with() + self.assertEqual(1, cert_rotate_mock.rotate.call_count) + + @mock.patch('octavia.cmd.house_keeping.cert_rotate_thread_event') + @mock.patch('octavia.cmd.house_keeping.db_cleanup_thread_event') + @mock.patch('threading.Thread') + @mock.patch('octavia.common.service.prepare_service') + def test_main(self, mock_service, mock_thread, + db_cleanup_thread_event_mock, + cert_rotate_thread_event_mock): + + db_cleanup_thread_mock = mock.MagicMock() + cert_rotate_thread_mock = mock.MagicMock() + + mock_thread.side_effect = [db_cleanup_thread_mock, + cert_rotate_thread_mock] + + db_cleanup_thread_mock.daemon.return_value = True + cert_rotate_thread_mock.daemon.return_value = True + + house_keeping.main() + + db_cleanup_thread_mock.start.assert_called_once_with() + cert_rotate_thread_mock.start.assert_called_once_with() + + self.assertTrue(db_cleanup_thread_mock.daemon) + self.assertTrue(cert_rotate_thread_mock.daemon) + + @mock.patch('octavia.cmd.house_keeping.cert_rotate_thread_event') + @mock.patch('octavia.cmd.house_keeping.db_cleanup_thread_event') + @mock.patch('threading.Thread') + @mock.patch('octavia.common.service.prepare_service') + def test_main_keyboard_interrupt(self, mock_service, mock_thread, + db_cleanup_thread_event_mock, + cert_rotate_thread_event_mock): + db_cleanup_thread_mock = 
mock.MagicMock() + cert_rotate_thread_mock = mock.MagicMock() + + mock_thread.side_effect = [db_cleanup_thread_mock, + cert_rotate_thread_mock] + + db_cleanup_thread_mock.daemon.return_value = True + cert_rotate_thread_mock.daemon.return_value = True + + mock_join = mock.MagicMock() + mock_join.side_effect = [KeyboardInterrupt, None] + db_cleanup_thread_mock.join = mock_join + + house_keeping.main() + + db_cleanup_thread_event_mock.set.assert_called_once_with() + + cert_rotate_thread_event_mock.set.assert_called_once_with() + + db_cleanup_thread_mock.start.assert_called_once_with() + cert_rotate_thread_mock.start.assert_called_once_with() + + self.assertTrue(db_cleanup_thread_mock.daemon) + self.assertTrue(cert_rotate_thread_mock.daemon) + self.assertEqual(2, db_cleanup_thread_mock.join.call_count) + cert_rotate_thread_mock.join.assert_called_once_with() + + @mock.patch('oslo_config.cfg.CONF.mutate_config_files') + def test_mutate_config(self, mock_mutate): + house_keeping._mutate_config() + + mock_mutate.assert_called_once() diff --git a/octavia/tests/unit/cmd/test_interface.py b/octavia/tests/unit/cmd/test_interface.py new file mode 100644 index 0000000000..da35c20dac --- /dev/null +++ b/octavia/tests/unit/cmd/test_interface.py @@ -0,0 +1,137 @@ +# Copyright 2021 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
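+# NOTE: These tests only create real interface_file.InterfaceFile objects
+# as fixtures; the interface controller is always a mock, so no amphora
+# network interface is actually configured.
+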
+from unittest import mock + +from octavia.amphorae.backends.utils import interface_file +from octavia.cmd import interface +from octavia.tests.unit import base + + +class TestInterfaceCMD(base.TestCase): + + def setUp(self): + super().setUp() + + self.interface1 = interface_file.InterfaceFile("eth1", if_type="type1") + self.interface2 = interface_file.InterfaceFile("eth2", if_type="type2") + + def test_interfaces_find(self): + controller = mock.Mock() + controller.list = mock.Mock() + controller.list.return_value = { + "eth1": self.interface1, + "eth2": self.interface2 + } + + ret = interface.interfaces_find(controller, "eth2") + self.assertCountEqual([self.interface2], ret) + controller.list.assert_called_once() + + def test_interfaces_find_all(self): + controller = mock.Mock() + controller.list = mock.Mock() + controller.list.return_value = { + "eth1": self.interface1, + "eth2": self.interface2 + } + + ret = interface.interfaces_find(controller, "all") + self.assertCountEqual([self.interface1, self.interface2], ret) + controller.list.assert_called_once() + + def test_interfaces_find_all_empty(self): + controller = mock.Mock() + controller.list = mock.Mock() + controller.list.return_value = {} + + ret = interface.interfaces_find(controller, "all") + self.assertEqual(0, len(ret)) + controller.list.assert_called_once() + + def test_interfaces_find_not_found(self): + controller = mock.Mock() + controller.list = mock.Mock() + controller.list.return_value = { + "eth1": self.interface1, + "eth2": self.interface2 + } + + self.assertRaisesRegex( + interface.InterfaceException, + "Could not find interface 'eth3'.", + interface.interfaces_find, + controller, "eth3") + controller.list.assert_called_once() + + def test_interfaces_update(self): + action_fn = mock.Mock() + action_str = mock.Mock() + interfaces = [self.interface1, self.interface2] + + interface.interfaces_update(interfaces, action_fn, action_str) + self.assertEqual(2, len(action_fn.mock_calls)) + action_fn.assert_called_with(self.interface2) + + def test_interfaces_update_with_errors(self): + action_fn = mock.Mock() + action_str = mock.Mock() + interfaces = [self.interface1, self.interface2] + action_fn.side_effect = [None, Exception("error msg")] + + self.assertRaisesRegex( + interface.InterfaceException, + "Could not configure interface:.*eth2.*error msg", + interface.interfaces_update, + interfaces, action_fn, action_str) + self.assertEqual(2, len(action_fn.mock_calls)) + + @mock.patch("octavia.amphorae.backends.utils.interface." 
+ "InterfaceController") + @mock.patch("octavia.cmd.interface.interfaces_find") + @mock.patch("octavia.cmd.interface.interfaces_update") + def test_interface_cmd(self, mock_interfaces_update, + mock_interfaces_find, mock_controller): + controller = mock.Mock() + controller.up = mock.Mock() + controller.down = mock.Mock() + mock_controller.return_value = controller + mock_interfaces_find.return_value = [self.interface1] + + interface.interface_cmd("eth1", "up") + + mock_interfaces_find.assert_called_once_with( + controller, "eth1") + mock_interfaces_update.assert_called_once_with( + [self.interface1], mock_controller.return_value.up, "up") + + mock_interfaces_find.reset_mock() + mock_interfaces_update.reset_mock() + + mock_interfaces_find.return_value = [self.interface2] + + interface.interface_cmd("eth2", "down") + + mock_interfaces_find.assert_called_once_with( + controller, "eth2") + mock_interfaces_update.assert_called_once_with( + [self.interface2], mock_controller.return_value.down, "down") + + @mock.patch("octavia.amphorae.backends.utils.interface." + "InterfaceController") + def test_interface_cmd_invalid_action(self, mock_controller): + self.assertRaisesRegex( + interface.InterfaceException, + "Unknown action.*invalidaction", + interface.interface_cmd, + "eth1", "invalidaction") diff --git a/octavia/tests/unit/cmd/test_prometheus_proxy.py b/octavia/tests/unit/cmd/test_prometheus_proxy.py new file mode 100644 index 0000000000..1b38758525 --- /dev/null +++ b/octavia/tests/unit/cmd/test_prometheus_proxy.py @@ -0,0 +1,173 @@ +# Copyright 2022 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
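+# NOTE: The handler tests patch SimpleHTTPRequestHandler.__init__ so that
+# a PrometheusProxy instance can be created and exercised directly,
+# without binding a real listening socket.
+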
+import signal +from unittest import mock + +from octavia.cmd import prometheus_proxy +from octavia.tests.unit import base + + +class TestPrometheusProxyCMD(base.TestCase): + + @mock.patch('http.server.SimpleHTTPRequestHandler.log_request') + @mock.patch('http.server.SimpleHTTPRequestHandler.__init__') + def test_log_request(self, mock_req_handler_init, mock_log_request): + mock_req_handler_init.return_value = None + proxy = prometheus_proxy.PrometheusProxy() + proxy.log_request() + mock_log_request.assert_not_called() + + @mock.patch('os.cpu_count', return_value=2) + @mock.patch('psutil.getloadavg', return_value=(1, 2, 3)) + @mock.patch('http.server.SimpleHTTPRequestHandler.__init__') + def test_add_cpu_utilization(self, mock_req_handler_init, mock_getloadavg, + mock_cpu_count): + mock_req_handler_init.return_value = None + proxy = prometheus_proxy.PrometheusProxy() + test_buffer = "TestStringBuffer\n" + result = proxy._add_cpu_utilization(test_buffer) + + expected_result = ( + "TestStringBuffer\n" + "# HELP octavia_loadbalancer_cpu Load balancer CPU utilization " + "(percentage).\n" + "# TYPE octavia_loadbalancer_cpu gauge\n" + "octavia_loadbalancer_cpu 50.0\n") + + self.assertEqual(expected_result, result) + + @mock.patch('psutil.virtual_memory', return_value=(1, 2, 23.5)) + @mock.patch('http.server.SimpleHTTPRequestHandler.__init__') + def test__add_memory_utilization(self, mock_req_handler_init, + mock_virt_mem): + mock_req_handler_init.return_value = None + proxy = prometheus_proxy.PrometheusProxy() + test_buffer = "TestStringMemoryBuffer\n" + result = proxy._add_memory_utilization(test_buffer) + + expected_result = ( + "TestStringMemoryBuffer\n" + "# HELP octavia_loadbalancer_memory Load balancer memory " + "utilization (percentage).\n" + "# TYPE octavia_loadbalancer_memory gauge\n" + "octavia_loadbalancer_memory 23.5\n") + + self.assertEqual(expected_result, result) + + @mock.patch('octavia.cmd.prometheus_proxy.PRINT_REJECTED', True) + # No need to print all of the rejected lines to the log + @mock.patch('builtins.print') + @mock.patch('urllib.request.urlopen') + @mock.patch('os.cpu_count', return_value=2) + @mock.patch('psutil.getloadavg', return_value=(1, 2, 3)) + @mock.patch('psutil.virtual_memory', return_value=(1, 2, 23.5)) + @mock.patch('http.server.SimpleHTTPRequestHandler.__init__') + def test_do_get(self, mock_req_handler_init, mock_virt_mem, + mock_getloadavg, mock_cpu_count, mock_urlopen, mock_print): + mock_req_handler_init.return_value = None + proxy = prometheus_proxy.PrometheusProxy() + + mock_send_response = mock.MagicMock() + proxy.send_response = mock_send_response + mock_send_header = mock.MagicMock() + proxy.send_header = mock_send_header + mock_end_headers = mock.MagicMock() + proxy.end_headers = mock_end_headers + mock_wfile = mock.MagicMock() + proxy.wfile = mock_wfile + + with open("octavia/tests/common/sample_haproxy_prometheus", + "rb") as file: + mock_urlopen.return_value = file + + proxy.do_GET() + + mock_send_response.assert_called_once_with(200) + + with open("octavia/tests/common/sample_octavia_prometheus", + "rb") as file2: + octavia_metrics = file2.read() + mock_wfile.write.assert_called_once_with(octavia_metrics) + + @mock.patch('urllib.request.urlopen') + @mock.patch('os.cpu_count', return_value=2) + @mock.patch('psutil.getloadavg', return_value=(1, 2, 3)) + @mock.patch('psutil.virtual_memory', return_value=(1, 2, 23.5)) + @mock.patch('http.server.SimpleHTTPRequestHandler.__init__') + def test_do_get_exception(self, mock_req_handler_init, 
mock_virt_mem, + mock_getloadavg, mock_cpu_count, mock_urlopen): + mock_urlopen.side_effect = [Exception('boom')] + mock_req_handler_init.return_value = None + proxy = prometheus_proxy.PrometheusProxy() + + mock_send_response = mock.MagicMock() + proxy.send_response = mock_send_response + mock_send_header = mock.MagicMock() + proxy.send_header = mock_send_header + mock_end_headers = mock.MagicMock() + proxy.end_headers = mock_end_headers + + proxy.do_GET() + + mock_send_response.assert_called_once_with(502) + + @mock.patch('signal.signal') + def test_signalhandler(self, mock_signal): + + sig_handler = prometheus_proxy.SignalHandler() + + calls = [mock.call(signal.SIGINT, sig_handler.shutdown), + mock.call(signal.SIGTERM, sig_handler.shutdown)] + mock_signal.assert_has_calls(calls) + + self.assertFalse(prometheus_proxy.EXIT_EVENT.is_set()) + sig_handler.shutdown() + self.assertTrue(prometheus_proxy.EXIT_EVENT.is_set()) + + @mock.patch('octavia.cmd.prometheus_proxy.EXIT_EVENT') + @mock.patch('signal.signal') + def test_shutdown_thread(self, mock_signal, mock_exit_event): + + mock_http = mock.MagicMock() + + prometheus_proxy.shutdown_thread(mock_http) + + mock_exit_event.wait.assert_called_once() + mock_http.shutdown.assert_called_once() + + @mock.patch('threading.Thread') + @mock.patch('http.server.ThreadingHTTPServer.__init__') + @mock.patch('http.server.ThreadingHTTPServer.serve_forever') + @mock.patch('octavia.amphorae.backends.utils.network_namespace.' + 'NetworkNamespace.__exit__') + @mock.patch('octavia.amphorae.backends.utils.network_namespace.' + 'NetworkNamespace.__enter__') + @mock.patch('octavia.cmd.prometheus_proxy.EXIT_EVENT') + @mock.patch('octavia.cmd.prometheus_proxy.SignalHandler') + def test_main(self, mock_signal_handler, mock_exit_event, mock_netns_enter, + mock_netns_exit, mock_serve_forever, mock_server_init, + mock_thread): + + mock_exit_event.is_set.side_effect = [False, False, True] + mock_netns_enter.side_effect = [Exception('boom'), True] + + mock_server_init.return_value = None + + prometheus_proxy.main() + + mock_signal_handler.assert_called_once() + mock_server_init.assert_called_once_with( + ('127.0.0.1', 9102), + prometheus_proxy.PrometheusProxy) + mock_serve_forever.assert_called_once() diff --git a/octavia/tests/unit/cmd/test_status.py b/octavia/tests/unit/cmd/test_status.py new file mode 100644 index 0000000000..644cbb12be --- /dev/null +++ b/octavia/tests/unit/cmd/test_status.py @@ -0,0 +1,126 @@ +# Copyright (c) 2018 NEC, Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
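+# NOTE: Results are compared against the oslo.upgradecheck Code values
+# (SUCCESS, WARNING, FAILURE). The MySQL persistence and Redis jobboard
+# drivers are mocked, so no live database or Redis instance is needed.
+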
+from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_upgradecheck.upgradecheck import Code + +from octavia.cmd import status +from octavia.common import constants +from octavia.tests.unit import base + + +class TestUpgradeChecks(base.TestCase): + + def setUp(self): + super().setUp() + self.cmd = status.Checks() + + def test__check_amphorav2_not_enabled(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='api_settings', + default_provider_driver='other_provider', + enabled_provider_drivers={'other_provider': "Test"}) + check_result = self.cmd._check_amphorav2() + self.assertEqual( + Code.SUCCESS, check_result.code) + + def test__check_persistence_sqlite(self): + check_result = self.cmd._check_persistence() + self.assertEqual( + Code.WARNING, check_result.code) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'MysqlPersistenceDriver') + def test__check_persistence_error(self, mysql_driver): + mysql_driver().get_persistence.side_effect = Exception + check_result = self.cmd._check_persistence() + self.assertEqual( + Code.FAILURE, check_result.code) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'MysqlPersistenceDriver') + def test__check_persistence(self, mysql_driver): + pers_mock = mock.MagicMock() + mysql_driver().get_persistence().__enter__.return_value = pers_mock + check_result = self.cmd._check_persistence() + self.assertEqual(pers_mock, check_result) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'RedisTaskFlowDriver') + def test__check_jobboard_error(self, redis_driver): + pers_mock = mock.MagicMock() + redis_driver().job_board.side_effect = Exception + check_result = self.cmd._check_jobboard(pers_mock) + self.assertEqual(Code.FAILURE, check_result.code) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'RedisTaskFlowDriver') + def test__check_jobboard_not_connected(self, redis_driver): + jb_connected = mock.Mock(connected=False) + redis_driver().job_board().__enter__.return_value = jb_connected + check_result = self.cmd._check_jobboard(mock.MagicMock()) + self.assertEqual(Code.FAILURE, check_result.code) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'RedisTaskFlowDriver') + def test__check_jobboard(self, redis_driver): + jb_connected = mock.Mock(connected=True) + redis_driver().job_board().__enter__.return_value = jb_connected + check_result = self.cmd._check_jobboard(mock.MagicMock()) + self.assertEqual(Code.SUCCESS, check_result.code) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'RedisTaskFlowDriver') + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'MysqlPersistenceDriver') + def test__check_amphorav2_success(self, mysql_driver, redis_driver): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='api_settings', + default_provider_driver=constants.AMPHORA, + enabled_provider_drivers={constants.AMPHORAV2: + "Test"}) + jb_connected = mock.Mock(connected=True) + redis_driver().job_board().__enter__.return_value = jb_connected + check_result = self.cmd._check_amphorav2() + self.assertEqual( + Code.SUCCESS, check_result.code) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' 
+ 'RedisTaskFlowDriver') + def test__check_amphorav2_warning(self, redis_driver): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='api_settings', + default_provider_driver=constants.AMPHORA, + enabled_provider_drivers={constants.AMPHORAV2: + "Test"}) + check_result = self.cmd._check_amphorav2() + self.assertEqual( + Code.WARNING, check_result.code) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'RedisTaskFlowDriver') + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'MysqlPersistenceDriver') + def test__check_amphorav2_failure(self, mysql_driver, redis_driver): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group='api_settings', + default_provider_driver=constants.AMPHORAV2, + enabled_provider_drivers={constants.AMPHORA: "Test"}) + jb_connected = mock.Mock(connected=False) + redis_driver().job_board().__enter__.return_value = jb_connected + check_result = self.cmd._check_amphorav2() + self.assertEqual( + Code.FAILURE, check_result.code) diff --git a/octavia/tests/unit/common/__init__.py b/octavia/tests/unit/common/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/common/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/common/jinja/__init__.py b/octavia/tests/unit/common/jinja/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/common/jinja/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/common/jinja/haproxy/__init__.py b/octavia/tests/unit/common/jinja/haproxy/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/common/jinja/haproxy/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/octavia/tests/unit/common/jinja/haproxy/combined_listeners/__init__.py b/octavia/tests/unit/common/jinja/haproxy/combined_listeners/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py b/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py new file mode 100644 index 0000000000..d9259ee031 --- /dev/null +++ b/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py @@ -0,0 +1,2088 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import os +from unittest import mock + +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +from octavia.common import constants +from octavia.common.jinja.haproxy.combined_listeners import jinja_cfg +from octavia.tests.unit import base +from octavia.tests.unit.common.sample_configs import sample_configs_combined + +CONF = cfg.CONF + + +class TestHaproxyCfg(base.TestCase): + def setUp(self): + super().setUp() + self.jinja_cfg = jinja_cfg.JinjaTemplater( + base_amp_path='/var/lib/octavia', + base_crt_dir='/var/lib/octavia/certs') + + def test_get_template(self): + template = self.jinja_cfg._get_template() + self.assertEqual('haproxy.cfg.j2', template.name) + + def test_render_template_tls(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') + FAKE_CRT_LIST_FILENAME = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + 'sample_loadbalancer_id_1/sample_listener_id_1.pem') + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " http-response set-header Strict-Transport-Security " + "\"max-age=10000000; includeSubDomains; preload;\"\n" + " bind 10.0.0.2:443 " + f"ssl crt-list {FAKE_CRT_LIST_FILENAME} " + "ca-file /var/lib/octavia/certs/sample_loadbalancer_id_1/" + "client_ca.pem verify required crl-file /var/lib/octavia/" + "certs/sample_loadbalancer_id_1/SHA_ID.pem ciphers " + f"{constants.CIPHERS_OWASP_SUITE_B} " + "no-sslv3 no-tlsv10 no-tlsv11 alpn " + f"{','.join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS)}\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 
3 rise 2 cookie " + "sample_member_id_2\n\n") + tls_tupe = {'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='imaCert1', private_key='imaPrivateKey1', + primary_cn='FakeCN'), + 'cont_id_ca': 'client_ca.pem', + 'cont_id_crl': 'SHA_ID.pem'} + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True, sni=True, + client_ca_cert=True, client_crl_cert=True)], + tls_tupe) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + def test_render_template_tls_no_sni(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') + FAKE_CRT_LIST_FILENAME = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + 'sample_loadbalancer_id_1/sample_listener_id_1.pem') + alpn = ",".join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " http-response set-header Strict-Transport-Security " + "\"max-age=10000000; includeSubDomains; preload;\"\n" + f" bind 10.0.0.2:443 ssl crt-list {FAKE_CRT_LIST_FILENAME}" + f" ciphers {constants.CIPHERS_OWASP_SUITE_B} no-sslv3 " + f"no-tlsv10 no-tlsv11 alpn {alpn}\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True)], + tls_certs={'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='ImAalsdkfjCert', + private_key='ImAsdlfksdjPrivateKey', + primary_cn="FakeCN")}) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + def test_render_template_tls_no_ciphers(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') + FAKE_CRT_LIST_FILENAME = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + 'sample_loadbalancer_id_1/sample_listener_id_1.pem') + alpn = ",".join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " http-response set-header Strict-Transport-Security " + "\"max-age=10000000; includeSubDomains; preload;\"\n" + " bind 10.0.0.2:443 ssl crt-list " + f"{FAKE_CRT_LIST_FILENAME} " + f"no-sslv3 no-tlsv10 no-tlsv11 alpn {alpn}\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 
50000\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True, tls_ciphers=None)], + tls_certs={'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='ImAalsdkfjCert', + private_key='ImAsdlfksdjPrivateKey', + primary_cn="FakeCN")}) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + def test_render_template_tls_no_versions(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') + FAKE_CRT_LIST_FILENAME = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + 'sample_loadbalancer_id_1/sample_listener_id_1.pem') + alpn = ",".join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " http-response set-header Strict-Transport-Security " + "\"max-age=10000000; includeSubDomains; preload;\"\n" + " bind 10.0.0.2:443 " + f"ssl crt-list {FAKE_CRT_LIST_FILENAME} " + "ca-file /var/lib/octavia/certs/sample_loadbalancer_id_1/" + "client_ca.pem verify required crl-file /var/lib/octavia/" + "certs/sample_loadbalancer_id_1/SHA_ID.pem ciphers " + f"{constants.CIPHERS_OWASP_SUITE_B} " + f"alpn {alpn}\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 cookie " + "sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + tls_tupe = {'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='imaCert1', private_key='imaPrivateKey1', + primary_cn='FakeCN'), + 'cont_id_ca': 'client_ca.pem', + 'cont_id_crl': 'SHA_ID.pem'} + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True, sni=True, + client_ca_cert=True, client_crl_cert=True, tls_versions=None)], + tls_tupe) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + 
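+    # NOTE: With both tls_ciphers and tls_versions unset, the rendered
+    # bind line should carry only the ssl crt-list and alpn options,
+    # without a cipher string or TLS version restrictions.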
+ def test_render_template_tls_no_ciphers_or_versions(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') + FAKE_CRT_LIST_FILENAME = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + 'sample_loadbalancer_id_1/sample_listener_id_1.pem') + alpn = ",".join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " http-response set-header Strict-Transport-Security " + "\"max-age=10000000; includeSubDomains; preload;\"\n" + " bind 10.0.0.2:443 ssl crt-list " + f"{FAKE_CRT_LIST_FILENAME} " + f"alpn {alpn}\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True, tls_ciphers=None, + tls_versions=None)], + tls_certs={'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='ImAalsdkfjCert', + private_key='ImAsdlfksdjPrivateKey', + primary_cn="FakeCN")}) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + def test_render_template_tls_alpn(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') + FAKE_CRT_LIST_FILENAME = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + 'sample_loadbalancer_id_1/sample_listener_id_1.pem') + alpn_protocols = ['chip', 'dale'] + alpn = ",".join(alpn_protocols) + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " http-response set-header Strict-Transport-Security " + "\"max-age=10000000; includeSubDomains; preload;\"\n" + " bind 10.0.0.2:443 ssl crt-list " + f"{FAKE_CRT_LIST_FILENAME} " + f"ciphers {constants.CIPHERS_OWASP_SUITE_B} " + f"no-sslv3 no-tlsv10 no-tlsv11 alpn {alpn}\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie 
sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True, + alpn_protocols=alpn_protocols)], + tls_certs={'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='ImAalsdkfjCert', + private_key='ImAsdlfksdjPrivateKey', + primary_cn="FakeCN")}) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + def test_render_template_tls_no_alpn(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') + FAKE_CRT_LIST_FILENAME = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + 'sample_loadbalancer_id_1/sample_listener_id_1.pem') + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " http-response set-header Strict-Transport-Security " + "\"max-age=10000000; includeSubDomains; preload;\"\n" + " bind 10.0.0.2:443 ssl crt-list " + f"{FAKE_CRT_LIST_FILENAME} " + f"ciphers {constants.CIPHERS_OWASP_SUITE_B} no-sslv3 " + f"no-tlsv10 no-tlsv11\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True, + alpn_protocols=None)], + tls_certs={'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='ImAalsdkfjCert', + private_key='ImAsdlfksdjPrivateKey', + primary_cn="FakeCN")}) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + def test_render_template_tls_no_alpn_hsts_max_age_only(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') + FAKE_CRT_LIST_FILENAME = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + 'sample_loadbalancer_id_1/sample_listener_id_1.pem') + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " http-response set-header Strict-Transport-Security " + "\"max-age=10000000;\"\n" + " bind 10.0.0.2:443 ssl crt-list " + f"{FAKE_CRT_LIST_FILENAME} " + f"ciphers {constants.CIPHERS_OWASP_SUITE_B} " + "no-sslv3 no-tlsv10 no-tlsv11\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" 
+ " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True, + alpn_protocols=None, hsts_include_subdomains=False, + hsts_preload=False)], + tls_certs={'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='ImAalsdkfjCert', + private_key='ImAsdlfksdjPrivateKey', + primary_cn="FakeCN")}) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + def test_render_template_tls_no_hsts(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="haproxy_amphora", base_cert_dir='/fake_cert_dir') + FAKE_CRT_LIST_FILENAME = os.path.join( + CONF.haproxy_amphora.base_cert_dir, + 'sample_loadbalancer_id_1/sample_listener_id_1.pem') + alpn = ",".join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " bind 10.0.0.2:443 " + f"ssl crt-list {FAKE_CRT_LIST_FILENAME} " + "ca-file /var/lib/octavia/certs/sample_loadbalancer_id_1/" + "client_ca.pem verify required crl-file /var/lib/octavia/" + "certs/sample_loadbalancer_id_1/SHA_ID.pem ciphers " + f"{constants.CIPHERS_OWASP_SUITE_B} " + f"no-sslv3 no-tlsv10 no-tlsv11 alpn {alpn}\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 cookie " + "sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + tls_tupe = {'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='imaCert1', private_key='imaPrivateKey1', + primary_cn='FakeCN'), + 'cont_id_ca': 'client_ca.pem', + 'cont_id_crl': 'SHA_ID.pem'} + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS', tls=True, sni=True, + client_ca_cert=True, client_crl_cert=True, + hsts_max_age=None)], + tls_tupe) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + def test_render_template_http(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" 
+ " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple()]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_prometheus(self): + fe = ("frontend sample_listener_id_1\n" + " maxconn {maxconn}\n" + " bind 10.0.0.2:80\n" + " mode http\n" + " timeout client 50000\n" + " default_backend prometheus-exporter-internal\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + be = "" + defaults = ("defaults\n" + " log global\n" + " retries 3\n" + " option redispatch\n" + " option splice-request\n" + " option splice-response\n" + " option http-keep-alive\n\n\n\n" + "frontend prometheus-exporter-internal-endpoint\n" + " bind 127.0.0.1:9101\n" + " mode http\n" + " no log\n" + " http-request use-service prometheus-exporter if { " + "path /metrics }\n" + " http-request reject\n" + " timeout http-request 5s\n" + " timeout client 5s\n" + "backend prometheus-exporter-internal\n" + " mode http\n" + " no log\n" + " balance first\n" + " timeout connect 5s\n" + " timeout server 5s\n" + " server prometheus-internal 127.0.0.1:9102") + logging = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " + "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " + "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " + "%tsc\n\n") + + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto=lib_consts.PROTOCOL_PROMETHEUS, include_pools=False)], + feature_compatibility={lib_consts.PROTOCOL_PROMETHEUS: True}) + + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be, logging=logging, defaults=defaults), + rendered_obj) + + def test_render_template_additional_vips(self): + fe = ("frontend sample_listener_id_1\n" + " maxconn {maxconn}\n" + " bind 10.0.0.2:80\n" + " bind 10.0.1.2:80\n" + " bind 2001:db8::2:80\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + additional_vips=True)]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(frontend=fe), + rendered_obj) + + def test_render_template_member_backup(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "addr 192.168.1.1 
port 9000 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "addr 192.168.1.1 port 9000 " + "cookie sample_member_id_2 backup\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + monitor_ip_port=True, backup_member=True)]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_custom_timeouts(self): + fe = ("frontend sample_listener_id_1\n" + " maxconn {maxconn}\n" + " bind 10.0.0.2:80\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 2\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 1\n" + " timeout server 3\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie " + "sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + timeout_member_connect=1, timeout_client_data=2, + timeout_member_data=3)]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + def test_render_template_null_timeouts(self): + fe = ("frontend sample_listener_id_1\n" + " maxconn {maxconn}\n" + " bind 10.0.0.2:80\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie " + "sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + timeout_member_connect=None, timeout_client_data=None, + timeout_member_data=None)]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + def test_render_template_member_monitor_addr_port(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " 
timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "addr 192.168.1.1 port 9000 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "addr 192.168.1.1 port 9000 " + "cookie sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + monitor_ip_port=True)]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_https_real_monitor(self): + fe = ("frontend sample_listener_id_1\n" + " maxconn {maxconn}\n" + " bind 10.0.0.2:443\n" + " mode tcp\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " + "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " + "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " + "%tsc\n\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode tcp\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check check-ssl verify none inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check check-ssl verify none inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple(proto='HTTPS')]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + frontend=fe, logging=lg, backend=be), rendered_obj) + + def test_render_template_https_hello_monitor(self): + fe = ("frontend sample_listener_id_1\n" + " maxconn {maxconn}\n" + " bind 10.0.0.2:443\n" + " mode tcp\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " + "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " + "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " + "%tsc\n\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode tcp\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='HTTPS', monitor_proto='TLS-HELLO')]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + frontend=fe, logging=lg, 
backend=be), rendered_obj) + + def test_render_template_no_monitor_http(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "cookie sample_member_id_2\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple(proto='HTTP', + monitor=False)]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + backend=be), rendered_obj) + + def test_render_template_disabled_member(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "cookie sample_member_id_2 disabled\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='HTTP', monitor=False, disabled_member=True)]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + backend=be), rendered_obj) + + def test_render_template_ping_monitor_http(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option external-check\n" + " external-check command /var/lib/octavia/ping-wrapper.sh\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n") + go = (f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + f" external-check\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='HTTP', monitor_proto='PING')]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + backend=be, global_opts=go), rendered_obj) + + def test_render_template_ping_monitor_http_insecure_fork(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " load-server-state-from-file global\n" + " timeout check 31s\n" + " option external-check\n" + " external-check command /var/lib/octavia/ping-wrapper.sh\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n") + go = 
( + " server-state-file /var/lib/octavia/sample_loadbalancer_id_1/" + "servers-state\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " external-check\n insecure-fork-wanted\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='HTTP', monitor_proto='PING')], + feature_compatibility={ + "requires_insecure_fork": True, + constants.SERVER_STATE_FILE: True}) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + backend=be, global_opts=go), rendered_obj) + + def test_render_template_no_monitor_https(self): + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " bind 10.0.0.2:443\n" + " mode tcp\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " + "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " + "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " + "%tsc\n\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode tcp\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "cookie sample_member_id_2\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple(proto='HTTPS', + monitor=False)]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + frontend=fe, logging=lg, backend=be), rendered_obj) + + def test_render_template_health_monitor_http_check(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.1\n" + " http-check send hdr Host testlab.com\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='HTTP', monitor_proto='HTTP', hm_host_http_check=True)]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + backend=be), rendered_obj) + + def test_render_template_no_persistence_https(self): + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " bind 10.0.0.2:443\n" + " mode tcp\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " + "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " + "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " + "%tsc\n\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode tcp\n" + " balance roundrobin\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " 
timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='HTTPS', monitor=False, persistence=False)]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + frontend=fe, logging=lg, backend=be), rendered_obj) + + def test_render_template_no_persistence_http(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='HTTP', monitor=False, persistence=False)]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + backend=be), rendered_obj) + + def test_render_template_sourceip_persistence(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " stick-table type ipv6 size 10k\n" + " stick on src\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + persistence_type='SOURCE_IP')]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_appcookie_persistence(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " stick-table type string len 64 size 10k\n" + " stick store-response res.cook(JSESSIONID)\n" + " stick match req.cook(JSESSIONID)\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + persistence_type='APP_COOKIE', + persistence_cookie='JSESSIONID')]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_unlimited_connections(self): + sample_amphora = sample_configs_combined.sample_amphora_tuple() + sample_listener = sample_configs_combined.sample_listener_tuple( + proto='HTTPS', monitor=False) + fe = ("frontend {listener_id}\n" + " maxconn {maxconn}\n" + " bind 10.0.0.2:443\n" + " mode tcp\n" + " default_backend 
{pool_id}:{listener_id}\n" + " timeout client 50000\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN, + pool_id=sample_listener.default_pool.id, + listener_id=sample_listener.id) + lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " + "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " + "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " + "%tsc\n\n") + be = (f"backend {sample_listener.default_pool.id}:" + f"{sample_listener.id}\n" + " mode tcp\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "cookie sample_member_id_2\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_amphora, + [sample_listener]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + frontend=fe, logging=lg, backend=be), rendered_obj) + + def test_render_template_limited_connections(self): + fe = ("frontend sample_listener_id_1\n" + " maxconn 2014\n" + " bind 10.0.0.2:443\n" + " mode tcp\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + lg = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " + "%ci\\ %cp\\ %t\\ -\\ -\\ %B\\ %U\\ " + "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " + "%tsc\n\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode tcp\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " fullconn 2014\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "cookie sample_member_id_2\n\n") + g_opts = " maxconn 2014\n\n" + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='HTTPS', monitor=False, connection_limit=2014)]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + frontend=fe, logging=lg, backend=be, global_opts=g_opts), + rendered_obj) + + def test_render_template_tls_cachesize(self): + g_opts = (f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + f" tune.ssl.cachesize 101722232\n\n") + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " http-response set-header Strict-Transport-Security " + "\"max-age=10000000; includeSubDomains; preload;\"\n" + " bind 10.0.0.2:443 " + f"ciphers {constants.CIPHERS_OWASP_SUITE_B} " + "no-sslv3 no-tlsv10 no-tlsv11 alpn " + f"{','.join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS)}\n" + " mode http\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + tls_tuple = {'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='imaCert1', private_key='imaPrivateKey1', + primary_cn='FakeCN'), + 'cont_id_ca': 'client_ca.pem', + 'cont_id_crl': 'SHA_ID.pem'} + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + proto='TERMINATED_HTTPS')], + tls_tuple, + # 32GiB total + amp_details={"memory": { + "free": 32864004, + "buffers": 32312392 // 2, + "cached": 32312392 // 2, +
}}) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + frontend=fe, global_opts=g_opts), rendered_obj) + + def test_render_template_l7policies(self): + fe = ("frontend sample_listener_id_1\n" + " maxconn {maxconn}\n" + " bind 10.0.0.2:80\n" + " mode http\n" + " acl sample_l7rule_id_1 path -m beg /api\n" + " use_backend sample_pool_id_2:sample_listener_id_1" + " if sample_l7rule_id_1\n" + " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " + "This\\ string\\\\\\ with\\ stuff\n" + " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " + "this.*|that\n" + " redirect code 302 location http://www.example.com if " + "!sample_l7rule_id_2 sample_l7rule_id_3\n" + " acl sample_l7rule_id_4 path_end jpg\n" + " acl sample_l7rule_id_5 req.hdr(host) -i -m end " + ".example.com\n" + " http-request deny if sample_l7rule_id_4 " + "sample_l7rule_id_5\n" + " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " + "This\\ string\\\\\\ with\\ stuff\n" + " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " + "this.*|that\n" + " redirect code 302 prefix https://example.com if " + "!sample_l7rule_id_2 sample_l7rule_id_3\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 check " + "inter 30s fall 3 rise 2 cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 check " + "inter 30s fall 3 rise 2 cookie sample_member_id_2\n" + "\n" + "backend sample_pool_id_2:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /healthmon.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_3 10.0.0.97:82 weight 13 check " + "inter 30s fall 3 rise 2 cookie sample_member_id_3\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple(l7=True)]) + self.assertEqual(sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), rendered_obj) + + def test_render_template_http_xff(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " option forwardfor\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + 
[sample_configs_combined.sample_listener_tuple( + insert_headers={'X-Forwarded-For': 'true'})]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_http_xff_xfport(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " option forwardfor\n" + " http-request set-header X-Forwarded-Port %[dst_port]\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + insert_headers={'X-Forwarded-For': 'true', + 'X-Forwarded-Port': 'true'})]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_pool_proxy_protocol(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1 send-proxy\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2 send-proxy\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple(be_proto='PROXY')]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_pool_cert(self): + feature_compatibility = {constants.POOL_ALPN: True} + cert_file_path = os.path.join(self.jinja_cfg.base_crt_dir, + 'sample_listener_id_1', 'fake path') + opts = (f"ssl crt {cert_file_path} verify none sni ssl_fc_sni " + f"ciphers {constants.CIPHERS_OWASP_SUITE_B} " + f"no-sslv3 no-tlsv10 no-tlsv11") + alpn = ",".join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + f"check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + f"sample_member_id_1 {opts} alpn {alpn}\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + f"check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + f"sample_member_id_2 {opts} alpn {alpn}\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + pool_cert=True, tls_enabled=True, + 
backend_tls_ciphers=constants.CIPHERS_OWASP_SUITE_B)], + tls_certs={ + 'sample_pool_id_1': + {'client_cert': cert_file_path, + 'ca_cert': None, 'crl': None}}, + feature_compatibility=feature_compatibility) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_pool_cert_no_alpn(self): + feature_compatibility = {constants.POOL_ALPN: False} + cert_file_path = os.path.join(self.jinja_cfg.base_crt_dir, + 'sample_listener_id_1', 'fake path') + opts = (f"ssl crt {cert_file_path} verify none sni ssl_fc_sni " + f"ciphers {constants.CIPHERS_OWASP_SUITE_B} " + f"no-sslv3 no-tlsv10 no-tlsv11") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " + f"{opts}\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " + f"{opts}\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + pool_cert=True, tls_enabled=True, + backend_tls_ciphers=constants.CIPHERS_OWASP_SUITE_B)], + tls_certs={ + 'sample_pool_id_1': + {'client_cert': cert_file_path, + 'ca_cert': None, 'crl': None}}, + feature_compatibility=feature_compatibility) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_pool_cert_no_versions(self): + feature_compatibility = {constants.POOL_ALPN: True} + cert_file_path = os.path.join(self.jinja_cfg.base_crt_dir, + 'sample_listener_id_1', 'fake path') + opts = (f"ssl crt {cert_file_path} verify none sni ssl_fc_sni " + f"ciphers {constants.CIPHERS_OWASP_SUITE_B}") + alpn = ",".join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + f"check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + f"sample_member_id_1 {opts} alpn {alpn}\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + f"check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + f"sample_member_id_2 {opts} alpn {alpn}\n\n" + ) + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + pool_cert=True, tls_enabled=True, + backend_tls_ciphers=constants.CIPHERS_OWASP_SUITE_B, + backend_tls_versions=None)], + tls_certs={ + 'sample_pool_id_1': + {'client_cert': cert_file_path, + 'ca_cert': None, 'crl': None}}, + feature_compatibility=feature_compatibility) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_pool_cert_no_ciphers(self): + feature_compatibility = 
{constants.POOL_ALPN: True} + cert_file_path = os.path.join(self.jinja_cfg.base_crt_dir, + 'sample_listener_id_1', 'fake path') + opts = (f"ssl crt {cert_file_path} verify none sni ssl_fc_sni " + f"no-sslv3 no-tlsv10 no-tlsv11") + alpn = ",".join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + f"check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + f"sample_member_id_1 {opts} alpn {alpn}\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + f"check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + f"sample_member_id_2 {opts} alpn {alpn}\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + pool_cert=True, tls_enabled=True)], + tls_certs={ + 'sample_pool_id_1': + {'client_cert': cert_file_path, + 'ca_cert': None, 'crl': None}}, + feature_compatibility=feature_compatibility) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_pool_cert_no_ciphers_or_versions_or_alpn(self): + cert_file_path = os.path.join(self.jinja_cfg.base_crt_dir, + 'sample_listener_id_1', 'fake path') + opts = f"ssl crt {cert_file_path} verify none sni ssl_fc_sni" + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_1 " + f"{opts}\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_2 " + f"{opts}\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + pool_cert=True, tls_enabled=True, backend_tls_versions=None, + backend_alpn_protocols=None)], + tls_certs={ + 'sample_pool_id_1': + {'client_cert': cert_file_path, + 'ca_cert': None, 'crl': None}}) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_pool_no_alpn(self): + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_2" + "\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + 
sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + backend_alpn_protocols=None)]) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_render_template_with_full_pool_cert(self): + feature_compatibility = {constants.POOL_ALPN: True} + pool_client_cert = '/foo/cert.pem' + pool_ca_cert = '/foo/ca.pem' + pool_crl = '/foo/crl.pem' + opts = (f"ssl crt {pool_client_cert} ca-file {pool_ca_cert} " + f"crl-file {pool_crl} verify required sni ssl_fc_sni " + f"no-sslv3 no-tlsv10 no-tlsv11") + alpn = ",".join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + f"check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + f"sample_member_id_1 {opts} alpn {alpn}\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + f"check check-alpn {alpn} inter 30s fall 3 rise 2 cookie " + f"sample_member_id_2 {opts} alpn {alpn}\n\n") + rendered_obj = self.jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple( + pool_cert=True, pool_ca_cert=True, pool_crl=True, + tls_enabled=True)], + tls_certs={ + 'sample_pool_id_1': + {'client_cert': pool_client_cert, + 'ca_cert': pool_ca_cert, + 'crl': pool_crl}}, + feature_compatibility=feature_compatibility) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_transform_session_persistence(self): + in_persistence = ( + sample_configs_combined.sample_session_persistence_tuple()) + ret = self.jinja_cfg._transform_session_persistence( + in_persistence, {}) + self.assertEqual(sample_configs_combined.RET_PERSISTENCE, ret) + + def test_transform_health_monitor(self): + in_persistence = sample_configs_combined.sample_health_monitor_tuple() + ret = self.jinja_cfg._transform_health_monitor(in_persistence, {}) + self.assertEqual(sample_configs_combined.RET_MONITOR_1, ret) + + def test_transform_member(self): + in_member = sample_configs_combined.sample_member_tuple( + 'sample_member_id_1', '10.0.0.99') + ret = self.jinja_cfg._transform_member(in_member, {}) + self.assertEqual(sample_configs_combined.RET_MEMBER_1, ret) + + def test_transform_pool(self): + in_pool = sample_configs_combined.sample_pool_tuple() + ret = self.jinja_cfg._transform_pool(in_pool, {}, False) + self.assertEqual(sample_configs_combined.RET_POOL_1, ret) + + def test_transform_pool_2(self): + in_pool = sample_configs_combined.sample_pool_tuple(sample_pool=2) + ret = self.jinja_cfg._transform_pool(in_pool, {}, False) + self.assertEqual(sample_configs_combined.RET_POOL_2, ret) + + def test_transform_pool_http_reuse(self): + in_pool = sample_configs_combined.sample_pool_tuple(sample_pool=2) + ret = self.jinja_cfg._transform_pool( + in_pool, {constants.HTTP_REUSE: True}, False) + expected_config = copy.copy(sample_configs_combined.RET_POOL_2) + expected_config[constants.HTTP_REUSE] = True + self.assertEqual(expected_config, ret) + + def test_transform_pool_cert(self): + in_pool = sample_configs_combined.sample_pool_tuple(pool_cert=True) + 
cert_path = os.path.join(self.jinja_cfg.base_crt_dir, + 'test_listener_id', 'pool_cert.pem') + ret = self.jinja_cfg._transform_pool( + in_pool, {}, False, pool_tls_certs={'client_cert': cert_path}) + expected_config = copy.copy(sample_configs_combined.RET_POOL_1) + expected_config['client_cert'] = cert_path + self.assertEqual(expected_config, ret) + + def test_transform_listener(self): + in_listener = sample_configs_combined.sample_listener_tuple() + ret = self.jinja_cfg._transform_listener(in_listener, None, {}, + in_listener.load_balancer) + self.assertEqual(sample_configs_combined.RET_LISTENER, ret) + + def test_transform_listener_with_l7(self): + in_listener = sample_configs_combined.sample_listener_tuple(l7=True) + ret = self.jinja_cfg._transform_listener(in_listener, None, {}, + in_listener.load_balancer) + self.assertEqual(sample_configs_combined.RET_LISTENER_L7, ret) + + def test_transform_listener_PROMETHEUS(self): + in_listener = sample_configs_combined.sample_listener_tuple() + ret = self.jinja_cfg._transform_listener( + in_listener, None, {lib_consts.PROTOCOL_PROMETHEUS: True}, + in_listener.load_balancer) + expected_config = copy.copy(sample_configs_combined.RET_LISTENER) + expected_config[lib_consts.PROTOCOL_PROMETHEUS] = True + self.assertEqual(expected_config, ret) + + def test_transform_loadbalancer(self): + in_amphora = sample_configs_combined.sample_amphora_tuple() + in_listener = sample_configs_combined.sample_listener_tuple() + ret = self.jinja_cfg._transform_loadbalancer( + in_amphora, in_listener.load_balancer, [in_listener], None, {}) + self.assertEqual(sample_configs_combined.RET_LB, ret) + + def test_transform_two_loadbalancers(self): + in_amphora = sample_configs_combined.sample_amphora_tuple() + in_listener1 = sample_configs_combined.sample_listener_tuple() + in_listener2 = sample_configs_combined.sample_listener_tuple() + + ret = self.jinja_cfg._transform_loadbalancer( + in_amphora, in_listener1.load_balancer, + [in_listener1, in_listener2], None, {}) + self.assertEqual(ret['global_connection_limit'], + constants.HAPROXY_DEFAULT_MAXCONN + + constants.HAPROXY_DEFAULT_MAXCONN) + + def test_transform_many_loadbalancers(self): + in_amphora = sample_configs_combined.sample_amphora_tuple() + + in_listeners = [] + + # Create many listeners, until the sum of connection_limits + # is greater than MAX_MAXCONN + connection_limit_sum = 0 + while connection_limit_sum <= constants.HAPROXY_MAX_MAXCONN: + in_listener = ( + sample_configs_combined.sample_listener_tuple()) + connection_limit_sum += constants.HAPROXY_DEFAULT_MAXCONN + + in_listeners.append(in_listener) + + ret = self.jinja_cfg._transform_loadbalancer( + in_amphora, in_listeners[0].load_balancer, + in_listeners, None, {}) + self.assertEqual(ret['global_connection_limit'], + constants.HAPROXY_MAX_MAXCONN) + self.assertLess(ret['global_connection_limit'], + connection_limit_sum) + + def test_transform_with_disabled_listeners(self): + in_amphora = sample_configs_combined.sample_amphora_tuple() + + in_listeners = [] + + connection_limit_sum = 0 + + in_listener = ( + sample_configs_combined.sample_listener_tuple()) + connection_limit_sum += constants.HAPROXY_DEFAULT_MAXCONN + in_listeners.append(in_listener) + + disabled_listener = ( + sample_configs_combined.sample_listener_tuple(enabled=False)) + in_listeners.append(disabled_listener) + + ret = self.jinja_cfg._transform_loadbalancer( + in_amphora, in_listeners[0].load_balancer, + in_listeners, None, {}) + self.assertEqual(ret['global_connection_limit'], + 
connection_limit_sum) + + def test_transform_amphora(self): + in_amphora = sample_configs_combined.sample_amphora_tuple() + ret = self.jinja_cfg._transform_amphora(in_amphora, {}) + self.assertEqual(sample_configs_combined.RET_AMPHORA, ret) + + def test_transform_loadbalancer_with_l7(self): + in_amphora = sample_configs_combined.sample_amphora_tuple() + in_listener = sample_configs_combined.sample_listener_tuple(l7=True) + ret = self.jinja_cfg._transform_loadbalancer( + in_amphora, in_listener.load_balancer, [in_listener], None, {}) + self.assertEqual(sample_configs_combined.RET_LB_L7, ret) + + def test_transform_l7policy(self): + in_l7policy = sample_configs_combined.sample_l7policy_tuple( + 'sample_l7policy_id_1') + ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}, False) + self.assertEqual(sample_configs_combined.RET_L7POLICY_1, ret) + + def test_transform_l7policy_2_8(self): + in_l7policy = sample_configs_combined.sample_l7policy_tuple( + 'sample_l7policy_id_2', sample_policy=2) + ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}, False) + self.assertEqual(sample_configs_combined.RET_L7POLICY_2, ret) + + # test invalid action without redirect_http_code + in_l7policy = sample_configs_combined.sample_l7policy_tuple( + 'sample_l7policy_id_8', sample_policy=2, redirect_http_code=None) + ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}, False) + self.assertEqual(sample_configs_combined.RET_L7POLICY_8, ret) + + def test_transform_l7policy_disabled_rule(self): + in_l7policy = sample_configs_combined.sample_l7policy_tuple( + 'sample_l7policy_id_6', sample_policy=6) + ret = self.jinja_cfg._transform_l7policy(in_l7policy, {}, False) + self.assertEqual(sample_configs_combined.RET_L7POLICY_6, ret) + + def test_escape_haproxy_config_string(self): + self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( + 'string_with_none'), 'string_with_none') + self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( + 'string with spaces'), 'string\\ with\\ spaces') + self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( + 'string\\with\\backslashes'), 'string\\\\with\\\\backslashes') + self.assertEqual(self.jinja_cfg._escape_haproxy_config_string( + 'string\\ with\\ all'), 'string\\\\\\ with\\\\\\ all') + + def test_render_template_no_log(self): + j_cfg = jinja_cfg.JinjaTemplater( + base_amp_path='/var/lib/octavia', + base_crt_dir='/var/lib/octavia/certs', + connection_logging=False) + defaults = ("defaults\n" + " no log\n" + " retries 3\n" + " option redispatch\n" + " option splice-request\n" + " option splice-response\n" + " option http-keep-alive\n\n\n") + rendered_obj = j_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple()] + ) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + defaults=defaults, logging="\n"), + rendered_obj) + + def test_render_template_amp_details(self): + j_cfg = jinja_cfg.JinjaTemplater( + base_amp_path='/var/lib/octavia', + base_crt_dir='/var/lib/octavia/certs', + connection_logging=False) + rendered_obj = j_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple()], + amp_details={"cpu_count": 7, + "active_tuned_profiles": 'virtual-guest ' + 'optimize-serial-console ' + 'amphora'} + ) + defaults = ("defaults\n" + " no log\n" + " retries 3\n" + " option redispatch\n" + " option splice-request\n" + " option splice-response\n" + " option http-keep-alive\n\n\n") + 
global_opts = (" maxconn 50000\n" + " nbthread 6\n" + " cpu-map auto:1/1-6 1-6\n") + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + defaults=defaults, logging="\n", global_opts=global_opts), + rendered_obj) + + def test_render_template_amp_details_cpu_count_none(self): + j_cfg = jinja_cfg.JinjaTemplater( + base_amp_path='/var/lib/octavia', + base_crt_dir='/var/lib/octavia/certs', + connection_logging=False) + rendered_obj = j_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_amphora_tuple(), + [sample_configs_combined.sample_listener_tuple()], + amp_details={"cpu_count": None}, + ) + defaults = ("defaults\n" + " no log\n" + " retries 3\n" + " option redispatch\n" + " option splice-request\n" + " option splice-response\n" + " option http-keep-alive\n\n\n") + global_opts = " maxconn 50000\n\n" + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + defaults=defaults, logging="\n", global_opts=global_opts), + rendered_obj) + + def test_haproxy_cfg_1_8_vs_1_5(self): + j_cfg = jinja_cfg.JinjaTemplater( + base_amp_path='/var/lib/octavia', + base_crt_dir='/var/lib/octavia/certs') + + sample_amphora = sample_configs_combined.sample_amphora_tuple() + sample_proxy_listener = sample_configs_combined.sample_listener_tuple( + be_proto='PROXY') + # With http-reuse and server-state-file + go = ( + " server-state-file /var/lib/octavia/sample_loadbalancer_id_1/" + "servers-state\n" + " maxconn {maxconn}\n\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + be = (f"backend {sample_proxy_listener.default_pool.id}:" + f"{sample_proxy_listener.id}\n" + " mode http\n" + " http-reuse safe\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " load-server-state-from-file global\n" + " timeout check 31s\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1 send-proxy\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2 send-proxy\n\n") + rendered_obj = j_cfg.build_config( + sample_amphora, + [sample_proxy_listener], + tls_certs=None, + haproxy_versions=("1", "8", "1"), + amp_details=None) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + global_opts=go, backend=be), + rendered_obj) + + # Without http-reuse and server-state-file + be = (f"backend {sample_proxy_listener.default_pool.id}:" + f"{sample_proxy_listener.id}\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_1 send-proxy\n" + " server sample_member_id_2 10.0.0.98:82 " + "weight 13 check inter 30s fall 3 rise 2 " + "cookie sample_member_id_2 send-proxy\n\n") + rendered_obj = j_cfg.build_config( + sample_amphora, + [sample_proxy_listener], + tls_certs=None, + haproxy_versions=("1", "5", "18"), + amp_details=None) + self.assertEqual( + sample_configs_combined.sample_base_expected_config(backend=be), + rendered_obj) + + def test_ssl_types_l7rules(self): + j_cfg = jinja_cfg.JinjaTemplater( + base_amp_path='/var/lib/octavia', + base_crt_dir='/var/lib/octavia/certs') + alpn = 
",".join(constants.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) + fe = ("frontend sample_listener_id_1\n" + f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " redirect scheme https if !{ ssl_fc }\n" + " http-response set-header Strict-Transport-Security " + "\"max-age=10000000; includeSubDomains; preload;\"\n" + " bind 10.0.0.2:443 ciphers " + f"{constants.CIPHERS_OWASP_SUITE_B} " + f"no-sslv3 no-tlsv10 no-tlsv11 alpn {alpn}\n" + " mode http\n" + " acl sample_l7rule_id_1 path -m beg /api\n" + " use_backend sample_pool_id_2:sample_listener_id_1" + " if sample_l7rule_id_1\n" + " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " + "This\\ string\\\\\\ with\\ stuff\n" + " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " + "this.*|that\n" + " redirect code 302 location http://www.example.com " + "if !sample_l7rule_id_2 sample_l7rule_id_3\n" + " acl sample_l7rule_id_4 path_end jpg\n" + " acl sample_l7rule_id_5 req.hdr(host) -i -m end " + ".example.com\n" + " http-request deny " + "if sample_l7rule_id_4 sample_l7rule_id_5\n" + " acl sample_l7rule_id_2 req.hdr(Some-header) -m sub " + "This\\ string\\\\\\ with\\ stuff\n" + " acl sample_l7rule_id_3 req.cook(some-cookie) -m reg " + "this.*|that\n" + " redirect code 302 prefix https://example.com " + "if !sample_l7rule_id_2 sample_l7rule_id_3\n" + " acl sample_l7rule_id_7 ssl_c_used\n" + " acl sample_l7rule_id_8 ssl_c_verify eq 1\n" + " acl sample_l7rule_id_9 ssl_c_s_dn(STREET) -m reg " + "^STREET.*NO\\\\.$\n" + " acl sample_l7rule_id_10 ssl_c_s_dn(OU-3) -m beg " + "Orgnization\\ Bala\n" + " acl sample_l7rule_id_11 path -m beg /api\n" + " redirect code 302 location " + "http://www.ssl-type-l7rule-test.com/" + "if sample_l7rule_id_7 !sample_l7rule_id_8 !sample_l7rule_id_9 " + "!sample_l7rule_id_10 sample_l7rule_id_11\n" + " default_backend sample_pool_id_1:sample_listener_id_1\n" + " timeout client 50000\n") + be = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 check " + "inter 30s fall 3 rise 2 cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 check " + "inter 30s fall 3 rise 2 cookie sample_member_id_2\n\n" + "backend sample_pool_id_2:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /healthmon.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + f" fullconn {constants.HAPROXY_DEFAULT_MAXCONN}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_3 10.0.0.97:82 weight 13 check " + "inter 30s fall 3 rise 2 cookie sample_member_id_3\n\n") + sample_listener = sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_TERMINATED_HTTPS, l7=True, + ssl_type_l7=True) + rendered_obj = j_cfg.build_config( + sample_configs_combined.sample_amphora_tuple(), + [sample_listener], + tls_certs=None, + haproxy_versions=("1", "5", "18"), + amp_details=None) + self.assertEqual( + sample_configs_combined.sample_base_expected_config( + frontend=fe, backend=be), + rendered_obj) + + @mock.patch("octavia.common.jinja.haproxy.combined_listeners.jinja_cfg." 
+ "JinjaTemplater.render_loadbalancer_obj") + def test_build_config(self, mock_render_loadbalancer_obj): + mock_amp = mock.Mock() + mock_listeners = mock.Mock() + mock_tls_certs = mock.Mock() + mock_socket_path = mock.Mock() + + j_cfg = jinja_cfg.JinjaTemplater() + j_cfg.build_config(mock_amp, mock_listeners, mock_tls_certs, + haproxy_versions=("0", "7", "0"), + socket_path=mock_socket_path, amp_details=None) + + expected_fc = {} + mock_render_loadbalancer_obj.assert_called_once_with( + mock_amp, mock_listeners, tls_certs=mock_tls_certs, + socket_path=mock_socket_path, amp_details=None, + feature_compatibility=expected_fc) + + mock_render_loadbalancer_obj.reset_mock() + + j_cfg.build_config(mock_amp, mock_listeners, mock_tls_certs, + haproxy_versions=("1", "6", "0"), + socket_path=mock_socket_path, amp_details=None) + + expected_fc = { + constants.HTTP_REUSE: True, + constants.SERVER_STATE_FILE: True + } + mock_render_loadbalancer_obj.assert_called_once_with( + mock_amp, mock_listeners, tls_certs=mock_tls_certs, + socket_path=mock_socket_path, amp_details=None, + feature_compatibility=expected_fc) + + mock_render_loadbalancer_obj.reset_mock() + + j_cfg.build_config(mock_amp, mock_listeners, mock_tls_certs, + haproxy_versions=("1", "9", "0"), + socket_path=mock_socket_path, amp_details=None) + + expected_fc = { + constants.HTTP_REUSE: True, + constants.POOL_ALPN: True, + constants.SERVER_STATE_FILE: True + } + mock_render_loadbalancer_obj.assert_called_once_with( + mock_amp, mock_listeners, tls_certs=mock_tls_certs, + socket_path=mock_socket_path, amp_details=None, + feature_compatibility=expected_fc) + + mock_render_loadbalancer_obj.reset_mock() + + j_cfg.build_config(mock_amp, mock_listeners, mock_tls_certs, + haproxy_versions=("2", "1", "1"), + socket_path=mock_socket_path, amp_details=None) + + expected_fc = { + constants.HTTP_REUSE: True, + constants.POOL_ALPN: True, + lib_consts.PROTOCOL_PROMETHEUS: True, + constants.SERVER_STATE_FILE: True + } + mock_render_loadbalancer_obj.assert_called_once_with( + mock_amp, mock_listeners, tls_certs=mock_tls_certs, + socket_path=mock_socket_path, amp_details=None, + feature_compatibility=expected_fc) + + mock_render_loadbalancer_obj.reset_mock() + + j_cfg.build_config(mock_amp, mock_listeners, mock_tls_certs, + haproxy_versions=("2", "2", "1"), amp_details=None, + socket_path=mock_socket_path) + + expected_fc = { + constants.HTTP_REUSE: True, + constants.POOL_ALPN: True, + lib_consts.PROTOCOL_PROMETHEUS: True, + constants.INSECURE_FORK: True, + constants.SERVER_STATE_FILE: True + } + mock_render_loadbalancer_obj.assert_called_once_with( + mock_amp, mock_listeners, tls_certs=mock_tls_certs, + socket_path=mock_socket_path, amp_details=None, + feature_compatibility=expected_fc) + + mock_render_loadbalancer_obj.reset_mock() + + j_cfg.build_config(mock_amp, mock_listeners, mock_tls_certs, + haproxy_versions=("2", "4", "0"), + socket_path=mock_socket_path, amp_details=None) + + mock_render_loadbalancer_obj.assert_called_once_with( + mock_amp, mock_listeners, tls_certs=mock_tls_certs, + socket_path=mock_socket_path, amp_details=None, + feature_compatibility=expected_fc) + + mock_render_loadbalancer_obj.reset_mock() + + j_cfg.build_config(mock_amp, mock_listeners, mock_tls_certs, + haproxy_versions=("3", "1", "0"), + socket_path=mock_socket_path, amp_details=None) + + mock_render_loadbalancer_obj.assert_called_once_with( + mock_amp, mock_listeners, tls_certs=mock_tls_certs, + socket_path=mock_socket_path, amp_details=None, + 
feature_compatibility=expected_fc) diff --git a/octavia/tests/unit/common/jinja/logging/__init__.py b/octavia/tests/unit/common/jinja/logging/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/common/jinja/logging/test_logging_jinja_cfg.py b/octavia/tests/unit/common/jinja/logging/test_logging_jinja_cfg.py new file mode 100644 index 0000000000..7a47681eca --- /dev/null +++ b/octavia/tests/unit/common/jinja/logging/test_logging_jinja_cfg.py @@ -0,0 +1,96 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +from octavia.common.jinja.logging import logging_jinja_cfg +import octavia.tests.unit.base as base + + +class LoggingJinjaTestCase(base.TestCase): + def test_build_agent_config(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(debug=False) + conf.config( + group="amphora_agent", + admin_log_targets='192.0.2.17:10514,192.51.100.4:10514') + conf.config( + group="amphora_agent", + tenant_log_targets='192.0.2.7:20514,192.51.100.9:20514') + conf.config(group="amphora_agent", + log_protocol=lib_consts.PROTOCOL_UDP) + conf.config(group="amphora_agent", log_retry_count=5) + conf.config(group="amphora_agent", log_retry_interval=2) + conf.config(group="amphora_agent", log_queue_size=10000) + + lj = logging_jinja_cfg.LoggingJinjaTemplater() + expected_config = ( + 'ruleset(name="tenant_forwarding" queue.type="linkedList" ' + 'queue.size="10000") {\n' + ' action(type="omfwd"\n' + ' target="192.0.2.7"\n' + ' port="20514"\n' + ' protocol="UDP"\n' + ' action.resumeRetryCount="5"\n' + ' action.resumeInterval="2"\n' + ' )\n' + ' action(type="omfwd"\n' + ' target="192.51.100.9"\n' + ' port="20514"\n' + ' protocol="UDP"\n' + ' action.resumeRetryCount="5"\n' + ' action.resumeInterval="2"\n' + ' action.execOnlyWhenPreviousIsSuspended="on")\n' + '}\n' + 'local0.=info call tenant_forwarding\n' + '\n' + 'ruleset(name="admin_forwarding" queue.type="linkedList" ' + 'queue.size="10000") {\n' + ' action(type="omfwd"\n' + ' target="192.0.2.17"\n' + ' port="10514"\n' + ' protocol="UDP"\n' + ' action.resumeRetryCount="5"\n' + ' action.resumeInterval="2"\n' + ' )\n' + ' action(type="omfwd"\n' + ' target="192.51.100.4"\n' + ' port="10514"\n' + ' protocol="UDP"\n' + ' action.resumeRetryCount="5"\n' + ' action.resumeInterval="2"\n' + ' action.execOnlyWhenPreviousIsSuspended="on")\n' + '}\n' + 'local1.* call admin_forwarding' + ) + logging_cfg = lj.build_logging_config() + + self.assertEqual(expected_config, logging_cfg) + + def test_build_agent_config_disable_logs(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(debug=False) + conf.config( + group="amphora_agent", + disable_local_log_storage=True) + + lj = logging_jinja_cfg.LoggingJinjaTemplater() + expected_config = ( + '*.* stop') + + logging_cfg = lj.build_logging_config() + + 
self.assertEqual(expected_config, logging_cfg) diff --git a/octavia/tests/unit/common/jinja/lvs/__init__.py b/octavia/tests/unit/common/jinja/lvs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py b/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py new file mode 100644 index 0000000000..e43e6e818d --- /dev/null +++ b/octavia/tests/unit/common/jinja/lvs/test_jinja_cfg.py @@ -0,0 +1,932 @@ +# Copyright 2018 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia_lib.common import constants as lib_consts + +from octavia.common import constants +from octavia.common.jinja.lvs import jinja_cfg +from octavia.tests.unit import base +from octavia.tests.unit.common.sample_configs import sample_configs_combined + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +BASE_PATH = '/var/lib/octavia' + + +class TestLvsCfg(base.TestCase): + def setUp(self): + super().setUp() + self.lvs_jinja_cfg = jinja_cfg.LvsJinjaTemplater() + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="haproxy_amphora", base_path=BASE_PATH) + + def test_udp_get_template(self): + template = self.lvs_jinja_cfg._get_template() + self.assertEqual('keepalivedlvs.cfg.j2', template.name) + + def test_render_template_udp_source_ip(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol UDP\n" + " persistence_timeout 33\n" + " persistence_granularity 255.255.0.0\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 10.0.0.99 82\"\n" + " misc_timeout 31\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 10.0.0.98 82\"\n" + " misc_timeout 31\n" + " }\n" + " }\n\n" + "}\n\n") + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.255.0.0', + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, + connection_limit=98)) + self.assertEqual(exp, rendered_obj) + + def test_render_template_udp_ipv6_session_persistence_default_values(self): + # The session persistence default values refer to + # 
persistence_timeout and persistence_granularity + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv6-group {\n" + " 2001:db8::2 80\n" + "}\n\n" + "virtual_server group ipv6-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol UDP\n" + " persistence_timeout 360\n" + " persistence_granularity 128\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + "}\n\n") + udp_sample = sample_configs_combined.sample_lb_with_udp_listener_tuple( + listeners=[sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT)] + ) + udp_listener = udp_sample.listeners[0] + ipv6_lb = sample_configs_combined.sample_listener_loadbalancer_tuple( + vip=sample_configs_combined.sample_vip_tuple('2001:db8::2')) + udp_listener = udp_listener._replace(load_balancer=ipv6_lb) + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(udp_listener) + self.assertEqual(exp, rendered_obj) + + def test_render_template_udp_one_packet(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol UDP\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 10.0.0.99 82\"\n" + " misc_timeout 31\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 10.0.0.98 82\"\n" + " misc_timeout 31\n" + " }\n" + " }\n\n" + "}\n\n") + + listener = sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, + connection_limit=98, + persistence=False) + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(listener) + self.assertEqual(exp, rendered_obj) + + def test_render_template_udp_with_health_monitor(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol UDP\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 10.0.0.99 82\"\n" + " misc_timeout 31\n" + " }\n" + " }\n\n" + " # Configuration for 
Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 10.0.0.98 82\"\n" + " misc_timeout 31\n" + " }\n" + " }\n\n" + "}\n\n") + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, + persistence=False, + connection_limit=98)) + self.assertEqual(exp, rendered_obj) + + def test_render_template_udp_with_health_monitor_ip_port(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol UDP\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 192.168.1.1 9000\"\n" + " misc_timeout 31\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"/var/lib/octavia/lvs/check/" + "udp_check.sh 192.168.1.1 9000\"\n" + " misc_timeout 31\n" + " }\n" + " }\n\n" + "}\n\n") + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + monitor_ip_port=True, + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, + persistence=False, + connection_limit=98)) + self.assertEqual(exp, rendered_obj) + + def test_render_template_udp_no_other_resources(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n\n") + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, monitor=False, + persistence=False, alloc_default_pool=False)) + self.assertEqual(exp, rendered_obj) + + def test_render_template_udp_with_pool_no_member(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol UDP\n\n\n" + " # Configuration for Pool sample_pool_id_0\n" + "}\n\n") + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, monitor=False, + persistence=False, alloc_default_pool=True, + sample_default_pool=0)) + self.assertEqual(exp, rendered_obj) + + def test_render_template_udp_with_disabled_pool(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " 
protocol UDP\n\n\n" + " # Pool sample_pool_id_1 is disabled\n" + "}\n\n") + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, monitor=False, + persistence=False, alloc_default_pool=True, + pool_enabled=False)) + self.assertEqual(exp, rendered_obj) + + def test_udp_transform_session_persistence(self): + persistence_src_ip = ( + sample_configs_combined.sample_session_persistence_tuple( + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_cookie=None, + persistence_timeout=33, + persistence_granularity='255.0.0.0' + )) + exp = sample_configs_combined.UDP_SOURCE_IP_BODY + ret = self.lvs_jinja_cfg._transform_session_persistence( + persistence_src_ip) + self.assertEqual(exp, ret) + + def test_udp_transform_health_monitor(self): + in_hm = sample_configs_combined.sample_health_monitor_tuple( + proto=constants.HEALTH_MONITOR_UDP_CONNECT + ) + ret = self.lvs_jinja_cfg._transform_health_monitor(in_hm) + self.assertEqual(sample_configs_combined.RET_UDP_HEALTH_MONITOR, ret) + + def test_udp_transform_member(self): + in_member = sample_configs_combined.sample_member_tuple( + 'member_id_1', '192.0.2.10') + ret = self.lvs_jinja_cfg._transform_member(in_member) + self.assertEqual(sample_configs_combined.RET_UDP_MEMBER, ret) + + in_member = sample_configs_combined.sample_member_tuple( + 'member_id_1', + '192.0.2.10', + monitor_ip_port=True) + ret = self.lvs_jinja_cfg._transform_member(in_member) + self.assertEqual( + sample_configs_combined.RET_UDP_MEMBER_MONITOR_IP_PORT, ret) + + def test_udp_transform_pool(self): + in_pool = sample_configs_combined.sample_pool_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, persistence_granularity='255.0.0.0', + ) + ret = self.lvs_jinja_cfg._transform_pool(in_pool) + self.assertEqual(sample_configs_combined.RET_UDP_POOL, ret) + + in_pool = sample_configs_combined.sample_pool_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, persistence_granularity='255.0.0.0', + lb_algorithm=None, + ) + ret = self.lvs_jinja_cfg._transform_pool(in_pool) + self.assertEqual(sample_configs_combined.RET_UDP_POOL, ret) + + in_pool = sample_configs_combined.sample_pool_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, persistence_granularity='255.0.0.0', + monitor=False) + sample_configs_combined.RET_UDP_POOL['health_monitor'] = '' + ret = self.lvs_jinja_cfg._transform_pool(in_pool) + self.assertEqual(sample_configs_combined.RET_UDP_POOL, ret) + + def test_udp_transform_listener(self): + in_listener = sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.0.0.0', + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, + connection_limit=98 + ) + ret = self.lvs_jinja_cfg._transform_listener(in_listener) + self.assertEqual(sample_configs_combined.RET_UDP_LISTENER, ret) + + in_listener = sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.0.0.0', + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, + connection_limit=-1) + + ret = self.lvs_jinja_cfg._transform_listener(in_listener) + 
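# NOTE: connection_limit=-1 means "unlimited"; the transform is expected + # to omit the key entirely, hence the pop() from the expected dict. + 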
sample_configs_combined.RET_UDP_LISTENER.pop('connection_limit') + self.assertEqual(sample_configs_combined.RET_UDP_LISTENER, ret) + + def test_render_template_udp_listener_with_http_health_monitor(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol UDP\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " HTTP_GET {\n" + " url {\n" + " path /index.html\n" + " status_code 200\n" + " }\n" + " url {\n" + " path /index.html\n" + " status_code 201\n" + " }\n" + " connect_ip 10.0.0.99\n" + " connect_port 82\n" + " connect_timeout 31\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " HTTP_GET {\n" + " url {\n" + " path /index.html\n" + " status_code 200\n" + " }\n" + " url {\n" + " path /index.html\n" + " status_code 201\n" + " }\n" + " connect_ip 10.0.0.98\n" + " connect_port 82\n" + " connect_timeout 31\n" + " }\n" + " }\n\n" + "}\n\n") + + listener = sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + monitor_proto=constants.HEALTH_MONITOR_HTTP, + connection_limit=98, + persistence=False, + monitor_expected_codes='200-201') + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(listener) + self.assertEqual(exp, rendered_obj) + + def test_render_template_udp_listener_with_tcp_health_monitor(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol UDP\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " TCP_CHECK {\n" + " connect_ip 10.0.0.99\n" + " connect_port 82\n" + " connect_timeout 31\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " TCP_CHECK {\n" + " connect_ip 10.0.0.98\n" + " connect_port 82\n" + " connect_timeout 31\n" + " }\n" + " }\n\n" + "}\n\n") + listener = sample_configs_combined.sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + monitor_proto=constants.HEALTH_MONITOR_TCP, + connection_limit=98, + persistence=False) + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(listener) + self.assertEqual(exp, rendered_obj) + + def test_render_template_disabled_udp_listener(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Listener sample_listener_id_1 is disabled\n\n" + "net_namespace amphora-haproxy\n\n") + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + enabled=False, + 
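# NOTE: with enabled=False only the "is disabled" stub above should be + # rendered; the remaining kwargs must not appear in the output. + 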
proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.255.0.0', + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, + connection_limit=98)) + self.assertEqual(exp, rendered_obj) + + def test_render_template_sctp_source_ip(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol SCTP\n" + " persistence_timeout 33\n" + " persistence_granularity 255.255.0.0\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"amphora-health-checker sctp -t 31 " + "10.0.0.99 82\"\n" + " misc_timeout 32\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"amphora-health-checker sctp -t 31 " + "10.0.0.98 82\"\n" + " misc_timeout 32\n" + " }\n" + " }\n\n" + "}\n\n") + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=lib_consts.PROTOCOL_SCTP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.255.0.0', + monitor_proto=lib_consts.HEALTH_MONITOR_SCTP, + connection_limit=98)) + self.assertEqual(exp, rendered_obj) + + def test_render_template_sctp_one_packet(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol SCTP\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"amphora-health-checker sctp -t 31 " + "10.0.0.99 82\"\n" + " misc_timeout 32\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"amphora-health-checker sctp -t 31 " + "10.0.0.98 82\"\n" + " misc_timeout 32\n" + " }\n" + " }\n\n" + "}\n\n") + + listener = sample_configs_combined.sample_listener_tuple( + proto=lib_consts.PROTOCOL_SCTP, + monitor_proto=lib_consts.HEALTH_MONITOR_SCTP, + connection_limit=98, + persistence=False) + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(listener) + self.assertEqual(exp, rendered_obj) + + def test_render_template_sctp_with_health_monitor(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group 
{\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol SCTP\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"amphora-health-checker sctp -t 31 " + "10.0.0.99 82\"\n" + " misc_timeout 32\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"amphora-health-checker sctp -t 31 " + "10.0.0.98 82\"\n" + " misc_timeout 32\n" + " }\n" + " }\n\n" + "}\n\n") + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=lib_consts.PROTOCOL_SCTP, + monitor_proto=lib_consts.HEALTH_MONITOR_SCTP, + persistence=False, + connection_limit=98)) + self.assertEqual(exp, rendered_obj) + + def test_render_template_sctp_with_health_monitor_ip_port(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol SCTP\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"amphora-health-checker sctp -t 31 " + "192.168.1.1 9000\"\n" + " misc_timeout 32\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " MISC_CHECK {\n" + " misc_path \"amphora-health-checker sctp -t 31 " + "192.168.1.1 9000\"\n" + " misc_timeout 32\n" + " }\n" + " }\n\n" + "}\n\n") + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=lib_consts.PROTOCOL_SCTP, + monitor_ip_port=True, + monitor_proto=lib_consts.HEALTH_MONITOR_SCTP, + persistence=False, + connection_limit=98)) + self.assertEqual(exp, rendered_obj) + + def test_render_template_sctp_no_other_resources(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n\n") + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + proto=lib_consts.PROTOCOL_SCTP, monitor=False, + persistence=False, alloc_default_pool=False)) + self.assertEqual(exp, rendered_obj) + + def test_sctp_transform_session_persistence(self): + persistence_src_ip = ( + sample_configs_combined.sample_session_persistence_tuple( + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_cookie=None, + persistence_timeout=33, + persistence_granularity='255.0.0.0' + )) + exp = sample_configs_combined.SCTP_SOURCE_IP_BODY + ret = self.lvs_jinja_cfg._transform_session_persistence( + persistence_src_ip) + self.assertEqual(exp, ret) + + def test_sctp_transform_health_monitor(self): + in_hm = sample_configs_combined.sample_health_monitor_tuple( + 
proto=lib_consts.HEALTH_MONITOR_SCTP + ) + ret = self.lvs_jinja_cfg._transform_health_monitor(in_hm) + self.assertEqual(sample_configs_combined.RET_SCTP_HEALTH_MONITOR, ret) + + def test_sctp_transform_member(self): + in_member = sample_configs_combined.sample_member_tuple( + 'member_id_1', '192.0.2.10') + ret = self.lvs_jinja_cfg._transform_member(in_member) + self.assertEqual(sample_configs_combined.RET_SCTP_MEMBER, ret) + + in_member = sample_configs_combined.sample_member_tuple( + 'member_id_1', + '192.0.2.10', + monitor_ip_port=True) + ret = self.lvs_jinja_cfg._transform_member(in_member) + self.assertEqual( + sample_configs_combined.RET_SCTP_MEMBER_MONITOR_IP_PORT, ret) + + def test_sctp_transform_pool(self): + in_pool = sample_configs_combined.sample_pool_tuple( + proto=lib_consts.PROTOCOL_SCTP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, persistence_granularity='255.0.0.0', + ) + ret = self.lvs_jinja_cfg._transform_pool(in_pool) + self.assertEqual(sample_configs_combined.RET_SCTP_POOL, ret) + + in_pool = sample_configs_combined.sample_pool_tuple( + proto=lib_consts.PROTOCOL_SCTP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, persistence_granularity='255.0.0.0', + lb_algorithm=None, + ) + ret = self.lvs_jinja_cfg._transform_pool(in_pool) + self.assertEqual(sample_configs_combined.RET_SCTP_POOL, ret) + + in_pool = sample_configs_combined.sample_pool_tuple( + proto=lib_consts.PROTOCOL_SCTP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, persistence_granularity='255.0.0.0', + monitor=False) + sample_configs_combined.RET_SCTP_POOL['health_monitor'] = '' + ret = self.lvs_jinja_cfg._transform_pool(in_pool) + self.assertEqual(sample_configs_combined.RET_SCTP_POOL, ret) + + def test_sctp_transform_listener(self): + in_listener = sample_configs_combined.sample_listener_tuple( + proto=lib_consts.PROTOCOL_SCTP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.0.0.0', + monitor_proto=lib_consts.HEALTH_MONITOR_SCTP, + connection_limit=98 + ) + ret = self.lvs_jinja_cfg._transform_listener(in_listener) + self.assertEqual(sample_configs_combined.RET_SCTP_LISTENER, ret) + + in_listener = sample_configs_combined.sample_listener_tuple( + proto=lib_consts.PROTOCOL_SCTP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.0.0.0', + monitor_proto=lib_consts.HEALTH_MONITOR_SCTP, + connection_limit=-1) + + ret = self.lvs_jinja_cfg._transform_listener(in_listener) + sample_configs_combined.RET_SCTP_LISTENER.pop('connection_limit') + self.assertEqual(sample_configs_combined.RET_SCTP_LISTENER, ret) + + def test_render_template_sctp_listener_with_http_health_monitor(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol SCTP\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " HTTP_GET {\n" + " url {\n" + " path /index.html\n" + " 
status_code 200\n" + " }\n" + " url {\n" + " path /index.html\n" + " status_code 201\n" + " }\n" + " connect_ip 10.0.0.99\n" + " connect_port 82\n" + " connect_timeout 31\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " HTTP_GET {\n" + " url {\n" + " path /index.html\n" + " status_code 200\n" + " }\n" + " url {\n" + " path /index.html\n" + " status_code 201\n" + " }\n" + " connect_ip 10.0.0.98\n" + " connect_port 82\n" + " connect_timeout 31\n" + " }\n" + " }\n\n" + "}\n\n") + + listener = sample_configs_combined.sample_listener_tuple( + proto=lib_consts.PROTOCOL_SCTP, + monitor_proto=constants.HEALTH_MONITOR_HTTP, + connection_limit=98, + persistence=False, + monitor_expected_codes='200-201') + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(listener) + self.assertEqual(exp, rendered_obj) + + def test_render_template_sctp_listener_with_tcp_health_monitor(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Configuration for Listener sample_listener_id_1\n\n" + "net_namespace amphora-haproxy\n\n" + "virtual_server_group ipv4-group {\n" + " 10.0.0.2 80\n" + "}\n\n" + "virtual_server group ipv4-group {\n" + " lb_algo wrr\n" + " lb_kind NAT\n" + " protocol SCTP\n" + " delay_loop 30\n" + " delay_before_retry 30\n" + " retry 3\n\n\n" + " # Configuration for Pool sample_pool_id_1\n" + " # Configuration for HealthMonitor sample_monitor_id_1\n" + " # Configuration for Member sample_member_id_1\n" + " real_server 10.0.0.99 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " TCP_CHECK {\n" + " connect_ip 10.0.0.99\n" + " connect_port 82\n" + " connect_timeout 31\n" + " }\n" + " }\n\n" + " # Configuration for Member sample_member_id_2\n" + " real_server 10.0.0.98 82 {\n" + " weight 13\n" + " uthreshold 98\n" + " TCP_CHECK {\n" + " connect_ip 10.0.0.98\n" + " connect_port 82\n" + " connect_timeout 31\n" + " }\n" + " }\n\n" + "}\n\n") + listener = sample_configs_combined.sample_listener_tuple( + proto=lib_consts.PROTOCOL_SCTP, + monitor_proto=constants.HEALTH_MONITOR_TCP, + connection_limit=98, + persistence=False) + + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj(listener) + self.assertEqual(exp, rendered_obj) + + def test_render_template_disabled_sctp_listener(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Listener sample_listener_id_1 is disabled\n\n" + "net_namespace amphora-haproxy\n\n") + rendered_obj = self.lvs_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + enabled=False, + proto=lib_consts.PROTOCOL_SCTP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.255.0.0', + monitor_proto=lib_consts.HEALTH_MONITOR_SCTP, + connection_limit=98)) + self.assertEqual(exp, rendered_obj) diff --git a/octavia/tests/unit/common/jinja/test_user_data_jinja_cfg.py b/octavia/tests/unit/common/jinja/test_user_data_jinja_cfg.py new file mode 100644 index 0000000000..4f6ed9d805 --- /dev/null +++ b/octavia/tests/unit/common/jinja/test_user_data_jinja_cfg.py @@ -0,0 +1,64 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia.common.jinja import user_data_jinja_cfg +import octavia.tests.unit.base as base + +TEST_CONFIG = ('[DEFAULT]\n' + 'debug = False\n' + '[haproxy_amphora]\n' + 'base_cert_dir = /var/lib/octavia/certs\n') +EXPECTED_TEST_CONFIG = (' [DEFAULT]\n' + ' debug = False\n' + ' [haproxy_amphora]\n' + ' base_cert_dir = /var/lib/octavia/certs\n\n') +BASE_CFG = ('#cloud-config\n' + '# vim: syntax=yaml\n' + '#\n' + '# This configuration with take user-data dict and ' + 'build a cloud-init\n' + '# script utilizing the write_files module. ' + 'The user-data dict should be a\n' + '# Key Value pair where the Key is the path to store the ' + 'file and the Value\n' + '# is the data to store at that location\n' + '#\n' + '# Example:\n' + '# {\'/root/path/to/file.cfg\': \'I\'m a file, ' + 'write things in me\'}\n') +WRITE_FILES_CFG = ('write_files:\n') +RUN_CMD = ('runcmd:\n' + '- systemctl restart rsyslog\n') +WRITE_FILES_CMD = ('- service amphora-agent restart\n') +TIMEZONE = '\ntimezone: UTC' + + +class TestUserDataJinjaCfg(base.TestCase): + def setUp(self): + super().setUp() + + def test_build_user_data_config(self): + udc = user_data_jinja_cfg.UserDataJinjaCfg() + expected_config = (BASE_CFG + WRITE_FILES_CFG + + '- path: /test/config/path\n' + ' content: |\n' + EXPECTED_TEST_CONFIG + + RUN_CMD + WRITE_FILES_CMD + TIMEZONE) + ud_cfg = udc.build_user_data_config({'/test/config/path': TEST_CONFIG}) + self.assertEqual(expected_config, ud_cfg) + + def test_build_user_data_config_no_files(self): + udc = user_data_jinja_cfg.UserDataJinjaCfg() + expected_config = (BASE_CFG + '\n' + RUN_CMD + '\n' + TIMEZONE) + ud_cfg = udc.build_user_data_config({}) + self.assertEqual(expected_config, ud_cfg) diff --git a/octavia/tests/unit/common/sample_configs/__init__.py b/octavia/tests/unit/common/sample_configs/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/common/sample_configs/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/common/sample_configs/sample_configs_combined.py b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py new file mode 100644 index 0000000000..9de75dc2e0 --- /dev/null +++ b/octavia/tests/unit/common/sample_configs/sample_configs_combined.py @@ -0,0 +1,1282 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import collections + +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg + +from octavia.common import constants +from octavia.tests.common import sample_certs + +CONF = cfg.CONF + + +class AmphoraTuple(collections.namedtuple( + 'amphora', 'id, lb_network_ip, vrrp_ip, ha_ip, vrrp_port_id, ' + 'ha_port_id, role, status, vrrp_interface,' + 'vrrp_priority, api_version')): + def to_dict(self): + return self._asdict() + + +def sample_amphora_tuple(id='sample_amphora_id_1', lb_network_ip='10.0.1.1', + vrrp_ip='10.1.1.1', ha_ip='192.168.10.1', + vrrp_port_id='1234', ha_port_id='1234', role=None, + status='ACTIVE', vrrp_interface=None, + vrrp_priority=None, api_version='1.0'): + amp = AmphoraTuple( + id=id, + lb_network_ip=lb_network_ip, + vrrp_ip=vrrp_ip, + ha_ip=ha_ip, + vrrp_port_id=vrrp_port_id, + ha_port_id=ha_port_id, + role=role, + status=status, + vrrp_interface=vrrp_interface, + vrrp_priority=vrrp_priority, + api_version=api_version) + return amp + + +RET_PERSISTENCE = { + 'type': 'HTTP_COOKIE', + 'cookie_name': None} + +RET_MONITOR_1 = { + 'id': 'sample_monitor_id_1', + 'type': 'HTTP', + 'delay': 30, + 'timeout': 31, + 'fall_threshold': 3, + 'rise_threshold': 2, + 'http_method': 'GET', + 'url_path': '/index.html', + 'expected_codes': '418', + 'enabled': True, + 'http_version': 1.0, + 'domain_name': None} + +RET_MONITOR_2 = { + 'id': 'sample_monitor_id_2', + 'type': 'HTTP', + 'delay': 30, + 'timeout': 31, + 'fall_threshold': 3, + 'rise_threshold': 2, + 'http_method': 'GET', + 'url_path': '/healthmon.html', + 'expected_codes': '418', + 'enabled': True, + 'http_version': 1.0, + 'domain_name': None} + +RET_MEMBER_1 = { + 'id': 'sample_member_id_1', + 'address': '10.0.0.99', + 'protocol_port': 82, + 'weight': 13, + 'subnet_id': '10.0.0.1/24', + 'enabled': True, + 'operating_status': 'ACTIVE', + 'monitor_address': None, + 'monitor_port': None, + 'backup': False} + +RET_MEMBER_2 = { + 'id': 'sample_member_id_2', + 'address': '10.0.0.98', + 'protocol_port': 82, + 'weight': 13, + 'subnet_id': '10.0.0.1/24', + 'enabled': True, + 'operating_status': 'ACTIVE', + 'monitor_address': None, + 'monitor_port': None, + 'backup': False} + +RET_MEMBER_3 = { + 'id': 'sample_member_id_3', + 'address': '10.0.0.97', + 'protocol_port': 82, + 'weight': 13, + 'subnet_id': '10.0.0.1/24', + 'enabled': True, + 'operating_status': 'ACTIVE', + 'monitor_address': None, + 'monitor_port': None, + 'backup': False} + +RET_POOL_1 = { + 'id': 'sample_pool_id_1', + 'protocol': 'http', + 'proxy_protocol': None, + 'lb_algorithm': 'roundrobin', + 'listener_tls_enabled': False, + 'members': [RET_MEMBER_1, RET_MEMBER_2], + 'health_monitor': RET_MONITOR_1, + 'session_persistence': RET_PERSISTENCE, + 'enabled': True, + 'operating_status': 'ACTIVE', + 'stick_size': '10k', + constants.HTTP_REUSE: False, + 'ca_tls_path': '', + 'crl_path': '', + 'tls_enabled': False, +} + +RET_POOL_2 = { + 'id': 'sample_pool_id_2', + 'protocol': 'http', + 'proxy_protocol': None, + 'lb_algorithm': 'roundrobin', + 'listener_tls_enabled': False, + 'members': [RET_MEMBER_3], + 'health_monitor': RET_MONITOR_2, + 
'session_persistence': RET_PERSISTENCE, + 'enabled': True, + 'operating_status': 'ACTIVE', + 'stick_size': '10k', + constants.HTTP_REUSE: False, + 'ca_tls_path': '', + 'crl_path': '', + 'tls_enabled': False, +} + +RET_DEF_TLS_CONT = {'id': 'cont_id_1', 'allencompassingpem': 'imapem', + 'primary_cn': 'FakeCn'} +RET_SNI_CONT_1 = {'id': 'cont_id_2', 'allencompassingpem': 'imapem2', + 'primary_cn': 'FakeCn'} +RET_SNI_CONT_2 = {'id': 'cont_id_3', 'allencompassingpem': 'imapem3', + 'primary_cn': 'FakeCn2'} + +RET_L7RULE_1 = { + 'id': 'sample_l7rule_id_1', + 'type': constants.L7RULE_TYPE_PATH, + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + 'key': None, + 'value': '/api', + 'invert': False, + 'enabled': True} + +RET_L7RULE_2 = { + 'id': 'sample_l7rule_id_2', + 'type': constants.L7RULE_TYPE_HEADER, + 'compare_type': constants.L7RULE_COMPARE_TYPE_CONTAINS, + 'key': 'Some-header', + 'value': 'This\\ string\\\\\\ with\\ stuff', + 'invert': True, + 'enabled': True} + +RET_L7RULE_3 = { + 'id': 'sample_l7rule_id_3', + 'type': constants.L7RULE_TYPE_COOKIE, + 'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX, + 'key': 'some-cookie', + 'value': 'this.*|that', + 'invert': False, + 'enabled': True} + +RET_L7RULE_4 = { + 'id': 'sample_l7rule_id_4', + 'type': constants.L7RULE_TYPE_FILE_TYPE, + 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, + 'key': None, + 'value': 'jpg', + 'invert': False, + 'enabled': True} + +RET_L7RULE_5 = { + 'id': 'sample_l7rule_id_5', + 'type': constants.L7RULE_TYPE_HOST_NAME, + 'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + 'key': None, + 'value': '.example.com', + 'invert': False, + 'enabled': True} + +RET_L7RULE_6 = { + 'id': 'sample_l7rule_id_6', + 'type': constants.L7RULE_TYPE_HOST_NAME, + 'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH, + 'key': None, + 'value': '.example.com', + 'invert': False, + 'enabled': False} + +RET_L7POLICY_1 = { + 'id': 'sample_l7policy_id_1', + 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'redirect_pool': RET_POOL_2, + 'redirect_url': None, + 'redirect_prefix': None, + 'enabled': True, + 'l7rules': [RET_L7RULE_1], + 'redirect_http_code': None} + +RET_L7POLICY_2 = { + 'id': 'sample_l7policy_id_2', + 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, + 'redirect_pool': None, + 'redirect_url': 'http://www.example.com/', + 'redirect_prefix': None, + 'enabled': True, + 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], + 'redirect_http_code': 302} + +RET_L7POLICY_3 = { + 'id': 'sample_l7policy_id_3', + 'action': constants.L7POLICY_ACTION_REJECT, + 'redirect_pool': None, + 'redirect_url': None, + 'redirect_prefix': None, + 'enabled': True, + 'l7rules': [RET_L7RULE_4, RET_L7RULE_5], + 'redirect_http_code': None} + +RET_L7POLICY_4 = { + 'id': 'sample_l7policy_id_4', + 'action': constants.L7POLICY_ACTION_REJECT, + 'redirect_pool': None, + 'redirect_url': None, + 'redirect_prefix': None, + 'enabled': True, + 'l7rules': [], + 'redirect_http_code': None} + +RET_L7POLICY_5 = { + 'id': 'sample_l7policy_id_5', + 'action': constants.L7POLICY_ACTION_REJECT, + 'redirect_pool': None, + 'redirect_url': None, + 'redirect_prefix': None, + 'enabled': False, + 'l7rules': [RET_L7RULE_5], + 'redirect_http_code': None} + +RET_L7POLICY_6 = { + 'id': 'sample_l7policy_id_6', + 'action': constants.L7POLICY_ACTION_REJECT, + 'redirect_pool': None, + 'redirect_url': None, + 'redirect_prefix': None, + 'enabled': True, + 'l7rules': [], + 'redirect_http_code': None} + +RET_L7POLICY_7 = { + 'id': 'sample_l7policy_id_7', + 'action': 
constants.L7POLICY_ACTION_REDIRECT_PREFIX, + 'redirect_pool': None, + 'redirect_url': None, + 'redirect_prefix': 'https://example.com/', + 'enabled': True, + 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], + 'redirect_http_code': 302} + +RET_L7POLICY_8 = { + 'id': 'sample_l7policy_id_8', + 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, + 'redirect_pool': None, + 'redirect_url': 'http://www.example.com/', + 'redirect_prefix': None, + 'enabled': True, + 'l7rules': [RET_L7RULE_2, RET_L7RULE_3], + 'redirect_http_code': None} + +RET_LISTENER = { + 'id': 'sample_listener_id_1', + 'protocol_port': '80', + 'protocol': 'HTTP', + 'protocol_mode': 'http', + 'default_pool': RET_POOL_1, + 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, + 'user_log_format': '12345\\ sample_loadbalancer_id_1\\ %f\\ %ci\\ %cp\\ ' + '%t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ %[ssl_c_verify]\\ ' + '%{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ %tsc', + 'pools': [RET_POOL_1], + 'l7policies': [], + 'enabled': True, + 'insert_headers': {}, + 'timeout_client_data': 50000, + 'timeout_member_connect': 5000, + 'timeout_member_data': 50000, + 'timeout_tcp_inspect': 0, + 'PROMETHEUS': False, +} + +RET_LISTENER_L7 = { + 'id': 'sample_listener_id_1', + 'protocol_port': '80', + 'protocol': 'HTTP', + 'protocol_mode': 'http', + 'default_pool': RET_POOL_1, + 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, + 'user_log_format': '12345\\ sample_loadbalancer_id_1\\ %f\\ %ci\\ %cp\\ ' + '%t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ %[ssl_c_verify]\\ ' + '%{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ %tsc', + 'l7policies': [RET_L7POLICY_1, RET_L7POLICY_2, RET_L7POLICY_3, + RET_L7POLICY_4, RET_L7POLICY_5, RET_L7POLICY_6, + RET_L7POLICY_7], + 'pools': [RET_POOL_1, RET_POOL_2], + 'enabled': True, + 'insert_headers': {}, + 'timeout_client_data': 50000, + 'timeout_member_connect': 5000, + 'timeout_member_data': 50000, + 'timeout_tcp_inspect': 0, + 'PROMETHEUS': False, +} + +RET_LISTENER_TLS = { + 'id': 'sample_listener_id_1', + 'protocol_port': '443', + 'protocol': 'TERMINATED_HTTPS', + 'protocol_mode': 'http', + 'default_pool': RET_POOL_1, + 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, + 'tls_certificate_id': 'cont_id_1', + 'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem', + 'default_tls_container': RET_DEF_TLS_CONT, + 'pools': [RET_POOL_1], + 'l7policies': [], + 'enabled': True, + 'insert_headers': {}} + +RET_LISTENER_TLS_SNI = { + 'id': 'sample_listener_id_1', + 'protocol_port': '443', + 'protocol': 'TERMINATED_HTTPS', + 'default_pool': RET_POOL_1, + 'connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, + 'tls_certificate_id': 'cont_id_1', + 'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem', + 'default_tls_container': RET_DEF_TLS_CONT, + 'crt_dir': '/v2/sample_loadbalancer_id_1', + 'sni_container_ids': ['cont_id_2', 'cont_id_3'], + 'sni_containers': [RET_SNI_CONT_1, RET_SNI_CONT_2], + 'pools': [RET_POOL_1], + 'l7policies': [], + 'enabled': True, + 'insert_headers': {}} + +RET_AMPHORA = { + 'id': 'sample_amphora_id_1', + 'lb_network_ip': '10.0.1.1', + 'vrrp_ip': '10.1.1.1', + 'ha_ip': '192.168.10.1', + 'vrrp_port_id': '1234', + 'ha_port_id': '1234', + 'role': None, + 'status': 'ACTIVE', + 'vrrp_interface': None, + 'vrrp_priority': None} + +RET_LB = { + 'additional_vips': [], + 'host_amphora': RET_AMPHORA, + 'id': 'sample_loadbalancer_id_1', + 'vip_address': '10.0.0.2', + 'listeners': [RET_LISTENER], + 'peer_port': 1024, + 'topology': 'SINGLE', + 'enabled': True, + 'global_connection_limit': 
constants.HAPROXY_DEFAULT_MAXCONN, + 'amphorae': [sample_amphora_tuple()]} + +RET_LB_L7 = { + 'additional_vips': [], + 'host_amphora': RET_AMPHORA, + 'id': 'sample_loadbalancer_id_1', + 'vip_address': '10.0.0.2', + 'listeners': [RET_LISTENER_L7], + 'peer_port': 1024, + 'topology': 'SINGLE', + 'enabled': True, + 'global_connection_limit': constants.HAPROXY_DEFAULT_MAXCONN, + 'amphorae': [sample_amphora_tuple()]} + +UDP_SOURCE_IP_BODY = { + 'type': constants.SESSION_PERSISTENCE_SOURCE_IP, + 'persistence_timeout': 33, + 'persistence_granularity': '255.0.0.0' +} + +RET_UDP_HEALTH_MONITOR = { + 'id': 'sample_monitor_id_1', + 'type': constants.HEALTH_MONITOR_UDP_CONNECT, + 'delay': 30, + 'timeout': 31, + 'enabled': True, + 'fall_threshold': 3, + 'check_script_path': (CONF.haproxy_amphora.base_path + + '/lvs/check/udp_check.sh') +} + +RET_UDP_MEMBER = { + 'id': 'member_id_1', + 'address': '192.0.2.10', + 'ip_version': 4, + 'protocol_port': 82, + 'weight': 13, + 'enabled': True, + 'monitor_address': None, + 'monitor_port': None +} + +RET_UDP_MEMBER_MONITOR_IP_PORT = { + 'id': 'member_id_1', + 'address': '192.0.2.10', + 'ip_version': 4, + 'protocol_port': 82, + 'weight': 13, + 'enabled': True, + 'monitor_address': '192.168.1.1', + 'monitor_port': 9000 +} + +UDP_MEMBER_1 = { + 'id': 'sample_member_id_1', + 'address': '10.0.0.99', + 'ip_version': 4, + 'enabled': True, + 'protocol_port': 82, + 'weight': 13, + 'monitor_address': None, + 'monitor_port': None, +} + +UDP_MEMBER_2 = { + 'id': 'sample_member_id_2', + 'address': '10.0.0.98', + 'ip_version': 4, + 'enabled': True, + 'protocol_port': 82, + 'weight': 13, + 'monitor_address': None, + 'monitor_port': None +} + +RET_UDP_POOL = { + 'id': 'sample_pool_id_1', + 'enabled': True, + 'health_monitor': RET_UDP_HEALTH_MONITOR, + 'lb_algorithm': 'wrr', + 'members': [UDP_MEMBER_1, UDP_MEMBER_2], + 'protocol': 'udp', + 'session_persistence': UDP_SOURCE_IP_BODY +} + +RET_UDP_LISTENER = { + 'connection_limit': 98, + 'default_pool': { + 'id': 'sample_pool_id_1', + 'enabled': True, + 'health_monitor': RET_UDP_HEALTH_MONITOR, + 'lb_algorithm': 'wrr', + 'members': [UDP_MEMBER_1, UDP_MEMBER_2], + 'protocol': 'udp', + 'session_persistence': UDP_SOURCE_IP_BODY + }, + 'enabled': True, + 'id': 'sample_listener_id_1', + 'protocol_mode': 'udp', + 'protocol_port': '80' +} + +SCTP_SOURCE_IP_BODY = { + 'type': constants.SESSION_PERSISTENCE_SOURCE_IP, + 'persistence_timeout': 33, + 'persistence_granularity': '255.0.0.0' +} + +RET_SCTP_HEALTH_MONITOR = { + 'id': 'sample_monitor_id_1', + 'type': lib_consts.HEALTH_MONITOR_SCTP, + 'delay': 30, + 'timeout': 31, + 'enabled': True, + 'fall_threshold': 3, + 'check_script_path': 'amphora-health-checker sctp' +} + +RET_SCTP_MEMBER = { + 'id': 'member_id_1', + 'address': '192.0.2.10', + 'ip_version': 4, + 'protocol_port': 82, + 'weight': 13, + 'enabled': True, + 'monitor_address': None, + 'monitor_port': None +} + +RET_SCTP_MEMBER_MONITOR_IP_PORT = { + 'id': 'member_id_1', + 'address': '192.0.2.10', + 'ip_version': 4, + 'protocol_port': 82, + 'weight': 13, + 'enabled': True, + 'monitor_address': '192.168.1.1', + 'monitor_port': 9000 +} + +SCTP_MEMBER_1 = { + 'id': 'sample_member_id_1', + 'address': '10.0.0.99', + 'ip_version': 4, + 'enabled': True, + 'protocol_port': 82, + 'weight': 13, + 'monitor_address': None, + 'monitor_port': None, +} + +SCTP_MEMBER_2 = { + 'id': 'sample_member_id_2', + 'address': '10.0.0.98', + 'ip_version': 4, + 'enabled': True, + 'protocol_port': 82, + 'weight': 13, + 'monitor_address': None, + 'monitor_port': 
None +} + +RET_SCTP_POOL = { + 'id': 'sample_pool_id_1', + 'enabled': True, + 'health_monitor': RET_SCTP_HEALTH_MONITOR, + 'lb_algorithm': 'wrr', + 'members': [SCTP_MEMBER_1, SCTP_MEMBER_2], + 'protocol': 'sctp', + 'session_persistence': SCTP_SOURCE_IP_BODY +} + +RET_SCTP_LISTENER = { + 'connection_limit': 98, + 'default_pool': { + 'id': 'sample_pool_id_1', + 'enabled': True, + 'health_monitor': RET_SCTP_HEALTH_MONITOR, + 'lb_algorithm': 'wrr', + 'members': [SCTP_MEMBER_1, SCTP_MEMBER_2], + 'protocol': 'sctp', + 'session_persistence': SCTP_SOURCE_IP_BODY + }, + 'enabled': True, + 'id': 'sample_listener_id_1', + 'protocol_mode': 'sctp', + 'protocol_port': '80' +} + + +def sample_listener_loadbalancer_tuple( + topology=None, enabled=True, vip=None, pools=None, + additional_vips=False): + if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: + more_amp = True + else: + more_amp = False + topology = constants.TOPOLOGY_SINGLE + in_lb = collections.namedtuple( + 'load_balancer', 'id, name, vip, amphorae, topology, ' + 'pools, listeners, enabled, project_id, additional_vips') + return in_lb( + id='sample_loadbalancer_id_1', + name='test-lb', + vip=vip or sample_vip_tuple(), + amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER), + sample_amphora_tuple( + id='sample_amphora_id_2', + lb_network_ip='10.0.1.2', + vrrp_ip='10.1.1.2', + role=constants.ROLE_BACKUP)] + if more_amp else [sample_amphora_tuple()], + topology=topology, + pools=pools or [], + listeners=[], + enabled=enabled, + project_id='12345', + additional_vips=[sample_vip_tuple('10.0.1.2'), + sample_vip_tuple('2001:db8::2')] + if additional_vips else [] + ) + + +def sample_lb_with_udp_listener_tuple( + topology=None, enabled=True, listeners=None, pools=None): + if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']: + more_amp = True + else: + more_amp = False + topology = constants.TOPOLOGY_SINGLE + listeners = listeners or [sample_listener_tuple( + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.255.0.0', + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT)] + + in_lb = collections.namedtuple( + 'load_balancer', 'id, name, vip, amphorae, topology, ' + 'pools, enabled, project_id, listeners, additional_vips') + return in_lb( + id='sample_loadbalancer_id_1', + name='test-lb', + vip=sample_vip_tuple(), + amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER), + sample_amphora_tuple( + id='sample_amphora_id_2', + lb_network_ip='10.0.1.2', + vrrp_ip='10.1.1.2', + role=constants.ROLE_BACKUP)] + if more_amp else [sample_amphora_tuple()], + topology=topology, + listeners=listeners, + pools=pools or [], + enabled=enabled, + project_id='12345', + additional_vips=[] + ) + + +def sample_vrrp_group_tuple(): + in_vrrp_group = collections.namedtuple( + 'vrrp_group', 'load_balancer_id, vrrp_auth_type, vrrp_auth_pass, ' + 'advert_int, smtp_server, smtp_connect_timeout, ' + 'vrrp_group_name') + return in_vrrp_group( + vrrp_group_name='sample_loadbalancer_id_1', + load_balancer_id='sample_loadbalancer_id_1', + vrrp_auth_type='PASS', + vrrp_auth_pass='123', + advert_int='1', + smtp_server='', + smtp_connect_timeout='') + + +def sample_vip_tuple(ip_address='10.0.0.2', subnet_id='vip_subnet_uuid', + vnic_type='normal'): + vip = collections.namedtuple('vip', ('ip_address', 'subnet_id', + 'vnic_type')) + return vip(ip_address=ip_address, subnet_id=subnet_id, vnic_type=vnic_type) + + +def sample_listener_tuple(proto=None, 
monitor=True, alloc_default_pool=True, + persistence=True, persistence_type=None, + persistence_cookie=None, persistence_timeout=None, + persistence_granularity=None, + tls=False, sni=False, peer_port=None, topology=None, + l7=False, enabled=True, insert_headers=None, + be_proto=None, monitor_ip_port=False, + monitor_proto=None, monitor_expected_codes=None, + backup_member=False, disabled_member=False, + connection_limit=constants.DEFAULT_CONNECTION_LIMIT, + timeout_client_data=50000, + timeout_member_connect=5000, + timeout_member_data=50000, + timeout_tcp_inspect=0, + client_ca_cert=False, client_crl_cert=False, + ssl_type_l7=False, pool_cert=False, + pool_ca_cert=False, pool_crl=False, + tls_enabled=False, hm_host_http_check=False, + id='sample_listener_id_1', recursive_nest=False, + provisioning_status=constants.ACTIVE, + tls_ciphers=constants.CIPHERS_OWASP_SUITE_B, + backend_tls_ciphers=None, + tls_versions=constants.TLS_VERSIONS_OWASP_SUITE_B, + backend_tls_versions=constants. + TLS_VERSIONS_OWASP_SUITE_B, + alpn_protocols=constants. + AMPHORA_SUPPORTED_ALPN_PROTOCOLS, + sample_default_pool=1, + pool_enabled=True, + backend_alpn_protocols=constants. + AMPHORA_SUPPORTED_ALPN_PROTOCOLS, + include_pools=True, + additional_vips=False, + hsts_max_age=10_000_000, + hsts_include_subdomains=True, hsts_preload=True): + proto = 'HTTP' if proto is None else proto + if be_proto is None: + be_proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto + if proto != constants.PROTOCOL_TERMINATED_HTTPS: + tls_ciphers = None + tls_versions = None + alpn_protocols = None + if pool_cert is False: + backend_tls_versions = None + topology = 'SINGLE' if topology is None else topology + port = '443' if proto in ['HTTPS', 'TERMINATED_HTTPS'] else '80' + peer_port = 1024 if peer_port is None else peer_port + insert_headers = insert_headers or {} + in_listener = collections.namedtuple( + 'listener', 'id, project_id, protocol_port, protocol, default_pool, ' + 'connection_limit, tls_certificate_id, ' + 'sni_container_ids, default_tls_container, ' + 'sni_containers, load_balancer, peer_port, pools, ' + 'l7policies, enabled, insert_headers, timeout_client_data,' + 'timeout_member_connect, timeout_member_data, ' + 'timeout_tcp_inspect, client_ca_tls_certificate_id, ' + 'client_ca_tls_certificate, client_authentication, ' + 'client_crl_container_id, provisioning_status, ' + 'tls_ciphers, tls_versions, alpn_protocols, ' + 'hsts_max_age, hsts_include_subdomains, hsts_preload' + ) + if l7: + pools = [ + sample_pool_tuple( + proto=be_proto, monitor=monitor, persistence=persistence, + persistence_type=persistence_type, + persistence_cookie=persistence_cookie, + monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, + pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, + pool_crl=pool_crl, tls_enabled=tls_enabled, + hm_host_http_check=hm_host_http_check, + listener_id='sample_listener_id_1', + tls_ciphers=backend_tls_ciphers, + tls_versions=backend_tls_versions, + enabled=pool_enabled, + alpn_protocols=backend_alpn_protocols), + sample_pool_tuple( + proto=be_proto, monitor=monitor, persistence=persistence, + persistence_type=persistence_type, + persistence_cookie=persistence_cookie, sample_pool=2, + monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, + pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, + pool_crl=pool_crl, tls_enabled=tls_enabled, + hm_host_http_check=hm_host_http_check, + listener_id='sample_listener_id_1', + tls_ciphers=backend_tls_ciphers, + tls_versions=None, + enabled=pool_enabled, + 
alpn_protocols=backend_alpn_protocols)] + l7policies = [ + sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1), + sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2), + sample_l7policy_tuple('sample_l7policy_id_3', sample_policy=3), + sample_l7policy_tuple('sample_l7policy_id_4', sample_policy=4), + sample_l7policy_tuple('sample_l7policy_id_5', sample_policy=5), + sample_l7policy_tuple('sample_l7policy_id_6', sample_policy=6), + sample_l7policy_tuple('sample_l7policy_id_7', sample_policy=7)] + if ssl_type_l7: + l7policies.append(sample_l7policy_tuple( + 'sample_l7policy_id_8', sample_policy=8)) + else: + pools = [ + sample_pool_tuple( + proto=be_proto, monitor=monitor, persistence=persistence, + persistence_type=persistence_type, + persistence_cookie=persistence_cookie, + monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto, + backup_member=backup_member, disabled_member=disabled_member, + pool_cert=pool_cert, pool_ca_cert=pool_ca_cert, + pool_crl=pool_crl, tls_enabled=tls_enabled, + hm_host_http_check=hm_host_http_check, + listener_id='sample_listener_id_1', + tls_ciphers=backend_tls_ciphers, + tls_versions=backend_tls_versions, + enabled=pool_enabled, + alpn_protocols=backend_alpn_protocols)] + l7policies = [] + listener = in_listener( + id=id, + project_id='12345', + protocol_port=port, + protocol=proto, + load_balancer=sample_listener_loadbalancer_tuple( + topology=topology, pools=pools, additional_vips=additional_vips), + peer_port=peer_port, + default_pool=sample_pool_tuple( + listener_id='sample_listener_id_1', + proto=be_proto, monitor=monitor, persistence=persistence, + persistence_type=persistence_type, + persistence_cookie=persistence_cookie, + persistence_timeout=persistence_timeout, + persistence_granularity=persistence_granularity, + monitor_ip_port=monitor_ip_port, + monitor_proto=monitor_proto, + monitor_expected_codes=monitor_expected_codes, + pool_cert=pool_cert, + pool_ca_cert=pool_ca_cert, + pool_crl=pool_crl, + tls_enabled=tls_enabled, + hm_host_http_check=hm_host_http_check, + sample_pool=sample_default_pool, + enabled=pool_enabled + ) if alloc_default_pool else '', + connection_limit=connection_limit, + tls_certificate_id='cont_id_1' if tls else '', + sni_container_ids=['cont_id_2', 'cont_id_3'] if sni else [], + default_tls_container=sample_tls_container_tuple( + id='cont_id_1', certificate=sample_certs.X509_CERT, + private_key=sample_certs.X509_CERT_KEY, + intermediates=sample_certs.X509_IMDS_LIST, + primary_cn=sample_certs.X509_CERT_CN + ) if tls else '', + sni_containers=[ + sample_tls_sni_container_tuple( + tls_container_id='cont_id_2', + tls_container=sample_tls_container_tuple( + id='cont_id_2', certificate=sample_certs.X509_CERT_2, + private_key=sample_certs.X509_CERT_KEY_2, + intermediates=sample_certs.X509_IMDS_LIST, + primary_cn=sample_certs.X509_CERT_CN_2)), + sample_tls_sni_container_tuple( + tls_container_id='cont_id_3', + tls_container=sample_tls_container_tuple( + id='cont_id_3', certificate=sample_certs.X509_CERT_3, + private_key=sample_certs.X509_CERT_KEY_3, + intermediates=sample_certs.X509_IMDS_LIST, + primary_cn=sample_certs.X509_CERT_CN_3))] + if sni else [], + pools=pools if include_pools else '', + l7policies=l7policies, + enabled=enabled, + insert_headers=insert_headers, + timeout_client_data=timeout_client_data, + timeout_member_connect=timeout_member_connect, + timeout_member_data=timeout_member_data, + timeout_tcp_inspect=timeout_tcp_inspect, + client_ca_tls_certificate_id='cont_id_ca' if client_ca_cert else 
'', + client_ca_tls_certificate=sample_tls_container_tuple( + id='cont_id_ca', certificate=sample_certs.X509_CA_CERT, + primary_cn=sample_certs.X509_CA_CERT_CN + ) if client_ca_cert else '', + client_authentication=( + constants.CLIENT_AUTH_MANDATORY if client_ca_cert else + constants.CLIENT_AUTH_NONE), + client_crl_container_id='cont_id_crl' if client_crl_cert else '', + provisioning_status=provisioning_status, + tls_ciphers=tls_ciphers, + tls_versions=tls_versions, + alpn_protocols=alpn_protocols, + hsts_max_age=hsts_max_age, + hsts_include_subdomains=hsts_include_subdomains, + hsts_preload=hsts_preload, + ) + if recursive_nest: + listener.load_balancer.listeners.append(listener) + return listener + + +def sample_tls_sni_container_tuple(tls_container_id=None, tls_container=None): + sc = collections.namedtuple('sni_container', 'tls_container_id, ' + 'tls_container') + return sc(tls_container_id=tls_container_id, tls_container=tls_container) + + +def sample_tls_sni_containers_tuple(tls_container_id=None, tls_container=None): + sc = collections.namedtuple('sni_containers', 'tls_container_id, ' + 'tls_container') + return [sc(tls_container_id=tls_container_id, tls_container=tls_container)] + + +def sample_tls_container_tuple(id='cont_id_1', certificate=None, + private_key=None, intermediates=None, + primary_cn=None): + sc = collections.namedtuple( + 'tls_container', + 'id, certificate, private_key, intermediates, primary_cn') + return sc(id=id, certificate=certificate, private_key=private_key, + intermediates=intermediates or [], primary_cn=primary_cn) + + +def sample_pool_tuple(listener_id=None, proto=None, monitor=True, + persistence=True, persistence_type=None, + persistence_cookie=None, persistence_timeout=None, + persistence_granularity=None, sample_pool=1, + monitor_ip_port=False, monitor_proto=None, + monitor_expected_codes=None, + backup_member=False, disabled_member=False, + has_http_reuse=True, pool_cert=False, pool_ca_cert=False, + pool_crl=False, tls_enabled=False, + hm_host_http_check=False, + provisioning_status=constants.ACTIVE, + tls_ciphers=constants.CIPHERS_OWASP_SUITE_B, + tls_versions=constants.TLS_VERSIONS_OWASP_SUITE_B, + lb_algorithm=constants.LB_ALGORITHM_ROUND_ROBIN, + enabled=True, + alpn_protocols=constants. 
+ AMPHORA_SUPPORTED_ALPN_PROTOCOLS): + proto = 'HTTP' if proto is None else proto + if not tls_enabled: + tls_ciphers = None + tls_versions = None + alpn_protocols = None + if monitor_proto is None: + if proto == constants.PROTOCOL_UDP: + monitor_proto = constants.HEALTH_MONITOR_UDP_CONNECT + else: + monitor_proto = proto + in_pool = collections.namedtuple( + 'pool', 'id, protocol, lb_algorithm, members, health_monitor, ' + 'session_persistence, enabled, operating_status, ' + 'tls_certificate_id, ca_tls_certificate_id, ' + 'crl_container_id, tls_enabled, tls_ciphers, ' + 'tls_versions, provisioning_status, alpn_protocols, ' + + constants.HTTP_REUSE) + if (proto in constants.LVS_PROTOCOLS and + persistence_type == constants.SESSION_PERSISTENCE_SOURCE_IP): + kwargs = {'persistence_type': persistence_type, + 'persistence_timeout': persistence_timeout, + 'persistence_granularity': persistence_granularity} + else: + kwargs = {'persistence_type': persistence_type, + 'persistence_cookie': persistence_cookie} + persis = sample_session_persistence_tuple(**kwargs) + mon = None + if sample_pool == 0: + id = 'sample_pool_id_0' + members = [] + if monitor is True: + mon = sample_health_monitor_tuple( + proto=monitor_proto, + host_http_check=hm_host_http_check, + expected_codes=monitor_expected_codes) + elif sample_pool == 1: + id = 'sample_pool_id_1' + members = [sample_member_tuple('sample_member_id_1', '10.0.0.99', + monitor_ip_port=monitor_ip_port), + sample_member_tuple('sample_member_id_2', '10.0.0.98', + monitor_ip_port=monitor_ip_port, + backup=backup_member, + enabled=not disabled_member)] + if monitor is True: + mon = sample_health_monitor_tuple( + proto=monitor_proto, + host_http_check=hm_host_http_check, + expected_codes=monitor_expected_codes) + elif sample_pool == 2: + id = 'sample_pool_id_2' + members = [sample_member_tuple('sample_member_id_3', '10.0.0.97', + monitor_ip_port=monitor_ip_port)] + if monitor is True: + mon = sample_health_monitor_tuple( + proto=monitor_proto, sample_hm=2, + host_http_check=hm_host_http_check, + expected_codes=monitor_expected_codes) + return in_pool( + id=id, + protocol=proto, + lb_algorithm=lb_algorithm, + members=members, + health_monitor=mon, + session_persistence=persis if persistence is True else None, + enabled=enabled, + operating_status='ACTIVE', has_http_reuse=has_http_reuse, + tls_certificate_id='pool_cont_1' if pool_cert else None, + ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None, + crl_container_id='pool_crl' if pool_crl else None, + tls_enabled=tls_enabled, + tls_ciphers=tls_ciphers, + tls_versions=tls_versions, + provisioning_status=provisioning_status, + alpn_protocols=alpn_protocols) + + +def sample_member_tuple(id, ip, enabled=True, operating_status='ACTIVE', + provisioning_status=constants.ACTIVE, + monitor_ip_port=False, backup=False): + in_member = collections.namedtuple('member', + 'id, ip_address, protocol_port, ' + 'weight, subnet_id, ' + 'enabled, operating_status, ' + 'monitor_address, monitor_port, ' + 'backup, provisioning_status') + monitor_address = '192.168.1.1' if monitor_ip_port else None + monitor_port = 9000 if monitor_ip_port else None + return in_member( + id=id, + ip_address=ip, + protocol_port=82, + weight=13, + subnet_id='10.0.0.1/24', + enabled=enabled, + operating_status=operating_status, + monitor_address=monitor_address, + monitor_port=monitor_port, + backup=backup, provisioning_status=provisioning_status) + + +def sample_session_persistence_tuple(persistence_type=None, + persistence_cookie=None, + 
persistence_timeout=None, + persistence_granularity=None): + spersistence = collections.namedtuple('SessionPersistence', + 'type, cookie_name, ' + 'persistence_timeout, ' + 'persistence_granularity') + pt = 'HTTP_COOKIE' if persistence_type is None else persistence_type + return spersistence(type=pt, + cookie_name=persistence_cookie, + persistence_timeout=persistence_timeout, + persistence_granularity=persistence_granularity) + + +def sample_health_monitor_tuple(proto='HTTP', sample_hm=1, + host_http_check=False, expected_codes=None, + provisioning_status=constants.ACTIVE): + proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto + monitor = collections.namedtuple( + 'monitor', 'id, type, delay, timeout, fall_threshold, rise_threshold,' + 'http_method, url_path, expected_codes, enabled, ' + 'check_script_path, http_version, domain_name, ' + 'provisioning_status') + + if sample_hm == 1: + id = 'sample_monitor_id_1' + url_path = '/index.html' + elif sample_hm == 2: + id = 'sample_monitor_id_2' + url_path = '/healthmon.html' + kwargs = { + 'id': id, + 'type': proto, + 'delay': 30, + 'timeout': 31, + 'fall_threshold': 3, + 'rise_threshold': 2, + 'http_method': 'GET', + 'url_path': url_path, + 'expected_codes': '418', + 'enabled': True, + 'provisioning_status': provisioning_status, + } + if host_http_check: + kwargs.update({'http_version': 1.1, 'domain_name': 'testlab.com'}) + else: + kwargs.update({'http_version': 1.0, 'domain_name': None}) + if expected_codes: + kwargs.update({'expected_codes': expected_codes}) + if proto == constants.HEALTH_MONITOR_UDP_CONNECT: + kwargs['check_script_path'] = (CONF.haproxy_amphora.base_path + + 'lvs/check/' + 'udp_check.sh') + elif proto == lib_consts.HEALTH_MONITOR_SCTP: + kwargs['check_script_path'] = 'amphora-health-checker sctp' + else: + kwargs['check_script_path'] = None + return monitor(**kwargs) + + +def sample_l7policy_tuple(id, + action=constants.L7POLICY_ACTION_REJECT, + redirect_pool=None, redirect_url=None, + redirect_prefix=None, + enabled=True, redirect_http_code=302, + sample_policy=1, + provisioning_status=constants.ACTIVE): + in_l7policy = collections.namedtuple('l7policy', + 'id, action, redirect_pool, ' + 'redirect_url, redirect_prefix, ' + 'l7rules, enabled,' + 'redirect_http_code,' + 'provisioning_status') + l7rules = [] + if sample_policy == 1: + action = constants.L7POLICY_ACTION_REDIRECT_TO_POOL + redirect_pool = sample_pool_tuple(sample_pool=2) + l7rules = [sample_l7rule_tuple('sample_l7rule_id_1')] + elif sample_policy == 2: + action = constants.L7POLICY_ACTION_REDIRECT_TO_URL + redirect_url = 'http://www.example.com/' + l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2), + sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)] + elif sample_policy == 3: + action = constants.L7POLICY_ACTION_REJECT + l7rules = [sample_l7rule_tuple('sample_l7rule_id_4', sample_rule=4), + sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)] + elif sample_policy == 4: + action = constants.L7POLICY_ACTION_REJECT + elif sample_policy == 5: + action = constants.L7POLICY_ACTION_REJECT + enabled = False + l7rules = [sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)] + elif sample_policy == 6: + action = constants.L7POLICY_ACTION_REJECT + l7rules = [sample_l7rule_tuple('sample_l7rule_id_6', sample_rule=6)] + elif sample_policy == 7: + action = constants.L7POLICY_ACTION_REDIRECT_PREFIX + redirect_prefix = 'https://example.com/' + l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2), + 
sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)] + elif sample_policy == 8: + action = constants.L7POLICY_ACTION_REDIRECT_TO_URL + redirect_url = 'http://www.ssl-type-l7rule-test.com/' + l7rules = [sample_l7rule_tuple('sample_l7rule_id_7', sample_rule=7), + sample_l7rule_tuple('sample_l7rule_id_8', sample_rule=8), + sample_l7rule_tuple('sample_l7rule_id_9', sample_rule=9), + sample_l7rule_tuple('sample_l7rule_id_10', sample_rule=10), + sample_l7rule_tuple('sample_l7rule_id_11', sample_rule=11)] + return in_l7policy( + id=id, + action=action, + redirect_pool=redirect_pool, + redirect_url=redirect_url, + redirect_prefix=redirect_prefix, + l7rules=l7rules, + enabled=enabled, + redirect_http_code=redirect_http_code + if (action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL, + constants.L7POLICY_ACTION_REDIRECT_PREFIX] and + redirect_http_code) else None, + provisioning_status=provisioning_status) + + +def sample_l7rule_tuple(id, + type=constants.L7RULE_TYPE_PATH, + compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + key=None, + value='/api', + invert=False, + enabled=True, + sample_rule=1, + provisioning_status=constants.ACTIVE): + in_l7rule = collections.namedtuple('l7rule', + 'id, type, compare_type, ' + 'key, value, invert, enabled,' + 'provisioning_status') + if sample_rule == 2: + type = constants.L7RULE_TYPE_HEADER + compare_type = constants.L7RULE_COMPARE_TYPE_CONTAINS + key = 'Some-header' + value = 'This string\\ with stuff' + invert = True + enabled = True + if sample_rule == 3: + type = constants.L7RULE_TYPE_COOKIE + compare_type = constants.L7RULE_COMPARE_TYPE_REGEX + key = 'some-cookie' + value = 'this.*|that' + invert = False + enabled = True + if sample_rule == 4: + type = constants.L7RULE_TYPE_FILE_TYPE + compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO + key = None + value = 'jpg' + invert = False + enabled = True + if sample_rule == 5: + type = constants.L7RULE_TYPE_HOST_NAME + compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH + key = None + value = '.example.com' + invert = False + enabled = True + if sample_rule == 6: + type = constants.L7RULE_TYPE_HOST_NAME + compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH + key = None + value = '.example.com' + invert = False + enabled = False + if sample_rule == 7: + type = constants.L7RULE_TYPE_SSL_CONN_HAS_CERT + compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO + key = None + value = 'tRuE' + invert = False + enabled = True + if sample_rule == 8: + type = constants.L7RULE_TYPE_SSL_VERIFY_RESULT + compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO + key = None + value = '1' + invert = True + enabled = True + if sample_rule == 9: + type = constants.L7RULE_TYPE_SSL_DN_FIELD + compare_type = constants.L7RULE_COMPARE_TYPE_REGEX + key = 'STREET' + value = r'^STREET.*NO\.$' + invert = True + enabled = True + if sample_rule == 10: + type = constants.L7RULE_TYPE_SSL_DN_FIELD + compare_type = constants.L7RULE_COMPARE_TYPE_STARTS_WITH + key = 'OU-3' + value = 'Orgnization Bala' + invert = True + enabled = True + return in_l7rule( + id=id, + type=type, + compare_type=compare_type, + key=key, + value=value, + invert=invert, + enabled=enabled, + provisioning_status=provisioning_status) + + +def sample_base_expected_config(frontend=None, logging=None, backend=None, + peers=None, global_opts=None, defaults=None): + if frontend is None: + frontend = ("frontend sample_listener_id_1\n" + " maxconn {maxconn}\n" + " bind 10.0.0.2:80\n" + " mode http\n" + " default_backend 
sample_pool_id_1:sample_listener_id_1" + "\n" + " timeout client 50000\n").format( + maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + if logging is None: + logging = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ " + "%ci\\ %cp\\ %t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ " + "%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ " + "%tsc\n\n") + if backend is None: + backend = ("backend sample_pool_id_1:sample_listener_id_1\n" + " mode http\n" + " balance roundrobin\n" + " cookie SRV insert indirect nocache\n" + " timeout check 31s\n" + " option httpchk GET /index.html HTTP/1.0\n" + " http-check expect rstatus 418\n" + " fullconn {maxconn}\n" + " option allbackups\n" + " timeout connect 5000\n" + " timeout server 50000\n" + " server sample_member_id_1 10.0.0.99:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_1\n" + " server sample_member_id_2 10.0.0.98:82 weight 13 " + "check inter 30s fall 3 rise 2 cookie sample_member_id_2\n" + "\n").format(maxconn=constants.HAPROXY_DEFAULT_MAXCONN) + + if peers is None: + peers = "\n\n" + if global_opts is None: + global_opts = f" maxconn {constants.HAPROXY_DEFAULT_MAXCONN}\n\n" + if defaults is None: + defaults = ("defaults\n" + " log global\n" + " retries 3\n" + " option redispatch\n" + " option splice-request\n" + " option splice-response\n" + " option http-keep-alive\n\n\n") + return ("# Configuration for loadbalancer sample_loadbalancer_id_1\n" + "global\n" + " daemon\n" + " user nobody\n" + " log /run/rsyslog/octavia/log local0\n" + " log /run/rsyslog/octavia/log local1 notice\n" + " stats socket /var/lib/octavia/sample_loadbalancer_id_1.sock" + " mode 0666 level user\n" + + global_opts + defaults + peers + frontend + logging + backend) diff --git a/octavia/tests/unit/common/sample_configs/sample_pkcs12.p12 b/octavia/tests/unit/common/sample_configs/sample_pkcs12.p12 new file mode 100644 index 0000000000..bb5d006bdc Binary files /dev/null and b/octavia/tests/unit/common/sample_configs/sample_pkcs12.p12 differ diff --git a/octavia/tests/unit/common/test_base_taskflow.py b/octavia/tests/unit/common/test_base_taskflow.py new file mode 100644 index 0000000000..0d60862d63 --- /dev/null +++ b/octavia/tests/unit/common/test_base_taskflow.py @@ -0,0 +1,236 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
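For context before the test body: a minimal, self-contained sketch of the taskflow loading pattern that TestBaseTaskFlowEngine below asserts on — one shared ThreadPoolExecutor reused for every engine handed to engines.load(). This is an illustrative toy, not Octavia code; NoopTask, the flow name, and max_workers=1 are invented here.

import concurrent.futures

from taskflow import engines
from taskflow.patterns import linear_flow
from taskflow import task


class NoopTask(task.Task):
    # Trivial task so the demo flow has something to execute.
    def execute(self):
        return 'done'


# Build the executor once and reuse it for each engine, mirroring what
# test_taskflow_load expects from BaseTaskFlowEngine.
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
engine = engines.load(linear_flow.Flow('demo').add(NoopTask()),
                      engine='parallel', executor=executor)
engine.compile()
engine.prepare()
engine.run()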
+# +import concurrent.futures +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from taskflow import engines as tf_engines +from taskflow.jobs.base import Job + +from octavia.common import base_taskflow +import octavia.tests.unit.base as base + + +MAX_WORKERS = 1 +ENGINE = 'parallel' + +_engine_mock = mock.MagicMock() + + +class TestBaseTaskFlowEngine(base.TestCase): + + def setUp(self): + + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="task_flow", max_workers=MAX_WORKERS) + conf.config(group="task_flow", engine=ENGINE) + conf.config(group="task_flow", disable_revert=True) + super().setUp() + + @mock.patch('concurrent.futures.ThreadPoolExecutor', + return_value='TESTEXECUTOR') + @mock.patch('taskflow.engines.load', + return_value=_engine_mock) + def test_taskflow_load(self, + mock_tf_engine_load, + mock_ThreadPoolExecutor): + + # Test __init__ + + base_taskflow_engine = base_taskflow.BaseTaskFlowEngine() + + concurrent.futures.ThreadPoolExecutor.assert_called_once_with( + max_workers=MAX_WORKERS) + + # Test taskflow_load + + base_taskflow_engine.taskflow_load('TEST') + + tf_engines.load.assert_called_once_with( + 'TEST', + engine=ENGINE, + executor='TESTEXECUTOR', + never_resolve=True) + + _engine_mock.compile.assert_called_once_with() + _engine_mock.prepare.assert_called_once_with() + + +class TestTaskFlowServiceController(base.TestCase): + + _mock_uuid = '9a2ebc48-cd3e-429e-aa04-e32f5fc5442a' + + def setUp(self): + self.conf = oslo_fixture.Config(cfg.CONF) + self.conf.config(group="task_flow", engine='parallel') + self.conf.config(group="task_flow", max_workers=MAX_WORKERS) + self.driver_mock = mock.MagicMock() + self.persistence_mock = mock.MagicMock() + self.jobboard_mock = mock.MagicMock() + self.driver_mock.job_board.return_value = self.jobboard_mock + self.driver_mock.persistence_driver.get_persistence.return_value = ( + self.persistence_mock) + self.service_controller = base_taskflow.TaskFlowServiceController( + self.driver_mock) + super().setUp() + + @mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=_mock_uuid) + @mock.patch('taskflow.engines.save_factory_details') + def test_run_poster(self, mock_engines, mockuuid): + flow_factory = mock.MagicMock() + flow_factory.__name__ = 'testname' + job_name = f'testname-{self._mock_uuid}' + job_details = {'store': 'test'} + with mock.patch.object(self.service_controller, '_wait_for_job' + ) as wait: + uuid = self.service_controller.run_poster(flow_factory, + **job_details) + save_logbook = self.persistence_mock.__enter__().get_connection( + ).save_logbook + save_logbook.assert_called() + self.assertEqual(job_name, save_logbook.call_args[0][0].name) + + mock_engines.assert_called() + save_args = mock_engines.call_args + self.assertEqual(job_name, save_args[0][0].name) + self.assertEqual(self._mock_uuid, save_args[0][0].uuid) + self.assertEqual(flow_factory, save_args[0][1]) + self.assertEqual(self.persistence_mock.__enter__(), + save_args[1]['backend']) + + self.jobboard_mock.__enter__().post.assert_called() + post_args = self.jobboard_mock.__enter__().post.call_args + self.assertEqual(job_name, post_args[0][0]) + self.assertEqual(job_details, post_args[1]['details']) + wait.assert_called() + self.assertEqual(self._mock_uuid, uuid) + + def test__wait_for_job(self): + job1 = mock.MagicMock() + job2 = mock.MagicMock() + job_board = mock.MagicMock() + job_board.iterjobs.side_effect = [ + [job1, job2] + ] + self.service_controller._wait_for_job(job_board) + + 
job1.wait.assert_called_once() + job2.wait.assert_called_once() + + @mock.patch('octavia.common.base_taskflow.' + 'ExtendExpiryDynamicLoggingConductor') + @mock.patch('octavia.common.base_taskflow.DynamicLoggingConductor') + @mock.patch('concurrent.futures.ThreadPoolExecutor') + def test_run_conductor(self, mock_threadpoolexec, dynamiccond, expirycond): + self.service_controller.run_conductor("test") + expirycond.assert_called_once_with( + "test", self.jobboard_mock.__enter__(), + persistence=self.persistence_mock.__enter__(), + engine='parallel', + engine_options={ + 'max_workers': MAX_WORKERS, + }) + self.conf.config(group="task_flow", + jobboard_backend_driver='zookeeper_taskflow_driver') + + self.service_controller.run_conductor("test2") + dynamiccond.assert_called_once_with( + "test2", self.jobboard_mock.__enter__(), + persistence=self.persistence_mock.__enter__(), + engine='parallel') + + def test__extend_jobs(self): + conductor = mock.MagicMock() + conductor._name = 'mycontroller' + + job1 = mock.MagicMock() + job1.expires_in.return_value = 10 + job2 = mock.MagicMock() + job2.expires_in.return_value = 10 + job3 = mock.MagicMock() + job3.expires_in.return_value = 30 + self.jobboard_mock.__enter__().iterjobs.return_value = [ + job1, job2, job3] + + self.jobboard_mock.__enter__().find_owner.side_effect = [ + 'mycontroller', + TypeError('no owner'), + 'mycontroller'] + + self.service_controller._extend_jobs(conductor, 30) + + job1.extend_expiry.assert_called_once_with(30) + job2.extend_expiry.assert_not_called() + job3.extend_expiry.assert_not_called() + + +class TestJobDetailsFilter(base.TestCase): + def test_filter(self): + log_filter = base_taskflow.JobDetailsFilter() + + tls_container_data = { + 'certificate': '', + 'private_key': '', + 'passphrase': '', + 'intermediates': [ + '', + '' + ] + } + + job = mock.Mock(spec=Job) + job.details = { + 'store': { + 'listeners': [ + { + 'name': 'listener_name', + 'default_tls_container_data': tls_container_data + } + ], + 'any_recursive': { + 'type': [ + { + 'other_list': [ + tls_container_data, + { + 'test': tls_container_data, + } + ] + } + ] + } + } + } + + record = mock.Mock() + record.args = (job, 'something') + + ret = log_filter.filter(record) + self.assertTrue(ret) + + self.assertNotIn(tls_container_data['certificate'], record.args[0]) + self.assertNotIn(tls_container_data['private_key'], record.args[0]) + self.assertNotIn(tls_container_data['passphrase'], record.args[0]) + self.assertNotIn(tls_container_data['intermediates'][0], + record.args[0]) + self.assertNotIn(tls_container_data['intermediates'][1], + record.args[0]) + self.assertIn('listener_name', record.args[0]) + + record.args = ('arg1', 2) + + ret = log_filter.filter(record) + self.assertTrue(ret) diff --git a/octavia/tests/unit/common/test_clients.py b/octavia/tests/unit/common/test_clients.py new file mode 100644 index 0000000000..fc72c8989c --- /dev/null +++ b/octavia/tests/unit/common/test_clients.py @@ -0,0 +1,137 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
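For context before the test body: the caching contract that TestNovaAuth, TestGlanceAuth and TestCinderAuth below pin down, reduced to a runnable toy (the class name and the stand-in object are hypothetical, not the real octavia.common.clients code). The first get call builds and caches the client on the class; later calls return the cached instance even when invoked with different arguments.

class CachedClientSketch:
    client = None

    @classmethod
    def get_client(cls, region=None, endpoint_type='publicURL', **kwargs):
        # The real code builds a keystone session and a service client here.
        if cls.client is None:
            cls.client = object()
        return cls.client


first = CachedClientSketch.get_client(region=None)
second = CachedClientSketch.get_client(region='test-region', insecure=True)
assert first is second  # same object, as the assertIs checks expect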
+from unittest import mock + +import cinderclient.v3 +import glanceclient.v2 +import novaclient.v2 +from oslo_config import cfg + +from octavia.common import clients +from octavia.common import keystone +from octavia.tests.unit import base + +CONF = cfg.CONF + + +class TestNovaAuth(base.TestCase): + + def setUp(self): + # Reset the session and client + clients.NovaAuth.nova_client = None + keystone._SESSION = None + + super().setUp() + + @mock.patch('keystoneauth1.session.Session', mock.Mock()) + def test_get_nova_client(self): + # There should be no existing client + self.assertIsNone( + clients.NovaAuth.nova_client + ) + + # Mock out the keystone session and get the client + keystone._SESSION = mock.MagicMock() + bc1 = clients.NovaAuth.get_nova_client(region=None, + endpoint_type='publicURL') + + # Our returned client should also be the saved client + self.assertIsInstance( + clients.NovaAuth.nova_client, + novaclient.v2.client.Client + ) + self.assertIs( + clients.NovaAuth.nova_client, + bc1 + ) + + # Getting the session again should return the same object + bc2 = clients.NovaAuth.get_nova_client( + region="test-region", service_name='novaEndpoint1', + endpoint="test-endpoint", endpoint_type='adminURL', insecure=True) + self.assertIs(bc1, bc2) + + +class TestGlanceAuth(base.TestCase): + + def setUp(self): + # Reset the session and client + clients.GlanceAuth.glance_client = None + keystone._SESSION = None + + super().setUp() + + @mock.patch('keystoneauth1.session.Session', mock.Mock()) + def test_get_glance_client(self): + # There should be no existing client + self.assertIsNone( + clients.GlanceAuth.glance_client + ) + + # Mock out the keystone session and get the client + keystone._SESSION = mock.MagicMock() + bc1 = clients.GlanceAuth.get_glance_client( + region=None, endpoint_type='publicURL', insecure=True) + + # Our returned client should also be the saved client + self.assertIsInstance( + clients.GlanceAuth.glance_client, + glanceclient.v2.client.Client + ) + self.assertIs( + clients.GlanceAuth.glance_client, + bc1 + ) + + # Getting the session again should return the same object + bc2 = clients.GlanceAuth.get_glance_client( + region="test-region", service_name="glanceEndpoint1", + endpoint="test-endpoint", endpoint_type='publicURL', insecure=True) + self.assertIs(bc1, bc2) + + +class TestCinderAuth(base.TestCase): + + def setUp(self): + # Reset the session and client + clients.CinderAuth.cinder_client = None + keystone._SESSION = None + + super().setUp() + + @mock.patch('keystoneauth1.session.Session', mock.Mock()) + def test_get_cinder_client(self): + # There should be no existing client + self.assertIsNone( + clients.CinderAuth.cinder_client + ) + + # Mock out the keystone session and get the client + keystone._SESSION = mock.MagicMock() + bc1 = clients.CinderAuth.get_cinder_client( + region=None, endpoint_type='publicURL', insecure=True) + + # Our returned client should also be the saved client + self.assertIsInstance( + clients.CinderAuth.cinder_client, + cinderclient.v3.client.Client + ) + self.assertIs( + clients.CinderAuth.cinder_client, + bc1 + ) + + # Getting the session again should return the same object + bc2 = clients.CinderAuth.get_cinder_client( + region="test-region", service_name="cinderEndpoint1", + endpoint="test-endpoint", endpoint_type='publicURL', insecure=True) + self.assertIs(bc1, bc2) diff --git a/octavia/tests/unit/common/test_config.py b/octavia/tests/unit/common/test_config.py new file mode 100644 index 0000000000..55fddd6757 --- /dev/null +++ 
b/octavia/tests/unit/common/test_config.py @@ -0,0 +1,115 @@ +# Copyright 2014, Doug Wiegley, A10 Networks. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import tempfile + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +import octavia.common.config as config +import octavia.tests.unit.base as base + + +class TestConfig(base.TestCase): + + def test_sanity(self): + config.init([]) + config.setup_logging(cfg.CONF) + # Resetting because this will cause inconsistent errors when run with + # other tests + self.addCleanup(cfg.CONF.reset) + + def test_validate_server_certs_key_passphrase(self): + conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) + conf.config( + group="certificates", + server_certs_key_passphrase="insecure-key-do-not-use-this-key" + ) + + # Test too short + self.assertRaises(ValueError, conf.config, + group="certificates", + server_certs_key_passphrase="short_passphrase") + + # Test too long + self.assertRaises( + ValueError, conf.config, group="certificates", + server_certs_key_passphrase="long-insecure-key-do-not-use-this") + + # Test invalid characters + self.assertRaises( + ValueError, conf.config, group="certificates", + server_certs_key_passphrase="insecure-key-do-not-u$e-this-key") + + def test_active_connection_retry_interval(self): + conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) + + # Test new name + with tempfile.NamedTemporaryFile(mode='w', delete=True) as tmp: + tmp.write("[haproxy_amphora]\n" + "active_connection_retry_interval=4\n") + tmp.flush() + + conf.set_config_files([tmp.name]) + + self.assertEqual( + 4, + conf.conf.haproxy_amphora.active_connection_retry_interval) + + # Test deprecated name + with tempfile.NamedTemporaryFile(mode='w', delete=True) as tmp: + tmp.write("[haproxy_amphora]\n" + "active_connection_rety_interval=3\n") + tmp.flush() + + conf.set_config_files([tmp.name]) + + self.assertEqual( + 3, + conf.conf.haproxy_amphora.active_connection_retry_interval) + + def test_handle_neutron_deprecations(self): + conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) + + # The deprecated settings are copied to the new settings + conf.config(endpoint='my_endpoint', + endpoint_type='internal', + ca_certificates_file='/path/to/certs', + group='neutron') + + config.handle_neutron_deprecations() + + self.assertEqual('my_endpoint', conf.conf.neutron.endpoint_override) + self.assertEqual(['internal'], conf.conf.neutron.valid_interfaces) + self.assertEqual('/path/to/certs', conf.conf.neutron.cafile) + + # Test case for https://bugs.launchpad.net/octavia/+bug/2051604 + def test_handle_neutron_deprecations_with_precedence(self): + conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) + + # The deprecated settings should not override the new settings when + # they exist + conf.config(endpoint='my_old_endpoint', + endpoint_type='old_type', + ca_certificates_file='/path/to/old_certs', + endpoint_override='my_endpoint', + valid_interfaces=['internal'], + cafile='/path/to/certs', + group='neutron') + 
+ config.handle_neutron_deprecations() + + self.assertEqual('my_endpoint', conf.conf.neutron.endpoint_override) + self.assertEqual(['internal'], conf.conf.neutron.valid_interfaces) + self.assertEqual('/path/to/certs', conf.conf.neutron.cafile) diff --git a/octavia/tests/unit/common/test_constants.py b/octavia/tests/unit/common/test_constants.py new file mode 100644 index 0000000000..bc75e44d5e --- /dev/null +++ b/octavia/tests/unit/common/test_constants.py @@ -0,0 +1,23 @@ +# Copyright 2014, Doug Wiegley, A10 Networks. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import octavia.common.constants as constants +import octavia.tests.unit.base as base + + +class TestConstants(base.TestCase): + # Rough sanity test of module import; not meant to be exhaustive + + def test_import(self): + self.assertEqual(constants.PROTOCOL_TCP, 'TCP') diff --git a/octavia/tests/unit/common/test_data_models.py b/octavia/tests/unit/common/test_data_models.py new file mode 100644 index 0000000000..a15998d4f5 --- /dev/null +++ b/octavia/tests/unit/common/test_data_models.py @@ -0,0 +1,556 @@ +# Copyright 2018 Rackspace US Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
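For context before the test body: the update() behavior that TestDataModels below exercises, as a runnable sketch. This covers only the flat-attribute case; the real data models also recurse into nested models such as vip and session_persistence (which the add/update/delete VIP and SP tests cover), and that recursion is deliberately not reproduced here.

class ModelSketch:
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    def update(self, update_dict):
        # Flat keys only; nested-model recursion is omitted in this sketch.
        for key, value in update_dict.items():
            setattr(self, key, value)


lb = ModelSketch(name='test-lb', enabled=True)
lb.update({'name': 'new-test-lb'})
assert lb.name == 'new-test-lb' and lb.enabled is True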
+ +import copy +import datetime +import json +import random + +from oslo_utils import timeutils +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.common import data_models +import octavia.tests.unit.base as base + + +class TestDataModels(base.TestCase): + + def setUp(self): + + self.LB_ID = uuidutils.generate_uuid() + self.LISTENER_ID = uuidutils.generate_uuid() + self.PROJECT_ID = uuidutils.generate_uuid() + self.SERVER_GROUP_ID = uuidutils.generate_uuid() + self.CREATED_AT = datetime.datetime.now() + self.UPDATED_AT = timeutils.utcnow() + self.VIP_IP = '192.0.2.10' + self.VIP_SUBNET_ID = uuidutils.generate_uuid() + self.VIP_NETWORK_ID = uuidutils.generate_uuid() + self.VIP_PORT_ID = uuidutils.generate_uuid() + self.VIP_QOS_ID = uuidutils.generate_uuid() + self.POOL_ID = uuidutils.generate_uuid() + self.AMP_ID = uuidutils.generate_uuid() + self.COMPUTE_ID = uuidutils.generate_uuid() + self.IMAGE_ID = uuidutils.generate_uuid() + self.COMPUTE_FLAVOR = uuidutils.generate_uuid() + self.TLS_CONTAINER_ID = uuidutils.generate_uuid() + + self.LB_obj = data_models.LoadBalancer( + id=self.LB_ID, + project_id=self.PROJECT_ID, + name='test-lb', + description='test-lb-description', + provisioning_status='great', + operating_status='even-better', + enabled=True, + vip=None, + vrrp_group=1, + topology='infinite', + listeners=[], + amphorae=[], + pools=[], + server_group_id=self.SERVER_GROUP_ID, + created_at=self.CREATED_AT, + updated_at=self.UPDATED_AT) + + self.VIP_obj = data_models.Vip( + load_balancer_id=self.LB_ID, + ip_address=self.VIP_IP, + subnet_id=self.VIP_SUBNET_ID, + network_id=self.VIP_NETWORK_ID, + port_id=self.VIP_PORT_ID, + qos_policy_id=self.VIP_QOS_ID) + + self.POOL_obj = data_models.Pool( + id=self.POOL_ID, + project_id=self.PROJECT_ID, + name='test-pool', + description='test-pool-description', + load_balancer_id=self.LB_ID, + load_balancer=None, + protocol='avian', + lb_algorithm='UseAllofThem', + enabled=True, + provisioning_status='great', + operating_status='even-better', + members=[], + health_monitor=None, + session_persistence=None, + listeners=[], + l7policies=[], + created_at=self.CREATED_AT, + updated_at=self.UPDATED_AT) + + self.SP_obj = data_models.SessionPersistence( + pool_id=self.POOL_ID, + type='adhesive', + cookie_name='chocolate', + pool=None) + + self.AMP_obj = data_models.Amphora( + id=self.AMP_ID, + load_balancer_id=self.LB_ID, + compute_id=self.COMPUTE_ID, + status=constants.ACTIVE, + lb_network_ip=None, + vrrp_ip=None, + ha_ip=None, + vrrp_port_id=None, + ha_port_id=self.VIP_PORT_ID, + load_balancer=self.LB_obj, + role=constants.ROLE_MASTER, + cert_expiration=None, + cert_busy=False, + vrrp_interface=None, + vrrp_id=None, + vrrp_priority=constants.ROLE_MASTER_PRIORITY, + cached_zone=None, + created_at=self.CREATED_AT, + updated_at=self.UPDATED_AT, + image_id=self.IMAGE_ID, + compute_flavor=self.COMPUTE_FLAVOR + ) + + self.QUOTA_obj = data_models.Quotas( + project_id=self.PROJECT_ID, + load_balancer=None, + listener=None, + pool=None, + health_monitor=None, + member=None, + l7policy=None, + l7rule=None, + in_use_health_monitor=None, + in_use_listener=None, + in_use_load_balancer=None, + in_use_member=None, + in_use_pool=None, + in_use_l7policy=None, + in_use_l7rule=None + ) + + super().setUp() + + def test_LoadBalancer_update(self): + + new_id = uuidutils.generate_uuid() + new_project_id = uuidutils.generate_uuid() + new_server_group_id = uuidutils.generate_uuid() + new_created_at = self.CREATED_AT + 
datetime.timedelta(minutes=5) + new_updated_at = self.UPDATED_AT + datetime.timedelta(minutes=10) + new_name = 'new-test-lb' + new_description = 'new-test-lb-description' + new_provisioning_status = 'new-great' + new_operating_status = 'new-even-better' + new_enabled = False + new_vrrp_group = 2 + new_topology = 'new-infinite' + + reference_LB_obj = data_models.LoadBalancer( + id=new_id, + project_id=new_project_id, + name=new_name, + description=new_description, + provisioning_status=new_provisioning_status, + operating_status=new_operating_status, + enabled=new_enabled, + vip=None, + vrrp_group=new_vrrp_group, + topology=new_topology, + listeners=[], + amphorae=[], + pools=[], + server_group_id=new_server_group_id, + created_at=new_created_at, + updated_at=new_updated_at) + + update_dict = { + 'id': new_id, + 'project_id': new_project_id, + 'name': new_name, + 'description': new_description, + 'provisioning_status': new_provisioning_status, + 'operating_status': new_operating_status, + 'enabled': new_enabled, + 'vrrp_group': new_vrrp_group, + 'topology': new_topology, + 'server_group_id': new_server_group_id, + 'created_at': new_created_at, + 'updated_at': new_updated_at + } + + test_LB_obj = copy.deepcopy(self.LB_obj) + + test_LB_obj.update(update_dict) + + self.assertEqual(reference_LB_obj, test_LB_obj) + + def test_LoadBalancer_update_add_vip(self): + + new_ip = '192.0.2.44' + new_subnet_id = uuidutils.generate_uuid() + new_network_id = uuidutils.generate_uuid() + new_port_id = uuidutils.generate_uuid() + new_qos_id = uuidutils.generate_uuid() + + reference_VIP_obj = data_models.Vip( + load_balancer_id=self.LB_ID, + ip_address=new_ip, + subnet_id=new_subnet_id, + network_id=new_network_id, + port_id=new_port_id, + load_balancer=None, + qos_policy_id=new_qos_id + ) + + update_dict = { + 'vip': { + 'ip_address': new_ip, + 'subnet_id': new_subnet_id, + 'network_id': new_network_id, + 'port_id': new_port_id, + 'load_balancer': None, + 'qos_policy_id': new_qos_id + } + } + + test_LB_obj = copy.deepcopy(self.LB_obj) + + test_LB_obj.update(update_dict) + + self.assertEqual(reference_VIP_obj, test_LB_obj.vip) + + def test_LoadBalancer_update_vip_update(self): + + new_id = uuidutils.generate_uuid() + new_ip = '192.0.2.44' + new_subnet_id = uuidutils.generate_uuid() + new_network_id = uuidutils.generate_uuid() + new_port_id = uuidutils.generate_uuid() + new_qos_id = uuidutils.generate_uuid() + + reference_VIP_obj = data_models.Vip( + load_balancer_id=new_id, + ip_address=new_ip, + subnet_id=new_subnet_id, + network_id=new_network_id, + port_id=new_port_id, + qos_policy_id=new_qos_id + ) + + update_dict = { + 'vip': { + 'load_balancer_id': new_id, + 'ip_address': new_ip, + 'subnet_id': new_subnet_id, + 'network_id': new_network_id, + 'port_id': new_port_id, + 'qos_policy_id': new_qos_id + } + } + + test_LB_obj = copy.deepcopy(self.LB_obj) + + test_LB_obj.vip = copy.deepcopy(self.VIP_obj) + + test_LB_obj.update(update_dict) + + self.assertEqual(reference_VIP_obj, test_LB_obj.vip) + + def test_Pool_update(self): + + new_id = uuidutils.generate_uuid() + new_project_id = uuidutils.generate_uuid() + new_name = 'new-test-pool' + new_description = 'new-test-pool-description' + new_lb_id = uuidutils.generate_uuid() + new_protocol = 'sneaker' + new_lb_algorithm = 'JustOne' + new_enabled = False + new_provisioning_status = 'new-great' + new_operating_status = 'new-even-better' + new_created_at = self.CREATED_AT + datetime.timedelta(minutes=5) + new_updated_at = self.UPDATED_AT + 
datetime.timedelta(minutes=10) + + reference_Pool_obj = data_models.Pool( + id=new_id, + project_id=new_project_id, + name=new_name, + description=new_description, + load_balancer_id=new_lb_id, + protocol=new_protocol, + lb_algorithm=new_lb_algorithm, + enabled=new_enabled, + provisioning_status=new_provisioning_status, + operating_status=new_operating_status, + members=[], + health_monitor=None, + session_persistence=None, + listeners=[], + l7policies=[], + created_at=new_created_at, + updated_at=new_updated_at) + + update_dict = { + 'id': new_id, + 'project_id': new_project_id, + 'name': new_name, + 'description': new_description, + 'load_balancer_id': new_lb_id, + 'protocol': new_protocol, + 'lb_algorithm': new_lb_algorithm, + 'enabled': new_enabled, + 'provisioning_status': new_provisioning_status, + 'operating_status': new_operating_status, + 'created_at': new_created_at, + 'updated_at': new_updated_at} + + test_Pool_obj = copy.deepcopy(self.POOL_obj) + + test_Pool_obj.update(update_dict) + + self.assertEqual(reference_Pool_obj, test_Pool_obj) + + def test_Pool_update_add_SP(self): + + new_type = 'glue' + new_cookie_name = 'chip' + + reference_SP_obj = data_models.SessionPersistence( + pool_id=self.POOL_ID, + type=new_type, + cookie_name=new_cookie_name, + pool=None) + + update_dict = { + 'session_persistence': { + 'type': new_type, + 'cookie_name': new_cookie_name + } + } + + test_Pool_obj = copy.deepcopy(self.POOL_obj) + + test_Pool_obj.update(update_dict) + + self.assertEqual(reference_SP_obj, test_Pool_obj.session_persistence) + + def test_Pool_update_delete_SP(self): + + update_dict = {'session_persistence': {}} + + test_Pool_obj = copy.deepcopy(self.POOL_obj) + + test_Pool_obj.session_persistence = copy.deepcopy(self.SP_obj) + + test_Pool_obj.session_persistence.pool = test_Pool_obj + + test_Pool_obj.update(update_dict) + + self.assertIsNone(test_Pool_obj.session_persistence) + + def test_Pool_update_SP_update(self): + + new_type = 'glue' + new_cookie_name = 'chip' + + update_dict = { + 'session_persistence': { + 'type': new_type, + 'cookie_name': new_cookie_name + } + } + + test_Pool_obj = copy.deepcopy(self.POOL_obj) + + reference_SP_obj = data_models.SessionPersistence( + pool_id=self.POOL_ID, + type=new_type, + cookie_name=new_cookie_name, + pool=test_Pool_obj) + + test_Pool_obj.session_persistence = copy.deepcopy(self.SP_obj) + + test_Pool_obj.session_persistence.pool = test_Pool_obj + + test_Pool_obj.update(update_dict) + + self.assertEqual(reference_SP_obj, test_Pool_obj.session_persistence) + + def test_Amphora_update(self): + + new_id = uuidutils.generate_uuid() + new_status = constants.ERROR + new_role = constants.ROLE_BACKUP + new_vrrp_priority = constants.ROLE_BACKUP_PRIORITY + new_created_at = self.CREATED_AT + datetime.timedelta(minutes=5) + new_updated_at = self.UPDATED_AT + datetime.timedelta(minutes=10) + new_image_id = uuidutils.generate_uuid() + new_compute_flavor = uuidutils.generate_uuid() + + update_dict = { + 'id': new_id, + 'status': new_status, + 'role': new_role, + 'vrrp_priority': new_vrrp_priority, + 'created_at': new_created_at, + 'updated_at': new_updated_at, + 'image_id': new_image_id, + 'compute_flavor': new_compute_flavor + } + + test_Amp_obj = copy.deepcopy(self.AMP_obj) + + reference_Amp_obj = data_models.Amphora( + id=new_id, + load_balancer_id=self.LB_ID, + compute_id=self.COMPUTE_ID, + status=new_status, + lb_network_ip=None, + vrrp_ip=None, + ha_ip=None, + vrrp_port_id=None, + ha_port_id=self.VIP_PORT_ID, + load_balancer=self.LB_obj, + 
role=new_role, + cert_expiration=None, + cert_busy=False, + vrrp_interface=None, + vrrp_id=None, + vrrp_priority=constants.ROLE_BACKUP_PRIORITY, + cached_zone=None, + created_at=new_created_at, + updated_at=new_updated_at, + image_id=new_image_id, + compute_flavor=new_compute_flavor + ) + + test_Amp_obj.update(update_dict) + + self.assertEqual(reference_Amp_obj, test_Amp_obj) + + def test_Quota_update(self): + + new_loadbalancer_quota = 10 + new_listener_quota = 11 + new_pool_quota = 12 + new_healthmonitor_quota = 13 + new_member_quota = 14 + new_l7policy_quota = 15 + new_l7rule_quota = 16 + + update_dict = { + 'load_balancer': new_loadbalancer_quota, + 'listener': new_listener_quota, + 'pool': new_pool_quota, + 'health_monitor': new_healthmonitor_quota, + 'member': new_member_quota, + 'l7policy': new_l7policy_quota, + 'l7rule': new_l7rule_quota + } + + test_Quota_obj = copy.deepcopy(self.QUOTA_obj) + + reference_Quota_obj = data_models.Quotas( + project_id=self.PROJECT_ID, + load_balancer=new_loadbalancer_quota, + listener=new_listener_quota, + pool=new_pool_quota, + health_monitor=new_healthmonitor_quota, + member=new_member_quota, + l7policy=new_l7policy_quota, + l7rule=new_l7rule_quota, + in_use_health_monitor=None, + in_use_listener=None, + in_use_load_balancer=None, + in_use_member=None, + in_use_pool=None, + in_use_l7policy=None, + in_use_l7rule=None + ) + + test_Quota_obj.update(update_dict) + + self.assertEqual(reference_Quota_obj, test_Quota_obj) + + def test_ListenerStatistics_iadd(self): + # test incrementing add function + + bytes_in1 = random.randrange(1000000000) + bytes_out1 = random.randrange(1000000000) + active_conns1 = random.randrange(1000000000) + total_conns1 = random.randrange(1000000000) + request_errors1 = random.randrange(1000000000) + stats_1 = data_models.ListenerStatistics( + listener_id=self.LISTENER_ID, + amphora_id=self.AMP_ID, + bytes_in=bytes_in1, + bytes_out=bytes_out1, + active_connections=active_conns1, + total_connections=total_conns1, + request_errors=request_errors1 + ) + + bytes_in2 = random.randrange(1000000000) + bytes_out2 = random.randrange(1000000000) + active_conns2 = random.randrange(1000000000) + total_conns2 = random.randrange(1000000000) + request_errors2 = random.randrange(1000000000) + stats_2 = data_models.ListenerStatistics( + listener_id="listener 2", + amphora_id="amphora 2", + bytes_in=bytes_in2, + bytes_out=bytes_out2, + active_connections=active_conns2, + total_connections=total_conns2, + request_errors=request_errors2 + ) + + # test successful += + stats_1 += stats_2 + + # not a delta, so it won't be incremented + self.assertEqual(stats_1.active_connections, active_conns1) + self.assertEqual(stats_1.listener_id, self.LISTENER_ID) + self.assertEqual(stats_1.amphora_id, self.AMP_ID) + + # deltas will be incremented + self.assertEqual(stats_1.bytes_in, bytes_in1 + bytes_in2) + self.assertEqual(stats_1.bytes_out, bytes_out1 + bytes_out2) + self.assertEqual(stats_1.total_connections, + total_conns1 + total_conns2) + self.assertEqual(stats_1.request_errors, + request_errors1 + request_errors2) + + # test incrementing an incompatible object + self.assertRaises(TypeError, stats_1.__iadd__, "boom") + + def test_TLSContainer_serialization(self): + tls_container = data_models.TLSContainer( + id=self.TLS_CONTAINER_ID, + primary_cn='fake_cn', + certificate=b'certificate_buffer1', + private_key=b'private_key1', + passphrase=b'passphrase1', + intermediates=[ + b'intermediate_buffer1', + b'intermediate_buffer2', + ] + ) + tls_container_dict = 
tls_container.to_dict(recurse=True) + json_buffer = json.dumps(tls_container_dict) + json_doc = json.loads(json_buffer) + + self.assertEqual(tls_container_dict, json_doc) diff --git a/octavia/tests/unit/common/test_decorators.py b/octavia/tests/unit/common/test_decorators.py new file mode 100644 index 0000000000..afeb77c37f --- /dev/null +++ b/octavia/tests/unit/common/test_decorators.py @@ -0,0 +1,56 @@ +# Copyright 2016 +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Test for the decorator which provides backward compatibility for V1 API.""" + +import octavia.common.decorators as dec +import octavia.tests.unit.base as base + + +class TestDecorator(base.TestCase): + + @dec.rename_kwargs(a='z') + class TestClass: + def __init__(self, x, z=None): + self.x = x + self.z = z + + @dec.rename_kwargs(a='z') + class TestClassDupe: + def __init__(self, x, z=None): + self.x = x + self.z = z + + def test_get(self): + obj = self.TestClass(1, a=3) + self.assertEqual(1, obj.x) + self.assertEqual(3, obj.z) + self.assertEqual(obj.z, obj.a) + + def test_set(self): + obj = self.TestClass(1, a=3) + obj.a = 5 + self.assertEqual(1, obj.x) + self.assertEqual(5, obj.z) + self.assertEqual(obj.z, obj.a) + + def test_similar_classes(self): + obj = self.TestClass(1, z=5) + obj2 = self.TestClassDupe(2, z=10) + self.assertEqual(5, obj.z) + self.assertEqual(10, obj2.z) + self.assertEqual(obj.z, obj.a) + self.assertEqual(obj2.z, obj2.a) diff --git a/octavia/tests/unit/common/test_exceptions.py b/octavia/tests/unit/common/test_exceptions.py new file mode 100644 index 0000000000..5f75a6155d --- /dev/null +++ b/octavia/tests/unit/common/test_exceptions.py @@ -0,0 +1,26 @@ +# Copyright 2014, Doug Wiegley, A10 Networks. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import octavia.common.exceptions as exc +import octavia.tests.unit.base as base + + +class TestExceptions(base.TestCase): + # Rough sanity test of module import; not meant to be exhaustive + + def test_exception(self): + try: + raise exc.NotFound(resource="test", id="test") + except exc.NotFound: + pass diff --git a/octavia/tests/unit/common/test_keystone.py b/octavia/tests/unit/common/test_keystone.py new file mode 100644 index 0000000000..16774243e2 --- /dev/null +++ b/octavia/tests/unit/common/test_keystone.py @@ -0,0 +1,86 @@ +# Copyright Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock +from unittest.mock import call + +from keystoneauth1 import exceptions as ks_exceptions +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +import octavia.common.keystone as ks +import octavia.tests.unit.base as base + + +class TestKeystoneSession(base.TestCase): + + @mock.patch("oslo_config.cfg.ConfigOpts.get_location", return_value=None) + @mock.patch("octavia.common.keystone.ks_loading" + ".load_auth_from_conf_options") + @mock.patch("octavia.common.keystone.LOG") + def test_get_auth_neutron_override(self, mock_log, mock_load_auth, + mock_get_location): + opt_mock = mock.MagicMock() + opt_mock.dest = "foo" + conf = oslo_fixture.Config(cfg.CONF) + conf.conf.service_auth.cafile = "bar" + + mock_load_auth.side_effect = [ + ks_exceptions.auth_plugins.MissingRequiredOptions( + [opt_mock]), + None, + None + ] + + sess = ks.KeystoneSession("neutron") + sess.get_auth() + + mock_load_auth.assert_has_calls([call(cfg.CONF, 'neutron'), + call(cfg.CONF, 'service_auth'), + call(cfg.CONF, 'neutron')]) + mock_log.debug.assert_has_calls( + [call("Overriding [%s].%s with '%s'", 'neutron', 'cafile', + 'bar')] + ) + + # Test case for https://bugs.launchpad.net/octavia/+bug/2051604 + @mock.patch("octavia.common.keystone.ks_loading" + ".load_auth_from_conf_options") + @mock.patch("octavia.common.keystone.LOG") + def test_get_auth_neutron_override_endpoint(self, + mock_log, + mock_load_auth): + opt_mock = mock.MagicMock() + opt_mock.dest = "foo" + conf = oslo_fixture.Config(cfg.CONF) + conf.conf.set_default('endpoint_override', 'default_endpoint', + 'service_auth') + conf.conf.set_default('endpoint_override', 'new_endpoint', + 'neutron') + + mock_load_auth.side_effect = [ + ks_exceptions.auth_plugins.MissingRequiredOptions( + [opt_mock]), + None, + None + ] + + sess = ks.KeystoneSession("neutron") + sess.get_auth() + + # [service_auth].endpoint_override should not override + # [neutron].endpoint_override + self.assertNotIn( + call("Overriding [%s].%s with '%s'", 'neutron', + 'endpoint_override', 'default_endpoint'), + mock_log.debug.mock_calls) diff --git a/octavia/tests/unit/common/test_policy.py b/octavia/tests/unit/common/test_policy.py new file mode 100644 index 0000000000..cfb92d4a33 --- /dev/null +++ b/octavia/tests/unit/common/test_policy.py @@ -0,0 +1,253 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
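For context before the test body: the bare oslo.policy flow the cases below walk through — register rule defaults, then authorize an action against a target and credentials. This sketch uses raw oslo.policy with an invented rule name and credentials; Octavia's own enforcer additionally translates failures into exceptions.PolicyForbidden, which is what the tests assert.

from oslo_config import cfg
from oslo_policy import policy as oslo_policy

conf = cfg.ConfigOpts()
conf([])  # oslo.config requires the conf object to be called before use
enforcer = oslo_policy.Enforcer(conf)
# '@' always passes, '!' always fails.
enforcer.register_defaults([oslo_policy.RuleDefault('example:allowed', '@')])
assert enforcer.authorize('example:allowed', {}, {'roles': ['member']})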
+ +"""Test of Policy Engine For Octavia.""" + +import tempfile + +from oslo_config import fixture as oslo_fixture +from oslo_log import log as logging +from oslo_policy import policy as oslo_policy +import requests_mock + +from octavia.common import config +from octavia.common import context +from octavia.common import exceptions +from octavia.common import policy +from octavia.tests.unit import base + +CONF = config.cfg.CONF +LOG = logging.getLogger(__name__) + + +class PolicyFileTestCase(base.TestCase): + + def setUp(self): + super().setUp() + + self.conf = self.useFixture(oslo_fixture.Config(CONF)) + policy.reset() + self.target = {} + + def test_modified_policy_reloads(self): + with tempfile.NamedTemporaryFile(mode='w', delete=True) as tmp: + self.conf.load_raw_values( + group='oslo_policy', policy_file=tmp.name) + + tmp.write('{"example:test": ""}') + tmp.flush() + + self.context = context.RequestContext('fake', project_id='fake') + + rule = oslo_policy.RuleDefault('example:test', "") + policy.get_enforcer().register_defaults([rule]) + + action = "example:test" + policy.get_enforcer().authorize(action, self.target, self.context) + + tmp.seek(0) + tmp.write('{"example:test": "!"}') + tmp.flush() + policy.get_enforcer().load_rules(True) + self.assertRaises(exceptions.PolicyForbidden, + policy.get_enforcer().authorize, + action, self.target, self.context) + + +class PolicyTestCase(base.TestCase): + + def setUp(self): + super().setUp() + + self.conf = self.useFixture(oslo_fixture.Config()) + # diltram: this one must be removed after fixing issue in oslo.config + # https://bugs.launchpad.net/oslo.config/+bug/1645868 + self.conf.conf.__call__(args=[]) + policy.reset() + self.context = context.RequestContext('fake', project_id='fake', + roles=['member']) + + self.rules = [ + oslo_policy.RuleDefault("true", "@"), + oslo_policy.RuleDefault("example:allowed", "@"), + oslo_policy.RuleDefault("example:denied", "!"), + oslo_policy.RuleDefault("example:get_http", + "http://www.example.com/"), + oslo_policy.RuleDefault("example:my_file", + "role:compute_admin or " + "project_id:%(project_id)s"), + oslo_policy.RuleDefault("example:early_and_fail", "! 
and @"), + oslo_policy.RuleDefault("example:early_or_success", "@ or !"), + oslo_policy.RuleDefault("example:lowercase_admin", + "role:admin or role:sysadmin"), + oslo_policy.RuleDefault("example:uppercase_admin", + "role:ADMIN or role:sysadmin"), + ] + policy.get_enforcer().register_defaults(self.rules) + self.target = {} + + def test_authorize_nonexistent_action_throws(self): + action = "example:noexist" + self.assertRaises( + oslo_policy.PolicyNotRegistered, policy.get_enforcer().authorize, + action, self.target, self.context) + + def test_authorize_bad_action_throws(self): + action = "example:denied" + self.assertRaises( + exceptions.PolicyForbidden, policy.get_enforcer().authorize, + action, self.target, self.context) + + def test_authorize_bad_action_noraise(self): + action = "example:denied" + result = policy.get_enforcer().authorize(action, self.target, + self.context, False) + self.assertFalse(result) + + def test_authorize_good_action(self): + action = "example:allowed" + result = policy.get_enforcer().authorize(action, self.target, + self.context) + self.assertTrue(result) + + @requests_mock.mock() + def test_authorize_http(self, req_mock): + req_mock.post('http://www.example.com/', text='False') + action = "example:get_http" + self.assertRaises(exceptions.PolicyForbidden, + policy.get_enforcer().authorize, action, self.target, + self.context) + + def test_templatized_authorization(self): + target_mine = {'project_id': 'fake'} + target_not_mine = {'project_id': 'another'} + action = "example:my_file" + + policy.get_enforcer().authorize(action, target_mine, self.context) + self.assertRaises(exceptions.PolicyForbidden, + policy.get_enforcer().authorize, + action, target_not_mine, self.context) + + def test_early_AND_authorization(self): + action = "example:early_and_fail" + self.assertRaises(exceptions.PolicyForbidden, + policy.get_enforcer().authorize, action, self.target, + self.context) + + def test_early_OR_authorization(self): + action = "example:early_or_success" + policy.get_enforcer().authorize(action, self.target, self.context) + + def test_ignore_case_role_check(self): + lowercase_action = "example:lowercase_admin" + uppercase_action = "example:uppercase_admin" + + # NOTE(dprince) we mix case in the Admin role here to ensure + # case is ignored + self.context = context.RequestContext('admin', project_id='fake', + roles=['AdMiN']) + + policy.get_enforcer().authorize(lowercase_action, self.target, + self.context) + policy.get_enforcer().authorize(uppercase_action, self.target, + self.context) + + def test_check_is_admin_fail(self): + self.assertFalse(policy.get_enforcer().check_is_admin(self.context)) + + def test_check_is_admin(self): + self.context = context.RequestContext('admin', project_id='fake', + roles=['AdMiN']) + + self.assertTrue(policy.get_enforcer().check_is_admin(self.context)) + + def test_check_is_admin_with_system_scope_token(self): + conf = oslo_fixture.Config(config.cfg.CONF) + conf.config(group="oslo_policy", enforce_new_defaults=True) + conf.config(group="oslo_policy", enforce_scope=True) + self.context = context.RequestContext('admin', roles=['AdMiN'], + system_scope='all') + + self.assertFalse(policy.get_enforcer().check_is_admin(self.context)) + + def test_get_enforcer(self): + self.assertTrue(isinstance(policy.get_no_context_enforcer(), + oslo_policy.Enforcer)) + + +class IsAdminCheckTestCase(base.TestCase): + + def setUp(self): + super().setUp() + + self.conf = self.useFixture(oslo_fixture.Config()) + # diltram: this one must be removed 
after fixing issue in oslo.config + # https://bugs.launchpad.net/oslo.config/+bug/1645868 + self.conf.conf.__call__(args=[]) + + self.context = context.RequestContext('fake', project_id='fake') +
+ def test_init_true(self): + check = policy.IsAdminCheck('is_admin', 'True') + + self.assertEqual(check.kind, 'is_admin') + self.assertEqual(check.match, 'True') + self.assertTrue(check.expected) +
+ def test_init_false(self): + check = policy.IsAdminCheck('is_admin', 'nottrue') + + self.assertEqual(check.kind, 'is_admin') + self.assertEqual(check.match, 'False') + self.assertFalse(check.expected) +
+ def test_call_true(self): + check = policy.IsAdminCheck('is_admin', 'True') + + self.assertTrue( + check('target', dict(is_admin=True), policy.get_enforcer())) + self.assertFalse( + check('target', dict(is_admin=False), policy.get_enforcer())) +
+ def test_call_false(self): + check = policy.IsAdminCheck('is_admin', 'False') + + self.assertFalse( + check('target', dict(is_admin=True), policy.get_enforcer())) + self.assertTrue( + check('target', dict(is_admin=False), policy.get_enforcer())) + +
+class AdminRolePolicyTestCase(base.TestCase): + + def setUp(self): + super().setUp() + + self.conf = self.useFixture(oslo_fixture.Config()) + # diltram: this one must be removed after fixing issue in oslo.config + # https://bugs.launchpad.net/oslo.config/+bug/1645868 + self.conf.conf.__call__(args=[]) + + self.context = context.RequestContext('fake', project_id='fake', + roles=['member']) + self.actions = policy.get_enforcer().get_rules().keys() + self.target = {} +
+ def test_authorize_admin_actions_with_nonadmin_context_throws(self): + """Check non-admin context authorization on admin actions + + These must raise a PolicyForbidden exception. + """ + for action in self.actions: + self.assertRaises( + exceptions.PolicyForbidden, policy.get_enforcer().authorize, + action, self.target, self.context) diff --git a/octavia/tests/unit/common/test_stats.py b/octavia/tests/unit/common/test_stats.py new file mode 100644 index 0000000000..7be20eb68a --- /dev/null +++ b/octavia/tests/unit/common/test_stats.py @@ -0,0 +1,95 @@ +# Copyright 2016 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
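+# NOTE: the aggregation these tests verify (StatsMixin.get_listener_stats)
+# is, roughly, the following sketch -- an illustration reconstructed from
+# the assertions below, not a verbatim copy of octavia.common.stats:
+#
+#     totals = data_models.ListenerStatistics(listener_id=listener_id)
+#     for row in stats_rows:            # one ListenerStatistics per amphora
+#         totals += row                 # sums bytes, connections, errors
+#         amp = self.repo_amphora.get(session, id=row.amphora_id)
+#         if amp and amp.status == constants.AMPHORA_ALLOCATED:
+#             totals.active_connections += row.active_connections
+#
+# active_connections is only counted for amphorae still ALLOCATED, which is
+# why test_get_listener_stats_with_amphora_deleted expects it to be 0.
+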
+import random +from unittest import mock + +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import stats +from octavia.tests.unit import base + + +class TestStatsMixin(base.TestCase): + + def setUp(self): + super().setUp() + self.sm = stats.StatsMixin() + + self.session = mock.MagicMock() + self.listener_id = uuidutils.generate_uuid() + self.amphora_id = uuidutils.generate_uuid() + + self.repo_listener_stats = mock.MagicMock() + self.sm.listener_stats_repo = self.repo_listener_stats + + self.fake_stats = data_models.ListenerStatistics( + listener_id=self.listener_id, + amphora_id=self.amphora_id, + bytes_in=random.randrange(1000000000), + bytes_out=random.randrange(1000000000), + active_connections=random.randrange(1000000000), + total_connections=random.randrange(1000000000), + request_errors=random.randrange(1000000000)) + + self.sm.listener_stats_repo.get_all.return_value = ([self.fake_stats], + None) + + self.repo_amphora = mock.MagicMock() + self.sm.repo_amphora = self.repo_amphora + + def test_get_listener_stats(self): + fake_amp = mock.MagicMock() + fake_amp.status = constants.AMPHORA_ALLOCATED + self.sm.repo_amphora.get.return_value = fake_amp + + ls_stats = self.sm.get_listener_stats( + self.session, self.listener_id) + self.repo_listener_stats.get_all.assert_called_once_with( + self.session, listener_id=self.listener_id) + self.repo_amphora.get.assert_called_once_with( + self.session, id=self.amphora_id) + + self.assertEqual(self.fake_stats.bytes_in, ls_stats.bytes_in) + self.assertEqual(self.fake_stats.bytes_out, ls_stats.bytes_out) + self.assertEqual( + self.fake_stats.active_connections, ls_stats.active_connections) + self.assertEqual( + self.fake_stats.total_connections, ls_stats.total_connections) + self.assertEqual( + self.fake_stats.request_errors, ls_stats.request_errors) + self.assertEqual(self.listener_id, ls_stats.listener_id) + self.assertIsNone(ls_stats.amphora_id) + + def test_get_listener_stats_with_amphora_deleted(self): + fake_amp = mock.MagicMock() + fake_amp.status = constants.DELETED + self.sm.repo_amphora.get.return_value = fake_amp + + ls_stats = self.sm.get_listener_stats(self.session, self.listener_id) + self.repo_listener_stats.get_all.assert_called_once_with( + self.session, listener_id=self.listener_id) + self.repo_amphora.get.assert_called_once_with( + self.session, id=self.amphora_id) + + self.assertEqual(self.fake_stats.bytes_in, ls_stats.bytes_in) + self.assertEqual(self.fake_stats.bytes_out, ls_stats.bytes_out) + self.assertEqual(0, ls_stats.active_connections) + self.assertEqual( + self.fake_stats.total_connections, ls_stats.total_connections) + self.assertEqual( + self.fake_stats.request_errors, ls_stats.request_errors) + self.assertEqual(self.listener_id, ls_stats.listener_id) + self.assertIsNone(ls_stats.amphora_id) diff --git a/octavia/tests/unit/common/test_utils.py b/octavia/tests/unit/common/test_utils.py new file mode 100644 index 0000000000..d7176ce7ee --- /dev/null +++ b/octavia/tests/unit/common/test_utils.py @@ -0,0 +1,221 @@ +# Copyright 2014, Doug Wiegley, A10 Networks. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from cryptography import fernet +from octavia_lib.common import constants as lib_consts +from oslo_config import cfg +from oslo_utils import uuidutils + +from octavia.common import constants +import octavia.common.utils as utils +import octavia.tests.unit.base as base + + +class TestConfig(base.TestCase): + + def test_get_hostname(self): + self.assertNotEqual(utils.get_hostname(), '') + + def test_is_ipv4(self): + self.assertTrue(utils.is_ipv4('192.0.2.10')) + self.assertTrue(utils.is_ipv4('169.254.0.10')) + self.assertTrue(utils.is_ipv4('0.0.0.0')) + self.assertFalse(utils.is_ipv4('::')) + self.assertFalse(utils.is_ipv4('2001:db8::1')) + self.assertFalse(utils.is_ipv4('fe80::225:90ff:fefb:53ad')) + + def test_is_ipv6(self): + self.assertFalse(utils.is_ipv6('192.0.2.10')) + self.assertFalse(utils.is_ipv6('169.254.0.10')) + self.assertFalse(utils.is_ipv6('0.0.0.0')) + self.assertTrue(utils.is_ipv6('::')) + self.assertTrue(utils.is_ipv6('2001:db8::1')) + self.assertTrue(utils.is_ipv6('fe80::225:90ff:fefb:53ad')) + + def test_is_cidr_ipv6(self): + self.assertTrue(utils.is_cidr_ipv6('2001:db8::/32')) + self.assertFalse(utils.is_cidr_ipv6('192.0.2.0/32')) + + def test_is_ipv6_lla(self): + self.assertFalse(utils.is_ipv6_lla('192.0.2.10')) + self.assertFalse(utils.is_ipv6_lla('169.254.0.10')) + self.assertFalse(utils.is_ipv6_lla('0.0.0.0')) + self.assertFalse(utils.is_ipv6_lla('::')) + self.assertFalse(utils.is_ipv6_lla('2001:db8::1')) + self.assertTrue(utils.is_ipv6_lla('fe80::225:90ff:fefb:53ad')) + + def test_ip_port_str(self): + self.assertEqual("127.0.0.1:8080", + utils.ip_port_str('127.0.0.1', 8080)) + self.assertEqual("[::1]:8080", + utils.ip_port_str('::1', 8080)) + + def test_netmask_to_prefix(self): + self.assertEqual(utils.netmask_to_prefix('255.0.0.0'), 8) + self.assertEqual(utils.netmask_to_prefix('255.255.0.0'), 16) + self.assertEqual(utils.netmask_to_prefix('255.255.255.0'), 24) + self.assertEqual(utils.netmask_to_prefix('255.255.255.128'), 25) + + def test_ip_netmask_to_cidr(self): + self.assertEqual('10.0.0.0/8', + utils.ip_netmask_to_cidr('10.0.0.1', '255.0.0.0')) + self.assertEqual('10.0.0.0/9', + utils.ip_netmask_to_cidr('10.0.0.1', '255.128.0.0')) + self.assertEqual('10.0.0.0/16', + utils.ip_netmask_to_cidr('10.0.0.1', '255.255.0.0')) + self.assertEqual('10.0.0.0/20', + utils.ip_netmask_to_cidr('10.0.0.1', '255.255.240.0')) + self.assertEqual('10.0.0.0/30', utils.ip_netmask_to_cidr( + '10.0.0.1', '255.255.255.252')) + + def test_expand_expected_codes(self): + exp_codes = '' + self.assertEqual(utils.expand_expected_codes(exp_codes), + set()) + exp_codes = '200' + self.assertEqual(utils.expand_expected_codes(exp_codes), + {'200'}) + exp_codes = '200, 201' + self.assertEqual(utils.expand_expected_codes(exp_codes), + {'200', '201'}) + exp_codes = '200, 201,202' + self.assertEqual(utils.expand_expected_codes(exp_codes), + {'200', '201', '202'}) + exp_codes = '200-202' + self.assertEqual(utils.expand_expected_codes(exp_codes), + {'200', '201', '202'}) + exp_codes = '200-202, 205' + self.assertEqual(utils.expand_expected_codes(exp_codes), + 
{'200', '201', '202', '205'}) + exp_codes = '200, 201-203' + self.assertEqual(utils.expand_expected_codes(exp_codes), + {'200', '201', '202', '203'}) + exp_codes = '200, 201-203, 205' + self.assertEqual(utils.expand_expected_codes(exp_codes), + {'200', '201', '202', '203', '205'}) + exp_codes = '201-200, 205' + self.assertEqual(utils.expand_expected_codes(exp_codes), + {'205'}) +
+ def test_base64_sha1_string(self): + str_to_sha1 = [ + # no special cases str (no altchars) + ('77e7d60d-e137-4246-8a84-a25db33571cd', + 'iVZVQ5AKmk2Ae0uGLP0Ue4OseRM='), + # backward compat amphorae with - in str[1:] + ('9c6e5f27-a0da-4ceb-afe5-5a81230be42e', + 'NjrNgt3Egl-H5ScbYM5ChtUH3M8='), + # sha1 would start with -, now replaced with x + ('4db4a3cf-9fef-4057-b1fd-b2afbf7a8a0f', + 'xxqntK8jJ_gE3QEmh-D1-XgCW_E=') + ] + for string, sha1 in str_to_sha1: + self.assertEqual(sha1, utils.base64_sha1_string(string)) +
+ @mock.patch('stevedore.driver.DriverManager') + def test_get_amphora_driver(self, mock_stevedore_driver): + FAKE_AMP_DRIVER = 'fake_amp_drvr' + driver_mock = mock.MagicMock() + driver_mock.driver = FAKE_AMP_DRIVER + mock_stevedore_driver.return_value = driver_mock + + result = utils.get_amphora_driver() + + self.assertEqual(FAKE_AMP_DRIVER, result) +
+ def test_get_vip_security_group_name(self): + FAKE_LB_ID = uuidutils.generate_uuid() + self.assertIsNone(utils.get_vip_security_group_name(None)) + + expected_sg_name = constants.VIP_SECURITY_GROUP_PREFIX + FAKE_LB_ID + self.assertEqual(expected_sg_name, + utils.get_vip_security_group_name(FAKE_LB_ID)) +
+ def test_map_protocol_to_nftable_protocol(self): + result = utils.map_protocol_to_nftable_protocol( + {constants.PROTOCOL: lib_consts.PROTOCOL_TCP}) + self.assertEqual({constants.PROTOCOL: lib_consts.PROTOCOL_TCP}, result) + + result = utils.map_protocol_to_nftable_protocol( + {constants.PROTOCOL: lib_consts.PROTOCOL_HTTP}) + self.assertEqual({constants.PROTOCOL: lib_consts.PROTOCOL_TCP}, result) + + result = utils.map_protocol_to_nftable_protocol( + {constants.PROTOCOL: lib_consts.PROTOCOL_HTTPS}) + self.assertEqual({constants.PROTOCOL: lib_consts.PROTOCOL_TCP}, result) + + result = utils.map_protocol_to_nftable_protocol( + {constants.PROTOCOL: lib_consts.PROTOCOL_TERMINATED_HTTPS}) + self.assertEqual({constants.PROTOCOL: lib_consts.PROTOCOL_TCP}, result) + + result = utils.map_protocol_to_nftable_protocol( + {constants.PROTOCOL: lib_consts.PROTOCOL_PROXY}) + self.assertEqual({constants.PROTOCOL: lib_consts.PROTOCOL_TCP}, result) + + result = utils.map_protocol_to_nftable_protocol( + {constants.PROTOCOL: lib_consts.PROTOCOL_PROXYV2}) + self.assertEqual({constants.PROTOCOL: lib_consts.PROTOCOL_TCP}, result) + + result = utils.map_protocol_to_nftable_protocol( + {constants.PROTOCOL: lib_consts.PROTOCOL_UDP}) + self.assertEqual({constants.PROTOCOL: lib_consts.PROTOCOL_UDP}, result) + + result = utils.map_protocol_to_nftable_protocol( + {constants.PROTOCOL: lib_consts.PROTOCOL_SCTP}) + self.assertEqual({constants.PROTOCOL: lib_consts.PROTOCOL_SCTP}, + result) + + result = utils.map_protocol_to_nftable_protocol( + {constants.PROTOCOL: lib_consts.PROTOCOL_PROMETHEUS}) + self.assertEqual({constants.PROTOCOL: lib_consts.PROTOCOL_TCP}, result) +
+ def test_rotate_server_certs_key_passphrase(self): + """Test rotate server_certs_key_passphrase.""" + # Use one key (default) and encrypt/decrypt it + cfg.CONF.set_override( + 'server_certs_key_passphrase', + ['insecure-key-do-not-use-this-key'], + group='certificates') + fer = 
utils.get_server_certs_key_passphrases_fernet() + data1 = 'some data one' + enc1 = fer.encrypt(data1.encode('utf-8')) + self.assertEqual( + data1, fer.decrypt(enc1).decode('utf-8')) + + # Use two keys, first key is new and used for encrypting + # and default key can still be used for decryption + cfg.CONF.set_override( + 'server_certs_key_passphrase', + ['another-insecure-key-do-not-use2', + 'insecure-key-do-not-use-this-key'], + group='certificates') + fer = utils.get_server_certs_key_passphrases_fernet() + data2 = 'some other data' + enc2 = fer.encrypt(data2.encode('utf-8')) + self.assertEqual( + data2, fer.decrypt(enc2).decode('utf-8')) + self.assertEqual( + data1, fer.decrypt(enc1).decode('utf-8')) + + # Remove first key and we should only be able to + # decrypt the newest data + cfg.CONF.set_override( + 'server_certs_key_passphrase', + ['another-insecure-key-do-not-use2'], + group='certificates') + fer = utils.get_server_certs_key_passphrases_fernet() + self.assertEqual( + data2, fer.decrypt(enc2).decode('utf-8')) + self.assertRaises(fernet.InvalidToken, fer.decrypt, enc1) diff --git a/octavia/tests/unit/common/test_validate.py b/octavia/tests/unit/common/test_validate.py new file mode 100644 index 0000000000..0e56d66d6c --- /dev/null +++ b/octavia/tests/unit/common/test_validate.py @@ -0,0 +1,605 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +from wsme import types as wtypes + +import octavia.common.constants as constants +import octavia.common.exceptions as exceptions +import octavia.common.validate as validate +from octavia.network import base as network_base +from octavia.network import data_models as network_models +import octavia.tests.unit.base as base + + +class TestValidations(base.TestCase): + # Note that particularly complex validation testing is handled via + # functional tests elsewhere (ex. 
repository tests) + + def setUp(self): + super().setUp() + self.conf = oslo_fixture.Config(cfg.CONF) +
+ def test_validate_url(self): + ret = validate.url('http://example.com/') + self.assertTrue(ret) +
+ def test_validate_bad_url(self): + self.assertRaises(exceptions.InvalidURL, validate.url, 'bad url') +
+ def test_validate_url_bad_schema(self): + self.assertRaises(exceptions.InvalidURL, validate.url, + 'ssh://www.example.com/') +
+ def test_validate_url_path(self): + self.assertTrue(validate.url_path('/foo')) + self.assertTrue(validate.url_path('/foo%0Abar')) +
+ def test_validate_bad_url_path(self): + self.assertRaises(exceptions.InvalidURLPath, validate.url_path, 'foo') + self.assertRaises(exceptions.InvalidURLPath, validate.url_path, + '/foo\nbar') +
+ def test_validate_header_name(self): + ret = validate.header_name('Some-header') + self.assertTrue(ret) +
+ def test_validate_bad_header_name(self): + self.assertRaises(exceptions.InvalidString, + validate.header_name, + 'bad header') +
+ def test_validate_cookie_value_string(self): + ret = validate.cookie_value_string('some-cookie') + self.assertTrue(ret) +
+ def test_validate_bad_cookie_value_string(self): + self.assertRaises(exceptions.InvalidString, + validate.cookie_value_string, + 'bad cookie value;') +
+ def test_validate_header_value_string(self): + ret = validate.header_value_string('some-value') + self.assertTrue(ret) +
+ def test_validate_header_value_string_quoted(self): + ret = validate.header_value_string('"some value"') + self.assertTrue(ret) +
+ def test_validate_bad_header_value_string(self): + self.assertRaises(exceptions.InvalidString, + validate.header_value_string, + '\x18') +
+ def test_validate_regex(self): + ret = validate.regex('some regex.*') + self.assertTrue(ret) +
+ def test_validate_bad_regex(self): + self.assertRaises(exceptions.InvalidRegex, validate.regex, + 'bad regex\\') +
+ def test_sanitize_l7policy_api_args_action_reject(self): + l7p = {'action': constants.L7POLICY_ACTION_REJECT, + 'redirect_url': 'http://www.example.com/', + 'redirect_pool_id': 'test-pool', + 'redirect_pool': { + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}} + s_l7p = validate.sanitize_l7policy_api_args(l7p) + self.assertIsNone(s_l7p['redirect_url']) + self.assertIsNone(s_l7p['redirect_pool_id']) + self.assertNotIn('redirect_pool', s_l7p.keys()) +
+ def test_sanitize_l7policy_api_args_action_rdr_pool_id(self): + l7p = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'redirect_url': 'http://www.example.com/', + 'redirect_pool_id': 'test-pool', + 'redirect_pool': { + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}} + s_l7p = validate.sanitize_l7policy_api_args(l7p) + self.assertIsNone(s_l7p['redirect_url']) + self.assertNotIn('redirect_pool', s_l7p.keys()) + self.assertIn('redirect_pool_id', s_l7p.keys()) +
+ def test_sanitize_l7policy_api_args_action_rdr_pool_model(self): + l7p = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'redirect_url': 'http://www.example.com/', + 'redirect_pool_id': None, + 'redirect_pool': { + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}} + s_l7p = validate.sanitize_l7policy_api_args(l7p) + self.assertIsNone(s_l7p['redirect_url']) + self.assertNotIn('redirect_pool_id', s_l7p.keys()) + self.assertIn('redirect_pool', s_l7p.keys()) +
+ def test_sanitize_l7policy_api_args_action_rdr_url(self): + l7p = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, + 'redirect_url': 'http://www.example.com/', + 'redirect_pool_id': 'test-pool', + 'redirect_pool': { + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}} + s_l7p = validate.sanitize_l7policy_api_args(l7p) + self.assertIn('redirect_url', s_l7p.keys()) + self.assertIsNone(s_l7p['redirect_pool_id']) + self.assertNotIn('redirect_pool', s_l7p.keys()) +
+ def test_sanitize_l7policy_api_args_bad_action(self): + l7p = {'action': 'bad-action', + 'redirect_url': 'http://www.example.com/', + 'redirect_pool_id': 'test-pool', + 'redirect_pool': { + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}} + self.assertRaises(exceptions.InvalidL7PolicyAction, + validate.sanitize_l7policy_api_args, l7p) +
+ def test_sanitize_l7policy_api_args_action_none(self): + l7p = {'action': None} + self.assertRaises(exceptions.InvalidL7PolicyAction, + validate.sanitize_l7policy_api_args, l7p, True) +
+ def test_sanitize_l7policy_api_args_both_rdr_args_a(self): + l7p = {'redirect_url': 'http://www.example.com/', + 'redirect_pool_id': 'test-pool'} + self.assertRaises(exceptions.InvalidL7PolicyArgs, + validate.sanitize_l7policy_api_args, l7p) +
+ def test_sanitize_l7policy_api_args_both_rdr_args_b(self): + l7p = {'redirect_url': 'http://www.example.com/', + 'redirect_pool': { + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}} + self.assertRaises(exceptions.InvalidL7PolicyArgs, + validate.sanitize_l7policy_api_args, l7p) +
+ def test_sanitize_l7policy_api_args_rdr_pool_id(self): + l7p = {'redirect_pool_id': 'test-pool', + 'redirect_url': None, + 'redirect_pool': None} + s_l7p = validate.sanitize_l7policy_api_args(l7p) + self.assertIn('redirect_pool_id', s_l7p.keys()) + self.assertIsNone(s_l7p['redirect_url']) + self.assertNotIn('redirect_pool', s_l7p.keys()) + self.assertIn('action', s_l7p.keys()) + self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + s_l7p['action']) +
+ def test_sanitize_l7policy_api_args_rdr_pool_noid(self): + l7p = {'redirect_pool_id': None, + 'redirect_url': None, + 'redirect_pool': { + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN}} + s_l7p = validate.sanitize_l7policy_api_args(l7p) + self.assertIn('redirect_pool', s_l7p.keys()) + self.assertIsNone(s_l7p['redirect_url']) + self.assertNotIn('redirect_pool_id', s_l7p.keys()) + self.assertIn('action', s_l7p.keys()) + self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + s_l7p['action']) +
+ def test_sanitize_l7policy_api_args_rdr_pool_id_none_create(self): + l7p = {'redirect_pool_id': None} + self.assertRaises(exceptions.InvalidL7PolicyAction, + validate.sanitize_l7policy_api_args, l7p, True) +
+ def test_sanitize_l7policy_api_args_rdr_pool_noid_none_create(self): + l7p = {'redirect_pool': None} + self.assertRaises(exceptions.InvalidL7PolicyAction, + validate.sanitize_l7policy_api_args, l7p, True) +
+ def test_sanitize_l7policy_api_args_rdr_pool_both_none_create(self): + l7p = {'redirect_pool': None, + 'redirect_pool_id': None} + self.assertRaises(exceptions.InvalidL7PolicyAction, + validate.sanitize_l7policy_api_args, l7p, True) +
+ def test_sanitize_l7policy_api_args_rdr_url(self): + l7p = {'redirect_pool_id': None, + 'redirect_url': 'http://www.example.com/', + 'redirect_pool': None} + s_l7p = validate.sanitize_l7policy_api_args(l7p) + self.assertIsNone(s_l7p['redirect_pool_id']) + self.assertNotIn('redirect_pool', s_l7p.keys()) + self.assertIn('redirect_url', s_l7p.keys()) + self.assertIn('action', s_l7p.keys()) + self.assertEqual(constants.L7POLICY_ACTION_REDIRECT_TO_URL, + s_l7p['action']) +
+ def test_sanitize_l7policy_api_args_rdr_url_none_create(self): + l7p = {'redirect_url': None} + self.assertRaises(exceptions.InvalidL7PolicyAction, + validate.sanitize_l7policy_api_args, l7p, True) +
+ def test_sanitize_l7policy_api_args_rdr_url_bad_url(self): + l7p = {'redirect_url': 'bad url'} + self.assertRaises(exceptions.InvalidURL, + validate.sanitize_l7policy_api_args, l7p, True) +
+ def test_sanitize_l7policy_api_args_update_action_rdr_pool_arg(self): + l7p = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + 'redirect_url': None, + 'redirect_pool_id': None, + 'redirect_pool': None} + self.assertRaises(exceptions.InvalidL7PolicyArgs, + validate.sanitize_l7policy_api_args, l7p) +
+ def test_sanitize_l7policy_api_args_update_action_rdr_url_arg(self): + l7p = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, + 'redirect_url': None, + 'redirect_pool_id': None, + 'redirect_pool': None} + self.assertRaises(exceptions.InvalidL7PolicyArgs, + validate.sanitize_l7policy_api_args, l7p) +
+ def test_sanitize_l7policy_api_args_create_must_have_action(self): + l7p = {} + self.assertRaises(exceptions.InvalidL7PolicyAction, + validate.sanitize_l7policy_api_args, l7p, True) +
+ def test_sanitize_l7policy_api_args_update_must_have_args(self): + l7p = {} + self.assertRaises(exceptions.InvalidL7PolicyArgs, + validate.sanitize_l7policy_api_args, l7p) +
+ def test_port_exists_with_bad_port(self): + port_id = uuidutils.generate_uuid() + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_port = mock.Mock( + side_effect=network_base.PortNotFound('Port not found')) + self.assertRaises( + exceptions.InvalidSubresource, + validate.port_exists, port_id) +
+ def test_port_exists_with_valid_port(self): + port_id = uuidutils.generate_uuid() + port = network_models.Port(id=port_id) + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_port.return_value = port + self.assertEqual(validate.port_exists(port_id), port) +
+ def test_check_port_in_use(self): + port_id = uuidutils.generate_uuid() + device_id = uuidutils.generate_uuid() + port = network_models.Port(id=port_id, device_id=device_id) + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_port.device_id = port + self.assertRaises( + exceptions.ValidationException, + validate.check_port_in_use, port) +
+ def test_subnet_exists_with_bad_subnet(self): + subnet_id = uuidutils.generate_uuid() + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_subnet = mock.Mock( + side_effect=network_base.SubnetNotFound('Subnet not found')) + self.assertRaises( + exceptions.InvalidSubresource, + validate.subnet_exists, subnet_id) +
+ def test_subnet_exists_with_valid_subnet(self): + subnet_id = uuidutils.generate_uuid() + subnet = network_models.Subnet(id=subnet_id) + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_subnet.return_value = subnet + self.assertEqual(validate.subnet_exists(subnet_id), subnet) + + def 
test_network_exists_with_bad_network(self): + network_id = uuidutils.generate_uuid() + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_network = mock.Mock( + side_effect=network_base.NetworkNotFound('Network not found')) + self.assertRaises( + exceptions.InvalidSubresource, + validate.network_exists_optionally_contains_subnet, network_id) + + def test_network_exists_with_valid_network(self): + network_id = uuidutils.generate_uuid() + network = network_models.Network(id=network_id) + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_network.return_value = network + self.assertEqual( + validate.network_exists_optionally_contains_subnet(network_id), + network) + + def test_network_exists_with_valid_subnet(self): + network_id = uuidutils.generate_uuid() + subnet_id = uuidutils.generate_uuid() + network = network_models.Network( + id=network_id, + subnets=[subnet_id]) + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_network.return_value = network + self.assertEqual( + validate.network_exists_optionally_contains_subnet( + network_id, subnet_id), + network) + + def test_network_exists_with_bad_subnet(self): + network_id = uuidutils.generate_uuid() + subnet_id = uuidutils.generate_uuid() + network = network_models.Network(id=network_id) + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_network.return_value = network + self.assertRaises( + exceptions.InvalidSubresource, + validate.network_exists_optionally_contains_subnet, + network_id, subnet_id) + + def test_network_allowed_by_config(self): + net_id1 = uuidutils.generate_uuid() + net_id2 = uuidutils.generate_uuid() + net_id3 = uuidutils.generate_uuid() + self.conf.config(group="networking", + valid_vip_networks=[net_id1, net_id2]) + validate.network_allowed_by_config(net_id1) + validate.network_allowed_by_config(net_id2) + self.assertRaises( + exceptions.ValidationException, + validate.network_allowed_by_config, net_id3) + + def test_qos_policy_exists(self): + qos_policy_id = uuidutils.generate_uuid() + qos_policy = network_models.QosPolicy(id=qos_policy_id) + with mock.patch( + 'octavia.common.utils.get_network_driver') as net_mock: + net_mock.return_value.get_qos_policy.return_value = qos_policy + self.assertEqual( + validate.qos_policy_exists(qos_policy_id), + qos_policy) + + net_mock.return_value.get_qos_policy.side_effect = Exception + self.assertRaises(exceptions.InvalidSubresource, + validate.qos_policy_exists, + qos_policy_id) + + def test_qos_extension_enabled(self): + network_driver = mock.Mock() + network_driver.qos_enabled.return_value = True + self.assertIsNone(validate.qos_extension_enabled(network_driver)) + + def test_qos_extension_disabled(self): + network_driver = mock.Mock() + network_driver.qos_enabled.return_value = False + self.assertRaises(exceptions.ValidationException, + validate.qos_extension_enabled, + network_driver) + + def test_check_session_persistence(self): + valid_cookie_name_dict = {'type': 'APP_COOKIE', + 'cookie_name': 'chocolate_chip'} + invalid_cookie_name_dict = {'type': 'APP_COOKIE', + 'cookie_name': '@chocolate_chip'} + invalid_type_HTTP_cookie_name_dict = {'type': 'HTTP_COOKIE', + 'cookie_name': 'chocolate_chip'} + invalid_type_IP_cookie_name_dict = {'type': 'SOURCE_IP', + 'cookie_name': 'chocolate_chip'} + invalid_missing_cookie_name_dict = {'type': 'APP_COOKIE'} + + # Validate that a 
good cookie name passes + validate.check_session_persistence(valid_cookie_name_dict) + + # Test raises with providing an invalid cookie name + self.assertRaises(exceptions.ValidationException, + validate.check_session_persistence, + invalid_cookie_name_dict) + + # Test raises type HTTP_COOKIE and providing cookie_name + self.assertRaises(exceptions.ValidationException, + validate.check_session_persistence, + invalid_type_HTTP_cookie_name_dict) + + # Test raises type SOURCE_IP and providing cookie_name + self.assertRaises(exceptions.ValidationException, + validate.check_session_persistence, + invalid_type_IP_cookie_name_dict) + + # Test raises when type APP_COOKIE but no cookie_name + self.assertRaises(exceptions.ValidationException, + validate.check_session_persistence, + invalid_missing_cookie_name_dict) + + # Test catch all exception raises a user friendly message + with mock.patch('re.compile') as compile_mock: + compile_mock.side_effect = Exception + self.assertRaises(exceptions.ValidationException, + validate.check_session_persistence, + valid_cookie_name_dict) + + def test_ip_not_reserved(self): + self.conf.config(group="networking", reserved_ips=['198.51.100.4']) + + # Test good address + validate.ip_not_reserved('203.0.113.5') + + # Test IPv4 reserved address + self.assertRaises(exceptions.InvalidOption, + validate.ip_not_reserved, + '198.51.100.4') + + self.conf.config( + group="networking", + reserved_ips=['2001:0DB8:0000:0000:0000:0000:0000:0005']) + + # Test good IPv6 address + validate.ip_not_reserved('2001:0DB8::9') + + # Test reserved IPv6 expanded + self.assertRaises(exceptions.InvalidOption, + validate.ip_not_reserved, + '2001:0DB8:0000:0000:0000:0000:0000:0005') + + # Test reserved IPv6 short hand notation + self.assertRaises(exceptions.InvalidOption, + validate.ip_not_reserved, + '2001:0DB8::5') + + def test_check_default_ciphers_prohibit_list_conflict(self): + self.conf.config(group='api_settings', + tls_cipher_prohibit_list='PSK-AES128-CBC-SHA') + self.conf.config(group='api_settings', + default_listener_ciphers='ECDHE-ECDSA-AES256-SHA:' + 'PSK-AES128-CBC-SHA:TLS_AES_256_GCM_SHA384') + + self.assertRaises( + exceptions.ValidationException, + validate.check_default_ciphers_prohibit_list_conflict) + + def test_check_tls_version_list(self): + # Test valid list + validate.check_tls_version_list(['TLSv1.1', 'TLSv1.2', 'TLSv1.3']) + # Test invalid list + self.assertRaises( + exceptions.ValidationException, + validate.check_tls_version_list, + ['SSLv3', 'TLSv1.0']) + # Test empty list + self.assertRaises( + exceptions.ValidationException, + validate.check_tls_version_list, + []) + + def test_check_tls_version_min(self): + self.conf.config(group="api_settings", minimum_tls_version='TLSv1.2') + + # Test valid list + validate.check_tls_version_min(['TLSv1.2', 'TLSv1.3']) + + # Test invalid list + self.assertRaises(exceptions.ValidationException, + validate.check_tls_version_min, + ['TLSv1', 'TLSv1.1', 'TLSv1.2']) + + def test_check_default_tls_versions_min_conflict(self): + self.conf.config(group="api_settings", minimum_tls_version='TLSv1.2') + + # Test conflict in listener default + self.conf.config(group="api_settings", default_listener_tls_versions=[ + 'SSLv3', 'TLSv1.2']) + self.assertRaises(exceptions.ValidationException, + validate.check_default_tls_versions_min_conflict) + + # Test conflict in pool default + self.conf.config(group="api_settings", default_listener_tls_versions=[ + 'TLSv1.2']) + self.conf.config(group="api_settings", default_pool_tls_versions=[ + 'TLSv1', 
'TLSv1.3']) + self.assertRaises(exceptions.ValidationException, + validate.check_default_tls_versions_min_conflict) + + def test_check_alpn_protocols(self): + # Test valid list + validate.check_alpn_protocols(['h2', 'http/1.1', 'http/1.0']) + # Test invalid list + self.assertRaises( + exceptions.ValidationException, + validate.check_alpn_protocols, + ['httpie', 'foobar/1.2.3']) + # Test empty list + self.assertRaises( + exceptions.ValidationException, + validate.check_alpn_protocols, + []) + + def test_is_ip_member_of_cidr(self): + self.assertTrue(validate.is_ip_member_of_cidr('192.0.0.5', + '192.0.0.0/24')) + self.assertFalse(validate.is_ip_member_of_cidr('198.51.100.5', + '192.0.0.0/24')) + self.assertTrue(validate.is_ip_member_of_cidr('2001:db8::5', + '2001:db8::/32')) + self.assertFalse(validate.is_ip_member_of_cidr('::ffff:0:203.0.113.5', + '2001:db8::/32')) + + def test_check_hsts_options(self): + self.assertRaises( + exceptions.ValidationException, + validate.check_hsts_options, + {'hsts_include_subdomains': True, + 'hsts_preload': wtypes.Unset, + 'hsts_max_age': wtypes.Unset} + ) + self.assertRaises( + exceptions.ValidationException, + validate.check_hsts_options, + {'hsts_include_subdomains': wtypes.Unset, + 'hsts_preload': True, + 'hsts_max_age': wtypes.Unset} + ) + self.assertRaises( + exceptions.ValidationException, + validate.check_hsts_options, + {'protocol': constants.PROTOCOL_UDP, + 'hsts_include_subdomains': wtypes.Unset, + 'hsts_preload': wtypes.Unset, + 'hsts_max_age': 1} + ) + self.assertIsNone( + validate.check_hsts_options( + {'protocol': constants.PROTOCOL_TERMINATED_HTTPS, + 'hsts_include_subdomains': wtypes.Unset, + 'hsts_preload': wtypes.Unset, + 'hsts_max_age': 1}) + ) + + def test_check_hsts_options_put(self): + listener = mock.MagicMock() + db_listener = mock.MagicMock() + db_listener.protocol = constants.PROTOCOL_TERMINATED_HTTPS + + listener.hsts_max_age = wtypes.Unset + db_listener.hsts_max_age = None + for obj in (listener, db_listener): + obj.hsts_include_subdomains = False + obj.hsts_preload = False + self.assertIsNone(validate.check_hsts_options_put( + listener, db_listener)) + + for i in range(2): + listener.hsts_include_subdomains = bool(i % 2) + listener.hsts_preload = not bool(i % 2) + self.assertRaises( + exceptions.ValidationException, + validate.check_hsts_options_put, + listener, db_listener) + + listener.hsts_max_age, db_listener.hsts_max_age = wtypes.Unset, 0 + self.assertIsNone(validate.check_hsts_options_put( + listener, db_listener)) + + listener.hsts_max_age, db_listener.hsts_max_age = 3, None + self.assertIsNone(validate.check_hsts_options_put( + listener, db_listener)) + + db_listener.protocol = constants.PROTOCOL_HTTP + self.assertRaises( + exceptions.ValidationException, + validate.check_hsts_options_put, + listener, db_listener) diff --git a/octavia/tests/unit/common/tls_utils/__init__.py b/octavia/tests/unit/common/tls_utils/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/common/tls_utils/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/common/tls_utils/test_cert_parser.py b/octavia/tests/unit/common/tls_utils/test_cert_parser.py new file mode 100644 index 0000000000..eafc0d6162 --- /dev/null +++ b/octavia/tests/unit/common/tls_utils/test_cert_parser.py @@ -0,0 +1,256 @@ +# +# Copyright 2014 OpenStack Foundation. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import datetime +from unittest import mock + +from cryptography import x509 + +from octavia.common import data_models +import octavia.common.exceptions as exceptions +import octavia.common.tls_utils.cert_parser as cert_parser +from octavia.tests.common import sample_certs +from octavia.tests.unit import base +from octavia.tests.unit.common.sample_configs import sample_configs_combined + + +class TestTLSParseUtils(base.TestCase): + def test_alt_subject_name_parses(self): + hosts = cert_parser.get_host_names(sample_certs.ALT_EXT_CRT) + self.assertIn('www.cnfromsubject.org', hosts['cn']) + self.assertIn('www.hostFromDNSName1.com', hosts['dns_names']) + self.assertIn('www.hostFromDNSName2.com', hosts['dns_names']) + self.assertIn('www.hostFromDNSName3.com', hosts['dns_names']) + self.assertIn('www.hostFromDNSName4.com', hosts['dns_names']) + + def test_x509_parses(self): + self.assertRaises(exceptions.UnreadableCert, + cert_parser.validate_cert, "BAD CERT") + self.assertTrue(cert_parser.validate_cert(sample_certs.X509_CERT)) + self.assertTrue(cert_parser.validate_cert(sample_certs.X509_CERT, + private_key=sample_certs.X509_CERT_KEY)) + + def test_read_private_key_pkcs8(self): + self.assertRaises(exceptions.NeedsPassphrase, + cert_parser._read_private_key, + sample_certs.ENCRYPTED_PKCS8_CRT_KEY) + cert_parser._read_private_key( + sample_certs.ENCRYPTED_PKCS8_CRT_KEY, + passphrase=sample_certs.ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE) + + def test_read_private_key_pem(self): + self.assertRaises(exceptions.NeedsPassphrase, + cert_parser._read_private_key, + sample_certs.X509_CERT_KEY_ENCRYPTED) + cert_parser._read_private_key( + sample_certs.X509_CERT_KEY_ENCRYPTED, + passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE) + + def test_prepare_private_key(self): + self.assertEqual( + cert_parser.prepare_private_key( + sample_certs.X509_CERT_KEY_ENCRYPTED, + passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE), + sample_certs.X509_CERT_KEY) + + def test_prepare_private_key_orig_not_encrypted(self): + self.assertEqual( + cert_parser.prepare_private_key( + sample_certs.X509_CERT_KEY), + sample_certs.X509_CERT_KEY) + + def test_validate_cert_and_key_match(self): + self.assertTrue( + cert_parser.validate_cert( + sample_certs.X509_CERT, + private_key=sample_certs.X509_CERT_KEY)) + self.assertTrue( + cert_parser.validate_cert( + sample_certs.X509_CERT, + private_key=sample_certs.X509_CERT_KEY.decode('utf-8'))) + self.assertRaises(exceptions.MisMatchedKey, + cert_parser.validate_cert, + sample_certs.X509_CERT, + 
private_key=sample_certs.X509_CERT_KEY_2) +
+ def test_validate_cert_handles_intermediates(self): + self.assertTrue( + cert_parser.validate_cert( + sample_certs.X509_CERT, + private_key=sample_certs.X509_CERT_KEY, + intermediates=(sample_certs.X509_IMDS + + b"\nParser should ignore junk\n"))) + self.assertTrue( + cert_parser.validate_cert( + sample_certs.X509_CERT, + private_key=sample_certs.X509_CERT_KEY, + intermediates=sample_certs.X509_IMDS_LIST)) +
+ def test_split_x509s(self): + imds = [] + for x509Pem in cert_parser._split_x509s(sample_certs.TEST_X509_IMDS): + imds.append(cert_parser._get_x509_from_pem_bytes(x509Pem)) + + for i in range(0, len(imds)): + self.assertEqual(sample_certs.EXPECTED_IMD_TEST_SUBJS[i], + imds[i].subject.get_attributes_for_oid( + x509.OID_COMMON_NAME)[0].value) +
+ def test_get_intermediates_pem_chain(self): + self.assertEqual( + sample_certs.X509_IMDS_LIST, + list(cert_parser.get_intermediates_pems(sample_certs.X509_IMDS))) +
+ def test_get_intermediates_pkcs7_pem(self): + self.assertEqual( + sample_certs.X509_IMDS_LIST, + list(cert_parser.get_intermediates_pems(sample_certs.PKCS7_PEM))) +
+ def test_get_intermediates_pkcs7_pem_bad(self): + self.assertRaises( + exceptions.UnreadableCert, + lambda: list(cert_parser.get_intermediates_pems( + b'-----BEGIN PKCS7-----\nbad data\n-----END PKCS7-----'))) +
+ def test_get_intermediates_pkcs7_der(self): + self.assertEqual( + sample_certs.X509_IMDS_LIST, + list(cert_parser.get_intermediates_pems(sample_certs.PKCS7_DER))) +
+ def test_get_intermediates_pkcs7_der_bad(self): + self.assertRaises( + exceptions.UnreadableCert, + lambda: list(cert_parser.get_intermediates_pems( + b'\xfe\xfe\xff\xff'))) +
+ def test_get_x509_from_der_bytes_bad(self): + self.assertRaises( + exceptions.UnreadableCert, + cert_parser._get_x509_from_der_bytes, b'bad data') +
+ @mock.patch('oslo_context.context.RequestContext') + def test_load_certificates(self, mock_oslo): + listener = sample_configs_combined.sample_listener_tuple( + tls=True, sni=True, client_ca_cert=True) + client = mock.MagicMock() + context = mock.Mock() + context.project_id = '12345' + with mock.patch.object(cert_parser, + 'get_host_names') as cp: + with mock.patch.object(cert_parser, + '_map_cert_tls_container'): + cp.return_value = {'cn': 'fakeCN'} + cert_parser.load_certificates_data(client, listener, context) + + # Ensure get_cert is called three times + calls_cert_mngr = [ + mock.call.get_cert(context, 'cont_id_1', check_only=True), + mock.call.get_cert(context, 'cont_id_2', check_only=True), + mock.call.get_cert(context, 'cont_id_3', check_only=True) + ] + client.assert_has_calls(calls_cert_mngr) + + # Test asking for nothing + listener = sample_configs_combined.sample_listener_tuple( + tls=False, sni=False, client_ca_cert=False) + client = mock.MagicMock() + with mock.patch.object(cert_parser, + '_map_cert_tls_container') as mock_map: + result = cert_parser.load_certificates_data(client, listener) + + mock_map.assert_not_called() + ref_empty_dict = {'tls_cert': None, 'sni_certs': []} + self.assertEqual(ref_empty_dict, result) + mock_oslo.assert_called() +
+ def test_load_certificates_get_cert_errors(self): + mock_cert_mngr = mock.MagicMock() + mock_obj = mock.MagicMock() + mock_sni_container = mock.MagicMock() + mock_sni_container.tls_container_id = 2 + + mock_cert_mngr.get_cert.side_effect = [Exception, Exception] + + # Test tls_certificate_id error + mock_obj.tls_certificate_id = 1 + + self.assertRaises(exceptions.CertificateRetrievalException, + 
cert_parser.load_certificates_data, + mock_cert_mngr, mock_obj) + + # Test sni_containers error + mock_obj.tls_certificate_id = None + mock_obj.sni_containers = [mock_sni_container] + + self.assertRaises(exceptions.CertificateRetrievalException, + cert_parser.load_certificates_data, + mock_cert_mngr, mock_obj) + + @mock.patch('octavia.certificates.common.cert.Cert') + def test_map_cert_tls_container(self, cert_mock): + tls = data_models.TLSContainer( + id=sample_certs.X509_CERT_SHA1, + primary_cn=sample_certs.X509_CERT_CN, + certificate=sample_certs.X509_CERT, + private_key=sample_certs.X509_CERT_KEY_ENCRYPTED, + passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE, + intermediates=sample_certs.X509_IMDS_LIST) + cert_mock.get_private_key.return_value = tls.private_key + cert_mock.get_certificate.return_value = tls.certificate + cert_mock.get_intermediates.return_value = tls.intermediates + cert_mock.get_private_key_passphrase.return_value = tls.passphrase + with mock.patch.object(cert_parser, 'get_host_names') as cp: + cp.return_value = {'cn': sample_certs.X509_CERT_CN} + self.assertEqual( + tls.id, cert_parser._map_cert_tls_container( + cert_mock).id) + self.assertEqual( + tls.primary_cn, cert_parser._map_cert_tls_container( + cert_mock).primary_cn) + self.assertEqual( + tls.certificate, cert_parser._map_cert_tls_container( + cert_mock).certificate) + self.assertEqual( + sample_certs.X509_CERT_KEY, + cert_parser._map_cert_tls_container( + cert_mock).private_key) + self.assertEqual( + tls.intermediates, cert_parser._map_cert_tls_container( + cert_mock).intermediates) + + def test_build_pem(self): + expected = b'imacert\nimakey\nimainter\nimainter2\n' + tls_tuple = sample_configs_combined.sample_tls_container_tuple( + certificate=b'imacert', private_key=b'imakey', + intermediates=[b'imainter', b'imainter2']) + self.assertEqual(expected, cert_parser.build_pem(tls_tuple)) + + def test_get_primary_cn(self): + cert = sample_certs.X509_CERT + + with mock.patch.object(cert_parser, 'get_host_names') as cp: + cp.return_value = {'cn': 'fakeCN'} + cn = cert_parser.get_primary_cn(cert) + self.assertEqual('fakeCN', cn) + + def test_get_cert_expiration(self): + exp_date = cert_parser.get_cert_expiration(sample_certs.X509_EXPIRED) + self.assertEqual( + datetime.datetime(2016, 9, 25, 18, 1, 54, + tzinfo=datetime.timezone.utc), + exp_date) + + # test the exception + self.assertRaises(exceptions.UnreadableCert, + cert_parser.get_cert_expiration, 'bad-cert-file') diff --git a/octavia/tests/unit/compute/__init__.py b/octavia/tests/unit/compute/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/compute/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/octavia/tests/unit/compute/drivers/__init__.py b/octavia/tests/unit/compute/drivers/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/compute/drivers/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/compute/drivers/noop_driver/__init__.py b/octavia/tests/unit/compute/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/compute/drivers/noop_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/compute/drivers/noop_driver/test_driver.py b/octavia/tests/unit/compute/drivers/noop_driver/test_driver.py new file mode 100644 index 0000000000..403280b38e --- /dev/null +++ b/octavia/tests/unit/compute/drivers/noop_driver/test_driver.py @@ -0,0 +1,161 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_utils import uuidutils + +from octavia.compute.drivers.noop_driver import driver +import octavia.tests.unit.base as base + + +class TestNoopComputeDriver(base.TestCase): + FAKE_UUID_1 = uuidutils.generate_uuid() + FAKE_UUID_2 = uuidutils.generate_uuid() + FAKE_UUID_3 = uuidutils.generate_uuid() + FAKE_UUID_4 = uuidutils.generate_uuid() + FAKE_UUID_5 = uuidutils.generate_uuid() + FAKE_UUID_6 = uuidutils.generate_uuid() + + def setUp(self): + super().setUp() + self.mock_engine = mock.MagicMock() + with mock.patch('octavia.compute.drivers.noop_driver.driver.' 
+ 'create_engine') as mock_create_engine: + mock_create_engine.return_value = self.mock_engine + self.driver = driver.NoopComputeDriver() + mock_create_engine.assert_called_once_with( + 'sqlite:////tmp/octavia-network-noop.db', + isolation_level='SERIALIZABLE') + + self.name = "amphora_name" + self.amphora_flavor = "m1.tiny" + self.image_tag = "faketag" + self.image_owner = self.FAKE_UUID_2 + self.key_name = "key_name" + self.sec_groups = ["default"] + self.network_ids = [self.FAKE_UUID_3] + self.confdrivefiles = ["config_driver_file1"] + self.user_data = "user_data" + self.amphora_id = self.FAKE_UUID_4 + self.loadbalancer_id = self.FAKE_UUID_5 + self.server_group_policy = 'anti-affinity' + self.server_group_name = 'my_server_group' + self.server_group_id = self.FAKE_UUID_6 + self.port_ids = ['port-id-1'] + self.port_id = 88 + self.network_id = uuidutils.generate_uuid() + self.ip_address = "192.0.2.2" + self.flavor_id = uuidutils.generate_uuid() + self.availability_zone = 'my_test_az' + + def test_build(self): + self.driver.build(self.name, self.amphora_flavor, + self.image_tag, self.image_owner, + self.key_name, self.sec_groups, self.network_ids, + self.confdrivefiles, self.user_data, self.port_ids, + self.server_group_id) + + self.assertEqual((self.name, self.amphora_flavor, + self.image_tag, self.image_owner, + self.key_name, self.sec_groups, self.network_ids, + self.confdrivefiles, self.user_data, self.port_ids, + self.server_group_id, 'build'), + self.driver.driver.computeconfig[( + self.name, + self.amphora_flavor, + self.image_tag, + self.image_owner, + self.key_name, + self.user_data, + self.server_group_id + )]) + + def test_delete(self): + self.driver.delete(self.amphora_id) + self.assertEqual((self.amphora_id, 'delete'), + self.driver.driver.computeconfig[ + self.amphora_id]) + + def test_status(self): + self.driver.status(self.amphora_id) + self.assertEqual((self.amphora_id, 'status'), + self.driver.driver.computeconfig[ + self.amphora_id]) + + def test_get_amphora(self): + management_network_id = uuidutils.generate_uuid() + self.driver.get_amphora(self.amphora_id, management_network_id) + self.assertEqual( + (self.amphora_id, management_network_id, 'get_amphora'), + self.driver.driver.computeconfig[ + self.amphora_id, management_network_id]) + + def test_create_server_group(self): + self.driver.create_server_group(self.server_group_name, + self.server_group_policy) + self.assertEqual((self.server_group_name, self.server_group_policy, + 'create'), + self.driver.driver.computeconfig[ + self.server_group_name, self.server_group_policy]) + + def test_delete_server_group(self): + self.driver.delete_server_group(self.server_group_id) + self.assertEqual((self.server_group_id, 'delete'), + self.driver.driver.computeconfig[ + self.server_group_id]) + + @mock.patch('octavia.compute.drivers.noop_driver.driver.update') + def test_attach_network_or_port(self, mock_update): + update_mock = mock.MagicMock() + mock_update.return_value = update_mock + connect_mock = mock.MagicMock() + connection_mock = mock.MagicMock() + self.mock_engine.connect.return_value = connect_mock + connect_mock.__enter__.return_value = connection_mock + + self.driver.attach_network_or_port(self.amphora_id, self.network_id, + self.ip_address, self.port_id) + self.assertEqual((self.amphora_id, self.network_id, self.ip_address, + self.port_id, 'attach_network_or_port'), + self.driver.driver.computeconfig[( + self.amphora_id, self.network_id, + self.ip_address, self.port_id)]) + + 
self.mock_engine.connect.assert_called_once() + interfaces_table = self.driver.driver.interfaces_table + connection_mock.assert_has_calls([ + mock.call.execute( + update_mock.where( + interfaces_table.c.port_id == self.port_id).values( + compute_id=self.amphora_id)), + mock.call.commit()]) + connect_mock.__enter__.assert_called_once() + + def test_detach_port(self): + self.driver.detach_port(self.amphora_id, self.port_id) + self.assertEqual((self.amphora_id, self.port_id, + 'detach_port'), + self.driver.driver.computeconfig[( + self.amphora_id, self.port_id)]) + + def test_validate_flavor(self): + self.driver.validate_flavor(self.flavor_id) + self.assertEqual((self.flavor_id, 'validate_flavor'), + self.driver.driver.computeconfig[self.flavor_id]) + + def test_validate_availability_zone(self): + self.driver.validate_availability_zone(self.availability_zone) + self.assertEqual( + (self.availability_zone, 'validate_availability_zone'), + self.driver.driver.computeconfig[self.availability_zone]) diff --git a/octavia/tests/unit/compute/drivers/test_nova_driver.py b/octavia/tests/unit/compute/drivers/test_nova_driver.py new file mode 100644 index 0000000000..dff64d1d43 --- /dev/null +++ b/octavia/tests/unit/compute/drivers/test_nova_driver.py @@ -0,0 +1,567 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
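+# NOTE: the tests below stub out the novaclient server manager and assert on
+# the keyword arguments that VirtualMachineManager.build() forwards to
+# servers.create(). As a rough sketch of the mapping exercised here
+# (reconstructed from the assertions in this file, not quoted from the
+# driver itself):
+#
+#     nics = ([{'net-id': n} for n in network_ids] +
+#             [{'port-id': p} for p in port_ids])
+#     manager.create(name=..., nics=nics, image=image_id, flavor=...,
+#                    files=config_drive_files, userdata=user_data,
+#                    config_drive=True, availability_zone=...,
+#                    block_device_mapping=bdm)
+#
+# where image_id is None and bdm is {'vda': '<volume_id>:::true'} when the
+# cinder volume driver is enabled instead of direct image boot.
+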
+from unittest import mock + +from novaclient import exceptions as nova_exceptions +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.common import data_models as models +from octavia.common import exceptions +import octavia.compute.drivers.nova_driver as nova_common +import octavia.tests.unit.base as base + + +CONF = cfg.CONF + +
+class TestNovaClient(base.TestCase): + + def setUp(self): + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf = conf + self.net_name = "lb-mgmt-net" + conf.config(group="controller_worker", + amp_boot_network_list=['1', '2']) + conf.config(group="controller_worker", + image_driver='image_noop_driver') + self.fake_image_uuid = uuidutils.generate_uuid() + + self.amphora = models.Amphora( + compute_id=uuidutils.generate_uuid(), + status='ACTIVE', + lb_network_ip='10.0.0.1', + image_id=self.fake_image_uuid, + compute_flavor=uuidutils.generate_uuid() + ) + + self.nova_response = mock.Mock() + self.nova_response.id = self.amphora.compute_id + self.nova_response.status = 'ACTIVE' + self.nova_response.fault = 'FAKE_FAULT' + setattr(self.nova_response, 'OS-EXT-AZ:availability_zone', None) + self.nova_response.image = {'id': self.amphora.image_id} + self.nova_response.flavor = {'id': self.amphora.compute_flavor} + + self.interface_list = mock.MagicMock() + self.interface_list.net_id = '1' + self.interface_list.fixed_ips = [mock.MagicMock()] + self.interface_list.fixed_ips[0] = {'ip_address': '10.0.0.1'} + + self.loadbalancer_id = uuidutils.generate_uuid() + self.server_group_policy = constants.ANTI_AFFINITY + self.server_group_id = uuidutils.generate_uuid() + + self.manager = nova_common.VirtualMachineManager() + self.manager.manager = mock.MagicMock() + self.manager.server_groups = mock.MagicMock() + self.manager._nova_client = mock.MagicMock() + self.manager.flavor_manager = mock.MagicMock() + self.manager.availability_zone_manager = mock.MagicMock() + + self.nova_response.interface_list.side_effect = [[self.interface_list]] + self.manager.manager.get.return_value = self.nova_response + self.manager.manager.create.return_value = self.nova_response + self.manager.server_groups.create.return_value = mock.Mock() + + self.nova_response.addresses = {self.net_name: [{'addr': '10.0.0.1'}]} + + self.nova_network = mock.Mock() + self.nova_network.label = self.net_name + + self.server_group_name = 'octavia-lb-' + self.loadbalancer_id + self.server_group_kwargs = {'name': self.server_group_name, + 'policies': [self.server_group_policy]} + + self.server_group_mock = mock.Mock() + self.server_group_mock.name = self.server_group_name + self.server_group_mock.policy = self.server_group_policy + self.server_group_mock.id = self.server_group_id + + self.volume_mock = mock.MagicMock() + setattr(self.volume_mock, 'volumeId', '1') + + self.port_id = uuidutils.generate_uuid() + self.compute_id = uuidutils.generate_uuid() + self.network_id = uuidutils.generate_uuid() + self.flavor_id = uuidutils.generate_uuid() + self.availability_zone = 'my_test_az' + + super().setUp() +
+ def test_build(self): + amphora_id = self.manager.build(amphora_flavor=1, image_tag='stout', + key_name=1, + sec_groups=1, + network_ids=[1], + port_ids=[2], + user_data='Blah', + config_drive_files='Files Blah') + + self.assertEqual(self.amphora.compute_id, amphora_id) + + self.manager.manager.create.assert_called_with( + name="amphora_name", + nics=[{'net-id': 1}, 
{'port-id': 2}], + image=1, + flavor=1, + key_name=1, + security_groups=1, + files='Files Blah', + userdata='Blah', + config_drive=True, + scheduler_hints=None, + availability_zone=None, + block_device_mapping={} + ) + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_build_with_cinder_volume(self, mock_driver): + self.conf.config(group="controller_worker", + volume_driver='volume_cinder_driver') + self.manager.volume_driver = mock_driver + mock_driver.create_volume_from_image.return_value = 1 + amphora_id = self.manager.build(amphora_flavor=1, image_tag='pilsner', + key_name=1, + sec_groups=1, + network_ids=[1], + port_ids=[2], + user_data='Blah', + config_drive_files='Files Blah') + + self.assertEqual(self.amphora.compute_id, amphora_id) + mock_driver.create_volume_from_image.assert_called_with(1, None) + self.manager.manager.create.assert_called_with( + name="amphora_name", + nics=[{'net-id': 1}, {'port-id': 2}], + image=None, + flavor=1, + key_name=1, + security_groups=1, + files='Files Blah', + userdata='Blah', + config_drive=True, + scheduler_hints=None, + availability_zone=None, + block_device_mapping={'vda': '1:::true'} + ) + + def test_build_with_availability_zone(self): + FAKE_AZ_NAME = "my_availability_zone" + self.conf.config(group="nova", availability_zone=FAKE_AZ_NAME) + FAKE_AZ = {constants.COMPUTE_ZONE: FAKE_AZ_NAME} + + amphora_id = self.manager.build(amphora_flavor=1, image_tag='malt', + key_name=1, + sec_groups=1, + network_ids=[1], + port_ids=[2], + user_data='Blah', + config_drive_files='Files Blah', + availability_zone=FAKE_AZ) + + self.assertEqual(self.amphora.compute_id, amphora_id) + + self.manager.manager.create.assert_called_with( + name="amphora_name", + nics=[{'net-id': 1}, {'port-id': 2}], + image=1, + flavor=1, + key_name=1, + security_groups=1, + files='Files Blah', + userdata='Blah', + config_drive=True, + scheduler_hints=None, + availability_zone=FAKE_AZ_NAME, + block_device_mapping={} + ) + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_build_with_availability_zone_and_volume(self, mock_driver): + FAKE_AZ_NAME = "my_availability_zone" + self.conf.config(group="controller_worker", + volume_driver='volume_cinder_driver') + self.conf.config(group="nova", availability_zone=FAKE_AZ_NAME) + self.conf.config(group="cinder", availability_zone=FAKE_AZ_NAME) + FAKE_AZ = {constants.COMPUTE_ZONE: FAKE_AZ_NAME, + constants.VOLUME_ZONE: FAKE_AZ_NAME} + + self.manager.volume_driver = mock_driver + mock_driver.create_volume_from_image.return_value = 1 + + amphora_id = self.manager.build(amphora_flavor=1, image_tag='pilsner', + key_name=1, + sec_groups=1, + network_ids=[1], + port_ids=[2], + user_data='Blah', + config_drive_files='Files Blah', + availability_zone=FAKE_AZ) + + self.assertEqual(self.amphora.compute_id, amphora_id) + + self.manager.manager.create.assert_called_with( + name="amphora_name", + nics=[{'net-id': 1}, {'port-id': 2}], + image=None, + flavor=1, + key_name=1, + security_groups=1, + files='Files Blah', + userdata='Blah', + config_drive=True, + scheduler_hints=None, + availability_zone=FAKE_AZ_NAME, + block_device_mapping={'vda': '1:::true'} + ) + mock_driver.create_volume_from_image.assert_called_with(1, FAKE_AZ) + + def test_build_with_availability_zone_config(self): + FAKE_AZ = "my_availability_zone" + self.conf.config(group="nova", availability_zone=FAKE_AZ) + + amphora_id = self.manager.build(amphora_flavor=1, image_tag='ipa', + key_name=1, + sec_groups=1, + network_ids=[1], + port_ids=[2], + user_data='Blah', + 
config_drive_files='Files Blah') + + self.assertEqual(self.amphora.compute_id, amphora_id) + + self.manager.manager.create.assert_called_with( + name="amphora_name", + nics=[{'net-id': 1}, {'port-id': 2}], + image=1, + flavor=1, + key_name=1, + security_groups=1, + files='Files Blah', + userdata='Blah', + config_drive=True, + scheduler_hints=None, + availability_zone=FAKE_AZ, + block_device_mapping={} + ) + + def test_build_with_random_amphora_name_length(self): + self.conf.config(group="nova", random_amphora_name_length=15) + self.addCleanup(self.conf.config, + group='nova', random_amphora_name_length=0) + + self.manager.build(name="b" * 50) + self.assertEqual( + 15, len(self.manager.manager.create.call_args[1]['name'])) + + def test_build_with_default_boot_network(self): + self.conf.config(group="controller_worker", + amp_boot_network_list='') + amphora_id = self.manager.build(amphora_flavor=1, image_tag='porter', + key_name=1, + sec_groups=1, + network_ids=None, + port_ids=[2], + user_data='Blah', + config_drive_files='Files Blah') + + self.assertEqual(self.amphora.compute_id, amphora_id) + + self.manager.manager.create.assert_called_with( + name="amphora_name", + nics=[{'port-id': 2}], + image=1, + flavor=1, + key_name=1, + security_groups=1, + files='Files Blah', + userdata='Blah', + config_drive=True, + scheduler_hints=None, + availability_zone=None, + block_device_mapping={} + ) + + def test_bad_build(self): + self.manager.manager.create.side_effect = Exception + self.assertRaises(exceptions.ComputeBuildException, self.manager.build) + + def test_build_extracts_image_id_by_tag(self): + self.manager.build(image_tag='tag') + self.assertEqual(1, self.manager.manager.create.call_args[1]['image']) + + def test_delete(self): + amphora_id = self.manager.build(amphora_flavor=1, image_tag='pale_ale', + key_name=1, sec_groups=1, + network_ids=[1]) + self.manager.delete(amphora_id) + self.manager.manager.delete.assert_called_with(server=amphora_id) + + def test_bad_delete(self): + self.manager.manager.delete.side_effect = Exception + amphora_id = self.manager.build(amphora_flavor=1, image_tag='lager', + key_name=1, sec_groups=1, + network_ids=[1]) + self.assertRaises(exceptions.ComputeDeleteException, + self.manager.delete, amphora_id) + + def test_status(self): + status = self.manager.status(self.amphora.id) + self.assertEqual(constants.UP, status) + + def test_bad_status(self): + self.manager.manager.get.side_effect = Exception + self.assertRaises(exceptions.ComputeStatusException, + self.manager.status, self.amphora.id) + + def test_get_amphora(self): + amphora, fault = self.manager.get_amphora(self.amphora.compute_id) + self.assertEqual(self.amphora, amphora) + self.assertEqual(self.nova_response.fault, fault) + self.manager.manager.get.assert_called_once_with(amphora.compute_id) + + def test_bad_get_amphora(self): + self.manager.manager.get.side_effect = Exception + self.assertRaises(exceptions.ComputeGetException, + self.manager.get_amphora, self.amphora.id) + + def test_get_amphora_retried(self): + self.manager.manager.get.side_effect = [Exception, self.nova_response] + amphora, fault = self.manager.get_amphora(self.amphora.compute_id) + self.assertEqual(self.amphora, amphora) + self.assertEqual(self.nova_response.fault, fault) + self.assertEqual(2, self.manager.manager.get.call_count) + self.manager.manager.get.assert_has_calls( + [mock.call(amphora.compute_id)] * 2) + + def test_translate_amphora(self): + amphora, fault = self.manager._translate_amphora(self.nova_response) + 
self.assertEqual(self.amphora, amphora) + self.assertEqual(self.nova_response.fault, fault) + self.nova_response.interface_list.assert_called_once_with() + + def test_translate_amphora_no_availability_zone(self): + delattr(self.nova_response, 'OS-EXT-AZ:availability_zone') + amphora, fault = self.manager._translate_amphora(self.nova_response) + self.assertEqual(self.amphora, amphora) + self.assertEqual(self.nova_response.fault, fault) + self.nova_response.interface_list.assert_called_once_with() + + def test_bad_translate_amphora(self): + self.nova_response.interface_list.side_effect = Exception + self.manager._nova_client.networks.get.side_effect = Exception + amphora, fault = self.manager._translate_amphora(self.nova_response) + self.assertIsNone(amphora.lb_network_ip) + self.nova_response.interface_list.assert_called_once_with() + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_translate_amphora_use_cinder(self, mock_driver): + self.conf.config(group="controller_worker", + volume_driver='volume_cinder_driver') + volumes_manager = self.manager._nova_client.volumes + volumes_manager.get_server_volumes.return_value = [self.volume_mock] + self.manager.volume_driver = mock_driver + mock_driver.get_image_from_volume.return_value = self.fake_image_uuid + amphora, fault = self.manager._translate_amphora(self.nova_response) + self.assertEqual(self.amphora, amphora) + self.assertEqual(self.nova_response.fault, fault) + self.nova_response.interface_list.assert_called_once_with() + volumes_manager.get_server_volumes.assert_called_with( + self.nova_response.id) + mock_driver.get_image_from_volume.assert_called_with('1') + + def test_create_server_group(self): + self.manager.server_groups.create.return_value = self.server_group_mock + + sg = self.manager.create_server_group(self.server_group_name, + self.server_group_policy) + + self.assertEqual(sg.id, self.server_group_id) + self.assertEqual(sg.name, self.server_group_name) + self.assertEqual(sg.policy, self.server_group_policy) + self.manager.server_groups.create.assert_called_once_with( + **self.server_group_kwargs) + + def test_bad_create_server_group(self): + self.manager.server_groups.create.side_effect = Exception + self.assertRaises(exceptions.ServerGroupObjectCreateException, + self.manager.create_server_group, + self.server_group_name, self.server_group_policy) + self.manager.server_groups.create.assert_called_once_with( + **self.server_group_kwargs) + + def test_delete_server_group(self): + self.manager.delete_server_group(self.server_group_id) + self.manager.server_groups.delete.assert_called_once_with( + self.server_group_id) + + def test_bad_delete_server_group(self): + self.manager.server_groups.delete.side_effect = [ + nova_exceptions.NotFound('test_exception'), Exception] + + # NotFound should not raise an exception + + self.manager.delete_server_group(self.server_group_id) + self.manager.server_groups.delete.assert_called_once_with( + self.server_group_id) + + # Catch the exception for server group object delete exception + + self.manager.server_groups.delete.reset_mock() + self.assertRaises(exceptions.ServerGroupObjectDeleteException, + self.manager.delete_server_group, + self.server_group_id) + self.manager.server_groups.delete.assert_called_once_with( + self.server_group_id) + + def test_attach_network_or_port(self): + self.manager.attach_network_or_port(self.compute_id, + self.network_id) + self.manager.manager.interface_attach.assert_called_with( + server=self.compute_id, net_id=self.network_id, fixed_ip=None, 
+ port_id=None) + + def test_attach_network_or_port_conflict_exception(self): + self.manager.manager.interface_attach.side_effect = ( + nova_exceptions.Conflict('test_exception')) + interface_mock = mock.MagicMock() + interface_mock.id = self.port_id + bad_interface_mock = mock.MagicMock() + bad_interface_mock.id = uuidutils.generate_uuid() + self.manager.manager.interface_list.side_effect = [ + [interface_mock], [bad_interface_mock], [], Exception('boom')] + + # No port specified + self.assertRaises(exceptions.ComputeUnknownException, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + + # Port already attached + result = self.manager.attach_network_or_port(self.compute_id, + port_id=self.port_id) + self.assertEqual(interface_mock, result) + + # Port not found + self.assertRaises(exceptions.ComputePortInUseException, + self.manager.attach_network_or_port, + self.compute_id, port_id=self.port_id) + + # No ports attached + self.assertRaises(exceptions.ComputePortInUseException, + self.manager.attach_network_or_port, + self.compute_id, port_id=self.port_id) + + # Get attached ports list exception + self.assertRaises(exceptions.ComputeUnknownException, + self.manager.attach_network_or_port, + self.compute_id, port_id=self.port_id) + + def test_attach_network_or_port_general_not_found_exception(self): + self.manager.manager.interface_attach.side_effect = [ + nova_exceptions.NotFound('test_exception')] + self.assertRaises(exceptions.NotFound, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + + def test_attach_network_or_port_instance_not_found_exception(self): + self.manager.manager.interface_attach.side_effect = [ + nova_exceptions.NotFound('Instance disappeared')] + self.assertRaises(exceptions.NotFound, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + + def test_attach_network_or_port_network_not_found_exception(self): + self.manager.manager.interface_attach.side_effect = [ + nova_exceptions.NotFound('Network disappeared')] + self.assertRaises(exceptions.NotFound, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + + def test_attach_network_or_port_port_not_found_exception(self): + self.manager.manager.interface_attach.side_effect = [ + nova_exceptions.NotFound('Port disappeared')] + self.assertRaises(exceptions.NotFound, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + + def test_attach_network_or_port_fail_claim_pci_exception(self): + self.manager.manager.interface_attach.side_effect = [ + nova_exceptions.BadRequest('Failed to claim PCI device'), + nova_exceptions.BadRequest('NotAClaimFailure')] + self.assertRaises(exceptions.ComputeNoResourcesException, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + self.assertRaises(nova_exceptions.BadRequest, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + + def test_attach_network_or_port_port_bind_fail_exception(self): + self.manager.manager.interface_attach.side_effect = [ + nova_exceptions.ClientException('PortBindingFailed'), + nova_exceptions.ClientException('NotABindFailure')] + self.assertRaises(exceptions.ComputeNoResourcesException, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + self.assertRaises(nova_exceptions.ClientException, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + + def test_attach_network_or_port_unknown_exception(self): + self.manager.manager.interface_attach.side_effect = 
[Exception('boom')] + self.assertRaises(exceptions.ComputeUnknownException, + self.manager.attach_network_or_port, + self.compute_id, self.network_id) + + def test_detach_network(self): + self.manager.detach_port(self.compute_id, + self.port_id) + self.manager.manager.interface_detach.assert_called_with( + server=self.compute_id, port_id=self.port_id) + + def test_detach_network_with_exception(self): + self.manager.manager.interface_detach.side_effect = [Exception] + self.manager.detach_port(self.compute_id, + self.port_id) + + def test_validate_flavor(self): + self.manager.validate_flavor(self.flavor_id) + self.manager.flavor_manager.get.assert_called_with(self.flavor_id) + + def test_validate_flavor_with_exception(self): + self.manager.flavor_manager.get.side_effect = [ + nova_exceptions.NotFound(404), exceptions.OctaviaException] + self.assertRaises(exceptions.InvalidSubresource, + self.manager.validate_flavor, + "bogus") + self.assertRaises(exceptions.OctaviaException, + self.manager.validate_flavor, + "bogus") + + def test_validate_availability_zone(self): + mock_az = mock.Mock() + mock_az.zoneName = self.availability_zone + self.manager.availability_zone_manager.list.return_value = [mock_az] + self.manager.validate_availability_zone(self.availability_zone) + self.manager.availability_zone_manager.list.assert_called_with( + detailed=False) + + def test_validate_availability_zone_with_exception(self): + self.manager.availability_zone_manager.list.return_value = [] + self.assertRaises(exceptions.InvalidSubresource, + self.manager.validate_availability_zone, + "bogus") diff --git a/octavia/tests/unit/controller/__init__.py b/octavia/tests/unit/controller/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/healthmanager/__init__.py b/octavia/tests/unit/controller/healthmanager/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/healthmanager/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/octavia/tests/unit/controller/healthmanager/test_health_manager.py b/octavia/tests/unit/controller/healthmanager/test_health_manager.py new file mode 100644 index 0000000000..24ba1d0302 --- /dev/null +++ b/octavia/tests/unit/controller/healthmanager/test_health_manager.py @@ -0,0 +1,137 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import threading +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_db import exception as db_exc +from oslo_utils import uuidutils + +from octavia.controller.healthmanager import health_manager as healthmanager +import octavia.tests.unit.base as base + + +CONF = cfg.CONF + +AMPHORA_ID = uuidutils.generate_uuid() + + +class TestException(Exception): + + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) + + +class TestHealthManager(base.TestCase): + + def setUp(self): + super().setUp() + + @mock.patch('octavia.db.api.wait_for_connection') + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker.failover_amphora') + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' + 'get_stale_amphora') + @mock.patch('octavia.db.api.get_session') + def test_health_check_stale_amphora(self, session_mock, get_stale_amp_mock, + failover_mock, + db_wait_mock): + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="health_manager", heartbeat_timeout=5) + amphora_health = mock.MagicMock() + amphora_health.amphora_id = AMPHORA_ID + + get_stale_amp_mock.side_effect = [amphora_health, None] + + exit_event = threading.Event() + hm = healthmanager.HealthManager(exit_event) + + hm.health_check() + + # Test DBDeadlock and RetryRequest exceptions + session_mock.reset_mock() + get_stale_amp_mock.reset_mock() + mock_session = mock.MagicMock() + session_mock.return_value = mock_session + get_stale_amp_mock.side_effect = [ + db_exc.DBDeadlock, + db_exc.RetryRequest(Exception('retry_test')), + db_exc.DBConnectionError, + TestException('test')] + # Test that a DBDeadlock does not raise an exception + self.assertIsNone(hm.health_check()) + # Test that a RetryRequest does not raise an exception + self.assertIsNone(hm.health_check()) + # Test that a DBConnectionError does not raise an exception + self.assertIsNone(hm.health_check()) + # ... and that it waits for DB reconnection + db_wait_mock.assert_called_once() + # Other exceptions should raise + self.assertRaises(TestException, hm.health_check) + self.assertEqual(4, mock_session.rollback.call_count) + + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker.failover_amphora') + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' 
+ 'get_stale_amphora', return_value=None) + @mock.patch('octavia.db.api.get_session') + def test_health_check_nonstale_amphora(self, session_mock, + get_stale_amp_mock, + failover_mock): + get_stale_amp_mock.side_effect = [None, TestException('test')] + + exit_event = threading.Event() + hm = healthmanager.HealthManager(exit_event) + + hm.health_check() + session_mock.assert_called_once_with() + self.assertFalse(failover_mock.called) + + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker.failover_amphora') + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' + 'get_stale_amphora', return_value=None) + @mock.patch('octavia.db.api.get_session') + def test_health_check_exit(self, session_mock, get_stale_amp_mock, + failover_mock): + get_stale_amp_mock.return_value = None + + exit_event = threading.Event() + hm = healthmanager.HealthManager(exit_event) + hm.health_check() + + session_mock.assert_called_once_with() + self.assertFalse(failover_mock.called) + + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker.failover_amphora') + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' + 'get_stale_amphora', return_value=None) + @mock.patch('octavia.db.api.get_session') + def test_health_check_db_error(self, session_mock, get_stale_amp_mock, + failover_mock): + get_stale_amp_mock.return_value = None + + mock_session = mock.MagicMock() + session_mock.return_value = mock_session + session_mock.side_effect = TestException('DB Error') + exit_event = threading.Event() + hm = healthmanager.HealthManager(exit_event) + + self.assertRaises(TestException, hm.health_check) + self.assertEqual(0, mock_session.rollback.call_count) diff --git a/octavia/tests/unit/controller/housekeeping/__init__.py b/octavia/tests/unit/controller/housekeeping/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/housekeeping/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/housekeeping/test_house_keeping.py b/octavia/tests/unit/controller/housekeeping/test_house_keeping.py new file mode 100644 index 0000000000..0d4cbafb2b --- /dev/null +++ b/octavia/tests/unit/controller/housekeeping/test_house_keeping.py @@ -0,0 +1,216 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
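+
+# These tests cover the two housekeeping services: DatabaseCleanup, which
+# purges amphorae and load balancers that have sat in DELETED status longer
+# than the configured expiry age, and CertRotation, which finds amphorae with
+# expiring server certificates and triggers a rotation for each one.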
+import datetime
+from unittest import mock
+
+from oslo_config import cfg
+from oslo_config import fixture as oslo_fixture
+from oslo_utils import timeutils
+from oslo_utils import uuidutils
+
+from octavia.common import constants
+from octavia.controller.housekeeping import house_keeping
+from octavia.db import repositories as repo
+import octavia.tests.unit.base as base
+
+
+CONF = cfg.CONF
+AMPHORA_ID = uuidutils.generate_uuid()
+
+
+class TestException(Exception):
+
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return repr(self.value)
+
+
+class TestDatabaseCleanup(base.TestCase):
+    FAKE_IP = "10.0.0.1"
+    FAKE_UUID_1 = uuidutils.generate_uuid()
+    FAKE_UUID_2 = uuidutils.generate_uuid()
+    FAKE_EXP_AGE = 60
+
+    def setUp(self):
+        super().setUp()
+        self.dbclean = house_keeping.DatabaseCleanup()
+        self.amp_health_repo = mock.MagicMock()
+        self.amp_repo = mock.MagicMock()
+        self.amp = repo.AmphoraRepository()
+        self.lb = repo.LoadBalancerRepository()
+
+        self.dbclean.amp_repo = self.amp_repo
+        self.dbclean.amp_health_repo = self.amp_health_repo
+        self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF))
+
+    @mock.patch('octavia.db.api.get_session')
+    def test_delete_old_amphorae_True(self, session):
+        """When the deleted amphora is expired."""
+        session.return_value = session
+        self.CONF.config(group="house_keeping",
+                         amphora_expiry_age=self.FAKE_EXP_AGE)
+        expired_time = timeutils.utcnow() - datetime.timedelta(
+            seconds=self.FAKE_EXP_AGE + 1)
+        amphora = self.amp.create(session, id=self.FAKE_UUID_1,
+                                  compute_id=self.FAKE_UUID_2,
+                                  status=constants.DELETED,
+                                  lb_network_ip=self.FAKE_IP,
+                                  vrrp_ip=self.FAKE_IP,
+                                  ha_ip=self.FAKE_IP,
+                                  updated_at=expired_time)
+        self.amp_repo.get_all_deleted_expiring.return_value = [amphora.id]
+        self.amp_health_repo.check_amphora_health_expired.return_value = True
+        self.dbclean.delete_old_amphorae()
+        self.assertTrue(self.amp_repo.get_all_deleted_expiring.called)
+        self.assertTrue(
+            self.amp_health_repo.check_amphora_health_expired.called)
+        self.assertTrue(self.amp_repo.delete.called)
+
+    @mock.patch('octavia.db.api.get_session')
+    def test_delete_old_amphorae_False(self, session):
+        """When the deleted amphora is not expired."""
+        session.return_value = session
+        self.CONF.config(group="house_keeping",
+                         amphora_expiry_age=self.FAKE_EXP_AGE)
+        self.amp.create(session, id=self.FAKE_UUID_1,
+                        compute_id=self.FAKE_UUID_2,
+                        status=constants.DELETED,
+                        lb_network_ip=self.FAKE_IP,
+                        vrrp_ip=self.FAKE_IP,
+                        ha_ip=self.FAKE_IP,
+                        updated_at=datetime.datetime.now())
+        self.amp_repo.get_all_deleted_expiring.return_value = []
+        self.dbclean.delete_old_amphorae()
+        self.assertTrue(self.amp_repo.get_all_deleted_expiring.called)
+        self.assertFalse(
+            self.amp_health_repo.check_amphora_health_expired.called)
+        self.assertFalse(self.amp_repo.delete.called)
+
+    @mock.patch('octavia.db.api.get_session')
+    def test_delete_old_amphorae_Zombie(self, session):
+        """When the deleted amphora is expired but is a zombie!
+
+        This is when the amphora is expired in the amphora table, but in the
+        amphora_health table there are newer records, meaning the amp checked
+        in with the healthmanager *after* it was deleted (and craves brains).
+ """ + session.return_value = session + self.CONF.config(group="house_keeping", + amphora_expiry_age=self.FAKE_EXP_AGE) + expired_time = timeutils.utcnow() - datetime.timedelta( + seconds=self.FAKE_EXP_AGE + 1) + amphora = self.amp.create(session, id=self.FAKE_UUID_1, + compute_id=self.FAKE_UUID_2, + status=constants.DELETED, + lb_network_ip=self.FAKE_IP, + vrrp_ip=self.FAKE_IP, + ha_ip=self.FAKE_IP, + updated_at=expired_time) + self.amp_repo.get_all_deleted_expiring.return_value = [amphora.id] + self.amp_health_repo.check_amphora_health_expired.return_value = False + self.dbclean.delete_old_amphorae() + self.assertTrue(self.amp_repo.get_all_deleted_expiring.called) + self.assertTrue( + self.amp_health_repo.check_amphora_health_expired.called) + self.assertFalse(self.amp_repo.delete.called) + + @mock.patch('octavia.db.api.get_session') + def test_delete_old_load_balancer(self, session): + """Check delete of load balancers in DELETED provisioning status.""" + self.CONF.config(group="house_keeping", + load_balancer_expiry_age=self.FAKE_EXP_AGE) + session.return_value = session + load_balancer = self.lb.create(session, id=self.FAKE_UUID_1, + provisioning_status=constants.DELETED, + operating_status=constants.OFFLINE, + enabled=True) + + for expired_status in [True, False]: + lb_repo = mock.MagicMock() + self.dbclean.lb_repo = lb_repo + if expired_status: + expiring_lbs = [load_balancer.id] + else: + expiring_lbs = [] + lb_repo.get_all_deleted_expiring.return_value = expiring_lbs + self.dbclean.cleanup_load_balancers() + self.assertTrue(lb_repo.get_all_deleted_expiring.called) + if expired_status: + self.assertTrue(lb_repo.delete.called) + else: + self.assertFalse(lb_repo.delete.called) + + +class TestCertRotation(base.TestCase): + def setUp(self): + super().setUp() + self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF)) + + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker.amphora_cert_rotation') + @mock.patch('octavia.db.repositories.AmphoraRepository.' + 'get_cert_expiring_amphora') + @mock.patch('octavia.db.api.get_session') + def test_cert_rotation_expired_amphora_with_exception( + self, session, cert_exp_amp_mock, amp_cert_mock): + self.CONF.config(group="api_settings", + default_provider_driver='amphora') + + amphora = mock.MagicMock() + amphora.id = AMPHORA_ID + + session.return_value = session + cert_exp_amp_mock.side_effect = [amphora, TestException( + 'break_while')] + + cr = house_keeping.CertRotation() + self.assertRaises(TestException, cr.rotate) + amp_cert_mock.assert_called_once_with(AMPHORA_ID) + + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker.amphora_cert_rotation') + @mock.patch('octavia.db.repositories.AmphoraRepository.' + 'get_cert_expiring_amphora') + @mock.patch('octavia.db.api.get_session') + def test_cert_rotation_expired_amphora_without_exception( + self, session, cert_exp_amp_mock, amp_cert_mock): + self.CONF.config(group="api_settings", + default_provider_driver='amphora') + amphora = mock.MagicMock() + amphora.id = AMPHORA_ID + + session.return_value = session + cert_exp_amp_mock.side_effect = [amphora, None] + + cr = house_keeping.CertRotation() + + self.assertIsNone(cr.rotate()) + amp_cert_mock.assert_called_once_with(AMPHORA_ID) + + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker.amphora_cert_rotation') + @mock.patch('octavia.db.repositories.AmphoraRepository.' 
+ 'get_cert_expiring_amphora') + @mock.patch('octavia.db.api.get_session') + def test_cert_rotation_non_expired_amphora( + self, session, cert_exp_amp_mock, amp_cert_mock): + self.CONF.config(group="api_settings", + default_provider_driver='amphora') + session.return_value = session + cert_exp_amp_mock.return_value = None + cr = house_keeping.CertRotation() + cr.rotate() + self.assertFalse(amp_cert_mock.called) diff --git a/octavia/tests/unit/controller/queue/__init__.py b/octavia/tests/unit/controller/queue/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/queue/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/queue/v2/__init__.py b/octavia/tests/unit/controller/queue/v2/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/queue/v2/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/queue/v2/test_consumer.py b/octavia/tests/unit/controller/queue/v2/test_consumer.py new file mode 100644 index 0000000000..68ed22d17b --- /dev/null +++ b/octavia/tests/unit/controller/queue/v2/test_consumer.py @@ -0,0 +1,62 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
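+
+# ConsumerService is the oslo.messaging glue for the v2 worker: run() should
+# create an RPC server on TOPIC_AMPHORA_V2 with Endpoints as its handler, and
+# terminate() should stop that server and wait for in-flight requests.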
+from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +import oslo_messaging as messaging + +from octavia.common import constants +from octavia.controller.queue.v2 import consumer +from octavia.controller.queue.v2 import endpoints +from octavia.tests.unit import base + + +class TestConsumer(base.TestRpc): + + def setUp(self): + super().setUp() + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(host='test-hostname') + self.conf = conf.conf + + @mock.patch.object(messaging, 'Target') + @mock.patch.object(endpoints, 'Endpoints') + @mock.patch.object(messaging, 'get_rpc_server') + def test_consumer_run(self, mock_rpc_server, mock_endpoint, mock_target): + mock_rpc_server_rv = mock.Mock() + mock_rpc_server.return_value = mock_rpc_server_rv + mock_endpoint_rv = mock.Mock() + mock_endpoint.return_value = mock_endpoint_rv + mock_target_rv = mock.Mock() + mock_target.return_value = mock_target_rv + + consumer.ConsumerService(1, self.conf).run() + + mock_target.assert_called_once_with(topic=constants.TOPIC_AMPHORA_V2, + server='test-hostname', + fanout=False) + mock_endpoint.assert_called_once_with() + + @mock.patch.object(messaging, 'get_rpc_server') + @mock.patch.object(endpoints, 'Endpoints') + def test_consumer_terminate(self, mock_endpoint, mock_rpc_server): + mock_rpc_server_rv = mock.Mock() + mock_rpc_server.return_value = mock_rpc_server_rv + + cons = consumer.ConsumerService(1, self.conf) + cons.run() + cons.terminate() + mock_rpc_server_rv.stop.assert_called_once_with() + mock_rpc_server_rv.wait.assert_called_once_with() diff --git a/octavia/tests/unit/controller/queue/v2/test_endpoints.py b/octavia/tests/unit/controller/queue/v2/test_endpoints.py new file mode 100644 index 0000000000..c871139039 --- /dev/null +++ b/octavia/tests/unit/controller/queue/v2/test_endpoints.py @@ -0,0 +1,196 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.controller.queue.v2 import endpoints +from octavia.tests.unit import base + + +class TestEndpoints(base.TestCase): + + def setUp(self): + super().setUp() + + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(octavia_plugins='hot_plug_plugin') + + self.worker_patcher = mock.patch('octavia.controller.worker.v2.' 
+ 'controller_worker.ControllerWorker') + self.worker_patcher.start() + + self.ep = endpoints.Endpoints() + self.context = {} + self.resource_updates = {} + self.resource_id = 1234 + self.resource = {constants.ID: self.resource_id} + self.server_group_id = 3456 + self.listener_dict = {constants.LISTENER_ID: uuidutils.generate_uuid()} + self.loadbalancer_dict = { + constants.LOADBALANCER_ID: uuidutils.generate_uuid() + } + self.flavor_id = uuidutils.generate_uuid() + self.availability_zone = uuidutils.generate_uuid() + + def test_create_load_balancer(self): + self.ep.create_load_balancer(self.context, self.loadbalancer_dict, + flavor=self.flavor_id, + availability_zone=self.availability_zone) + self.ep.worker.create_load_balancer.assert_called_once_with( + self.loadbalancer_dict, self.flavor_id, self.availability_zone) + + def test_create_load_balancer_no_flavor_or_az(self): + self.ep.create_load_balancer(self.context, self.loadbalancer_dict) + self.ep.worker.create_load_balancer.assert_called_once_with( + self.loadbalancer_dict, None, None) + + def test_update_load_balancer(self): + self.ep.update_load_balancer(self.context, self.loadbalancer_dict, + self.resource_updates) + self.ep.worker.update_load_balancer.assert_called_once_with( + self.loadbalancer_dict, self.resource_updates) + + def test_delete_load_balancer(self): + self.ep.delete_load_balancer(self.context, self.loadbalancer_dict) + self.ep.worker.delete_load_balancer.assert_called_once_with( + self.loadbalancer_dict, False) + + def test_failover_load_balancer(self): + self.ep.failover_load_balancer(self.context, self.resource_id) + self.ep.worker.failover_loadbalancer.assert_called_once_with( + self.resource_id) + + def test_failover_amphora(self): + self.ep.failover_amphora(self.context, self.resource_id) + self.ep.worker.failover_amphora.assert_called_once_with( + self.resource_id) + + def test_create_listener(self): + self.ep.create_listener(self.context, self.listener_dict) + self.ep.worker.create_listener.assert_called_once_with( + self.listener_dict) + + def test_update_listener(self): + self.ep.update_listener(self.context, self.listener_dict, + self.resource_updates) + self.ep.worker.update_listener.assert_called_once_with( + self.listener_dict, self.resource_updates) + + def test_delete_listener(self): + self.ep.delete_listener(self.context, self.listener_dict) + self.ep.worker.delete_listener.assert_called_once_with( + self.listener_dict) + + def test_create_pool(self): + self.ep.create_pool(self.context, self.resource) + self.ep.worker.create_pool.assert_called_once_with( + self.resource) + + def test_update_pool(self): + self.ep.update_pool(self.context, self.resource, + self.resource_updates) + self.ep.worker.update_pool.assert_called_once_with( + self.resource, self.resource_updates) + + def test_delete_pool(self): + self.ep.delete_pool(self.context, self.resource) + self.ep.worker.delete_pool.assert_called_once_with( + self.resource) + + def test_create_health_monitor(self): + self.ep.create_health_monitor(self.context, self.resource) + self.ep.worker.create_health_monitor.assert_called_once_with( + self.resource) + + def test_update_health_monitor(self): + self.ep.update_health_monitor(self.context, self.resource, + self.resource_updates) + self.ep.worker.update_health_monitor.assert_called_once_with( + self.resource, self.resource_updates) + + def test_delete_health_monitor(self): + self.ep.delete_health_monitor(self.context, self.resource) + self.ep.worker.delete_health_monitor.assert_called_once_with( + 
self.resource) + + def test_create_member(self): + self.ep.create_member(self.context, self.resource) + self.ep.worker.create_member.assert_called_once_with( + self.resource) + + def test_update_member(self): + self.ep.update_member(self.context, self.resource, + self.resource_updates) + self.ep.worker.update_member.assert_called_once_with( + self.resource, self.resource_updates) + + def test_batch_update_members(self): + self.ep.batch_update_members( + self.context, [{constants.MEMBER_ID: 9}], + [{constants.MEMBER_ID: 11}], + [self.resource_updates]) + self.ep.worker.batch_update_members.assert_called_once_with( + [{constants.MEMBER_ID: 9}], [{constants.MEMBER_ID: 11}], + [self.resource_updates]) + + def test_delete_member(self): + self.ep.delete_member(self.context, self.resource) + self.ep.worker.delete_member.assert_called_once_with( + self.resource) + + def test_create_l7policy(self): + self.ep.create_l7policy(self.context, self.resource) + self.ep.worker.create_l7policy.assert_called_once_with( + self.resource) + + def test_update_l7policy(self): + self.ep.update_l7policy(self.context, self.resource, + self.resource_updates) + self.ep.worker.update_l7policy.assert_called_once_with( + self.resource, self.resource_updates) + + def test_delete_l7policy(self): + self.ep.delete_l7policy(self.context, self.resource) + self.ep.worker.delete_l7policy.assert_called_once_with( + self.resource) + + def test_create_l7rule(self): + self.ep.create_l7rule(self.context, self.resource) + self.ep.worker.create_l7rule.assert_called_once_with( + self.resource) + + def test_update_l7rule(self): + self.ep.update_l7rule(self.context, self.resource, + self.resource_updates) + self.ep.worker.update_l7rule.assert_called_once_with( + self.resource, self.resource_updates) + + def test_delete_l7rule(self): + self.ep.delete_l7rule(self.context, self.resource) + self.ep.worker.delete_l7rule.assert_called_once_with( + self.resource) + + def test_update_amphora_agent_config(self): + self.ep.update_amphora_agent_config(self.context, self.resource) + self.ep.worker.update_amphora_agent_config.assert_called_once_with( + self.resource) + + def test_delete_amphora(self): + self.ep.delete_amphora(self.context, self.resource_id) + self.ep.worker.delete_amphora.assert_called_once_with( + self.resource_id) diff --git a/octavia/tests/unit/controller/worker/__init__.py b/octavia/tests/unit/controller/worker/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/worker/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/worker/test_amphora_rate_limit.py b/octavia/tests/unit/controller/worker/test_amphora_rate_limit.py new file mode 100644 index 0000000000..3d7de0ec42 --- /dev/null +++ b/octavia/tests/unit/controller/worker/test_amphora_rate_limit.py @@ -0,0 +1,129 @@ +# Copyright 2016 Hewlett-Packard Development Company, L.P. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.controller.worker import amphora_rate_limit +import octavia.tests.unit.base as base + +AMP_ID = uuidutils.generate_uuid() +BUILD_PRIORITY = 40 +USED_BUILD_SLOTS = 0 + + +class TestAmphoraBuildRateLimit(base.TestCase): + + def setUp(self): + super().setUp() + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + + self.rate_limit = amphora_rate_limit.AmphoraBuildRateLimit() + self.amp_build_slots_repo = mock.MagicMock() + self.amp_build_req_repo = mock.MagicMock() + self.conf.config(group='haproxy_amphora', build_rate_limit=1) + + @mock.patch('octavia.db.api.session', mock.MagicMock()) + @mock.patch('octavia.controller.worker.amphora_rate_limit' + '.AmphoraBuildRateLimit.wait_for_build_slot') + @mock.patch('octavia.db.repositories.AmphoraBuildReqRepository' + '.add_to_build_queue') + def test_add_to_build_request_queue(self, + mock_add_to_build_queue, + mock_wait_for_build_slot): + self.rate_limit.add_to_build_request_queue(AMP_ID, BUILD_PRIORITY) + + mock_add_to_build_queue.assert_called_once() + mock_wait_for_build_slot.assert_called_once() + + @mock.patch('octavia.db.api.get_session', mock.MagicMock()) + @mock.patch('octavia.db.repositories.AmphoraBuildSlotsRepository' + '.get_used_build_slots_count', + return_value=USED_BUILD_SLOTS) + def test_has_build_slot(self, mock_get_used_build_slots_count): + result = self.rate_limit.has_build_slot() + + mock_get_used_build_slots_count.assert_called_once() + self.assertTrue(result) + + @mock.patch('octavia.db.api.get_session', mock.MagicMock()) + @mock.patch('octavia.db.repositories.AmphoraBuildReqRepository' + '.get_highest_priority_build_req', return_value=AMP_ID) + def test_has_highest_priority(self, mock_get_highest_priority_build_req): + result = self.rate_limit.has_highest_priority(AMP_ID) + + mock_get_highest_priority_build_req.assert_called_once() + self.assertTrue(result) + + @mock.patch('octavia.db.api.get_session', mock.MagicMock()) + @mock.patch('octavia.db.repositories.AmphoraBuildReqRepository' + '.update_req_status') + @mock.patch('octavia.db.repositories.AmphoraBuildSlotsRepository' + '.update_count') + def test_update_build_status_and_available_build_slots(self, + mock_update_count, + mock_update_status): + self.rate_limit.update_build_status_and_available_build_slots(AMP_ID) + + mock_update_count.assert_called_once() + mock_update_status.assert_called_once() + + @mock.patch('octavia.db.api.get_session', mock.MagicMock()) + @mock.patch('octavia.db.repositories.AmphoraBuildReqRepository.delete') + @mock.patch('octavia.db.repositories.AmphoraBuildSlotsRepository' + '.update_count') + def test_remove_from_build_req_queue(self, + mock_update_count, + mock_delete): + self.rate_limit.remove_from_build_req_queue(AMP_ID) + + mock_update_count.assert_called_once() + mock_delete.assert_called_once() + + 
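+    # Clearing the whole queue must reset the used build-slot count as well
+    # as deleting the queued requests, so the next test checks that both
+    # repository helpers are invoked.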
@mock.patch('octavia.db.api.get_session', mock.MagicMock()) + @mock.patch('octavia.db.repositories.AmphoraBuildReqRepository' + '.delete_all') + @mock.patch('octavia.db.repositories.AmphoraBuildSlotsRepository' + '.update_count') + def test_remove_all_from_build_req_queue(self, + mock_update_count, + mock_delete_all): + self.rate_limit.remove_all_from_build_req_queue() + + mock_update_count.assert_called_once() + mock_delete_all.assert_called_once() + + @mock.patch('octavia.controller.worker.amphora_rate_limit' + '.AmphoraBuildRateLimit.has_build_slot', return_value=True) + @mock.patch('octavia.controller.worker.amphora_rate_limit' + '.AmphoraBuildRateLimit.has_highest_priority', + return_value=True) + @mock.patch('octavia.controller.worker.amphora_rate_limit' + '.AmphoraBuildRateLimit.' + 'update_build_status_and_available_build_slots') + @mock.patch('octavia.controller.worker.amphora_rate_limit' + '.AmphoraBuildRateLimit.remove_all_from_build_req_queue') + @mock.patch('time.sleep') + def test_wait_for_build_slot(self, + mock_time_sleep, + mock_remove_all, + mock_update_status_and_slots_count, + mock_has_high_priority, + mock_has_build_slot): + self.rate_limit.wait_for_build_slot(AMP_ID) + + self.assertTrue(mock_has_build_slot.called) + self.assertTrue(mock_has_high_priority.called) diff --git a/octavia/tests/unit/controller/worker/test_task_utils.py b/octavia/tests/unit/controller/worker/test_task_utils.py new file mode 100644 index 0000000000..9f14be0475 --- /dev/null +++ b/octavia/tests/unit/controller/worker/test_task_utils.py @@ -0,0 +1,359 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
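+
+# TaskUtils provides the best-effort DB status updates used by the controller
+# worker (typically when a flow reverts). Most helpers swallow session
+# failures, so the tests assert that no repository update happens on error;
+# the load balancer helpers retry with tenacity and only raise RetryError
+# after the retries are exhausted.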
+from unittest import mock + +from oslo_utils import uuidutils +import tenacity + +from octavia.common import constants +from octavia.controller.worker import task_utils as task_utilities +import octavia.tests.unit.base as base + + +class TestTaskUtils(base.TestCase): + + def setUp(self): + + self.task_utils = task_utilities.TaskUtils() + + self.AMPHORA_ID = uuidutils.generate_uuid() + self.HEALTH_MON_ID = uuidutils.generate_uuid() + self.L7POLICY_ID = uuidutils.generate_uuid() + self.L7RULE_ID = uuidutils.generate_uuid() + self.LISTENER_ID = uuidutils.generate_uuid() + self.LOADBALANCER_ID = uuidutils.generate_uuid() + self.MEMBER_ID = uuidutils.generate_uuid() + self.POOL_ID = uuidutils.generate_uuid() + + super().setUp() + + @mock.patch('octavia.db.api.session') + @mock.patch('octavia.db.repositories.AmphoraRepository.update') + def test_mark_amphora_status_error(self, + mock_amphora_repo_update, + mock_get_session): + + # Happy path + self.task_utils.mark_amphora_status_error(self.AMPHORA_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_amphora_repo_update.assert_called_once_with( + mock_session, + id=self.AMPHORA_ID, + status=constants.ERROR) + + # Exception path + mock_amphora_repo_update.reset_mock() + mock_get_session.side_effect = Exception('fail') + + self.task_utils.mark_amphora_status_error(self.AMPHORA_ID) + + self.assertFalse(mock_amphora_repo_update.called) + + @mock.patch('octavia.db.api.session') + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_mark_health_mon_prov_status_error(self, + mock_health_mon_repo_update, + mock_get_session): + + # Happy path + self.task_utils.mark_health_mon_prov_status_error(self.HEALTH_MON_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + id=self.HEALTH_MON_ID, + provisioning_status=constants.ERROR) + + # Exception path + mock_health_mon_repo_update.reset_mock() + mock_get_session.side_effect = Exception('fail') + + self.task_utils.mark_health_mon_prov_status_error(self.HEALTH_MON_ID) + + self.assertFalse(mock_health_mon_repo_update.called) + + @mock.patch('octavia.db.api.session') + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_mark_l7policy_prov_status_error(self, + mock_l7policy_repo_update, + mock_get_session): + + # Happy path + self.task_utils.mark_l7policy_prov_status_error(self.L7POLICY_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + id=self.L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Exception path + mock_l7policy_repo_update.reset_mock() + mock_get_session.side_effect = Exception('fail') + + self.task_utils.mark_l7policy_prov_status_error(self.L7POLICY_ID) + + self.assertFalse(mock_l7policy_repo_update.called) + + @mock.patch('octavia.db.api.session') + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + def test_mark_l7rule_prov_status_error(self, + mock_l7rule_repo_update, + mock_get_session): + + # Happy path + self.task_utils.mark_l7rule_prov_status_error(self.L7RULE_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + id=self.L7RULE_ID, + provisioning_status=constants.ERROR) + + # Exception path + mock_l7rule_repo_update.reset_mock() + mock_get_session.side_effect = Exception('fail') + + self.task_utils.mark_l7rule_prov_status_error(self.L7RULE_ID) + + 
self.assertFalse(mock_l7rule_repo_update.called) + + @mock.patch('octavia.db.api.session') + @mock.patch('octavia.db.repositories.ListenerRepository.update') + def test_mark_listener_prov_status_active(self, + mock_listener_repo_update, + mock_get_session): + + # Happy path + self.task_utils.mark_listener_prov_status_active(self.LISTENER_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_listener_repo_update.assert_called_once_with( + mock_session, + id=self.LISTENER_ID, + provisioning_status=constants.ACTIVE) + + # Exception path + mock_listener_repo_update.reset_mock() + mock_get_session.side_effect = Exception('fail') + + self.task_utils.mark_listener_prov_status_active(self.LISTENER_ID) + + self.assertFalse(mock_listener_repo_update.called) + + @mock.patch('octavia.db.api.session') + @mock.patch('octavia.db.repositories.ListenerRepository.update') + def test_mark_listener_prov_status_error(self, + mock_listener_repo_update, + mock_get_session): + + # Happy path + self.task_utils.mark_listener_prov_status_error(self.LISTENER_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_listener_repo_update.assert_called_once_with( + mock_session, + id=self.LISTENER_ID, + provisioning_status=constants.ERROR) + + # Exception path + mock_listener_repo_update.reset_mock() + mock_get_session.side_effect = Exception('fail') + + self.task_utils.mark_listener_prov_status_error(self.LISTENER_ID) + + self.assertFalse(mock_listener_repo_update.called) + + @mock.patch('octavia.db.api.session') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + @mock.patch('tenacity.nap.time') + # mock LOG so we don't fill the console with log messages from + # tenacity.retry + @mock.patch('octavia.controller.worker.task_utils.LOG') + def test_mark_loadbalancer_prov_status_active(self, + mock_LOG, + mock_time, + mock_lb_repo_update, + mock_get_session): + + # Happy path + self.task_utils.mark_loadbalancer_prov_status_active( + self.LOADBALANCER_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_lb_repo_update.assert_called_once_with( + mock_session, + id=self.LOADBALANCER_ID, + provisioning_status=constants.ACTIVE) + + # Exception path + mock_lb_repo_update.reset_mock() + mock_get_session.side_effect = Exception('fail') + + self.assertRaises( + tenacity.RetryError, + self.task_utils.mark_loadbalancer_prov_status_active, + self.LOADBALANCER_ID) + + self.assertFalse(mock_lb_repo_update.called) + + # Exceptions then happy path + mock_get_session.reset_mock(side_effect=True) + mock_lb_repo_update.reset_mock() + + mock_session = mock_get_session() + mock_session_context = mock_session.begin().__enter__() + mock_get_session.side_effect = [ + Exception('fail'), + Exception('fail'), + Exception('fail'), + mock_session] + + self.task_utils.mark_loadbalancer_prov_status_active( + self.LOADBALANCER_ID) + + mock_lb_repo_update.assert_called_once_with( + mock_session_context, + id=self.LOADBALANCER_ID, + provisioning_status=constants.ACTIVE) + + @mock.patch('octavia.db.api.session') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + @mock.patch('tenacity.nap.time') + # mock LOG so we don't fill the console with log messages from + # tenacity.retry + @mock.patch('octavia.controller.worker.task_utils.LOG') + def test_mark_loadbalancer_prov_status_error(self, + mock_LOG, + mock_time, + mock_lb_repo_update, + mock_get_session): + + # Happy path + self.task_utils.mark_loadbalancer_prov_status_error( + self.LOADBALANCER_ID) + + mock_session = 
mock_get_session().begin().__enter__()
+
+        mock_lb_repo_update.assert_called_once_with(
+            mock_session,
+            id=self.LOADBALANCER_ID,
+            provisioning_status=constants.ERROR)
+
+        # Exception path
+        mock_lb_repo_update.reset_mock()
+        mock_get_session.side_effect = Exception('fail')
+
+        self.assertRaises(tenacity.RetryError,
+                          self.task_utils.mark_loadbalancer_prov_status_error,
+                          self.LOADBALANCER_ID)
+
+        self.assertFalse(mock_lb_repo_update.called)
+
+        # Exceptions then happy path
+        mock_get_session.reset_mock(side_effect=True)
+        mock_lb_repo_update.reset_mock()
+
+        mock_session = mock_get_session()
+        mock_session_context = mock_session.begin().__enter__()
+        mock_get_session.side_effect = [
+            Exception('fail'),
+            Exception('fail'),
+            Exception('fail'),
+            mock_session]
+
+        self.task_utils.mark_loadbalancer_prov_status_error(
+            self.LOADBALANCER_ID)
+
+        mock_lb_repo_update.assert_called_once_with(
+            mock_session_context,
+            id=self.LOADBALANCER_ID,
+            provisioning_status=constants.ERROR)
+
+    @mock.patch('octavia.db.api.session')
+    @mock.patch('octavia.db.repositories.MemberRepository.update')
+    def test_mark_member_prov_status_error(self,
+                                           mock_member_repo_update,
+                                           mock_get_session):
+
+        # Happy path
+        self.task_utils.mark_member_prov_status_error(self.MEMBER_ID)
+
+        mock_session = mock_get_session().begin().__enter__()
+
+        mock_member_repo_update.assert_called_once_with(
+            mock_session,
+            id=self.MEMBER_ID,
+            provisioning_status=constants.ERROR)
+
+        # Exception path
+        mock_member_repo_update.reset_mock()
+        mock_get_session.side_effect = Exception('fail')
+
+        self.task_utils.mark_member_prov_status_error(self.MEMBER_ID)
+
+        self.assertFalse(mock_member_repo_update.called)
+
+    @mock.patch('octavia.db.api.session')
+    @mock.patch('octavia.db.repositories.PoolRepository.update')
+    def test_mark_pool_prov_status_error(self,
+                                         mock_pool_repo_update,
+                                         mock_get_session):
+
+        # Happy path
+        self.task_utils.mark_pool_prov_status_error(self.POOL_ID)
+
+        mock_session = mock_get_session().begin().__enter__()
+
+        mock_pool_repo_update.assert_called_once_with(
+            mock_session,
+            id=self.POOL_ID,
+            provisioning_status=constants.ERROR)
+
+        # Exception path
+        mock_pool_repo_update.reset_mock()
+        mock_get_session.side_effect = Exception('fail')
+
+        self.task_utils.mark_pool_prov_status_error(self.POOL_ID)
+
+        self.assertFalse(mock_pool_repo_update.called)
+
+    @mock.patch('octavia.db.api.session')
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
+    def test_get_current_loadbalancer_from_db(self, mock_lb_repo_get,
+                                              mock_get_session):
+        # Happy path
+        self.task_utils.get_current_loadbalancer_from_db(self.LOADBALANCER_ID)
+
+        mock_session = mock_get_session().begin().__enter__()
+
+        mock_lb_repo_get.assert_called_once_with(
+            mock_session,
+            id=self.LOADBALANCER_ID)
+
+        # Exception path
+        mock_lb_repo_get.reset_mock()
+        mock_get_session.side_effect = Exception('fail')
+
+        self.task_utils.get_current_loadbalancer_from_db(self.LOADBALANCER_ID)
+
+        self.assertFalse(mock_lb_repo_get.called)
diff --git a/octavia/tests/unit/controller/worker/v2/__init__.py b/octavia/tests/unit/controller/worker/v2/__init__.py
new file mode 100644
index 0000000000..94e731d201
--- /dev/null
+++ b/octavia/tests/unit/controller/worker/v2/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/worker/v2/flows/__init__.py b/octavia/tests/unit/controller/worker/v2/flows/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py new file mode 100644 index 0000000000..8179e0e482 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py @@ -0,0 +1,398 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.common import data_models +from octavia.controller.worker.v2.flows import amphora_flows +import octavia.tests.unit.base as base + +AUTH_VERSION = '2' + + +# NOTE: We patch the get_network_driver for all the calls so we don't +# inadvertently make real calls. 
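+# Decorating the class applies the patch to every test method, and each
+# method then receives the patched driver as an extra positional argument.
+# It is roughly equivalent to stacking this on every test by hand:
+#
+#     @mock.patch('octavia.common.utils.get_network_driver')
+#     def test_something(self, mock_get_net_driver):
+#         ...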
+@mock.patch('octavia.common.utils.get_network_driver') +class TestAmphoraFlows(base.TestCase): + + def setUp(self): + super().setUp() + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config( + group="controller_worker", + amphora_driver='amphora_haproxy_rest_driver') + self.conf.config(group="nova", enable_anti_affinity=False) + self.AmpFlow = amphora_flows.AmphoraFlows() + self.amp1 = data_models.Amphora(id=1) + self.amp2 = data_models.Amphora(id=2) + self.amp3 = data_models.Amphora(id=3, status=constants.DELETED) + self.amp4 = data_models.Amphora(id=uuidutils.generate_uuid()) + self.lb = data_models.LoadBalancer( + id=4, amphorae=[self.amp1, self.amp2, self.amp3]) + + def test_get_amphora_for_lb_flow(self, mock_get_net_driver): + + amp_flow = self.AmpFlow.get_amphora_for_lb_subflow( + 'SOMEPREFIX', constants.ROLE_STANDALONE) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FLAVOR, amp_flow.requires) + self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires) + self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(5, len(amp_flow.requires)) + + def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver): + + self.AmpFlow = amphora_flows.AmphoraFlows() + + amp_flow = self.AmpFlow.get_amphora_for_lb_subflow( + 'SOMEPREFIX', constants.ROLE_STANDALONE) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FLAVOR, amp_flow.requires) + self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires) + self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(5, len(amp_flow.requires)) + + def test_get_cert_master_create_amphora_for_lb_flow( + self, mock_get_net_driver): + + self.AmpFlow = amphora_flows.AmphoraFlows() + + amp_flow = self.AmpFlow.get_amphora_for_lb_subflow( + 'SOMEPREFIX', constants.ROLE_MASTER) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.FLAVOR, amp_flow.requires) + self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires) + self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(5, len(amp_flow.requires)) + + def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow( + self, mock_get_net_driver): + + self.conf.config(group="nova", enable_anti_affinity=True) + + self.AmpFlow = amphora_flows.AmphoraFlows() + 
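+        # A fresh AmphoraFlows instance is created after toggling the
+        # anti-affinity option, so anything the flows read from the
+        # configuration at construction time sees the override above.
+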
amp_flow = self.AmpFlow.get_amphora_for_lb_subflow(
+            'SOMEPREFIX', constants.ROLE_MASTER)
+
+        self.assertIsInstance(amp_flow, flow.Flow)
+
+        self.assertIn(constants.FLAVOR, amp_flow.requires)
+        self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
+        self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires)
+
+        self.assertIn(constants.AMPHORA, amp_flow.provides)
+        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
+        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
+        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
+        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
+
+        self.assertEqual(5, len(amp_flow.provides))
+        self.assertEqual(5, len(amp_flow.requires))
+        self.conf.config(group="nova", enable_anti_affinity=False)
+
+    def test_get_cert_backup_create_amphora_for_lb_flow(
+            self, mock_get_net_driver):
+        self.AmpFlow = amphora_flows.AmphoraFlows()
+
+        amp_flow = self.AmpFlow.get_amphora_for_lb_subflow(
+            'SOMEPREFIX', constants.ROLE_BACKUP)
+
+        self.assertIsInstance(amp_flow, flow.Flow)
+
+        self.assertIn(constants.FLAVOR, amp_flow.requires)
+        self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
+
+        self.assertIn(constants.AMPHORA, amp_flow.provides)
+        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
+        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
+        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
+        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
+
+        self.assertEqual(5, len(amp_flow.provides))
+        self.assertEqual(5, len(amp_flow.requires))
+
+    def test_get_cert_bogus_create_amphora_for_lb_flow(
+            self, mock_get_net_driver):
+        self.AmpFlow = amphora_flows.AmphoraFlows()
+
+        amp_flow = self.AmpFlow.get_amphora_for_lb_subflow(
+            'SOMEPREFIX', 'BOGUS_ROLE')
+
+        self.assertIsInstance(amp_flow, flow.Flow)
+
+        self.assertIn(constants.FLAVOR, amp_flow.requires)
+        self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
+
+        self.assertIn(constants.AMPHORA, amp_flow.provides)
+        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
+        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
+        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
+        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
+
+        self.assertEqual(5, len(amp_flow.provides))
+        self.assertEqual(5, len(amp_flow.requires))
+
+    def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow(
+            self, mock_get_net_driver):
+        self.conf.config(group="nova", enable_anti_affinity=True)
+
+        self.AmpFlow = amphora_flows.AmphoraFlows()
+        amp_flow = self.AmpFlow.get_amphora_for_lb_subflow(
+            'SOMEPREFIX', constants.ROLE_BACKUP)
+
+        self.assertIsInstance(amp_flow, flow.Flow)
+
+        self.assertIn(constants.FLAVOR, amp_flow.requires)
+        self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
+        self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires)
+
+        self.assertIn(constants.AMPHORA, amp_flow.provides)
+        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
+        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
+
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + + self.assertEqual(5, len(amp_flow.provides)) + self.assertEqual(5, len(amp_flow.requires)) + self.conf.config(group="nova", enable_anti_affinity=False) + + def test_get_delete_amphora_flow(self, mock_get_net_driver): + + amp_flow = self.AmpFlow.get_delete_amphora_flow(self.amp4.to_dict()) + + self.assertIsInstance(amp_flow, flow.Flow) + + # This flow injects the required data at flow compile time. + + self.assertEqual(0, len(amp_flow.provides)) + self.assertEqual(0, len(amp_flow.requires)) + + def test_get_failover_flow_act_stdby(self, mock_get_net_driver): + failed_amphora = data_models.Amphora( + id=uuidutils.generate_uuid(), role=constants.ROLE_MASTER, + load_balancer_id=uuidutils.generate_uuid()).to_dict() + + amp_flow = self.AmpFlow.get_failover_amphora_flow( + failed_amphora, 2) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires) + self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires) + self.assertIn(constants.FLAVOR, amp_flow.requires) + self.assertIn(constants.LOADBALANCER, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.VIP, amp_flow.requires) + + self.assertIn(constants.UPDATED_PORTS, amp_flow.provides) + self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) + self.assertIn(constants.BASE_PORT, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.DELTA, amp_flow.provides) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + self.assertIn(constants.VIP_SG_ID, amp_flow.provides) + + self.assertEqual(8, len(amp_flow.requires)) + self.assertEqual(14, len(amp_flow.provides)) + + def test_get_failover_flow_standalone(self, mock_get_net_driver): + failed_amphora = data_models.Amphora( + id=uuidutils.generate_uuid(), role=constants.ROLE_STANDALONE, + load_balancer_id=uuidutils.generate_uuid(), + vrrp_ip='2001:3b8::32').to_dict() + + amp_flow = self.AmpFlow.get_failover_amphora_flow( + failed_amphora, 1) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires) + self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires) + self.assertIn(constants.FLAVOR, amp_flow.requires) + self.assertIn(constants.LOADBALANCER, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.VIP, amp_flow.requires) + + self.assertIn(constants.UPDATED_PORTS, amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) + self.assertIn(constants.BASE_PORT, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.DELTA, amp_flow.provides) + 
self.assertIn(constants.LOADBALANCER, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + self.assertIn(constants.VIP_SG_ID, amp_flow.provides) + + self.assertEqual(8, len(amp_flow.requires)) + self.assertEqual(13, len(amp_flow.provides)) + + def test_get_failover_flow_bogus_role(self, mock_get_net_driver): + failed_amphora = data_models.Amphora(id=uuidutils.generate_uuid(), + role='bogus').to_dict() + + amp_flow = self.AmpFlow.get_failover_amphora_flow( + failed_amphora, 1) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.LOADBALANCER, amp_flow.requires) + + self.assertIn(constants.VIP_SG_ID, amp_flow.provides) + + self.assertEqual(2, len(amp_flow.requires)) + self.assertEqual(1, len(amp_flow.provides)) + + def test_cert_rotate_amphora_flow(self, mock_get_net_driver): + self.AmpFlow = amphora_flows.AmphoraFlows() + + amp_rotate_flow = self.AmpFlow.cert_rotate_amphora_flow() + self.assertIsInstance(amp_rotate_flow, flow.Flow) + + self.assertIn(constants.SERVER_PEM, amp_rotate_flow.provides) + self.assertIn(constants.AMPHORA, amp_rotate_flow.requires) + + self.assertEqual(1, len(amp_rotate_flow.provides)) + self.assertEqual(2, len(amp_rotate_flow.requires)) + + def test_get_vrrp_subflow(self, mock_get_net_driver): + vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123') + + self.assertIsInstance(vrrp_subflow, flow.Flow) + + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) + self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) + self.assertIn(constants.AMPHORAE_STATUS, vrrp_subflow.provides) + + self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) + self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) + self.assertIn(constants.AMPHORA_ID, vrrp_subflow.requires) + + self.assertEqual(3, len(vrrp_subflow.provides)) + self.assertEqual(3, len(vrrp_subflow.requires)) + + def test_get_vrrp_subflow_dont_get_status(self, mock_get_net_driver): + vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123', + get_amphorae_status=False) + + self.assertIsInstance(vrrp_subflow, flow.Flow) + + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) + self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) + + self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) + self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) + self.assertIn(constants.AMPHORA_ID, vrrp_subflow.requires) + self.assertIn(constants.AMPHORAE_STATUS, vrrp_subflow.requires) + + self.assertEqual(2, len(vrrp_subflow.provides)) + self.assertEqual(4, len(vrrp_subflow.requires)) + + def test_get_vrrp_subflow_dont_create_vrrp_group( + self, mock_get_net_driver): + vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123', + create_vrrp_group=False) + + self.assertIsInstance(vrrp_subflow, flow.Flow) + + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides) + self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides) + self.assertIn(constants.AMPHORAE_STATUS, vrrp_subflow.provides) + + self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires) + self.assertIn(constants.AMPHORAE, vrrp_subflow.requires) + self.assertIn(constants.AMPHORA_ID, vrrp_subflow.requires) + + self.assertEqual(3, len(vrrp_subflow.provides)) + self.assertEqual(3, len(vrrp_subflow.requires)) + + def test_update_amphora_config_flow(self, mock_get_net_driver): + + amp_flow = self.AmpFlow.update_amphora_config_flow() + + self.assertIsInstance(amp_flow, flow.Flow) + + 
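+        # The amphora config update flow only consumes taskflow inputs;
+        # it stores nothing back, hence the empty provides set below.
+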
self.assertIn(constants.AMPHORA, amp_flow.requires) + self.assertIn(constants.FLAVOR, amp_flow.requires) + + self.assertEqual(2, len(amp_flow.requires)) + self.assertEqual(0, len(amp_flow.provides)) + + def test__retry_flow(self, mock_get_net_driver): + amp_flow = self.AmpFlow._retry_flow('test_flow') + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.AMPHORA, amp_flow.requires) + + self.assertEqual(1, len(amp_flow.requires)) + self.assertEqual(0, len(amp_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_health_monitor_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_health_monitor_flows.py new file mode 100644 index 0000000000..1c736a7d48 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_health_monitor_flows.py @@ -0,0 +1,76 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import health_monitor_flows +import octavia.tests.unit.base as base + + +class TestHealthMonitorFlows(base.TestCase): + + def setUp(self): + self.HealthMonitorFlow = health_monitor_flows.HealthMonitorFlows() + + super().setUp() + + def test_get_create_health_monitor_flow(self): + + health_mon_flow = (self.HealthMonitorFlow. + get_create_health_monitor_flow()) + + self.assertIsInstance(health_mon_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, health_mon_flow.requires) + self.assertIn(constants.LOADBALANCER, health_mon_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, health_mon_flow.requires) + self.assertIn(constants.POOL_ID, health_mon_flow.requires) + + self.assertEqual(5, len(health_mon_flow.requires)) + self.assertEqual(0, len(health_mon_flow.provides)) + + def test_get_delete_health_monitor_flow(self): + + health_mon_flow = (self.HealthMonitorFlow. + get_delete_health_monitor_flow()) + + self.assertIsInstance(health_mon_flow, flow.Flow) + + self.assertIn(constants.HEALTH_MON, health_mon_flow.requires) + self.assertIn(constants.LISTENERS, health_mon_flow.requires) + self.assertIn(constants.LOADBALANCER, health_mon_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, health_mon_flow.requires) + self.assertIn(constants.POOL_ID, health_mon_flow.requires) + self.assertIn(constants.PROJECT_ID, health_mon_flow.requires) + + self.assertEqual(6, len(health_mon_flow.requires)) + self.assertEqual(0, len(health_mon_flow.provides)) + + def test_get_update_health_monitor_flow(self): + + health_mon_flow = (self.HealthMonitorFlow. 
+ get_update_health_monitor_flow()) + + self.assertIsInstance(health_mon_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, health_mon_flow.requires) + self.assertIn(constants.LOADBALANCER, health_mon_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, health_mon_flow.requires) + self.assertIn(constants.HEALTH_MON, health_mon_flow.requires) + self.assertIn(constants.UPDATE_DICT, health_mon_flow.requires) + + self.assertEqual(6, len(health_mon_flow.requires)) + self.assertEqual(0, len(health_mon_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py new file mode 100644 index 0000000000..4a216553a9 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py @@ -0,0 +1,68 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import l7policy_flows +import octavia.tests.unit.base as base + + +class TestL7PolicyFlows(base.TestCase): + + def setUp(self): + self.L7PolicyFlow = l7policy_flows.L7PolicyFlows() + + super().setUp() + + def test_get_create_l7policy_flow(self): + + l7policy_flow = self.L7PolicyFlow.get_create_l7policy_flow() + + self.assertIsInstance(l7policy_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, l7policy_flow.requires) + self.assertIn(constants.L7POLICY, l7policy_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, l7policy_flow.requires) + + self.assertEqual(3, len(l7policy_flow.requires)) + self.assertEqual(0, len(l7policy_flow.provides)) + + def test_get_delete_l7policy_flow(self): + + l7policy_flow = self.L7PolicyFlow.get_delete_l7policy_flow() + + self.assertIsInstance(l7policy_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, l7policy_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, l7policy_flow.requires) + self.assertIn(constants.L7POLICY, l7policy_flow.requires) + + self.assertEqual(3, len(l7policy_flow.requires)) + self.assertEqual(0, len(l7policy_flow.provides)) + + def test_get_update_l7policy_flow(self): + + l7policy_flow = self.L7PolicyFlow.get_update_l7policy_flow() + + self.assertIsInstance(l7policy_flow, flow.Flow) + + self.assertIn(constants.L7POLICY, l7policy_flow.requires) + self.assertIn(constants.LISTENERS, l7policy_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, l7policy_flow.requires) + self.assertIn(constants.UPDATE_DICT, l7policy_flow.requires) + + self.assertEqual(4, len(l7policy_flow.requires)) + self.assertEqual(0, len(l7policy_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_l7rule_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_l7rule_flows.py new file mode 100644 index 0000000000..3dc6cb709d --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_l7rule_flows.py @@ -0,0 +1,74 @@ +# Copyright 2016 Blue Box, an IBM Company +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import l7rule_flows +import octavia.tests.unit.base as base + + +class TestL7RuleFlows(base.TestCase): + + def setUp(self): + self.L7RuleFlow = l7rule_flows.L7RuleFlows() + + super().setUp() + + def test_get_create_l7rule_flow(self): + + l7rule_flow = self.L7RuleFlow.get_create_l7rule_flow() + + self.assertIsInstance(l7rule_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, l7rule_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, l7rule_flow.requires) + self.assertIn(constants.L7RULE, l7rule_flow.requires) + self.assertIn(constants.L7POLICY, l7rule_flow.requires) + self.assertIn(constants.L7POLICY_ID, l7rule_flow.requires) + + self.assertEqual(5, len(l7rule_flow.requires)) + self.assertEqual(0, len(l7rule_flow.provides)) + + def test_get_delete_l7rule_flow(self): + + l7rule_flow = self.L7RuleFlow.get_delete_l7rule_flow() + + self.assertIsInstance(l7rule_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, l7rule_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, l7rule_flow.requires) + self.assertIn(constants.L7RULE, l7rule_flow.requires) + self.assertIn(constants.L7POLICY, l7rule_flow.requires) + self.assertIn(constants.L7POLICY_ID, l7rule_flow.requires) + + self.assertEqual(5, len(l7rule_flow.requires)) + self.assertEqual(0, len(l7rule_flow.provides)) + + def test_get_update_l7rule_flow(self): + + l7rule_flow = self.L7RuleFlow.get_update_l7rule_flow() + + self.assertIsInstance(l7rule_flow, flow.Flow) + + self.assertIn(constants.L7RULE, l7rule_flow.requires) + self.assertIn(constants.LISTENERS, l7rule_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, l7rule_flow.requires) + self.assertIn(constants.UPDATE_DICT, l7rule_flow.requires) + self.assertIn(constants.L7POLICY, l7rule_flow.requires) + self.assertIn(constants.L7POLICY_ID, l7rule_flow.requires) + + self.assertEqual(6, len(l7rule_flow.requires)) + self.assertEqual(0, len(l7rule_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py new file mode 100644 index 0000000000..2f03f87a0a --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py @@ -0,0 +1,137 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +from unittest import mock + +from oslo_utils import uuidutils +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import listener_flows +import octavia.tests.unit.base as base + + +# NOTE: We patch the get_network_driver for all the calls so we don't +# inadvertently make real calls. +@mock.patch('octavia.common.utils.get_network_driver') +class TestListenerFlows(base.TestCase): + + def setUp(self): + self.ListenerFlow = listener_flows.ListenerFlows() + + super().setUp() + + def test_get_create_listener_flow(self, mock_get_net_driver): + + flavor_dict = { + constants.SRIOV_VIP: True, + constants.LOADBALANCER_TOPOLOGY: constants.TOPOLOGY_SINGLE} + listener_flow = self.ListenerFlow.get_create_listener_flow( + flavor_dict=flavor_dict) + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, listener_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, listener_flow.requires) + + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, + listener_flow.provides) + self.assertIn(constants.AMPHORAE, listener_flow.provides) + self.assertIn(constants.AMPHORA_FIREWALL_RULES, listener_flow.provides) + + self.assertEqual(2, len(listener_flow.requires)) + self.assertEqual(4, len(listener_flow.provides)) + + def test_get_delete_listener_flow(self, mock_get_net_driver): + flavor_dict = { + constants.SRIOV_VIP: True, + constants.LOADBALANCER_TOPOLOGY: constants.TOPOLOGY_SINGLE} + listener_flow = self.ListenerFlow.get_delete_listener_flow( + flavor_dict=flavor_dict) + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn(constants.LISTENER, listener_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, listener_flow.requires) + self.assertIn(constants.PROJECT_ID, listener_flow.requires) + + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, + listener_flow.provides) + self.assertIn(constants.AMPHORAE, listener_flow.provides) + self.assertIn(constants.AMPHORA_FIREWALL_RULES, listener_flow.provides) + + self.assertEqual(3, len(listener_flow.requires)) + self.assertEqual(4, len(listener_flow.provides)) + + def test_get_delete_listener_internal_flow(self, mock_get_net_driver): + flavor_dict = { + constants.SRIOV_VIP: True, + constants.LOADBALANCER_TOPOLOGY: constants.TOPOLOGY_SINGLE} + fake_listener = {constants.LISTENER_ID: uuidutils.generate_uuid()} + listener_flow = self.ListenerFlow.get_delete_listener_internal_flow( + fake_listener, flavor_dict=flavor_dict) + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, listener_flow.requires) + self.assertIn(constants.PROJECT_ID, listener_flow.requires) + + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, + listener_flow.provides) + self.assertIn(constants.AMPHORAE, listener_flow.provides) + self.assertIn(constants.AMPHORA_FIREWALL_RULES, listener_flow.provides) + + self.assertEqual(2, len(listener_flow.requires)) + self.assertEqual(4, len(listener_flow.provides)) + + def test_get_update_listener_flow(self, mock_get_net_driver): + flavor_dict = { + constants.SRIOV_VIP: True, + constants.LOADBALANCER_TOPOLOGY: constants.TOPOLOGY_SINGLE} + + listener_flow = self.ListenerFlow.get_update_listener_flow( + flavor_dict=flavor_dict) + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn(constants.LISTENER, listener_flow.requires) + self.assertIn(constants.UPDATE_DICT, listener_flow.requires) + self.assertIn(constants.LISTENERS, listener_flow.requires) + 
self.assertIn(constants.LOADBALANCER_ID, listener_flow.requires) + + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, + listener_flow.provides) + self.assertIn(constants.AMPHORAE, listener_flow.provides) + self.assertIn(constants.AMPHORA_FIREWALL_RULES, listener_flow.provides) + + self.assertEqual(4, len(listener_flow.requires)) + self.assertEqual(4, len(listener_flow.provides)) + + def test_get_create_all_listeners_flow(self, mock_get_net_driver): + flavor_dict = { + constants.SRIOV_VIP: True, + constants.LOADBALANCER_TOPOLOGY: constants.TOPOLOGY_ACTIVE_STANDBY} + listeners_flow = self.ListenerFlow.get_create_all_listeners_flow( + flavor_dict=flavor_dict) + self.assertIsInstance(listeners_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER, listeners_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, listeners_flow.requires) + self.assertIn(constants.LOADBALANCER, listeners_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, + listeners_flow.provides) + self.assertIn(constants.AMPHORAE, listeners_flow.provides) + self.assertIn(constants.AMPHORA_FIREWALL_RULES, + listeners_flow.provides) + self.assertEqual(2, len(listeners_flow.requires)) + self.assertEqual(6, len(listeners_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py new file mode 100644 index 0000000000..4d09ac78f3 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py @@ -0,0 +1,571 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +from taskflow.patterns import linear_flow as flow +from taskflow.patterns import unordered_flow + +from octavia.common import constants +from octavia.common import exceptions +from octavia.controller.worker.v2.flows import flow_utils +from octavia.controller.worker.v2.flows import load_balancer_flows +import octavia.tests.unit.base as base + + +class MockNOTIFIER(mock.MagicMock): + info = mock.MagicMock() + + +# NOTE: We patch the get_network_driver for all the calls so we don't +# inadvertently make real calls. 
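+# The per-test NOTIFIER patches below replace octavia.common.rpc.NOTIFIER
+# with MockNOTIFIER, so any flow task that emits an event notification
+# talks to a MagicMock instead of a real oslo.messaging notifier.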
+@mock.patch('octavia.common.utils.get_network_driver') +class TestLoadBalancerFlows(base.TestCase): + + def setUp(self): + super().setUp() + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config( + group="controller_worker", + amphora_driver='amphora_haproxy_rest_driver') + self.conf.config(group="nova", enable_anti_affinity=False) + self.LBFlow = load_balancer_flows.LoadBalancerFlows() + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_create_load_balancer_flow(self, mock_get_net_driver, + mock_notifier): + amp_flow = self.LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_SINGLE) + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_create_active_standby_load_balancer_flow( + self, mock_get_net_driver, mock_notifier): + amp_flow = self.LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_ACTIVE_STANDBY) + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_create_anti_affinity_active_standby_load_balancer_flow( + self, mock_get_net_driver, mock_notifier): + self.conf.config(group="nova", enable_anti_affinity=True) + + self._LBFlow = load_balancer_flows.LoadBalancerFlows() + amp_flow = self._LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_ACTIVE_STANDBY) + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.SERVER_GROUP_ID, amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.conf.config(group="nova", enable_anti_affinity=False) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_create_bogus_topology_load_balancer_flow( + self, mock_get_net_driver, mock_notifier): + self.assertRaises(exceptions.InvalidTopology, + self.LBFlow.get_create_load_balancer_flow, + 'BOGUS') + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_create_load_balancer_flow_SRIOV(self, mock_get_net_driver, + mock_notifier): + amp_flow = self.LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_SINGLE, flavor_dict={constants.SRIOV_VIP: True}) + self.assertIsInstance(amp_flow, flow.Flow) + self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires) + self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires) + self.assertIn(constants.FLAVOR, amp_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires) + self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.ADDITIONAL_VIPS, amp_flow.provides) + self.assertIn(constants.AMP_DATA, 
amp_flow.provides) + self.assertIn(constants.AMPHORA, amp_flow.provides) + self.assertIn(constants.AMPHORA_ID, amp_flow.provides) + self.assertIn(constants.AMPHORA_NETWORK_CONFIG, amp_flow.provides) + self.assertIn(constants.COMPUTE_ID, amp_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + self.assertIn(constants.PORT_DATA, amp_flow.provides) + self.assertIn(constants.SERVER_PEM, amp_flow.provides) + self.assertIn(constants.SUBNET, amp_flow.provides) + self.assertIn(constants.VIP, amp_flow.provides) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_delete_load_balancer_flow(self, mock_get_net_driver, + mock_notifier): + lb_mock = mock.Mock() + listener_mock = mock.Mock() + listener_mock.id = '123' + lb_mock.listeners = [listener_mock] + + lb_flow = self.LBFlow.get_delete_load_balancer_flow(lb_mock) + + self.assertIsInstance(lb_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER, lb_flow.requires) + self.assertIn(constants.SERVER_GROUP_ID, lb_flow.requires) + self.assertIn(constants.PROJECT_ID, lb_flow.requires) + + self.assertEqual(0, len(lb_flow.provides)) + self.assertEqual(3, len(lb_flow.requires)) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock()) + def test_get_delete_load_balancer_flow_cascade(self, mock_session, + mock_get_lb, + mock_get_net_driver, + mock_notifier): + lb_mock = mock.Mock() + listener_mock = mock.Mock() + listener_mock.id = '123' + listener_mock.to_dict.return_value = {'id': '123'} + lb_mock.listeners = [listener_mock] + lb_mock.id = '321' + lb_mock.project_id = '876' + pool_mock = mock.Mock() + pool_mock.id = '345' + pool_mock.to_dict.return_value = {constants.ID: pool_mock.id} + pool_mock.listeners = None + pool_mock.health_monitor = None + pool_mock.members = None + lb_mock.pools = [pool_mock] + l7_mock = mock.Mock() + l7_mock.id = '678' + listener_mock.l7policies = [l7_mock] + mock_get_lb.return_value = lb_mock + lb_dict = {constants.LOADBALANCER_ID: lb_mock.id} + + listeners = flow_utils.get_listeners_on_lb(lb_mock) + pools = flow_utils.get_pools_on_lb(lb_mock) + + lb_flow = self.LBFlow.get_cascade_delete_load_balancer_flow( + lb_dict, listeners, pools) + + self.assertIsInstance(lb_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER, lb_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, lb_flow.requires) + self.assertIn(constants.PROJECT_ID, lb_flow.requires) + self.assertIn(constants.SERVER_GROUP_ID, lb_flow.requires) + + self.assertEqual(1, len(lb_flow.provides)) + self.assertEqual(4, len(lb_flow.requires)) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_update_load_balancer_flow(self, mock_get_net_driver, + mock_notifier): + + lb_flow = self.LBFlow.get_update_load_balancer_flow() + + self.assertIsInstance(lb_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER, lb_flow.requires) + self.assertIn(constants.UPDATE_DICT, lb_flow.requires) + self.assertIn(constants.VIP_SG_ID, lb_flow.provides) + + self.assertEqual(1, len(lb_flow.provides)) + self.assertEqual(3, len(lb_flow.requires)) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_post_lb_amp_association_flow(self, mock_get_net_driver, + mock_notifier): + amp_flow = self.LBFlow.get_post_lb_amp_association_flow( + 
'123', constants.TOPOLOGY_SINGLE) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + + self.assertEqual(1, len(amp_flow.provides)) + self.assertEqual(2, len(amp_flow.requires)) + + # Test Active/Standby path + amp_flow = self.LBFlow.get_post_lb_amp_association_flow( + '123', constants.TOPOLOGY_ACTIVE_STANDBY) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.AMPHORA_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) + self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + + self.assertEqual(3, len(amp_flow.requires), amp_flow.requires) + self.assertEqual(5, len(amp_flow.provides), amp_flow.provides) + + amp_flow = self.LBFlow.get_post_lb_amp_association_flow( + '123', constants.TOPOLOGY_ACTIVE_STANDBY) + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) + self.assertIn(constants.UPDATE_DICT, amp_flow.requires) + self.assertIn(constants.AMPHORA_ID, amp_flow.requires) + + self.assertIn(constants.AMPHORAE, amp_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, amp_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) + self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides) + self.assertIn(constants.LOADBALANCER, amp_flow.provides) + + self.assertEqual(3, len(amp_flow.requires), amp_flow.requires) + self.assertEqual(5, len(amp_flow.provides), amp_flow.provides) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_create_load_balancer_flows_single_listeners( + self, mock_get_net_driver, mock_notifier): + create_flow = ( + self.LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_SINGLE, True + ) + ) + self.assertIsInstance(create_flow, flow.Flow) + self.assertIn(constants.LOADBALANCER_ID, create_flow.requires) + self.assertIn(constants.UPDATE_DICT, create_flow.requires) + self.assertIn(constants.BUILD_TYPE_PRIORITY, create_flow.requires) + self.assertIn(constants.FLAVOR, create_flow.requires) + self.assertIn(constants.AVAILABILITY_ZONE, create_flow.requires) + self.assertIn(constants.SERVER_GROUP_ID, create_flow.requires) + + self.assertIn(constants.LISTENERS, create_flow.provides) + self.assertIn(constants.SUBNET, create_flow.provides) + self.assertIn(constants.AMPHORA, create_flow.provides) + self.assertIn(constants.AMPHORA_ID, create_flow.provides) + self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides) + self.assertIn(constants.AMP_DATA, create_flow.provides) + self.assertIn(constants.COMPUTE_ID, create_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, create_flow.provides) + self.assertIn(constants.LOADBALANCER, create_flow.provides) + self.assertIn(constants.DELTAS, create_flow.provides) + self.assertIn(constants.UPDATED_PORTS, create_flow.provides) + self.assertIn(constants.SERVER_PEM, create_flow.provides) + self.assertIn(constants.VIP, create_flow.provides) + self.assertIn(constants.ADDITIONAL_VIPS, create_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, 
create_flow.provides) + + self.assertEqual(6, len(create_flow.requires)) + self.assertEqual(15, len(create_flow.provides)) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_create_load_balancer_flows_active_standby_listeners( + self, mock_get_net_driver, mock_notifier): + create_flow = ( + self.LBFlow.get_create_load_balancer_flow( + constants.TOPOLOGY_ACTIVE_STANDBY, True + ) + ) + self.assertIsInstance(create_flow, flow.Flow) + self.assertIn(constants.AVAILABILITY_ZONE, create_flow.requires) + self.assertIn(constants.BUILD_TYPE_PRIORITY, create_flow.requires) + self.assertIn(constants.FLAVOR, create_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, create_flow.requires) + self.assertIn(constants.SERVER_GROUP_ID, create_flow.requires) + self.assertIn(constants.UPDATE_DICT, create_flow.requires) + + self.assertIn(constants.UPDATED_PORTS, create_flow.provides) + self.assertIn(constants.AMP_DATA, create_flow.provides) + self.assertIn(constants.AMP_VRRP_INT, create_flow.provides) + self.assertIn(constants.AMPHORA, create_flow.provides) + self.assertIn(constants.AMPHORAE, create_flow.provides) + self.assertIn(constants.AMPHORA_ID, create_flow.provides) + self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, create_flow.provides) + self.assertIn(constants.AMPHORAE_STATUS, create_flow.provides) + self.assertIn(constants.COMPUTE_ID, create_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, create_flow.provides) + self.assertIn(constants.DELTAS, create_flow.provides) + self.assertIn(constants.LOADBALANCER, create_flow.provides) + self.assertIn(constants.LISTENERS, create_flow.provides) + self.assertIn(constants.SERVER_PEM, create_flow.provides) + self.assertIn(constants.SUBNET, create_flow.provides) + self.assertIn(constants.VIP, create_flow.provides) + self.assertIn(constants.ADDITIONAL_VIPS, create_flow.provides) + + self.assertEqual(6, len(create_flow.requires), create_flow.requires) + self.assertEqual(18, len(create_flow.provides), + create_flow.provides) + + def _test_get_failover_LB_flow_single(self, amphorae): + lb_mock = mock.MagicMock() + lb_mock.id = uuidutils.generate_uuid() + lb_mock.topology = constants.TOPOLOGY_SINGLE + + failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock) + + self.assertIsInstance(failover_flow, flow.Flow) + + self.assertIn(constants.AVAILABILITY_ZONE, failover_flow.requires) + self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires) + self.assertIn(constants.FLAVOR, failover_flow.requires) + self.assertIn(constants.LOADBALANCER, failover_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires) + + self.assertIn(constants.UPDATED_PORTS, failover_flow.provides) + self.assertIn(constants.AMPHORA, failover_flow.provides) + self.assertIn(constants.AMPHORA_ID, failover_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, + failover_flow.provides) + self.assertIn(constants.BASE_PORT, failover_flow.provides) + self.assertIn(constants.COMPUTE_ID, failover_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides) + self.assertIn(constants.DELTA, failover_flow.provides) + self.assertIn(constants.LOADBALANCER, failover_flow.provides) + self.assertIn(constants.SERVER_PEM, failover_flow.provides) + self.assertIn(constants.VIP, failover_flow.provides) + self.assertIn(constants.ADDITIONAL_VIPS, failover_flow.provides) + self.assertIn(constants.VIP_SG_ID, failover_flow.provides) + 
self.assertIn(constants.AMPHORA_FIREWALL_RULES, failover_flow.provides) + self.assertIn(constants.SUBNET, failover_flow.provides) + self.assertIn(constants.NEW_AMPHORAE, failover_flow.provides) + + self.assertEqual(6, len(failover_flow.requires), + failover_flow.requires) + self.assertEqual(17, len(failover_flow.provides), + failover_flow.provides) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_failover_LB_flow_no_amps_single(self, mock_get_net_driver, + mock_notifier): + self._test_get_failover_LB_flow_single([]) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_failover_LB_flow_one_amp_single(self, mock_get_net_driver, + mock_notifier): + amphora_dict = {constants.ID: uuidutils.generate_uuid(), + constants.ROLE: constants.ROLE_STANDALONE, + constants.COMPUTE_ID: uuidutils.generate_uuid(), + constants.VRRP_PORT_ID: None, constants.VRRP_IP: None} + + self._test_get_failover_LB_flow_single([amphora_dict]) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_failover_LB_flow_one_bogus_amp_single(self, + mock_get_net_driver, + mock_notifier): + amphora_dict = {constants.ID: uuidutils.generate_uuid(), + constants.ROLE: 'bogus', + constants.COMPUTE_ID: uuidutils.generate_uuid(), + constants.VRRP_PORT_ID: None, constants.VRRP_IP: None} + + self._test_get_failover_LB_flow_single([amphora_dict]) + + @mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) + def test_get_failover_LB_flow_two_amp_single(self, mock_get_net_driver, + mock_notifier): + amphora_dict = {constants.ID: uuidutils.generate_uuid()} + amphora2_dict = {constants.ID: uuidutils.generate_uuid(), + constants.ROLE: constants.ROLE_STANDALONE, + constants.COMPUTE_ID: uuidutils.generate_uuid(), + constants.VRRP_PORT_ID: None, constants.VRRP_IP: None} + + self._test_get_failover_LB_flow_single([amphora_dict, amphora2_dict]) + + def _test_get_failover_LB_flow_no_amps_act_stdby(self, amphorae): + lb_mock = mock.MagicMock() + lb_mock.id = uuidutils.generate_uuid() + lb_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY + + failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock) + + self.assertIsInstance(failover_flow, flow.Flow) + + self.assertIn(constants.AVAILABILITY_ZONE, failover_flow.requires) + self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires) + self.assertIn(constants.FLAVOR, failover_flow.requires) + self.assertIn(constants.LOADBALANCER, failover_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires) + + self.assertIn(constants.UPDATED_PORTS, failover_flow.provides) + self.assertIn(constants.AMPHORA, failover_flow.provides) + self.assertIn(constants.AMPHORA_ID, failover_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, + failover_flow.provides) + self.assertIn(constants.BASE_PORT, failover_flow.provides) + self.assertIn(constants.COMPUTE_ID, failover_flow.provides) + self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides) + self.assertIn(constants.DELTA, failover_flow.provides) + self.assertIn(constants.LOADBALANCER, failover_flow.provides) + self.assertIn(constants.SERVER_PEM, failover_flow.provides) + self.assertIn(constants.VIP, failover_flow.provides) + self.assertIn(constants.ADDITIONAL_VIPS, failover_flow.provides) + self.assertIn(constants.VIP_SG_ID, failover_flow.provides) + self.assertIn(constants.SUBNET, failover_flow.provides) + self.assertIn(constants.AMPHORA_FIREWALL_RULES, failover_flow.provides) + 
self.assertIn(constants.NEW_AMPHORAE, failover_flow.provides)
+
+        self.assertEqual(6, len(failover_flow.requires),
+                         failover_flow.requires)
+        self.assertEqual(17, len(failover_flow.provides),
+                         failover_flow.provides)
+
+    @mock.patch('octavia.common.rpc.NOTIFIER',
+                new_callable=MockNOTIFIER)
+    def test_get_failover_LB_flow_no_amps_act_stdby(self, mock_get_net_driver,
+                                                    mock_notifier):
+        self._test_get_failover_LB_flow_no_amps_act_stdby([])
+
+    @mock.patch('octavia.common.rpc.NOTIFIER',
+                new_callable=MockNOTIFIER)
+    def test_get_failover_LB_flow_one_amps_act_stdby(self,
+                                                     mock_get_net_driver,
+                                                     mock_notifier):
+        amphora_dict = {constants.ID: uuidutils.generate_uuid(),
+                        constants.ROLE: constants.ROLE_MASTER,
+                        constants.COMPUTE_ID: uuidutils.generate_uuid(),
+                        constants.VRRP_PORT_ID: None, constants.VRRP_IP: None}
+
+        self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_dict])
+
+    @mock.patch('octavia.common.rpc.NOTIFIER',
+                new_callable=MockNOTIFIER)
+    def test_get_failover_LB_flow_two_amps_act_stdby(self,
+                                                     mock_get_net_driver,
+                                                     mock_notifier):
+        amphora_dict = {constants.ID: uuidutils.generate_uuid(),
+                        constants.ROLE: constants.ROLE_MASTER,
+                        constants.COMPUTE_ID: uuidutils.generate_uuid(),
+                        constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
+                        constants.VRRP_IP: '192.0.2.46'}
+        amphora2_dict = {constants.ID: uuidutils.generate_uuid(),
+                         constants.ROLE: constants.ROLE_BACKUP,
+                         constants.COMPUTE_ID: uuidutils.generate_uuid(),
+                         constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
+                         constants.VRRP_IP: '2001:db8::46'}
+
+        self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_dict,
+                                                           amphora2_dict])
+
+    @mock.patch('octavia.common.rpc.NOTIFIER',
+                new_callable=MockNOTIFIER)
+    def test_get_failover_LB_flow_three_amps_act_stdby(self,
+                                                       mock_get_net_driver,
+                                                       mock_notifier):
+        amphora_dict = {constants.ID: uuidutils.generate_uuid(),
+                        constants.ROLE: constants.ROLE_MASTER,
+                        constants.COMPUTE_ID: uuidutils.generate_uuid(),
+                        constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
+                        constants.VRRP_IP: '192.0.2.46'}
+        amphora2_dict = {constants.ID: uuidutils.generate_uuid(),
+                         constants.ROLE: constants.ROLE_BACKUP,
+                         constants.COMPUTE_ID: uuidutils.generate_uuid(),
+                         constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
+                         constants.VRRP_IP: '2001:db8::46'}
+        amphora3_dict = {constants.ID: uuidutils.generate_uuid(),
+                         constants.ROLE: 'bogus',
+                         constants.COMPUTE_ID: uuidutils.generate_uuid(),
+                         constants.VRRP_PORT_ID: None, constants.VRRP_IP: None}
+
+        self._test_get_failover_LB_flow_no_amps_act_stdby(
+            [amphora_dict, amphora2_dict, amphora3_dict])
+
+    @mock.patch('octavia.common.rpc.NOTIFIER',
+                new_callable=MockNOTIFIER)
+    def test_get_failover_LB_flow_two_amps_bogus_act_stdby(
+            self, mock_get_net_driver, mock_notifier):
+        amphora_dict = {constants.ID: uuidutils.generate_uuid(),
+                        constants.ROLE: 'bogus',
+                        constants.COMPUTE_ID: uuidutils.generate_uuid(),
+                        constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
+                        constants.VRRP_IP: '192.0.2.46'}
+        amphora2_dict = {constants.ID: uuidutils.generate_uuid(),
+                         constants.ROLE: constants.ROLE_MASTER,
+                         constants.COMPUTE_ID: uuidutils.generate_uuid(),
+                         constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
+                         constants.VRRP_IP: '2001:db8::46'}
+
+        self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_dict,
+                                                           amphora2_dict])
+
+    @mock.patch('octavia.common.rpc.NOTIFIER',
+                new_callable=MockNOTIFIER)
+    def test_get_failover_LB_flow_two_amps_standalone_act_stdby(
+            self, mock_get_net_driver, 
mock_notifier): + amphora_dict = {constants.ID: uuidutils.generate_uuid(), + constants.ROLE: constants.ROLE_STANDALONE, + constants.COMPUTE_ID: uuidutils.generate_uuid(), + constants.VRRP_PORT_ID: uuidutils.generate_uuid(), + constants.VRRP_IP: '192.0.2.46'} + + amphora2_dict = {constants.ID: uuidutils.generate_uuid(), + constants.ROLE: constants.ROLE_MASTER, + constants.COMPUTE_ID: uuidutils.generate_uuid(), + constants.VRRP_PORT_ID: uuidutils.generate_uuid(), + constants.VRRP_IP: '2001:db8::46'} + + self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_dict, + amphora2_dict]) + + @mock.patch('octavia.db.repositories.AmphoraMemberPortRepository.' + 'get_port_ids') + @mock.patch('octavia.db.repositories.AmphoraRepository.' + 'get_amphorae_ids_on_lb') + @mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock()) + def test_get_delete_member_ports_subflow(self, + mock_session, + mock_amps_on_lb, + mock_get_port_ids, + mock_get_net_driver): + lb_id = uuidutils.generate_uuid() + amps = ['fake_amp1', 'fake_amp2'] + port1 = uuidutils.generate_uuid() + port2 = uuidutils.generate_uuid() + ports = [port1, port2] + + mock_amps_on_lb.return_value = amps + mock_get_port_ids.return_value = ports + + delete_flow = self.LBFlow.get_delete_member_ports_subflow(lb_id) + + self.assertIsInstance(delete_flow, unordered_flow.Flow) + self.assertEqual(8, len(delete_flow)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_member_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_member_flows.py new file mode 100644 index 0000000000..1cab682504 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_member_flows.py @@ -0,0 +1,109 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +from unittest import mock + +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import member_flows +import octavia.tests.unit.base as base + + +# NOTE: We patch the get_network_driver for all the calls so we don't +# inadvertently make real calls. 
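+# Like the other flow tests, these cases only validate the taskflow
+# wiring (which symbols each flow requires and provides, and how many),
+# not the behavior of the individual tasks inside the flow.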
+@mock.patch('octavia.common.utils.get_network_driver') +class TestMemberFlows(base.TestCase): + + def setUp(self): + self.MemberFlow = member_flows.MemberFlows() + + super().setUp() + + def test_get_create_member_flow(self, mock_get_net_driver): + + member_flow = self.MemberFlow.get_create_member_flow() + + self.assertIsInstance(member_flow, flow.Flow) + + self.assertIn(constants.MEMBER, member_flow.requires) + self.assertIn(constants.LISTENERS, member_flow.requires) + self.assertIn(constants.LOADBALANCER, member_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, member_flow.requires) + self.assertIn(constants.POOL_ID, member_flow.requires) + self.assertIn(constants.AVAILABILITY_ZONE, member_flow.requires) + + self.assertIn(constants.DELTAS, member_flow.provides) + self.assertIn(constants.UPDATED_PORTS, member_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, member_flow.provides) + + self.assertEqual(6, len(member_flow.requires)) + self.assertEqual(3, len(member_flow.provides)) + + def test_get_delete_member_flow(self, mock_get_net_driver): + + member_flow = self.MemberFlow.get_delete_member_flow() + + self.assertIsInstance(member_flow, flow.Flow) + + self.assertIn(constants.MEMBER, member_flow.requires) + self.assertIn(constants.LOADBALANCER, member_flow.requires) + self.assertIn(constants.LISTENERS, member_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, member_flow.requires) + self.assertIn(constants.POOL_ID, member_flow.requires) + self.assertIn(constants.PROJECT_ID, member_flow.requires) + self.assertIn(constants.AVAILABILITY_ZONE, member_flow.requires) + + self.assertIn(constants.DELTAS, member_flow.provides) + self.assertIn(constants.UPDATED_PORTS, member_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, member_flow.provides) + + self.assertEqual(7, len(member_flow.requires)) + self.assertEqual(3, len(member_flow.provides)) + + def test_get_update_member_flow(self, mock_get_net_driver): + + member_flow = self.MemberFlow.get_update_member_flow() + + self.assertIsInstance(member_flow, flow.Flow) + + self.assertIn(constants.MEMBER, member_flow.requires) + self.assertIn(constants.LISTENERS, member_flow.requires) + self.assertIn(constants.LOADBALANCER, member_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, member_flow.requires) + self.assertIn(constants.POOL_ID, member_flow.requires) + self.assertIn(constants.UPDATE_DICT, member_flow.requires) + + self.assertEqual(6, len(member_flow.requires)) + self.assertEqual(0, len(member_flow.provides)) + + def test_get_batch_update_members_flow(self, mock_get_net_driver): + + member_flow = self.MemberFlow.get_batch_update_members_flow( + [], [], []) + + self.assertIsInstance(member_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, member_flow.requires) + self.assertIn(constants.LOADBALANCER, member_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, member_flow.requires) + self.assertIn(constants.POOL_ID, member_flow.requires) + self.assertIn(constants.AVAILABILITY_ZONE, member_flow.requires) + + self.assertIn(constants.DELTAS, member_flow.provides) + self.assertIn(constants.UPDATED_PORTS, member_flow.provides) + self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, member_flow.provides) + + self.assertEqual(5, len(member_flow.requires)) + self.assertEqual(3, len(member_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_pool_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_pool_flows.py new file mode 100644 index 0000000000..80d39d0186 --- 
/dev/null +++ b/octavia/tests/unit/controller/worker/v2/flows/test_pool_flows.py @@ -0,0 +1,81 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow as flow + +from octavia.common import constants +from octavia.controller.worker.v2.flows import pool_flows +import octavia.tests.unit.base as base + + +class TestPoolFlows(base.TestCase): + + def setUp(self): + self.PoolFlow = pool_flows.PoolFlows() + + super().setUp() + + def test_get_create_pool_flow(self): + + pool_flow = self.PoolFlow.get_create_pool_flow() + + self.assertIsInstance(pool_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, pool_flow.requires) + self.assertIn(constants.LOADBALANCER, pool_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, pool_flow.requires) + + self.assertEqual(4, len(pool_flow.requires)) + self.assertEqual(0, len(pool_flow.provides)) + + def test_get_delete_pool_flow(self): + + pool_flow = self.PoolFlow.get_delete_pool_flow() + + self.assertIsInstance(pool_flow, flow.Flow) + + self.assertIn(constants.LISTENERS, pool_flow.requires) + self.assertIn(constants.LOADBALANCER, pool_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, pool_flow.requires) + self.assertIn(constants.POOL_ID, pool_flow.requires) + self.assertIn(constants.PROJECT_ID, pool_flow.requires) + + self.assertEqual(5, len(pool_flow.requires)) + self.assertEqual(1, len(pool_flow.provides)) + + def test_get_delete_pool_flow_internal(self): + + pool_flow = self.PoolFlow.get_delete_pool_flow_internal('test') + + self.assertIsInstance(pool_flow, flow.Flow) + self.assertIn(constants.PROJECT_ID, pool_flow.requires) + + self.assertEqual(1, len(pool_flow.requires)) + self.assertEqual(1, len(pool_flow.provides)) + + def test_get_update_pool_flow(self): + + pool_flow = self.PoolFlow.get_update_pool_flow() + + self.assertIsInstance(pool_flow, flow.Flow) + + self.assertIn(constants.POOL_ID, pool_flow.requires) + self.assertIn(constants.LISTENERS, pool_flow.requires) + self.assertIn(constants.LOADBALANCER, pool_flow.requires) + self.assertIn(constants.LOADBALANCER_ID, pool_flow.requires) + self.assertIn(constants.UPDATE_DICT, pool_flow.requires) + + self.assertEqual(5, len(pool_flow.requires)) + self.assertEqual(0, len(pool_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/__init__.py b/octavia/tests/unit/controller/worker/v2/tasks/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py new file mode 100644 index 0000000000..cc2ebb389b --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py @@ -0,0 +1,1277 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +from taskflow.types import failure + +from octavia.amphorae.driver_exceptions import exceptions as driver_except +from octavia.common import constants +from octavia.common import data_models +from octavia.common import utils +from octavia.controller.worker.v2.tasks import amphora_driver_tasks +from octavia.db import repositories as repo +from octavia.network import data_models as network_data_models +import octavia.tests.unit.base as base + + +AMP_ID = uuidutils.generate_uuid() +COMPUTE_ID = uuidutils.generate_uuid() +LISTENER_ID = uuidutils.generate_uuid() +LB_ID = uuidutils.generate_uuid() +CONN_MAX_RETRIES = 10 +CONN_RETRY_INTERVAL = 6 +FAKE_CONFIG_FILE = 'fake config file' + +_db_amphora_mock = mock.MagicMock() +_db_amphora_mock.id = AMP_ID +_db_amphora_mock.status = constants.AMPHORA_ALLOCATED +_db_amphora_mock.vrrp_ip = '198.51.100.65' +_amphora_mock = { + constants.ID: AMP_ID, + constants.STATUS: constants.AMPHORA_ALLOCATED, + constants.COMPUTE_ID: COMPUTE_ID, +} +_db_load_balancer_mock = mock.MagicMock() +_db_load_balancer_mock.id = LB_ID +_listener_mock = mock.MagicMock() +_listener_mock.id = LISTENER_ID +_db_load_balancer_mock.listeners = [_listener_mock] +_vip_mock = mock.MagicMock() +_db_load_balancer_mock.vip = _vip_mock +_LB_mock = { + constants.LOADBALANCER_ID: LB_ID, +} +_amphorae_mock = [_db_amphora_mock] +_amphora_network_config_mock = mock.MagicMock() +_amphorae_network_config_mock = { + _amphora_mock[constants.ID]: _amphora_network_config_mock} +_network_mock = mock.MagicMock() +_session_mock = mock.MagicMock() + + +@mock.patch('octavia.db.repositories.AmphoraRepository.update') +@mock.patch('octavia.db.repositories.AmphoraRepository.get') +@mock.patch('octavia.db.repositories.ListenerRepository.update') +@mock.patch('octavia.db.repositories.ListenerRepository.get', + return_value=_listener_mock) +@mock.patch('octavia.db.api.get_session', return_value=_session_mock) +@mock.patch('octavia.controller.worker.v2.tasks.amphora_driver_tasks.LOG') +@mock.patch('oslo_utils.uuidutils.generate_uuid', 
return_value=AMP_ID) +@mock.patch('stevedore.driver.DriverManager.driver') +class TestAmphoraDriverTasks(base.TestCase): + + def setUp(self): + + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="haproxy_amphora", + active_connection_max_retries=CONN_MAX_RETRIES) + conf.config(group="haproxy_amphora", + active_connection_retry_interval=CONN_RETRY_INTERVAL) + conf.config(group="controller_worker", + loadbalancer_topology=constants.TOPOLOGY_SINGLE) + self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1, + constants.REQ_READ_TIMEOUT: 2, + constants.CONN_MAX_RETRIES: 3, + constants.CONN_RETRY_INTERVAL: 4} + super().setUp() + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_amp_listeners_update(self, + mock_lb_get, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + + mock_amphora_repo_get.return_value = _db_amphora_mock + mock_lb_get.return_value = _db_load_balancer_mock + amp_list_update_obj = amphora_driver_tasks.AmpListenersUpdate() + amp_list_update_obj.execute(_LB_mock, _amphora_mock, self.timeout_dict) + + mock_driver.update_amphora_listeners.assert_called_once_with( + _db_load_balancer_mock, _db_amphora_mock, self.timeout_dict) + + mock_driver.update_amphora_listeners.side_effect = Exception('boom') + + amp_list_update_obj.execute(_LB_mock, _amphora_mock, self.timeout_dict) + + mock_amphora_repo_update.assert_called_once_with( + _session_mock, AMP_ID, status=constants.ERROR) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_amp_index_listener_update(self, + mock_lb_get, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + + mock_amphora_repo_get.return_value = _db_amphora_mock + mock_lb_get.return_value = _db_load_balancer_mock + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: False + } + } + + amp_list_update_obj = amphora_driver_tasks.AmphoraIndexListenerUpdate() + amp_list_update_obj.execute(_LB_mock, 0, [_amphora_mock], + amphorae_status, + _amphora_mock[constants.ID], + self.timeout_dict) + + mock_driver.update_amphora_listeners.assert_called_once_with( + _db_load_balancer_mock, _db_amphora_mock, self.timeout_dict) + + # Unreachable amp + mock_driver.reset_mock() + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: True + } + } + amp_list_update_obj.execute(_LB_mock, 0, [_amphora_mock], + amphorae_status, + _amphora_mock[constants.ID], + self.timeout_dict) + mock_driver.update_amphora_listeners.assert_not_called() + + # Test exception + mock_driver.update_amphora_listeners.side_effect = Exception('boom') + + amp_list_update_obj.execute(_LB_mock, 0, [_amphora_mock], {}, + _amphora_mock[constants.ID], + self.timeout_dict) + + mock_amphora_repo_update.assert_called_once_with( + _session_mock, AMP_ID, status=constants.ERROR) + + # Test exception, secondary amp + mock_amphora_repo_update.reset_mock() + mock_driver.update_amphora_listeners.side_effect = Exception('boom') + + amp_list_update_obj.execute(_LB_mock, 0, [_amphora_mock], {}, + '1234', + self.timeout_dict) + + mock_amphora_repo_update.assert_not_called() + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.session') + def test_listeners_update(self, + mock_get_session_ctx, + mock_lb_get, + mock_driver, + 
mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + listeners_update_obj = amphora_driver_tasks.ListenersUpdate() + LB_ID = 'lb1' + listeners = [data_models.Listener(id='listener1'), + data_models.Listener(id='listener2')] + vip = data_models.Vip(ip_address='10.0.0.1') + lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners, vip=vip) + mock_lb_get.side_effect = [lb, None, lb] + listeners_update_obj.execute(lb.id) + mock_driver.update.assert_called_once_with(lb) + self.assertEqual(1, mock_driver.update.call_count) + + mock_driver.update.reset_mock() + + listeners_update_obj.execute(None) + mock_driver.update.assert_not_called() + + mock_session = mock_get_session_ctx().begin().__enter__() + + # Test the revert + amp = listeners_update_obj.revert(_LB_mock) + expected_db_calls = [mock.call(mock_session, + id=listeners[0].id, + provisioning_status=constants.ERROR), + mock.call(mock_session, + id=listeners[1].id, + provisioning_status=constants.ERROR)] + repo.ListenerRepository.update.assert_has_calls(expected_db_calls) + self.assertEqual(2, repo.ListenerRepository.update.call_count) + self.assertIsNone(amp) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_error') + def test_amphora_index_listeners_reload( + self, mock_prov_status_error, mock_lb_repo_get, + mock_driver, mock_generate_uuid, mock_log, mock_get_session, + mock_listener_repo_get, mock_listener_repo_update, + mock_amphora_repo_get, mock_amphora_repo_update): + listeners_reload_obj = ( + amphora_driver_tasks.AmphoraIndexListenersReload()) + mock_lb = mock.MagicMock() + mock_listener = mock.MagicMock() + mock_listener.id = '12345' + mock_amphora_repo_get.return_value = _amphora_mock + mock_lb_repo_get.return_value = mock_lb + mock_driver.reload.side_effect = [mock.DEFAULT, Exception('boom')] + + # Test no listeners + mock_lb.listeners = None + listeners_reload_obj.execute(mock_lb, 0, None, {}, + _amphora_mock[constants.ID]) + mock_driver.reload.assert_not_called() + + # Test with listeners + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: False + } + } + mock_driver.reload.reset_mock() + mock_lb.listeners = [mock_listener] + listeners_reload_obj.execute(mock_lb, 0, [_amphora_mock], + amphorae_status, + _amphora_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.reload.assert_called_once_with(mock_lb, _amphora_mock, + self.timeout_dict) + + # Unreachable amp + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: True + } + } + mock_driver.reload.reset_mock() + listeners_reload_obj.execute(mock_lb, 0, [_amphora_mock], + amphorae_status, + _amphora_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.reload.assert_not_called() + + # Test with reload exception + mock_driver.reload.reset_mock() + listeners_reload_obj.execute(mock_lb, 0, [_amphora_mock], {}, + _amphora_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.reload.assert_called_once_with(mock_lb, _amphora_mock, + self.timeout_dict) + mock_amphora_repo_update.assert_called_once_with( + _session_mock, _amphora_mock[constants.ID], + status=constants.ERROR) + + # Test with reload exception, secondary amp + mock_driver.reload.reset_mock() + mock_amphora_repo_update.reset_mock() + listeners_reload_obj.execute(mock_lb, 0, [_amphora_mock], {}, + '1234', + 
timeout_dict=self.timeout_dict) + mock_driver.reload.assert_called_once_with(mock_lb, _amphora_mock, + self.timeout_dict) + mock_amphora_repo_update.assert_not_called() + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_error') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_listeners_start(self, mock_lb_get, + mock_prov_status_error, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + listeners_start_obj = amphora_driver_tasks.ListenersStart() + mock_lb = mock.MagicMock() + mock_listener = mock.MagicMock() + mock_listener.id = '12345' + + # Test no listeners + mock_lb.listeners = None + mock_lb_get.return_value = mock_lb + listeners_start_obj.execute(_LB_mock) + mock_driver.start.assert_not_called() + + # Test with listeners + mock_driver.start.reset_mock() + mock_lb.listeners = [mock_listener] + listeners_start_obj.execute(_LB_mock) + mock_driver.start.assert_called_once_with(mock_lb, None) + + # Test revert + mock_lb.listeners = [mock_listener] + listeners_start_obj.revert(_LB_mock) + mock_prov_status_error.assert_called_once_with('12345') + + @mock.patch('octavia.db.api.session') + def test_listener_delete(self, + mock_get_session_ctx, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + + listener_dict = {constants.LISTENER_ID: LISTENER_ID} + listener_delete_obj = amphora_driver_tasks.ListenerDelete() + listener_delete_obj.execute(listener_dict) + + mock_driver.delete.assert_called_once_with(_listener_mock) + + mock_session = mock_get_session_ctx().begin().__enter__() + + # Test the revert + amp = listener_delete_obj.revert(listener_dict) + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + # Test the revert with exception + repo.ListenerRepository.update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + amp = listener_delete_obj.revert(listener_dict) + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + def test_amphora_get_info(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + + amphora_get_info_obj = amphora_driver_tasks.AmphoraGetInfo() + mock_amphora_repo_get.return_value = _db_amphora_mock + amphora_get_info_obj.execute(_amphora_mock) + + mock_driver.get_info.assert_called_once_with( + _db_amphora_mock) + + def test_amphora_get_diagnostics(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + + amphora_get_diagnostics_obj = (amphora_driver_tasks. 
+ AmphoraGetDiagnostics()) + amphora_get_diagnostics_obj.execute(_amphora_mock) + + mock_driver.get_diagnostics.assert_called_once_with( + _amphora_mock) + + @mock.patch('octavia.db.api.session') + def test_amphora_finalize(self, + mock_get_session_ctx, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + + amphora_finalize_obj = amphora_driver_tasks.AmphoraFinalize() + mock_amphora_repo_get.return_value = _db_amphora_mock + amphora_finalize_obj.execute(_amphora_mock) + + mock_driver.finalize_amphora.assert_called_once_with( + _db_amphora_mock) + + mock_session = mock_get_session_ctx().begin().__enter__() + + # Test revert + amp = amphora_finalize_obj.revert(None, _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + self.assertIsNone(amp) + + # Test revert with exception + repo.AmphoraRepository.update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + amp = amphora_finalize_obj.revert(None, _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + self.assertIsNone(amp) + + # Test revert when this task failed + repo.AmphoraRepository.update.reset_mock() + amp = amphora_finalize_obj.revert( + failure.Failure.from_exception(Exception('boom')), _amphora_mock) + repo.AmphoraRepository.update.assert_not_called() + + @mock.patch('octavia.db.api.session') + def test_amphora_post_network_plug(self, + mock_get_session_ctx, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + + amphora_post_network_plug_obj = (amphora_driver_tasks. + AmphoraPostNetworkPlug()) + mock_amphora_repo_get.return_value = _db_amphora_mock + fixed_ips = [{constants.SUBNET: {}}] + port_mock = {constants.NETWORK: mock.MagicMock(), + constants.FIXED_IPS: fixed_ips, + constants.ID: uuidutils.generate_uuid()} + amphora_post_network_plug_obj.execute(_amphora_mock, [port_mock], + _amphora_network_config_mock) + + (mock_driver.post_network_plug. + assert_called_once_with)(_db_amphora_mock, + network_data_models.Port(**port_mock), + _amphora_network_config_mock) + + mock_session = mock_get_session_ctx().begin().__enter__() + + # Test revert + amp = amphora_post_network_plug_obj.revert(None, _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + self.assertIsNone(amp) + + # Test revert with exception + repo.AmphoraRepository.update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + amp = amphora_post_network_plug_obj.revert(None, _amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + self.assertIsNone(amp) + + # Test revert when this task failed + repo.AmphoraRepository.update.reset_mock() + amp = amphora_post_network_plug_obj.revert( + failure.Failure.from_exception(Exception('boom')), _amphora_mock) + repo.AmphoraRepository.update.assert_not_called() + + def test_amphora_post_network_plug_with_host_routes( + self, mock_driver, mock_generate_uuid, mock_log, mock_get_session, + mock_listener_repo_get, mock_listener_repo_update, + mock_amphora_repo_get, mock_amphora_repo_update): + + amphora_post_network_plug_obj = (amphora_driver_tasks. 
+ AmphoraPostNetworkPlug()) + mock_amphora_repo_get.return_value = _db_amphora_mock + host_routes = [{'destination': '10.0.0.0/16', + 'nexthop': '192.168.10.3'}, + {'destination': '10.2.0.0/16', + 'nexthop': '192.168.10.5'}] + fixed_ips = [{constants.SUBNET: {'host_routes': host_routes}}] + + port_mock = {constants.NETWORK: mock.MagicMock(), + constants.FIXED_IPS: fixed_ips, + constants.ID: uuidutils.generate_uuid()} + amphora_post_network_plug_obj.execute(_amphora_mock, [port_mock], + _amphora_network_config_mock) + + (mock_driver.post_network_plug. + assert_called_once_with)(_db_amphora_mock, + network_data_models.Port(**port_mock), + _amphora_network_config_mock) + + call_args = mock_driver.post_network_plug.call_args[0] + port_arg = call_args[1] + subnet_arg = port_arg.fixed_ips[0].subnet + self.assertEqual(2, len(subnet_arg.host_routes)) + for hr1, hr2 in zip(host_routes, subnet_arg.host_routes): + self.assertEqual(hr1['destination'], hr2.destination) + self.assertEqual(hr1['nexthop'], hr2.nexthop) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.session') + def test_amphorae_post_network_plug(self, + mock_get_session_ctx, + mock_lb_get, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + mock_driver.get_network.return_value = _network_mock + _db_amphora_mock.id = AMP_ID + _db_amphora_mock.compute_id = COMPUTE_ID + _db_load_balancer_mock.amphorae = [_db_amphora_mock] + mock_lb_get.return_value = _db_load_balancer_mock + mock_amphora_repo_get.return_value = _db_amphora_mock + amphora_post_network_plug_obj = (amphora_driver_tasks. + AmphoraePostNetworkPlug()) + + port_mock = {constants.NETWORK: mock.MagicMock(), + constants.FIXED_IPS: [mock.MagicMock()], + constants.ID: uuidutils.generate_uuid()} + _deltas_mock = {_db_amphora_mock.id: [port_mock]} + + amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock, + _amphorae_network_config_mock) + + (mock_driver.post_network_plug. 
+ assert_called_once_with(_db_amphora_mock, + network_data_models.Port(**port_mock), + _amphora_network_config_mock)) + + # Test with no ports to plug + mock_driver.post_network_plug.reset_mock() + + _deltas_mock = {'0': [port_mock]} + + amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock, + _amphora_network_config_mock) + mock_driver.post_network_plug.assert_not_called() + + mock_session = mock_get_session_ctx().begin().__enter__() + + # Test revert + amp = amphora_post_network_plug_obj.revert(None, _LB_mock, + _deltas_mock) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + self.assertIsNone(amp) + + # Test revert with exception + repo.AmphoraRepository.update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + amp = amphora_post_network_plug_obj.revert(None, _LB_mock, + _deltas_mock) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + self.assertIsNone(amp) + + # Test revert when this task failed + repo.AmphoraRepository.update.reset_mock() + amp = amphora_post_network_plug_obj.revert( + failure.Failure.from_exception(Exception('boom')), _amphora_mock, + None) + repo.AmphoraRepository.update.assert_not_called() + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.session') + def test_amphora_post_vip_plug(self, + mock_get_session_ctx, + mock_lb_get, + mock_loadbalancer_repo_update, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + + amphorae_net_config_mock = { + AMP_ID: { + constants.VIP_SUBNET: { + 'host_routes': [] + }, + constants.VRRP_PORT: mock.MagicMock(), + 'additional_vip_data': [] + } + } + mock_amphora_repo_get.return_value = _db_amphora_mock + mock_lb_get.return_value = _db_load_balancer_mock + amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraPostVIPPlug() + amphora_post_vip_plug_obj.execute(_amphora_mock, + _LB_mock, + amphorae_net_config_mock) + vip_subnet = network_data_models.Subnet( + **amphorae_net_config_mock[AMP_ID]['vip_subnet']) + vrrp_port = network_data_models.Port( + **amphorae_net_config_mock[AMP_ID]['vrrp_port']) + + mock_driver.post_vip_plug.assert_called_once_with( + _db_amphora_mock, _db_load_balancer_mock, amphorae_net_config_mock, + vrrp_port, vip_subnet, additional_vip_data=[]) + + mock_session = mock_get_session_ctx().begin().__enter__() + + # Test revert + amp = amphora_post_vip_plug_obj.revert(None, _amphora_mock, _LB_mock) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + repo.LoadBalancerRepository.update.assert_not_called() + + self.assertIsNone(amp) + + # Test revert with repo exceptions + repo.AmphoraRepository.update.reset_mock() + repo.LoadBalancerRepository.update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mock_loadbalancer_repo_update.side_effect = Exception('fail') + amp = amphora_post_vip_plug_obj.revert(None, _amphora_mock, _LB_mock) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + repo.LoadBalancerRepository.update.assert_not_called() + + self.assertIsNone(amp) + + # Test revert when this task failed + repo.AmphoraRepository.update.reset_mock() + amp = 
amphora_post_vip_plug_obj.revert( + failure.Failure.from_exception(Exception('boom')), _amphora_mock, + None) + repo.AmphoraRepository.update.assert_not_called() + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_amphora_post_vip_plug_with_host_routes( + self, mock_lb_get, mock_loadbalancer_repo_update, mock_driver, + mock_generate_uuid, mock_log, mock_get_session, + mock_listener_repo_get, mock_listener_repo_update, + mock_amphora_repo_get, mock_amphora_repo_update): + + host_routes = [{'destination': '10.0.0.0/16', + 'nexthop': '192.168.10.3'}, + {'destination': '10.2.0.0/16', + 'nexthop': '192.168.10.5'}] + amphorae_net_config_mock = { + AMP_ID: { + constants.VIP_SUBNET: { + 'host_routes': host_routes + }, + constants.VRRP_PORT: mock.MagicMock(), + 'additional_vip_data': [] + } + } + mock_amphora_repo_get.return_value = _db_amphora_mock + mock_lb_get.return_value = _db_load_balancer_mock + amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraPostVIPPlug() + amphora_post_vip_plug_obj.execute(_amphora_mock, + _LB_mock, + amphorae_net_config_mock) + vip_subnet = network_data_models.Subnet( + **amphorae_net_config_mock[AMP_ID]['vip_subnet']) + vrrp_port = network_data_models.Port( + **amphorae_net_config_mock[AMP_ID]['vrrp_port']) + + mock_driver.post_vip_plug.assert_called_once_with( + _db_amphora_mock, _db_load_balancer_mock, amphorae_net_config_mock, + vrrp_port, vip_subnet, additional_vip_data=[]) + + call_args = mock_driver.post_vip_plug.call_args[0] + vip_subnet_arg = call_args[4] + self.assertEqual(2, len(vip_subnet_arg.host_routes)) + for hr1, hr2 in zip(host_routes, vip_subnet_arg.host_routes): + self.assertEqual(hr1['destination'], hr2.destination) + self.assertEqual(hr1['nexthop'], hr2.nexthop) + + self.assertEqual( + host_routes, + amphorae_net_config_mock[AMP_ID][ + constants.VIP_SUBNET]['host_routes']) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_amphora_post_vip_plug_with_additional_vips( + self, mock_lb_get, mock_loadbalancer_repo_update, mock_driver, + mock_generate_uuid, mock_log, mock_get_session, + mock_listener_repo_get, mock_listener_repo_update, + mock_amphora_repo_get, mock_amphora_repo_update): + + host_routes = [{'destination': '10.0.0.0/16', + 'nexthop': '192.168.10.3'}, + {'destination': '10.2.0.0/16', + 'nexthop': '192.168.10.5'}] + additional_host_routes = [{'destination': '2001:db9::/64', + 'nexthop': '2001:db8::1:fff'}] + amphorae_net_config_mock = { + AMP_ID: { + constants.VIP_SUBNET: { + 'host_routes': host_routes + }, + constants.VRRP_PORT: mock.MagicMock(), + 'additional_vip_data': [{ + 'ip_address': '2001:db8::3', + 'subnet': { + 'host_routes': additional_host_routes + } + }] + } + } + mock_amphora_repo_get.return_value = _db_amphora_mock + mock_lb_get.return_value = _db_load_balancer_mock + amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraPostVIPPlug() + amphora_post_vip_plug_obj.execute(_amphora_mock, + _LB_mock, + amphorae_net_config_mock) + vip_subnet = network_data_models.Subnet( + **amphorae_net_config_mock[AMP_ID]['vip_subnet']) + vrrp_port = network_data_models.Port( + **amphorae_net_config_mock[AMP_ID]['vrrp_port']) + additional_vip_data = [ + network_data_models.AdditionalVipData( + ip_address=add_vip_data['ip_address'], + subnet=network_data_models.Subnet( + host_routes=add_vip_data['subnet']['host_routes'])) + for add_vip_data 
in amphorae_net_config_mock[ + AMP_ID]['additional_vip_data']] + + mock_driver.post_vip_plug.assert_called_once_with( + _db_amphora_mock, _db_load_balancer_mock, amphorae_net_config_mock, + vrrp_port, vip_subnet, additional_vip_data=additional_vip_data) + + call_args = mock_driver.post_vip_plug.call_args[0] + call_kwargs = mock_driver.post_vip_plug.call_args[1] + vip_subnet_arg = call_args[4] + self.assertEqual(2, len(vip_subnet_arg.host_routes)) + for hr1, hr2 in zip(host_routes, vip_subnet_arg.host_routes): + self.assertEqual(hr1['destination'], hr2.destination) + self.assertEqual(hr1['nexthop'], hr2.nexthop) + + self.assertEqual( + host_routes, + amphorae_net_config_mock[AMP_ID][ + constants.VIP_SUBNET]['host_routes']) + + add_vip_data_arg = call_kwargs.get('additional_vip_data') + self.assertEqual(1, len(add_vip_data_arg[0].subnet.host_routes)) + hr1 = add_vip_data_arg[0].subnet.host_routes[0] + self.assertEqual( + additional_host_routes[0]['destination'], hr1.destination) + self.assertEqual( + additional_host_routes[0]['nexthop'], hr1.nexthop) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_amphorae_post_vip_plug(self, mock_lb_get, + mock_loadbalancer_repo_update, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + + amphorae_net_config_mock = mock.MagicMock() + mock_amphora_repo_get.return_value = _db_amphora_mock + vip_subnet = network_data_models.Subnet( + **amphorae_net_config_mock[AMP_ID]['vip_subnet']) + vrrp_port = network_data_models.Port( + **amphorae_net_config_mock[AMP_ID]['vrrp_port']) + _db_load_balancer_mock.amphorae = [_db_amphora_mock] + mock_lb_get.return_value = _db_load_balancer_mock + amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraePostVIPPlug() + amphora_post_vip_plug_obj.execute(_LB_mock, + amphorae_net_config_mock) + + mock_driver.post_vip_plug.assert_called_once_with( + _db_amphora_mock, _db_load_balancer_mock, amphorae_net_config_mock, + vrrp_port, vip_subnet, additional_vip_data=[]) + + def test_amphora_cert_upload(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + mock_amphora_repo_get.return_value = _db_amphora_mock + fer = utils.get_server_certs_key_passphrases_fernet() + pem_file_mock = fer.encrypt( + utils.get_compatible_value('test-pem-file')).decode('utf-8') + amphora_cert_upload_mock = amphora_driver_tasks.AmphoraCertUpload() + amphora_cert_upload_mock.execute(_amphora_mock, pem_file_mock) + + mock_driver.upload_cert_amp.assert_called_once_with( + _db_amphora_mock, fer.decrypt(pem_file_mock.encode('utf-8'))) + + def test_amphora_update_vrrp_interface(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + FAKE_INTERFACE = 'fake0' + mock_amphora_repo_get.return_value = _db_amphora_mock + mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE, + Exception('boom')] + + timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES, + constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL} + + amphora_update_vrrp_interface_obj = ( + amphora_driver_tasks.AmphoraUpdateVRRPInterface()) + amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict) + 
mock_driver.get_interface_from_ip.assert_called_once_with( + _db_amphora_mock, _db_amphora_mock.vrrp_ip, + timeout_dict=timeout_dict) + mock_amphora_repo_update.assert_called_once_with( + _session_mock, _db_amphora_mock.id, vrrp_interface=FAKE_INTERFACE) + + # Test with an exception + mock_amphora_repo_update.reset_mock() + amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict) + mock_amphora_repo_update.assert_called_once_with( + _session_mock, _db_amphora_mock.id, status=constants.ERROR) + + def test_amphora_index_update_vrrp_interface( + self, mock_driver, mock_generate_uuid, mock_log, mock_get_session, + mock_listener_repo_get, mock_listener_repo_update, + mock_amphora_repo_get, mock_amphora_repo_update): + mock_amphora_repo_get.return_value = _db_amphora_mock + FAKE_INTERFACE = 'fake0' + mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE, + Exception('boom')] + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: False + } + } + + timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES, + constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL} + + amphora_update_vrrp_interface_obj = ( + amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface()) + amphora_update_vrrp_interface_obj.execute( + 0, [_amphora_mock], amphorae_status, _amphora_mock[constants.ID], + timeout_dict) + mock_driver.get_interface_from_ip.assert_called_once_with( + _db_amphora_mock, _db_amphora_mock.vrrp_ip, + timeout_dict=timeout_dict) + mock_amphora_repo_update.assert_called_once_with( + _session_mock, _db_amphora_mock.id, vrrp_interface=FAKE_INTERFACE) + + # Unreachable amp + mock_driver.reset_mock() + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: True + } + } + amphora_update_vrrp_interface_obj.execute( + 0, [_amphora_mock], amphorae_status, _amphora_mock[constants.ID], + timeout_dict) + mock_driver.get_interface_from_ip.assert_not_called() + + # Test with an exception + mock_amphora_repo_update.reset_mock() + amphora_update_vrrp_interface_obj.execute( + 0, [_amphora_mock], {}, _amphora_mock[constants.ID], timeout_dict) + mock_amphora_repo_update.assert_called_once_with( + _session_mock, _db_amphora_mock.id, status=constants.ERROR) + + # Test with an exception, secondary amp + mock_amphora_repo_update.reset_mock() + amphora_update_vrrp_interface_obj.execute( + 0, [_amphora_mock], {}, '1234', timeout_dict) + mock_amphora_repo_update.assert_not_called() + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_amphora_vrrp_update(self, + mock_lb_get, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + amphorae_network_config = mock.MagicMock() + mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT, + Exception('boom')] + mock_lb_get.return_value = _db_load_balancer_mock + mock_amphora_repo_get.return_value = _db_amphora_mock + amphora_vrrp_update_obj = ( + amphora_driver_tasks.AmphoraVRRPUpdate()) + amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, + _amphora_mock, 'fakeint0') + mock_driver.update_vrrp_conf.assert_called_once_with( + _db_load_balancer_mock, amphorae_network_config, + _db_amphora_mock, None) + + # Test with an exception + mock_amphora_repo_update.reset_mock() + amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, + _amphora_mock, 'fakeint0') + mock_amphora_repo_update.assert_called_once_with( + _session_mock, _db_amphora_mock.id, 
status=constants.ERROR) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_amphora_index_vrrp_update(self, + mock_lb_get, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + amphorae_network_config = mock.MagicMock() + mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT, + Exception('boom')] + mock_lb_get.return_value = _db_load_balancer_mock + mock_amphora_repo_get.return_value = _db_amphora_mock + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: False + } + } + + amphora_vrrp_update_obj = ( + amphora_driver_tasks.AmphoraIndexVRRPUpdate()) + + amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, + 0, [_amphora_mock], amphorae_status, + 'fakeint0', + _amphora_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.update_vrrp_conf.assert_called_once_with( + _db_load_balancer_mock, amphorae_network_config, _db_amphora_mock, + self.timeout_dict) + + # Unreachable amp + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: True + } + } + mock_amphora_repo_update.reset_mock() + mock_driver.update_vrrp_conf.reset_mock() + amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, + 0, [_amphora_mock], amphorae_status, + None, _amphora_mock[constants.ID]) + mock_driver.update_vrrp_conf.assert_not_called() + + # Test with an exception + mock_amphora_repo_update.reset_mock() + amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, + 0, [_amphora_mock], {}, 'fakeint0', + _amphora_mock[constants.ID]) + mock_amphora_repo_update.assert_called_once_with( + _session_mock, _db_amphora_mock.id, status=constants.ERROR) + + # Test with an exception, secondary amp + mock_amphora_repo_update.reset_mock() + amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config, + 0, [_amphora_mock], {}, 'fakeint0', + '1234') + mock_amphora_repo_update.assert_not_called() + + def test_amphora_vrrp_start(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + mock_amphora_repo_get.return_value = _db_amphora_mock + amphora_vrrp_start_obj = ( + amphora_driver_tasks.AmphoraVRRPStart()) + amphora_vrrp_start_obj.execute(_amphora_mock, + timeout_dict=self.timeout_dict) + mock_driver.start_vrrp_service.assert_called_once_with( + _db_amphora_mock, self.timeout_dict) + + def test_amphora_index_vrrp_start(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + mock_amphora_repo_get.return_value = _db_amphora_mock + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: False + } + } + + amphora_vrrp_start_obj = ( + amphora_driver_tasks.AmphoraIndexVRRPStart()) + mock_driver.start_vrrp_service.side_effect = [mock.DEFAULT, + Exception('boom')] + + amphora_vrrp_start_obj.execute(0, [_amphora_mock], amphorae_status, + _amphora_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.start_vrrp_service.assert_called_once_with( + _db_amphora_mock, self.timeout_dict) + + # Unreachable amp + mock_driver.start_vrrp_service.reset_mock() + amphorae_status = { + _amphora_mock[constants.ID]: { + constants.UNREACHABLE: True + } + } + amphora_vrrp_start_obj.execute(0, [_amphora_mock], amphorae_status, + 
_amphora_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.start_vrrp_service.assert_not_called() + + # Test with a start exception + mock_driver.start_vrrp_service.reset_mock() + amphora_vrrp_start_obj.execute(0, [_amphora_mock], {}, + _amphora_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.start_vrrp_service.assert_called_once_with( + _db_amphora_mock, self.timeout_dict) + mock_amphora_repo_update.assert_called_once_with( + _session_mock, _db_amphora_mock.id, status=constants.ERROR) + + # Test with a start exception, secondary amp + mock_driver.start_vrrp_service.reset_mock() + mock_amphora_repo_update.reset_mock() + amphora_vrrp_start_obj.execute(0, [_amphora_mock], {}, '1234', + timeout_dict=self.timeout_dict) + mock_driver.start_vrrp_service.assert_called_once_with( + _db_amphora_mock, self.timeout_dict) + mock_amphora_repo_update.assert_not_called() + + def test_amphora_compute_connectivity_wait(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + amp_compute_conn_wait_obj = ( + amphora_driver_tasks.AmphoraComputeConnectivityWait()) + mock_amphora_repo_get.return_value = _db_amphora_mock + amp_compute_conn_wait_obj.execute(_amphora_mock, + raise_retry_exception=True) + mock_driver.get_info.assert_called_once_with( + _db_amphora_mock, raise_retry_exception=True) + + mock_driver.get_info.side_effect = driver_except.TimeOutException() + self.assertRaises(driver_except.TimeOutException, + amp_compute_conn_wait_obj.execute, _amphora_mock) + mock_amphora_repo_update.assert_called_once_with( + _session_mock, AMP_ID, status=constants.ERROR) + + @mock.patch('octavia.amphorae.backends.agent.agent_jinja_cfg.' 
+ 'AgentJinjaTemplater.build_agent_config') + def test_amphora_config_update(self, + mock_build_config, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + mock_build_config.return_value = FAKE_CONFIG_FILE + amp_config_update_obj = amphora_driver_tasks.AmphoraConfigUpdate() + mock_driver.update_amphora_agent_config.side_effect = [ + None, None, driver_except.AmpDriverNotImplementedError, + driver_except.TimeOutException] + # With Flavor + flavor = {constants.LOADBALANCER_TOPOLOGY: + constants.TOPOLOGY_ACTIVE_STANDBY} + mock_amphora_repo_get.return_value = _db_amphora_mock + amp_config_update_obj.execute(_amphora_mock, flavor) + mock_build_config.assert_called_once_with( + _db_amphora_mock.id, constants.TOPOLOGY_ACTIVE_STANDBY) + mock_driver.update_amphora_agent_config.assert_called_once_with( + _db_amphora_mock, FAKE_CONFIG_FILE) + # With no Flavor + mock_driver.reset_mock() + mock_build_config.reset_mock() + amp_config_update_obj.execute(_amphora_mock, None) + mock_build_config.assert_called_once_with( + _db_amphora_mock.id, constants.TOPOLOGY_SINGLE) + mock_driver.update_amphora_agent_config.assert_called_once_with( + _db_amphora_mock, FAKE_CONFIG_FILE) + # With amphora that does not support config update + mock_driver.reset_mock() + mock_build_config.reset_mock() + amp_config_update_obj.execute(_amphora_mock, flavor) + mock_build_config.assert_called_once_with( + _db_amphora_mock.id, constants.TOPOLOGY_ACTIVE_STANDBY) + mock_driver.update_amphora_agent_config.assert_called_once_with( + _db_amphora_mock, FAKE_CONFIG_FILE) + # With an unknown exception + mock_driver.reset_mock() + mock_build_config.reset_mock() + self.assertRaises(driver_except.TimeOutException, + amp_config_update_obj.execute, + _amphora_mock, flavor) + + def test_amphorae_get_connectivity_status(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + amphora1_mock = mock.MagicMock() + amphora1_mock[constants.ID] = 'id1' + amphora2_mock = mock.MagicMock() + amphora2_mock[constants.ID] = 'id2' + db_amphora1_mock = mock.Mock() + db_amphora2_mock = mock.Mock() + + amp_get_connectivity_status = ( + amphora_driver_tasks.AmphoraeGetConnectivityStatus()) + + # All amphorae reachable + mock_amphora_repo_get.side_effect = [ + db_amphora1_mock, + db_amphora2_mock] + mock_driver.check.return_value = None + + ret = amp_get_connectivity_status.execute( + [amphora1_mock, amphora2_mock], + amphora1_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.check.assert_has_calls( + [mock.call(db_amphora1_mock, timeout_dict=self.timeout_dict), + mock.call(db_amphora2_mock, timeout_dict=self.timeout_dict)]) + self.assertFalse( + ret[amphora1_mock[constants.ID]][constants.UNREACHABLE]) + self.assertFalse( + ret[amphora2_mock[constants.ID]][constants.UNREACHABLE]) + + # amphora1 unreachable + mock_driver.check.reset_mock() + mock_amphora_repo_get.side_effect = [ + db_amphora1_mock, + db_amphora2_mock] + mock_driver.check.side_effect = [ + driver_except.TimeOutException, None] + self.assertRaises(driver_except.TimeOutException, + amp_get_connectivity_status.execute, + [amphora1_mock, amphora2_mock], + amphora1_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.check.assert_called_with( + db_amphora1_mock, timeout_dict=self.timeout_dict) + + # amphora2 
unreachable + mock_driver.check.reset_mock() + mock_amphora_repo_get.side_effect = [ + db_amphora1_mock, + db_amphora2_mock] + mock_driver.check.side_effect = [ + None, driver_except.TimeOutException] + ret = amp_get_connectivity_status.execute( + [amphora1_mock, amphora2_mock], + amphora1_mock[constants.ID], + timeout_dict=self.timeout_dict) + mock_driver.check.assert_has_calls( + [mock.call(db_amphora1_mock, timeout_dict=self.timeout_dict), + mock.call(db_amphora2_mock, timeout_dict=self.timeout_dict)]) + self.assertFalse( + ret[amphora1_mock[constants.ID]][constants.UNREACHABLE]) + self.assertTrue( + ret[amphora2_mock[constants.ID]][constants.UNREACHABLE]) + + def test_set_amphora_firewall_rules(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_get, + mock_listener_repo_update, + mock_amphora_repo_get, + mock_amphora_repo_update): + amphora = {constants.ID: AMP_ID, constants.VRRP_IP: '192.0.2.88'} + mock_amphora_repo_get.return_value = _db_amphora_mock + + set_amp_fw_rules = amphora_driver_tasks.SetAmphoraFirewallRules() + + # Test non-SRIOV VIP path + set_amp_fw_rules.execute([amphora], 0, [{'non-sriov-vip': True}], {}, + timeout_dict=None) + + mock_get_session.assert_not_called() + mock_driver.set_interface_rules.assert_not_called() + + # Test SRIOV VIP path + set_amp_fw_rules.execute([amphora], 0, [{'fake_rule': True}], {}, + timeout_dict=None) + + mock_amphora_repo_get.assert_called_once_with(_session_mock, id=AMP_ID) + + mock_driver.set_interface_rules.assert_called_once_with( + _db_amphora_mock, '192.0.2.88', [{'fake_rule': True}], + timeout_dict=None) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_cert_task.py b/octavia/tests/unit/controller/worker/v2/tasks/test_cert_task.py new file mode 100644 index 0000000000..d41c10cfb0 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_cert_task.py @@ -0,0 +1,44 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
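One pattern worth calling out from the test_amphora_driver_tasks.py changes above: the connectivity-status and per-index tasks share a single data shape, the amphorae_status map. AmphoraeGetConnectivityStatus builds it by running driver.check() against every amphora; a timeout on the amphora reporting the failover raises and aborts the flow (as the amphora1 case asserts), while a timeout on any peer merely flags it so the AmphoraIndex* tasks skip it. A hand-written sketch of that shape, assuming constants.UNREACHABLE resolves to the literal key shown:

# Sketch of the amphorae_status map the tests above assemble by hand.
# 'unreachable' stands in for constants.UNREACHABLE.
amphorae_status = {
    'amp-id-1': {'unreachable': False},  # reachable: Index* tasks run
    'amp-id-2': {'unreachable': True},   # flagged: Index* tasks skip it
}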
+# +from unittest import mock + +from oslo_config import cfg + +from octavia.certificates.common import local +from octavia.common import utils +from octavia.controller.worker.v2.tasks import cert_task +import octavia.tests.unit.base as base + +CONF = cfg.CONF + + +class TestCertTasks(base.TestCase): + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_execute(self, mock_driver): + fer = utils.get_server_certs_key_passphrases_fernet() + dummy_cert = local.LocalCert( + utils.get_compatible_value('test_cert'), + utils.get_compatible_value('test_key')) + mock_driver.generate_cert_key_pair.side_effect = [dummy_cert] + c = cert_task.GenerateServerPEMTask() + pem = c.execute('123') + self.assertEqual( + fer.decrypt(pem.encode('utf-8')), + dummy_cert.get_certificate() + + dummy_cert.get_private_key() + ) + mock_driver.generate_cert_key_pair.assert_called_once_with( + cn='123', validity=CONF.certificates.cert_validity_time) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py new file mode 100644 index 0000000000..55e6e14f52 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py @@ -0,0 +1,645 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
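TestCertTasks above pins down that GenerateServerPEMTask returns the new certificate and private key concatenated and Fernet-encrypted, never as plaintext. A self-contained sketch of the round-trip the test performs; the throwaway key is an illustration, since the real fernet object comes from utils.get_server_certs_key_passphrases_fernet(), which builds it from configured passphrases:

# Sketch: the encrypt/decrypt round-trip asserted by test_execute.
from cryptography.fernet import Fernet

# Throwaway key for illustration only.
fer = Fernet(Fernet.generate_key())

# What the task hands back: certificate + private key, encrypted.
pem = fer.encrypt(b'test_cert' + b'test_key')

# What the test asserts: decrypting yields certificate + private key.
assert fer.decrypt(pem) == b'test_cert' + b'test_key'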
+# +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +import tenacity + +from octavia.common import constants +from octavia.common import exceptions +from octavia.common import utils +from octavia.controller.worker.v2.tasks import compute_tasks +from octavia.tests.common import utils as test_utils +import octavia.tests.unit.base as base + + +AMP_FLAVOR_ID = '10' +AMP_IMAGE_TAG = 'glance_tag' +AMP_SSH_KEY_NAME = None +AMP_NET = [uuidutils.generate_uuid()] +AMP_SEC_GROUPS = [] +AMP_WAIT = 12 +AMPHORA_ID = uuidutils.generate_uuid() +COMPUTE_ID = uuidutils.generate_uuid() +LB_NET_IP = '192.0.2.1' +LB_ID = uuidutils.generate_uuid() +PORT_ID = uuidutils.generate_uuid() +SERVER_GRPOUP_ID = uuidutils.generate_uuid() + + +class TestException(Exception): + + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) + + +_db_amphora_mock = mock.MagicMock() +_db_amphora_mock.id = AMPHORA_ID +_db_amphora_mock.compute_id = COMPUTE_ID +_amphora_mock = { + constants.ID: AMPHORA_ID, + constants.COMPUTE_ID: COMPUTE_ID +} +_db_load_balancer_mock = mock.MagicMock() +_db_load_balancer_mock.amphorae = [_db_amphora_mock] +_db_load_balancer_mock.to_dict.return_value = { + constants.ID: LB_ID, +} +_load_balancer_mock = { + constants.LOADBALANCER_ID: LB_ID, +} +_port = mock.MagicMock() +_port.id = PORT_ID + + +class TestComputeTasks(base.TestCase): + + def setUp(self): + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config( + group="controller_worker", amp_flavor_id=AMP_FLAVOR_ID) + self.conf.config( + group="controller_worker", amp_image_tag=AMP_IMAGE_TAG) + self.conf.config( + group="controller_worker", amp_ssh_key_name=AMP_SSH_KEY_NAME) + self.conf.config( + group="controller_worker", amp_boot_network_list=AMP_NET) + self.conf.config( + group="controller_worker", amp_active_wait_sec=AMP_WAIT) + self.conf.config( + group="controller_worker", amp_secgroup_list=AMP_SEC_GROUPS) + self.conf.config(group="controller_worker", amp_image_owner_id='') + + _db_amphora_mock.id = AMPHORA_ID + _db_amphora_mock.status = constants.AMPHORA_ALLOCATED + + logging_mock = mock.MagicMock() + compute_tasks.LOG = logging_mock + + super().setUp() + + @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.' + 'LoggingJinjaTemplater.build_logging_config') + @mock.patch('jinja2.Environment.get_template') + @mock.patch('octavia.amphorae.backends.agent.' + 'agent_jinja_cfg.AgentJinjaTemplater.' + 'build_agent_config', return_value='test_conf') + @mock.patch('octavia.common.jinja.' + 'user_data_jinja_cfg.UserDataJinjaCfg.' 
+ 'build_user_data_config', return_value='user_data_conf') + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_create(self, mock_driver, mock_ud_conf, + mock_conf, mock_jinja, mock_log_cfg): + + image_owner_id = uuidutils.generate_uuid() + self.conf.config( + group="controller_worker", amp_image_owner_id=image_owner_id) + mock_log_cfg.return_value = 'FAKE CFG' + + createcompute = compute_tasks.ComputeCreate() + + mock_driver.build.return_value = COMPUTE_ID + # Test execute() + compute_id = createcompute.execute(_db_amphora_mock.id, ports=[_port], + server_group_id=SERVER_GRPOUP_ID) + + # Validate that the build method was called properly + mock_driver.build.assert_called_once_with( + name="amphora-" + _db_amphora_mock.id, + amphora_flavor=AMP_FLAVOR_ID, + image_tag=AMP_IMAGE_TAG, + image_owner=image_owner_id, + key_name=AMP_SSH_KEY_NAME, + sec_groups=AMP_SEC_GROUPS, + network_ids=AMP_NET, + port_ids=[PORT_ID], + config_drive_files={'/etc/octavia/' + 'amphora-agent.conf': 'test_conf', + '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'}, + user_data='user_data_conf', + server_group_id=SERVER_GRPOUP_ID, + availability_zone=None + ) + + # Make sure it returns the expected compute_id + self.assertEqual(COMPUTE_ID, compute_id) + + # Test that a build exception is raised + createcompute = compute_tasks.ComputeCreate() + + self.assertRaises(TypeError, + createcompute.execute, + _db_amphora_mock, config_drive_files='test_cert') + + # Test revert() + + _db_amphora_mock.compute_id = COMPUTE_ID + + createcompute = compute_tasks.ComputeCreate() + createcompute.revert(compute_id, _db_amphora_mock.id) + + # Validate that the delete method was called properly + mock_driver.delete.assert_called_once_with( + COMPUTE_ID) + + # Test that a delete exception is not raised + + createcompute.revert(COMPUTE_ID, _db_amphora_mock.id) + + @mock.patch('jinja2.Environment.get_template') + @mock.patch('octavia.amphorae.backends.agent.' + 'agent_jinja_cfg.AgentJinjaTemplater.' + 'build_agent_config', return_value='test_conf') + @mock.patch('octavia.common.jinja.' + 'user_data_jinja_cfg.UserDataJinjaCfg.' 
+ 'build_user_data_config', return_value='user_data_conf') + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_create_user_data(self, mock_driver, + mock_ud_conf, mock_conf, mock_jinja): + + self.conf.config( + group="controller_worker", user_data_config_drive=True) + createcompute = compute_tasks.ComputeCreate() + + mock_driver.build.return_value = COMPUTE_ID + # Test execute() + compute_id = createcompute.execute(_db_amphora_mock.id, ports=[_port], + server_group_id=None) + + # Validate that the build method was called properly + mock_driver.build.assert_called_once_with( + name="amphora-" + _db_amphora_mock.id, + amphora_flavor=AMP_FLAVOR_ID, + image_tag=AMP_IMAGE_TAG, + image_owner='', + key_name=AMP_SSH_KEY_NAME, + sec_groups=AMP_SEC_GROUPS, + network_ids=AMP_NET, + port_ids=[PORT_ID], + config_drive_files=None, + user_data='user_data_conf', + server_group_id=None, + availability_zone=None) + + # Make sure it returns the expected compute_id + self.assertEqual(COMPUTE_ID, compute_id) + + # Test that a build exception is raised + createcompute = compute_tasks.ComputeCreate() + + self.assertRaises(TypeError, + createcompute.execute, + _db_amphora_mock, config_drive_files='test_cert') + + # Test revert() + + _db_amphora_mock.compute_id = COMPUTE_ID + + createcompute = compute_tasks.ComputeCreate() + createcompute.revert(compute_id, _db_amphora_mock.id) + + # Validate that the delete method was called properly + mock_driver.delete.assert_called_once_with( + COMPUTE_ID) + + # Test that a delete exception is not raised + + createcompute.revert(COMPUTE_ID, _db_amphora_mock.id) + + @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.' + 'LoggingJinjaTemplater.build_logging_config') + @mock.patch('jinja2.Environment.get_template') + @mock.patch('octavia.amphorae.backends.agent.' + 'agent_jinja_cfg.AgentJinjaTemplater.' + 'build_agent_config', return_value='test_conf') + @mock.patch('octavia.common.jinja.' + 'user_data_jinja_cfg.UserDataJinjaCfg.' 
+                'build_user_data_config', return_value='user_data_conf')
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_compute_create_availability_zone(self, mock_driver, mock_ud_conf,
+                                              mock_conf, mock_jinja,
+                                              mock_log_cfg):
+
+        image_owner_id = uuidutils.generate_uuid()
+        compute_zone = uuidutils.generate_uuid()
+        az_dict = {constants.COMPUTE_ZONE: compute_zone}
+
+        self.conf.config(
+            group="controller_worker", amp_image_owner_id=image_owner_id)
+        mock_log_cfg.return_value = 'FAKE CFG'
+
+        createcompute = compute_tasks.ComputeCreate()
+
+        mock_driver.build.return_value = COMPUTE_ID
+        # Test execute()
+        compute_id = createcompute.execute(_db_amphora_mock.id, ports=[_port],
+                                           server_group_id=SERVER_GROUP_ID,
+                                           availability_zone=az_dict)
+
+        # Validate that the build method was called properly
+        mock_driver.build.assert_called_once_with(
+            name="amphora-" + _db_amphora_mock.id,
+            amphora_flavor=AMP_FLAVOR_ID,
+            image_tag=AMP_IMAGE_TAG,
+            image_owner=image_owner_id,
+            key_name=AMP_SSH_KEY_NAME,
+            sec_groups=AMP_SEC_GROUPS,
+            network_ids=AMP_NET,
+            port_ids=[PORT_ID],
+            config_drive_files={'/etc/octavia/'
+                                'amphora-agent.conf': 'test_conf',
+                                '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'},
+            user_data='user_data_conf',
+            server_group_id=SERVER_GROUP_ID,
+            availability_zone=az_dict)
+
+        # Make sure it returns the expected compute_id
+        self.assertEqual(COMPUTE_ID, compute_id)
+
+        # Test that a build exception is raised
+        createcompute = compute_tasks.ComputeCreate()
+
+        self.assertRaises(TypeError,
+                          createcompute.execute,
+                          _db_amphora_mock, config_drive_files='test_cert')
+
+        # Test revert()
+
+        _db_amphora_mock.compute_id = COMPUTE_ID
+
+        createcompute = compute_tasks.ComputeCreate()
+        createcompute.revert(compute_id, _db_amphora_mock.id)
+
+        # Validate that the delete method was called properly
+        mock_driver.delete.assert_called_once_with(
+            COMPUTE_ID)
+
+        # Test that a delete exception is not raised
+
+        createcompute.revert(COMPUTE_ID, _db_amphora_mock.id)
+
+    @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.'
+                'LoggingJinjaTemplater.build_logging_config')
+    @mock.patch('jinja2.Environment.get_template')
+    @mock.patch('octavia.amphorae.backends.agent.'
+                'agent_jinja_cfg.AgentJinjaTemplater.'
+                'build_agent_config', return_value='test_conf')
+    @mock.patch('octavia.common.jinja.'
+                'user_data_jinja_cfg.UserDataJinjaCfg.'
+                'build_user_data_config', return_value='user_data_conf')
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_compute_create_without_ssh_access(
+            self, mock_driver, mock_user_data_conf,
+            mock_conf, mock_jinja, mock_log_cfg):
+
+        createcompute = compute_tasks.ComputeCreate()
+
+        mock_driver.build.return_value = COMPUTE_ID
+        self.conf.config(
+            group="controller_worker", user_data_config_drive=False)
+        mock_log_cfg.return_value = 'FAKE CFG'
+
+        # Test execute()
+        compute_id = createcompute.execute(_db_amphora_mock.id, ports=[_port],
+                                           server_group_id=SERVER_GROUP_ID)
+
+        # Validate that the build method was called properly
+        mock_driver.build.assert_called_once_with(
+            name="amphora-" + _db_amphora_mock.id,
+            amphora_flavor=AMP_FLAVOR_ID,
+            image_tag=AMP_IMAGE_TAG,
+            image_owner='',
+            key_name=None,
+            sec_groups=AMP_SEC_GROUPS,
+            network_ids=AMP_NET,
+            port_ids=[PORT_ID],
+            config_drive_files={'/etc/octavia/'
+                                'amphora-agent.conf': 'test_conf',
+                                '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'},
+            user_data='user_data_conf',
+            server_group_id=SERVER_GROUP_ID,
+            availability_zone=None)
+
+        self.assertEqual(COMPUTE_ID, compute_id)
+
+        # Test that a build exception is raised
+        createcompute = compute_tasks.ComputeCreate()
+
+        self.assertRaises(TypeError,
+                          createcompute.execute,
+                          _db_amphora_mock, config_drive_files='test_cert')
+
+        # Test revert()
+
+        _db_amphora_mock.compute_id = COMPUTE_ID
+
+        createcompute = compute_tasks.ComputeCreate()
+        createcompute.revert(compute_id, _db_amphora_mock.id)
+
+        # Validate that the delete method was called properly
+        mock_driver.delete.assert_called_once_with(
+            COMPUTE_ID)
+
+        # Test that a delete exception is not raised
+
+        createcompute.revert(COMPUTE_ID, _db_amphora_mock.id)
+
+    @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.'
+                'LoggingJinjaTemplater.build_logging_config')
+    @mock.patch('jinja2.Environment.get_template')
+    @mock.patch('octavia.amphorae.backends.agent.'
+                'agent_jinja_cfg.AgentJinjaTemplater.'
+                'build_agent_config', return_value='test_conf')
+    @mock.patch('octavia.common.jinja.'
+                'user_data_jinja_cfg.UserDataJinjaCfg.'
+                'build_user_data_config', return_value='user_data_conf')
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_compute_create_cert(self, mock_driver, mock_ud_conf,
+                                 mock_conf, mock_jinja, mock_log_cfg):
+        createcompute = compute_tasks.CertComputeCreate()
+        fer = utils.get_server_certs_key_passphrases_fernet()
+        mock_log_cfg.return_value = 'FAKE CFG'
+
+        mock_driver.build.return_value = COMPUTE_ID
+        path = '/etc/octavia/certs/ca_01.pem'
+        self.useFixture(test_utils.OpenFixture(path, 'test'))
+        # Test execute()
+        test_cert = fer.encrypt(
+            utils.get_compatible_value('test_cert')
+        ).decode('utf-8')
+        compute_id = createcompute.execute(_db_amphora_mock.id, test_cert,
+                                           server_group_id=SERVER_GROUP_ID
+                                           )
+
+        # Validate that the build method was called properly
+        mock_driver.build.assert_called_once_with(
+            name="amphora-" + _db_amphora_mock.id,
+            amphora_flavor=AMP_FLAVOR_ID,
+            image_tag=AMP_IMAGE_TAG,
+            image_owner='',
+            key_name=AMP_SSH_KEY_NAME,
+            sec_groups=AMP_SEC_GROUPS,
+            network_ids=AMP_NET,
+            port_ids=[],
+            user_data='user_data_conf',
+            config_drive_files={
+                '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG',
+                '/etc/octavia/certs/server.pem': fer.decrypt(
+                    test_cert.encode('utf-8')).decode('utf-8'),
+                '/etc/octavia/certs/client_ca.pem': 'test',
+                '/etc/octavia/amphora-agent.conf': 'test_conf'},
+            server_group_id=SERVER_GROUP_ID,
+            availability_zone=None)
+
+        self.assertEqual(COMPUTE_ID, compute_id)
+
+        # Test that a build exception is raised
+        self.useFixture(test_utils.OpenFixture(path, 'test'))
+
+        createcompute = compute_tasks.ComputeCreate()
+        self.assertRaises(TypeError,
+                          createcompute.execute,
+                          _db_amphora_mock,
+                          config_drive_files=test_cert)
+
+        # Test revert()
+
+        _db_amphora_mock.compute_id = COMPUTE_ID
+
+        createcompute = compute_tasks.ComputeCreate()
+        createcompute.revert(compute_id, _db_amphora_mock.id)
+
+        # Validate that the delete method was called properly
+        mock_driver.delete.assert_called_once_with(COMPUTE_ID)
+
+        # Test that a delete exception is not raised
+
+        createcompute.revert(COMPUTE_ID, _db_amphora_mock.id)
+
+    @mock.patch('octavia.controller.worker.amphora_rate_limit'
+                '.AmphoraBuildRateLimit.remove_from_build_req_queue')
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    @mock.patch('time.sleep')
+    def test_compute_wait(self,
+                          mock_time_sleep,
+                          mock_driver,
+                          mock_remove_from_build_queue):
+
+        self.conf.config(group='haproxy_amphora', build_rate_limit=5)
+        _db_amphora_mock.compute_id = COMPUTE_ID
+        _db_amphora_mock.status = constants.ACTIVE
+        _db_amphora_mock.lb_network_ip = LB_NET_IP
+
+        mock_driver.get_amphora.return_value = _db_amphora_mock, None
+
+        computewait = compute_tasks.ComputeWait()
+
+        # Test with no AZ
+        computewait.execute(COMPUTE_ID, AMPHORA_ID, None)
+        mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID, None)
+
+        # Test with AZ
+        mock_driver.reset_mock()
+        az = {constants.MANAGEMENT_NETWORK: uuidutils.generate_uuid()}
+        computewait.execute(COMPUTE_ID, AMPHORA_ID, az)
+        mock_driver.get_amphora.assert_called_once_with(
+            COMPUTE_ID, az[constants.MANAGEMENT_NETWORK])
+
+        # Test with deleted amp
+        _db_amphora_mock.status = constants.DELETED
+        self.assertRaises(exceptions.ComputeWaitTimeoutException,
+                          computewait.execute,
+                          _amphora_mock, AMPHORA_ID, None)
+
+    @mock.patch('octavia.controller.worker.amphora_rate_limit'
+                '.AmphoraBuildRateLimit.remove_from_build_req_queue')
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    @mock.patch('time.sleep')
+    def test_compute_wait_error_status(self,
+                                       mock_time_sleep,
+                                       mock_driver,
+                                       mock_remove_from_build_queue):
+
+        self.conf.config(group='haproxy_amphora', build_rate_limit=5)
+        _db_amphora_mock.compute_id = COMPUTE_ID
+        _db_amphora_mock.status = constants.ACTIVE
+        _db_amphora_mock.lb_network_ip = LB_NET_IP
+
+        mock_driver.get_amphora.return_value = _db_amphora_mock, None
+
+        computewait = compute_tasks.ComputeWait()
+        computewait.execute(COMPUTE_ID, AMPHORA_ID, None)
+
+        mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID, None)
+
+        _db_amphora_mock.status = constants.ERROR
+
+        self.assertRaises(exceptions.ComputeBuildException,
+                          computewait.execute,
+                          _db_amphora_mock, AMPHORA_ID, None)
+
+    @mock.patch('octavia.controller.worker.amphora_rate_limit'
+                '.AmphoraBuildRateLimit.remove_from_build_req_queue')
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    @mock.patch('time.sleep')
+    def test_compute_wait_skipped(self,
+                                  mock_time_sleep,
+                                  mock_driver,
+                                  mock_remove_from_build_queue):
+        _db_amphora_mock.compute_id = COMPUTE_ID
+        _db_amphora_mock.status = constants.ACTIVE
+        _db_amphora_mock.lb_network_ip = LB_NET_IP
+
+        mock_driver.get_amphora.return_value = _db_amphora_mock, None
+
+        computewait = compute_tasks.ComputeWait()
+        computewait.execute(COMPUTE_ID, AMPHORA_ID, None)
+
+        mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID, None)
+        mock_remove_from_build_queue.assert_not_called()
+
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    @mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
+    def test_delete_amphorae_on_load_balancer(self, mock_lb_get, mock_session,
+                                              mock_driver):
+
+        mock_driver.delete.side_effect = [mock.DEFAULT,
+                                          exceptions.OctaviaException('boom')]
+        delete_amps = compute_tasks.DeleteAmphoraeOnLoadBalancer()
+
+        mock_lb_get.return_value = _db_load_balancer_mock
+        delete_amps.execute(_load_balancer_mock)
+
+        mock_driver.delete.assert_called_once_with(COMPUTE_ID)
+
+        # Test compute driver exception is raised
+        self.assertRaises(exceptions.OctaviaException, delete_amps.execute,
+                          _load_balancer_mock)
+
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_compute_delete(self, mock_driver):
+        mock_driver.delete.side_effect = [
+            mock.DEFAULT, exceptions.OctaviaException('boom'),
+            mock.DEFAULT, exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom')]
+
+        delete_compute = compute_tasks.ComputeDelete()
+
+        # Limit the retry attempts for the test run to save time
+        delete_compute.execute.retry.stop = tenacity.stop_after_attempt(2)
+
+        delete_compute.execute(_amphora_mock)
+
+        mock_driver.delete.assert_called_once_with(COMPUTE_ID)
+
+        # Test retry after a compute exception
+        mock_driver.reset_mock()
+        delete_compute.execute(_amphora_mock)
+        mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID),
+                                             mock.call(COMPUTE_ID)])
+
+        # Test passive failure
+        mock_driver.reset_mock()
+        delete_compute.execute(_amphora_mock, passive_failure=True)
+        mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID),
+                                             mock.call(COMPUTE_ID)])
+
+        # Test non-passive failure
+        mock_driver.reset_mock()
+        self.assertRaises(exceptions.OctaviaException, delete_compute.execute,
+                          _amphora_mock, passive_failure=False)
+
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_nova_server_group_create(self, mock_driver):
+        nova_server_group_obj = compute_tasks.NovaServerGroupCreate()
+
+        server_group_test_id = '6789'
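+        # Fake the server group object returned by the compute driver;
+        # the task is expected to create it with the anti-affinity policy
+        # and hand back its id.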
+        fake_server_group = mock.MagicMock()
+        fake_server_group.id = server_group_test_id
+        fake_server_group.policy = 'anti-affinity'
+        mock_driver.create_server_group.return_value = fake_server_group
+
+        # Test execute()
+        sg_id = nova_server_group_obj.execute('123')
+
+        # Validate that the create_server_group method was called properly
+        mock_driver.create_server_group.assert_called_once_with(
+            'octavia-lb-123', 'anti-affinity')
+
+        # Make sure it returns the expected server group id
+        self.assertEqual(server_group_test_id, sg_id)
+
+        # Test revert()
+        nova_server_group_obj.revert(sg_id)
+
+        # Validate that the delete_server_group method was called properly
+        mock_driver.delete_server_group.assert_called_once_with(sg_id)
+
+        # Test revert with exception
+        mock_driver.reset_mock()
+        mock_driver.delete_server_group.side_effect = Exception('DelSGExcept')
+        nova_server_group_obj.revert(sg_id)
+        mock_driver.delete_server_group.assert_called_once_with(sg_id)
+
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_nova_server_group_delete_with_server_group_id(self, mock_driver):
+        nova_server_group_obj = compute_tasks.NovaServerGroupDelete()
+        sg_id = '6789'
+        nova_server_group_obj.execute(sg_id)
+        mock_driver.delete_server_group.assert_called_once_with(sg_id)
+
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_nova_server_group_delete_with_None(self, mock_driver):
+        nova_server_group_obj = compute_tasks.NovaServerGroupDelete()
+        sg_id = None
+        nova_server_group_obj.execute(sg_id)
+        self.assertFalse(mock_driver.delete_server_group.called, sg_id)
+
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_attach_port(self, mock_driver):
+        COMPUTE_ID = uuidutils.generate_uuid()
+        PORT_ID = uuidutils.generate_uuid()
+        amphora_dict = {constants.COMPUTE_ID: COMPUTE_ID}
+        port_dict = {constants.ID: PORT_ID}
+
+        attach_port_obj = compute_tasks.AttachPort()
+
+        # Test execute
+        attach_port_obj.execute(amphora_dict, port_dict)
+
+        mock_driver.attach_network_or_port.assert_called_once_with(
+            COMPUTE_ID, port_id=PORT_ID)
+
+        # Test revert
+        mock_driver.reset_mock()
+
+        attach_port_obj.revert(amphora_dict, port_dict)
+
+        mock_driver.detach_port.assert_called_once_with(COMPUTE_ID, PORT_ID)
+
+        # Test revert exception
+        mock_driver.reset_mock()
+        mock_driver.detach_port.side_effect = [Exception('boom')]
+
+        # should not raise
+        attach_port_obj.revert(amphora_dict, port_dict)
diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py
new file mode 100644
index 0000000000..5a2cbbf2ac
--- /dev/null
+++ b/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py
@@ -0,0 +1,3061 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# +import copy +import random +from unittest import mock + +from oslo_db import exception as odb_exceptions +from oslo_utils import uuidutils +from sqlalchemy.orm import exc +from taskflow.types import failure + +from octavia.api.drivers import utils as provider_utils +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.common import utils +from octavia.controller.worker.v2.tasks import database_tasks +from octavia.db import repositories as repo +import octavia.tests.unit.base as base + + +AMP_ID = uuidutils.generate_uuid() +AMP2_ID = uuidutils.generate_uuid() +COMPUTE_ID = uuidutils.generate_uuid() +LB_ID = uuidutils.generate_uuid() +SERVER_GROUP_ID = uuidutils.generate_uuid() +LB_NET_IP = '192.0.2.2' +LISTENER_ID = uuidutils.generate_uuid() +POOL_ID = uuidutils.generate_uuid() +HM_ID = uuidutils.generate_uuid() +MEMBER_ID = uuidutils.generate_uuid() +PORT_ID = uuidutils.generate_uuid() +PORT_ID2 = uuidutils.generate_uuid() +PORT_ID3 = uuidutils.generate_uuid() +SUBNET_ID = uuidutils.generate_uuid() +SUBNET_ID2 = uuidutils.generate_uuid() +SUBNET_ID3 = uuidutils.generate_uuid() +VRRP_PORT_ID = uuidutils.generate_uuid() +HA_PORT_ID = uuidutils.generate_uuid() +L7POLICY_ID = uuidutils.generate_uuid() +L7RULE_ID = uuidutils.generate_uuid() +VIP_IP = '192.0.5.2' +VIP_IP2 = '192.0.5.5' +VIP_IP3 = '192.0.5.6' +VRRP_ID = 1 +VRRP_IP = '192.0.5.3' +HA_IP = '192.0.5.4' +AMP_ROLE = 'FAKE_ROLE' +VRRP_PRIORITY = random.randrange(100) +CACHED_ZONE = 'zone1' +IMAGE_ID = uuidutils.generate_uuid() +COMPUTE_FLAVOR = uuidutils.generate_uuid() + +_db_amphora_mock = mock.MagicMock() +_db_amphora_mock.id = AMP_ID +_db_amphora_mock.compute_id = COMPUTE_ID +_db_amphora_mock.lb_network_ip = LB_NET_IP +_db_amphora_mock.vrrp_ip = VRRP_IP +_db_amphora_mock.ha_ip = HA_IP +_db_amphora_mock.ha_port_id = HA_PORT_ID +_db_amphora_mock.vrrp_port_id = VRRP_PORT_ID +_db_amphora_mock.role = AMP_ROLE +_db_amphora_mock.vrrp_id = VRRP_ID +_db_amphora_mock.vrrp_priority = VRRP_PRIORITY +_db_loadbalancer_mock = mock.MagicMock() +_db_loadbalancer_mock.id = LB_ID +_db_loadbalancer_mock.vip_address = VIP_IP +_db_loadbalancer_mock.amphorae = [_db_amphora_mock] +_db_loadbalancer_mock.to_dict.return_value = { + constants.ID: LB_ID +} +_l7policy_mock = mock.MagicMock() +_l7policy_mock.id = L7POLICY_ID +_l7rule_mock = mock.MagicMock() +_l7rule_mock.id = L7RULE_ID +_listener_mock = mock.MagicMock() +_listener_to_dict_mock = mock.MagicMock( + return_value={constants.ID: LISTENER_ID}) +_listener_mock.id = LISTENER_ID +_listener_mock.to_dict = _listener_to_dict_mock +_tf_failure_mock = mock.Mock(spec=failure.Failure) +_vip_mock = mock.MagicMock() +_vip_mock.port_id = PORT_ID +_vip_mock.subnet_id = SUBNET_ID +_vip_mock.ip_address = VIP_IP +_vip_mock.to_dict.return_value = { + constants.PORT_ID: PORT_ID, + constants.SUBNET_ID: SUBNET_ID, + constants.IP_ADDRESS: VIP_IP, +} +_vrrp_group_mock = mock.MagicMock() +_cert_mock = mock.MagicMock() +_compute_mock_dict = { + constants.LB_NETWORK_IP: LB_NET_IP, + constants.CACHED_ZONE: CACHED_ZONE, + constants.IMAGE_ID: IMAGE_ID, + constants.COMPUTE_FLAVOR: COMPUTE_FLAVOR +} + + +@mock.patch('octavia.db.repositories.AmphoraRepository.delete') +@mock.patch('octavia.db.repositories.AmphoraRepository.update') +@mock.patch('octavia.db.repositories.ListenerRepository.update') +@mock.patch('octavia.db.repositories.LoadBalancerRepository.update') +@mock.patch('octavia.db.api.session') 
+@mock.patch('octavia.controller.worker.v2.tasks.database_tasks.LOG') +@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID) +class TestDatabaseTasks(base.TestCase): + + def setUp(self): + + self.db_health_mon_mock = mock.MagicMock() + self.db_health_mon_mock.id = HM_ID + self.db_health_mon_mock.pool_id = POOL_ID + + self.health_mon_mock = { + constants.HEALTHMONITOR_ID: HM_ID, + constants.POOL_ID: POOL_ID, + constants.ADMIN_STATE_UP: True, + } + + self.listener_mock = mock.MagicMock() + self.listener_mock.id = LISTENER_ID + + self.loadbalancer_mock = ( + provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_loadbalancer_mock).to_dict()) + + self.member_mock = mock.MagicMock() + self.member_mock.id = MEMBER_ID + + self.db_pool_mock = mock.MagicMock() + self.db_pool_mock.id = POOL_ID + self.db_pool_mock.health_monitor = self.db_health_mon_mock + self.db_health_mon_mock.to_dict.return_value = { + constants.ID: HM_ID, + constants.POOL_ID: POOL_ID, + } + + self.member_mock = { + constants.MEMBER_ID: MEMBER_ID, + constants.POOL_ID: POOL_ID, + } + + self.l7policy_mock = { + constants.L7POLICY_ID: L7POLICY_ID, + constants.ADMIN_STATE_UP: True, + } + + self.l7rule_mock = { + constants.L7RULE_ID: L7RULE_ID, + constants.ADMIN_STATE_UP: True, + constants.L7POLICY_ID: L7POLICY_ID, + } + + self.amphora = { + constants.ID: AMP_ID, + constants.COMPUTE_ID: COMPUTE_ID, + constants.LB_NETWORK_IP: LB_NET_IP, + constants.VRRP_IP: VRRP_IP, + constants.HA_IP: HA_IP, + constants.HA_PORT_ID: HA_PORT_ID, + constants.VRRP_PORT_ID: VRRP_PORT_ID, + constants.ROLE: AMP_ROLE, + constants.VRRP_ID: VRRP_ID, + constants.VRRP_PRIORITY: VRRP_PRIORITY, + } + _db_amphora_mock.to_dict.return_value = self.amphora + + super().setUp() + + @mock.patch('octavia.db.repositories.AmphoraRepository.create', + return_value=_db_amphora_mock) + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') + def test_create_amphora_in_db(self, + mock_amphora_health_repo_delete, + mock_create, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + create_amp_in_db = database_tasks.CreateAmphoraInDB() + amp_id = create_amp_in_db.execute() + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.create.assert_called_once_with( + mock_session, + id=AMP_ID, + load_balancer_id=None, + status=constants.PENDING_CREATE, + cert_busy=False) + + self.assertEqual(_db_amphora_mock.id, amp_id) + + # Test the revert + create_amp_in_db.revert(_tf_failure_mock) + mock_amphora_repo_delete.assert_not_called() + mock_amphora_health_repo_delete.assert_not_called() + + amp_id = 'AMP' + mock_amphora_repo_delete.reset_mock() + mock_amphora_health_repo_delete.reset_mock() + create_amp_in_db.revert(result=amp_id) + self.assertTrue(mock_amphora_repo_delete.called) + self.assertTrue(mock_amphora_health_repo_delete.called) + mock_amphora_repo_delete.assert_called_once_with( + mock_session, + id=amp_id) + mock_amphora_health_repo_delete.assert_called_once_with( + mock_session, + amphora_id=amp_id) + mock_LOG.error.assert_not_called() + mock_LOG.debug.assert_not_called() + + # Test revert with exception + mock_amphora_repo_delete.reset_mock() + mock_amphora_health_repo_delete.reset_mock() + err1_msg, err2_msg = ('fail', 'fail2') + mock_amphora_repo_delete.side_effect = Exception(err1_msg) + mock_amphora_health_repo_delete.side_effect = Exception(err2_msg) + 
create_amp_in_db.revert(result=amp_id) + self.assertTrue(mock_amphora_repo_delete.called) + self.assertTrue(mock_amphora_health_repo_delete.called) + mock_amphora_repo_delete.assert_called_once_with( + mock_session, + id=amp_id) + mock_amphora_health_repo_delete.assert_called_once_with( + mock_session, + amphora_id=amp_id) + mock_LOG.error.assert_called_once_with( + "Failed to delete amphora %(amp)s " + "in the database due to: " + "%(except)s", {'amp': amp_id, 'except': err1_msg}) + + @mock.patch('octavia.db.repositories.ListenerRepository.delete') + def test_delete_listener_in_db(self, + mock_listener_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_listener = database_tasks.DeleteListenerInDB() + delete_listener.execute({constants.LISTENER_ID: LISTENER_ID}) + + mock_session = mock_get_session().begin().__enter__() + + repo.ListenerRepository.delete.assert_called_once_with( + mock_session, + id=LISTENER_ID) + + # Test the revert + repo.ListenerRepository.delete.reset_mock() + delete_listener.revert({constants.LISTENER_ID: LISTENER_ID}) + repo.ListenerRepository.delete.assert_not_called() + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + @mock.patch('octavia.db.repositories.HealthMonitorRepository.delete') + def test_delete_health_monitor_in_db(self, + mock_health_mon_repo_delete, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_health_mon = database_tasks.DeleteHealthMonitorInDB() + delete_health_mon.execute(self.health_mon_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.HealthMonitorRepository.delete.assert_called_once_with( + mock_session, id=HM_ID) + + # Test the revert + mock_health_mon_repo_delete.reset_mock() + delete_health_mon.revert(self.health_mon_mock) + + repo.HealthMonitorRepository.update.assert_called_once_with( + mock_session, id=HM_ID, provisioning_status=constants.ERROR) + + # Test Not Found Exception + mock_health_mon_repo_delete.reset_mock() + mock_health_mon_repo_delete.side_effect = [exc.NoResultFound()] + delete_health_mon.execute(self.health_mon_mock) + + repo.HealthMonitorRepository.delete.assert_called_once_with( + mock_session, id=HM_ID) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + @mock.patch('octavia.db.repositories.HealthMonitorRepository.delete') + @mock.patch('octavia.db.repositories.PoolRepository.get') + def test_delete_health_monitor_in_db_by_pool(self, + mock_pool_repo_get, + mock_health_mon_repo_delete, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + mock_pool_repo_get.return_value = self.db_pool_mock + delete_health_mon = database_tasks.DeleteHealthMonitorInDBByPool() + delete_health_mon.execute(POOL_ID) + + mock_session = mock_get_session().begin().__enter__() + + repo.HealthMonitorRepository.delete.assert_called_once_with( + mock_session, + id=HM_ID) + + # Test the revert + mock_health_mon_repo_delete.reset_mock() + delete_health_mon.revert(POOL_ID) + + repo.HealthMonitorRepository.update.assert_called_once_with( + mock_session, id=HM_ID, provisioning_status=constants.ERROR) + +# TODO(johnsom) fix once 
provisioning status added +# repo.HealthMonitorRepository.update.assert_called_once_with( +# mock_session, +# POOL_ID, +# provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.delete') + def test_delete_member_in_db(self, + mock_member_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_member = database_tasks.DeleteMemberInDB() + delete_member.execute(self.member_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.MemberRepository.delete.assert_called_once_with( + mock_session, + id=MEMBER_ID) + + # Test the revert + + mock_member_repo_delete.reset_mock() + delete_member.revert(self.member_mock) + +# TODO(johnsom) Fix +# repo.MemberRepository.delete.assert_called_once_with( +# mock_session, +# MEMBER_ID) + + @mock.patch('octavia.db.repositories.PoolRepository.delete') + def test_delete_pool_in_db(self, + mock_pool_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_pool = database_tasks.DeletePoolInDB() + delete_pool.execute(POOL_ID) + + mock_session = mock_get_session().begin().__enter__() + + repo.PoolRepository.delete.assert_called_once_with( + mock_session, + id=POOL_ID) + + # Test the revert + + mock_pool_repo_delete.reset_mock() + delete_pool.revert(POOL_ID) + +# TODO(johnsom) Fix +# repo.PoolRepository.update.assert_called_once_with( +# mock_session, +# POOL_ID, +# operating_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.delete') + def test_delete_l7policy_in_db(self, + mock_l7policy_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_l7policy = database_tasks.DeleteL7PolicyInDB() + delete_l7policy.execute(self.l7policy_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.L7PolicyRepository.delete.assert_called_once_with( + mock_session, + id=L7POLICY_ID) + + # Test the revert + + mock_l7policy_repo_delete.reset_mock() + delete_l7policy.revert(self.l7policy_mock) + +# TODO(sbalukoff) Fix +# repo.ListenerRepository.update.assert_called_once_with( +# mock_session, +# LISTENER_ID, +# operating_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.delete') + def test_delete_l7rule_in_db(self, + mock_l7rule_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + delete_l7rule = database_tasks.DeleteL7RuleInDB() + delete_l7rule.execute(self.l7rule_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.L7RuleRepository.delete.assert_called_once_with( + mock_session, + id=L7RULE_ID) + + # Test the revert + + mock_l7rule_repo_delete.reset_mock() + delete_l7rule.revert(self.l7rule_mock) + +# TODO(sbalukoff) Fix +# repo.ListenerRepository.update.assert_called_once_with( +# mock_session, +# LISTENER_ID, +# operating_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_db_amphora_mock) + def test_reload_amphora(self, + mock_amp_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + 
mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + reload_amp = database_tasks.ReloadAmphora() + amp = reload_amp.execute(self.amphora) + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.get.assert_called_once_with( + mock_session, + id=AMP_ID) + + self.assertEqual(_db_amphora_mock.to_dict(), amp) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_db_loadbalancer_mock) + def test_reload_load_balancer(self, + mock_lb_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + reload_lb = database_tasks.ReloadLoadBalancer() + lb = reload_lb.execute(LB_ID) + + mock_session = mock_get_session().begin().__enter__() + + repo.LoadBalancerRepository.get.assert_called_once_with( + mock_session, + id=LB_ID) + + self.assertEqual(self.loadbalancer_mock, lb) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_db_loadbalancer_mock) + @mock.patch('octavia.db.repositories.VipRepository.update') + def test_update_vip_after_allocation(self, + mock_vip_update, + mock_loadbalancer_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_vip = database_tasks.UpdateVIPAfterAllocation() + loadbalancer = update_vip.execute(LB_ID, _vip_mock.to_dict()) + + mock_session = mock_get_session().begin().__enter__() + + self.assertEqual(self.loadbalancer_mock, loadbalancer) + mock_vip_update.assert_called_once_with(mock_session, + LB_ID, + port_id=PORT_ID, + subnet_id=SUBNET_ID, + ip_address=VIP_IP) + mock_loadbalancer_get.assert_called_once_with(mock_session, + id=LB_ID) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_db_loadbalancer_mock) + @mock.patch('octavia.db.repositories.AdditionalVipRepository.update') + def test_update_additional_vips_after_allocation( + self, + mock_additional_vip_update, + mock_loadbalancer_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + additional_vip1_dict = { + "subnet_id": SUBNET_ID2, + "ip_address": VIP_IP2, + "port_id": PORT_ID2 + } + additional_vip2_dict = { + "subnet_id": SUBNET_ID3, + "ip_address": VIP_IP3, + "port_id": PORT_ID3 + } + + mock_session = mock_get_session().begin().__enter__() + + update_additional_vips = ( + database_tasks.UpdateAdditionalVIPsAfterAllocation()) + update_additional_vips.execute( + LB_ID, [additional_vip1_dict, additional_vip2_dict]) + mock_additional_vip_update.assert_any_call( + mock_session, LB_ID, SUBNET_ID2, ip_address=VIP_IP2, + port_id=PORT_ID2) + mock_additional_vip_update.assert_any_call( + mock_session, LB_ID, SUBNET_ID3, ip_address=VIP_IP3, + port_id=PORT_ID3) + + def test_update_amphora_vip_data(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_amp_vip_data = database_tasks.UpdateAmphoraeVIPData() + update_amp_vip_data.execute([self.amphora]) + + mock_session = mock_get_session().begin().__enter__() + + mock_amphora_repo_update.assert_called_once_with( + mock_session, + AMP_ID, + vrrp_ip=VRRP_IP, + ha_ip=HA_IP, + vrrp_port_id=VRRP_PORT_ID, + 
ha_port_id=HA_PORT_ID, + vrrp_id=1) + + def test_update_amphora_vip_data2(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + update_amp_vip_data2 = database_tasks.UpdateAmphoraVIPData() + update_amp_vip_data2.execute(self.amphora) + + mock_session = mock_get_session().begin().__enter__() + + mock_amphora_repo_update.assert_called_once_with( + mock_session, + AMP_ID, + vrrp_ip=VRRP_IP, + ha_ip=HA_IP, + vrrp_port_id=VRRP_PORT_ID, + ha_port_id=HA_PORT_ID, + vrrp_id=1) + + def test_update_amp_failover_details(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + amphora_dict = {constants.ID: AMP_ID} + vip_dict = {constants.IP_ADDRESS: HA_IP, + constants.PORT_ID: HA_PORT_ID} + fixed_ips = [{constants.IP_ADDRESS: VRRP_IP}] + base_port_dict = {constants.ID: VRRP_PORT_ID, + constants.FIXED_IPS: fixed_ips} + + update_amp_fo_details = database_tasks.UpdateAmpFailoverDetails() + update_amp_fo_details.execute(amphora_dict, vip_dict, base_port_dict) + + mock_session = mock_get_session().begin().__enter__() + + mock_amphora_repo_update.assert_called_once_with( + mock_session, + AMP_ID, + vrrp_ip=VRRP_IP, + ha_ip=HA_IP, + vrrp_port_id=VRRP_PORT_ID, + ha_port_id=HA_PORT_ID, + vrrp_id=VRRP_ID) + + @mock.patch('octavia.db.repositories.AmphoraRepository.associate') + def test_associate_failover_amphora_with_lb_id( + self, + mock_associate, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + assoc_fo_amp_lb_id = database_tasks.AssociateFailoverAmphoraWithLBID() + assoc_fo_amp_lb_id.execute(AMP_ID, LB_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_associate.assert_called_once_with(mock_session, + load_balancer_id=LB_ID, + amphora_id=AMP_ID) + + # Test revert + assoc_fo_amp_lb_id.revert(AMP_ID) + + mock_amphora_repo_update.assert_called_once_with(mock_session, + AMP_ID, + loadbalancer_id=None) + + # Test revert with exception + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + + assoc_fo_amp_lb_id.revert(AMP_ID) + + mock_amphora_repo_update.assert_called_once_with(mock_session, + AMP_ID, + loadbalancer_id=None) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_db_amphora_mock) + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_db_loadbalancer_mock) + def test_mark_lb_amphorae_deleted_in_db(self, + mock_loadbalancer_repo_get, + mock_amphora_repo_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_deleted_in_db = (database_tasks. 
+ MarkLBAmphoraeDeletedInDB()) + mark_amp_deleted_in_db.execute(self.loadbalancer_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.DELETED) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_db_amphora_mock) + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_db_loadbalancer_mock) + def test_mark_amphora_allocated_in_db(self, + mock_loadbalancer_repo_get, + mock_amphora_repo_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_allocated_in_db = (database_tasks. + MarkAmphoraAllocatedInDB()) + mark_amp_allocated_in_db.execute(self.amphora, + LB_ID) + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + AMP_ID, + status=constants.AMPHORA_ALLOCATED, + compute_id=COMPUTE_ID, + lb_network_ip=LB_NET_IP, + load_balancer_id=LB_ID) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_allocated_in_db.revert(None, self.amphora, + LB_ID) + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + # Test the revert with exception + + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mark_amp_allocated_in_db.revert(None, self.amphora, + LB_ID) + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_booting_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_booting_in_db = database_tasks.MarkAmphoraBootingInDB() + mark_amp_booting_in_db.execute(_db_amphora_mock.id, + _db_amphora_mock.compute_id) + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + AMP_ID, + status=constants.AMPHORA_BOOTING, + compute_id=COMPUTE_ID) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_booting_in_db.revert(None, _db_amphora_mock.id, + _db_amphora_mock.compute_id) + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + AMP_ID, + status=constants.ERROR, + compute_id=COMPUTE_ID) + + # Test the revert with exception + + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mark_amp_booting_in_db.revert(None, _db_amphora_mock.id, + _db_amphora_mock.compute_id) + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + AMP_ID, + status=constants.ERROR, + compute_id=COMPUTE_ID) + + def test_mark_amphora_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_deleted_in_db = database_tasks.MarkAmphoraDeletedInDB() + mark_amp_deleted_in_db.execute(self.amphora) + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + AMP_ID, + status=constants.DELETED) + + # Test the revert + mock_amphora_repo_update.reset_mock() + mark_amp_deleted_in_db.revert(self.amphora) + + 
repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + # Test the revert with exception + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mark_amp_deleted_in_db.revert(self.amphora) + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_pending_delete_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_pending_delete_in_db = (database_tasks. + MarkAmphoraPendingDeleteInDB()) + mark_amp_pending_delete_in_db.execute(self.amphora) + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + AMP_ID, + status=constants.PENDING_DELETE) + + # Test the revert + mock_amphora_repo_update.reset_mock() + mark_amp_pending_delete_in_db.revert(self.amphora) + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + # Test the revert with exception + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + + mark_amp_pending_delete_in_db.revert(self.amphora) + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_pending_update_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_pending_update_in_db = (database_tasks. + MarkAmphoraPendingUpdateInDB()) + mark_amp_pending_update_in_db.execute(self.amphora) + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + AMP_ID, + status=constants.PENDING_UPDATE) + + # Test the revert + mock_amphora_repo_update.reset_mock() + mark_amp_pending_update_in_db.revert(self.amphora) + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + # Test the revert with exception + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mark_amp_pending_update_in_db.revert(self.amphora) + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + id=AMP_ID, + status=constants.ERROR) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + def test_update_amphora_info(self, + mock_amphora_repo_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_amphora_info = database_tasks.UpdateAmphoraInfo() + update_amphora_info.execute(AMP_ID, _compute_mock_dict) + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + AMP_ID, + lb_network_ip=LB_NET_IP, + cached_zone=CACHED_ZONE, + image_id=IMAGE_ID, + compute_flavor=COMPUTE_FLAVOR) + + repo.AmphoraRepository.get.assert_called_once_with( + mock_session, + id=AMP_ID) + + def test_mark_listener_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + 
mark_listener_deleted = database_tasks.MarkListenerDeletedInDB() + mark_listener_deleted.execute(self.listener_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + LISTENER_ID, + provisioning_status=constants.DELETED) + + # Test the revert + mock_listener_repo_update.reset_mock() + mark_listener_deleted.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_listener_repo_update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + mark_listener_deleted.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + def test_mark_listener_pending_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_listener_pending_delete = (database_tasks. + MarkListenerPendingDeleteInDB()) + mark_listener_pending_delete.execute(self.listener_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + LISTENER_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_listener_repo_update.reset_mock() + mark_listener_pending_delete.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_listener_repo_update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + mark_listener_pending_delete.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.ListenerRepository.' + 'prov_status_active_if_not_error') + def test_mark_lb_and_listeners_active_in_db(self, + mock_list_not_error, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + listener_dict = {constants.LISTENER_ID: LISTENER_ID, + constants.LOADBALANCER_ID: LB_ID} + mark_lb_and_listeners_active = (database_tasks. + MarkLBAndListenersActiveInDB()) + mark_lb_and_listeners_active.execute(LB_ID, [listener_dict]) + + mock_session = mock_get_session().begin().__enter__() + + mock_list_not_error.assert_called_once_with(mock_session, + LISTENER_ID) + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + LB_ID, + provisioning_status=constants.ACTIVE) + + # Test with LB_ID from listeners + mock_loadbalancer_repo_update.reset_mock() + mock_list_not_error.reset_mock() + + listener_dict = {constants.LISTENER_ID: LISTENER_ID, + constants.LOADBALANCER_ID: LB_ID} + mark_lb_and_listeners_active = (database_tasks. 
+ MarkLBAndListenersActiveInDB()) + mark_lb_and_listeners_active.execute(None, [listener_dict]) + + mock_list_not_error.assert_called_once_with(mock_session, + LISTENER_ID) + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + LB_ID, + provisioning_status=constants.ACTIVE) + + # Test with no LB_ID + mock_loadbalancer_repo_update.reset_mock() + mark_lb_and_listeners_active.execute(None, []) + mock_loadbalancer_repo_update.assert_not_called() + + # Test the revert + mock_loadbalancer_repo_update.reset_mock() + mock_listener_repo_update.reset_mock() + + mark_lb_and_listeners_active.revert(LB_ID, [listener_dict]) + + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + repo.LoadBalancerRepository.update.assert_not_called() + + # Test the revert LB_ID from listeners + mock_loadbalancer_repo_update.reset_mock() + mock_listener_repo_update.reset_mock() + + mark_lb_and_listeners_active.revert(None, [listener_dict]) + + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + repo.LoadBalancerRepository.update.assert_not_called() + + # Test the revert no LB_ID + mock_loadbalancer_repo_update.reset_mock() + mock_listener_repo_update.reset_mock() + + mark_lb_and_listeners_active.revert(None, []) + + mock_loadbalancer_repo_update.assert_not_called() + mock_listener_repo_update.assert_not_called() + + # Test the revert with exceptions + mock_loadbalancer_repo_update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + mock_listener_repo_update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + + mark_lb_and_listeners_active.revert(LB_ID, [listener_dict]) + + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + repo.LoadBalancerRepository.update.assert_not_called() + + @mock.patch('octavia.common.tls_utils.cert_parser.get_cert_expiration', + return_value=_cert_mock) + def test_update_amphora_db_cert_exp(self, + mock_get_cert_exp, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_amp_cert = database_tasks.UpdateAmphoraDBCertExpiration() + fer = utils.get_server_certs_key_passphrases_fernet() + _pem_mock = fer.encrypt( + utils.get_compatible_value('test_cert') + ).decode('utf-8') + update_amp_cert.execute(_db_amphora_mock.id, _pem_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + AMP_ID, + cert_expiration=_cert_mock) + + def test_update_amphora_cert_busy_to_false(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + amp_cert_busy_to_F = database_tasks.UpdateAmphoraCertBusyToFalse() + amp_cert_busy_to_F.execute(AMP_ID) + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, + AMP_ID, + cert_busy=False) + + def test_mark_LB_active_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_loadbalancer_active = database_tasks.MarkLBActiveInDB() + 
mark_loadbalancer_active.execute(self.loadbalancer_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + LB_ID, + provisioning_status=constants.ACTIVE) + self.assertEqual(0, repo.ListenerRepository.update.call_count) + + # Test the revert + mock_loadbalancer_repo_update.reset_mock() + mark_loadbalancer_active.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_not_called() + self.assertEqual(0, repo.ListenerRepository.update.call_count) + + # Test the revert with exception + mock_loadbalancer_repo_update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + mark_loadbalancer_active.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_not_called() + self.assertEqual(0, repo.ListenerRepository.update.call_count) + + def test_mark_LB_active_in_db_by_listener(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + listener_dict = {'loadbalancer_id': LB_ID} + mark_loadbalancer_active = database_tasks.MarkLBActiveInDBByListener() + mark_loadbalancer_active.execute(listener_dict) + + mock_session = mock_get_session().begin().__enter__() + + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + LB_ID, + provisioning_status=constants.ACTIVE) + self.assertEqual(0, repo.ListenerRepository.update.call_count) + + # Test the revert + mock_loadbalancer_repo_update.reset_mock() + mark_loadbalancer_active.revert(listener_dict) + + repo.LoadBalancerRepository.update.assert_not_called() + self.assertEqual(0, repo.ListenerRepository.update.call_count) + + # Test the revert with exception + mock_loadbalancer_repo_update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + mark_loadbalancer_active.revert(listener_dict) + + repo.LoadBalancerRepository.update.assert_not_called() + self.assertEqual(0, repo.ListenerRepository.update.call_count) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_mark_LB_active_in_db_and_listeners(self, + mock_lb_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + listeners = [data_models.Listener(id='listener1'), + data_models.Listener(id='listener2')] + lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners) + mock_lb_get.return_value = lb + mark_lb_active = database_tasks.MarkLBActiveInDB(mark_subobjects=True) + mark_lb_active.execute(self.loadbalancer_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + lb.id, + provisioning_status=constants.ACTIVE) + self.assertEqual(2, repo.ListenerRepository.update.call_count) + repo.ListenerRepository.update.assert_has_calls( + [mock.call(mock_session, listeners[0].id, + provisioning_status=constants.ACTIVE), + mock.call(mock_session, listeners[1].id, + provisioning_status=constants.ACTIVE)]) + + mock_loadbalancer_repo_update.reset_mock() + mock_listener_repo_update.reset_mock() + mark_lb_active.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_not_called() + self.assertEqual(2, repo.ListenerRepository.update.call_count) + repo.ListenerRepository.update.assert_has_calls( + [mock.call(mock_session, listeners[0].id, + 
provisioning_status=constants.ERROR), + mock.call(mock_session, listeners[1].id, + provisioning_status=constants.ERROR)]) + + @mock.patch('octavia.db.repositories.PoolRepository.update') + @mock.patch('octavia.db.repositories.MemberRepository.update') + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_mark_LB_active_in_db_full_graph(self, + mock_lb_repo_get, + mock_l7r_repo_update, + mock_l7p_repo_update, + mock_hm_repo_update, + mock_member_repo_update, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + unused_pool = data_models.Pool(id='unused_pool') + members1 = [data_models.Member(id='member1'), + data_models.Member(id='member2')] + health_monitor = data_models.HealthMonitor(id='hm1') + default_pool = data_models.Pool(id='default_pool', + members=members1, + health_monitor=health_monitor) + listener1 = data_models.Listener(id='listener1', + default_pool=default_pool) + members2 = [data_models.Member(id='member3'), + data_models.Member(id='member4')] + redirect_pool = data_models.Pool(id='redirect_pool', + members=members2) + l7rules = [data_models.L7Rule(id='rule1')] + redirect_policy = data_models.L7Policy(id='redirect_policy', + redirect_pool=redirect_pool, + l7rules=l7rules) + l7policies = [redirect_policy] + listener2 = data_models.Listener(id='listener2', + l7policies=l7policies) + listener2.l7policies = l7policies + listeners = [listener1, listener2] + pools = [default_pool, redirect_pool, unused_pool] + + lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners, + pools=pools) + mark_lb_active = database_tasks.MarkLBActiveInDB(mark_subobjects=True) + mock_lb_repo_get.return_value = lb + mark_lb_active.execute(self.loadbalancer_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + lb.id, + provisioning_status=constants.ACTIVE) + repo.ListenerRepository.update.assert_has_calls( + [mock.call(mock_session, listeners[0].id, + provisioning_status=constants.ACTIVE), + mock.call(mock_session, listeners[1].id, + provisioning_status=constants.ACTIVE)]) + repo.PoolRepository.update.assert_has_calls( + [mock.call(mock_session, default_pool.id, + provisioning_status=constants.ACTIVE), + mock.call(mock_session, redirect_pool.id, + provisioning_status=constants.ACTIVE), + mock.call(mock_session, unused_pool.id, + provisioning_status=constants.ACTIVE)]) + repo.HealthMonitorRepository.update.assert_has_calls( + [mock.call(mock_session, health_monitor.id, + provisioning_status=constants.ACTIVE)]) + repo.L7PolicyRepository.update.assert_has_calls( + [mock.call(mock_session, l7policies[0].id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(1, repo.L7RuleRepository.update.call_count) + repo.L7RuleRepository.update.assert_has_calls( + [mock.call(mock_session, l7rules[0].id, + provisioning_status=constants.ACTIVE)]) + + mock_loadbalancer_repo_update.reset_mock() + mock_listener_repo_update.reset_mock() + mock_pool_repo_update.reset_mock() + mock_member_repo_update.reset_mock() + mock_hm_repo_update.reset_mock() + mock_l7p_repo_update.reset_mock() + mock_l7r_repo_update.reset_mock() + 
mark_lb_active.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_not_called() + self.assertEqual(2, repo.ListenerRepository.update.call_count) + repo.ListenerRepository.update.assert_has_calls( + [mock.call(mock_session, listeners[0].id, + provisioning_status=constants.ERROR), + mock.call(mock_session, listeners[1].id, + provisioning_status=constants.ERROR)]) + repo.PoolRepository.update.assert_has_calls( + [mock.call(mock_session, default_pool.id, + provisioning_status=constants.ERROR), + mock.call(mock_session, redirect_pool.id, + provisioning_status=constants.ERROR), + mock.call(mock_session, unused_pool.id, + provisioning_status=constants.ERROR) + ]) + self.assertEqual(2, repo.HealthMonitorRepository.update.call_count) + repo.HealthMonitorRepository.update.assert_has_calls( + [mock.call(mock_session, health_monitor.id, + provisioning_status=constants.ERROR)]) + self.assertEqual(1, repo.L7PolicyRepository.update.call_count) + repo.L7PolicyRepository.update.assert_has_calls( + [mock.call(mock_session, l7policies[0].id, + provisioning_status=constants.ERROR)]) + self.assertEqual(1, repo.L7RuleRepository.update.call_count) + repo.L7RuleRepository.update.assert_has_calls( + [mock.call(mock_session, l7rules[0].id, + provisioning_status=constants.ERROR)]) + + def test_mark_LB_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_loadbalancer_deleted = database_tasks.MarkLBDeletedInDB() + mark_loadbalancer_deleted.execute(self.loadbalancer_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + LB_ID, + provisioning_status=constants.DELETED) + + # Test the revert + mock_loadbalancer_repo_update.reset_mock() + mark_loadbalancer_deleted.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_not_called() + + # Test the revert with exception + mock_loadbalancer_repo_update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + mark_loadbalancer_deleted.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_not_called() + + def test_mark_LB_pending_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_loadbalancer_pending_delete = (database_tasks. 
+ MarkLBPendingDeleteInDB()) + mark_loadbalancer_pending_delete.execute(self.loadbalancer_mock) + + mock_session = mock_get_session().begin().__enter__() + + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + LB_ID, + provisioning_status=constants.PENDING_DELETE) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_update_health_monitor_in_db(self, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_health_mon = database_tasks.UpdateHealthMonInDB() + update_health_mon.execute(self.health_mon_mock, + {'delay': 1, 'timeout': 2}) + + mock_session = mock_get_session().begin().__enter__() + + repo.HealthMonitorRepository.update.assert_called_once_with( + mock_session, + HM_ID, + delay=1, timeout=2) + + # Test the revert + mock_health_mon_repo_update.reset_mock() + update_health_mon.revert(self.health_mon_mock) + + repo.HealthMonitorRepository.update.assert_called_once_with( + mock_session, + HM_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_health_mon_repo_update.reset_mock() + mock_health_mon_repo_update.side_effect = Exception('fail') + update_health_mon.revert(self.health_mon_mock) + + repo.HealthMonitorRepository.update.assert_called_once_with( + mock_session, + HM_ID, + provisioning_status=constants.ERROR) + + def test_update_load_balancer_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_load_balancer = database_tasks.UpdateLoadbalancerInDB() + update_load_balancer.execute(self.loadbalancer_mock, + {'name': 'test', 'description': 'test2'}) + + mock_session = mock_get_session().begin().__enter__() + + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + LB_ID, + name='test', description='test2') + + @mock.patch('octavia.db.repositories.VipRepository.update') + def test_update_vip_in_db_during_update_loadbalancer(self, + mock_vip_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_lb_update, + mock_listener_update, + mock_amphora_update, + mock_amphora_delete): + + _db_loadbalancer_mock.vip.load_balancer_id = LB_ID + update_load_balancer = database_tasks.UpdateLoadbalancerInDB() + update_load_balancer.execute(self.loadbalancer_mock, + {'name': 'test', + 'description': 'test2', + 'vip': {'qos_policy_id': 'fool'}}) + + mock_session = mock_get_session().begin().__enter__() + + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + LB_ID, + name='test', description='test2') + + repo.VipRepository.update.assert_called_once_with( + mock_session, LB_ID, qos_policy_id='fool') + + def test_update_listener_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_listener = database_tasks.UpdateListenerInDB() + listener_dict = {constants.LISTENER_ID: LISTENER_ID} + update_listener.execute(listener_dict, + {'name': 'test', 'description': 'test2'}) + + mock_session = mock_get_session().begin().__enter__() + + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + LISTENER_ID, + name='test', description='test2') + + # Test the revert + mock_listener_repo_update.reset_mock() + 
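# revert should set the listener provisioning_status to ERROR by id + 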
update_listener.revert(listener_dict) + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_listener_repo_update.reset_mock() + mock_listener_repo_update.side_effect = Exception('fail') + update_listener.revert(listener_dict) + repo.ListenerRepository.update.assert_called_once_with( + mock_session, + id=LISTENER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update') + def test_update_member_in_db(self, + mock_member_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_member = database_tasks.UpdateMemberInDB() + update_member.execute(self.member_mock, + {'weight': 1, 'ip_address': '10.1.0.0'}) + + mock_session = mock_get_session().begin().__enter__() + + repo.MemberRepository.update.assert_called_once_with( + mock_session, + MEMBER_ID, + weight=1, ip_address='10.1.0.0') + + # Test the revert + mock_member_repo_update.reset_mock() + update_member.revert(self.member_mock) + + repo.MemberRepository.update.assert_called_once_with( + mock_session, + MEMBER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_member_repo_update.reset_mock() + mock_member_repo_update.side_effect = Exception('fail') + update_member.revert(self.member_mock) + + repo.MemberRepository.update.assert_called_once_with( + mock_session, + MEMBER_ID, + provisioning_status=constants.ERROR) + + @mock.patch( + 'octavia.db.repositories.Repositories.update_pool_and_sp') + def test_update_pool_in_db(self, + mock_repos_pool_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + sp_dict = {'type': 'SOURCE_IP', 'cookie_name': None} + update_dict = {'name': 'test', 'description': 'test2', + 'session_persistence': sp_dict} + update_pool = database_tasks.UpdatePoolInDB() + update_pool.execute(POOL_ID, + update_dict) + + mock_session = mock_get_session().begin().__enter__() + + repo.Repositories.update_pool_and_sp.assert_called_once_with( + mock_session, + POOL_ID, + update_dict) + + # Test the revert + mock_repos_pool_update.reset_mock() + update_pool.revert(POOL_ID) + + repo.Repositories.update_pool_and_sp.assert_called_once_with( + mock_session, + POOL_ID, + {'provisioning_status': constants.ERROR}) + + # Test the revert with exception + mock_repos_pool_update.reset_mock() + mock_repos_pool_update.side_effect = Exception('fail') + update_pool.revert(POOL_ID) + + repo.Repositories.update_pool_and_sp.assert_called_once_with( + mock_session, + POOL_ID, + {'provisioning_status': constants.ERROR}) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_update_l7policy_in_db(self, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_l7policy = database_tasks.UpdateL7PolicyInDB() + update_l7policy.execute(self.l7policy_mock, + {'action': constants.L7POLICY_ACTION_REJECT}) + + mock_session = mock_get_session().begin().__enter__() + + repo.L7PolicyRepository.update.assert_called_once_with( + mock_session, + L7POLICY_ID, + action=constants.L7POLICY_ACTION_REJECT) + + # Test the revert + 
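# (revert sets the l7policy provisioning_status to ERROR) + 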
mock_l7policy_repo_update.reset_mock() + update_l7policy.revert(self.l7policy_mock) + + repo.L7PolicyRepository.update.assert_called_once_with( + mock_session, + L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7policy_repo_update.reset_mock() + mock_l7policy_repo_update.side_effect = Exception('fail') + update_l7policy.revert(self.l7policy_mock) + + repo.L7PolicyRepository.update.assert_called_once_with( + mock_session, + L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_update_l7rule_in_db(self, + mock_l7rule_repo_update, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_l7rule = database_tasks.UpdateL7RuleInDB() + update_l7rule.execute( + self.l7rule_mock, + {'type': constants.L7RULE_TYPE_PATH, + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + 'value': '/api'}) + + mock_session = mock_get_session().begin().__enter__() + + repo.L7RuleRepository.update.assert_called_once_with( + mock_session, + L7RULE_ID, + type=constants.L7RULE_TYPE_PATH, + compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + value='/api') + + # Test the revert + mock_l7rule_repo_update.reset_mock() + update_l7rule.revert(self.l7rule_mock) + + repo.L7PolicyRepository.update.assert_called_once_with( + mock_session, + L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7rule_repo_update.reset_mock() + mock_l7rule_repo_update.side_effect = Exception('fail') + update_l7rule.revert(self.l7rule_mock) + + repo.L7PolicyRepository.update.assert_called_once_with( + mock_session, + L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_db_amphora_mock) + def test_get_amphora_details(self, + mock_amp_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + get_amp_details = database_tasks.GetAmphoraDetails() + new_amp = get_amp_details.execute(self.amphora) + + self.assertEqual(AMP_ID, new_amp[constants.ID]) + self.assertEqual(VRRP_IP, new_amp[constants.VRRP_IP]) + self.assertEqual(HA_IP, new_amp[constants.HA_IP]) + self.assertEqual(VRRP_PORT_ID, new_amp[constants.VRRP_PORT_ID]) + self.assertEqual(AMP_ROLE, new_amp[constants.ROLE]) + self.assertEqual(VRRP_ID, new_amp[constants.VRRP_ID]) + self.assertEqual(VRRP_PRIORITY, new_amp[constants.VRRP_PRIORITY]) + + def test_mark_amphora_role_indb(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_amp_master_indb = database_tasks.MarkAmphoraMasterInDB() + mark_amp_master_indb.execute(self.amphora) + + mock_session = mock_get_session().begin().__enter__() + + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, AMP_ID, role='MASTER', + vrrp_priority=constants.ROLE_MASTER_PRIORITY) + + mock_amphora_repo_update.reset_mock() + + mark_amp_master_indb.revert("BADRESULT", self.amphora) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, AMP_ID, role=None, vrrp_priority=None) + + mock_amphora_repo_update.reset_mock() + + failure_obj = 
failure.Failure.from_exception(Exception("TESTEXCEPT")) + mark_amp_master_indb.revert(failure_obj, self.amphora) + self.assertFalse(repo.AmphoraRepository.update.called) + + mock_amphora_repo_update.reset_mock() + + mark_amp_backup_indb = database_tasks.MarkAmphoraBackupInDB() + mark_amp_backup_indb.execute(self.amphora) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, AMP_ID, role='BACKUP', + vrrp_priority=constants.ROLE_BACKUP_PRIORITY) + + mock_amphora_repo_update.reset_mock() + + mark_amp_backup_indb.revert("BADRESULT", self.amphora) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, AMP_ID, role=None, vrrp_priority=None) + + mock_amphora_repo_update.reset_mock() + + mark_amp_standalone_indb = database_tasks.MarkAmphoraStandAloneInDB() + mark_amp_standalone_indb.execute(self.amphora) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, AMP_ID, role='STANDALONE', + vrrp_priority=None) + + mock_amphora_repo_update.reset_mock() + + mark_amp_standalone_indb.revert("BADRESULT", self.amphora) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, AMP_ID, role=None, vrrp_priority=None) + + # Test revert with exception + mock_amphora_repo_update.reset_mock() + mock_amphora_repo_update.side_effect = Exception('fail') + mark_amp_standalone_indb.revert("BADRESULT", self.amphora) + repo.AmphoraRepository.update.assert_called_once_with( + mock_session, AMP_ID, role=None, vrrp_priority=None) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_get_amphorae_from_loadbalancer(self, + mock_lb_get, + mock_amphora_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + amp1 = mock.MagicMock() + amp1.id = uuidutils.generate_uuid() + amp2 = mock.MagicMock() + amp2.id = uuidutils.generate_uuid() + lb = mock.MagicMock() + lb.amphorae = [amp1, amp2] + + mock_amphora_get.side_effect = [_db_amphora_mock, None] + mock_lb_get.return_value = lb + + get_amps_from_lb_obj = database_tasks.GetAmphoraeFromLoadbalancer() + result = get_amps_from_lb_obj.execute(self.loadbalancer_mock) + self.assertEqual([_db_amphora_mock.to_dict()], result) + + @mock.patch('octavia.db.repositories.ListenerRepository.get') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_get_listeners_from_loadbalancer(self, + mock_lb_get, + mock_listener_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + mock_listener_get.return_value = _listener_mock + _db_loadbalancer_mock.listeners = [_listener_mock] + mock_lb_get.return_value = _db_loadbalancer_mock + get_list_from_lb_obj = database_tasks.GetListenersFromLoadbalancer() + result = get_list_from_lb_obj.execute(self.loadbalancer_mock) + mock_session = mock_get_session().begin().__enter__() + + mock_listener_get.assert_called_once_with(mock_session, + id=_listener_mock.id) + self.assertEqual([{constants.LISTENER_ID: LISTENER_ID}], result) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_get_vip_from_loadbalancer(self, + mock_lb_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + 
mock_amphora_repo_update, + mock_amphora_repo_delete): + _db_loadbalancer_mock.vip = _vip_mock + mock_lb_get.return_value = _db_loadbalancer_mock + get_vip_from_lb_obj = database_tasks.GetVipFromLoadbalancer() + result = get_vip_from_lb_obj.execute(self.loadbalancer_mock) + self.assertEqual(_vip_mock.to_dict(), result) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_get_loadbalancer(self, mock_lb_get, mock_generate_uuid, mock_LOG, + mock_get_session, mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + LB_ID = uuidutils.generate_uuid() + get_loadbalancer_obj = database_tasks.GetLoadBalancer() + + mock_lb_get.return_value = _db_loadbalancer_mock + + result = get_loadbalancer_obj.execute(LB_ID) + + self.assertEqual(self.loadbalancer_mock, result) + mock_session = mock_get_session().begin().__enter__() + + mock_lb_get.assert_called_once_with(mock_session, id=LB_ID) + + @mock.patch('octavia.db.repositories.VRRPGroupRepository.create') + def test_create_vrrp_group_for_lb(self, + mock_vrrp_group_create, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mock_session = mock_get_session().begin().__enter__() + mock_get_session().begin().__exit__.side_effect = ( + odb_exceptions.DBDuplicateEntry) + create_vrrp_group = database_tasks.CreateVRRPGroupForLB() + create_vrrp_group.execute(LB_ID) + mock_vrrp_group_create.assert_called_once_with( + mock_session, load_balancer_id=LB_ID, + vrrp_group_name=LB_ID.replace('-', ''), + vrrp_auth_type=constants.VRRP_AUTH_DEFAULT, + vrrp_auth_pass=mock_generate_uuid.return_value.replace('-', + '')[0:7], + advert_int=1) + create_vrrp_group.execute(self.loadbalancer_mock) + + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') + def test_disable_amphora_health_monitoring(self, + mock_amp_health_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + disable_amp_health = database_tasks.DisableAmphoraHealthMonitoring() + disable_amp_health.execute(self.amphora) + mock_session = mock_get_session().begin().__enter__() + + mock_amp_health_repo_delete.assert_called_once_with( + mock_session, amphora_id=AMP_ID) + + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_disable_lb_amphorae_health_monitoring( + self, + mock_lb_get, + mock_amp_health_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + disable_amp_health = ( + database_tasks.DisableLBAmphoraeHealthMonitoring()) + mock_lb_get.return_value = _db_loadbalancer_mock + disable_amp_health.execute(self.loadbalancer_mock) + mock_session = mock_get_session().begin().__enter__() + + mock_amp_health_repo_delete.assert_called_once_with( + mock_session, amphora_id=AMP_ID) + + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.update') + def test_mark_amphora_health_monitoring_busy(self, + mock_amp_health_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + mark_busy = 
database_tasks.MarkAmphoraHealthBusy() + mark_busy.execute(self.amphora) + mock_session = mock_get_session().begin().__enter__() + + mock_amp_health_repo_update.assert_called_once_with( + mock_session, amphora_id=AMP_ID, busy=True) + + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.update') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_mark_lb_amphorae_health_monitoring_busy( + self, + mock_lb_get, + mock_amp_health_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + mark_busy = ( + database_tasks.MarkLBAmphoraeHealthBusy()) + mock_lb_get.return_value = _db_loadbalancer_mock + mark_busy.execute(self.loadbalancer_mock) + mock_session = mock_get_session().begin().__enter__() + + mock_amp_health_repo_update.assert_called_once_with( + mock_session, amphora_id=AMP_ID, busy=True) + + def test_update_lb_server_group_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_server_group_info = database_tasks.UpdateLBServerGroupInDB() + update_server_group_info.execute(LB_ID, SERVER_GROUP_ID) + + mock_session = mock_get_session().begin().__enter__() + + repo.LoadBalancerRepository.update.assert_called_once_with( + mock_session, + id=LB_ID, + server_group_id=SERVER_GROUP_ID) + + # Test the revert + mock_listener_repo_update.reset_mock() + update_server_group_info.revert(LB_ID, SERVER_GROUP_ID) + + # Test the revert with exception + mock_listener_repo_update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + update_server_group_info.revert(LB_ID, SERVER_GROUP_ID) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + @mock.patch('octavia.db.repositories.HealthMonitorRepository.get') + def test_mark_health_mon_active_in_db(self, + mock_health_mon_repo_get, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + mock_health_mon_repo_get.return_value = self.db_health_mon_mock + mark_health_mon_active = (database_tasks.MarkHealthMonitorActiveInDB()) + mark_health_mon_active.execute(self.health_mon_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + HM_ID, + operating_status=constants.ONLINE, + provisioning_status=constants.ACTIVE) + + # Test the revert + mock_health_mon_repo_update.reset_mock() + mark_health_mon_active.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + id=HM_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_health_mon_repo_update.reset_mock() + mock_health_mon_repo_update.side_effect = Exception('fail') + mark_health_mon_active.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + id=HM_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_mark_health_mon_pending_create_in_db( + self, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + 
mock_amphora_repo_delete): + + mark_health_mon_pending_create = (database_tasks. + MarkHealthMonitorPendingCreateInDB()) + mark_health_mon_pending_create.execute(self.health_mon_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + HM_ID, + provisioning_status=constants.PENDING_CREATE) + + # Test the revert + mock_health_mon_repo_update.reset_mock() + mark_health_mon_pending_create.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + id=HM_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_health_mon_repo_update.reset_mock() + mock_health_mon_repo_update.side_effect = Exception('fail') + mark_health_mon_pending_create.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + id=HM_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_mark_health_mon_pending_delete_in_db( + self, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_health_mon_pending_delete = (database_tasks. + MarkHealthMonitorPendingDeleteInDB()) + mark_health_mon_pending_delete.execute(self.health_mon_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + HM_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_health_mon_repo_update.reset_mock() + mark_health_mon_pending_delete.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + id=HM_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_health_mon_repo_update.reset_mock() + mock_health_mon_repo_update.side_effect = Exception('fail') + mark_health_mon_pending_delete.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + id=HM_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_mark_health_mon_pending_update_in_db( + self, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_health_mon_pending_update = (database_tasks. 
+ MarkHealthMonitorPendingUpdateInDB()) + mark_health_mon_pending_update.execute(self.health_mon_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + HM_ID, + provisioning_status=constants.PENDING_UPDATE) + + # Test the revert + mock_health_mon_repo_update.reset_mock() + mark_health_mon_pending_update.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + id=HM_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_health_mon_repo_update.reset_mock() + mock_health_mon_repo_update.side_effect = Exception('fail') + mark_health_mon_pending_update.revert(self.health_mon_mock) + + mock_health_mon_repo_update.assert_called_once_with( + mock_session, + id=HM_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_mark_health_monitors_online_in_db(self, + mock_health_mon_repo_update, + mock_loadbalancer_repo_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + # Creating a mock hm for a default pool + mock_lb = mock.MagicMock() + mock_listener = mock.MagicMock() + mock_default_pool = mock_listener.default_pool + mock_hm_def_pool = mock_default_pool.health_monitor + mock_hm_def_pool.id = uuidutils.generate_uuid() + mock_lb.listeners = [mock_listener] + + # Creating a mock hm for a redirect pool of an l7policy + mock_l7policy = mock.MagicMock() + mock_redirect_pool = mock_l7policy.redirect_pool + mock_hm_l7_policy = mock_redirect_pool.health_monitor + mock_hm_l7_policy.id = uuidutils.generate_uuid() + mock_listener.l7policies = [mock_l7policy] + + # Creating a mock hm for a non default pool - we check its health + # monitor won't be updated + mock_pool = mock.MagicMock() + mock_hm_non_def_pool = mock_pool.health_monitor + mock_hm_non_def_pool.id = uuidutils.generate_uuid() + mock_lb.pools = [mock_pool] + + mock_loadbalancer_repo_get.return_value = mock_lb + mark_health_mon_online = (database_tasks. 
+ MarkHealthMonitorsOnlineInDB()) + mark_health_mon_online.execute(mock_lb) + + mock_session = mock_get_session().begin().__enter__() + for mock_id in [mock_hm_def_pool.id, mock_hm_l7_policy.id]: + mock_health_mon_repo_update.assert_called_with( + mock_session, + mock_id, + operating_status=constants.ONLINE) + self.assertEqual(2, mock_health_mon_repo_update.call_count) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + @mock.patch('octavia.db.repositories.L7PolicyRepository.get') + def test_mark_l7policy_active_in_db(self, + mock_l7policy_repo_get, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7policy_active = (database_tasks.MarkL7PolicyActiveInDB()) + mock_l7policy_repo_get.return_value = _l7policy_mock + mark_l7policy_active.execute(self.l7policy_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + L7POLICY_ID, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE) + + # Test the revert + mock_l7policy_repo_update.reset_mock() + mark_l7policy_active.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7policy_repo_update.reset_mock() + mock_l7policy_repo_update.side_effect = Exception('fail') + mark_l7policy_active.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_mark_l7policy_pending_create_in_db(self, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7policy_pending_create = (database_tasks. + MarkL7PolicyPendingCreateInDB()) + mark_l7policy_pending_create.execute(self.l7policy_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + L7POLICY_ID, + provisioning_status=constants.PENDING_CREATE) + + # Test the revert + mock_l7policy_repo_update.reset_mock() + mark_l7policy_pending_create.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7policy_repo_update.reset_mock() + mock_l7policy_repo_update.side_effect = Exception('fail') + mark_l7policy_pending_create.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_mark_l7policy_pending_delete_in_db(self, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7policy_pending_delete = (database_tasks. 
+ MarkL7PolicyPendingDeleteInDB()) + mark_l7policy_pending_delete.execute(self.l7policy_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + L7POLICY_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_l7policy_repo_update.reset_mock() + mark_l7policy_pending_delete.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7policy_repo_update.reset_mock() + mock_l7policy_repo_update.side_effect = Exception('fail') + mark_l7policy_pending_delete.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + def test_mark_l7policy_pending_update_in_db(self, + mock_l7policy_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7policy_pending_update = (database_tasks. + MarkL7PolicyPendingUpdateInDB()) + mark_l7policy_pending_update.execute(self.l7policy_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + L7POLICY_ID, + provisioning_status=constants.PENDING_UPDATE) + + # Test the revert + mock_l7policy_repo_update.reset_mock() + mark_l7policy_pending_update.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7policy_repo_update.reset_mock() + mock_l7policy_repo_update.side_effect = Exception('fail') + mark_l7policy_pending_update.revert(self.l7policy_mock) + + mock_l7policy_repo_update.assert_called_once_with( + mock_session, + id=L7POLICY_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + @mock.patch('octavia.db.repositories.L7RuleRepository.get') + def test_mark_l7rule_active_in_db(self, + mock_l7rule_repo_get, + mock_l7rule_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + mock_l7rule_repo_get.return_value = _l7rule_mock + mark_l7rule_active = (database_tasks.MarkL7RuleActiveInDB()) + mark_l7rule_active.execute(self.l7rule_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + L7RULE_ID, + provisioning_status=constants.ACTIVE, + operating_status=constants.ONLINE) + + # Test the revert + mock_l7rule_repo_update.reset_mock() + mark_l7rule_active.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7rule_repo_update.reset_mock() + mock_l7rule_repo_update.side_effect = Exception('fail') + mark_l7rule_active.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + def test_mark_l7rule_pending_create_in_db(self, + mock_l7rule_repo_update, + 
mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7rule_pending_create = (database_tasks. + MarkL7RulePendingCreateInDB()) + mark_l7rule_pending_create.execute(self.l7rule_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + L7RULE_ID, + provisioning_status=constants.PENDING_CREATE) + + # Test the revert + mock_l7rule_repo_update.reset_mock() + mark_l7rule_pending_create.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7rule_repo_update.reset_mock() + mock_l7rule_repo_update.side_effect = Exception('fail') + mark_l7rule_pending_create.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + def test_mark_l7rule_pending_delete_in_db(self, + mock_l7rule_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7rule_pending_delete = (database_tasks. + MarkL7RulePendingDeleteInDB()) + mark_l7rule_pending_delete.execute(self.l7rule_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + L7RULE_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_l7rule_repo_update.reset_mock() + mark_l7rule_pending_delete.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7rule_repo_update.reset_mock() + mock_l7rule_repo_update.side_effect = Exception('fail') + mark_l7rule_pending_delete.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + def test_mark_l7rule_pending_update_in_db(self, + mock_l7rule_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_l7rule_pending_update = (database_tasks. 
+ MarkL7RulePendingUpdateInDB()) + mark_l7rule_pending_update.execute(self.l7rule_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + L7RULE_ID, + provisioning_status=constants.PENDING_UPDATE) + + # Test the revert + mock_l7rule_repo_update.reset_mock() + mark_l7rule_pending_update.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_l7rule_repo_update.reset_mock() + mock_l7rule_repo_update.side_effect = Exception('fail') + mark_l7rule_pending_update.revert(self.l7rule_mock) + + mock_l7rule_repo_update.assert_called_once_with( + mock_session, + id=L7RULE_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update') + def test_mark_member_active_in_db(self, + mock_member_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_member_active = (database_tasks.MarkMemberActiveInDB()) + mark_member_active.execute(self.member_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_member_repo_update.assert_called_once_with( + mock_session, + MEMBER_ID, + provisioning_status=constants.ACTIVE) + + # Test the revert + mock_member_repo_update.reset_mock() + mark_member_active.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + mock_session, + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_member_repo_update.reset_mock() + mock_member_repo_update.side_effect = Exception('fail') + mark_member_active.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + mock_session, + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update') + def test_mark_member_pending_create_in_db(self, + mock_member_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_member_pending_create = (database_tasks. + MarkMemberPendingCreateInDB()) + mark_member_pending_create.execute(self.member_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_member_repo_update.assert_called_once_with( + mock_session, + MEMBER_ID, + provisioning_status=constants.PENDING_CREATE) + + # Test the revert + mock_member_repo_update.reset_mock() + mark_member_pending_create.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + mock_session, + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_member_repo_update.reset_mock() + mock_member_repo_update.side_effect = Exception('fail') + mark_member_pending_create.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + mock_session, + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update') + def test_mark_member_pending_delete_in_db(self, + mock_member_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_member_pending_delete = (database_tasks. 
+ MarkMemberPendingDeleteInDB()) + mark_member_pending_delete.execute(self.member_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_member_repo_update.assert_called_once_with( + mock_session, + MEMBER_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_member_repo_update.reset_mock() + mark_member_pending_delete.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + mock_session, + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_member_repo_update.reset_mock() + mock_member_repo_update.side_effect = Exception('fail') + mark_member_pending_delete.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + mock_session, + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update') + def test_mark_member_pending_update_in_db(self, + mock_member_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_member_pending_update = (database_tasks. + MarkMemberPendingUpdateInDB()) + mark_member_pending_update.execute(self.member_mock) + + mock_session = mock_get_session().begin().__enter__() + + mock_member_repo_update.assert_called_once_with( + mock_session, + MEMBER_ID, + provisioning_status=constants.PENDING_UPDATE) + + # Test the revert + mock_member_repo_update.reset_mock() + mark_member_pending_update.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + mock_session, + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_member_repo_update.reset_mock() + mock_member_repo_update.side_effect = Exception('fail') + mark_member_pending_update.revert(self.member_mock) + + mock_member_repo_update.assert_called_once_with( + mock_session, + id=MEMBER_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.PoolRepository.update') + def test_mark_pool_active_in_db(self, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_pool_active = (database_tasks.MarkPoolActiveInDB()) + mark_pool_active.execute(POOL_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_pool_repo_update.assert_called_once_with( + mock_session, + POOL_ID, + provisioning_status=constants.ACTIVE) + + # Test the revert + mock_pool_repo_update.reset_mock() + mark_pool_active.revert(POOL_ID) + + mock_pool_repo_update.assert_called_once_with( + mock_session, + id=POOL_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_pool_repo_update.reset_mock() + mock_pool_repo_update.side_effect = Exception('fail') + mark_pool_active.revert(POOL_ID) + + mock_pool_repo_update.assert_called_once_with( + mock_session, + id=POOL_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.PoolRepository.update') + def test_mark_pool_pending_create_in_db(self, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_pool_pending_create = (database_tasks.MarkPoolPendingCreateInDB()) + mark_pool_pending_create.execute(POOL_ID) + + mock_session = 
mock_get_session().begin().__enter__() + + mock_pool_repo_update.assert_called_once_with( + mock_session, + POOL_ID, + provisioning_status=constants.PENDING_CREATE) + + # Test the revert + mock_pool_repo_update.reset_mock() + mark_pool_pending_create.revert(POOL_ID) + + mock_pool_repo_update.assert_called_once_with( + mock_session, + id=POOL_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_pool_repo_update.reset_mock() + mock_pool_repo_update.side_effect = Exception('fail') + mark_pool_pending_create.revert(POOL_ID) + + mock_pool_repo_update.assert_called_once_with( + mock_session, + id=POOL_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.PoolRepository.update') + def test_mark_pool_pending_delete_in_db(self, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_pool_pending_delete = (database_tasks.MarkPoolPendingDeleteInDB()) + mark_pool_pending_delete.execute(POOL_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_pool_repo_update.assert_called_once_with( + mock_session, + POOL_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + mock_pool_repo_update.reset_mock() + mark_pool_pending_delete.revert(POOL_ID) + + mock_pool_repo_update.assert_called_once_with( + mock_session, + id=POOL_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_pool_repo_update.reset_mock() + mock_pool_repo_update.side_effect = Exception('fail') + mark_pool_pending_delete.revert(POOL_ID) + + mock_pool_repo_update.assert_called_once_with( + mock_session, + id=POOL_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.PoolRepository.update') + def test_mark_pool_pending_update_in_db(self, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + mark_pool_pending_update = (database_tasks. 
+ MarkPoolPendingUpdateInDB()) + mark_pool_pending_update.execute(POOL_ID) + + mock_session = mock_get_session().begin().__enter__() + + mock_pool_repo_update.assert_called_once_with( + mock_session, + POOL_ID, + provisioning_status=constants.PENDING_UPDATE) + + # Test the revert + mock_pool_repo_update.reset_mock() + mark_pool_pending_update.revert(POOL_ID) + + mock_pool_repo_update.assert_called_once_with( + mock_session, + id=POOL_ID, + provisioning_status=constants.ERROR) + + # Test the revert with exception + mock_pool_repo_update.reset_mock() + mock_pool_repo_update.side_effect = Exception('fail') + mark_pool_pending_update.revert(POOL_ID) + + mock_pool_repo_update.assert_called_once_with( + mock_session, + id=POOL_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.update_pool_members') + def test_update_pool_members_operating_status_in_db( + self, + mock_member_repo_update_pool_members, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + update_members = database_tasks.UpdatePoolMembersOperatingStatusInDB() + update_members.execute(POOL_ID, constants.ONLINE) + + mock_session = mock_get_session().begin().__enter__() + + mock_member_repo_update_pool_members.assert_called_once_with( + mock_session, + POOL_ID, + operating_status=constants.ONLINE) + + @mock.patch('octavia.common.utils.ip_version') + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.ListenerRepository.' + 'get_port_protocol_cidr_for_lb') + def test_get_amphora_firewall_rules(self, + mock_get_port_for_lb, + mock_db_get_session, + mock_ip_version, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + amphora_dict = {constants.ID: AMP_ID} + rules = [{'protocol': 'TCP', 'cidr': '192.0.2.0/24', 'port': 80}, + {'protocol': 'TCP', 'cidr': '198.51.100.0/24', 'port': 80}] + vrrp_rules = [ + {'protocol': 'TCP', 'cidr': '192.0.2.0/24', 'port': 80}, + {'protocol': 'TCP', 'cidr': '198.51.100.0/24', 'port': 80}, + {'cidr': '203.0.113.5/32', 'port': 112, 'protocol': 'vrrp'}] + mock_get_port_for_lb.side_effect = [ + copy.deepcopy(rules), copy.deepcopy(rules), copy.deepcopy(rules), + copy.deepcopy(rules)] + mock_ip_version.side_effect = [4, 6, 55] + + get_amp_fw_rules = database_tasks.GetAmphoraFirewallRules() + + # Test non-SRIOV VIP + amphora_net_cfg_dict = { + AMP_ID: {constants.AMPHORA: { + 'load_balancer': {constants.VIP: { + constants.VNIC_TYPE: constants.VNIC_TYPE_NORMAL}}}}} + result = get_amp_fw_rules.execute([amphora_dict], 0, + amphora_net_cfg_dict) + self.assertEqual([{'non-sriov-vip': True}], result) + + # Test SRIOV VIP - Single + amphora_net_cfg_dict = { + AMP_ID: {constants.AMPHORA: { + 'load_balancer': {constants.VIP: { + constants.VNIC_TYPE: constants.VNIC_TYPE_DIRECT}, + constants.TOPOLOGY: constants.TOPOLOGY_SINGLE}, + constants.LOAD_BALANCER_ID: LB_ID}}} + result = get_amp_fw_rules.execute([amphora_dict], 0, + amphora_net_cfg_dict) + mock_get_port_for_lb.assert_called_once_with(mock_db_get_session(), + LB_ID) + self.assertEqual(rules, result) + + mock_get_port_for_lb.reset_mock() + + # Test SRIOV VIP - Active/Standby + amphora_net_cfg_dict = { + AMP_ID: {constants.AMPHORA: { + 'load_balancer': {constants.VIP: { + constants.VNIC_TYPE: constants.VNIC_TYPE_DIRECT}, + constants.TOPOLOGY: 
constants.TOPOLOGY_ACTIVE_STANDBY, + constants.AMPHORAE: [{ + constants.ID: AMP_ID, + constants.STATUS: constants.AMPHORA_ALLOCATED}, + {constants.ID: AMP2_ID, + constants.STATUS: constants.AMPHORA_ALLOCATED, + constants.VRRP_IP: '203.0.113.5'}]}, + constants.LOAD_BALANCER_ID: LB_ID}}} + + # IPv4 path + mock_get_port_for_lb.reset_mock() + vrrp_rules = [ + {'protocol': 'TCP', 'cidr': '192.0.2.0/24', 'port': 80}, + {'protocol': 'TCP', 'cidr': '198.51.100.0/24', 'port': 80}, + {'cidr': '203.0.113.5/32', 'port': 112, 'protocol': 'vrrp'}] + result = get_amp_fw_rules.execute([amphora_dict], 0, + amphora_net_cfg_dict) + mock_get_port_for_lb.assert_called_once_with(mock_db_get_session(), + LB_ID) + self.assertEqual(vrrp_rules, result) + + # IPv6 path + mock_get_port_for_lb.reset_mock() + vrrp_rules = [ + {'protocol': 'TCP', 'cidr': '192.0.2.0/24', 'port': 80}, + {'protocol': 'TCP', 'cidr': '198.51.100.0/24', 'port': 80}, + {'cidr': '203.0.113.5/128', 'port': 112, 'protocol': 'vrrp'}] + result = get_amp_fw_rules.execute([amphora_dict], 0, + amphora_net_cfg_dict) + mock_get_port_for_lb.assert_called_once_with(mock_db_get_session(), + LB_ID) + self.assertEqual(vrrp_rules, result) + + # Bogus IP version path + self.assertRaises(exceptions.InvalidIPAddress, + get_amp_fw_rules.execute, [amphora_dict], 0, + amphora_net_cfg_dict) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py b/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py new file mode 100644 index 0000000000..af854f56fa --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py @@ -0,0 +1,411 @@ +# Copyright 2017 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +from unittest import mock + +from oslo_utils import uuidutils +from taskflow.types import failure + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.controller.worker.v2.tasks import database_tasks +import octavia.tests.unit.base as base + + +class TestDatabaseTasksQuota(base.TestCase): + + def setUp(self): + + self._tf_failure_mock = mock.Mock(spec=failure.Failure) + self.zero_pool_child_count = {'HM': 0, 'member': 0} + + super().setUp() + + @mock.patch('octavia.db.repositories.L7PolicyRepository.get') + @mock.patch('octavia.db.api.get_session', return_value='TEST') + @mock.patch('octavia.db.repositories.Repositories.decrement_quota') + @mock.patch('octavia.db.repositories.Repositories.check_quota_met') + def _test_decrement_quota(self, task, data_model, + mock_check_quota_met, mock_decrement_quota, + mock_get_session, mock_l7policy_get, + project_id=None): + test_object = None + if project_id and data_model == data_models.L7Rule: + test_object = {constants.PROJECT_ID: project_id} + elif project_id: + test_object = project_id + else: + project_id = uuidutils.generate_uuid() + test_object = mock.MagicMock() + test_object.project_id = project_id + l7policy_dict = {constants.PROJECT_ID: project_id, + constants.L7POLICY_ID: uuidutils.generate_uuid()} + + # execute without exception + mock_decrement_quota.reset_mock() + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_session = mock.MagicMock() + mock_get_session_local.return_value = mock_session + + if data_model == data_models.L7Policy: + test_object.l7rules = [] + mock_l7policy_get.return_value = test_object + + if data_model == data_models.Pool: + task.execute(test_object, self.zero_pool_child_count) + elif data_model == data_models.L7Policy: + task.execute(l7policy_dict) + else: + task.execute(test_object) + + mock_decrement_quota.assert_called_once_with( + mock_session, data_model, project_id) + + mock_session.commit.assert_called_once_with() + + # execute with exception + mock_decrement_quota.reset_mock() + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_session = mock.MagicMock() + mock_get_session_local.return_value = mock_session + + mock_decrement_quota.side_effect = ( + exceptions.OctaviaException('fail')) + if data_model == data_models.Pool: + self.assertRaises(exceptions.OctaviaException, + task.execute, + test_object, + self.zero_pool_child_count) + elif data_model == data_models.L7Policy: + self.assertRaises(exceptions.OctaviaException, + task.execute, + l7policy_dict) + else: + self.assertRaises(exceptions.OctaviaException, + task.execute, + test_object) + + mock_decrement_quota.assert_called_once_with( + mock_session, data_model, project_id) + + mock_session.rollback.assert_called_once_with() + + # revert with instance of failure + mock_get_session.reset_mock() + mock_check_quota_met.reset_mock() + if data_model == data_models.Pool: + task.revert(test_object, + self.zero_pool_child_count, + self._tf_failure_mock) + elif data_model == data_models.L7Policy: + task.revert(l7policy_dict, self._tf_failure_mock) + else: + task.revert(test_object, self._tf_failure_mock) + self.assertFalse(mock_get_session.called) + self.assertFalse(mock_check_quota_met.called) + + # revert + mock_check_quota_met.reset_mock() + with mock.patch('octavia.db.api.' 
+ 'get_session') as mock_get_session_local: + mock_session = mock.MagicMock() + + mock_get_session_local.return_value = mock_session + + if data_model == data_models.Pool: + task.revert(test_object, self.zero_pool_child_count, None) + elif data_model == data_models.L7Policy: + task.revert(l7policy_dict, None) + else: + task.revert(test_object, None) + + mock_check_quota_met.assert_called_once_with( + mock_session, data_model, + project_id) + + mock_session.commit.assert_called_once_with() + + # revert with rollback + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_session = mock.MagicMock() + + mock_get_session_local.return_value = mock_session + + mock_check_quota_met.side_effect = ( + exceptions.OctaviaException('fail')) + + if data_model == data_models.Pool: + task.revert(test_object, self.zero_pool_child_count, None) + elif data_model == data_models.L7Policy: + task.revert(l7policy_dict, None) + else: + task.revert(test_object, None) + + mock_session.rollback.assert_called_once_with() + + # revert with db exception + mock_check_quota_met.reset_mock() + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_get_session_local.side_effect = Exception('fail') + + if data_model == data_models.Pool: + task.revert(test_object, self.zero_pool_child_count, None) + elif data_model == data_models.L7Policy: + task.revert(l7policy_dict, None) + else: + task.revert(test_object, None) + + self.assertFalse(mock_check_quota_met.called) + + def test_decrement_health_monitor_quota(self): + project_id = uuidutils.generate_uuid() + task = database_tasks.DecrementHealthMonitorQuota() + data_model = data_models.HealthMonitor + self._test_decrement_quota(task, data_model, project_id=project_id) + + def test_decrement_listener_quota(self): + project_id = uuidutils.generate_uuid() + task = database_tasks.DecrementListenerQuota() + data_model = data_models.Listener + self._test_decrement_quota(task, data_model, project_id=project_id) + + def test_decrement_loadbalancer_quota(self): + project_id = uuidutils.generate_uuid() + task = database_tasks.DecrementLoadBalancerQuota() + data_model = data_models.LoadBalancer + self._test_decrement_quota(task, data_model, project_id=project_id) + + def test_decrement_pool_quota(self): + project_id = uuidutils.generate_uuid() + task = database_tasks.DecrementPoolQuota() + data_model = data_models.Pool + self._test_decrement_quota(task, data_model, project_id=project_id) + + def test_decrement_member_quota(self): + project_id = uuidutils.generate_uuid() + task = database_tasks.DecrementMemberQuota() + data_model = data_models.Member + self._test_decrement_quota(task, data_model, + project_id=project_id) + + @mock.patch('octavia.db.repositories.Repositories.decrement_quota') + @mock.patch('octavia.db.repositories.Repositories.check_quota_met') + def test_decrement_pool_quota_pool_children(self, + mock_check_quota_met, + mock_decrement_quota): + pool_child_count = {'HM': 1, 'member': 2} + project_id = uuidutils.generate_uuid() + task = database_tasks.DecrementPoolQuota() + mock_session = mock.MagicMock() + + with mock.patch('octavia.db.api.' 
+ 'get_session') as mock_get_session_local: + mock_get_session_local.return_value = mock_session + + task.execute(project_id, pool_child_count) + + calls = [mock.call(mock_session, data_models.Pool, project_id), + mock.call(mock_session, data_models.HealthMonitor, + project_id), + mock.call(mock_session, data_models.Member, project_id, + quantity=2)] + + mock_decrement_quota.assert_has_calls(calls) + + mock_session.commit.assert_called_once_with() + + # revert + mock_session.reset_mock() + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_get_session_local.return_value = mock_session + + task.revert(project_id, pool_child_count, None) + + calls = [mock.call(mock_session, + data_models.Pool, project_id), + mock.call(mock_session, + data_models.HealthMonitor, project_id), + mock.call(mock_session, + data_models.Member, project_id), + mock.call(mock_session, + data_models.Member, project_id)] + + mock_check_quota_met.assert_has_calls(calls) + + self.assertEqual(4, mock_session.commit.call_count) + + # revert with health monitor quota exception + mock_session.reset_mock() + mock_check_quota_met.side_effect = [None, Exception('fail'), None, + None] + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_get_session_local.return_value = mock_session + + task.revert(project_id, pool_child_count, None) + + calls = [mock.call(mock_session, + data_models.Pool, project_id), + mock.call(mock_session, + data_models.HealthMonitor, project_id), + mock.call(mock_session, + data_models.Member, project_id), + mock.call(mock_session, + data_models.Member, project_id)] + + mock_check_quota_met.assert_has_calls(calls) + + self.assertEqual(3, mock_session.commit.call_count) + self.assertEqual(1, mock_session.rollback.call_count) + + # revert with member quota exception + mock_session.reset_mock() + mock_check_quota_met.side_effect = [None, None, None, + Exception('fail')] + with mock.patch('octavia.db.api.' 
+ 'get_session') as mock_get_session_local: + mock_get_session_local.return_value = mock_session + + task.revert(project_id, pool_child_count, None) + + calls = [mock.call(mock_session, + data_models.Pool, project_id), + mock.call(mock_session, + data_models.HealthMonitor, project_id), + mock.call(mock_session, + data_models.Member, project_id), + mock.call(mock_session, + data_models.Member, project_id)] + + mock_check_quota_met.assert_has_calls(calls) + + self.assertEqual(3, mock_session.commit.call_count) + self.assertEqual(1, mock_session.rollback.call_count) + + @mock.patch('octavia.db.api.session') + @mock.patch('octavia.db.repositories.PoolRepository.get_children_count') + def test_count_pool_children_for_quota(self, repo_mock, session_mock): + project_id = uuidutils.generate_uuid() + pool = data_models.Pool(id=1, project_id=project_id) + + task = database_tasks.CountPoolChildrenForQuota() + + # Test pool with no children + repo_mock.reset_mock() + repo_mock.return_value = (0, 0) + result = task.execute(pool.id) + + self.assertEqual({'HM': 0, 'member': 0}, result) + + # Test pool with health monitor and two members + repo_mock.reset_mock() + repo_mock.return_value = (1, 2) + result = task.execute(pool.id) + + self.assertEqual({'HM': 1, 'member': 2}, result) + + def test_decrement_l7policy_quota(self): + task = database_tasks.DecrementL7policyQuota() + data_model = data_models.L7Policy + self._test_decrement_quota(task, data_model) + + @mock.patch('octavia.db.repositories.Repositories.decrement_quota') + @mock.patch('octavia.db.repositories.Repositories.check_quota_met') + @mock.patch('octavia.db.repositories.L7PolicyRepository.get') + def test_decrement_l7policy_quota_with_children(self, + mock_l7policy_get, + mock_check_quota_met, + mock_decrement_quota): + project_id = uuidutils.generate_uuid() + l7_policy_id = uuidutils.generate_uuid() + test_l7rule1 = mock.MagicMock() + test_l7rule1.project_id = project_id + test_l7rule2 = mock.MagicMock() + test_l7rule2.project_id = project_id + test_object = {constants.PROJECT_ID: project_id, + constants.L7POLICY_ID: l7_policy_id} + db_test_object = mock.MagicMock() + db_test_object.project_id = project_id + db_test_object.l7rules = [test_l7rule1, test_l7rule2] + mock_l7policy_get.return_value = db_test_object + task = database_tasks.DecrementL7policyQuota() + mock_session = mock.MagicMock() + + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_get_session_local.return_value = mock_session + + task.execute(test_object) + + calls = [mock.call(mock_session, data_models.L7Policy, project_id), + mock.call(mock_session, data_models.L7Rule, project_id, + quantity=2)] + + mock_decrement_quota.assert_has_calls(calls) + + mock_session.commit.assert_called_once_with() + + # revert + mock_session.reset_mock() + with mock.patch('octavia.db.api.' + 'get_session') as mock_get_session_local: + mock_get_session_local.return_value = mock_session + + task.revert(test_object, None) + + calls = [mock.call(mock_session, + data_models.L7Policy, project_id), + mock.call(mock_session, + data_models.L7Rule, project_id), + mock.call(mock_session, + data_models.L7Rule, project_id)] + + mock_check_quota_met.assert_has_calls(calls) + + self.assertEqual(3, mock_session.commit.call_count) + + # revert with l7rule quota exception + mock_session.reset_mock() + mock_check_quota_met.side_effect = [None, None, + Exception('fail')] + with mock.patch('octavia.db.api.' 
+ 'get_session') as mock_get_session_local: + mock_get_session_local.return_value = mock_session + + task.revert(test_object, None) + + calls = [mock.call(mock_session, + data_models.L7Policy, project_id), + mock.call(mock_session, + data_models.L7Rule, project_id), + mock.call(mock_session, + data_models.L7Rule, project_id)] + + mock_check_quota_met.assert_has_calls(calls) + + self.assertEqual(2, mock_session.commit.call_count) + self.assertEqual(1, mock_session.rollback.call_count) + + def test_decrement_l7rule_quota(self): + project_id = uuidutils.generate_uuid() + task = database_tasks.DecrementL7ruleQuota() + data_model = data_models.L7Rule + self._test_decrement_quota(task, data_model, + project_id=project_id) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_lifecycle_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_lifecycle_tasks.py new file mode 100644 index 0000000000..7c14f6fbe5 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_lifecycle_tasks.py @@ -0,0 +1,412 @@ +# Copyright 2016 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.controller.worker.v2.tasks import lifecycle_tasks +import octavia.tests.unit.base as base + + +class TestLifecycleTasks(base.TestCase): + + def setUp(self): + + self.AMPHORA = mock.MagicMock() + self.AMPHORA_ID = uuidutils.generate_uuid() + self.AMPHORA.id = self.AMPHORA_ID + self.L7POLICY = mock.MagicMock() + self.L7POLICY_ID = uuidutils.generate_uuid() + self.L7POLICY.id = self.L7POLICY_ID + self.L7RULE_ID = uuidutils.generate_uuid() + self.L7RULE = { + constants.L7RULE_ID: self.L7RULE_ID + } + self.LISTENER = mock.MagicMock() + self.LISTENER_ID = uuidutils.generate_uuid() + self.LISTENER = {constants.LISTENER_ID: self.LISTENER_ID} + self.LISTENERS = [self.LISTENER] + self.LOADBALANCER_ID = uuidutils.generate_uuid() + self.LOADBALANCER = {constants.LOADBALANCER_ID: self.LOADBALANCER_ID} + self.LISTENER[constants.LOADBALANCER_ID] = self.LOADBALANCER_ID + self.MEMBER = mock.MagicMock() + self.MEMBER_ID = uuidutils.generate_uuid() + self.MEMBER.id = self.MEMBER_ID + self.MEMBERS = [self.MEMBER] + self.POOL = mock.MagicMock() + self.POOL_ID = uuidutils.generate_uuid() + self.POOL.id = self.POOL_ID + self.HEALTH_MON_ID = uuidutils.generate_uuid() + self.HEALTH_MON = { + constants.HEALTHMONITOR_ID: self.HEALTH_MON_ID, + constants.POOL_ID: self.POOL_ID, + } + + super().setUp() + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'unmark_amphora_health_busy') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_amphora_status_error') + def test_AmphoraIDToErrorOnRevertTask(self, mock_amp_status_error, + mock_amp_health_busy): + + amp_id_to_error_on_revert = (lifecycle_tasks. 
+ AmphoraIDToErrorOnRevertTask()) + + # Execute + amp_id_to_error_on_revert.execute(self.AMPHORA_ID) + + self.assertFalse(mock_amp_status_error.called) + + # Revert + amp_id_to_error_on_revert.revert(self.AMPHORA_ID) + + mock_amp_status_error.assert_called_once_with(self.AMPHORA_ID) + self.assertFalse(mock_amp_health_busy.called) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'unmark_amphora_health_busy') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_amphora_status_error') + def test_AmphoraToErrorOnRevertTask(self, mock_amp_status_error, + mock_amp_health_busy): + + amp_to_error_on_revert = lifecycle_tasks.AmphoraToErrorOnRevertTask() + + # Execute + amp = {constants.ID: self.AMPHORA_ID} + amp_to_error_on_revert.execute(amp) + + self.assertFalse(mock_amp_status_error.called) + + # Revert + amp_to_error_on_revert.revert(amp) + + mock_amp_status_error.assert_called_once_with(self.AMPHORA_ID) + self.assertFalse(mock_amp_health_busy.called) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_health_mon_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_active') + def test_HealthMonitorToErrorOnRevertTask( + self, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_health_mon_prov_status_error): + + health_mon_to_error_on_revert = (lifecycle_tasks. + HealthMonitorToErrorOnRevertTask()) + + # Execute + health_mon_to_error_on_revert.execute(self.HEALTH_MON, + self.LISTENERS, + self.LOADBALANCER) + + self.assertFalse(mock_health_mon_prov_status_error.called) + + # Revert + health_mon_to_error_on_revert.revert(self.HEALTH_MON, + self.LISTENERS, + self.LOADBALANCER) + + mock_health_mon_prov_status_error.assert_called_once_with( + self.HEALTH_MON_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_l7policy_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_active') + def test_L7PolicyToErrorOnRevertTask( + self, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_l7policy_prov_status_error): + + l7policy_to_error_on_revert = (lifecycle_tasks. + L7PolicyToErrorOnRevertTask()) + + # Execute + l7policy_to_error_on_revert.execute({constants.L7POLICY_ID: + self.L7POLICY_ID}, + self.LISTENERS, + self.LOADBALANCER_ID) + + self.assertFalse(mock_l7policy_prov_status_error.called) + + # Revert + l7policy_to_error_on_revert.revert({constants.L7POLICY_ID: + self.L7POLICY_ID}, + self.LISTENERS, + self.LOADBALANCER_ID) + + mock_l7policy_prov_status_error.assert_called_once_with( + self.L7POLICY_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_l7rule_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
+ 'mark_listener_prov_status_active') + def test_L7RuleToErrorOnRevertTask( + self, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_l7rule_prov_status_error): + + l7rule_to_error_on_revert = (lifecycle_tasks. + L7RuleToErrorOnRevertTask()) + + # Execute + l7rule_to_error_on_revert.execute(self.L7RULE, + self.L7POLICY_ID, + self.LISTENERS, + self.LOADBALANCER_ID) + + self.assertFalse(mock_l7rule_prov_status_error.called) + + # Revert + l7rule_to_error_on_revert.revert(self.L7RULE, + self.L7POLICY_ID, + self.LISTENERS, + self.LOADBALANCER_ID) + + mock_l7rule_prov_status_error.assert_called_once_with( + self.L7RULE_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_error') + def test_ListenerToErrorOnRevertTask( + self, + mock_listener_prov_status_error, + mock_loadbalancer_prov_status_active): + + listener_to_error_on_revert = (lifecycle_tasks. + ListenerToErrorOnRevertTask()) + + # Execute + listener_to_error_on_revert.execute(self.LISTENER) + + self.assertFalse(mock_listener_prov_status_error.called) + + # Revert + listener_to_error_on_revert.revert(self.LISTENER) + + mock_listener_prov_status_error.assert_called_once_with( + self.LISTENER_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_error') + def test_ListenersToErrorOnRevertTask( + self, + mock_listener_prov_status_error, + mock_loadbalancer_prov_status_active): + + listeners_to_error_on_revert = (lifecycle_tasks. + ListenersToErrorOnRevertTask()) + + # Execute + listeners_to_error_on_revert.execute([self.LISTENER]) + + self.assertFalse(mock_listener_prov_status_error.called) + + # Revert + listeners_to_error_on_revert.revert([self.LISTENER]) + + mock_listener_prov_status_error.assert_called_once_with( + self.LISTENER_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_error') + def test_LoadBalancerIDToErrorOnRevertTask( + self, + mock_loadbalancer_prov_status_error): + + loadbalancer_id_to_error_on_revert = ( + lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask()) + + # Execute + loadbalancer_id_to_error_on_revert.execute(self.LOADBALANCER_ID) + + self.assertFalse(mock_loadbalancer_prov_status_error.called) + + # Revert + loadbalancer_id_to_error_on_revert.revert(self.LOADBALANCER_ID) + + mock_loadbalancer_prov_status_error.assert_called_once_with( + self.LOADBALANCER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
+ 'mark_loadbalancer_prov_status_error') + def test_LoadBalancerToErrorOnRevertTask( + self, + mock_loadbalancer_prov_status_error): + + loadbalancer_to_error_on_revert = ( + lifecycle_tasks.LoadBalancerToErrorOnRevertTask()) + + # Execute + loadbalancer_to_error_on_revert.execute({constants.LOADBALANCER_ID: + self.LOADBALANCER_ID}) + + self.assertFalse(mock_loadbalancer_prov_status_error.called) + + # Revert + loadbalancer_to_error_on_revert.revert({constants.LOADBALANCER_ID: + self.LOADBALANCER_ID}) + + mock_loadbalancer_prov_status_error.assert_called_once_with( + self.LOADBALANCER_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_member_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_pool_prov_status_active') + def test_MemberToErrorOnRevertTask( + self, + mock_pool_prov_status_active, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_member_prov_status_error): + member_to_error_on_revert = lifecycle_tasks.MemberToErrorOnRevertTask() + + # Execute + member_to_error_on_revert.execute({constants.MEMBER_ID: + self.MEMBER_ID}, + self.LISTENERS, + self.LOADBALANCER, + self.POOL_ID) + + self.assertFalse(mock_member_prov_status_error.called) + + # Revert + member_to_error_on_revert.revert({constants.MEMBER_ID: self.MEMBER_ID}, + self.LISTENERS, + self.LOADBALANCER, + self.POOL_ID) + + mock_member_prov_status_error.assert_called_once_with( + self.MEMBER_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) + mock_pool_prov_status_active.assert_called_once_with( + self.POOL_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_member_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_listener_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_pool_prov_status_active') + def test_MembersToErrorOnRevertTask( + self, + mock_pool_prov_status_active, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_member_prov_status_error): + members_to_error_on_revert = ( + lifecycle_tasks.MembersToErrorOnRevertTask()) + + # Execute + members_to_error_on_revert.execute([{constants.MEMBER_ID: + self.MEMBER_ID}], + self.LISTENERS, + self.LOADBALANCER, + self.POOL_ID) + + self.assertFalse(mock_member_prov_status_error.called) + + # Revert + members_to_error_on_revert.revert([{constants.MEMBER_ID: + self.MEMBER_ID}], + self.LISTENERS, + self.LOADBALANCER, + self.POOL_ID) + + mock_member_prov_status_error.assert_called_once_with( + self.MEMBER_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) + mock_pool_prov_status_active.assert_called_once_with( + self.POOL_ID) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_pool_prov_status_error') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'mark_loadbalancer_prov_status_active') + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
+ 'mark_listener_prov_status_active') + def test_PoolToErrorOnRevertTask( + self, + mock_listener_prov_status_active, + mock_loadbalancer_prov_status_active, + mock_pool_prov_status_error): + + pool_to_error_on_revert = lifecycle_tasks.PoolToErrorOnRevertTask() + + # Execute + pool_to_error_on_revert.execute(self.POOL_ID, + self.LISTENERS, + self.LOADBALANCER) + + self.assertFalse(mock_pool_prov_status_error.called) + + # Revert + pool_to_error_on_revert.revert(self.POOL_ID, + self.LISTENERS, + self.LOADBALANCER) + + mock_pool_prov_status_error.assert_called_once_with( + self.POOL_ID) + mock_loadbalancer_prov_status_active.assert_called_once_with( + self.LOADBALANCER_ID) + mock_listener_prov_status_active.assert_called_once_with( + self.LISTENER_ID) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py new file mode 100644 index 0000000000..d2a364b7f6 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py @@ -0,0 +1,1946 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +from taskflow.types import failure +import tenacity + +from octavia.api.drivers import utils as provider_utils +from octavia.common import constants +from octavia.common import data_models as o_data_models +from octavia.common import exceptions +from octavia.controller.worker.v2.tasks import network_tasks +from octavia.network import base as net_base +from octavia.network import data_models +from octavia.tests.common import constants as t_constants +import octavia.tests.unit.base as base + + +AMPHORA_ID = 7 +COMPUTE_ID = uuidutils.generate_uuid() +PORT_ID = uuidutils.generate_uuid() +SUBNET_ID = uuidutils.generate_uuid() +NETWORK_ID = uuidutils.generate_uuid() +MGMT_NETWORK_ID = uuidutils.generate_uuid() +MGMT_SUBNET_ID = uuidutils.generate_uuid() +SG_ID = uuidutils.generate_uuid() +IP_ADDRESS = "172.24.41.1" +VIP = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID, + subnet_id=t_constants.MOCK_SUBNET_ID, + qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1) +VIP2 = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID2, + subnet_id=t_constants.MOCK_SUBNET_ID2, + qos_policy_id=t_constants.MOCK_QOS_POLICY_ID2) +LB = o_data_models.LoadBalancer(vip=VIP) +LB2 = o_data_models.LoadBalancer(vip=VIP2) +FIRST_IP = {"ip_address": IP_ADDRESS, "subnet_id": SUBNET_ID} +FIXED_IPS = [FIRST_IP] +INTERFACE = data_models.Interface(id=uuidutils.generate_uuid(), + compute_id=COMPUTE_ID, fixed_ips=FIXED_IPS, + port_id=PORT_ID) +AMPS_DATA = [o_data_models.Amphora(id=t_constants.MOCK_AMP_ID1, + status=constants.AMPHORA_ALLOCATED, + vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID1, + vrrp_ip=t_constants.MOCK_VRRP_IP1), + o_data_models.Amphora(id=t_constants.MOCK_AMP_ID2, + status=constants.AMPHORA_ALLOCATED, + 
vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID2, + vrrp_ip=t_constants.MOCK_VRRP_IP2), + o_data_models.Amphora(id=t_constants.MOCK_AMP_ID3, + status=constants.DELETED, + vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID3, + vrrp_ip=t_constants.MOCK_VRRP_IP3) + ] +UPDATE_DICT = {constants.TOPOLOGY: None} +_session_mock = mock.MagicMock() + + +class TestException(Exception): + + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) + + +@mock.patch('octavia.common.utils.get_network_driver') +class TestNetworkTasks(base.TestCase): + def setUp(self): + network_tasks.LOG = mock.MagicMock() + self.db_amphora_mock = mock.MagicMock() + self.db_load_balancer_mock = mock.MagicMock() + self.vip_mock = mock.MagicMock() + self.vip_mock.subnet_id = SUBNET_ID + self.vip_mock.network_id = NETWORK_ID + self.db_load_balancer_mock.vip = self.vip_mock + self.db_load_balancer_mock.amphorae = [] + self.db_amphora_mock.id = AMPHORA_ID + self.db_amphora_mock.compute_id = COMPUTE_ID + self.db_amphora_mock.status = constants.AMPHORA_ALLOCATED + self.mgmt_net_id = MGMT_NETWORK_ID + self.mgmt_subnet_id = MGMT_SUBNET_ID + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + conf.config(group="controller_worker", + amp_boot_network_list=[self.mgmt_net_id]) + conf.config(group="networking", max_retries=1) + self.amphora_mock = {constants.ID: AMPHORA_ID, + constants.COMPUTE_ID: COMPUTE_ID, + constants.LB_NETWORK_IP: IP_ADDRESS, + } + self.load_balancer_mock = { + constants.LOADBALANCER_ID: uuidutils.generate_uuid(), + constants.VIP_SUBNET_ID: SUBNET_ID, + constants.VIP_NETWORK_ID: NETWORK_ID, + constants.VIP_PORT_ID: VIP.port_id, + constants.VIP_ADDRESS: VIP.ip_address, + constants.VIP_QOS_POLICY_ID: t_constants.MOCK_QOS_POLICY_ID1 + } + + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="controller_worker", + amp_boot_network_list=[self.mgmt_net_id]) + + super().setUp() + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_calculate_amphora_delta(self, mock_get_session, mock_lb_repo_get, + mock_get_net_driver): + LB_ID = uuidutils.generate_uuid() + VRRP_PORT_ID = uuidutils.generate_uuid() + VIP_NETWORK_ID = uuidutils.generate_uuid() + VIP_SUBNET_ID = uuidutils.generate_uuid() + DELETE_NETWORK_ID = uuidutils.generate_uuid() + MEMBER_NETWORK_ID = uuidutils.generate_uuid() + MEMBER_SUBNET_ID = uuidutils.generate_uuid() + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + member_mock = mock.MagicMock() + member_mock.subnet_id = MEMBER_SUBNET_ID + pool_mock = mock.MagicMock() + pool_mock.members = [member_mock] + lb_mock = mock.MagicMock() + lb_mock.pools = [pool_mock] + lb_mock.vip.subnet.network_id = VIP_NETWORK_ID + lb_dict = { + constants.LOADBALANCER_ID: LB_ID, + constants.VIP_SUBNET_ID: VIP_SUBNET_ID, + constants.VIP_NETWORK_ID: VIP_NETWORK_ID + } + + amphora_dict = {constants.ID: AMPHORA_ID, + constants.COMPUTE_ID: COMPUTE_ID, + constants.VRRP_PORT_ID: VRRP_PORT_ID} + + mgmt_subnet = data_models.Subnet( + id=self.mgmt_subnet_id, + network_id=self.mgmt_net_id) + mgmt_net = data_models.Network( + id=self.mgmt_net_id, + subnets=[mgmt_subnet.id]) + mgmt_interface = data_models.Interface( + network_id=mgmt_net.id, + fixed_ips=[ + data_models.FixedIP( + subnet_id=mgmt_subnet.id)]) + + vrrp_subnet = data_models.Subnet( + id=VIP_SUBNET_ID, + network_id=VIP_NETWORK_ID) + vrrp_port = data_models.Port( + id=VRRP_PORT_ID, + network_id=VIP_NETWORK_ID, + 
fixed_ips=[ + data_models.FixedIP( + subnet=vrrp_subnet, + subnet_id=vrrp_subnet.id)]) + vrrp_interface = data_models.Interface( + network_id=VIP_NETWORK_ID, + fixed_ips=vrrp_port.fixed_ips) + + member_subnet = data_models.Subnet( + id=MEMBER_SUBNET_ID, + network_id=MEMBER_NETWORK_ID) + + to_be_deleted_interface = data_models.Interface( + id=mock.Mock(), + network_id=DELETE_NETWORK_ID) + + mock_lb_repo_get.return_value = lb_mock + mock_driver.get_port.return_value = vrrp_port + mock_driver.get_subnet.return_value = member_subnet + mock_driver.get_plugged_networks.return_value = [ + mgmt_interface, + vrrp_interface, + to_be_deleted_interface] + + calc_amp_delta = network_tasks.CalculateAmphoraDelta() + + # Test vrrp_port_id is None + result = calc_amp_delta.execute(lb_dict, amphora_dict, {}) + + self.assertEqual(AMPHORA_ID, result[constants.AMPHORA_ID]) + self.assertEqual(COMPUTE_ID, result[constants.COMPUTE_ID]) + self.assertEqual(1, len(result[constants.ADD_NICS])) + self.assertEqual(MEMBER_NETWORK_ID, + result[constants.ADD_NICS][0][constants.NETWORK_ID]) + self.assertEqual(1, len(result[constants.DELETE_NICS])) + self.assertEqual( + DELETE_NETWORK_ID, + result[constants.DELETE_NICS][0][constants.NETWORK_ID]) + mock_driver.get_subnet.assert_called_once_with( + MEMBER_SUBNET_ID) + mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_calculate_delta(self, mock_get_session, mock_get_lb, + mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_lb.return_value = self.db_load_balancer_mock + + self.db_amphora_mock.to_dict.return_value = { + constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID, + constants.VRRP_PORT_ID: PORT_ID} + mock_get_net_driver.return_value = mock_driver + empty_deltas = {self.db_amphora_mock.id: data_models.Delta( + amphora_id=AMPHORA_ID, + compute_id=COMPUTE_ID, + add_nics=[], + delete_nics=[], + add_subnets=[], + delete_subnets=[], + ).to_dict()} + + mgmt_subnet = data_models.Subnet( + id=self.mgmt_subnet_id, network_id=self.mgmt_net_id) + mgmt_ip_address = mock.MagicMock() + mgmt_interface = data_models.Interface( + network_id=self.mgmt_net_id, + fixed_ips=[ + data_models.FixedIP( + subnet=mgmt_subnet, + subnet_id=self.mgmt_subnet_id, + ip_address=mgmt_ip_address + ) + ]) + vrrp_subnet = data_models.Subnet( + id=self.vip_mock.subnet_id, network_id=self.vip_mock.network_id, + name='vrrp_subnet') + member_vip_subnet = data_models.Subnet( + id=uuidutils.generate_uuid(), network_id=self.vip_mock.network_id, + name='member_vip_subnet') + vip_net = data_models.Network( + id=self.vip_mock.network_id, + subnets=[member_vip_subnet, vrrp_subnet], + name='flat_network') + vrrp_port = data_models.Port( + id=uuidutils.generate_uuid(), + network_id=vip_net.id, network=vip_net, + fixed_ips=[ + data_models.FixedIP( + subnet=vrrp_subnet, subnet_id=vrrp_subnet.id, + ip_address=t_constants.MOCK_IP_ADDRESS) + ], + name='vrrp_port') + + member_private_net_id = uuidutils.generate_uuid() + member_private_subnet = data_models.Subnet( + id=uuidutils.generate_uuid(), network_id=member_private_net_id, + name='member_private_subnet') + member_private_subnet2 = data_models.Subnet( + id=uuidutils.generate_uuid(), network_id=member_private_net_id, + name='member_private_subnet2') + member_private_net = data_models.Network( + id=member_private_subnet.network_id, + subnets=[member_private_subnet, 
member_private_subnet2], + name='member_private_net') + member_private_subnet_port = data_models.Port( + id=uuidutils.generate_uuid(), + network_id=member_private_net.id, network=member_private_net, + fixed_ips=[ + data_models.FixedIP( + subnet=member_private_subnet, + subnet_id=member_private_subnet.id, + ip_address=t_constants.MOCK_IP_ADDRESS2) + ], + name='member_private_net_port') + member_private_subnet2_port = data_models.Port( + id=uuidutils.generate_uuid(), + network_id=member_private_net.id, network=member_private_net, + fixed_ips=[ + data_models.FixedIP( + subnet=member_private_subnet2, + subnet_id=member_private_subnet2.id, + ip_address=t_constants.MOCK_IP_ADDRESS2) + ], + name='member_private_net_port') + + # Pretend the VIP is on the member network, so already plugged + mock_driver.get_plugged_networks.return_value = [ + mgmt_interface, + data_models.Interface( + network_id=vip_net.id, port_id=vrrp_port.id, + fixed_ips=vrrp_port.fixed_ips)] + mock_driver.get_port.return_value = vrrp_port + mock_driver.get_subnet.return_value = vrrp_subnet + + calc_delta = network_tasks.CalculateDelta() + + # Test with no amps or anything at all + self.assertEqual({}, calc_delta.execute( + self.load_balancer_mock, {})) + + # Test with one amp and no pools, only the base network plugged + # Delta should be empty + mock_driver.reset_mock() + + self.db_amphora_mock.load_balancer = self.db_load_balancer_mock + self.db_load_balancer_mock.amphorae = [self.db_amphora_mock] + self.db_load_balancer_mock.pools = [] + self.assertEqual(empty_deltas, + calc_delta.execute(self.load_balancer_mock, {})) + mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) + + # Test with one amp and one pool but no members, nothing plugged + # Delta should be empty + mock_driver.reset_mock() + pool_mock = mock.MagicMock() + pool_mock.members = [] + self.db_load_balancer_mock.pools = [pool_mock] + self.assertEqual(empty_deltas, + calc_delta.execute(self.load_balancer_mock, {})) + + # Test with one amp/pool and one member (on a distinct member subnet) + # Dummy AZ is provided + # Only the base network is already plugged + # Delta should be one additional network/subnet to plug + mock_driver.reset_mock() + member_mock = mock.MagicMock() + member_mock.subnet_id = member_private_subnet.id + member_mock.vnic_type = constants.VNIC_TYPE_NORMAL + member2_mock = mock.MagicMock() + member2_mock.subnet_id = member_private_subnet2.id + pool_mock.members = [member_mock] + az = { + constants.COMPUTE_ZONE: 'foo' + } + mock_driver.get_subnet.return_value = data_models.Subnet( + id=2, network_id=3) + + ndm = data_models.Delta( + amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[ + data_models.Interface( + network_id=3, + fixed_ips=[ + data_models.FixedIP( + subnet_id=member_private_subnet.id)], + vnic_type=constants.VNIC_TYPE_NORMAL)], + delete_nics=[], + add_subnets=[{ + 'subnet_id': member_private_subnet.id, + 'network_id': 3, + 'port_id': None}], + delete_subnets=[]).to_dict(recurse=True) + self.assertEqual({self.db_amphora_mock.id: ndm}, + calc_delta.execute(self.load_balancer_mock, az)) + + mock_driver.get_subnet.assert_called_once_with( + member_mock.subnet_id) + + # Test with one amp/pool and one member (not plugged) that is being + # deleted + # Only the base network is already plugged + # Delta should be empty + mock_driver.reset_mock() + member_mock = mock.MagicMock() + member_mock.subnet_id = member_private_subnet.id + member_mock.provisioning_status = 
constants.PENDING_DELETE + pool_mock.members = [member_mock] + + self.assertEqual(empty_deltas, + calc_delta.execute(self.load_balancer_mock, {})) + + # Test with one amp/pool and one member (without any subnets) + # Only the base network is already plugged + # No delta + mock_driver.reset_mock() + member_mock = mock.MagicMock() + member_mock.subnet_id = None + pool_mock.members = [member_mock] + + self.assertEqual(empty_deltas, + calc_delta.execute(self.load_balancer_mock, {})) + + # Test with one amp and one pool and one member + # Management network is defined in AZ metadata + # Base network AND member network/subnet already plugged + # Delta should be empty + mock_driver.reset_mock() + member_mock = mock.MagicMock() + member_mock.subnet_id = member_private_subnet.id + pool_mock.members = [member_mock] + + mgmt2_subnet_id = uuidutils.generate_uuid() + mgmt2_net_id = uuidutils.generate_uuid() + mgmt2_subnet = data_models.Subnet( + id=mgmt2_subnet_id, + network_id=mgmt2_net_id) + mgmt2_interface = data_models.Interface( + network_id=mgmt2_net_id, + fixed_ips=[ + data_models.FixedIP( + subnet=mgmt2_subnet, + subnet_id=mgmt2_subnet_id, + ) + ]) + az = { + constants.MANAGEMENT_NETWORK: mgmt2_net_id, + } + mock_driver.get_subnet.return_value = member_private_subnet + mock_driver.get_plugged_networks.return_value = [ + mgmt2_interface, + data_models.Interface( + network_id=vrrp_subnet.network_id, + fixed_ips=vrrp_port.fixed_ips), + data_models.Interface( + network_id=member_private_subnet.network_id, + fixed_ips=member_private_subnet_port.fixed_ips)] + + self.assertEqual(empty_deltas, + calc_delta.execute(self.load_balancer_mock, az)) + + # Test with one amp and one pool and one member, wrong network plugged + # Delta should be one network/subnet to add and one to remove + mock_driver.reset_mock() + member_mock = mock.MagicMock() + member_mock.subnet_id = member_private_subnet.id + member_mock.vnic_type = constants.VNIC_TYPE_NORMAL + pool_mock.members = [member_mock] + az = { + constants.COMPUTE_ZONE: 'foo' + } + mock_driver.get_subnet.return_value = member_private_subnet + mock_driver.get_plugged_networks.return_value = [ + mgmt_interface, + data_models.Interface( + network_id=vrrp_subnet.network_id, + fixed_ips=vrrp_port.fixed_ips), + data_models.Interface( + network_id='bad_net', + fixed_ips=[data_models.FixedIP(subnet_id='bad_subnet')])] + + ndm = data_models.Delta( + amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[data_models.Interface( + network_id=member_private_net.id, + fixed_ips=[data_models.FixedIP( + subnet_id=member_private_subnet.id)], + vnic_type=constants.VNIC_TYPE_NORMAL)], + delete_nics=[data_models.Interface(network_id='bad_net')], + add_subnets=[{ + 'subnet_id': member_private_subnet.id, + 'network_id': member_private_net.id, + 'port_id': None + }], + delete_subnets=[{ + 'subnet_id': 'bad_subnet', + 'network_id': 'bad_net', + 'port_id': None + }]).to_dict(recurse=True) + self.assertEqual({self.db_amphora_mock.id: ndm}, + calc_delta.execute(self.load_balancer_mock, az)) + + # Test with one amp and one pool and no members, one network plugged + # Delta should be one network to remove + mock_driver.reset_mock() + pool_mock.members = [] + mock_driver.get_subnet.side_effect = [ + vrrp_subnet] + mock_driver.get_plugged_networks.return_value = [ + mgmt_interface, + data_models.Interface( + network_id=vrrp_subnet.network_id, + fixed_ips=vrrp_port.fixed_ips), + data_models.Interface( + network_id='bad_net', + 
fixed_ips=[data_models.FixedIP(subnet_id='bad_subnet')])] + + ndm = data_models.Delta( + amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[data_models.Interface(network_id='bad_net')], + add_subnets=[], + delete_subnets=[{ + 'subnet_id': 'bad_subnet', + 'network_id': 'bad_net', + 'port_id': None + }]).to_dict(recurse=True) + self.assertEqual({self.db_amphora_mock.id: ndm}, + calc_delta.execute(self.load_balancer_mock, {})) + + # Add a new member on a new subnet, an interface with another subnet of + # the same network is already plugged + # Delta should be one new subnet + mock_driver.reset_mock() + pool_mock.members = [member_mock, member2_mock] + mock_driver.get_subnet.side_effect = [ + vrrp_subnet, + member_private_subnet, + member_private_subnet2] + mock_driver.get_plugged_networks.return_value = [ + mgmt_interface, + data_models.Interface( + network_id=vrrp_subnet.network_id, + fixed_ips=vrrp_port.fixed_ips), + data_models.Interface( + network_id=member_private_net_id, + port_id=member_private_subnet_port.id, + fixed_ips=member_private_subnet_port.fixed_ips)] + + ndm = data_models.Delta( + amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[], + add_subnets=[{ + 'subnet_id': member_private_subnet2.id, + 'network_id': member_private_net_id, + 'port_id': member_private_subnet_port.id + }], + delete_subnets=[] + ).to_dict(recurse=True) + self.assertEqual({self.db_amphora_mock.id: ndm}, + calc_delta.execute(self.load_balancer_mock, {})) + + # a new member on a new subnet on an existing network, a delete member2 + # on another subnet of the same network + # Delta should be one new subnet, one deleted subnet, no interface + # change + mock_driver.reset_mock() + pool_mock.members = [member_mock] + mock_driver.get_subnet.return_value = member_private_subnet + mock_driver.get_plugged_networks.return_value = [ + mgmt_interface, + data_models.Interface( + network_id=vrrp_subnet.network_id, + fixed_ips=vrrp_port.fixed_ips), + data_models.Interface( + network_id=member_private_net_id, + port_id=member_private_subnet2_port.id, + fixed_ips=member_private_subnet2_port.fixed_ips)] + + ndm = data_models.Delta( + amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[], + add_subnets=[{ + 'subnet_id': member_private_subnet.id, + 'network_id': member_private_net_id, + 'port_id': member_private_subnet2_port.id}], + delete_subnets=[{ + 'subnet_id': member_private_subnet2.id, + 'network_id': member_private_net_id, + 'port_id': member_private_subnet2_port.id}] + ).to_dict(recurse=True) + self.assertEqual({self.db_amphora_mock.id: ndm}, + calc_delta.execute(self.load_balancer_mock, {})) + + # member on subnet on the same network as the vip subnet + mock_driver.reset_mock() + member_mock.subnet_id = member_vip_subnet.id + pool_mock.members = [member_mock] + mock_driver.get_subnet.side_effect = [ + vrrp_subnet, + member_vip_subnet] + mock_driver.get_plugged_networks.return_value = [ + mgmt_interface, + data_models.Interface( + network_id=vrrp_subnet.network_id, + port_id=vrrp_port.id, + fixed_ips=vrrp_port.fixed_ips), + data_models.Interface( + network_id=member_private_net_id, + port_id=member_private_subnet_port.id, + fixed_ips=member_private_subnet_port.fixed_ips)] + + ndm = data_models.Delta( + amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[ + data_models.Interface( + 
network_id=member_private_net_id, + port_id=member_private_subnet_port.id)], + add_subnets=[{ + 'subnet_id': member_vip_subnet.id, + 'network_id': vip_net.id, + 'port_id': vrrp_port.id}], + delete_subnets=[{ + 'subnet_id': member_private_subnet.id, + 'network_id': member_private_net_id, + 'port_id': member_private_subnet_port.id}] + ).to_dict(recurse=True) + self.assertEqual({self.db_amphora_mock.id: ndm}, + calc_delta.execute(self.load_balancer_mock, {})) + + def test_get_plumbed_networks(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + mock_driver.get_plugged_networks.side_effect = [['blah']] + net = network_tasks.GetPlumbedNetworks() + + self.assertEqual(['blah'], net.execute(self.amphora_mock)) + mock_driver.get_plugged_networks.assert_called_once_with( + COMPUTE_ID) + + def test_unplug_networks(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + + def _interface(network_id): + return [data_models.Interface(network_id=network_id)] + + net = network_tasks.UnPlugNetworks() + + net.execute(self.db_amphora_mock, None) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[]).to_dict(recurse=True) + net.execute(self.amphora_mock, delta) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=_interface(1) + ).to_dict(recurse=True) + net.execute(self.amphora_mock, delta) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = net_base.NetworkNotFound + net.execute(self.amphora_mock, delta) # No exception + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + # Do a test with a general exception in case behavior changes + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = Exception() + net.execute(self.amphora_mock, delta) # No exception + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + def test_get_member_ports(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + + def _interface(port_id): + return [data_models.Interface(port_id=port_id)] + + net_task = network_tasks.GetMemberPorts() + net_task.execute(self.load_balancer_mock, self.amphora_mock) + mock_driver.get_port.assert_called_once_with(t_constants.MOCK_PORT_ID) + mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) + + mock_driver.reset_mock() + net_task = network_tasks.GetMemberPorts() + mock_driver.get_plugged_networks.return_value = _interface(1) + mock_driver.get_port.side_effect = [ + data_models.Port(network_id=NETWORK_ID), + data_models.Port(network_id=NETWORK_ID)] + net_task.execute(self.load_balancer_mock, self.amphora_mock) + self.assertEqual(2, mock_driver.get_port.call_count) + self.assertFalse(mock_driver.get_network.called) + + mock_driver.reset_mock() + port_mock = mock.MagicMock() + fixed_ip_mock = mock.MagicMock() + fixed_ip_mock.subnet_id = 1 + port_mock.fixed_ips = [fixed_ip_mock] + net_task = network_tasks.GetMemberPorts() + mock_driver.get_plugged_networks.return_value = _interface(1) + mock_driver.get_port.side_effect = [ + data_models.Port(network_id=NETWORK_ID), port_mock] + ports = 
net_task.execute(self.load_balancer_mock, self.amphora_mock) + mock_driver.get_subnet.assert_called_once_with(1) + self.assertEqual([port_mock], ports) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_handle_network_delta(self, mock_session, mock_amp_get, + mock_get_net_driver): + mock_net_driver = mock.MagicMock() + self.db_amphora_mock.to_dict.return_value = { + constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID} + mock_get_net_driver.return_value = mock_net_driver + mock_amp_get.return_value = self.db_amphora_mock + + nic1 = data_models.Interface() + nic1.fixed_ips = [data_models.FixedIP( + subnet_id=uuidutils.generate_uuid())] + nic1.network_id = uuidutils.generate_uuid() + nic1.nic_type = constants.VNIC_TYPE_NORMAL + nic2 = data_models.Interface() + nic2.fixed_ips = [data_models.FixedIP( + subnet_id=uuidutils.generate_uuid())] + nic2.network_id = uuidutils.generate_uuid() + interface1 = mock.MagicMock() + interface1.port_id = uuidutils.generate_uuid() + port1 = mock.MagicMock() + port1.network_id = uuidutils.generate_uuid() + fixed_ip = mock.MagicMock() + fixed_ip.subnet_id = nic1.fixed_ips[0].subnet_id + fixed_ip2 = mock.MagicMock() + fixed_ip2.subnet_id = uuidutils.generate_uuid() + port1.fixed_ips = [fixed_ip, fixed_ip2] + subnet = mock.MagicMock() + network = mock.MagicMock() + + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[nic1], + delete_nics=[nic2, nic2, nic2], + add_subnets=[], + delete_subnets=[] + ).to_dict(recurse=True) + + mock_net_driver.plug_port.side_effect = [ + interface1, + exceptions.NotFound(resource='Instance', id='1'), + exceptions.NotFound(resource='Boom', id='1'), + Exception("boom")] + mock_net_driver.create_port.side_effect = [ + port1, + port1, + exceptions.NotFound(resource='Network', id='1'), + port1, + port1] + + fixed_port1 = mock.MagicMock() + fixed_port1.network_id = port1.network_id + fixed_port1.fixed_ips = [fixed_ip] + mock_net_driver.unplug_fixed_ip.return_value = fixed_port1 + mock_net_driver.get_network.return_value = network + mock_net_driver.get_subnet.return_value = subnet + + mock_net_driver.unplug_network.side_effect = [ + None, net_base.NetworkNotFound, Exception] + + handle_net_delta_obj = network_tasks.HandleNetworkDelta() + result = handle_net_delta_obj.execute(self.amphora_mock, + delta) + + mock_net_driver.plug_port.assert_called_once_with( + self.db_amphora_mock, port1) + mock_net_driver.get_network.assert_called_once_with(port1.network_id) + mock_net_driver.get_subnet.assert_has_calls( + [mock.call(fixed_ip.subnet_id), mock.call(fixed_ip2.subnet_id)]) + + self.assertEqual({self.db_amphora_mock.id: [port1.to_dict()]}, + result) + + mock_net_driver.unplug_network.assert_called_with( + self.db_amphora_mock.compute_id, nic2.network_id) + + self.assertRaises(net_base.AmphoraNotFound, + handle_net_delta_obj.execute, self.amphora_mock, + delta) + + self.assertRaises(net_base.NetworkNotFound, + handle_net_delta_obj.execute, self.amphora_mock, + delta) + + self.assertRaises(net_base.PlugNetworkException, + handle_net_delta_obj.execute, self.amphora_mock, + delta) + + self.assertRaises(net_base.PlugNetworkException, + handle_net_delta_obj.execute, self.amphora_mock, + delta) + + # Revert + delta2 = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[nic1, nic1], + delete_nics=[nic2, nic2, nic2] + 
).to_dict(recurse=True) + + mock_net_driver.unplug_network.reset_mock() + handle_net_delta_obj.revert( + failure.Failure.from_exception(Exception('boom')), None, None) + mock_net_driver.unplug_network.assert_not_called() + + mock_net_driver.unplug_network.reset_mock() + handle_net_delta_obj.revert(None, None, None) + mock_net_driver.unplug_network.assert_not_called() + + mock_net_driver.unplug_network.reset_mock() + handle_net_delta_obj.revert(None, None, delta2) + + mock_net_driver.unplug_network.reset_mock() + mock_net_driver.delete_port.side_effect = Exception('boom') + handle_net_delta_obj.revert(None, None, delta2) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_handle_network_deltas(self, mock_get_session, + mock_get_lb, mock_get_amp, + mock_get_net_driver): + mock_driver = mock.MagicMock() + self.db_load_balancer_mock.amphorae = [self.db_amphora_mock] + self.db_amphora_mock.to_dict.return_value = { + constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID} + mock_get_net_driver.return_value = mock_driver + mock_get_lb.return_value = self.db_load_balancer_mock + mock_get_amp.return_value = self.db_amphora_mock + + subnet1 = uuidutils.generate_uuid() + network1 = uuidutils.generate_uuid() + port1 = uuidutils.generate_uuid() + subnet2 = uuidutils.generate_uuid() + + def _interface(network_id, port_id=None, subnet_id=None): + return data_models.Interface( + network_id=network_id, + port_id=port_id, + fixed_ips=[ + data_models.FixedIP( + subnet_id=subnet_id)], + vnic_type=constants.VNIC_TYPE_NORMAL) + + net = network_tasks.HandleNetworkDeltas() + + net.execute({}, self.load_balancer_mock) + self.assertFalse(mock_driver.create_port.called) + + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[], + add_subnets=[], + delete_subnets=[]).to_dict(recurse=True) + net.execute({self.db_amphora_mock.id: delta}, self.load_balancer_mock) + self.assertFalse(mock_driver.create_port.called) + + # Adding a subnet on a new network + port = data_models.Port( + id=port1, + network_id=network1, + fixed_ips=[ + data_models.FixedIP(subnet_id=subnet1)]) + mock_driver.create_port.return_value = port + mock_driver.plug_fixed_ip.return_value = port + mock_driver.get_network.return_value = data_models.Network( + id=network1) + mock_driver.get_subnet.return_value = data_models.Subnet( + id=subnet1, + network_id=network1) + add_nics = [_interface(network1, subnet_id=subnet1)] + add_subnets = [{ + 'subnet_id': subnet1, + 'network_id': network1, + 'port_id': None}] + + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=add_nics, + delete_nics=[], + add_subnets=add_subnets, + delete_subnets=[]).to_dict(recurse=True) + updated_ports = net.execute({self.db_amphora_mock.id: delta}, + self.load_balancer_mock) + mock_driver.plug_port.assert_called_once_with( + self.db_amphora_mock, port) + mock_driver.unplug_network.assert_not_called() + + self.assertEqual(1, len(updated_ports)) + + updated_port = updated_ports[self.db_amphora_mock.id][0] + self.assertEqual(port1, updated_port['id']) + self.assertEqual(network1, updated_port['network_id']) + self.assertEqual(1, len(updated_port['fixed_ips'])) + self.assertEqual(subnet1, updated_port['fixed_ips'][0]['subnet_id']) + + # revert + net.revert(None, 
{self.db_amphora_mock.id: delta}, + self.load_balancer_mock) + mock_driver.unplug_network.assert_called_once_with( + self.db_amphora_mock.compute_id, network1) + + # Adding a subnet on an existing network/port + mock_driver.reset_mock() + port = data_models.Port( + id=port1, + network_id=network1, + fixed_ips=[ + data_models.FixedIP(subnet_id=subnet2), + data_models.FixedIP(subnet_id=subnet1)]) + mock_driver.plug_fixed_ip.return_value = port + mock_driver.get_network.return_value = data_models.Network( + id=network1) + mock_driver.get_subnet.side_effect = [ + data_models.Subnet( + id=subnet2, + network_id=network1), + data_models.Subnet( + id=subnet1, + network_id=network1)] + add_subnets = [{ + 'subnet_id': subnet1, + 'network_id': network1, + 'port_id': port1}] + + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[], + add_subnets=add_subnets, + delete_subnets=[]).to_dict(recurse=True) + updated_ports = net.execute({self.db_amphora_mock.id: delta}, + self.load_balancer_mock) + mock_driver.unplug_network.assert_not_called() + mock_driver.get_port.assert_not_called() + mock_driver.plug_fixed_ip.assert_called_once_with(port_id=port1, + subnet_id=subnet1) + self.assertEqual(1, len(updated_ports)) + + updated_port = updated_ports[self.db_amphora_mock.id][0] + self.assertEqual(port1, updated_port['id']) + self.assertEqual(network1, updated_port['network_id']) + self.assertEqual(2, len(updated_port['fixed_ips'])) + self.assertEqual(subnet2, updated_port['fixed_ips'][0]['subnet_id']) + self.assertEqual(subnet1, updated_port['fixed_ips'][1]['subnet_id']) + + # Deleting a subnet + mock_driver.reset_mock() + delete_subnets = [{ + 'subnet_id': subnet1, + 'network_id': network1, + 'port_id': port1}] + mock_driver.get_subnet.side_effect = [ + data_models.Subnet( + id=subnet2, + network_id=network1)] + mock_driver.unplug_fixed_ip.return_value = data_models.Port( + id=port1, + network_id=network1, + fixed_ips=[ + data_models.FixedIP(subnet_id=subnet2)]) + + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[], + add_subnets=[], + delete_subnets=delete_subnets).to_dict( + recurse=True) + updated_ports = net.execute({self.db_amphora_mock.id: delta}, + self.load_balancer_mock) + mock_driver.delete_port.assert_not_called() + mock_driver.plug_fixed_ip.assert_not_called() + mock_driver.unplug_fixed_ip.assert_called_once_with( + port_id=port1, subnet_id=subnet1) + self.assertEqual(1, len(updated_ports)) + self.assertEqual(1, len(updated_ports[self.db_amphora_mock.id])) + + updated_port = updated_ports[self.db_amphora_mock.id][0] + self.assertEqual(port1, updated_port['id']) + self.assertEqual(network1, updated_port['network_id']) + self.assertEqual(1, len(updated_port['fixed_ips'])) + self.assertEqual(subnet2, updated_port['fixed_ips'][0]['subnet_id']) + + # Noop update + # Delta are empty because there's nothing to update + mock_driver.reset_mock() + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[], + add_subnets=[], + delete_subnets=[]).to_dict(recurse=True) + net.execute({self.db_amphora_mock.id: delta}, + self.load_balancer_mock) + mock_driver.delete_port.assert_not_called() + mock_driver.plug_fixed_ip.assert_not_called() + mock_driver.unplug_fixed_ip.assert_not_called() + + # Deleting a subnet and a network + mock_driver.reset_mock() + 
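+        # NOTE: in this scenario the NIC itself appears in delete_nics, so
+        # the expected behavior (asserted below) is that the task deletes
+        # the whole port via delete_port() instead of unplugging the last
+        # fixed IP, leaving no ports for this amphora in the result.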
mock_driver.get_subnet.side_effect = [ + data_models.Subnet( + id=subnet2, + network_id=network1), + data_models.Subnet( + id=subnet1, + network_id=network1)] + delete_nics = [_interface(network1, port_id=port1)] + delete_subnets = [{ + 'subnet_id': subnet1, + 'network_id': network1, + 'port_id': port1}] + + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=delete_nics, + add_subnets=[], + delete_subnets=delete_subnets).to_dict( + recurse=True) + updated_ports = net.execute({self.db_amphora_mock.id: delta}, + self.load_balancer_mock) + mock_driver.delete_port.assert_called_once_with(port1) + mock_driver.plug_fixed_ip.assert_not_called() + self.assertEqual(1, len(updated_ports)) + self.assertEqual(0, len(updated_ports[self.db_amphora_mock.id])) + + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[_interface(1, port_id=12)], + delete_nics=[], + add_subnets=[], + delete_subnets=[]).to_dict(recurse=True) + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = TestException('test') + net.revert(None, {self.db_amphora_mock.id: delta}, + self.load_balancer_mock) + mock_driver.unplug_network.assert_called_once_with( + self.db_amphora_mock.compute_id, 1) + + mock_driver.reset_mock() + mock_driver.delete_port.side_effect = TestException('test') + net.revert(None, {self.db_amphora_mock.id: delta}, + self.load_balancer_mock) + mock_driver.unplug_network.assert_called_once_with( + self.db_amphora_mock.compute_id, 1) + mock_driver.delete_port.assert_called_once_with(12) + + mock_driver.reset_mock() + net.execute({}, self.load_balancer_mock) + self.assertFalse(mock_driver.unplug_network.called) + + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[_interface(1)], + add_subnets=[], + delete_subnets=[]).to_dict(recurse=True) + net.execute({self.db_amphora_mock.id: delta}, self.load_balancer_mock) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = net_base.NetworkNotFound + net.execute({self.db_amphora_mock.id: delta}, self.load_balancer_mock) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + # Do a test with a general exception in case behavior changes + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = Exception() + net.execute({self.db_amphora_mock.id: delta}, self.load_balancer_mock) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + # Do a test with a general exception in case behavior changes + delta = data_models.Delta(amphora_id=self.db_amphora_mock.id, + compute_id=self.db_amphora_mock.compute_id, + add_nics=[], + delete_nics=[_interface(1, port_id=12)], + add_subnets=[], + delete_subnets=[]).to_dict(recurse=True) + mock_driver.reset_mock() + mock_driver.delete_port.side_effect = Exception() + net.execute({self.db_amphora_mock.id: delta}, self.load_balancer_mock) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + mock_driver.delete_port.assert_called_once_with(12) + + mock_driver.unplug_network.reset_mock() + net.revert( + failure.Failure.from_exception(Exception('boom')), None, None) + mock_driver.unplug_network.assert_not_called() + + mock_driver.unplug_network.reset_mock() + net.revert(None, None, None) + mock_driver.unplug_network.assert_not_called() + + 
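+    # For reference: the deltas passed around above are plain dicts produced
+    # by Delta(...).to_dict(recurse=True); based on the constructions in
+    # these tests they look roughly like:
+    #     {'amphora_id': <id>, 'compute_id': <id>,
+    #      'add_nics': [<interface dict>, ...],
+    #      'delete_nics': [<interface dict>, ...],
+    #      'add_subnets': [{'subnet_id': <id>, 'network_id': <id>,
+    #                       'port_id': <id or None>}, ...],
+    #      'delete_subnets': [...]}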
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'get_current_loadbalancer_from_db') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_apply_qos_on_creation(self, mock_get_session, mock_get_lb, + mock_get_lb_db, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.ApplyQos() + mock_get_lb_db.return_value = LB + mock_get_lb.return_value = LB + + # execute + UPDATE_DICT[constants.TOPOLOGY] = constants.TOPOLOGY_SINGLE + update_dict = UPDATE_DICT + net.execute(self.load_balancer_mock, [AMPS_DATA[0]], update_dict) + mock_driver.apply_qos_on_port.assert_called_once_with( + VIP.qos_policy_id, AMPS_DATA[0].vrrp_port_id) + self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) + standby_topology = constants.TOPOLOGY_ACTIVE_STANDBY + mock_driver.reset_mock() + update_dict[constants.TOPOLOGY] = standby_topology + net.execute(self.load_balancer_mock, AMPS_DATA, update_dict) + mock_driver.apply_qos_on_port.assert_called_with( + t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) + self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) + + # revert + mock_driver.reset_mock() + update_dict = UPDATE_DICT + net.revert(None, self.load_balancer_mock, [AMPS_DATA[0]], update_dict) + self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) + mock_driver.reset_mock() + update_dict[constants.TOPOLOGY] = standby_topology + net.revert(None, self.load_balancer_mock, AMPS_DATA, update_dict) + self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) + + @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' + 'get_current_loadbalancer_from_db') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_apply_qos_on_update(self, mock_get_session, mock_get_lb, + mock_get_lb_db, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.ApplyQos() + null_qos_vip = o_data_models.Vip(qos_policy_id=None) + null_qos_lb = o_data_models.LoadBalancer( + vip=null_qos_vip, topology=constants.TOPOLOGY_SINGLE, + amphorae=[AMPS_DATA[0]]) + null_qos_lb_dict = ( + provider_utils.db_loadbalancer_to_provider_loadbalancer( + null_qos_lb).to_dict()) + + tmp_vip_object = o_data_models.Vip( + qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1) + tmp_lb = o_data_models.LoadBalancer( + vip=tmp_vip_object, topology=constants.TOPOLOGY_SINGLE, + amphorae=[AMPS_DATA[0]]) + pr_tm_dict = provider_utils.db_loadbalancer_to_provider_loadbalancer( + tmp_lb).to_dict() + mock_get_lb.return_value = tmp_lb + # execute + update_dict = {'description': 'fool'} + net.execute(pr_tm_dict, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_once_with( + t_constants.MOCK_QOS_POLICY_ID1, AMPS_DATA[0].vrrp_port_id) + self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + mock_get_lb.reset_mock() + mock_get_lb.return_value = null_qos_lb + update_dict = {'vip': {'qos_policy_id': None}} + net.execute(null_qos_lb_dict, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_once_with( + None, AMPS_DATA[0].vrrp_port_id) + self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + update_dict = {'name': '123'} + net.execute(null_qos_lb_dict, update_dict=update_dict) + self.assertEqual(0, 
mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + mock_get_lb.reset_mock() + update_dict = {'description': 'fool'} + tmp_lb.amphorae = AMPS_DATA + tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY + mock_get_lb.return_value = tmp_lb + net.execute(pr_tm_dict, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_with( + t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) + self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + update_dict = {'description': 'fool', + 'vip': { + 'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID1}} + tmp_lb.amphorae = AMPS_DATA + tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY + net.execute(pr_tm_dict, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_with( + t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) + self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) + + mock_get_lb.return_value = null_qos_lb + mock_driver.reset_mock() + update_dict = {} + net.execute(null_qos_lb_dict, update_dict=update_dict) + self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) + + # revert + mock_driver.reset_mock() + mock_get_lb.reset_mock() + tmp_lb.amphorae = [AMPS_DATA[0]] + tmp_lb.topology = constants.TOPOLOGY_SINGLE + update_dict = {'description': 'fool'} + mock_get_lb_db.return_value = tmp_lb + net.revert(None, pr_tm_dict, update_dict=update_dict) + self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + update_dict = {'vip': {'qos_policy_id': None}} + ori_lb_db = LB2 + ori_lb_db.amphorae = [AMPS_DATA[0]] + mock_get_lb_db.return_value = ori_lb_db + net.revert(None, null_qos_lb_dict, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_once_with( + t_constants.MOCK_QOS_POLICY_ID2, AMPS_DATA[0].vrrp_port_id) + self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + mock_get_lb.reset_mock() + update_dict = {'vip': { + 'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID2}} + tmp_lb.amphorae = AMPS_DATA + tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY + ori_lb_db = LB2 + ori_lb_db.amphorae = [AMPS_DATA[0]] + mock_get_lb_db.return_value = ori_lb_db + net.revert(None, pr_tm_dict, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_with( + t_constants.MOCK_QOS_POLICY_ID2, mock.ANY) + self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_unplug_vip(self, mock_get_session, mock_get_lb, + mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_lb.return_value = LB + mock_get_net_driver.return_value = mock_driver + net = network_tasks.UnplugVIP() + + net.execute(self.load_balancer_mock) + mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_allocate_vip(self, mock_get_session, mock_get_lb, + mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_lb.return_value = LB + mock_get_net_driver.return_value = mock_driver + net = network_tasks.AllocateVIP() + + additional_vip = mock.Mock() + additional_vip.subnet_id = uuidutils.generate_uuid() + additional_vip.ip_address = IP_ADDRESS + mock_driver.allocate_vip.return_value = LB.vip, [additional_vip] + + mock_driver.reset_mock() + self.assertEqual((LB.vip.to_dict(), [additional_vip.to_dict()]), 
+ net.execute(self.load_balancer_mock)) + mock_driver.allocate_vip.assert_called_once_with(LB) + + # revert + vip_mock = VIP.to_dict() + additional_vips_mock = mock.MagicMock() + net.revert((vip_mock, additional_vips_mock), self.load_balancer_mock) + mock_driver.deallocate_vip.assert_called_once_with( + o_data_models.Vip(**vip_mock)) + + # revert exception + mock_driver.reset_mock() + additional_vips_mock.reset_mock() + mock_driver.deallocate_vip.side_effect = Exception('DeallVipException') + vip_mock = VIP.to_dict() + net.revert((vip_mock, additional_vips_mock), self.load_balancer_mock) + mock_driver.deallocate_vip.assert_called_once_with(o_data_models.Vip( + **vip_mock)) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_allocate_vip_for_failover(self, mock_get_session, mock_get_lb, + mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_lb.return_value = LB + mock_get_net_driver.return_value = mock_driver + net = network_tasks.AllocateVIPforFailover() + + mock_driver.allocate_vip.return_value = LB.vip, [] + + mock_driver.reset_mock() + self.assertEqual((LB.vip.to_dict(), []), + net.execute(self.load_balancer_mock)) + mock_driver.allocate_vip.assert_called_once_with(LB) + + # revert + vip_mock = VIP.to_dict() + additional_vips_mock = mock.MagicMock() + net.revert((vip_mock, additional_vips_mock), self.load_balancer_mock) + mock_driver.deallocate_vip.assert_not_called() + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_deallocate_vip(self, mock_get_session, mock_get_lb, + mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.DeallocateVIP() + vip = o_data_models.Vip() + lb = o_data_models.LoadBalancer(vip=vip) + mock_get_lb.return_value = lb + net.execute(self.load_balancer_mock) + mock_driver.deallocate_vip.assert_called_once_with(lb.vip) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_update_vip(self, mock_get_session, mock_get_lb, + mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + vip = o_data_models.Vip() + lb = o_data_models.LoadBalancer(vip=vip) + mock_get_lb.return_value = lb + listeners = [{constants.LOADBALANCER_ID: lb.id}] + net_task = network_tasks.UpdateVIP() + net_task.execute(listeners) + mock_driver.update_vip.assert_called_once_with(lb) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_update_vip_for_delete(self, mock_get_session, mock_get_lb, + mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + vip = o_data_models.Vip() + lb = o_data_models.LoadBalancer(vip=vip) + mock_get_lb.return_value = lb + listener = {constants.LOADBALANCER_ID: lb.id} + net_task = network_tasks.UpdateVIPForDelete() + net_task.execute(listener) + mock_driver.update_vip.assert_called_once_with(lb, for_delete=True) + + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + def test_get_amphora_network_configs_by_id( + self, mock_lb_get, mock_amp_get, + mock_get_session, 
mock_get_net_driver): + LB_ID = uuidutils.generate_uuid() + AMP_ID = uuidutils.generate_uuid() + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + amphora_config_mock = mock.MagicMock() + mock_driver.get_network_configs.return_value = { + "amphora_uuid1": amphora_config_mock + } + mock_amp_get.return_value = 'mock amphora' + mock_lb_get.return_value = 'mock load balancer' + + net_task = network_tasks.GetAmphoraNetworkConfigsByID() + + net_task.execute(LB_ID, AMP_ID) + + mock_driver.get_network_configs.assert_called_once_with( + 'mock load balancer', amphora='mock amphora') + mock_amp_get.assert_called_once_with(mock_get_session(), id=AMP_ID) + mock_lb_get.assert_called_once_with(mock_get_session(), id=LB_ID) + amphora_config_mock.to_dict.assert_called_once_with( + recurse=True, calling_classes=[o_data_models.LoadBalancer] + ) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_get_amphorae_network_configs(self, mock_session, mock_lb_get, + mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_lb_get.return_value = LB + mock_get_net_driver.return_value = mock_driver + amphora_config_mock = mock.MagicMock() + mock_driver.get_network_configs.return_value = { + "amphora_uuid1": amphora_config_mock + } + lb = o_data_models.LoadBalancer() + net_task = network_tasks.GetAmphoraeNetworkConfigs() + net_task.execute(self.load_balancer_mock) + mock_driver.get_network_configs.assert_called_once_with(lb) + amphora_config_mock.to_dict.assert_called_once_with( + recurse=True, calling_classes=[o_data_models.LoadBalancer] + ) + + def test_retrieve_portids_on_amphora_except_lb_network( + self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + + def _interface(port_id): + return [data_models.Interface(port_id=port_id)] + + net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() + mock_driver.get_plugged_networks.return_value = [] + net_task.execute(self.amphora_mock) + mock_driver.get_plugged_networks.assert_called_once_with( + compute_id=COMPUTE_ID) + self.assertFalse(mock_driver.get_port.called) + + mock_driver.reset_mock() + net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() + mock_driver.get_plugged_networks.return_value = _interface(1) + net_task.execute(self.amphora_mock) + mock_driver.get_port.assert_called_once_with(port_id=1) + + mock_driver.reset_mock() + net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() + port_mock = mock.MagicMock() + fixed_ip_mock = mock.MagicMock() + fixed_ip_mock.ip_address = IP_ADDRESS + port_mock.fixed_ips = [fixed_ip_mock] + mock_driver.get_plugged_networks.return_value = _interface(1) + mock_driver.get_port.return_value = port_mock + ports = net_task.execute(self.amphora_mock) + self.assertEqual([], ports) + + mock_driver.reset_mock() + net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() + port_mock = mock.MagicMock() + fixed_ip_mock = mock.MagicMock() + fixed_ip_mock.ip_address = "172.17.17.17" + port_mock.fixed_ips = [fixed_ip_mock] + mock_driver.get_plugged_networks.return_value = _interface(1) + mock_driver.get_port.return_value = port_mock + ports = net_task.execute(self.amphora_mock) + self.assertEqual(1, len(ports)) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock()) + def test_plug_ports(self, mock_session, mock_get, 
mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get.return_value = self.db_amphora_mock + mock_get_net_driver.return_value = mock_driver + + port1 = mock.MagicMock() + port2 = mock.MagicMock() + amp = {constants.ID: AMPHORA_ID, + constants.COMPUTE_ID: '1234'} + plugports = network_tasks.PlugPorts() + plugports.execute(amp, [port1, port2]) + + mock_driver.plug_port.assert_any_call(self.db_amphora_mock, port1) + mock_driver.plug_port.assert_any_call(self.db_amphora_mock, port2) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_update_vip_sg(self, mock_session, mock_lb_get, + mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_driver.update_vip_sg.return_value = SG_ID + mock_lb_get.return_value = LB + mock_get_net_driver.return_value = mock_driver + net = network_tasks.UpdateVIPSecurityGroup() + + sg_id = net.execute(self.load_balancer_mock) + mock_driver.update_vip_sg.assert_called_once_with(LB, LB.vip) + self.assertEqual(sg_id, SG_ID) + + def test_get_subnet_from_vip(self, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + net = network_tasks.GetSubnetFromVIP() + + net.execute(self.load_balancer_mock) + mock_driver.get_subnet.assert_called_once_with( + SUBNET_ID) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_plug_vip_amphora(self, mock_session, mock_lb_get, mock_get, + mock_get_net_driver): + mock_driver = mock.MagicMock() + amphora = {constants.ID: AMPHORA_ID, + constants.LB_NETWORK_IP: IP_ADDRESS} + mock_lb_get.return_value = LB + mock_get.return_value = self.db_amphora_mock + mock_get_net_driver.return_value = mock_driver + net = network_tasks.PlugVIPAmphora() + subnet = {constants.ID: SUBNET_ID} + mockSubnet = mock.MagicMock() + mock_driver.get_subnet.return_value = mockSubnet + net.execute(self.load_balancer_mock, amphora, subnet) + mock_driver.plug_aap_port.assert_called_once_with( + LB, LB.vip, self.db_amphora_mock, mockSubnet) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_revert_plug_vip_amphora(self, mock_session, mock_lb_get, mock_get, + mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_lb_get.return_value = LB + mock_get.return_value = self.db_amphora_mock + mock_get_net_driver.return_value = mock_driver + net = network_tasks.PlugVIPAmphora() + amphora = {constants.ID: AMPHORA_ID, + constants.LB_NETWORK_IP: IP_ADDRESS} + subnet = {constants.ID: SUBNET_ID} + mockSubnet = mock.MagicMock() + mock_driver.get_subnet.return_value = mockSubnet + net.revert(AMPS_DATA[0].to_dict(), self.load_balancer_mock, + amphora, subnet) + mock_driver.unplug_aap_port.assert_called_once_with( + LB.vip, self.db_amphora_mock, mockSubnet) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_revert_plug_vip_amphora_subnet_not_found( + self, mock_session, mock_lb_get, mock_get, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_lb_get.return_value = LB + mock_get.return_value = self.db_amphora_mock + 
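# get_subnet is set up below to raise SubnetNotFound; revert should + # skip unplug_aap_port and only log the failure. +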
mock_get_net_driver.return_value = mock_driver + net = network_tasks.PlugVIPAmphora() + amphora = {constants.ID: AMPHORA_ID, + constants.LB_NETWORK_IP: IP_ADDRESS} + subnet = {constants.ID: SUBNET_ID} + err_msg = 'Subnet not found' + mock_driver.get_subnet.side_effect = net_base.SubnetNotFound(err_msg) + result = AMPS_DATA[0].to_dict() + net.revert(result, self.load_balancer_mock, amphora, subnet) + mock_driver.unplug_aap_port.assert_not_called() + network_tasks.LOG.error.assert_called_once_with( + 'Failed to unplug AAP port for load balancer: %s. ' + 'Resources may still be in use for VRRP port: %s. ' + 'Due to error: %s', + self.load_balancer_mock[constants.LOADBALANCER_ID], + result[constants.VRRP_PORT_ID], err_msg + ) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get') + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_revert_plug_vip_amphora_raise_db_error( + self, mock_session, mock_lb_get, mock_get, mock_get_net_driver): + mock_driver = mock.MagicMock() + mock_lb_get.return_value = LB + err_msg = 'Some Error' + mock_get.side_effect = Exception(err_msg) + net = network_tasks.PlugVIPAmphora() + amphora = {constants.ID: AMPHORA_ID, + constants.LB_NETWORK_IP: IP_ADDRESS} + subnet = {constants.ID: SUBNET_ID} + result = AMPS_DATA[0].to_dict() + net.revert(result, self.load_balancer_mock, amphora, subnet) + mock_driver.unplug_aap_port.assert_not_called() + mock_lb_get.assert_not_called() + network_tasks.LOG.error.assert_called_once_with( + 'Failed to unplug AAP port for load balancer: %s. ' + 'Resources may still be in use for VRRP port: %s. ' + 'Due to error: %s', + self.load_balancer_mock[constants.LOADBALANCER_ID], + result[constants.VRRP_PORT_ID], err_msg + ) + + @mock.patch('octavia.controller.worker.v2.tasks.network_tasks.DeletePort.' 
+ 'update_progress') + def test_delete_port(self, mock_update_progress, mock_get_net_driver): + PORT_ID = uuidutils.generate_uuid() + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + mock_driver.delete_port.side_effect = [ + mock.DEFAULT, exceptions.OctaviaException('boom'), mock.DEFAULT, + exceptions.OctaviaException('boom'), + exceptions.OctaviaException('boom'), + exceptions.OctaviaException('boom'), + exceptions.OctaviaException('boom'), + exceptions.OctaviaException('boom'), + exceptions.OctaviaException('boom')] + mock_driver.admin_down_port.side_effect = [ + mock.DEFAULT, exceptions.OctaviaException('boom')] + + net_task = network_tasks.DeletePort() + + # Limit the retry attempts for the test run to save time + net_task.execute.retry.stop = tenacity.stop_after_attempt(2) + + # Test port ID is None (no-op) + net_task.execute(None) + + mock_update_progress.assert_not_called() + mock_driver.delete_port.assert_not_called() + + # Test successful delete + mock_update_progress.reset_mock() + mock_driver.reset_mock() + + net_task.execute(PORT_ID) + + mock_update_progress.assert_called_once_with(0.5) + mock_driver.delete_port.assert_called_once_with(PORT_ID) + + # Test exception and successful retry + mock_update_progress.reset_mock() + mock_driver.reset_mock() + + net_task.execute(PORT_ID) + + mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) + mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), + mock.call(PORT_ID)]) + + # Test passive failure + mock_update_progress.reset_mock() + mock_driver.reset_mock() + + net_task.execute(PORT_ID, passive_failure=True) + + mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) + mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), + mock.call(PORT_ID)]) + mock_driver.admin_down_port.assert_called_once_with(PORT_ID) + + # Test passive failure admin down failure + mock_update_progress.reset_mock() + mock_driver.reset_mock() + mock_driver.admin_down_port.reset_mock() + + net_task.execute(PORT_ID, passive_failure=True) + + mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) + mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), + mock.call(PORT_ID)]) + mock_driver.admin_down_port.assert_called_once_with(PORT_ID) + + # Test non-passive failure + mock_update_progress.reset_mock() + mock_driver.reset_mock() + mock_driver.admin_down_port.reset_mock() + + mock_driver.admin_down_port.side_effect = [ + exceptions.OctaviaException('boom')] + + self.assertRaises(exceptions.OctaviaException, net_task.execute, + PORT_ID) + + mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) + mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), + mock.call(PORT_ID)]) + mock_driver.admin_down_port.assert_not_called() + + def test_create_vip_base_port(self, mock_get_net_driver): + AMP_ID = uuidutils.generate_uuid() + PORT_ID = uuidutils.generate_uuid() + VIP_NETWORK_ID = uuidutils.generate_uuid() + VIP_QOS_ID = uuidutils.generate_uuid() + VIP_SG_ID = uuidutils.generate_uuid() + VIP_SUBNET_ID = uuidutils.generate_uuid() + VIP_IP_ADDRESS = '203.0.113.81' + VIP_IP_ADDRESS2 = 'fd08::1' + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + vip_dict = {constants.IP_ADDRESS: VIP_IP_ADDRESS, + constants.NETWORK_ID: VIP_NETWORK_ID, + constants.QOS_POLICY_ID: VIP_QOS_ID, + constants.SUBNET_ID: VIP_SUBNET_ID, + constants.SG_IDS: []} + port_mock = mock.MagicMock() + port_mock.id = PORT_ID + additional_vips = 
[{constants.IP_ADDRESS: VIP_IP_ADDRESS2}] + + mock_driver.create_port.side_effect = [ + port_mock, exceptions.OctaviaException('boom'), + exceptions.OctaviaException('boom'), + exceptions.OctaviaException('boom')] + mock_driver.delete_port.side_effect = [mock.DEFAULT, Exception('boom')] + + net_task = network_tasks.CreateVIPBasePort() + + # Limit the retry attempts for the test run to save time + net_task.execute.retry.stop = tenacity.stop_after_attempt(2) + + # Test execute + result = net_task.execute(vip_dict, VIP_SG_ID, AMP_ID, additional_vips) + + self.assertEqual(port_mock.to_dict(), result) + mock_driver.create_port.assert_called_once_with( + VIP_NETWORK_ID, name=constants.AMP_BASE_PORT_PREFIX + AMP_ID, + fixed_ips=[{constants.SUBNET_ID: VIP_SUBNET_ID}], + secondary_ips=[VIP_IP_ADDRESS, VIP_IP_ADDRESS2], + security_group_ids=[VIP_SG_ID], + qos_policy_id=VIP_QOS_ID) + + # Test execute exception + mock_driver.reset_mock() + + self.assertRaises(exceptions.OctaviaException, net_task.execute, + vip_dict, None, AMP_ID, additional_vips) + + # Test revert when this task failed + mock_driver.reset_mock() + + net_task.revert(failure.Failure.from_exception(Exception('boom')), + vip_dict, VIP_SG_ID, AMP_ID, additional_vips) + + mock_driver.delete_port.assert_not_called() + + # Test revert + mock_driver.reset_mock() + + # The execute path generates a port dict, so this will be the result + # passed into the revert method by Taskflow + port_dict = {constants.ID: PORT_ID} + + net_task.revert(port_dict, vip_dict, VIP_SG_ID, AMP_ID, + additional_vips) + + mock_driver.delete_port.assert_called_once_with(PORT_ID) + + # Test revert exception + mock_driver.reset_mock() + + net_task.revert(port_dict, vip_dict, VIP_SG_ID, AMP_ID, + additional_vips) + + mock_driver.delete_port.assert_called_once_with(PORT_ID) + + @mock.patch('time.sleep') + def test_admin_down_port(self, mock_sleep, mock_get_net_driver): + PORT_ID = uuidutils.generate_uuid() + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + port_down_mock = mock.MagicMock() + port_down_mock.status = constants.DOWN + port_up_mock = mock.MagicMock() + port_up_mock.status = constants.UP + mock_driver.set_port_admin_state_up.side_effect = [ + mock.DEFAULT, net_base.PortNotFound, mock.DEFAULT, mock.DEFAULT, + Exception('boom')] + mock_driver.get_port.side_effect = [port_down_mock, port_up_mock] + + net_task = network_tasks.AdminDownPort() + + # Test execute + net_task.execute(PORT_ID) + + mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, + False) + mock_driver.get_port.assert_called_once_with(PORT_ID) + + # Test passive fail on port not found + mock_driver.reset_mock() + + net_task.execute(PORT_ID) + + mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, + False) + mock_driver.get_port.assert_not_called() + + # Test passive fail on port stays up + mock_driver.reset_mock() + + net_task.execute(PORT_ID) + + mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, + False) + mock_driver.get_port.assert_called_once_with(PORT_ID) + + # Test revert when this task failed + mock_driver.reset_mock() + + net_task.revert(failure.Failure.from_exception(Exception('boom')), + PORT_ID) + + mock_driver.set_port_admin_state_up.assert_not_called() + + # Test revert + mock_driver.reset_mock() + + net_task.revert(None, PORT_ID) + + mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, + True) + + # Test revert exception passive failure + mock_driver.reset_mock() + + net_task.revert(None, 
PORT_ID) + + mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, + True) + + @mock.patch('octavia.common.utils.get_vip_security_group_name') + def test_get_vip_security_group_id(self, mock_get_sg_name, + mock_get_net_driver): + LB_ID = uuidutils.generate_uuid() + SG_ID = uuidutils.generate_uuid() + SG_NAME = 'fake_SG_name' + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + mock_get_sg_name.return_value = SG_NAME + sg_mock = mock.MagicMock() + sg_mock.id = SG_ID + mock_driver.get_security_group.side_effect = [ + sg_mock, None, net_base.SecurityGroupNotFound, + net_base.SecurityGroupNotFound] + + net_task = network_tasks.GetVIPSecurityGroupID() + + # Test execute + result = net_task.execute(LB_ID) + + mock_driver.get_security_group.assert_called_once_with(SG_NAME) + mock_get_sg_name.assert_called_once_with(LB_ID) + + # Test execute with an empty get_security_group response + mock_driver.reset_mock() + mock_get_sg_name.reset_mock() + + result = net_task.execute(LB_ID) + + self.assertIsNone(result) + mock_get_sg_name.assert_called_once_with(LB_ID) + + # Test execute no security group found, security groups enabled + mock_driver.reset_mock() + mock_get_sg_name.reset_mock() + mock_driver.sec_grp_enabled = True + + self.assertRaises(net_base.SecurityGroupNotFound, net_task.execute, + LB_ID) + mock_driver.get_security_group.assert_called_once_with(SG_NAME) + mock_get_sg_name.assert_called_once_with(LB_ID) + + # Test execute no security group found, security groups disabled + mock_driver.reset_mock() + mock_get_sg_name.reset_mock() + mock_driver.sec_grp_enabled = False + + result = net_task.execute(LB_ID) + + self.assertIsNone(result) + mock_driver.get_security_group.assert_called_once_with(SG_NAME) + mock_get_sg_name.assert_called_once_with(LB_ID) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') + @mock.patch('octavia.db.api.get_session', return_value=_session_mock) + def test_create_SRIOV_base_port(self, mock_get_session, mock_lb_repo_get, + mock_get_net_driver): + AMP_ID = uuidutils.generate_uuid() + LB_ID = uuidutils.generate_uuid() + PORT_ID = uuidutils.generate_uuid() + VIP_NETWORK_ID = uuidutils.generate_uuid() + VIP_QOS_ID = uuidutils.generate_uuid() + VIP_SUBNET_ID = uuidutils.generate_uuid() + VIP_IP_ADDRESS = '203.0.113.81' + VIP_IP_ADDRESS2 = 'fd08::1' + mock_driver = mock.MagicMock() + mock_get_net_driver.return_value = mock_driver + port_mock = mock.MagicMock() + port_mock.id = PORT_ID + subnet_dict = {constants.ID: VIP_SUBNET_ID} + amphora_dict = {constants.ID: AMP_ID} + lb_dict = {constants.LOADBALANCER_ID: LB_ID, + constants.VIP_ADDRESS: VIP_IP_ADDRESS, + constants.VIP_NETWORK_ID: VIP_NETWORK_ID, + constants.VIP_QOS_POLICY_ID: VIP_QOS_ID} + addl_vips = [o_data_models.AdditionalVip( + ip_address=VIP_IP_ADDRESS2)] + lb_mock = mock.MagicMock() + lb_mock.additional_vips = addl_vips + mock_lb_repo_get.return_value = lb_mock + + mock_driver.create_port.side_effect = [ + port_mock, exceptions.OctaviaException('boom'), + exceptions.OctaviaException('boom'), + exceptions.OctaviaException('boom')] + mock_driver.delete_port.side_effect = [mock.DEFAULT, Exception('boom')] + + net_task = network_tasks.CreateSRIOVBasePort() + + # Limit the retry attempts for the test run to save time + net_task.execute.retry.stop = tenacity.stop_after_attempt(2) + + # Test execute + result = net_task.execute(lb_dict, amphora_dict, subnet_dict) + + self.assertEqual(port_mock.to_dict(), result) + mock_driver.create_port.assert_called_once_with( +
VIP_NETWORK_ID, name=constants.AMP_BASE_PORT_PREFIX + AMP_ID, + fixed_ips=[{constants.SUBNET_ID: VIP_SUBNET_ID}], + secondary_ips=[VIP_IP_ADDRESS2, VIP_IP_ADDRESS], + qos_policy_id=VIP_QOS_ID, vnic_type=constants.VNIC_TYPE_DIRECT) + + # Test execute exception + mock_driver.reset_mock() + + self.assertRaises(exceptions.OctaviaException, net_task.execute, + lb_dict, amphora_dict, subnet_dict) + + # Test revert when this task failed + mock_driver.reset_mock() + + net_task.revert(failure.Failure.from_exception(Exception('boom')), + lb_dict, amphora_dict, subnet_dict) + + mock_driver.delete_port.assert_not_called() + + # Test revert + mock_driver.reset_mock() + + # The execute path generates a port dict, so this will be the result + # passed into the revert method by Taskflow + port_dict = {constants.ID: PORT_ID} + + net_task.revert(port_dict, lb_dict, amphora_dict, subnet_dict) + + mock_driver.delete_port.assert_called_once_with(PORT_ID) + + # Test revert exception + mock_driver.reset_mock() + + net_task.revert(port_dict, lb_dict, amphora_dict, subnet_dict) + + mock_driver.delete_port.assert_called_once_with(PORT_ID) + + def test_build_amp_data(self, mock_get_net_driver): + VIP_ADDRESS = '203.0.113.33' + VIP_PORT_ID = uuidutils.generate_uuid() + lb_dict = {constants.VIP_ADDRESS: VIP_ADDRESS, + constants.VIP_PORT_ID: VIP_PORT_ID} + amphora_dict = {} + BASE_PORT_ID = uuidutils.generate_uuid() + BASE_PORT_IP = '203.0.113.50' + port_data_dict = { + constants.ID: BASE_PORT_ID, + constants.FIXED_IPS: [{constants.IP_ADDRESS: BASE_PORT_IP}]} + + expected_amp_data = {constants.HA_IP: VIP_ADDRESS, + constants.HA_PORT_ID: VIP_PORT_ID, + constants.VRRP_ID: 1, + constants.VRRP_PORT_ID: BASE_PORT_ID, + constants.VRRP_IP: BASE_PORT_IP} + + net_task = network_tasks.BuildAMPData() + + result = net_task.execute(lb_dict, amphora_dict, port_data_dict) + + self.assertEqual(expected_amp_data, result) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_notification_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_notification_tasks.py new file mode 100644 index 0000000000..f74f6678fb --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_notification_tasks.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from unittest import mock +import octavia # noqa H306 +from octavia.common import constants +from octavia.controller.worker.v2.tasks import notification_tasks +import octavia.tests.unit.base as base + + +class MockNOTIFIER(mock.MagicMock): + info = mock.MagicMock() + + +@mock.patch('octavia.common.rpc.NOTIFIER', + new_callable=MockNOTIFIER) +@mock.patch('octavia.common.context.RequestContext', + new_callable=mock.MagicMock) +@mock.patch('octavia.api.v2.types.load_balancer.LoadBalancerFullResponse.' 
+ 'from_data_model', + new_callable=mock.MagicMock) +class TestNotificationTasks(base.TestCase): + def test_update_notification_execute(self, *args): + noti = notification_tasks.SendUpdateNotification() + id = 1 + lb = {constants.PROJECT_ID: id, + constants.LOADBALANCER_ID: id} + noti.execute(lb) + octavia.common.context.RequestContext.assert_called_with(project_id=id) + call_args, call_kwargs = octavia.common.rpc.NOTIFIER.info.call_args + self.assertEqual('octavia.loadbalancer.update.end', call_args[1]) + + def test_create_notification(self, *args): + noti = notification_tasks.SendCreateNotification() + id = 2 + lb = {constants.PROJECT_ID: id, + constants.LOADBALANCER_ID: id} + noti.execute(lb) + octavia.common.context.RequestContext.assert_called_with(project_id=id) + call_args, call_kwargs = octavia.common.rpc.NOTIFIER.info.call_args + self.assertEqual('octavia.loadbalancer.create.end', call_args[1]) + + def test_delete_notification(self, *args): + noti = notification_tasks.SendDeleteNotification() + id = 3 + lb = {constants.PROJECT_ID: id, + constants.LOADBALANCER_ID: id} + noti.execute(lb) + octavia.common.context.RequestContext.assert_called_with(project_id=id) + call_args, call_kwargs = octavia.common.rpc.NOTIFIER.info.call_args + self.assertEqual('octavia.loadbalancer.delete.end', call_args[1]) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_retry_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_retry_tasks.py new file mode 100644 index 0000000000..c46fb72c8c --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_retry_tasks.py @@ -0,0 +1,47 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from taskflow import retry + +from octavia.controller.worker.v2.tasks import retry_tasks +import octavia.tests.unit.base as base + + +class TestRetryTasks(base.TestCase): + + def setUp(self): + super().setUp() + + @mock.patch('time.sleep') + def test_sleeping_retry_times_controller(self, mock_sleep): + retry_ctrlr = retry_tasks.SleepingRetryTimesController( + attempts=2, name='test_retry') + + # Test on_failure that should RETRY + history = ['boom'] + + result = retry_ctrlr.on_failure(history) + + self.assertEqual(retry.RETRY, result) + + # Test on_failure retries exhausted, should REVERT + history = ['boom', 'bang', 'pow'] + + result = retry_ctrlr.on_failure(history) + + self.assertEqual(retry.REVERT, result) + + # Test revert - should not raise + retry_ctrlr.revert(history) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_shim_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_shim_tasks.py new file mode 100644 index 0000000000..58c446f83d --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_shim_tasks.py @@ -0,0 +1,33 @@ +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +from octavia.common import constants +from octavia.controller.worker.v2.tasks import shim_tasks +import octavia.tests.unit.base as base + + +class TestShimTasks(base.TestCase): + + def test_amphora_to_amphorae_with_vrrp_ip(self): + + amp_to_amps = shim_tasks.AmphoraToAmphoraeWithVRRPIP() + + base_port = {constants.FIXED_IPS: + [{constants.IP_ADDRESS: '192.0.2.43'}]} + amphora = {constants.ID: '123456'} + expected_amphora = [{constants.ID: '123456', + constants.VRRP_IP: '192.0.2.43'}] + + self.assertEqual(expected_amphora, + amp_to_amps.execute(amphora, base_port)) diff --git a/octavia/tests/unit/controller/worker/v2/test_controller_worker.py b/octavia/tests/unit/controller/worker/v2/test_controller_worker.py new file mode 100644 index 0000000000..c680f938b8 --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/test_controller_worker.py @@ -0,0 +1,2512 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils +import tenacity + +from octavia.api.drivers import utils as provider_utils +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.controller.worker.v2 import controller_worker +from octavia.controller.worker.v2.flows import flow_utils +import octavia.tests.unit.base as base + +TLS_CERT_ID = uuidutils.generate_uuid() +AMP_ID = uuidutils.generate_uuid() +LB_ID = uuidutils.generate_uuid() +LISTENER_ID = uuidutils.generate_uuid() +POOL_ID = uuidutils.generate_uuid() +PROJECT_ID = uuidutils.generate_uuid() +HM_ID = uuidutils.generate_uuid() +MEMBER_ID = uuidutils.generate_uuid() +COMPUTE_ID = uuidutils.generate_uuid() +L7POLICY_ID = uuidutils.generate_uuid() +L7RULE_ID = uuidutils.generate_uuid() +FLAVOR_ID = uuidutils.generate_uuid() +SERVER_GROUP_ID = uuidutils.generate_uuid() +AZ_ID = uuidutils.generate_uuid() +HEALTH_UPDATE_DICT = {'delay': 1, 'timeout': 2} +LISTENER_UPDATE_DICT = {'name': 'test', 'description': 'test2'} +MEMBER_UPDATE_DICT = {'weight': 1, 'ip_address': '10.0.0.0'} +POOL_UPDATE_DICT = {'name': 'test', 'description': 'test2'} +L7POLICY_UPDATE_DICT = {'action': constants.L7POLICY_ACTION_REJECT} +L7RULE_UPDATE_DICT = { + 'type': constants.L7RULE_TYPE_PATH, + 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + 'value': '/api'} + +_db_amphora_mock = mock.MagicMock() +_amphora_mock = { + constants.ID: AMP_ID, + constants.LOAD_BALANCER_ID: LB_ID, +} +_flow_mock = mock.MagicMock() +_db_health_mon_mock = mock.MagicMock() +_health_mon_mock = { + constants.HEALTHMONITOR_ID: HM_ID, + constants.POOL_ID: POOL_ID +} +_vip_mock = mock.MagicMock() +_listener_mock = mock.MagicMock() +_db_load_balancer_mock = mock.MagicMock() +_load_balancer_mock = { + constants.LOADBALANCER_ID: LB_ID, + constants.TOPOLOGY: constants.TOPOLOGY_SINGLE, + constants.FLAVOR_ID: 1, + constants.AVAILABILITY_ZONE: None, + constants.SERVER_GROUP_ID: None +} + +_member_mock = mock.MagicMock() +_pool_mock = {constants.POOL_ID: POOL_ID} +_db_pool_mock = mock.MagicMock() +_db_pool_mock.load_balancer = _db_load_balancer_mock +_member_mock.pool = _db_pool_mock +_l7policy_mock = mock.MagicMock() +_l7policy_mock.id = L7POLICY_ID +_l7policy_mock.to_dict.return_value = {constants.ID: L7POLICY_ID} +_l7rule_mock = mock.MagicMock() +_create_map_flow_mock = mock.MagicMock() +_db_amphora_mock.load_balancer_id = LB_ID +_db_amphora_mock.id = AMP_ID +_db_session = mock.MagicMock() +CONF = cfg.CONF + + +class TestException(Exception): + + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) + + +@mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_db_amphora_mock) +@mock.patch('octavia.db.repositories.HealthMonitorRepository.get', + return_value=_db_health_mon_mock) +@mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_db_load_balancer_mock) +@mock.patch('octavia.db.repositories.ListenerRepository.get', + return_value=_listener_mock) +@mock.patch('octavia.db.repositories.L7PolicyRepository.get', + return_value=_l7policy_mock) +@mock.patch('octavia.db.repositories.L7RuleRepository.get', + return_value=_l7rule_mock) +@mock.patch('octavia.db.repositories.MemberRepository.get', + return_value=_member_mock)
+@mock.patch('octavia.db.repositories.PoolRepository.get', + return_value=_db_pool_mock) +@mock.patch('octavia.common.base_taskflow.TaskFlowServiceController', + return_value=_flow_mock) +@mock.patch('taskflow.listeners.logging.DynamicLoggingListener') +@mock.patch('octavia.db.api.get_session', return_value=_db_session) +class TestControllerWorker(base.TestCase): + + def setUp(self): + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf.config(group="task_flow", jobboard_enabled=True) + + _db_pool_mock.listeners = [_listener_mock] + _db_pool_mock.load_balancer = _db_load_balancer_mock + _db_health_mon_mock.pool = _db_pool_mock + _db_load_balancer_mock.amphorae = _db_amphora_mock + _db_load_balancer_mock.vip = _vip_mock + _db_load_balancer_mock.id = LB_ID + _db_load_balancer_mock.flavor_id = 1 + _db_load_balancer_mock.availability_zone = None + _db_load_balancer_mock.server_group_id = None + _db_load_balancer_mock.project_id = PROJECT_ID + _db_load_balancer_mock.topology = constants.TOPOLOGY_SINGLE + _listener_mock.load_balancer = _db_load_balancer_mock + _listener_mock.id = LISTENER_ID + _listener_mock.to_dict.return_value = { + constants.ID: LISTENER_ID, constants.LOAD_BALANCER_ID: LB_ID, + constants.PROJECT_ID: PROJECT_ID} + self.ref_listener_dict = {constants.LISTENER_ID: LISTENER_ID, + constants.LOADBALANCER_ID: LB_ID, + constants.PROJECT_ID: PROJECT_ID} + + _member_mock.pool = _db_pool_mock + _l7policy_mock.listener = _listener_mock + _l7rule_mock.l7policy = _l7policy_mock + _db_load_balancer_mock.listeners = [_listener_mock] + _db_load_balancer_mock.to_dict.return_value = {'id': LB_ID} + + fetch_mock = mock.MagicMock() + _flow_mock.driver.persistence = fetch_mock + + _db_pool_mock.id = POOL_ID + _db_health_mon_mock.pool_id = POOL_ID + _db_health_mon_mock.id = HM_ID + _db_health_mon_mock.to_dict.return_value = { + 'id': HM_ID, + constants.POOL_ID: POOL_ID + } + + super().setUp() + + @mock.patch('octavia.controller.worker.v2.flows.' + 'amphora_flows.AmphoraFlows.get_delete_amphora_flow', + return_value='TEST') + def test_delete_amphora(self, + mock_get_delete_amp_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_amphora(AMP_ID) + + (cw.services_controller.run_poster. + assert_called_once_with( + flow_utils.get_delete_amphora_flow, + store={constants.AMPHORA: _db_amphora_mock.to_dict()})) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'health_monitor_flows.HealthMonitorFlows.' + 'get_create_health_monitor_flow', + return_value=_flow_mock) + def test_create_health_monitor(self, + mock_get_create_hm_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.create_health_monitor(_health_mon_mock) + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + mock_health_mon_repo_get.return_value = _db_health_mon_mock + + (cw.services_controller.run_poster. 
+ assert_called_once_with(flow_utils.get_create_health_monitor_flow, + store={constants.HEALTH_MON: + _health_mon_mock, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER_ID: + LB_ID, + constants.LOADBALANCER: + provider_lb, + constants.POOL_ID: + POOL_ID})) + + def test_delete_health_monitor(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + + cw.delete_health_monitor(_health_mon_mock) + mock_health_mon_repo_get.return_value = _db_health_mon_mock + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.get_delete_health_monitor_flow, + store={constants.HEALTH_MON: + _health_mon_mock, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER_ID: + LB_ID, + constants.LOADBALANCER: + provider_lb, + constants.POOL_ID: + POOL_ID, + constants.PROJECT_ID: PROJECT_ID})) + + def test_update_health_monitor(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_health_mon_repo_get.return_value = _db_health_mon_mock + _db_health_mon_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + cw.update_health_monitor(_health_mon_mock, + HEALTH_UPDATE_DICT) + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.get_update_health_monitor_flow, + store={constants.HEALTH_MON: + _health_mon_mock, + constants.POOL_ID: POOL_ID, + constants.LOADBALANCER_ID: + LB_ID, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER: + provider_lb, + constants.UPDATE_DICT: + HEALTH_UPDATE_DICT})) + + @mock.patch("octavia.controller.worker.v2.controller_worker." + "ControllerWorker._get_db_obj_until_pending_update") + def test_update_health_monitor_timeout(self, + mock__get_db_obj_until_pending, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _db_health_mon_mock.provisioning_status = constants.ACTIVE + last_attempt_mock = mock.MagicMock() + last_attempt_mock.result.return_value = _db_health_mon_mock + mock__get_db_obj_until_pending.side_effect = tenacity.RetryError( + last_attempt=last_attempt_mock) + + cw = controller_worker.ControllerWorker() + cw.update_health_monitor(_health_mon_mock, + HEALTH_UPDATE_DICT) + + @mock.patch('octavia.db.repositories.FlavorRepository.' 
+ 'get_flavor_metadata_dict', return_value={}) + def test_create_listener(self, + mock_get_flavor_dict, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + + listener_dict = {constants.LISTENER_ID: LISTENER_ID, + constants.LOADBALANCER_ID: LB_ID, + constants.PROJECT_ID: PROJECT_ID} + cw.create_listener(listener_dict) + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + + flavor_dict = {constants.LOADBALANCER_TOPOLOGY: + constants.TOPOLOGY_SINGLE} + (cw.services_controller.run_poster. + assert_called_once_with( + flow_utils.get_create_listener_flow, flavor_dict=flavor_dict, + store={constants.LOADBALANCER: provider_lb, + constants.LOADBALANCER_ID: LB_ID, + constants.LISTENERS: [listener_dict]})) + + @mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict', return_value={}) + def test_delete_listener(self, + mock_get_flavor_dict, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + load_balancer_mock = mock.MagicMock() + load_balancer_mock.provisioning_status = constants.PENDING_UPDATE + load_balancer_mock.id = LB_ID + load_balancer_mock.flavor_id = 1 + load_balancer_mock.topology = constants.TOPOLOGY_SINGLE + mock_lb_repo_get.return_value = load_balancer_mock + + _flow_mock.reset_mock() + + listener_dict = {constants.LISTENER_ID: LISTENER_ID, + constants.LOADBALANCER_ID: LB_ID, + constants.PROJECT_ID: PROJECT_ID} + cw = controller_worker.ControllerWorker() + cw.delete_listener(listener_dict) + + flavor_dict = {constants.LOADBALANCER_TOPOLOGY: + constants.TOPOLOGY_SINGLE} + (cw.services_controller.run_poster. + assert_called_once_with( + flow_utils.get_delete_listener_flow, flavor_dict=flavor_dict, + store={constants.LISTENER: self.ref_listener_dict, + constants.LOADBALANCER_ID: LB_ID, + constants.PROJECT_ID: PROJECT_ID})) + + @mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict', return_value={}) + def test_update_listener(self, + mock_get_flavor_dict, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + load_balancer_mock = mock.MagicMock() + load_balancer_mock.provisioning_status = constants.PENDING_UPDATE + load_balancer_mock.id = LB_ID + load_balancer_mock.flavor_id = None + load_balancer_mock.topology = constants.TOPOLOGY_SINGLE + mock_lb_repo_get.return_value = load_balancer_mock + + _flow_mock.reset_mock() + _listener_mock.provisioning_status = constants.PENDING_UPDATE + + listener_dict = {constants.LISTENER_ID: LISTENER_ID, + constants.LOADBALANCER_ID: LB_ID} + cw = controller_worker.ControllerWorker() + cw.update_listener(listener_dict, LISTENER_UPDATE_DICT) + + flavor_dict = {constants.LOADBALANCER_TOPOLOGY: + constants.TOPOLOGY_SINGLE} + (cw.services_controller.run_poster. 
+ assert_called_once_with(flow_utils.get_update_listener_flow, + flavor_dict=flavor_dict, + store={constants.LISTENER: listener_dict, + constants.UPDATE_DICT: + LISTENER_UPDATE_DICT, + constants.LOADBALANCER_ID: LB_ID, + constants.LISTENERS: + [listener_dict]})) + + @mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict', return_value={}) + @mock.patch("octavia.controller.worker.v2.controller_worker." + "ControllerWorker._get_db_obj_until_pending_update") + def test_update_listener_timeout(self, + mock__get_db_obj_until_pending, + mock_get_flavor_dict, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + load_balancer_mock = mock.MagicMock() + load_balancer_mock.provisioning_status = constants.PENDING_UPDATE + load_balancer_mock.id = LB_ID + load_balancer_mock.flavor_id = 1 + _flow_mock.reset_mock() + _listener_mock.provisioning_status = constants.PENDING_UPDATE + last_attempt_mock = mock.MagicMock() + last_attempt_mock.result.return_value = load_balancer_mock + mock__get_db_obj_until_pending.side_effect = tenacity.RetryError( + last_attempt=last_attempt_mock) + + listener_dict = {constants.LISTENER_ID: LISTENER_ID, + constants.LOADBALANCER_ID: LB_ID} + cw = controller_worker.ControllerWorker() + cw.update_listener(listener_dict, LISTENER_UPDATE_DICT) + + def test_create_load_balancer_single_no_anti_affinity( + self, mock_api_get_session, + mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, + mock_member_repo_get, mock_l7rule_repo_get, mock_l7policy_repo_get, + mock_listener_repo_get, mock_lb_repo_get, + mock_health_mon_repo_get, mock_amp_repo_get): + # Test the code path with Nova anti-affinity disabled + self.conf.config(group="nova", enable_anti_affinity=False) + self._test_create_load_balancer_single( + mock_api_get_session, + mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, + mock_member_repo_get, mock_l7rule_repo_get, + mock_l7policy_repo_get, mock_listener_repo_get, + mock_lb_repo_get, mock_health_mon_repo_get, mock_amp_repo_get) + + def test_create_load_balancer_single_anti_affinity( + self, mock_api_get_session, + mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, + mock_member_repo_get, mock_l7rule_repo_get, mock_l7policy_repo_get, + mock_listener_repo_get, mock_lb_repo_get, + mock_health_mon_repo_get, mock_amp_repo_get): + # Test the code path with Nova anti-affinity enabled + self.conf.config(group="nova", enable_anti_affinity=True) + self._test_create_load_balancer_single( + mock_api_get_session, + mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, + mock_member_repo_get, mock_l7rule_repo_get, + mock_l7policy_repo_get, mock_listener_repo_get, + mock_lb_repo_get, mock_health_mon_repo_get, mock_amp_repo_get) + + def _test_create_load_balancer_single( + self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + # Test the code path with a SINGLE topology + self.conf.config(group="controller_worker", + loadbalancer_topology=constants.TOPOLOGY_SINGLE) + _flow_mock.reset_mock() + + store = { + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, +
constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, + constants.FLAVOR: None, + constants.SERVER_GROUP_ID: None, + constants.AVAILABILITY_ZONE: None, + } + lb_mock = mock.MagicMock() + lb_mock.listeners = [] + lb_mock.topology = constants.TOPOLOGY_SINGLE + mock_lb_repo_get.side_effect = [None, None, None, lb_mock] + + cw = controller_worker.ControllerWorker() + cw.create_load_balancer(_load_balancer_mock) + + cw.services_controller.run_poster.assert_called_with( + flow_utils.get_create_load_balancer_flow, + constants.TOPOLOGY_SINGLE, listeners=[], + flavor_dict=None, store=store) + self.assertEqual(4, mock_lb_repo_get.call_count) + + def test_create_load_balancer_active_standby( + self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + self.conf.config( + group="controller_worker", + loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) + + _flow_mock.reset_mock() + store = { + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, + constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, + constants.FLAVOR: None, + constants.SERVER_GROUP_ID: None, + constants.AVAILABILITY_ZONE: None, + } + setattr(mock_lb_repo_get.return_value, 'topology', + constants.TOPOLOGY_ACTIVE_STANDBY) + setattr(mock_lb_repo_get.return_value, 'listeners', []) + + cw = controller_worker.ControllerWorker() + cw.create_load_balancer(_load_balancer_mock) + + cw.services_controller.run_poster.assert_called_with( + flow_utils.get_create_load_balancer_flow, + constants.TOPOLOGY_ACTIVE_STANDBY, listeners=[], + flavor_dict=None, store=store) + + def test_create_load_balancer_full_graph_single( + self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + self.conf.config( + group="controller_worker", + loadbalancer_topology=constants.TOPOLOGY_SINGLE) + + listeners = [data_models.Listener(id='listener1'), + data_models.Listener(id='listener2')] + dict_listeners = [listener.to_dict() for listener in + provider_utils.db_listeners_to_provider_listeners( + listeners)] + lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners, + topology=constants.TOPOLOGY_SINGLE) + mock_lb_repo_get.return_value = lb + store = { + constants.LOADBALANCER_ID: LB_ID, + 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, + constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, + constants.FLAVOR: None, + constants.SERVER_GROUP_ID: None, + constants.AVAILABILITY_ZONE: None, + } + + cw = controller_worker.ControllerWorker() + cw.create_load_balancer(_load_balancer_mock) + + cw.services_controller.run_poster.assert_called_with( + flow_utils.get_create_load_balancer_flow, + constants.TOPOLOGY_SINGLE, listeners=dict_listeners, + flavor_dict=None, store=store) + + def test_create_load_balancer_full_graph_active_standby( + self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + self.conf.config( + group="controller_worker", + 
loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY)
+
+ listeners = [data_models.Listener(id='listener1'),
+ data_models.Listener(id='listener2')]
+ dict_listeners = [listener.to_dict() for listener in
+ provider_utils.db_listeners_to_provider_listeners(
+ listeners)]
+ lb = data_models.LoadBalancer(
+ id=LB_ID, listeners=listeners,
+ topology=constants.TOPOLOGY_ACTIVE_STANDBY)
+ mock_lb_repo_get.return_value = lb
+ store = {
+ constants.LOADBALANCER_ID: LB_ID,
+ 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY},
+ constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
+ constants.FLAVOR: None,
+ constants.SERVER_GROUP_ID: None,
+ constants.AVAILABILITY_ZONE: None,
+ }
+
+ cw = controller_worker.ControllerWorker()
+ cw.create_load_balancer(_load_balancer_mock)
+
+ cw.services_controller.run_poster.assert_called_with(
+ flow_utils.get_create_load_balancer_flow,
+ constants.TOPOLOGY_ACTIVE_STANDBY, listeners=dict_listeners,
+ store=store, flavor_dict=None)
+
+ @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.'
+ 'LoadBalancerFlows.get_create_load_balancer_flow')
+ @mock.patch('octavia.common.base_taskflow.BaseTaskFlowEngine.'
+ 'taskflow_load')
+ def test_create_load_balancer_full_graph_jobboard_disabled(
+ self,
+ mock_base_taskflow_load,
+ mock_get_create_load_balancer_flow,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+
+ self.conf.config(group="task_flow", jobboard_enabled=False)
+
+ listeners = [data_models.Listener(id='listener1'),
+ data_models.Listener(id='listener2')]
+ dict_listeners = [listener.to_dict() for listener in
+ provider_utils.db_listeners_to_provider_listeners(
+ listeners)]
+ lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners,
+ topology=constants.TOPOLOGY_SINGLE)
+ mock_lb_repo_get.return_value = lb
+ store = {
+ constants.LOADBALANCER_ID: LB_ID,
+ 'update_dict': {'topology': constants.TOPOLOGY_SINGLE},
+ constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
+ constants.FLAVOR: None,
+ constants.SERVER_GROUP_ID: None,
+ constants.AVAILABILITY_ZONE: None,
+ }
+
+ cw = controller_worker.ControllerWorker()
+ cw.create_load_balancer(_load_balancer_mock)
+
+ mock_get_create_load_balancer_flow.assert_called_with(
+ constants.TOPOLOGY_SINGLE, listeners=dict_listeners,
+ flavor_dict=None)
+ mock_base_taskflow_load.assert_called_with(
+ mock_get_create_load_balancer_flow.return_value, store=store)
+
+ def test_delete_load_balancer_without_cascade(self,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+
+ _flow_mock.reset_mock()
+
+ cw = controller_worker.ControllerWorker()
+ cw.delete_load_balancer(_load_balancer_mock, cascade=False)
+
+ mock_lb_repo_get.assert_called_once_with(
+ _db_session,
+ id=LB_ID)
+
+ (cw.services_controller.run_poster.
+ assert_called_once_with(
+ flow_utils.get_delete_load_balancer_flow,
+ _load_balancer_mock,
+ store={constants.LOADBALANCER: _load_balancer_mock,
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.SERVER_GROUP_ID:
+ _db_load_balancer_mock.server_group_id,
+ constants.PROJECT_ID: _db_load_balancer_mock.project_id,
+ }))
+
+ def test_delete_load_balancer_with_cascade(self,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+
+ _flow_mock.reset_mock()
+
+ cw = controller_worker.ControllerWorker()
+ cw.delete_load_balancer(_load_balancer_mock, cascade=True)
+
+ mock_lb_repo_get.assert_called_once_with(
+ _db_session,
+ id=LB_ID)
+
+ listener_list = [{constants.LISTENER_ID: LISTENER_ID,
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.PROJECT_ID: PROJECT_ID}]
+
+ (cw.services_controller.run_poster.
+ assert_called_once_with(
+ flow_utils.get_cascade_delete_load_balancer_flow,
+ _load_balancer_mock, listener_list, [],
+ store={constants.LOADBALANCER: _load_balancer_mock,
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.SERVER_GROUP_ID:
+ _db_load_balancer_mock.server_group_id,
+ constants.PROJECT_ID: _db_load_balancer_mock.project_id,
+ })
+ )
+
+ @mock.patch(
+ "octavia.common.tls_utils.cert_parser.load_certificates_data",
+ side_effect=RuntimeError
+ )
+ def test_delete_load_balancer_with_cascade_tls_unavailable(
+ self,
+ mock_load_tls_cert,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get
+ ):
+ _flow_mock.reset_mock()
+
+ _listener_mock.tls_certificate_id = TLS_CERT_ID
+ _listener_mock.to_dict.return_value[
+ constants.TLS_CERTIFICATE_ID] = TLS_CERT_ID
+
+ cw = controller_worker.ControllerWorker()
+ cw.delete_load_balancer(_load_balancer_mock, cascade=True)
+
+ mock_lb_repo_get.assert_called_once_with(
+ _db_session,
+ id=LB_ID)
+
+ # Check that load_certificates_data was called and raised an error.
+ # The error must be ignored because it is not critical for this flow.
+ mock_load_tls_cert.assert_called_once()
+
+ listener_list = [{constants.LISTENER_ID: LISTENER_ID,
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.PROJECT_ID: PROJECT_ID,
+ "default_tls_container_ref": TLS_CERT_ID}]
+
+ (cw.services_controller.run_poster.
+ assert_called_once_with( + flow_utils.get_cascade_delete_load_balancer_flow, + _load_balancer_mock, listener_list, [], + store={constants.LOADBALANCER: _load_balancer_mock, + constants.LOADBALANCER_ID: LB_ID, + constants.SERVER_GROUP_ID: + _db_load_balancer_mock.server_group_id, + constants.PROJECT_ID: _db_load_balancer_mock.project_id, + }) + ) + + _listener_mock.reset_mock() + + @mock.patch('octavia.db.repositories.ListenerRepository.get_all', + return_value=([_listener_mock], None)) + def test_update_load_balancer(self, + mock_listener_repo_get_all, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _db_load_balancer_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + change = 'TEST2' + cw.update_load_balancer(_load_balancer_mock, change) + + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.get_update_load_balancer_flow, + store={constants.UPDATE_DICT: change, + constants.LOADBALANCER: + _load_balancer_mock, + constants.LOADBALANCER_ID: + _db_load_balancer_mock.id, + })) + + @mock.patch('octavia.db.repositories.ListenerRepository.get_all', + return_value=([_listener_mock], None)) + @mock.patch("octavia.controller.worker.v2.controller_worker." + "ControllerWorker._get_db_obj_until_pending_update") + def test_update_load_balancer_timeout(self, + mock__get_db_obj_until_pending, + mock_listener_repo_get_all, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _db_load_balancer_mock.provisioning_status = constants.ACTIVE + last_attempt_mock = mock.MagicMock() + last_attempt_mock.result.return_value = _db_load_balancer_mock + mock__get_db_obj_until_pending.side_effect = tenacity.RetryError( + last_attempt=last_attempt_mock) + + cw = controller_worker.ControllerWorker() + change = 'TEST2' + cw.update_load_balancer(_load_balancer_mock, change) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'member_flows.MemberFlows.get_create_member_flow', + return_value=_flow_mock) + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict') + def test_create_member(self, + mock_get_az_metadata_dict, + mock_get_create_member_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_get_az_metadata_dict.return_value = {} + mock_member_repo_get.side_effect = [None, _member_mock] + _member = _member_mock.to_dict() + cw = controller_worker.ControllerWorker() + cw.create_member(_member) + + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + (cw.services_controller.run_poster. 
+ assert_called_once_with(flow_utils.get_create_member_flow, + store={constants.MEMBER: _member, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER_ID: + LB_ID, + constants.LOADBALANCER: + provider_lb, + constants.POOL_ID: + POOL_ID, + constants.AVAILABILITY_ZONE: {}})) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'member_flows.MemberFlows.get_delete_member_flow', + return_value=_flow_mock) + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict') + def test_delete_member(self, + mock_get_az_metadata_dict, + mock_get_delete_member_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _member = _member_mock.to_dict() + mock_get_az_metadata_dict.return_value = {} + cw = controller_worker.ControllerWorker() + cw.delete_member(_member) + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + + (cw.services_controller.run_poster. + assert_called_once_with( + flow_utils.get_delete_member_flow, + store={constants.MEMBER: _member, + constants.LISTENERS: [self.ref_listener_dict], + constants.LOADBALANCER_ID: LB_ID, + constants.LOADBALANCER: provider_lb, + constants.POOL_ID: POOL_ID, + constants.PROJECT_ID: PROJECT_ID, + constants.AVAILABILITY_ZONE: {}})) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'member_flows.MemberFlows.get_update_member_flow', + return_value=_flow_mock) + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict') + def test_update_member(self, + mock_get_az_metadata_dict, + mock_get_update_member_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + db_member = mock.MagicMock() + db_member.provisioning_status = constants.PENDING_UPDATE + db_member.pool = _db_pool_mock + mock_member_repo_get.return_value = db_member + _member = _member_mock.to_dict() + _member[constants.PROVISIONING_STATUS] = constants.PENDING_UPDATE + mock_get_az_metadata_dict.return_value = {} + cw = controller_worker.ControllerWorker() + cw.update_member(_member, MEMBER_UPDATE_DICT) + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.get_update_member_flow, + store={constants.MEMBER: _member, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER: + provider_lb, + constants.POOL_ID: + POOL_ID, + constants.LOADBALANCER_ID: + LB_ID, + constants.UPDATE_DICT: + MEMBER_UPDATE_DICT, + constants.AVAILABILITY_ZONE: {}})) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'member_flows.MemberFlows.get_update_member_flow', + return_value=_flow_mock) + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict') + @mock.patch("octavia.controller.worker.v2.controller_worker." 
+ "ControllerWorker._get_db_obj_until_pending_update") + def test_update_member_timeout(self, + mock__get_db_obj_until_pending, + mock_get_az_metadata_dict, + mock_get_update_member_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + db_member = mock.MagicMock() + db_member.provisioning_status = constants.ACTIVE + db_member.pool = _db_pool_mock + last_attempt_mock = mock.MagicMock() + last_attempt_mock.result.return_value = db_member + mock__get_db_obj_until_pending.side_effect = tenacity.RetryError( + last_attempt=last_attempt_mock) + mock_member_repo_get.return_value = db_member + _member = _member_mock.to_dict() + _member[constants.PROVISIONING_STATUS] = constants.PENDING_UPDATE + mock_get_az_metadata_dict.return_value = {} + cw = controller_worker.ControllerWorker() + cw.update_member(_member, MEMBER_UPDATE_DICT) + + @mock.patch('octavia.controller.worker.v2.flows.' + 'member_flows.MemberFlows.get_batch_update_members_flow', + return_value=_flow_mock) + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict') + def test_batch_update_members(self, + mock_get_az_metadata_dict, + mock_get_batch_update_members_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_get_az_metadata_dict.return_value = {} + cw = controller_worker.ControllerWorker() + old_member = mock.MagicMock() + old_member.to_dict.return_value = {'id': 9, + constants.POOL_ID: 'testtest'} + new_member = mock.MagicMock() + mock_member_repo_get.side_effect = [ + new_member, _member_mock, old_member] + cw.batch_update_members([{constants.MEMBER_ID: 9, + constants.POOL_ID: 'testtest'}], + [{constants.MEMBER_ID: 11}], + [MEMBER_UPDATE_DICT]) + provider_m = provider_utils.db_member_to_provider_member(_member_mock) + old_provider_m = provider_utils.db_member_to_provider_member( + old_member).to_dict() + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + (cw.services_controller.run_poster. + assert_called_once_with( + flow_utils.get_batch_update_members_flow, + [old_provider_m], + [{'member_id': 11}], + [(provider_m.to_dict(), MEMBER_UPDATE_DICT)], + store={constants.LISTENERS: [self.ref_listener_dict], + constants.LOADBALANCER_ID: LB_ID, + constants.LOADBALANCER: provider_lb, + constants.POOL_ID: POOL_ID, + constants.PROJECT_ID: PROJECT_ID, + constants.AVAILABILITY_ZONE: {}})) + + def test_create_pool(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_pool_repo_get.return_value = _db_pool_mock + + cw = controller_worker.ControllerWorker() + cw.create_pool(_pool_mock) + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + + (cw.services_controller.run_poster. 
+ assert_called_once_with(flow_utils.get_create_pool_flow, + store={constants.POOL_ID: POOL_ID, + constants.LOADBALANCER_ID: + LB_ID, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER: + provider_lb})) + + self.assertEqual(1, mock_pool_repo_get.call_count) + + def test_delete_pool(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _db_pool_mock.project_id = PROJECT_ID + + cw = controller_worker.ControllerWorker() + cw.delete_pool(_pool_mock) + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.get_delete_pool_flow, + store={constants.POOL_ID: POOL_ID, + constants.LOADBALANCER_ID: + LB_ID, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER: + provider_lb, + constants.PROJECT_ID: PROJECT_ID})) + + def test_update_pool(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _db_pool_mock.provisioning_status = constants.PENDING_UPDATE + mock_pool_repo_get.return_value = _db_pool_mock + + cw = controller_worker.ControllerWorker() + cw.update_pool(_pool_mock, POOL_UPDATE_DICT) + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict(recurse=True) + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.get_update_pool_flow, + store={constants.POOL_ID: POOL_ID, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER_ID: + LB_ID, + constants.LOADBALANCER: + provider_lb, + constants.UPDATE_DICT: + POOL_UPDATE_DICT})) + + @mock.patch("octavia.controller.worker.v2.controller_worker." + "ControllerWorker._get_db_obj_until_pending_update") + def test_update_pool_update(self, + mock__get_db_obj_until_pending, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _db_pool_mock.provisioning_status = constants.ACTIVE + last_attempt_mock = mock.MagicMock() + last_attempt_mock.result.return_value = _db_pool_mock + mock__get_db_obj_until_pending.side_effect = tenacity.RetryError( + last_attempt=last_attempt_mock) + + cw = controller_worker.ControllerWorker() + cw.update_pool(_pool_mock, POOL_UPDATE_DICT) + + def test_create_l7policy(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + l7policy_mock = { + constants.L7POLICY_ID: L7POLICY_ID, + constants.LISTENER_ID: LISTENER_ID + } + mock_l7policy_repo_get.side_effect = [None, _l7policy_mock] + cw.create_l7policy(l7policy_mock) + + (cw.services_controller.run_poster. 
+ assert_called_once_with(flow_utils.get_create_l7policy_flow, + store={constants.L7POLICY: l7policy_mock, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER_ID: LB_ID})) + + def test_delete_l7policy(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + l7policy_mock = { + constants.L7POLICY_ID: L7POLICY_ID, + constants.LISTENER_ID: LISTENER_ID + } + cw.delete_l7policy(l7policy_mock) + + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.get_delete_l7policy_flow, + store={constants.L7POLICY: l7policy_mock, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER_ID: + LB_ID})) + + def test_update_l7policy(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_listener_repo_get.return_value = _listener_mock + _l7policy_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + l7policy_mock = { + constants.L7POLICY_ID: L7POLICY_ID, + constants.LISTENER_ID: LISTENER_ID + } + + cw.update_l7policy(l7policy_mock, L7POLICY_UPDATE_DICT) + + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.get_update_l7policy_flow, + store={constants.L7POLICY: l7policy_mock, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER_ID: + LB_ID, + constants.UPDATE_DICT: + L7POLICY_UPDATE_DICT})) + + @mock.patch("octavia.controller.worker.v2.controller_worker." + "ControllerWorker._get_db_obj_until_pending_update") + def test_update_l7policy_timeout(self, + mock__get_db_obj_until_pending, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_listener_repo_get.return_value = _listener_mock + _l7policy_mock.provisioning_status = constants.ACTIVE + last_attempt_mock = mock.MagicMock() + last_attempt_mock.result.return_value = _l7policy_mock + mock__get_db_obj_until_pending.side_effect = tenacity.RetryError( + last_attempt=last_attempt_mock) + + cw = controller_worker.ControllerWorker() + l7policy_mock = { + constants.L7POLICY_ID: L7POLICY_ID, + constants.LISTENER_ID: LISTENER_ID + } + + cw.update_l7policy(l7policy_mock, L7POLICY_UPDATE_DICT) + + def test_create_l7rule(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + mock_l7rule_repo_get.side_effect = [None, _l7rule_mock] + + cw = controller_worker.ControllerWorker() + + cw.create_l7rule(_l7rule_mock.to_dict()) + + l7_policy = provider_utils.db_l7policy_to_provider_l7policy( + _l7policy_mock) + + (cw.services_controller.run_poster. 
+ assert_called_once_with(flow_utils.get_create_l7rule_flow, + store={constants.L7RULE: + _l7rule_mock.to_dict(), + constants.L7POLICY: + l7_policy.to_dict(), + constants.L7POLICY_ID: L7POLICY_ID, + constants.LOADBALANCER_ID: LB_ID, + constants.LISTENERS: + [self.ref_listener_dict] + })) + + def test_delete_l7rule(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + + cw = controller_worker.ControllerWorker() + cw.delete_l7rule(_l7rule_mock.to_dict()) + l7_policy = provider_utils.db_l7policy_to_provider_l7policy( + _l7policy_mock) + + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.get_delete_l7rule_flow, + store={ + constants.L7RULE: + _l7rule_mock.to_dict(), + constants.L7POLICY: + l7_policy.to_dict(), + constants.L7POLICY_ID: L7POLICY_ID, + constants.LISTENERS: + [self.ref_listener_dict], + constants.LOADBALANCER_ID: LB_ID, + })) + + def test_update_l7rule(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _l7rule_mock.provisioning_status = constants.PENDING_UPDATE + + cw = controller_worker.ControllerWorker() + cw.update_l7rule(_l7rule_mock.to_dict(), L7RULE_UPDATE_DICT) + l7_policy = provider_utils.db_l7policy_to_provider_l7policy( + _l7policy_mock) + + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.get_update_l7rule_flow, + store={ + constants.L7RULE: + _l7rule_mock.to_dict(), + constants.L7POLICY: + l7_policy.to_dict(), + constants.L7POLICY_ID: L7POLICY_ID, + constants.LOADBALANCER_ID: LB_ID, + constants.LISTENERS: + [self.ref_listener_dict], + constants.UPDATE_DICT: + L7RULE_UPDATE_DICT})) + + @mock.patch("octavia.controller.worker.v2.controller_worker." + "ControllerWorker._get_db_obj_until_pending_update") + def test_update_l7rule_timeout(self, + mock__get_db_obj_until_pending, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + _l7rule_mock.provisioning_status = constants.ACTIVE + last_attempt_mock = mock.MagicMock() + last_attempt_mock.result.return_value = _l7rule_mock + mock__get_db_obj_until_pending.side_effect = tenacity.RetryError( + last_attempt=last_attempt_mock) + + cw = controller_worker.ControllerWorker() + cw.update_l7rule(_l7rule_mock.to_dict(), L7RULE_UPDATE_DICT) + + @mock.patch('octavia.api.drivers.utils.' 
+ 'db_loadbalancer_to_provider_loadbalancer')
+ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+ def test_failover_amphora_lb_single(self,
+ mock_update,
+ mock_lb_db_to_provider,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+ mock_lb = mock.MagicMock()
+ mock_lb.id = LB_ID
+ mock_lb.topology = constants.TOPOLOGY_SINGLE
+ mock_lb.flavor_id = None
+ mock_lb.availability_zone = None
+ mock_lb.server_group_id = None
+ mock_lb_repo_get.return_value = mock_lb
+ mock_provider_lb = mock.MagicMock()
+ mock_lb_db_to_provider.return_value = mock_provider_lb
+ mock_amphora = mock.MagicMock()
+ mock_amphora.id = AMP_ID
+ mock_amphora.load_balancer_id = LB_ID
+ mock_amphora.status = constants.AMPHORA_ALLOCATED
+ mock_amp_repo_get.return_value = mock_amphora
+ flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+ constants.TOPOLOGY_SINGLE}
+ expected_stored_params = {
+ constants.AVAILABILITY_ZONE: {},
+ constants.BUILD_TYPE_PRIORITY:
+ constants.LB_CREATE_FAILOVER_PRIORITY,
+ constants.FLAVOR: flavor_dict,
+ constants.LOADBALANCER: mock_provider_lb.to_dict(),
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.SERVER_GROUP_ID: None,
+ constants.VIP: mock_lb.vip.to_dict(),
+ constants.ADDITIONAL_VIPS: []}
+
+ cw = controller_worker.ControllerWorker()
+ cw.services_controller.reset_mock()
+ cw.failover_amphora(AMP_ID)
+
+ cw.services_controller.run_poster.assert_called_once_with(
+ flow_utils.get_failover_amphora_flow,
+ mock_amphora.to_dict(), 1, flavor_dict=flavor_dict,
+ store=expected_stored_params)
+
+ @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
+ 'get_availability_zone_metadata_dict', return_value={})
+ @mock.patch('octavia.api.drivers.utils.'
+ 'db_loadbalancer_to_provider_loadbalancer')
+ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+ def test_failover_amphora_lb_act_stdby(self,
+ mock_update,
+ mock_lb_db_to_provider,
+ mock_get_az_meta,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+ mock_lb = mock.MagicMock()
+ mock_lb.id = LB_ID
+ mock_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
+ mock_lb.flavor_id = None
+ mock_lb.availability_zone = None
+ mock_lb.server_group_id = None
+ mock_lb_repo_get.return_value = mock_lb
+ mock_provider_lb = mock.MagicMock()
+ mock_lb_db_to_provider.return_value = mock_provider_lb
+ mock_amphora = mock.MagicMock()
+ mock_amphora.id = AMP_ID
+ mock_amphora.load_balancer_id = LB_ID
+ mock_amphora.status = constants.AMPHORA_ALLOCATED
+ mock_amp_repo_get.return_value = mock_amphora
+ flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+ constants.TOPOLOGY_ACTIVE_STANDBY}
+ expected_stored_params = {
+ constants.AVAILABILITY_ZONE: {},
+ constants.BUILD_TYPE_PRIORITY:
+ constants.LB_CREATE_FAILOVER_PRIORITY,
+ constants.FLAVOR: flavor_dict,
+ constants.LOADBALANCER: mock_provider_lb.to_dict(),
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.SERVER_GROUP_ID: None,
+ constants.VIP: mock_lb.vip.to_dict(),
+ constants.ADDITIONAL_VIPS: []}
+
+ cw = controller_worker.ControllerWorker()
+ cw.services_controller.reset_mock()
+ cw.failover_amphora(AMP_ID)
+
+ cw.services_controller.run_poster.assert_called_once_with(
+ flow_utils.get_failover_amphora_flow,
+ mock_amphora.to_dict(), 2, flavor_dict=flavor_dict,
+ store=expected_stored_params)
+
+ @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
+ 'get_availability_zone_metadata_dict', return_value={})
+ @mock.patch('octavia.api.drivers.utils.'
+ 'db_loadbalancer_to_provider_loadbalancer')
+ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+ def test_failover_amphora_anti_affinity(self,
+ mock_update,
+ mock_lb_db_to_provider,
+ mock_get_az_meta,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+ mock_lb = mock.MagicMock()
+ mock_lb.id = LB_ID
+ mock_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
+ mock_lb.flavor_id = None
+ mock_lb.availability_zone = None
+ mock_lb.server_group_id = SERVER_GROUP_ID
+ mock_lb_repo_get.return_value = mock_lb
+ mock_provider_lb = mock.MagicMock()
+ mock_lb_db_to_provider.return_value = mock_provider_lb
+ mock_amphora = mock.MagicMock()
+ mock_amphora.id = AMP_ID
+ mock_amphora.load_balancer_id = LB_ID
+ mock_amphora.status = constants.AMPHORA_ALLOCATED
+ mock_amp_repo_get.return_value = mock_amphora
+ flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+ constants.TOPOLOGY_ACTIVE_STANDBY}
+ expected_stored_params = {
+ constants.AVAILABILITY_ZONE: {},
+ constants.BUILD_TYPE_PRIORITY:
+ constants.LB_CREATE_FAILOVER_PRIORITY,
+ constants.FLAVOR: flavor_dict,
+ constants.LOADBALANCER: mock_provider_lb.to_dict(),
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.SERVER_GROUP_ID: SERVER_GROUP_ID,
+ constants.VIP: mock_lb.vip.to_dict(),
+ constants.ADDITIONAL_VIPS: []}
+
+ cw = controller_worker.ControllerWorker()
+ cw.services_controller.reset_mock()
+ cw.failover_amphora(AMP_ID)
+
+ cw.services_controller.run_poster.assert_called_once_with(
+ flow_utils.get_failover_amphora_flow,
+ mock_amphora.to_dict(), 2, flavor_dict=flavor_dict,
+ store=expected_stored_params)
+
+ @mock.patch('octavia.api.drivers.utils.'
+ 'db_loadbalancer_to_provider_loadbalancer')
+ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+ def test_failover_amphora_unknown_topology(self,
+ mock_update,
+ mock_lb_db_to_provider,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+ mock_lb = mock.MagicMock()
+ mock_lb.id = LB_ID
+ mock_lb.topology = 'bogus'
+ mock_lb.flavor_id = None
+ mock_lb.availability_zone = None
+ mock_lb.server_group_id = SERVER_GROUP_ID
+ mock_lb_repo_get.return_value = mock_lb
+ mock_provider_lb = mock.MagicMock()
+ mock_lb_db_to_provider.return_value = mock_provider_lb
+ mock_amphora = mock.MagicMock()
+ mock_amphora.id = AMP_ID
+ mock_amphora.load_balancer_id = LB_ID
+ mock_amphora.status = constants.AMPHORA_ALLOCATED
+ mock_amp_repo_get.return_value = mock_amphora
+ flavor_dict = {constants.LOADBALANCER_TOPOLOGY: mock_lb.topology}
+ expected_stored_params = {
+ constants.AVAILABILITY_ZONE: {},
+ constants.BUILD_TYPE_PRIORITY:
+ constants.LB_CREATE_FAILOVER_PRIORITY,
+ constants.FLAVOR: flavor_dict,
+ constants.LOADBALANCER: mock_provider_lb.to_dict(),
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.SERVER_GROUP_ID: SERVER_GROUP_ID,
+ constants.VIP: mock_lb.vip.to_dict(),
+ constants.ADDITIONAL_VIPS: []}
+
+ cw = controller_worker.ControllerWorker()
+ cw.services_controller.reset_mock()
+ cw.failover_amphora(AMP_ID)
+
+ cw.services_controller.run_poster.assert_called_once_with(
+ flow_utils.get_failover_amphora_flow,
+ mock_amphora.to_dict(), None, flavor_dict=flavor_dict,
+ store=expected_stored_params)
+
+ @mock.patch('octavia.db.repositories.FlavorRepository.'
+ 'get_flavor_metadata_dict', return_value={})
+ @mock.patch('octavia.api.drivers.utils.'
+ 'db_loadbalancer_to_provider_loadbalancer')
+ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+ def test_failover_amphora_with_flavor(self,
+ mock_update,
+ mock_lb_db_to_provider,
+ mock_get_flavor_meta,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+ mock_lb = mock.MagicMock()
+ mock_lb.id = LB_ID
+ mock_lb.topology = constants.TOPOLOGY_SINGLE
+ mock_lb.flavor_id = FLAVOR_ID
+ mock_lb.availability_zone = None
+ mock_lb.server_group_id = None
+ mock_lb_repo_get.return_value = mock_lb
+ mock_provider_lb = mock.MagicMock()
+ mock_lb_db_to_provider.return_value = mock_provider_lb
+ mock_amphora = mock.MagicMock()
+ mock_amphora.id = AMP_ID
+ mock_amphora.load_balancer_id = LB_ID
+ mock_amphora.status = constants.AMPHORA_ALLOCATED
+ mock_amp_repo_get.return_value = mock_amphora
+ flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+ constants.TOPOLOGY_SINGLE, 'taste': 'spicy'}
+ expected_stored_params = {
+ constants.AVAILABILITY_ZONE: {},
+ constants.BUILD_TYPE_PRIORITY:
+ constants.LB_CREATE_FAILOVER_PRIORITY,
+ constants.FLAVOR: flavor_dict,
+ constants.LOADBALANCER: mock_provider_lb.to_dict(),
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.SERVER_GROUP_ID: None,
+ constants.VIP: mock_lb.vip.to_dict(),
+ constants.ADDITIONAL_VIPS: []}
+ mock_get_flavor_meta.return_value = {'taste': 'spicy'}
+
+ cw = controller_worker.ControllerWorker()
+ cw.services_controller.reset_mock()
+ cw.failover_amphora(AMP_ID)
+
+ cw.services_controller.run_poster.assert_called_once_with(
+ flow_utils.get_failover_amphora_flow,
+ mock_amphora.to_dict(), 1, flavor_dict=flavor_dict,
+ store=expected_stored_params)
+
+ @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
+ 'get_availability_zone_metadata_dict', return_value={})
+ @mock.patch('octavia.api.drivers.utils.'
+ 'db_loadbalancer_to_provider_loadbalancer')
+ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+ def test_failover_amphora_with_az(self,
+ mock_update,
+ mock_lb_db_to_provider,
+ mock_get_az_meta,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+ mock_lb = mock.MagicMock()
+ mock_lb.id = LB_ID
+ mock_lb.topology = constants.TOPOLOGY_SINGLE
+ mock_lb.flavor_id = None
+ mock_lb.availability_zone = AZ_ID
+ mock_lb.server_group_id = None
+ mock_lb_repo_get.return_value = mock_lb
+ mock_provider_lb = mock.MagicMock()
+ mock_lb_db_to_provider.return_value = mock_provider_lb
+ mock_amphora = mock.MagicMock()
+ mock_amphora.id = AMP_ID
+ mock_amphora.load_balancer_id = LB_ID
+ mock_amphora.status = constants.AMPHORA_ALLOCATED
+ mock_amp_repo_get.return_value = mock_amphora
+ flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+ constants.TOPOLOGY_SINGLE}
+ expected_stored_params = {
+ constants.AVAILABILITY_ZONE: {'planet': 'jupiter'},
+ constants.BUILD_TYPE_PRIORITY:
+ constants.LB_CREATE_FAILOVER_PRIORITY,
+ constants.FLAVOR: flavor_dict,
+ constants.LOADBALANCER: mock_provider_lb.to_dict(),
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.SERVER_GROUP_ID: None,
+ constants.VIP: mock_lb.vip.to_dict(),
+ constants.ADDITIONAL_VIPS: []}
+ mock_get_az_meta.return_value = {'planet': 'jupiter'}
+
+ cw = controller_worker.ControllerWorker()
+ cw.services_controller.reset_mock()
+ cw.failover_amphora(AMP_ID)
+
+ cw.services_controller.run_poster.assert_called_once_with(
+ flow_utils.get_failover_amphora_flow,
+ mock_amphora.to_dict(), 1, flavor_dict=flavor_dict,
+ store=expected_stored_params)
+
+ @mock.patch('octavia.api.drivers.utils.'
+ 'db_loadbalancer_to_provider_loadbalancer')
+ @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
+ def test_failover_amphora_with_add_vips(self,
+ mock_update,
+ mock_lb_db_to_provider,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+ mock_additional_vips = [mock.MagicMock()]
+ mock_lb = mock.MagicMock()
+ mock_lb.id = LB_ID
+ mock_lb.topology = constants.TOPOLOGY_SINGLE
+ mock_lb.flavor_id = None
+ mock_lb.availability_zone = None
+ mock_lb.server_group_id = None
+ mock_lb.additional_vips = mock_additional_vips
+ mock_lb_repo_get.return_value = mock_lb
+ mock_provider_lb = mock.MagicMock()
+ mock_lb_db_to_provider.return_value = mock_provider_lb
+ mock_amphora = mock.MagicMock()
+ mock_amphora.id = AMP_ID
+ mock_amphora.load_balancer_id = LB_ID
+ mock_amphora.status = constants.AMPHORA_ALLOCATED
+ mock_amp_repo_get.return_value = mock_amphora
+ flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+ constants.TOPOLOGY_SINGLE}
+ expected_stored_params = {
+ constants.AVAILABILITY_ZONE: {},
+ constants.BUILD_TYPE_PRIORITY:
+ constants.LB_CREATE_FAILOVER_PRIORITY,
+ constants.FLAVOR: flavor_dict,
+ constants.LOADBALANCER: mock_provider_lb.to_dict(),
+ constants.LOADBALANCER_ID: LB_ID,
+ constants.SERVER_GROUP_ID: None,
+ constants.VIP: mock_lb.vip.to_dict(),
+ constants.ADDITIONAL_VIPS: [
+ add_vips.to_dict()
+ for add_vips in mock_additional_vips
+ ]}
+
+ cw = controller_worker.ControllerWorker()
+ cw.services_controller.reset_mock()
+ cw.failover_amphora(AMP_ID)
+
+ cw.services_controller.run_poster.assert_called_once_with(
+ flow_utils.get_failover_amphora_flow,
+ mock_amphora.to_dict(), 1, flavor_dict=flavor_dict,
+ store=expected_stored_params)
+
+ @mock.patch('octavia.controller.worker.v2.flows.amphora_flows.'
+ 'AmphoraFlows.get_failover_amphora_flow') + def test_failover_amp_missing_amp(self, + mock_get_amp_failover, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + mock_amp_repo_get.return_value = None + + cw = controller_worker.ControllerWorker() + cw.failover_amphora(AMP_ID) + + mock_get_amp_failover.assert_not_called() + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_failover_amp_flow_exception(self, + mock_update, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + mock_amphora = mock.MagicMock() + mock_amphora.id = AMP_ID + mock_amphora.load_balancer_id = LB_ID + mock_amp_repo_get.return_value = mock_amphora + + mock_lb_repo_get.side_effect = TestException('boom') + cw = controller_worker.ControllerWorker() + cw.failover_amphora(AMP_ID) + mock_update.assert_called_with(_db_session, LB_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_failover_amp_flow_exception_reraise(self, + mock_update, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + mock_amphora = mock.MagicMock() + mock_amphora.id = AMP_ID + mock_amphora.load_balancer_id = LB_ID + mock_amp_repo_get.return_value = mock_amphora + + mock_lb_repo_get.side_effect = TestException('boom') + cw = controller_worker.ControllerWorker() + self.assertRaises(TestException, + cw.failover_amphora, + AMP_ID, reraise=True) + + @mock.patch('octavia.controller.worker.v2.flows.amphora_flows.' 
+ 'AmphoraFlows.get_failover_amphora_flow') + def test_failover_amp_no_lb(self, + mock_get_failover_amp_flow, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + _flow_mock.run.reset_mock() + mock_amphora = mock.MagicMock() + mock_amphora.load_balancer_id = None + mock_amphora.id = AMP_ID + mock_amphora.status = constants.AMPHORA_ALLOCATED + mock_amp_repo_get.return_value = mock_amphora + expected_stored_params = {constants.AVAILABILITY_ZONE: {}, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_FAILOVER_PRIORITY, + constants.FLAVOR: {}, + constants.LOADBALANCER: None, + constants.LOADBALANCER_ID: None, + constants.SERVER_GROUP_ID: None, + constants.VIP: {}, + constants.ADDITIONAL_VIPS: []} + + cw = controller_worker.ControllerWorker() + cw.services_controller.reset_mock() + cw.failover_amphora(AMP_ID) + + cw.services_controller.run_poster.assert_called_once_with( + flow_utils.get_failover_amphora_flow, + mock_amphora.to_dict(), + None, flavor_dict={}, store=expected_stored_params) + + @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') + def test_failover_deleted_amphora(self, + mock_delete, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + mock_taskflow_load.reset_mock() + mock_amphora = mock.MagicMock() + mock_amphora.id = AMP_ID + mock_amphora.status = constants.DELETED + mock_amp_repo_get.return_value = mock_amphora + + cw = controller_worker.ControllerWorker() + cw.failover_amphora(AMP_ID) + + mock_delete.assert_called_with(_db_session, amphora_id=AMP_ID) + mock_taskflow_load.assert_not_called() + + def test_get_amphorae_for_failover_single(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + amphora1_mock = mock.MagicMock() + amphora1_mock.status = constants.AMPHORA_ALLOCATED + amphora2_mock = mock.MagicMock() + amphora2_mock.status = constants.DELETED + + load_balancer_mock = mock.MagicMock() + load_balancer_mock.topology = constants.TOPOLOGY_SINGLE + load_balancer_mock.amphorae = [amphora1_mock, amphora2_mock] + + cw = controller_worker.ControllerWorker() + result = cw._get_amphorae_for_failover(load_balancer_mock) + + self.assertEqual([amphora1_mock.to_dict()], result) + + @mock.patch('octavia.common.utils.get_amphora_driver') + def test_get_amphorae_for_failover_act_stdby(self, + mock_get_amp_driver, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + # Note: This test uses three amphora even though we only have + # two per load balancer to properly test the ordering from + # this method. 
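+ # Expected ordering (inferred from the assertion below): amphorae
+ # that report an interface for the VRRP address are listed first,
+ # and the amphora that reports no interface is placed last so it
+ # is failed over last.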
+ amp_driver_mock = mock.MagicMock() + amp_driver_mock.get_interface_from_ip.side_effect = [ + 'fake0', None, 'fake1'] + mock_get_amp_driver.return_value = amp_driver_mock + backup_amphora_mock = mock.MagicMock() + backup_amphora_mock.status = constants.AMPHORA_ALLOCATED + deleted_amphora_mock = mock.MagicMock() + deleted_amphora_mock.status = constants.DELETED + master_amphora_mock = mock.MagicMock() + master_amphora_mock.status = constants.AMPHORA_ALLOCATED + bogus_amphora_mock = mock.MagicMock() + bogus_amphora_mock.status = constants.AMPHORA_ALLOCATED + + load_balancer_mock = mock.MagicMock() + load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY + load_balancer_mock.amphorae = [ + master_amphora_mock, deleted_amphora_mock, backup_amphora_mock, + bogus_amphora_mock] + + cw = controller_worker.ControllerWorker() + result = cw._get_amphorae_for_failover(load_balancer_mock) + + self.assertEqual([master_amphora_mock.to_dict(), + bogus_amphora_mock.to_dict(), + backup_amphora_mock.to_dict()], result) + + @mock.patch('octavia.common.utils.get_amphora_driver') + def test_get_amphorae_for_failover_act_stdby_net_split( + self, mock_get_amp_driver, mock_api_get_session, + mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, + mock_member_repo_get, mock_l7rule_repo_get, mock_l7policy_repo_get, + mock_listener_repo_get, mock_lb_repo_get, mock_health_mon_repo_get, + mock_amp_repo_get): + # Case where the amps can't see each other and somehow end up with + # two amphora with an interface. This is highly unlikely as the + # higher priority amphora should get the IP in a net split, but + # let's test the code for this odd case. + # Note: This test uses three amphora even though we only have + # two per load balancer to properly test the ordering from + # this method. + amp_driver_mock = mock.MagicMock() + amp_driver_mock.get_interface_from_ip.side_effect = [ + 'fake0', 'fake1'] + mock_get_amp_driver.return_value = amp_driver_mock + backup_amphora_mock = mock.MagicMock() + backup_amphora_mock.status = constants.AMPHORA_ALLOCATED + deleted_amphora_mock = mock.MagicMock() + deleted_amphora_mock.status = constants.DELETED + master_amphora_mock = mock.MagicMock() + master_amphora_mock.status = constants.AMPHORA_ALLOCATED + + load_balancer_mock = mock.MagicMock() + load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY + load_balancer_mock.amphorae = [ + backup_amphora_mock, deleted_amphora_mock, master_amphora_mock] + + cw = controller_worker.ControllerWorker() + result = cw._get_amphorae_for_failover(load_balancer_mock) + + self.assertEqual([backup_amphora_mock.to_dict(), + master_amphora_mock.to_dict()], result) + + def test_get_amphorae_for_failover_bogus_topology(self, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + load_balancer_mock = mock.MagicMock() + load_balancer_mock.topology = 'bogus' + + cw = controller_worker.ControllerWorker() + self.assertRaises(exceptions.InvalidTopology, + cw._get_amphorae_for_failover, + load_balancer_mock) + + @mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict') + @mock.patch('octavia.controller.worker.v2.controller_worker.' 
+ 'ControllerWorker._get_amphorae_for_failover') + def test_failover_loadbalancer_single(self, + mock_get_amps_for_failover, + mock_get_flavor_dict, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + _flow_mock.reset_mock() + mock_lb_repo_get.return_value = _db_load_balancer_mock + mock_get_amps_for_failover.return_value = [_amphora_mock] + mock_get_flavor_dict.return_value = {} + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + _db_load_balancer_mock).to_dict() + + expected_flavor = {constants.LOADBALANCER_TOPOLOGY: + _load_balancer_mock[constants.TOPOLOGY]} + provider_lb[constants.FLAVOR] = expected_flavor + expected_flow_store = {constants.LOADBALANCER: provider_lb, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_FAILOVER_PRIORITY, + constants.LOADBALANCER_ID: + _load_balancer_mock[ + constants.LOADBALANCER_ID], + constants.SERVER_GROUP_ID: + _load_balancer_mock[ + constants.SERVER_GROUP_ID], + constants.FLAVOR: expected_flavor, + constants.AVAILABILITY_ZONE: {}} + + cw = controller_worker.ControllerWorker() + cw.failover_loadbalancer(LB_ID) + + mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) + mock_get_amps_for_failover.assert_called_once_with( + _db_load_balancer_mock) + + cw.services_controller.run_poster.assert_called_once_with( + flow_utils.get_failover_LB_flow, [_amphora_mock], provider_lb, + store=expected_flow_store) + + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker._get_amphorae_for_failover') + def test_failover_loadbalancer_act_stdby(self, + mock_get_amps_for_failover, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + _flow_mock.reset_mock() + load_balancer_mock = mock.MagicMock() + load_balancer_mock.listeners = [_listener_mock] + load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY + load_balancer_mock.flavor_id = None + load_balancer_mock.availability_zone = None + load_balancer_mock.vip = _vip_mock + mock_lb_repo_get.return_value = load_balancer_mock + mock_get_amps_for_failover.return_value = [_amphora_mock, + _amphora_mock] + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + load_balancer_mock).to_dict() + + expected_flavor = {constants.LOADBALANCER_TOPOLOGY: + load_balancer_mock.topology} + provider_lb[constants.FLAVOR] = expected_flavor + expected_flow_store = {constants.LOADBALANCER: provider_lb, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_FAILOVER_PRIORITY, + constants.LOADBALANCER_ID: + load_balancer_mock.id, + constants.SERVER_GROUP_ID: + load_balancer_mock.server_group_id, + constants.FLAVOR: expected_flavor, + constants.AVAILABILITY_ZONE: {}} + + cw = controller_worker.ControllerWorker() + cw.failover_loadbalancer(LB_ID) + + mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) + mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock) + + cw.services_controller.run_poster.assert_called_once_with( + flow_utils.get_failover_LB_flow, [_amphora_mock, _amphora_mock], + provider_lb, store=expected_flow_store) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def 
test_failover_loadbalancer_no_lb(self, + mock_lb_repo_update, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + mock_lb_repo_get.return_value = None + + cw = controller_worker.ControllerWorker() + cw.failover_loadbalancer(LB_ID) + + mock_lb_repo_update.assert_called_once_with( + _db_session, LB_ID, provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + @mock.patch('octavia.controller.worker.v2.controller_worker.' + 'ControllerWorker._get_amphorae_for_failover') + def test_failover_loadbalancer_with_bogus_topology( + self, mock_get_amps_for_failover, mock_lb_repo_update, + mock_api_get_session, mock_dyn_log_listener, mock_taskflow_load, + mock_pool_repo_get, mock_member_repo_get, mock_l7rule_repo_get, + mock_l7policy_repo_get, mock_listener_repo_get, mock_lb_repo_get, + mock_health_mon_repo_get, mock_amp_repo_get): + _flow_mock.reset_mock() + load_balancer_mock = mock.MagicMock() + load_balancer_mock.topology = 'bogus' + mock_lb_repo_get.return_value = load_balancer_mock + mock_get_amps_for_failover.return_value = [_amphora_mock] + + cw = controller_worker.ControllerWorker() + result = cw.failover_loadbalancer(LB_ID) + + self.assertIsNone(result) + mock_lb_repo_update.assert_called_once_with( + _db_session, LB_ID, provisioning_status=constants.ERROR) + mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) + mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock) + + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict', return_value={}) + @mock.patch('octavia.controller.worker.v2.controller_worker.' 
+ 'ControllerWorker._get_amphorae_for_failover') + def test_failover_loadbalancer_with_az(self, + mock_get_amps_for_failover, + mock_get_az_meta, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + _flow_mock.reset_mock() + load_balancer_mock = mock.MagicMock() + load_balancer_mock.listeners = [_listener_mock] + load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY + load_balancer_mock.flavor_id = None + load_balancer_mock.availability_zone = uuidutils.generate_uuid() + load_balancer_mock.vip = _vip_mock + mock_lb_repo_get.return_value = load_balancer_mock + mock_get_amps_for_failover.return_value = [_amphora_mock] + mock_get_az_meta.return_value = {'planet': 'jupiter'} + provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer( + load_balancer_mock).to_dict() + + expected_flavor = {constants.LOADBALANCER_TOPOLOGY: + load_balancer_mock.topology} + provider_lb[constants.FLAVOR] = expected_flavor + expected_flow_store = {constants.LOADBALANCER: provider_lb, + constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_FAILOVER_PRIORITY, + constants.LOADBALANCER_ID: + load_balancer_mock.id, + constants.FLAVOR: expected_flavor, + constants.SERVER_GROUP_ID: + load_balancer_mock.server_group_id, + constants.AVAILABILITY_ZONE: { + 'planet': 'jupiter'}} + + cw = controller_worker.ControllerWorker() + cw.failover_loadbalancer(LB_ID) + + mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) + mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock) + + cw.services_controller.run_poster.assert_called_once_with( + flow_utils.get_failover_LB_flow, [_amphora_mock], provider_lb, + store=expected_flow_store) + + @mock.patch('octavia.db.repositories.FlavorRepository.' + 'get_flavor_metadata_dict', return_value={'taste': 'spicy'}) + @mock.patch('octavia.controller.worker.v2.controller_worker.' 
+ 'ControllerWorker._get_amphorae_for_failover')
+ def test_failover_loadbalancer_with_flavor(self,
+ mock_get_amps_for_failover,
+ mock_get_flavor_meta,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+ _flow_mock.reset_mock()
+ load_balancer_mock = mock.MagicMock()
+ load_balancer_mock.listeners = [_listener_mock]
+ load_balancer_mock.topology = constants.TOPOLOGY_SINGLE
+ load_balancer_mock.flavor_id = uuidutils.generate_uuid()
+ load_balancer_mock.availability_zone = None
+ load_balancer_mock.vip = _vip_mock
+ mock_lb_repo_get.return_value = load_balancer_mock
+ mock_get_amps_for_failover.return_value = [_amphora_mock,
+ _amphora_mock]
+ provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
+ load_balancer_mock).to_dict()
+
+ expected_flavor = {'taste': 'spicy', constants.LOADBALANCER_TOPOLOGY:
+ load_balancer_mock.topology}
+ provider_lb[constants.FLAVOR] = expected_flavor
+ expected_flow_store = {constants.LOADBALANCER: provider_lb,
+ constants.BUILD_TYPE_PRIORITY:
+ constants.LB_CREATE_FAILOVER_PRIORITY,
+ constants.LOADBALANCER_ID:
+ load_balancer_mock.id,
+ constants.FLAVOR: expected_flavor,
+ constants.SERVER_GROUP_ID:
+ load_balancer_mock.server_group_id,
+ constants.AVAILABILITY_ZONE: {}}
+
+ cw = controller_worker.ControllerWorker()
+ cw.failover_loadbalancer(LB_ID)
+
+ mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID)
+ mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock)
+
+ cw.services_controller.run_poster.assert_called_once_with(
+ flow_utils.get_failover_LB_flow, [_amphora_mock, _amphora_mock],
+ provider_lb, store=expected_flow_store)
+
+ def test_amphora_cert_rotation(self,
+ mock_api_get_session,
+ mock_dyn_log_listener,
+ mock_taskflow_load,
+ mock_pool_repo_get,
+ mock_member_repo_get,
+ mock_l7rule_repo_get,
+ mock_l7policy_repo_get,
+ mock_listener_repo_get,
+ mock_lb_repo_get,
+ mock_health_mon_repo_get,
+ mock_amp_repo_get):
+ _flow_mock.reset_mock()
+ mock_amp_repo_get.return_value = _db_amphora_mock
+ cw = controller_worker.ControllerWorker()
+ cw.services_controller.reset_mock()
+ cw.amphora_cert_rotation(AMP_ID)
+ (cw.services_controller.run_poster.
+ assert_called_once_with(flow_utils.cert_rotate_amphora_flow,
+ store={constants.AMPHORA:
+ _db_amphora_mock.to_dict(),
+ constants.AMPHORA_ID:
+ _amphora_mock[constants.ID]}))
+
+ @mock.patch('octavia.db.repositories.FlavorRepository.'
+ 'get_flavor_metadata_dict') + @mock.patch('octavia.db.repositories.AmphoraRepository.get_lb_for_amphora') + def test_update_amphora_agent_config(self, + mock_get_lb_for_amp, + mock_flavor_meta, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + _flow_mock.reset_mock() + mock_lb = mock.MagicMock() + mock_lb.flavor_id = 'vanilla' + mock_get_lb_for_amp.return_value = mock_lb + mock_flavor_meta.return_value = {'test': 'dict'} + cw = controller_worker.ControllerWorker() + cw.services_controller.reset_mock() + cw.update_amphora_agent_config(AMP_ID) + + mock_amp_repo_get.assert_called_once_with(_db_session, id=AMP_ID) + mock_get_lb_for_amp.assert_called_once_with(_db_session, AMP_ID) + mock_flavor_meta.assert_called_once_with(_db_session, 'vanilla') + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.update_amphora_config_flow, + store={constants.AMPHORA: + _db_amphora_mock.to_dict(), + constants.FLAVOR: {'test': 'dict'}})) + + # Test with no flavor + _flow_mock.reset_mock() + mock_amp_repo_get.reset_mock() + mock_get_lb_for_amp.reset_mock() + mock_flavor_meta.reset_mock() + mock_lb.flavor_id = None + cw.update_amphora_agent_config(AMP_ID) + mock_amp_repo_get.assert_called_once_with(_db_session, id=AMP_ID) + mock_get_lb_for_amp.assert_called_once_with(_db_session, AMP_ID) + mock_flavor_meta.assert_not_called() + (cw.services_controller.run_poster. + assert_called_once_with(flow_utils.update_amphora_config_flow, + store={constants.AMPHORA: + _db_amphora_mock.to_dict(), + constants.FLAVOR: {}})) diff --git a/octavia/tests/unit/controller/worker/v2/test_taskflow_jobboard_driver.py b/octavia/tests/unit/controller/worker/v2/test_taskflow_jobboard_driver.py new file mode 100644 index 0000000000..0f31c5807e --- /dev/null +++ b/octavia/tests/unit/controller/worker/v2/test_taskflow_jobboard_driver.py @@ -0,0 +1,356 @@ +# Copyright 2024 NTT DATA Group Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +from unittest import mock + +from oslo_config import cfg + +from octavia.controller.worker.v2 import taskflow_jobboard_driver +import octavia.tests.unit.base as base + + +class TestRedisTaskFlowDriver(base.TestCase): + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' 
+ 'job_backends') + def test_job_board_default(self, mock_job_backends): + driver = taskflow_jobboard_driver.RedisTaskFlowDriver(mock.Mock()) + driver.job_board(None) + mock_job_backends.backend.assert_called_once_with( + 'octavia_jobboard', + { + 'board': 'redis', + 'host': '127.0.0.1', + 'port': 6379, + 'db': 0, + 'namespace': 'octavia_jobboard', + 'sentinel': None, + 'sentinel_fallbacks': [], + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + 'sentinel_kwargs': { + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + } + }, + persistence=None + ) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'job_backends') + def test_job_board_password(self, mock_job_backends): + driver = taskflow_jobboard_driver.RedisTaskFlowDriver(mock.Mock()) + cfg.CONF.set_override('jobboard_backend_password', 'redispass', + group='task_flow') + driver.job_board(None) + mock_job_backends.backend.assert_called_once_with( + 'octavia_jobboard', + { + 'board': 'redis', + 'host': '127.0.0.1', + 'port': 6379, + 'db': 0, + 'namespace': 'octavia_jobboard', + 'password': 'redispass', + 'sentinel': None, + 'sentinel_fallbacks': [], + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + 'sentinel_kwargs': { + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + } + }, + persistence=None + ) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'job_backends') + def test_job_board_username(self, mock_job_backends): + driver = taskflow_jobboard_driver.RedisTaskFlowDriver(mock.Mock()) + cfg.CONF.set_override('jobboard_backend_password', 'redispass', + group='task_flow') + cfg.CONF.set_override('jobboard_backend_username', 'redisuser', + group='task_flow') + driver.job_board(None) + mock_job_backends.backend.assert_called_once_with( + 'octavia_jobboard', + { + 'board': 'redis', + 'host': '127.0.0.1', + 'port': 6379, + 'db': 0, + 'namespace': 'octavia_jobboard', + 'username': 'redisuser', + 'password': 'redispass', + 'sentinel': None, + 'sentinel_fallbacks': [], + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + 'sentinel_kwargs': { + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + } + }, + persistence=None + ) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' 
+ 'job_backends') + def test_job_board_ssl(self, mock_job_backends): + driver = taskflow_jobboard_driver.RedisTaskFlowDriver(mock.Mock()) + cfg.CONF.set_override( + 'jobboard_redis_backend_ssl_options', + { + 'ssl': True, + 'ssl_keyfile': 'rediskey', + 'ssl_certfile': 'rediscert', + 'ssl_ca_certs': 'redisca', + 'ssl_cert_reqs': 'required' + }, + group='task_flow') + driver.job_board(None) + mock_job_backends.backend.assert_called_once_with( + 'octavia_jobboard', + { + 'board': 'redis', + 'host': '127.0.0.1', + 'port': 6379, + 'db': 0, + 'namespace': 'octavia_jobboard', + 'sentinel': None, + 'sentinel_fallbacks': [], + 'ssl': True, + 'ssl_keyfile': 'rediskey', + 'ssl_certfile': 'rediscert', + 'ssl_ca_certs': 'redisca', + 'ssl_cert_reqs': 'required', + 'sentinel_kwargs': { + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + } + }, + persistence=None + ) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'job_backends') + def test_job_board_sentinel(self, mock_job_backends): + driver = taskflow_jobboard_driver.RedisTaskFlowDriver(mock.Mock()) + cfg.CONF.set_override('jobboard_redis_sentinel', 'mymaster', + group='task_flow') + cfg.CONF.set_override('jobboard_backend_hosts', + ['host1', 'host2', 'host3'], + group='task_flow') + cfg.CONF.set_override('jobboard_backend_port', 26379, + group='task_flow') + driver.job_board(None) + mock_job_backends.backend.assert_called_once_with( + 'octavia_jobboard', + { + 'board': 'redis', + 'host': 'host1', + 'port': 26379, + 'db': 0, + 'namespace': 'octavia_jobboard', + 'sentinel': 'mymaster', + 'sentinel_fallbacks': ['host2:26379', 'host3:26379'], + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + 'sentinel_kwargs': { + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + } + }, + persistence=None + ) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'job_backends') + def test_job_board_sentinel_password(self, mock_job_backends): + driver = taskflow_jobboard_driver.RedisTaskFlowDriver(mock.Mock()) + cfg.CONF.set_override('jobboard_redis_sentinel', 'mymaster', + group='task_flow') + cfg.CONF.set_override('jobboard_backend_hosts', + ['host1', 'host2', 'host3'], + group='task_flow') + cfg.CONF.set_override('jobboard_backend_port', 26379, + group='task_flow') + cfg.CONF.set_override('jobboard_backend_password', 'redispass', + group='task_flow') + cfg.CONF.set_override('jobboard_redis_sentinel_password', + 'sentinelpass', group='task_flow') + driver.job_board(None) + mock_job_backends.backend.assert_called_once_with( + 'octavia_jobboard', + { + 'board': 'redis', + 'host': 'host1', + 'port': 26379, + 'db': 0, + 'namespace': 'octavia_jobboard', + 'password': 'redispass', + 'sentinel': 'mymaster', + 'sentinel_fallbacks': ['host2:26379', 'host3:26379'], + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + 'sentinel_kwargs': { + 'password': 'sentinelpass', + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + } + }, + persistence=None + ) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' 
+ 'job_backends') + def test_job_board_sentinel_username(self, mock_job_backends): + driver = taskflow_jobboard_driver.RedisTaskFlowDriver(mock.Mock()) + cfg.CONF.set_override('jobboard_redis_sentinel', 'mymaster', + group='task_flow') + cfg.CONF.set_override('jobboard_backend_hosts', + ['host1', 'host2', 'host3'], + group='task_flow') + cfg.CONF.set_override('jobboard_backend_port', 26379, + group='task_flow') + cfg.CONF.set_override('jobboard_backend_username', 'redisuser', + group='task_flow') + cfg.CONF.set_override('jobboard_backend_password', 'redispass', + group='task_flow') + cfg.CONF.set_override('jobboard_redis_sentinel_username', + 'sentineluser', group='task_flow') + cfg.CONF.set_override('jobboard_redis_sentinel_password', + 'sentinelpass', group='task_flow') + driver.job_board(None) + mock_job_backends.backend.assert_called_once_with( + 'octavia_jobboard', + { + 'board': 'redis', + 'host': 'host1', + 'port': 26379, + 'db': 0, + 'namespace': 'octavia_jobboard', + 'username': 'redisuser', + 'password': 'redispass', + 'sentinel': 'mymaster', + 'sentinel_fallbacks': ['host2:26379', 'host3:26379'], + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + 'sentinel_kwargs': { + 'username': 'sentineluser', + 'password': 'sentinelpass', + 'ssl': False, + 'ssl_keyfile': None, + 'ssl_certfile': None, + 'ssl_ca_certs': None, + 'ssl_cert_reqs': 'required', + } + }, + persistence=None + ) + + @mock.patch('octavia.controller.worker.v2.taskflow_jobboard_driver.' + 'job_backends') + def test_job_board_sentinel_ssl(self, mock_job_backends): + driver = taskflow_jobboard_driver.RedisTaskFlowDriver(mock.Mock()) + cfg.CONF.set_override('jobboard_redis_sentinel', 'mymaster', + group='task_flow') + cfg.CONF.set_override('jobboard_backend_hosts', + ['host1', 'host2', 'host3'], + group='task_flow') + cfg.CONF.set_override('jobboard_backend_port', 26379, + group='task_flow') + cfg.CONF.set_override( + 'jobboard_redis_backend_ssl_options', + { + 'ssl': True, + 'ssl_keyfile': 'rediskey', + 'ssl_certfile': 'rediscert', + 'ssl_ca_certs': 'redisca', + 'ssl_cert_reqs': 'required' + }, + group='task_flow') + cfg.CONF.set_override( + 'jobboard_redis_sentinel_ssl_options', + { + 'ssl': True, + 'ssl_keyfile': 'sentinelkey', + 'ssl_certfile': 'sentinelcert', + 'ssl_ca_certs': 'sentinelca', + 'ssl_cert_reqs': 'required' + }, + group='task_flow') + driver.job_board(None) + mock_job_backends.backend.assert_called_once_with( + 'octavia_jobboard', + { + 'board': 'redis', + 'host': 'host1', + 'port': 26379, + 'db': 0, + 'namespace': 'octavia_jobboard', + 'sentinel': 'mymaster', + 'sentinel_fallbacks': ['host2:26379', 'host3:26379'], + 'ssl': True, + 'ssl_keyfile': 'rediskey', + 'ssl_certfile': 'rediscert', + 'ssl_ca_certs': 'redisca', + 'ssl_cert_reqs': 'required', + 'sentinel_kwargs': { + 'ssl': True, + 'ssl_keyfile': 'sentinelkey', + 'ssl_certfile': 'sentinelcert', + 'ssl_ca_certs': 'sentinelca', + 'ssl_cert_reqs': 'required' + } + }, + persistence=None + ) diff --git a/octavia/tests/unit/db/__init__.py b/octavia/tests/unit/db/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/db/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/hacking/test_checks.py b/octavia/tests/unit/hacking/test_checks.py new file mode 100644 index 0000000000..43fa0f3f6c --- /dev/null +++ b/octavia/tests/unit/hacking/test_checks.py @@ -0,0 +1,143 @@ +# Copyright 2015 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import testtools + +from oslotest import base + +from octavia.hacking import checks + + +class HackingTestCase(base.BaseTestCase): + """Hacking test class. + + This class tests the hacking checks in octavia.hacking.checks by passing + strings to the check methods like the pep8/flake8 parser would. The parser + loops over each line in the file and then passes the parameters to the + check method. The parameter names in the check method dictate what type of + object is passed to the check method. The parameter types are:: + + logical_line: A processed line with the following modifications: + - Multi-line statements converted to a single line. + - Stripped left and right. + - Contents of strings replaced with "xxx" of same length. + - Comments removed. + physical_line: Raw line of text from the input file. + lines: a list of the raw lines from the input file + tokens: the tokens that contribute to this logical line + line_number: line number in the input file + total_lines: number of lines in the input file + blank_lines: blank lines before this one + indent_char: indentation character in this file (" " or "\t") + indent_level: indentation (with tabs expanded to multiples of 8) + previous_indent_level: indentation on previous line + previous_logical: previous logical line + filename: Path of the file being run through pep8 + + When running a test on a check method the return will be False/None if + there is no violation in the sample input. If there is an error a tuple is + returned with a position in the line, and a message. So to check the result + just assertTrue if the check is expected to fail and assertFalse if it + should pass. 
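+
+    For example, a minimal check written in this style (a hypothetical
+    check for illustration, not one of the real octavia checks) would
+    look like::
+
+        def check_no_print_statements(logical_line):
+            if 'print(' in logical_line:
+                yield (logical_line.find('print('),
+                       'O999: print() calls are not allowed')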
+ """ + + def assertLinePasses(self, func, *args): + with testtools.ExpectedException(StopIteration): + next(func(*args)) + + def assertLineFails(self, func, *args): + self.assertIsInstance(next(func(*args)), tuple) + + def test_no_mutable_default_args(self): + self.assertEqual(0, len(list(checks.no_mutable_default_args( + "def foo (bar):")))) + self.assertEqual(1, len(list(checks.no_mutable_default_args( + "def foo (bar=[]):")))) + self.assertEqual(1, len(list(checks.no_mutable_default_args( + "def foo (bar={}):")))) + + def test_assert_equal_true_or_false(self): + self.assertEqual(1, len(list(checks.assert_equal_true_or_false( + "self.assertEqual(True, A)")))) + + self.assertEqual(1, len(list(checks.assert_equal_true_or_false( + "self.assertEqual(False, A)")))) + + self.assertEqual(0, len(list(checks.assert_equal_true_or_false( + "self.assertTrue()")))) + + self.assertEqual(0, len(list(checks.assert_equal_true_or_false( + "self.assertFalse()")))) + + def test_no_log_warn(self): + self.assertEqual(1, len(list(checks.no_log_warn( + "LOG.warn()")))) + + self.assertEqual(0, len(list(checks.no_log_warn( + "LOG.warning()")))) + + def test_no_log_translations(self): + for log in checks._all_log_levels: + for hint in checks._all_hints: + bad = f'LOG.{log}({hint}("Bad"))' + self.assertEqual( + 1, len(list(checks.no_translate_logs(bad, 'f')))) + # Catch abuses when used with a variable and not a literal + bad = f'LOG.{log}({hint}(msg))' + self.assertEqual( + 1, len(list(checks.no_translate_logs(bad, 'f')))) + # Do not do validations in tests + ok = f'LOG.{log}(_("OK - unit tests"))' + self.assertEqual( + 0, len(list(checks.no_translate_logs(ok, 'f/tests/f')))) + + def test_check_localized_exception_messages(self): + f = checks.check_raised_localized_exceptions + self.assertLineFails(f, " raise KeyError('Error text')", '') + self.assertLineFails(f, ' raise KeyError("Error text")', '') + self.assertLinePasses(f, ' raise KeyError(_("Error text"))', '') + self.assertLinePasses(f, ' raise KeyError(_ERR("Error text"))', '') + self.assertLinePasses(f, " raise KeyError(translated_msg)", '') + self.assertLinePasses(f, '# raise KeyError("Not translated")', '') + self.assertLinePasses(f, 'print("raise KeyError("Not ' + 'translated")")', '') + + def test_check_localized_exception_message_skip_tests(self): + f = checks.check_raised_localized_exceptions + self.assertLinePasses(f, "raise KeyError('Error text')", + 'neutron_lib/tests/unit/mytest.py') + + def test_check_no_eventlet_imports(self): + f = checks.check_no_eventlet_imports + self.assertLinePasses(f, 'from not_eventlet import greenthread') + self.assertLineFails(f, 'from eventlet import greenthread') + self.assertLineFails(f, 'import eventlet') + + def test_line_continuation_no_backslash(self): + results = list(checks.check_line_continuation_no_backslash( + '', [(1, 'import', (2, 0), (2, 6), 'import \\\n'), + (1, 'os', (3, 4), (3, 6), ' os\n')])) + self.assertEqual(1, len(results)) + self.assertEqual((2, 7), results[0][0]) + + def test_check_no_logging_imports(self): + f = checks.check_no_logging_imports + self.assertLinePasses(f, 'from oslo_log import log') + self.assertLineFails(f, 'from logging import log') + self.assertLineFails(f, 'import logging') + + def test_revert_must_have_kwargs(self): + f = checks.revert_must_have_kwargs + self.assertLinePasses(f, 'def revert(self, *args, **kwargs):') + self.assertLineFails(f, 'def revert(self, loadbalancer):') diff --git a/octavia/tests/unit/image/__init__.py b/octavia/tests/unit/image/__init__.py new file 
mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/image/drivers/__init__.py b/octavia/tests/unit/image/drivers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/image/drivers/noop_driver/test_driver.py b/octavia/tests/unit/image/drivers/noop_driver/test_driver.py new file mode 100644 index 0000000000..ab336624ef --- /dev/null +++ b/octavia/tests/unit/image/drivers/noop_driver/test_driver.py @@ -0,0 +1,39 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_utils import uuidutils + +from octavia.image.drivers.noop_driver import driver +import octavia.tests.unit.base as base + + +CONF = cfg.CONF + + +class TestNoopImageDriver(base.TestCase): + + def setUp(self): + super().setUp() + self.driver = driver.NoopImageDriver() + + def test_get_image_id_by_tag(self): + image_tag = 'amphora' + image_owner = uuidutils.generate_uuid() + image_id = self.driver.get_image_id_by_tag(image_tag, image_owner) + self.assertEqual((image_tag, image_owner, 'get_image_id_by_tag'), + self.driver.driver.imageconfig[( + image_tag, image_owner + )]) + self.assertEqual(1, image_id) diff --git a/octavia/tests/unit/image/drivers/test_glance_driver.py b/octavia/tests/unit/image/drivers/test_glance_driver.py new file mode 100644 index 0000000000..4f3a1f5aa1 --- /dev/null +++ b/octavia/tests/unit/image/drivers/test_glance_driver.py @@ -0,0 +1,65 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
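+
+# Note: the tests below assume ImageManager.get_image_id_by_tag() lists the
+# Glance images matching a tag (and, when given, an image owner) and returns
+# the id of one matching image, raising ImageGetException when none match.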
+ +from unittest import mock + +from oslo_utils import uuidutils + +from octavia.common import exceptions +import octavia.image.drivers.glance_driver as glance_common +import octavia.tests.unit.base as base + + +class TestGlanceClient(base.TestCase): + + def setUp(self): + self.manager = glance_common.ImageManager() + self.manager.manager = mock.MagicMock() + + super().setUp() + + def test_no_images(self): + self.manager.manager.list.return_value = [] + self.assertRaises( + exceptions.ImageGetException, + self.manager.get_image_id_by_tag, 'faketag') + + def test_single_image(self): + images = [ + {'id': uuidutils.generate_uuid(), 'tag': 'faketag'} + ] + self.manager.manager.list.return_value = images + image_id = self.manager.get_image_id_by_tag('faketag', None) + self.assertEqual(image_id, images[0]['id']) + + def test_single_image_owner(self): + owner = uuidutils.generate_uuid() + images = [ + {'id': uuidutils.generate_uuid(), + 'tag': 'faketag', + 'owner': owner} + ] + self.manager.manager.list.return_value = images + image_id = self.manager.get_image_id_by_tag('faketag', owner) + self.assertEqual(image_id, images[0]['id']) + self.assertEqual(owner, images[0]['owner']) + + def test_multiple_images_returns_one_of_images(self): + images = [ + {'id': image_id, 'tag': 'faketag'} + for image_id in [uuidutils.generate_uuid() for i in range(10)] + ] + self.manager.manager.list.return_value = images + image_id = self.manager.get_image_id_by_tag('faketag', None) + self.assertIn(image_id, [image['id'] for image in images]) diff --git a/octavia/tests/unit/network/__init__.py b/octavia/tests/unit/network/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/network/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/network/drivers/__init__.py b/octavia/tests/unit/network/drivers/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/network/drivers/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/network/drivers/neutron/__init__.py b/octavia/tests/unit/network/drivers/neutron/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/network/drivers/neutron/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py b/octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py new file mode 100644 index 0000000000..488ccb8dc2 --- /dev/null +++ b/octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py @@ -0,0 +1,1675 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from novaclient.client import exceptions as nova_exceptions +import openstack.exceptions as os_exceptions +from openstack.network.v2.port import Port +from openstack.network.v2.security_group import SecurityGroup +from openstack.network.v2.subnet import Subnet +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.network import base as network_base +from octavia.network import data_models as network_models +from octavia.network.drivers.neutron import allowed_address_pairs +from octavia.network.drivers.neutron import base as neutron_base +from octavia.tests.common import constants as t_constants +from octavia.tests.common import data_model_helpers as dmh +from octavia.tests.unit import base + + +class TestAllowedAddressPairsDriver(base.TestCase): + k_session = None + driver = None + + SUBNET_ID_1 = "5" + SUBNET_ID_2 = "8" + FIXED_IP_ID_1 = "6" + FIXED_IP_ID_2 = "8" + NETWORK_ID_1 = "7" + NETWORK_ID_2 = "10" + IP_ADDRESS_1 = "10.0.0.2" + IP_ADDRESS_2 = "12.0.0.2" + IPV6_ADDRESS_1 = "2001:db8::1234" + AMPHORA_ID = "1" + LB_ID = "2" + COMPUTE_ID = "3" + ACTIVE = "ACTIVE" + LB_NET_IP = "10.0.0.2" + LB_NET_PORT_ID = "6" + HA_PORT_ID = "8" + HA_IP = "12.0.0.2" + PORT_ID = uuidutils.generate_uuid() + DEVICE_ID = uuidutils.generate_uuid() + + def setUp(self): + super().setUp() + with mock.patch('octavia.common.clients.openstack.connection' + '.Connection', autospec=True) as os_connection: + with mock.patch('stevedore.driver.DriverManager.driver', + autospec=True): + network_proxy = os_connection().network + network_proxy.find_extension = ( + lambda x: 'alias' if x in ( + allowed_address_pairs.AAP_EXT_ALIAS, + neutron_base.SEC_GRP_EXT_ALIAS) + else None) + self.k_session = mock.patch( + 'keystoneauth1.session.Session').start() + self.driver = allowed_address_pairs.AllowedAddressPairsDriver() + + @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' 
+ '_check_extension_enabled', return_value=False) + def test_check_aap_loaded(self, mock_check_ext): + self.assertRaises(network_base.NetworkException, + self.driver._check_aap_loaded) + + def test_get_interfaces_to_unplug(self): + if1 = network_models.Interface() + if1.network_id = 'if1-net' + if1.port_id = 'if1-port' + if1.fixed_ips = [network_models.FixedIP(ip_address='10.0.0.1')] + if2 = network_models.Interface() + if2.network_id = 'if2-net' + if2.port_id = 'if2-port' + if2.fixed_ips = [network_models.FixedIP(ip_address='11.0.0.1')] + interfaces = [if1, if2] + unpluggers = self.driver._get_interfaces_to_unplug( + interfaces, 'if1-net') + self.assertEqual([if1], unpluggers) + unpluggers = self.driver._get_interfaces_to_unplug( + interfaces, 'if1-net', ip_address='10.0.0.1') + self.assertEqual([if1], unpluggers) + unpluggers = self.driver._get_interfaces_to_unplug( + interfaces, 'if1-net', ip_address='11.0.0.1') + self.assertEqual([], unpluggers) + unpluggers = self.driver._get_interfaces_to_unplug( + interfaces, 'if3-net') + self.assertEqual([], unpluggers) + + def test_deallocate_vip(self): + lb = dmh.generate_load_balancer_tree() + lb.vip.load_balancer = lb + vip = lb.vip + sec_grp_id = 'lb-sec-grp1' + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port(device_owner=constants.OCTAVIA_OWNER) + delete_port = self.driver.network_proxy.delete_port + delete_sec_grp = self.driver.network_proxy.delete_security_group + list_security_groups = self.driver.network_proxy.find_security_group + list_security_groups.return_value = SecurityGroup(id=sec_grp_id) + self.driver.deallocate_vip(vip) + calls = [mock.call(vip.port_id)] + for amp in lb.amphorae: + calls.append(mock.call(amp.vrrp_port_id)) + delete_port.assert_has_calls(calls, any_order=True) + delete_sec_grp.assert_called_once_with(sec_grp_id) + + def test_deallocate_vip_no_vrrp_port(self): + lb = dmh.generate_load_balancer_tree() + lb.vip.load_balancer = lb + # amphora 0 doesn't have a vrrp_port_id + lb.amphorae[0].vrrp_port_id = None + vip = lb.vip + sec_grp_id = 'lb-sec-grp1' + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port( + device_owner=constants.OCTAVIA_OWNER) + delete_port = self.driver.network_proxy.delete_port + delete_sec_grp = self.driver.network_proxy.delete_security_group + list_security_groups = self.driver.network_proxy.find_security_group + list_security_groups.return_value = SecurityGroup(id=sec_grp_id) + self.driver.deallocate_vip(vip) + # not called for lb.amphorae[0] + calls = [mock.call(vip.port_id), + mock.call(lb.amphorae[1].vrrp_port_id)] + delete_port.assert_has_calls(calls, any_order=True) + self.assertEqual(2, delete_port.call_count) + delete_sec_grp.assert_called_once_with(sec_grp_id) + + def test_deallocate_vip_no_port(self): + lb = dmh.generate_load_balancer_tree() + lb.vip.load_balancer = lb + vip = lb.vip + sec_grp_id = 'lb-sec-grp1' + show_port = self.driver.network_proxy.get_port + port = Port(device_owner=constants.OCTAVIA_OWNER) + show_port.side_effect = [port, Exception] + list_security_groups = self.driver.network_proxy.find_security_group + list_security_groups.return_value = SecurityGroup(id=sec_grp_id) + self.driver.deallocate_vip(vip) + self.driver.network_proxy.update_port.assert_not_called() + + def test_deallocate_vip_port_deleted(self): + lb = dmh.generate_load_balancer_tree() + lb.vip.load_balancer = lb + vip = lb.vip + sec_grp_id = 'lb-sec-grp1' + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port( + 
device_owner=constants.OCTAVIA_OWNER) + delete_port = self.driver.network_proxy.delete_port + delete_port.side_effect = os_exceptions.ResourceNotFound + delete_sec_grp = self.driver.network_proxy.delete_security_group + find_security_group = self.driver.network_proxy.find_security_group + find_security_group.return_value = SecurityGroup(id=sec_grp_id) + self.driver.deallocate_vip(vip) + calls = [mock.call(vip.port_id)] + for amp in lb.amphorae: + calls.append(mock.call(amp.vrrp_port_id)) + delete_port.assert_has_calls(calls, any_order=True) + delete_sec_grp.assert_called_once_with(sec_grp_id) + + def test_deallocate_vip_no_sec_group(self): + lb = dmh.generate_load_balancer_tree() + lb.vip.load_balancer = lb + vip = lb.vip + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port( + device_owner=constants.OCTAVIA_OWNER) + delete_port = self.driver.network_proxy.delete_port + delete_sec_grp = self.driver.network_proxy.delete_security_group + list_security_groups = self.driver.network_proxy.find_security_group + list_security_groups.return_value = None + self.driver.deallocate_vip(vip) + delete_port.assert_called_with(vip.port_id) + delete_sec_grp.assert_not_called() + + def test_deallocate_vip_when_delete_port_fails(self): + lb = dmh.generate_load_balancer_tree() + vip = data_models.Vip(port_id='1') + vip.load_balancer = lb + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port( + device_owner=constants.OCTAVIA_OWNER) + delete_port = self.driver.network_proxy.delete_port + delete_port.side_effect = [None, None, TypeError] + self.assertRaises(network_base.DeallocateVIPException, + self.driver.deallocate_vip, vip) + + def test_deallocate_vip_when_secgrp_has_allocated_ports(self): + max_retries = 1 + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="networking", max_retries=max_retries) + + lb = dmh.generate_load_balancer_tree() + lb.vip.load_balancer = lb + vip = lb.vip + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port( + device_owner=constants.OCTAVIA_OWNER) + delete_port = self.driver.network_proxy.delete_port + list_ports = self.driver.network_proxy.ports + find_security_group = self.driver.network_proxy.find_security_group + delete_sec_grp = self.driver.network_proxy.delete_security_group + security_group = SecurityGroup(id=t_constants.MOCK_SECURITY_GROUP_ID) + find_security_group.return_value = security_group + delete_grp_results = [ + network_base.DeallocateVIPException + for _ in range(max_retries + 1)] # Total tries = max_retries + 1 + delete_grp_results.append(None) + delete_sec_grp.side_effect = delete_grp_results + list_ports.return_value = iter([t_constants.MOCK_NEUTRON_PORT, + t_constants.MOCK_NEUTRON_PORT2]) + self.driver.deallocate_vip(vip) + # First we expect the amp's ports to be deleted + dp_calls = [mock.call(amp.vrrp_port_id) for amp in lb.amphorae] + # Then after the SG delete fails, extra hanging-on ports are removed + dp_calls.append(mock.call(t_constants.MOCK_PORT_ID)) + # Lastly we remove the vip port + dp_calls.append(mock.call(vip.port_id)) + self.assertEqual(len(dp_calls), delete_port.call_count) + delete_port.assert_has_calls(dp_calls) + dsg_calls = [mock.call(t_constants.MOCK_SECURITY_GROUP_ID) + for _ in range(max_retries + 2)] # Max fail + one success + self.assertEqual(len(dsg_calls), delete_sec_grp.call_count) + delete_sec_grp.assert_has_calls(dsg_calls) + + def test_deallocate_vip_when_port_not_found(self): + lb = dmh.generate_load_balancer_tree() + vip = 
data_models.Vip(port_id='1') + vip.load_balancer = lb + show_port = self.driver.network_proxy.get_port + show_port.side_effect = os_exceptions.ResourceNotFound + self.driver.deallocate_vip(vip) + + def test_deallocate_vip_when_port_not_found_for_update(self): + lb = dmh.generate_load_balancer_tree() + vip = data_models.Vip(port_id='1') + vip.load_balancer = lb + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port( + device_owner=constants.OCTAVIA_OWNER) + update_port = self.driver.network_proxy.update_port + update_port.side_effect = os_exceptions.ResourceNotFound + self.driver.deallocate_vip(vip) + + def test_deallocate_vip_when_port_not_owned_by_octavia(self): + lb = dmh.generate_load_balancer_tree() + lb.vip.load_balancer = lb + vip = lb.vip + sec_grp_id = 'lb-sec-grp1' + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port(**{ + 'id': vip.port_id, + 'device_owner': 'neutron:LOADBALANCERV2', + 'security_groups': [sec_grp_id]}) + update_port = self.driver.network_proxy.update_port + delete_sec_grp = self.driver.network_proxy.delete_security_group + list_security_groups = self.driver.network_proxy.find_security_group + list_security_groups.return_value = SecurityGroup(id=sec_grp_id) + self.driver.deallocate_vip(vip) + expected_port_update = {'security_group_ids': []} + update_port.assert_called_once_with(vip.port_id, + **expected_port_update) + delete_sec_grp.assert_called_once_with(sec_grp_id) + + def test_deallocate_vip_when_vip_port_not_found(self): + lb = dmh.generate_load_balancer_tree() + vip = data_models.Vip(port_id='1') + vip.load_balancer = lb + admin_project_id = 'octavia' + session_mock = mock.MagicMock() + session_mock.get_project_id.return_value = admin_project_id + self.k_session.return_value = session_mock + show_port = self.driver.network_proxy.get_port + show_port.side_effect = os_exceptions.ResourceNotFound + self.driver.deallocate_vip(vip) + + def test_plug_aap_errors_when_nova_cant_find_network_to_attach(self): + lb = dmh.generate_load_balancer_tree() + subnet = network_models.Subnet(id=t_constants.MOCK_VIP_SUBNET_ID, + network_id=t_constants.MOCK_VIP_NET_ID) + + network_attach = self.driver.compute.attach_network_or_port + network_attach.side_effect = nova_exceptions.NotFound(404, "Network") + self.assertRaises(network_base.PlugVIPException, + self.driver.plug_aap_port, lb, lb.vip, + lb.amphorae[0], subnet) + + def test_plug_aap_errors_when_neutron_cant_find_port_to_update(self): + lb = dmh.generate_load_balancer_tree() + subnet = network_models.Subnet(id=t_constants.MOCK_VIP_SUBNET_ID, + network_id=t_constants.MOCK_VIP_NET_ID) + network_attach = self.driver.compute.attach_network_or_port + network_attach.return_value = t_constants.MOCK_NOVA_INTERFACE + + update_port = self.driver.network_proxy.update_port + update_port.side_effect = os_exceptions.ResourceNotFound + self.assertRaises(network_base.PortNotFound, + self.driver.plug_aap_port, lb, lb.vip, + lb.amphorae[0], subnet) + + @mock.patch('octavia.common.utils.get_vip_security_group_name') + def test_update_vip_sg(self, mock_get_sg_name): + LB_ID = uuidutils.generate_uuid() + SG_ID = uuidutils.generate_uuid() + VIP_PORT_ID = uuidutils.generate_uuid() + TEST_SG_NAME = 'test_SG_name' + lb_mock = mock.MagicMock() + lb_mock.id = LB_ID + vip_mock = mock.MagicMock() + vip_mock.port_id = VIP_PORT_ID + vip_sg = mock.MagicMock() + vip_sg.id = uuidutils.generate_uuid() + vip_mock.sg_ids = [vip_sg.id] + security_group_dict = {'id': SG_ID} + mock_get_sg_name.return_value = 
TEST_SG_NAME + + test_driver = allowed_address_pairs.AllowedAddressPairsDriver() + + test_driver._add_vip_security_group_to_port = mock.MagicMock() + test_driver._create_security_group = mock.MagicMock() + test_driver._get_lb_security_group = mock.MagicMock() + test_driver._update_security_group_rules = mock.MagicMock() + test_driver._get_lb_security_group.side_effect = [security_group_dict, + None] + test_driver._create_security_group.return_value = security_group_dict + + # Test security groups disabled + test_driver.sec_grp_enabled = False + + result = test_driver.update_vip_sg(lb_mock, vip_mock) + + self.assertIsNone(result) + test_driver._add_vip_security_group_to_port.assert_not_called() + test_driver._get_lb_security_group.assert_not_called() + test_driver._update_security_group_rules.assert_not_called() + + # Test by security group ID + test_driver.sec_grp_enabled = True + + result = test_driver.update_vip_sg(lb_mock, vip_mock) + + self.assertEqual(SG_ID, result) + test_driver._update_security_group_rules.assert_called_once_with( + lb_mock, SG_ID) + test_driver._add_vip_security_group_to_port.assert_called_once_with( + LB_ID, VIP_PORT_ID, SG_ID, vip_sg_ids=[vip_sg.id]) + + # Test by security group name + test_driver._add_vip_security_group_to_port.reset_mock() + test_driver._get_lb_security_group.reset_mock() + test_driver._update_security_group_rules.reset_mock() + + result = test_driver.update_vip_sg(lb_mock, vip_mock) + + self.assertEqual(SG_ID, result) + mock_get_sg_name.assert_called_once_with(LB_ID) + test_driver._create_security_group.assert_called_once_with( + TEST_SG_NAME) + test_driver._update_security_group_rules.assert_called_once_with( + lb_mock, SG_ID) + test_driver._add_vip_security_group_to_port.assert_called_once_with( + LB_ID, VIP_PORT_ID, SG_ID, vip_sg_ids=[vip_sg.id]) + + def test_update_aap_port_sg(self): + LB_ID = uuidutils.generate_uuid() + SG_ID = uuidutils.generate_uuid() + VIP_PORT_ID = uuidutils.generate_uuid() + VRRP_PORT_ID = uuidutils.generate_uuid() + lb_mock = mock.MagicMock() + lb_mock.id = LB_ID + amp_mock = mock.MagicMock() + amp_mock.vrrp_port_id = VRRP_PORT_ID + vip_mock = mock.MagicMock() + vip_mock.port_id = VIP_PORT_ID + vip_sg = mock.MagicMock() + vip_sg.id = uuidutils.generate_uuid() + vip_mock.sg_ids = [vip_sg.id] + security_group_dict = {'id': SG_ID} + + test_driver = allowed_address_pairs.AllowedAddressPairsDriver() + + test_driver._add_vip_security_group_to_port = mock.MagicMock() + test_driver._create_security_group = mock.MagicMock() + test_driver._get_lb_security_group = mock.MagicMock() + test_driver._update_security_group_rules = mock.MagicMock() + test_driver._get_lb_security_group.side_effect = [security_group_dict, + None] + + # Test security groups disabled + test_driver.sec_grp_enabled = False + + test_driver.update_aap_port_sg(lb_mock, amp_mock, vip_mock) + + test_driver._add_vip_security_group_to_port.assert_not_called() + test_driver._get_lb_security_group.assert_not_called() + test_driver._update_security_group_rules.assert_not_called() + + # Normal path + test_driver.sec_grp_enabled = True + + test_driver.update_aap_port_sg(lb_mock, amp_mock, vip_mock) + + test_driver._update_security_group_rules.assert_not_called() + test_driver._add_vip_security_group_to_port.assert_called_once_with( + LB_ID, VRRP_PORT_ID, SG_ID, vip_sg_ids=[vip_sg.id]) + + # No LB SG + test_driver._add_vip_security_group_to_port.reset_mock() + test_driver._get_lb_security_group.reset_mock() + test_driver._update_security_group_rules.reset_mock() + + 
test_driver.update_aap_port_sg(lb_mock, amp_mock, vip_mock) + + test_driver._update_security_group_rules.assert_not_called() + test_driver._add_vip_security_group_to_port.assert_not_called() + + def test_plug_aap_port(self): + lb = dmh.generate_load_balancer_tree() + + subnet = network_models.Subnet(id=t_constants.MOCK_VIP_SUBNET_ID, + network_id=t_constants.MOCK_VIP_NET_ID) + + list_ports = self.driver.network_proxy.ports + port1 = t_constants.MOCK_MANAGEMENT_PORT1 + port2 = t_constants.MOCK_MANAGEMENT_PORT2 + list_ports.return_value = iter([port1, port2]) + network_attach = self.driver.compute.attach_network_or_port + network_attach.side_effect = [t_constants.MOCK_VRRP_INTERFACE1] + update_port = self.driver.network_proxy.update_port + expected_aap = { + 'allowed_address_pairs': [{'ip_address': lb.vip.ip_address}]} + amp = self.driver.plug_aap_port(lb, lb.vip, lb.amphorae[0], subnet) + update_port.assert_any_call(amp.vrrp_port_id, **expected_aap) + self.assertIn(amp.vrrp_ip, [t_constants.MOCK_VRRP_IP1, + t_constants.MOCK_VRRP_IP2]) + self.assertEqual(lb.vip.ip_address, amp.ha_ip) + + @mock.patch('octavia.network.drivers.neutron.utils.' + 'convert_port_to_model') + def test_plug_aap_port_create_fails(self, mock_convert): + lb = dmh.generate_load_balancer_tree() + + subnet = network_models.Subnet(id=t_constants.MOCK_VIP_SUBNET_ID, + network_id=t_constants.MOCK_VIP_NET_ID) + + list_ports = self.driver.network_proxy.ports + port1 = t_constants.MOCK_MANAGEMENT_PORT1 + port2 = t_constants.MOCK_MANAGEMENT_PORT2 + list_ports.return_value = iter([port1, port2]) + port_create = self.driver.network_proxy.create_port + port_create.side_effect = [Exception('Create failure')] + self.assertRaises(network_base.PlugVIPException, + self.driver.plug_aap_port, + lb, lb.vip, lb.amphorae[0], subnet) + mock_convert.assert_not_called() + self.driver.network_proxy.delete_port.assert_not_called() + + def test_plug_aap_port_attach_fails(self): + lb = dmh.generate_load_balancer_tree() + + subnet = network_models.Subnet(id=t_constants.MOCK_VIP_SUBNET_ID, + network_id=t_constants.MOCK_VIP_NET_ID) + + list_ports = self.driver.network_proxy.ports + port1 = t_constants.MOCK_MANAGEMENT_PORT1 + port2 = t_constants.MOCK_MANAGEMENT_PORT2 + list_ports.return_value = iter([port1, port2]) + network_attach = self.driver.compute.attach_network_or_port + network_attach.side_effect = [Exception('Attach failure')] + self.assertRaises(network_base.PlugVIPException, + self.driver.plug_aap_port, + lb, lb.vip, lb.amphorae[0], subnet) + self.driver.network_proxy.delete_port.assert_called_once() + + def test_plug_aap_port_with_add_vips(self): + additional_vips = [ + {'ip_address': t_constants.MOCK_IP_ADDRESS2, + 'subnet_id': t_constants.MOCK_VIP_SUBNET_ID2} + ] + lb = dmh.generate_load_balancer_tree(additional_vips=additional_vips) + + subnet = network_models.Subnet(id=t_constants.MOCK_VIP_SUBNET_ID, + network_id=t_constants.MOCK_VIP_NET_ID) + + list_ports = self.driver.network_proxy.ports + port1 = t_constants.MOCK_MANAGEMENT_PORT1 + port2 = t_constants.MOCK_MANAGEMENT_PORT2 + list_ports.return_value = iter([port1, port2]) + network_attach = self.driver.compute.attach_network_or_port + network_attach.side_effect = [t_constants.MOCK_VRRP_INTERFACE1] + update_port = self.driver.network_proxy.update_port + amp = self.driver.plug_aap_port(lb, lb.vip, lb.amphorae[0], subnet) + expected_aap = { + 'allowed_address_pairs': + [{'ip_address': lb.vip.ip_address}, + {'ip_address': lb.additional_vips[0].ip_address}]} + + 
update_port.assert_any_call(amp.vrrp_port_id, **expected_aap) + self.assertIn(amp.vrrp_ip, [t_constants.MOCK_VRRP_IP1, + t_constants.MOCK_VRRP_IP2]) + self.assertEqual(lb.vip.ip_address, amp.ha_ip) + + def _set_safely(self, obj, name, value): + if isinstance(obj, dict): + current = obj.get(name) + self.addCleanup(obj.update, {name: current}) + obj.update({name: value}) + else: + current = getattr(obj, name) + self.addCleanup(setattr, obj, name, current) + setattr(obj, name, value) + + def test_plug_aap_on_mgmt_net(self): + lb = dmh.generate_load_balancer_tree() + lb.vip.subnet_id = t_constants.MOCK_MANAGEMENT_SUBNET_ID + subnet = network_models.Subnet( + id=t_constants.MOCK_MANAGEMENT_SUBNET_ID, + network_id=t_constants.MOCK_MANAGEMENT_NET_ID) + list_ports = self.driver.network_proxy.ports + port1 = t_constants.MOCK_MANAGEMENT_PORT1 + port2 = t_constants.MOCK_MANAGEMENT_PORT2 + self._set_safely(t_constants.MOCK_MANAGEMENT_FIXED_IPS1[0], + 'ip_address', lb.amphorae[0].lb_network_ip) + self._set_safely(t_constants.MOCK_MANAGEMENT_FIXED_IPS2[0], + 'ip_address', lb.amphorae[1].lb_network_ip) + list_ports.side_effect = [iter([port1]), iter([port2])] + network_attach = self.driver.compute.attach_network_or_port + self._set_safely(t_constants.MOCK_VRRP_INTERFACE1, + 'net_id', t_constants.MOCK_MANAGEMENT_NET_ID) + self._set_safely(t_constants.MOCK_VRRP_FIXED_IPS1[0], + 'subnet_id', t_constants.MOCK_MANAGEMENT_SUBNET_ID) + self._set_safely(t_constants.MOCK_VRRP_INTERFACE2, + 'net_id', t_constants.MOCK_MANAGEMENT_NET_ID) + self._set_safely(t_constants.MOCK_VRRP_FIXED_IPS2[0], + 'subnet_id', t_constants.MOCK_MANAGEMENT_SUBNET_ID) + network_attach.side_effect = [t_constants.MOCK_VRRP_INTERFACE1] + update_port = self.driver.network_proxy.update_port + expected_aap = { + 'allowed_address_pairs': [{'ip_address': lb.vip.ip_address}]} + amp = self.driver.plug_aap_port(lb, lb.vip, lb.amphorae[0], subnet) + update_port.assert_any_call(amp.vrrp_port_id, **expected_aap) + self.assertIn(amp.vrrp_ip, [t_constants.MOCK_VRRP_IP1, + t_constants.MOCK_VRRP_IP2]) + self.assertEqual(lb.vip.ip_address, amp.ha_ip) + + def test_validate_fixed_ip(self): + IP_ADDRESS = '203.0.113.61' + OTHER_IP_ADDRESS = '203.0.113.62' + SUBNET_ID = uuidutils.generate_uuid() + OTHER_SUBNET_ID = uuidutils.generate_uuid() + fixed_ip_mock = mock.MagicMock() + fixed_ip_mock.subnet_id = SUBNET_ID + fixed_ip_mock.ip_address = IP_ADDRESS + + # valid + result = self.driver._validate_fixed_ip([fixed_ip_mock], SUBNET_ID, + IP_ADDRESS) + self.assertTrue(result) + + # no subnet match + result = self.driver._validate_fixed_ip( + [fixed_ip_mock], OTHER_SUBNET_ID, IP_ADDRESS) + self.assertFalse(result) + + # no IP match + result = self.driver._validate_fixed_ip([fixed_ip_mock], SUBNET_ID, + OTHER_IP_ADDRESS) + self.assertFalse(result) + + def test_allocate_vip_when_port_already_provided(self): + show_port = self.driver.network_proxy.get_port + show_port.return_value = t_constants.MOCK_NEUTRON_PORT + fake_lb_vip = data_models.Vip( + port_id=t_constants.MOCK_PORT_ID, + subnet_id=t_constants.MOCK_SUBNET_ID, + network_id=t_constants.MOCK_NETWORK_ID, + ip_address=t_constants.MOCK_IP_ADDRESS) + fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip) + vip, additional_vips = self.driver.allocate_vip(fake_lb) + self.assertIsInstance(vip, data_models.Vip) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address) + self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id) + self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) + 
self.assertEqual(fake_lb.id, vip.load_balancer_id) + self.assertFalse(additional_vips) + + @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' + '_check_extension_enabled', return_value=True) + def test_allocate_vip_with_port_mismatch(self, mock_check_ext): + bad_existing_port = mock.MagicMock() + bad_existing_port.port_id = uuidutils.generate_uuid() + bad_existing_port.network_id = uuidutils.generate_uuid() + bad_existing_port.subnet_id = uuidutils.generate_uuid() + show_port = self.driver.network_proxy.get_port + show_port.return_value = bad_existing_port + port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict()) + port_create_dict['device_owner'] = constants.OCTAVIA_OWNER + port_create_dict['device_id'] = 'lb-1' + create_port = self.driver.network_proxy.create_port + create_port.return_value = port_create_dict + show_subnet = self.driver.network_proxy.get_subnet + show_subnet.return_value = {'subnet': { + 'id': t_constants.MOCK_SUBNET_ID, + 'network_id': t_constants.MOCK_NETWORK_ID + }} + fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID, + network_id=t_constants.MOCK_NETWORK_ID, + port_id=t_constants.MOCK_PORT_ID, + octavia_owned=True) + fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip, + project_id='test-project') + vip, additional_vips = self.driver.allocate_vip(fake_lb) + exp_create_port_call = { + 'name': 'octavia-lb-1', + 'network_id': t_constants.MOCK_NETWORK_ID, + 'device_id': 'lb-1', + 'device_owner': constants.OCTAVIA_OWNER, + 'admin_state_up': False, + 'project_id': 'test-project', + 'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID}] + } + self.driver.network_proxy.delete_port.assert_called_once_with( + t_constants.MOCK_PORT_ID) + create_port.assert_called_once_with(**exp_create_port_call) + self.assertIsInstance(vip, data_models.Vip) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address) + self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id) + self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) + self.assertEqual(fake_lb.id, vip.load_balancer_id) + self.assertFalse(additional_vips) + + @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' + 'get_port', side_effect=network_base.PortNotFound) + @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' 
+                '_check_extension_enabled', return_value=True)
+    def test_allocate_vip_when_port_not_found(self, mock_check_ext,
+                                              mock_get_port):
+        port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict())
+        port_create_dict['device_owner'] = constants.OCTAVIA_OWNER
+        port_create_dict['device_id'] = 'lb-1'
+        create_port = self.driver.network_proxy.create_port
+        create_port.return_value = port_create_dict
+        show_subnet = self.driver.network_proxy.get_subnet
+        show_subnet.return_value = {'subnet': {
+            'id': t_constants.MOCK_SUBNET_ID,
+            'network_id': t_constants.MOCK_NETWORK_ID
+        }}
+        fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID,
+                                      network_id=t_constants.MOCK_NETWORK_ID,
+                                      port_id=t_constants.MOCK_PORT_ID)
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip,
+                                           project_id='test-project')
+        vip, additional_vips = self.driver.allocate_vip(fake_lb)
+        exp_create_port_call = {
+            'name': 'octavia-lb-1',
+            'network_id': t_constants.MOCK_NETWORK_ID,
+            'device_id': 'lb-1',
+            'device_owner': constants.OCTAVIA_OWNER,
+            'admin_state_up': False,
+            'project_id': 'test-project',
+            'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID}]
+        }
+        create_port.assert_called_once_with(**exp_create_port_call)
+        self.assertIsInstance(vip, data_models.Vip)
+        self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address)
+        self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id)
+        self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id)
+        self.assertEqual(fake_lb.id, vip.load_balancer_id)
+        self.assertFalse(additional_vips)
+
+    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
+                'get_port', side_effect=Exception('boom'))
+    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
+                '_check_extension_enabled', return_value=True)
+    def test_allocate_vip_unknown_exception(self, mock_check_ext,
+                                            mock_get_port):
+        fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID,
+                                      network_id=t_constants.MOCK_NETWORK_ID,
+                                      port_id=t_constants.MOCK_PORT_ID)
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip,
+                                           project_id='test-project')
+        self.assertRaises(network_base.AllocateVIPException,
+                          self.driver.allocate_vip, fake_lb)
+
+    def test_allocate_vip_conflict(self):
+        fake_lb_vip = data_models.Vip(
+            subnet_id=t_constants.MOCK_SUBNET_ID)
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip)
+        create_port = self.driver.network_proxy.create_port
+        create_port.side_effect = os_exceptions.ConflictException
+        self.assertRaises(network_base.VIPInUseException,
+                          self.driver.allocate_vip, fake_lb)
+
+    def test_allocate_vip_when_port_creation_fails(self):
+        fake_lb_vip = data_models.Vip(
+            subnet_id=t_constants.MOCK_SUBNET_ID)
+        fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip)
+        create_port = self.driver.network_proxy.create_port
+        create_port.side_effect = Exception
+        self.assertRaises(network_base.AllocateVIPException,
+                          self.driver.allocate_vip, fake_lb)
+
+    @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.'
+ '_check_extension_enabled', return_value=True) + def test_allocate_vip_when_no_port_provided(self, mock_check_ext): + port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict()) + port_create_dict['device_owner'] = constants.OCTAVIA_OWNER + port_create_dict['device_id'] = 'lb-1' + create_port = self.driver.network_proxy.create_port + create_port.return_value = port_create_dict + show_subnet = self.driver.network_proxy.get_subnet + show_subnet.return_value = { + 'id': t_constants.MOCK_SUBNET_ID, + 'network_id': t_constants.MOCK_NETWORK_ID + } + fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID, + network_id=t_constants.MOCK_NETWORK_ID, + ip_address=t_constants.MOCK_IP_ADDRESS) + fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip, + project_id='test-project') + vip, additional_vips = self.driver.allocate_vip(fake_lb) + exp_create_port_call = { + 'name': 'octavia-lb-1', + 'network_id': t_constants.MOCK_NETWORK_ID, + 'device_id': 'lb-1', + 'device_owner': constants.OCTAVIA_OWNER, + 'admin_state_up': False, + 'project_id': 'test-project', + 'fixed_ips': [{'ip_address': t_constants.MOCK_IP_ADDRESS, + 'subnet_id': t_constants.MOCK_SUBNET_ID}] + } + create_port.assert_called_once_with(**exp_create_port_call) + self.assertIsInstance(vip, data_models.Vip) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address) + self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id) + self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) + self.assertEqual(fake_lb.id, vip.load_balancer_id) + self.assertFalse(additional_vips) + + @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' + '_check_extension_enabled', return_value=True) + def test_allocate_vip_when_no_port_fixed_ip(self, mock_check_ext): + port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict()) + port_create_dict['device_owner'] = constants.OCTAVIA_OWNER + port_create_dict['device_id'] = 'lb-1' + create_port = self.driver.network_proxy.create_port + create_port.return_value = port_create_dict + show_subnet = self.driver.network_proxy.get_subnet + show_subnet.return_value = Subnet(**{ + 'id': t_constants.MOCK_SUBNET_ID, + 'network_id': t_constants.MOCK_NETWORK_ID + }) + fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID, + network_id=t_constants.MOCK_NETWORK_ID, + ip_address=t_constants.MOCK_IP_ADDRESS) + fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip, + project_id='test-project') + vip, additional_vips = self.driver.allocate_vip(fake_lb) + exp_create_port_call = { + 'name': 'octavia-lb-1', + 'network_id': t_constants.MOCK_NETWORK_ID, + 'device_id': 'lb-1', + 'device_owner': constants.OCTAVIA_OWNER, + 'admin_state_up': False, + 'project_id': 'test-project', + 'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID, + 'ip_address': t_constants.MOCK_IP_ADDRESS}] + } + create_port.assert_called_once_with(**exp_create_port_call) + self.assertIsInstance(vip, data_models.Vip) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address) + self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id) + self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) + self.assertEqual(fake_lb.id, vip.load_balancer_id) + self.assertFalse(additional_vips) + + @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' 
+ '_check_extension_enabled', return_value=True) + def test_allocate_vip_when_no_port_no_fixed_ip(self, mock_check_ext): + port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict()) + port_create_dict['device_owner'] = constants.OCTAVIA_OWNER + port_create_dict['device_id'] = 'lb-1' + create_port = self.driver.network_proxy.create_port + create_port.return_value = port_create_dict + show_subnet = self.driver.network_proxy.get_subnet + show_subnet.return_value = { + 'id': t_constants.MOCK_SUBNET_ID, + 'network_id': t_constants.MOCK_NETWORK_ID + } + fake_lb_vip = data_models.Vip(network_id=t_constants.MOCK_NETWORK_ID) + fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip, + project_id='test-project') + vip, additional_vips = self.driver.allocate_vip(fake_lb) + exp_create_port_call = { + 'name': 'octavia-lb-1', + 'network_id': t_constants.MOCK_NETWORK_ID, + 'device_id': 'lb-1', + 'device_owner': constants.OCTAVIA_OWNER, + 'admin_state_up': False, + 'project_id': 'test-project'} + create_port.assert_called_once_with(**exp_create_port_call) + self.assertIsInstance(vip, data_models.Vip) + self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) + self.assertEqual(fake_lb.id, vip.load_balancer_id) + self.assertTrue(additional_vips) + + @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' + '_check_extension_enabled', return_value=False) + def test_allocate_vip_when_no_port_provided_tenant(self, mock_check_ext): + port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict()) + port_create_dict['device_owner'] = constants.OCTAVIA_OWNER + port_create_dict['device_id'] = 'lb-1' + create_port = self.driver.network_proxy.create_port + create_port.return_value = port_create_dict + show_subnet = self.driver.network_proxy.get_subnet + show_subnet.return_value = { + 'id': t_constants.MOCK_SUBNET_ID, + 'network_id': t_constants.MOCK_NETWORK_ID + } + fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID, + network_id=t_constants.MOCK_NETWORK_ID, + ip_address=t_constants.MOCK_IP_ADDRESS) + fake_lb = data_models.LoadBalancer(id='1', vip=fake_lb_vip, + project_id='test-project') + vip, additional_vips = self.driver.allocate_vip(fake_lb) + exp_create_port_call = { + 'name': 'octavia-lb-1', + 'network_id': t_constants.MOCK_NETWORK_ID, + 'device_id': 'lb-1', + 'device_owner': constants.OCTAVIA_OWNER, + 'admin_state_up': False, + 'tenant_id': 'test-project', + 'fixed_ips': [{'ip_address': t_constants.MOCK_IP_ADDRESS, + 'subnet_id': t_constants.MOCK_SUBNET_ID}] + } + create_port.assert_called_once_with(**exp_create_port_call) + self.assertIsInstance(vip, data_models.Vip) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address) + self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id) + self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) + self.assertEqual(fake_lb.id, vip.load_balancer_id) + self.assertFalse(additional_vips) + + @mock.patch('octavia.network.drivers.neutron.base.BaseNeutronDriver.' 
+ '_check_extension_enabled', return_value=False) + def test_allocate_vip_with_additional_vips(self, mock_check_ext): + port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict()) + port_create_dict['device_owner'] = constants.OCTAVIA_OWNER + port_create_dict['device_id'] = 'lb-1' + create_port = self.driver.network_proxy.create_port + create_port.return_value = port_create_dict + show_subnet = self.driver.network_proxy.get_subnet + show_subnet.return_value = { + 'id': t_constants.MOCK_SUBNET_ID, + 'network_id': t_constants.MOCK_NETWORK_ID + } + fake_lb_vip = data_models.Vip(subnet_id=t_constants.MOCK_SUBNET_ID, + network_id=t_constants.MOCK_NETWORK_ID, + ip_address=t_constants.MOCK_IP_ADDRESS) + fake_additional_vips = [ + data_models.AdditionalVip(ip_address=t_constants.MOCK_IP_ADDRESS2), + data_models.AdditionalVip(subnet_id=t_constants.MOCK_SUBNET_ID3)] + fake_lb = data_models.LoadBalancer( + id='1', vip=fake_lb_vip, + additional_vips=fake_additional_vips, + project_id='test-project') + vip, additional_vips = self.driver.allocate_vip(fake_lb) + exp_create_port_call = { + 'name': 'octavia-lb-1', + 'network_id': t_constants.MOCK_NETWORK_ID, + 'device_id': 'lb-1', + 'device_owner': constants.OCTAVIA_OWNER, + 'admin_state_up': False, + 'tenant_id': 'test-project', + 'fixed_ips': [ + {'ip_address': t_constants.MOCK_IP_ADDRESS, + 'subnet_id': t_constants.MOCK_SUBNET_ID}, + {'ip_address': t_constants.MOCK_IP_ADDRESS2}, + {'subnet_id': t_constants.MOCK_SUBNET_ID3}] + } + create_port.assert_called_once_with(**exp_create_port_call) + self.assertIsInstance(vip, data_models.Vip) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address) + self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id) + self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) + self.assertEqual(fake_lb.id, vip.load_balancer_id) + self.assertFalse(additional_vips) + + @mock.patch("time.time") + @mock.patch("time.sleep") + def test_unplug_aap_port_errors_when_update_port_cant_find_port( + self, mock_time_sleep, mock_time_time): + lb = dmh.generate_load_balancer_tree() + list_ports = self.driver.network_proxy.ports + port1 = t_constants.MOCK_NEUTRON_PORT + port2 = { + 'id': '4', 'network_id': '3', 'fixed_ips': + [{'ip_address': '10.0.0.2'}] + } + subnet = network_models.Subnet( + id=t_constants.MOCK_MANAGEMENT_SUBNET_ID, + network_id='3') + list_ports.side_effect = [ + iter([port1, port2]), + iter([port1, port2]), + iter([port1]), + ] + update_port = self.driver.network_proxy.update_port + update_port.side_effect = os_exceptions.ResourceNotFound + self.assertRaises(network_base.UnplugVIPException, + self.driver.unplug_aap_port, lb.vip, lb.amphorae[0], + subnet) + + @mock.patch("time.time") + @mock.patch("time.sleep") + def test_unplug_aap_errors_when_update_port_fails( + self, mock_time_sleep, mock_time_time): + lb = dmh.generate_load_balancer_tree() + port1 = t_constants.MOCK_NEUTRON_PORT + port2 = { + 'id': '4', 'network_id': '3', 'fixed_ips': + [{'ip_address': '10.0.0.2'}] + } + + subnet = network_models.Subnet( + id=t_constants.MOCK_MANAGEMENT_SUBNET_ID, + network_id='3') + + list_ports = self.driver.network_proxy.ports + list_ports.side_effect = [ + iter([port1, port2]), + iter([port1, port2]), + iter([port1]), + ] + mock_time_time.side_effect = [1, 1, 2] + + update_port = self.driver.network_proxy.update_port + update_port.side_effect = TypeError + self.assertRaises(network_base.UnplugVIPException, + self.driver.unplug_aap_port, lb.vip, + lb.amphorae[0], subnet) + + def 
test_unplug_vip_errors_when_vip_subnet_not_found(self):
+        lb = dmh.generate_load_balancer_tree()
+        show_subnet = self.driver.network_proxy.get_subnet
+        show_subnet.side_effect = os_exceptions.ResourceNotFound
+        self.assertRaises(network_base.PluggedVIPNotFound,
+                          self.driver.unplug_vip, lb, lb.vip)
+
+    @mock.patch('octavia.network.drivers.neutron.allowed_address_pairs.'
+                'AllowedAddressPairsDriver.unplug_aap_port')
+    def test_unplug_vip(self, mock_unplug_aap_port):
+        lb = dmh.generate_load_balancer_tree()
+        show_subnet = self.driver.network_proxy.get_subnet
+        show_subnet.return_value = t_constants.MOCK_SUBNET
+        self.driver.unplug_vip(lb, lb.vip)
+        self.assertEqual(len(lb.amphorae), mock_unplug_aap_port.call_count)
+
+    @mock.patch("time.time")
+    @mock.patch("time.sleep")
+    @mock.patch("octavia.network.drivers.neutron.allowed_address_pairs."
+                "AllowedAddressPairsDriver.unplug_network")
+    def test_unplug_aap_port(self, mock_unplug_network,
+                             mock_time_sleep, mock_time_time):
+        lb = dmh.generate_load_balancer_tree()
+        update_port = self.driver.network_proxy.update_port
+        port1 = t_constants.MOCK_NEUTRON_PORT
+        port2 = {
+            'id': '4', 'network_id': '3', 'fixed_ips':
+            [{'ip_address': '10.0.0.2'}]
+        }
+        subnet = network_models.Subnet(
+            id=t_constants.MOCK_MANAGEMENT_SUBNET_ID,
+            network_id='3')
+        list_ports = self.driver.network_proxy.ports
+        list_ports.side_effect = [
+            iter([port1, port2]),
+            iter([port1, port2]),
+            iter([port1]),
+        ]
+        mock_time_time.side_effect = [1, 1, 2]
+        get_port = self.driver.network_proxy.get_port
+        get_port.side_effect = os_exceptions.ResourceNotFound
+        self.driver.unplug_aap_port(lb.vip, lb.amphorae[0], subnet)
+        clear_aap = {'allowed_address_pairs': []}
+        update_port.assert_called_once_with(port2.get('id'), **clear_aap)
+        mock_unplug_network.assert_called_once_with(
+            lb.amphorae[0].compute_id, subnet.network_id)
+
+    def test_unplug_network_when_compute_port_cant_be_found(self):
+        net_id = t_constants.MOCK_NOVA_INTERFACE.net_id
+        list_ports = self.driver.network_proxy.ports
+        list_ports.return_value = iter([])
+        self.assertRaises(network_base.NetworkNotFound,
+                          self.driver.unplug_network,
+                          t_constants.MOCK_COMPUTE_ID, net_id)
+
+    def test_unplug_network_when_list_ports_fails(self):
+        net_id = t_constants.MOCK_NOVA_INTERFACE.net_id
+        list_ports = self.driver.network_proxy.ports
+        list_ports.side_effect = Exception
+        self.assertRaises(network_base.NetworkException,
+                          self.driver.unplug_network,
+                          t_constants.MOCK_COMPUTE_ID, net_id)
+
+    @mock.patch("time.time")
+    @mock.patch("time.sleep")
+    def test_unplug_network(self, mock_time_sleep, mock_time_time):
+        list_ports = self.driver.network_proxy.ports
+        port1 = t_constants.MOCK_NEUTRON_PORT
+        port2 = {
+            'id': '4', 'network_id': '3', 'fixed_ips':
+            [{'ip_address': '10.0.0.2'}]
+        }
+        list_ports.side_effect = [
+            iter([port1, port2]),
+            iter([port1, port2]),
+            iter([port1]),
+        ]
+        port_detach = self.driver.compute.detach_port
+
+        mock_time_time.side_effect = [1, 1, 2]
+
+        self.driver.unplug_network(t_constants.MOCK_COMPUTE_ID,
+                                   port2.get('network_id'))
+        port_detach.assert_called_once_with(
+            compute_id=t_constants.MOCK_COMPUTE_ID, port_id=port2.get('id'))
+
+        mock_time_sleep.assert_called_once()
+
+    @mock.patch("time.time")
+    @mock.patch("time.sleep")
+    @mock.patch("octavia.network.drivers.neutron.allowed_address_pairs.LOG")
+    def test_unplug_network_timeout(self, mock_log,
+                                    mock_time_sleep, mock_time_time):
+        list_ports = self.driver.network_proxy.ports
+        port1 = t_constants.MOCK_NEUTRON_PORT
+        port2 = Port(**{
+            'id': '4', 'network_id': '3',
'fixed_ips': + [{'ip_address': '10.0.0.2'}] + }) + + list_ports.side_effect = [iter([port1, port2]) for _ in range(7)] + port_detach = self.driver.compute.detach_port + + mock_time_time.side_effect = [0, 0, 1, 2, 10, 20, 100, 300] + + self.driver.unplug_network(t_constants.MOCK_COMPUTE_ID, + port2.get('network_id')) + port_detach.assert_called_once_with( + compute_id=t_constants.MOCK_COMPUTE_ID, port_id=port2.get('id')) + + self.assertEqual(6, len(mock_time_sleep.mock_calls)) + mock_log.warning.assert_called_once() + + def test_update_vip(self): + lc_1 = data_models.ListenerCidr('l1', '10.0.101.0/24') + lc_2 = data_models.ListenerCidr('l2', '10.0.102.0/24') + lc_3 = data_models.ListenerCidr('l2', '10.0.103.0/24') + lc_4 = data_models.ListenerCidr('l2', '2001:0DB8::/32') + listeners = [data_models.Listener(protocol_port=80, peer_port=1024, + protocol=constants.PROTOCOL_TCP, + allowed_cidrs=[lc_1]), + data_models.Listener(protocol_port=443, peer_port=1025, + protocol=constants.PROTOCOL_TCP, + allowed_cidrs=[lc_2, lc_3, lc_4]), + data_models.Listener(protocol_port=50, peer_port=1026, + protocol=constants.PROTOCOL_UDP)] + vip = data_models.Vip(ip_address='10.0.0.2') + + additional_vip = data_models.AdditionalVip( + ip_address=self.IPV6_ADDRESS_1) + lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip, + additional_vips=[additional_vip]) + list_sec_grps = self.driver.network_proxy.find_security_group + list_sec_grps.return_value = {'id': 'secgrp-1'} + fake_rules = [ + {'id': 'rule-80', 'port_range_max': 80, 'protocol': 'tcp', + 'remote_ip_prefix': '10.0.101.0/24'}, + {'id': 'rule-22', 'port_range_max': 22, 'protocol': 'tcp'}, + {'id': 'rule-None', 'port_range_max': 22}, + ] + list_rules = self.driver.network_proxy.security_group_rules + list_rules.return_value = fake_rules + delete_rule = self.driver.network_proxy.delete_security_group_rule + create_rule = self.driver.network_proxy.create_security_group_rule + self.driver.update_vip(lb) + delete_rule.assert_called_once_with('rule-22') + expected_create_rule_1 = { + 'security_group_id': 'secgrp-1', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 1024, + 'port_range_max': 1024, + 'ethertype': 'IPv4', + 'remote_ip_prefix': None + } + expected_create_rule_udp_peer = { + 'security_group_id': 'secgrp-1', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 1026, + 'port_range_max': 1026, + 'ethertype': 'IPv4', + 'remote_ip_prefix': None + } + expected_create_rule_2 = { + 'security_group_id': 'secgrp-1', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 1025, + 'port_range_max': 1025, + 'ethertype': 'IPv4', + 'remote_ip_prefix': None + } + expected_create_rule_3 = { + 'security_group_id': 'secgrp-1', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 443, + 'port_range_max': 443, + 'ethertype': 'IPv4', + 'remote_ip_prefix': '10.0.102.0/24' + } + expected_create_rule_4 = { + 'security_group_id': 'secgrp-1', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 443, + 'port_range_max': 443, + 'ethertype': 'IPv4', + 'remote_ip_prefix': '10.0.103.0/24' + } + expected_create_rule_5 = { + 'security_group_id': 'secgrp-1', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 443, + 'port_range_max': 443, + 'ethertype': 'IPv6', + 'remote_ip_prefix': '2001:0DB8::/32' + } + expected_create_rule_udp_1 = { + 'security_group_id': 'secgrp-1', + 'direction': 'ingress', + 'protocol': 'udp', + 'port_range_min': 50, + 'port_range_max': 50, + 'ethertype': 'IPv4', + 
'remote_ip_prefix': None + } + expected_create_rule_udp_2 = { + 'security_group_id': 'secgrp-1', + 'direction': 'ingress', + 'protocol': 'udp', + 'port_range_min': 50, + 'port_range_max': 50, + 'ethertype': 'IPv6', + 'remote_ip_prefix': None + } + + create_rule.assert_has_calls([mock.call(**expected_create_rule_1), + mock.call( + **expected_create_rule_udp_peer), + mock.call(**expected_create_rule_2), + mock.call(**expected_create_rule_3), + mock.call(**expected_create_rule_4), + mock.call(**expected_create_rule_5), + mock.call(**expected_create_rule_udp_1), + mock.call(**expected_create_rule_udp_2)], + any_order=True) + + def test_update_vip_when_protocol_and_peer_ports_overlap(self): + lc_1 = data_models.ListenerCidr('l1', '0.0.0.0/0') + listeners = [data_models.Listener(protocol_port=80, peer_port=1024, + protocol=constants.PROTOCOL_TCP), + data_models.Listener(protocol_port=443, peer_port=1025, + protocol=constants.PROTOCOL_TCP), + data_models.Listener(protocol_port=1025, peer_port=1026, + protocol=constants.PROTOCOL_TCP, + allowed_cidrs=[lc_1])] + vip = data_models.Vip(ip_address='10.0.0.2') + lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip) + list_sec_grps = self.driver.network_proxy.find_security_group + list_sec_grps.return_value = {'id': 'secgrp-1'} + fake_rules = [ + {'id': 'rule-80', 'port_range_max': 80, 'protocol': 'tcp'}, + {'id': 'rule-22', 'port_range_max': 22, 'protocol': 'tcp'} + ] + list_rules = self.driver.network_proxy.security_group_rules + list_rules.return_value = fake_rules + delete_rule = self.driver.network_proxy.delete_security_group_rule + create_rule = self.driver.network_proxy.create_security_group_rule + self.driver.update_vip(lb) + delete_rule.assert_called_once_with('rule-22') + + # Create SG rule calls should be 4, each for port 1024/1025/1026/443 + # No duplicate SG creation for overlap port 1025 + self.assertEqual(4, create_rule.call_count) + + def test_update_vip_when_listener_deleted(self): + listeners = [data_models.Listener(protocol_port=80, + protocol=constants.PROTOCOL_TCP), + data_models.Listener( + protocol_port=443, + protocol=constants.PROTOCOL_TCP, + provisioning_status=constants.PENDING_DELETE), + data_models.Listener( + protocol_port=50, protocol=constants.PROTOCOL_UDP, + provisioning_status=constants.PENDING_DELETE)] + vip = data_models.Vip(ip_address='10.0.0.2') + lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip) + list_sec_grps = self.driver.network_proxy.find_security_group + list_sec_grps.return_value = {'id': 'secgrp-1'} + fake_rules = [ + {'id': 'rule-80', 'port_range_max': 80, 'protocol': 'tcp'}, + {'id': 'rule-22', 'port_range_max': 443, 'protocol': 'tcp'}, + {'id': 'rule-udp-50', 'port_range_max': 50, 'protocol': 'tcp'} + ] + list_rules = self.driver.network_proxy.security_group_rules + list_rules.return_value = fake_rules + delete_rule = self.driver.network_proxy.delete_security_group_rule + create_rule = self.driver.network_proxy.create_security_group_rule + self.driver.update_vip(lb) + delete_rule.assert_has_calls( + [mock.call('rule-22'), mock.call('rule-udp-50')]) + self.assertTrue(create_rule.called) + + def test_update_vip_when_no_listeners(self): + listeners = [] + vip = data_models.Vip(ip_address='10.0.0.2') + lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip) + list_sec_grps = self.driver.network_proxy.find_security_group + list_sec_grps.return_value = {'id': 'secgrp-1'} + fake_rules = [ + {'id': 'all-egress', 'protocol': None, 'direction': 'egress'}, + {'id': 
'ssh-rule', 'protocol': 'tcp', 'port_range_max': 22} + ] + list_rules = self.driver.network_proxy.security_group_rules + list_rules.return_value = fake_rules + delete_rule = self.driver.network_proxy.delete_security_group_rule + self.driver.update_vip(lb) + delete_rule.assert_called_once_with('ssh-rule') + + def test_update_vip_when_security_group_rule_deleted(self): + listeners = [] + vip = data_models.Vip(ip_address='10.0.0.2') + lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip) + list_sec_grps = self.driver.network_proxy.find_security_group + list_sec_grps.return_value = {'id': 'secgrp-1'} + fake_rules = [ + {'id': 'all-egress', 'protocol': None, 'direction': 'egress'}, + {'id': 'ssh-rule', 'protocol': 'tcp', 'port_range_max': 22} + ] + list_rules = self.driver.network_proxy.security_group_rules + list_rules.return_value = fake_rules + delete_rule = self.driver.network_proxy.delete_security_group_rule + delete_rule.side_effect = os_exceptions.ResourceNotFound + self.driver.update_vip(lb) + delete_rule.assert_called_once_with('ssh-rule') + + def test_update_vip_when_security_group_missing(self): + listeners = [] + vip = data_models.Vip(ip_address='10.0.0.2') + lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip) + list_sec_grps = self.driver.network_proxy.find_security_group + list_sec_grps.return_value = None + self.assertRaises(exceptions.MissingVIPSecurityGroup, + self.driver.update_vip, + lb) + + @mock.patch('octavia.network.drivers.neutron.allowed_address_pairs.' + 'AllowedAddressPairsDriver._update_security_group_rules') + def test_update_vip_for_delete_when_security_group_missing(self, + update_rules): + listeners = [] + vip = data_models.Vip(ip_address='10.0.0.2') + lb = data_models.LoadBalancer(id='1', listeners=listeners, vip=vip) + list_sec_grps = self.driver.network_proxy.find_security_group + list_sec_grps.return_value = None + self.driver.update_vip(lb, for_delete=True) + update_rules.assert_not_called() + + def test_failover_preparation(self): + original_dns_integration_state = self.driver.dns_integration_enabled + self.driver.dns_integration_enabled = False + ports = [ + Port(**{"fixed_ips": [{"subnet_id": self.SUBNET_ID_1, + "ip_address": self.IP_ADDRESS_1}], + "id": self.FIXED_IP_ID_1, + "network_id": self.NETWORK_ID_1}), + Port(**{"fixed_ips": [{"subnet_id": self.SUBNET_ID_2, + "ip_address": self.IP_ADDRESS_2}], + "id": self.FIXED_IP_ID_2, + "network_id": self.NETWORK_ID_2})] + self.driver.network_proxy.ports.return_value = ports + self.driver.network_proxy.get_port = mock.Mock( + side_effect=self._failover_show_port_side_effect) + port_update = self.driver.network_proxy.update_port + amphora = data_models.Amphora( + id=self.AMPHORA_ID, load_balancer_id=self.LB_ID, + compute_id=self.COMPUTE_ID, status=self.ACTIVE, + lb_network_ip=self.LB_NET_IP, ha_port_id=self.HA_PORT_ID, + ha_ip=self.HA_IP) + self.driver.failover_preparation(amphora) + self.assertFalse(port_update.called) + self.driver.dns_integration_enabled = original_dns_integration_state + + def test_failover_preparation_dns_integration(self): + ports = [ + Port(**{"fixed_ips": [{"subnet_id": self.SUBNET_ID_1, + "ip_address": self.IP_ADDRESS_1}], + "id": self.FIXED_IP_ID_1, + "network_id": self.NETWORK_ID_1}), + Port(**{"fixed_ips": [{"subnet_id": self.SUBNET_ID_2, + "ip_address": self.IP_ADDRESS_2}], + "id": self.FIXED_IP_ID_2, + "network_id": self.NETWORK_ID_2})] + original_dns_integration_state = self.driver.dns_integration_enabled + self.driver.dns_integration_enabled = True + 
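# With DNS integration enabled, failover preparation is expected to
+        # clear the dns_name on the amphora's HA port (asserted below).
+        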
self.driver.network_proxy.ports.return_value = ports + self.driver.network_proxy.get_port = mock.Mock( + side_effect=self._failover_show_port_side_effect) + port_update = self.driver.network_proxy.update_port + amphora = data_models.Amphora( + id=self.AMPHORA_ID, load_balancer_id=self.LB_ID, + compute_id=self.COMPUTE_ID, status=self.ACTIVE, + lb_network_ip=self.LB_NET_IP, ha_port_id=self.HA_PORT_ID, + ha_ip=self.HA_IP) + self.driver.failover_preparation(amphora) + port_update.assert_called_once_with(ports[1].get('id'), + dns_name='') + self.driver.dns_integration_enabled = original_dns_integration_state + + def _failover_show_port_side_effect(self, port_id): + if port_id == self.LB_NET_PORT_ID: + return Port(**{"fixed_ips": [{"subnet_id": self.SUBNET_ID_1, + "ip_address": self.IP_ADDRESS_1}], + "id": self.FIXED_IP_ID_1, + "network_id": self.NETWORK_ID_1}) + if port_id == self.HA_PORT_ID: + return Port(**{"fixed_ips": [{"subnet_id": self.SUBNET_ID_2, + "ip_address": self.IP_ADDRESS_2}], + "id": self.FIXED_IP_ID_2, + "network_id": self.NETWORK_ID_2}) + + def test_plug_port(self): + port = mock.MagicMock() + port.id = self.PORT_ID + network_attach = self.driver.compute.attach_network_or_port + network_attach.return_value = t_constants.MOCK_NOVA_INTERFACE + amphora = data_models.Amphora( + id=self.AMPHORA_ID, load_balancer_id=self.LB_ID, + compute_id=self.COMPUTE_ID, status=self.ACTIVE, + lb_network_ip=self.LB_NET_IP, ha_port_id=self.HA_PORT_ID, + ha_ip=self.HA_IP) + + self.driver.plug_port(amphora, port) + network_attach.assert_called_once_with(compute_id=amphora.compute_id, + network_id=None, + ip_address=None, + port_id=self.PORT_ID) + + # NotFound cases + network_attach.side_effect = exceptions.NotFound( + resource='Instance', id=1) + self.assertRaises(network_base.AmphoraNotFound, + self.driver.plug_port, + amphora, + port) + network_attach.side_effect = exceptions.NotFound( + resource='Network', id=1) + self.assertRaises(network_base.NetworkNotFound, + self.driver.plug_port, + amphora, + port) + network_attach.side_effect = exceptions.NotFound( + resource='bogus', id=1) + self.assertRaises(network_base.PlugNetworkException, + self.driver.plug_port, + amphora, + port) + + # Already plugged case should not raise an exception + network_attach.side_effect = nova_exceptions.Conflict(1) + self.driver.plug_port(amphora, port) + + # Unknown error case + network_attach.side_effect = TypeError + self.assertRaises(network_base.PlugNetworkException, + self.driver.plug_port, + amphora, + port) + + def test_get_network_configs(self): + amphora_mock = mock.MagicMock() + amphora2_mock = mock.MagicMock() + load_balancer_mock = mock.MagicMock() + vip_mock = mock.MagicMock() + amphora_mock.status = constants.DELETED + load_balancer_mock.amphorae = [amphora_mock] + show_port = self.driver.network_proxy.get_port + show_port.side_effect = [ + t_constants.MOCK_NEUTRON_PORT, t_constants.MOCK_NEUTRON_PORT, + t_constants.MOCK_NEUTRON_PORT, t_constants.MOCK_NEUTRON_PORT, + t_constants.MOCK_NEUTRON_PORT, t_constants.MOCK_NEUTRON_PORT, + t_constants.MOCK_NEUTRON_PORT, t_constants.MOCK_NEUTRON_PORT, + t_constants.MOCK_NEUTRON_PORT, t_constants.MOCK_NEUTRON_PORT, + t_constants.MOCK_NEUTRON_PORT, t_constants.MOCK_NEUTRON_PORT, + t_constants.MOCK_NEUTRON_PORT, t_constants.MOCK_NEUTRON_PORT, + Exception('boom')] + fake_subnet = Subnet(**{ + 'id': t_constants.MOCK_SUBNET_ID, + 'gateway_ip': t_constants.MOCK_IP_ADDRESS, + 'cidr': t_constants.MOCK_CIDR}) + fake_subnet2 = Subnet(**{ + 'id': t_constants.MOCK_SUBNET_ID2, + 
'gateway_ip': t_constants.MOCK_IP_ADDRESS2, + 'cidr': t_constants.MOCK_CIDR}) + show_subnet = self.driver.network_proxy.get_subnet + show_subnet.return_value = fake_subnet + configs = self.driver.get_network_configs(load_balancer_mock) + self.assertEqual({}, configs) + + vip_mock.port_id = 1 + amphora_mock.id = 222 + amphora_mock.status = constants.ACTIVE + amphora_mock.vrrp_port_id = 2 + amphora_mock.vrrp_ip = "10.0.0.1" + amphora_mock.ha_port_id = 3 + amphora_mock.ha_ip = "10.0.0.2" + amphora2_mock.id = 333 + amphora2_mock.status = constants.ACTIVE + amphora2_mock.vrrp_port_id = 3 + amphora2_mock.vrrp_ip = "10.0.0.2" + amphora2_mock.ha_port_id = 4 + amphora2_mock.ha_ip = "10.0.0.3" + + configs = self.driver.get_network_configs(load_balancer_mock) + self.assertEqual(1, len(configs)) + config = configs[222] + # TODO(ptoohill): find a way to return different items for multiple + # calls to the same method, right now each call to show subnet + # will return the same values if a method happens to call it + # multiple times for different subnets. We should be able to verify + # different requests get different expected data. + expected_port_id = t_constants.MOCK_NEUTRON_PORT['id'] + self.assertEqual(expected_port_id, config.ha_port.id) + self.assertEqual(expected_port_id, config.vrrp_port.id) + expected_subnet_id = fake_subnet['id'] + self.assertEqual(expected_subnet_id, config.ha_subnet.id) + self.assertEqual(expected_subnet_id, config.vrrp_subnet.id) + + # Test with additional_vips + load_balancer_mock.additional_vips = [ + data_models.AdditionalVip( + subnet_id=t_constants.MOCK_SUBNET_ID2, + ip_address=t_constants.MOCK_IP_ADDRESS2) + ] + show_subnet.side_effect = [ + fake_subnet, + fake_subnet, + fake_subnet, + fake_subnet2] + + configs = self.driver.get_network_configs(load_balancer_mock, + amphora_mock) + self.assertEqual(1, len(configs)) + config = configs[222] + self.assertEqual(t_constants.MOCK_SUBNET_ID2, + config.additional_vip_data[0].subnet.id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS2, + config.additional_vip_data[0].ip_address) + + show_subnet.reset_mock(side_effect=True) + show_subnet.return_value = fake_subnet + + # Test with a specific amphora + configs = self.driver.get_network_configs(load_balancer_mock, + amphora_mock) + self.assertEqual(1, len(configs)) + config = configs[222] + # TODO(ptoohill): find a way to return different items for multiple + # calls to the same method, right now each call to show subnet + # will return the same values if a method happens to call it + # multiple times for different subnets. We should be able to verify + # different requests get different expected data. + expected_port_id = t_constants.MOCK_NEUTRON_PORT['id'] + self.assertEqual(expected_port_id, config.ha_port.id) + self.assertEqual(expected_port_id, config.vrrp_port.id) + expected_subnet_id = fake_subnet['id'] + self.assertEqual(expected_subnet_id, config.ha_subnet.id) + self.assertEqual(expected_subnet_id, config.vrrp_subnet.id) + + # Test with a load balancer with two amphora, one that has a + # neutron problem. 
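+        # The get_port side_effect queue for this test ends with
+        # Exception('boom'), so the lookup for the second amphora fails and
+        # only one network config entry is expected below.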
+ load_balancer_mock.amphorae = [amphora_mock, amphora2_mock] + configs = self.driver.get_network_configs(load_balancer_mock) + self.assertEqual(1, len(configs)) + + def test_delete_port(self): + PORT_ID = uuidutils.generate_uuid() + + self.driver.network_proxy.delete_port.side_effect = [ + mock.DEFAULT, os_exceptions.ResourceNotFound, + Exception('boom')] + + # Test successful delete + self.driver.delete_port(PORT_ID) + + self.driver.network_proxy.delete_port.assert_called_once_with(PORT_ID) + + # Test port NotFound (does not raise) + self.driver.delete_port(PORT_ID) + + # Test unknown exception + self.assertRaises(exceptions.NetworkServiceError, + self.driver.delete_port, PORT_ID) + + def test_set_port_admin_state_up(self): + PORT_ID = uuidutils.generate_uuid() + TEST_STATE = 'test state' + + self.driver.network_proxy.update_port.side_effect = [ + mock.DEFAULT, os_exceptions.ResourceNotFound, Exception('boom')] + + # Test successful state set + self.driver.set_port_admin_state_up(PORT_ID, TEST_STATE) + + self.driver.network_proxy.update_port.assert_called_once_with( + PORT_ID, admin_state_up=TEST_STATE) + + # Test port NotFound + self.assertRaises(network_base.PortNotFound, + self.driver.set_port_admin_state_up, + PORT_ID, TEST_STATE) + + # Test unknown exception + self.assertRaises(exceptions.NetworkServiceError, + self.driver.set_port_admin_state_up, PORT_ID, + TEST_STATE) + + def test_create_port(self): + ADMIN_STATE_UP = False + FAKE_NAME = 'fake_name' + IP_ADDRESS1 = '203.0.113.71' + IP_ADDRESS2 = '203.0.113.72' + IP_ADDRESS3 = '203.0.113.73' + NETWORK_ID = uuidutils.generate_uuid() + QOS_POLICY_ID = uuidutils.generate_uuid() + SECONDARY_IPS = [IP_ADDRESS2, IP_ADDRESS3] + SECURITY_GROUP_ID = uuidutils.generate_uuid() + SUBNET1_ID = uuidutils.generate_uuid() + FIXED_IPS = [{'subnet_id': SUBNET1_ID, 'ip_address': IP_ADDRESS1}] + + MOCK_NEUTRON_PORT = Port(**{ + 'network_id': NETWORK_ID, 'device_id': t_constants.MOCK_DEVICE_ID, + 'device_owner': t_constants.MOCK_DEVICE_OWNER, + 'id': t_constants.MOCK_PORT_ID, 'name': FAKE_NAME, + 'tenant_id': t_constants.MOCK_PROJECT_ID, + 'admin_state_up': ADMIN_STATE_UP, + 'status': t_constants.MOCK_STATUS, + 'mac_address': t_constants.MOCK_MAC_ADDR, + 'fixed_ips': [{'ip_address': IP_ADDRESS1, + 'subnet_id': SUBNET1_ID}], + 'security_groups': [], + 'qos_policy_id': QOS_POLICY_ID, + 'binding_vnic_type': constants.VNIC_TYPE_NORMAL}) + + reference_port_dict = {'admin_state_up': ADMIN_STATE_UP, + 'device_id': t_constants.MOCK_DEVICE_ID, + 'device_owner': t_constants.MOCK_DEVICE_OWNER, + 'fixed_ips': [], + 'id': t_constants.MOCK_PORT_ID, + 'mac_address': t_constants.MOCK_MAC_ADDR, + 'name': FAKE_NAME, + 'network': None, + 'network_id': NETWORK_ID, + 'project_id': t_constants.MOCK_PROJECT_ID, + 'qos_policy_id': QOS_POLICY_ID, + 'security_group_ids': [], + 'status': t_constants.MOCK_STATUS, + 'vnic_type': constants.VNIC_TYPE_NORMAL} + + self.driver.network_proxy.create_port.side_effect = [ + MOCK_NEUTRON_PORT, MOCK_NEUTRON_PORT, Exception('boom')] + + # Test successful path + result = self.driver.create_port( + NETWORK_ID, name=FAKE_NAME, fixed_ips=FIXED_IPS, + secondary_ips=SECONDARY_IPS, + security_group_ids=[SECURITY_GROUP_ID], admin_state_up=False, + qos_policy_id=QOS_POLICY_ID) + + self.assertEqual(reference_port_dict, result.to_dict()) + self.driver.network_proxy.create_port.assert_called_once_with( + **{ + 'network_id': NETWORK_ID, 'admin_state_up': ADMIN_STATE_UP, + 'device_owner': constants.OCTAVIA_OWNER, + 'allowed_address_pairs': [ + 
{'ip_address': IP_ADDRESS2}, {'ip_address': IP_ADDRESS3}], + 'fixed_ips': [{ + 'subnet_id': SUBNET1_ID, 'ip_address': IP_ADDRESS1}], + 'name': FAKE_NAME, 'qos_policy_id': QOS_POLICY_ID, + 'security_groups': [SECURITY_GROUP_ID], + 'binding_vnic_type': constants.VNIC_TYPE_NORMAL}) + + # Test minimal successful path + result = self.driver.create_port(NETWORK_ID) + + self.assertEqual(reference_port_dict, result.to_dict()) + + # Test exception + self.assertRaises(network_base.CreatePortException, + self.driver.create_port, NETWORK_ID, name=FAKE_NAME, + fixed_ips=FIXED_IPS, secondary_ips=SECONDARY_IPS, + security_group_ids=[SECURITY_GROUP_ID], + admin_state_up=False, qos_policy_id=QOS_POLICY_ID) + + def test_get_security_group(self): + + # Test the case of security groups disabled in neutron + FAKE_SG_NAME = 'Fake_SG_name' + FAKE_NEUTRON_SECURITY_GROUPS = iter([t_constants.MOCK_SECURITY_GROUP]) + reference_sg_dict = {'id': t_constants.MOCK_SECURITY_GROUP_ID, + 'name': t_constants.MOCK_SECURITY_GROUP_NAME, + 'description': '', 'tags': [], + 'security_group_rule_ids': [], + 'stateful': None, + 'project_id': t_constants.MOCK_PROJECT_ID} + + network_proxy = self.driver.network_proxy + network_proxy.security_groups.side_effect = [ + FAKE_NEUTRON_SECURITY_GROUPS, iter([]), Exception('boom')] + + self.driver.sec_grp_enabled = False + result = self.driver.get_security_group(FAKE_SG_NAME) + + self.assertIsNone(result) + network_proxy.security_groups.assert_not_called() + + # Test successful get of the security group + self.driver.sec_grp_enabled = True + + result = self.driver.get_security_group(FAKE_SG_NAME) + + self.assertEqual(reference_sg_dict, result.to_dict()) + network_proxy.security_groups.assert_called_once_with( + name=FAKE_SG_NAME) + + # Test no security groups returned + self.assertRaises(network_base.SecurityGroupNotFound, + self.driver.get_security_group, FAKE_SG_NAME) + + # Test with an unknown exception + self.assertRaises(network_base.NetworkException, + self.driver.get_security_group, FAKE_SG_NAME) diff --git a/octavia/tests/unit/network/drivers/neutron/test_base.py b/octavia/tests/unit/network/drivers/neutron/test_base.py new file mode 100644 index 0000000000..d705a49478 --- /dev/null +++ b/octavia/tests/unit/network/drivers/neutron/test_base.py @@ -0,0 +1,695 @@ +# Copyright 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture + +from octavia.common import data_models +from octavia.network import base as network_base +from octavia.network import data_models as network_models +from octavia.network.drivers.neutron import base as neutron_base +from octavia.network.drivers.neutron import utils +from octavia.tests.common import constants as t_constants +from octavia.tests.common import data_model_helpers as dmh +from octavia.tests.unit import base +import openstack.exceptions as os_exceptions +from openstack.network.v2.network import Network +from openstack.network.v2.network_ip_availability import NetworkIPAvailability +from openstack.network.v2.port import Port +from openstack.network.v2.qos_policy import QoSPolicy +from openstack.network.v2.subnet import Subnet + + +class TestBaseNeutronNetworkDriver(base.TestCase): + + def _instantiate_partial_abc(self, abclass): + if "__abstractmethods__" not in abclass.__dict__: + return abclass() + new_dict = abclass.__dict__.copy() + for abstractmethod in abclass.__abstractmethods__: + new_dict[abstractmethod] = lambda x, *args, **kw: (x, args, kw) + impl_class = type(f"partially_implemented_abc_{abclass.__name__}", + (abclass,), new_dict) + return impl_class() + + def setUp(self): + super().setUp() + with mock.patch('octavia.common.clients.openstack.connection.' + 'Connection', autospec=True) as os_connection: + self._original_find_extension = ( + os_connection.return_value.network.find_extension) + os_connection.return_value.network.find_extension = ( + lambda x: 'alias' if x == neutron_base.SEC_GRP_EXT_ALIAS else + None) + self.k_session = mock.patch( + 'keystoneauth1.session.Session').start() + self.driver = self._instantiate_partial_abc( + neutron_base.BaseNeutronDriver) + + def test__check_extension_enabled(self): + with mock.patch.object(self.driver.network_proxy, "find_extension", + side_effect=[True, False]) as show_extension: + self.assertTrue(self.driver._check_extension_enabled('TEST1')) + self.assertFalse(self.driver._check_extension_enabled('TEST2')) + show_extension.assert_has_calls( + [mock.call('TEST1'), mock.call('TEST2')]) + + def test__check_extension_enabled_cached(self): + with mock.patch.object(self.driver.network_proxy, "find_extension", + ) as show_extension: + self.driver._check_extension_cache = {'TEST1': True, + 'TEST2': False} + self.assertTrue(self.driver._check_extension_enabled('TEST1')) + self.assertFalse(self.driver._check_extension_enabled('TEST2')) + self.assertNotIn(mock.call('TEST1'), show_extension.mock_calls) + self.assertNotIn(mock.call('TEST2'), show_extension.mock_calls) + + def test__add_allowed_address_pair_to_port(self): + self.driver._add_allowed_address_pairs_to_port( + t_constants.MOCK_PORT_ID, [t_constants.MOCK_IP_ADDRESS]) + expected_aap_dict = { + 'allowed_address_pairs': [ + {'ip_address': t_constants.MOCK_IP_ADDRESS}]} + self.driver.network_proxy.update_port.assert_has_calls([ + mock.call(t_constants.MOCK_PORT_ID, **expected_aap_dict)]) + + def test__update_security_groups(self): + self.driver._update_security_groups( + [t_constants.MOCK_SECURITY_GROUP_ID], t_constants.MOCK_PORT_ID) + expected_sg_dict = { + 'security_groups': [ + t_constants.MOCK_SECURITY_GROUP_ID]} + self.driver.network_proxy.update_port.assert_has_calls([ + mock.call(t_constants.MOCK_PORT_ID, **expected_sg_dict)]) + + def test__update_security_groups_with_port_not_found(self): + self.driver.network_proxy.update_port.side_effect = ( + 
os_exceptions.ResourceNotFound) + self.assertRaises( + network_base.PortNotFound, + self.driver._update_security_groups, + t_constants.MOCK_SECURITY_GROUP_ID, t_constants.MOCK_PORT_ID) + + def test__update_security_groups_with_other_exception(self): + self.driver.network_proxy.update_port.side_effect = IOError + self.assertRaises( + network_base.NetworkException, + self.driver._update_security_groups, + t_constants.MOCK_SECURITY_GROUP_ID, t_constants.MOCK_PORT_ID) + + def test__get_ports_by_security_group(self): + self.driver.network_proxy.ports.return_value = [ + t_constants.MOCK_NEUTRON_PORT, + t_constants.MOCK_NEUTRON_PORT2] + ports = self.driver._get_ports_by_security_group( + t_constants.MOCK_SECURITY_GROUP_ID) + self.assertEqual(1, len(ports)) + self.assertIn(t_constants.MOCK_NEUTRON_PORT, ports) + + def test__create_security_group(self): + sg_return = self.driver._create_security_group( + t_constants.MOCK_SECURITY_GROUP_NAME) + expected_sec_grp_dict = { + 'name': t_constants.MOCK_SECURITY_GROUP_NAME} + self.driver.network_proxy.create_security_group.assert_has_calls([ + mock.call(**expected_sec_grp_dict)]) + self.assertEqual( + sg_return, + self.driver.network_proxy.create_security_group()) + + def test__create_security_group_rule(self): + self.driver._create_security_group_rule( + sec_grp_id=t_constants.MOCK_SECURITY_GROUP_ID, + direction=1, + protocol=2, + port_min=3, + port_max=4, + ethertype=5, + cidr="10.0.0.0/24") + expected_sec_grp_rule_dict = { + 'security_group_id': t_constants.MOCK_SECURITY_GROUP_ID, + 'direction': 1, + 'protocol': 2, + 'port_range_min': 3, + 'port_range_max': 4, + 'ethertype': 5, + 'remote_ip_prefix': '10.0.0.0/24'} + self.driver.network_proxy.create_security_group_rule.assert_has_calls( + [mock.call(**expected_sec_grp_rule_dict)]) + + def test__port_to_vip(self): + lb = dmh.generate_load_balancer_tree() + lb.vip.subnet_id = t_constants.MOCK_SUBNET_ID + lb.vip.ip_address = t_constants.MOCK_IP_ADDRESS + port = utils.convert_port_to_model(t_constants.MOCK_NEUTRON_PORT) + vip, additional_vips = self.driver._port_to_vip(port, lb) + self.assertIsInstance(vip, data_models.Vip) + self.assertIsInstance(additional_vips, list) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, vip.ip_address) + self.assertEqual(t_constants.MOCK_SUBNET_ID, vip.subnet_id) + self.assertEqual(t_constants.MOCK_PORT_ID, vip.port_id) + self.assertEqual(lb.id, vip.load_balancer_id) + + def test__nova_interface_to_octavia_interface(self): + nova_interface = t_constants.MockNovaInterface() + nova_interface.net_id = '1' + nova_interface.port_id = '2' + nova_interface.fixed_ips = [{'ip_address': '10.0.0.1'}] + interface = self.driver._nova_interface_to_octavia_interface( + '3', nova_interface) + self.assertEqual('1', interface.network_id) + self.assertEqual('2', interface.port_id) + ips = [fixed_ip.ip_address for fixed_ip in interface.fixed_ips] + self.assertIn('10.0.0.1', ips) + + def test_get_plugged_networks(self): + list_ports = self.driver.network_proxy.ports + list_ports.side_effect = TypeError + o_ifaces = self.driver.get_plugged_networks( + t_constants.MOCK_DEVICE_ID) + self.assertEqual(0, len(o_ifaces)) + list_ports.side_effect = None + list_ports.reset_mock() + port1 = t_constants.MOCK_NEUTRON_PORT + port2 = { + 'id': '4', 'network_id': '3', 'fixed_ips': + [{'ip_address': '10.0.0.2'}] + } + list_ports.return_value = [port1, port2] + plugged_networks = self.driver.get_plugged_networks( + t_constants.MOCK_DEVICE_ID) + for pn in plugged_networks: + self.assertIn(pn.port_id, 
[port1.get('id'), port2.get('id')]) + self.assertIn(pn.network_id, [port1.get('network_id'), + port2.get('network_id')]) + for fixed_ip in pn.fixed_ips: + self.assertIn(fixed_ip.ip_address, + [port1['fixed_ips'][0]['ip_address'], + port2['fixed_ips'][0]['ip_address']]) + + def test_get_network(self): + config = self.useFixture(oslo_fixture.Config(cfg.CONF)) + config.config(group="networking", allow_invisible_resource_usage=True) + + show_network = self.driver.network_proxy.get_network + show_network.return_value = Network(**{ + 'id': t_constants.MOCK_NETWORK_ID, + 'subnets': [t_constants.MOCK_SUBNET_ID]}) + network = self.driver.get_network(t_constants.MOCK_NETWORK_ID) + self.assertIsInstance(network, network_models.Network) + self.assertEqual(t_constants.MOCK_NETWORK_ID, network.id) + self.assertEqual(1, len(network.subnets)) + self.assertEqual(t_constants.MOCK_SUBNET_ID, network.subnets[0]) + + @mock.patch("octavia.common.clients.NeutronAuth.get_user_neutron_client") + def test_get_user_network(self, neutron_client_mock): + show_network = neutron_client_mock.return_value.get_network + show_network.return_value = Network(**{ + 'id': t_constants.MOCK_NETWORK_ID, + 'subnets': [t_constants.MOCK_SUBNET_ID]}) + + network = self.driver.get_network(t_constants.MOCK_NETWORK_ID, + context=mock.ANY) + + self.assertIsInstance(network, network_models.Network) + self.assertEqual(t_constants.MOCK_NETWORK_ID, network.id) + self.assertEqual(1, len(network.subnets)) + self.assertEqual(t_constants.MOCK_SUBNET_ID, network.subnets[0]) + + def test_get_subnet(self): + config = self.useFixture(oslo_fixture.Config(cfg.CONF)) + config.config(group="networking", allow_invisible_resource_usage=True) + + show_subnet = self.driver.network_proxy.get_subnet + show_subnet.return_value = Subnet(**{ + 'id': t_constants.MOCK_SUBNET_ID, + 'gateway_ip': t_constants.MOCK_IP_ADDRESS, + 'cidr': t_constants.MOCK_CIDR}) + subnet = self.driver.get_subnet(t_constants.MOCK_SUBNET_ID) + self.assertIsInstance(subnet, network_models.Subnet) + self.assertEqual(t_constants.MOCK_SUBNET_ID, subnet.id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, subnet.gateway_ip) + self.assertEqual(t_constants.MOCK_CIDR, subnet.cidr) + + @mock.patch("octavia.common.clients.NeutronAuth.get_user_neutron_client") + def test_get_user_subnet(self, neutron_client_mock): + show_subnet = neutron_client_mock.return_value.get_subnet + show_subnet.return_value = Subnet(**{ + 'id': t_constants.MOCK_SUBNET_ID, + 'gateway_ip': t_constants.MOCK_IP_ADDRESS, + 'cidr': t_constants.MOCK_CIDR}) + + subnet = self.driver.get_subnet(t_constants.MOCK_SUBNET_ID, + context=mock.ANY) + + self.assertIsInstance(subnet, network_models.Subnet) + self.assertEqual(t_constants.MOCK_SUBNET_ID, subnet.id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, subnet.gateway_ip) + self.assertEqual(t_constants.MOCK_CIDR, subnet.cidr) + + def test_get_port(self): + config = self.useFixture(oslo_fixture.Config(cfg.CONF)) + config.config(group="networking", allow_invisible_resource_usage=True) + + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port(**{ + 'id': t_constants.MOCK_PORT_ID, + 'mac_address': t_constants.MOCK_MAC_ADDR, + 'network_id': t_constants.MOCK_NETWORK_ID, + 'fixed_ips': [{ + 'subnet_id': t_constants.MOCK_SUBNET_ID, + 'ip_address': t_constants.MOCK_IP_ADDRESS + }]}) + port = self.driver.get_port(t_constants.MOCK_PORT_ID) + self.assertIsInstance(port, network_models.Port) + self.assertEqual(t_constants.MOCK_PORT_ID, port.id) + 
self.assertEqual(t_constants.MOCK_MAC_ADDR, port.mac_address) + self.assertEqual(t_constants.MOCK_NETWORK_ID, port.network_id) + self.assertEqual(1, len(port.fixed_ips)) + self.assertIsInstance(port.fixed_ips[0], network_models.FixedIP) + self.assertEqual(t_constants.MOCK_SUBNET_ID, + port.fixed_ips[0].subnet_id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, + port.fixed_ips[0].ip_address) + + @mock.patch("octavia.common.clients.NeutronAuth.get_user_neutron_client") + def test_get_user_port(self, neutron_client_mock): + show_port = neutron_client_mock.return_value.get_port + show_port.return_value = Port(**{ + 'id': t_constants.MOCK_PORT_ID, + 'mac_address': t_constants.MOCK_MAC_ADDR, + 'network_id': t_constants.MOCK_NETWORK_ID, + 'fixed_ips': [{ + 'subnet_id': t_constants.MOCK_SUBNET_ID, + 'ip_address': t_constants.MOCK_IP_ADDRESS + }]}) + + port = self.driver.get_port(t_constants.MOCK_PORT_ID, context=mock.ANY) + + self.assertIsInstance(port, network_models.Port) + self.assertEqual(t_constants.MOCK_PORT_ID, port.id) + self.assertEqual(t_constants.MOCK_MAC_ADDR, port.mac_address) + self.assertEqual(t_constants.MOCK_NETWORK_ID, port.network_id) + self.assertEqual(1, len(port.fixed_ips)) + self.assertIsInstance(port.fixed_ips[0], network_models.FixedIP) + self.assertEqual(t_constants.MOCK_SUBNET_ID, + port.fixed_ips[0].subnet_id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, + port.fixed_ips[0].ip_address) + + def test_get_network_by_name(self): + list_network = self.driver.network_proxy.networks + list_network.return_value = iter([Network(**{ + 'id': t_constants.MOCK_NETWORK_ID, + 'name': t_constants.MOCK_NETWORK_NAME, + 'subnets': [t_constants.MOCK_SUBNET_ID]})]) + network = self.driver.get_network_by_name( + t_constants.MOCK_NETWORK_NAME) + self.assertIsInstance(network, network_models.Network) + self.assertEqual(t_constants.MOCK_NETWORK_ID, network.id) + self.assertEqual(t_constants.MOCK_NETWORK_NAME, network.name) + self.assertEqual(1, len(network.subnets)) + self.assertEqual(t_constants.MOCK_SUBNET_ID, network.subnets[0]) + # Negative + list_network.side_effect = os_exceptions.ResourceNotFound + self.assertRaises(network_base.NetworkNotFound, + self.driver.get_network_by_name, + t_constants.MOCK_NETWORK_NAME) + list_network.side_effect = Exception + self.assertRaises(network_base.NetworkException, + self.driver.get_network_by_name, + t_constants.MOCK_NETWORK_NAME) + + def test_get_subnet_by_name(self): + list_subnet = self.driver.network_proxy.subnets + list_subnet.return_value = iter([Subnet(**{ + 'id': t_constants.MOCK_SUBNET_ID, + 'name': t_constants.MOCK_SUBNET_NAME, + 'gateway_ip': t_constants.MOCK_IP_ADDRESS, + 'cidr': t_constants.MOCK_CIDR})]) + subnet = self.driver.get_subnet_by_name(t_constants.MOCK_SUBNET_NAME) + self.assertIsInstance(subnet, network_models.Subnet) + self.assertEqual(t_constants.MOCK_SUBNET_ID, subnet.id) + self.assertEqual(t_constants.MOCK_SUBNET_NAME, subnet.name) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, subnet.gateway_ip) + self.assertEqual(t_constants.MOCK_CIDR, subnet.cidr) + # Negative + list_subnet.side_effect = os_exceptions.ResourceNotFound + self.assertRaises(network_base.SubnetNotFound, + self.driver.get_subnet_by_name, + t_constants.MOCK_SUBNET_NAME) + list_subnet.side_effect = Exception + self.assertRaises(network_base.NetworkException, + self.driver.get_subnet_by_name, + t_constants.MOCK_SUBNET_NAME) + + def test_get_port_by_name(self): + list_port = self.driver.network_proxy.ports + list_port.return_value = iter([Port(**{ + 'id': 
t_constants.MOCK_PORT_ID, + 'name': t_constants.MOCK_PORT_NAME, + 'mac_address': t_constants.MOCK_MAC_ADDR, + 'network_id': t_constants.MOCK_NETWORK_ID, + 'fixed_ips': [{ + 'subnet_id': t_constants.MOCK_SUBNET_ID, + 'ip_address': t_constants.MOCK_IP_ADDRESS + }]})]) + port = self.driver.get_port_by_name(t_constants.MOCK_PORT_NAME) + self.assertIsInstance(port, network_models.Port) + self.assertEqual(t_constants.MOCK_PORT_ID, port.id) + self.assertEqual(t_constants.MOCK_PORT_NAME, port.name) + self.assertEqual(t_constants.MOCK_MAC_ADDR, port.mac_address) + self.assertEqual(t_constants.MOCK_NETWORK_ID, port.network_id) + self.assertEqual(1, len(port.fixed_ips)) + self.assertIsInstance(port.fixed_ips[0], network_models.FixedIP) + self.assertEqual(t_constants.MOCK_SUBNET_ID, + port.fixed_ips[0].subnet_id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, + port.fixed_ips[0].ip_address) + # Negative + list_port.side_effect = os_exceptions.ResourceNotFound + self.assertRaises(network_base.PortNotFound, + self.driver.get_port_by_name, + t_constants.MOCK_PORT_NAME) + list_port.side_effect = Exception + self.assertRaises(network_base.NetworkException, + self.driver.get_port_by_name, + t_constants.MOCK_PORT_NAME) + + def test_get_port_by_net_id_device_id(self): + list_port = self.driver.network_proxy.ports + list_port.return_value = iter([Port(**{ + 'id': t_constants.MOCK_PORT_ID, + 'name': t_constants.MOCK_PORT_NAME, + 'mac_address': t_constants.MOCK_MAC_ADDR, + 'network_id': t_constants.MOCK_NETWORK_ID, + 'device_id': t_constants.MOCK_DEVICE_ID, + 'fixed_ips': [{ + 'subnet_id': t_constants.MOCK_SUBNET_ID, + 'ip_address': t_constants.MOCK_IP_ADDRESS + }]})]) + port = self.driver.get_port_by_net_id_device_id( + t_constants.MOCK_NETWORK_ID, t_constants.MOCK_DEVICE_ID) + self.assertIsInstance(port, network_models.Port) + self.assertEqual(t_constants.MOCK_PORT_ID, port.id) + self.assertEqual(t_constants.MOCK_DEVICE_ID, port.device_id) + self.assertEqual(t_constants.MOCK_PORT_NAME, port.name) + self.assertEqual(t_constants.MOCK_MAC_ADDR, port.mac_address) + self.assertEqual(t_constants.MOCK_NETWORK_ID, port.network_id) + self.assertEqual(1, len(port.fixed_ips)) + self.assertIsInstance(port.fixed_ips[0], network_models.FixedIP) + self.assertEqual(t_constants.MOCK_SUBNET_ID, + port.fixed_ips[0].subnet_id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, + port.fixed_ips[0].ip_address) + # Negative + list_port.side_effect = os_exceptions.ResourceNotFound + self.assertRaises(network_base.PortNotFound, + self.driver.get_port_by_net_id_device_id, + t_constants.MOCK_PORT_NAME, + t_constants.MOCK_DEVICE_ID) + list_port.side_effect = Exception + self.assertRaises(network_base.NetworkException, + self.driver.get_port_by_net_id_device_id, + t_constants.MOCK_NETWORK_ID, + t_constants.MOCK_DEVICE_ID) + + def test_get_ports_by_net_id_device_id(self): + """Test get_port_by_net_id_device_id, when port is not unique. + + The expected result is: only the first port is returned. 
+ """ + + list_port = self.driver.network_proxy.ports + list_port.return_value = iter([t_constants.MOCK_NEUTRON_PORT, + t_constants.MOCK_NEUTRON_PORT2, + ]) + + port = self.driver.get_port_by_net_id_device_id( + t_constants.MOCK_NETWORK_ID, t_constants.MOCK_DEVICE_ID) + self.assertIsInstance(port, network_models.Port) + self.assertEqual(t_constants.MOCK_PORT_ID, port.id) + self.assertEqual(t_constants.MOCK_DEVICE_ID, port.device_id) + self.assertEqual(t_constants.MOCK_PORT_NAME, port.name) + self.assertEqual(t_constants.MOCK_MAC_ADDR, port.mac_address) + self.assertEqual(t_constants.MOCK_NETWORK_ID, port.network_id) + self.assertEqual(1, len(port.fixed_ips)) + self.assertIsInstance(port.fixed_ips[0], network_models.FixedIP) + self.assertEqual(t_constants.MOCK_SUBNET_ID, + port.fixed_ips[0].subnet_id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, + port.fixed_ips[0].ip_address) + # Negative + list_port.side_effect = os_exceptions.ResourceNotFound + self.assertRaises(network_base.PortNotFound, + self.driver.get_port_by_net_id_device_id, + t_constants.MOCK_PORT_NAME, + t_constants.MOCK_DEVICE_ID) + list_port.side_effect = Exception + self.assertRaises(network_base.NetworkException, + self.driver.get_port_by_net_id_device_id, + t_constants.MOCK_NETWORK_ID, + t_constants.MOCK_DEVICE_ID) + + def test_get_multiple_ports_by_net_id_device_id(self): + """Test _get_resources_by_filters, when result is not unique""" + list_port = self.driver.network_proxy.ports + list_port.return_value = iter([t_constants.MOCK_NEUTRON_PORT, + t_constants.MOCK_NEUTRON_PORT2, + ]) + + ports = self.driver._get_resources_by_filters( + 'port', + network_id=t_constants.MOCK_NETWORK_ID, + device_id=t_constants.MOCK_DEVICE_ID, + ) + self.assertIsInstance(ports, list) + port1, port2 = ports + + self.assertEqual(t_constants.MOCK_PORT_ID, port1.id) + self.assertEqual(t_constants.MOCK_PORT_ID2, port2.id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, + port1.fixed_ips[0].ip_address) + self.assertEqual(t_constants.MOCK_IP_ADDRESS2, + port2.fixed_ips[0].ip_address) + + def test_get_unique_port_by_name(self): + """Test _get_resources_by_filters, when result is unique""" + list_port = self.driver.network_proxy.ports + list_port.return_value = iter([t_constants.MOCK_NEUTRON_PORT]) + + port = self.driver._get_resources_by_filters( + 'port', unique_item=True, name=t_constants.MOCK_PORT_NAME) + + self.assertIsInstance(port, network_models.Port) + self.assertEqual(t_constants.MOCK_PORT_ID, port.id) + + def test_get_non_existing_port_by_name(self): + """Test _get_resources_by_filters, when result is empty""" + list_port = self.driver.network_proxy.ports + list_port.return_value = iter([]) + + self.assertRaises(network_base.PortNotFound, + self.driver._get_resources_by_filters, + 'port', unique_item=True, name='port1') + + def test_get_qos_policy(self): + get_qos = self.driver.network_proxy.get_qos_policy + get_qos.return_value = QoSPolicy(**{ + 'id': t_constants.MOCK_NEUTRON_QOS_POLICY_ID}) + qos = self.driver.get_qos_policy( + t_constants.MOCK_NEUTRON_QOS_POLICY_ID) + self.assertIsInstance(qos, network_models.QosPolicy) + self.assertEqual(t_constants.MOCK_NEUTRON_QOS_POLICY_ID, + qos.id) + + get_qos.side_effect = os_exceptions.ResourceNotFound + self.assertRaises(network_base.QosPolicyNotFound, + self.driver.get_qos_policy, + t_constants.MOCK_NEUTRON_QOS_POLICY_ID) + + get_qos.side_effect = os_exceptions.SDKException + self.assertRaises(network_base.NetworkException, + self.driver.get_qos_policy, + 
                          t_constants.MOCK_NEUTRON_QOS_POLICY_ID)
+
+    def test_apply_qos_on_port(self):
+        update_port = self.driver.network_proxy.update_port
+        self.driver.apply_qos_on_port(
+            t_constants.MOCK_NEUTRON_QOS_POLICY_ID,
+            t_constants.MOCK_PORT_ID
+        )
+        update_port.assert_called_once_with(
+            t_constants.MOCK_PORT_ID,
+            qos_policy_id=t_constants.MOCK_NEUTRON_QOS_POLICY_ID)
+
+    def test_apply_or_undo_qos_on_port(self):
+        # Applying and undoing a QoS policy both go through the same
+        # network_proxy "update_port" call, so both paths are covered by
+        # this single unit test.
+        update_port = self.driver.network_proxy.update_port
+        update_port.side_effect = os_exceptions.ResourceNotFound
+        self.assertRaises(network_base.PortNotFound,
+                          self.driver.apply_qos_on_port,
+                          t_constants.MOCK_PORT_ID,
+                          t_constants.MOCK_NEUTRON_QOS_POLICY_ID)
+
+        update_port.side_effect = os_exceptions.SDKException
+        self.assertRaises(network_base.NetworkException,
+                          self.driver.apply_qos_on_port,
+                          t_constants.MOCK_PORT_ID,
+                          t_constants.MOCK_NEUTRON_QOS_POLICY_ID)
+
+    def test_get_network_ip_availability(self):
+        show_network_ip_availability = (
+            self.driver.network_proxy.get_network_ip_availability)
+        show_network_ip_availability.return_value = (
+            NetworkIPAvailability(**{
+                'network_id': t_constants.MOCK_NETWORK_ID,
+                'subnet_ip_availability':
+                    t_constants.MOCK_SUBNET_IP_AVAILABILITY
+            }))
+        ip_avail = self.driver.get_network_ip_availability(
+            network_models.Network(t_constants.MOCK_NETWORK_ID))
+        self.assertIsInstance(ip_avail,
+                              network_models.Network_IP_Availability)
+        self.assertEqual(t_constants.MOCK_NETWORK_ID, ip_avail.network_id)
+        self.assertEqual(t_constants.MOCK_SUBNET_IP_AVAILABILITY,
+                         ip_avail.subnet_ip_availability)
+
+    def test_plug_fixed_ip(self):
+        show_port = self.driver.network_proxy.get_port
+        show_port.return_value = Port(**{
+            'id': t_constants.MOCK_PORT_ID,
+            'fixed_ips': [
+                {
+                    'subnet_id': t_constants.MOCK_SUBNET_ID,
+                    'ip_address': t_constants.MOCK_IP_ADDRESS,
+                    'subnet': None
+                }]
+        })
+
+        self.driver.plug_fixed_ip(t_constants.MOCK_PORT_ID,
+                                  t_constants.MOCK_SUBNET_ID2,
+                                  t_constants.MOCK_IP_ADDRESS2)
+
+        expected_body = {
+            'fixed_ips': [
+                {
+                    'subnet_id': t_constants.MOCK_SUBNET_ID,
+                    'ip_address': t_constants.MOCK_IP_ADDRESS,
+                    'subnet': None
+                }, {
+                    'subnet_id': t_constants.MOCK_SUBNET_ID2,
+                    'ip_address': t_constants.MOCK_IP_ADDRESS2
+                }
+            ]
+        }
+        self.driver.network_proxy.update_port.assert_called_once_with(
+            t_constants.MOCK_PORT_ID,
+            **expected_body)
+
+    def test_plug_fixed_ip_no_ip_address(self):
+        show_port = self.driver.network_proxy.get_port
+        show_port.return_value = Port(**{
+            'id': t_constants.MOCK_PORT_ID,
+            'fixed_ips': [
+                {
+                    'subnet_id': t_constants.MOCK_SUBNET_ID,
+                    'ip_address': t_constants.MOCK_IP_ADDRESS,
+                    'subnet': None
+                }]
+        })
+
+        self.driver.plug_fixed_ip(t_constants.MOCK_PORT_ID,
+                                  t_constants.MOCK_SUBNET_ID2)
+
+        expected_body = {
+            'fixed_ips': [
+                {
+                    'subnet_id': t_constants.MOCK_SUBNET_ID,
+                    'ip_address': t_constants.MOCK_IP_ADDRESS,
+                    'subnet': None
+                }, {
+                    'subnet_id': t_constants.MOCK_SUBNET_ID2,
+                }
+            ]
+        }
+        self.driver.network_proxy.update_port.assert_called_once_with(
+            t_constants.MOCK_PORT_ID, **expected_body)
+
+    def test_plug_fixed_ip_exception(self):
+        show_port = self.driver.network_proxy.get_port
+        show_port.return_value = {
+            'id': t_constants.MOCK_PORT_ID,
+            'fixed_ips': [
+                {
+                    'subnet_id': t_constants.MOCK_SUBNET_ID,
+                    'ip_address': t_constants.MOCK_IP_ADDRESS,
+                    'subnet': None
+                }]
+        }
+
+        self.driver.network_proxy.update_port.side_effect = Exception
+
+        # Any error raised by update_port should surface as the driver's
+        # generic NetworkException.
+        
self.assertRaises(network_base.NetworkException, + self.driver.plug_fixed_ip, + t_constants.MOCK_PORT_ID, + t_constants.MOCK_SUBNET_ID2) + + def test_unplug_fixed_ip(self): + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port(**{ + 'id': t_constants.MOCK_PORT_ID, + 'fixed_ips': [ + { + 'subnet_id': t_constants.MOCK_SUBNET_ID, + 'ip_address': t_constants.MOCK_IP_ADDRESS, + 'subnet': None + }, { + 'subnet_id': t_constants.MOCK_SUBNET_ID2, + 'ip_address': t_constants.MOCK_IP_ADDRESS2, + 'subnet': None + }] + }) + + self.driver.unplug_fixed_ip(t_constants.MOCK_PORT_ID, + t_constants.MOCK_SUBNET_ID) + + expected_body = { + 'fixed_ips': [ + { + 'subnet_id': t_constants.MOCK_SUBNET_ID2, + 'ip_address': t_constants.MOCK_IP_ADDRESS2, + 'subnet': None + } + ] + } + self.driver.network_proxy.update_port.assert_called_once_with( + t_constants.MOCK_PORT_ID, + **expected_body) + + def test_unplug_fixed_ip_exception(self): + show_port = self.driver.network_proxy.get_port + show_port.return_value = Port( + device_id=t_constants.MOCK_PORT_ID, + fixed_ips=[(t_constants.MOCK_IP_ADDRESS, + t_constants.MOCK_SUBNET_ID)], + ) + + self.driver.network_proxy.update_port.side_effect = Exception + + self.assertRaises(network_base.NetworkException, + self.driver.unplug_fixed_ip, + t_constants.MOCK_PORT_ID, + t_constants.MOCK_SUBNET_ID) diff --git a/octavia/tests/unit/network/drivers/neutron/test_utils.py b/octavia/tests/unit/network/drivers/neutron/test_utils.py new file mode 100644 index 0000000000..59bcd20e61 --- /dev/null +++ b/octavia/tests/unit/network/drivers/neutron/test_utils.py @@ -0,0 +1,120 @@ +# Copyright 2017 GoDaddy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
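The plug/unplug fixed-IP tests above pin down a read-modify-write pattern against Neutron: fetch the port, append to or filter its fixed_ips list, then write the whole list back with a single update_port call. A minimal sketch of that pattern, with hypothetical helper and proxy names (the driver's real methods carry the error handling the exception tests exercise):

```python
# Illustrative sketch only: the read-modify-write cycle the tests assert.
def plug_fixed_ip(network_proxy, port_id, subnet_id, ip_address=None):
    # Start from the port's current fixed IPs and append the new one.
    fixed_ips = [dict(fip)
                 for fip in network_proxy.get_port(port_id).fixed_ips]
    new_ip = {'subnet_id': subnet_id}
    if ip_address:
        new_ip['ip_address'] = ip_address
    fixed_ips.append(new_ip)
    network_proxy.update_port(port_id, fixed_ips=fixed_ips)


def unplug_fixed_ip(network_proxy, port_id, subnet_id):
    # Keep every fixed IP except the ones on the subnet being unplugged.
    fixed_ips = [fip for fip in network_proxy.get_port(port_id).fixed_ips
                 if fip['subnet_id'] != subnet_id]
    network_proxy.update_port(port_id, fixed_ips=fixed_ips)
```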
+from octavia.common import constants +from octavia.network.drivers.neutron import utils +from octavia.tests.common import constants as t_constants +from octavia.tests.unit import base + + +class TestNeutronUtils(base.TestCase): + + def setUp(self): + super().setUp() + + def _compare_ignore_value_none(self, obj1_in, obj2_in): + obj1 = {key: obj1_in[key] for key in obj1_in + if obj1_in[key] is not None} + obj2 = {key: obj2_in[key] for key in obj2_in + if obj2_in[key] is not None} + self.assertEqual(obj1, obj2) + + def _in_ignore_value_none(self, needle, haystack): + newneedle = {key: needle[key] for key in needle + if needle[key] is not None} + newhaystack = [] + for hay in haystack: + newhaystack.append({key: hay[key] for key in hay + if hay[key] is not None}) + self.assertIn(newneedle, newhaystack) + + def test_convert_subnet_to_model(self): + model_obj = utils.convert_subnet_to_model( + t_constants.MOCK_SUBNET) + assert_dict = dict( + id=t_constants.MOCK_SUBNET_ID, + name=t_constants.MOCK_SUBNET_NAME, + network_id=t_constants.MOCK_NETWORK_ID, + project_id=t_constants.MOCK_PROJECT_ID, + gateway_ip=t_constants.MOCK_GATEWAY_IP, + cidr=t_constants.MOCK_CIDR, + ip_version=t_constants.MOCK_IP_VERSION, + host_routes=[], + ) + self._compare_ignore_value_none(model_obj.to_dict(), assert_dict) + + def test_convert_port_to_model(self): + model_obj = utils.convert_port_to_model( + t_constants.MOCK_NEUTRON_PORT) + assert_dict = dict( + id=t_constants.MOCK_PORT_ID, + name=t_constants.MOCK_PORT_NAME, + device_id=t_constants.MOCK_DEVICE_ID, + device_owner=t_constants.MOCK_DEVICE_OWNER, + mac_address=t_constants.MOCK_MAC_ADDR, + network_id=t_constants.MOCK_NETWORK_ID, + status=t_constants.MOCK_STATUS, + project_id=t_constants.MOCK_PROJECT_ID, + admin_state_up=t_constants.MOCK_ADMIN_STATE_UP, + fixed_ips=[], + security_group_ids=[], + vnic_type=constants.VNIC_TYPE_NORMAL, + ) + self._compare_ignore_value_none(model_obj.to_dict(), assert_dict) + fixed_ips = t_constants.MOCK_NEUTRON_PORT['fixed_ips'] + for ip in model_obj.fixed_ips: + self._in_ignore_value_none(ip.to_dict(), fixed_ips) + + def test_convert_network_to_model(self): + model_obj = utils.convert_network_to_model( + t_constants.MOCK_NETWORK) + assert_dict = dict( + id=t_constants.MOCK_NETWORK_ID, + name=t_constants.MOCK_NETWORK_NAME, + subnets=[t_constants.MOCK_SUBNET_ID], + project_id=t_constants.MOCK_PROJECT_ID, + admin_state_up=t_constants.MOCK_ADMIN_STATE_UP, + mtu=t_constants.MOCK_MTU, + provider_network_type=t_constants.MOCK_NETWORK_TYPE, + provider_physical_network=t_constants.MOCK_NETWORK_NAME, + provider_segmentation_id=t_constants.MOCK_SEGMENTATION_ID, + router_external=t_constants.MOCK_ROUTER_EXTERNAL, + port_security_enabled=False, + ) + model_dict = model_obj.to_dict() + model_dict['subnets'] = model_obj.subnets + self._compare_ignore_value_none(assert_dict, model_dict) + + def test_convert_fixed_ip_dict_to_model(self): + model_obj = utils.convert_fixed_ip_dict_to_model( + t_constants.MOCK_FIXED_IP) + assert_dict = dict( + subnet_id=t_constants.MOCK_SUBNET_ID, + ip_address=t_constants.MOCK_IP_ADDRESS + ) + self._compare_ignore_value_none(assert_dict, model_obj.to_dict()) + + def test_convert_network_ip_availability_to_model(self): + model_obj = utils.convert_network_ip_availability_to_model( + t_constants.MOCK_NETWORK_IP_AVAILABILITY) + assert_dict = dict( + network_id=t_constants.MOCK_NETWORK_ID, + project_id=t_constants.MOCK_PROJECT_ID, + tenant_id=t_constants.MOCK_PROJECT_ID, + network_name=t_constants.MOCK_NETWORK_NAME, + 
total_ips=t_constants.MOCK_NETWORK_TOTAL_IPS, + used_ips=t_constants.MOCK_NETWORK_USED_IPS, + subnet_ip_availability=t_constants.MOCK_SUBNET_IP_AVAILABILITY + ) + self._compare_ignore_value_none(model_obj.to_dict(recurse=True), + assert_dict) diff --git a/octavia/tests/unit/network/drivers/noop_driver/__init__.py b/octavia/tests/unit/network/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/tests/unit/network/drivers/noop_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/tests/unit/network/drivers/noop_driver/test_driver.py b/octavia/tests/unit/network/drivers/noop_driver/test_driver.py new file mode 100644 index 0000000000..0345ad5424 --- /dev/null +++ b/octavia/tests/unit/network/drivers/noop_driver/test_driver.py @@ -0,0 +1,361 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.db import models +from octavia.network import data_models as network_models +from octavia.network.drivers.noop_driver import driver +import octavia.tests.unit.base as base + + +class TestNoopNetworkDriver(base.TestCase): + FAKE_UUID_1 = uuidutils.generate_uuid() + FAKE_UUID_2 = uuidutils.generate_uuid() + FAKE_UUID_3 = uuidutils.generate_uuid() + FAKE_UUID_4 = uuidutils.generate_uuid() + FAKE_UUID_5 = uuidutils.generate_uuid() + FAKE_UUID_6 = uuidutils.generate_uuid() + FAKE_UUID_7 = uuidutils.generate_uuid() + + def setUp(self): + super().setUp() + self.mock_engine = mock.MagicMock() + with mock.patch('octavia.network.drivers.noop_driver.driver.' + 'create_engine') as mock_create_engine: + mock_create_engine.return_value = self.mock_engine + with mock.patch('octavia.network.drivers.noop_driver.' 
+ 'driver.event'): + self.driver = driver.NoopNetworkDriver() + mock_create_engine.assert_called_once_with( + 'sqlite:////tmp/octavia-network-noop.db') + + self.port = mock.MagicMock() + self.port_id = 88 + self.port_name = 'port1' + self.port.id = self.port_id + self.network_id = self.FAKE_UUID_3 + self.network_name = 'net1' + self.device_id = self.FAKE_UUID_4 + self.ip_address = "192.0.2.2" + self.load_balancer = models.LoadBalancer() + self.load_balancer.id = self.FAKE_UUID_2 + + self.vip = models.Vip() + self.vip.ip_address = "192.0.2.1" + self.vip.subnet_id = uuidutils.generate_uuid() + self.vip.port_id = uuidutils.generate_uuid() + self.amphora_id = self.FAKE_UUID_1 + self.compute_id = self.FAKE_UUID_2 + self.compute2_id = self.FAKE_UUID_2 + self.subnet_id = self.FAKE_UUID_3 + self.subnet_name = 'subnet1' + self.qos_policy_id = self.FAKE_UUID_5 + self.vrrp_port_id = self.FAKE_UUID_6 + + self.amphora1 = models.Amphora() + self.amphora1.id = uuidutils.generate_uuid() + self.amphora1.compute_id = self.compute_id + self.amphora1.vrrp_port_id = uuidutils.generate_uuid() + self.amphora1.ha_port_id = uuidutils.generate_uuid() + self.amphora1.vrrp_ip = '192.0.2.10' + self.amphora1.ha_ip = '192.0.2.11' + self.amphora2 = models.Amphora() + self.amphora2.id = uuidutils.generate_uuid() + self.amphora2.compute_id = self.compute2_id + self.amphora2.vrrp_port_id = uuidutils.generate_uuid() + self.amphora2.ha_port_id = uuidutils.generate_uuid() + self.amphora2.vrrp_ip = '192.0.2.20' + self.amphora2.ha_ip = '192.0.2.21' + self.load_balancer.amphorae = [self.amphora1, self.amphora2] + self.load_balancer.vip = self.vip + self.subnet = mock.MagicMock() + self.subnet.id = self.subnet_id + + def test_allocate_vip(self): + self.driver.allocate_vip(self.load_balancer) + self.assertEqual( + (self.load_balancer, 'allocate_vip'), + self.driver.driver.networkconfigconfig[self.load_balancer.id]) + + def test_deallocate_vip(self): + self.driver.deallocate_vip(self.vip) + self.assertEqual((self.vip, + 'deallocate_vip'), + self.driver.driver.networkconfigconfig[ + self.vip.ip_address]) + + def test_update_vip_sg(self): + self.driver.update_vip_sg(self.load_balancer, self.vip) + self.assertEqual((self.load_balancer, self.vip, + 'update_vip_sg'), + self.driver.driver.networkconfigconfig[( + self.load_balancer.id, self.vip.ip_address)]) + + def test_update_aap_port_sg(self): + self.driver.update_aap_port_sg(self.load_balancer, self.amphora1, + self.vip) + self.assertEqual((self.load_balancer, self.vip, self.amphora1, + 'update_aap_port_sg'), + self.driver.driver.networkconfigconfig[( + self.amphora1.id, self.vip.ip_address)]) + + def test_unplug_vip(self): + self.driver.unplug_vip(self.load_balancer, self.vip) + self.assertEqual((self.load_balancer, self.vip, + 'unplug_vip'), + self.driver.driver.networkconfigconfig[( + self.load_balancer.id, self.vip.ip_address)]) + + def test_unplug_network(self): + self.driver.unplug_network(self.compute_id, self.network_id) + self.assertEqual((self.compute_id, self.network_id, 'unplug_network'), + self.driver.driver.networkconfigconfig[( + self.compute_id, self.network_id)]) + + def test_get_plugged_networks(self): + interface_db_mock = mock.MagicMock() + interface_db_mock.port_id = self.port_id + interface_db_mock.network_id = self.network_id + interface_db_mock.compute_id = self.compute_id + interface_db_mock.vnic_type = constants.VNIC_TYPE_NORMAL + + fixed_ips_db_mock = mock.MagicMock() + fixed_ips_db_mock.port_id = self.port_id + fixed_ips_db_mock.subnet_id = self.subnet_id + 
fixed_ips_db_mock.ip_address = self.ip_address + + # mock out the sqlite db calls + connect_mock = mock.MagicMock() + connection_mock = mock.MagicMock() + self.mock_engine.connect.return_value = connect_mock + connect_mock.__enter__.return_value = connection_mock + + connection_mock.execute.side_effect = [[interface_db_mock], + [fixed_ips_db_mock]] + + result = self.driver.get_plugged_networks(self.compute_id) + + self.assertEqual((self.compute_id, 'get_plugged_networks'), + self.driver.driver.networkconfigconfig[( + self.compute_id)]) + + expected_fixed_ips = [network_models.FixedIP( + subnet_id=self.subnet_id, ip_address=self.ip_address)] + expected_interfaces = [network_models.Interface( + compute_id=self.compute_id, network_id=self.network_id, + port_id=self.port_id, fixed_ips=expected_fixed_ips, + vnic_type=constants.VNIC_TYPE_NORMAL)] + + self.assertEqual(expected_interfaces, result) + + def test_update_vip(self): + self.driver.update_vip(self.load_balancer) + self.assertEqual((self.load_balancer, False, 'update_vip'), + self.driver.driver.networkconfigconfig[( + self.load_balancer.id + )]) + + def test_get_network(self): + network = self.driver.get_network(self.network_id) + self.assertEqual( + (self.network_id, 'get_network'), + self.driver.driver.networkconfigconfig[self.network_id] + ) + self.assertEqual(self.network_id, network.id) + network_again = self.driver.get_network(self.network_id) + self.assertEqual(network, network_again) + + def test_get_subnet(self): + subnet = self.driver.get_subnet(self.subnet_id) + self.assertEqual( + (self.subnet_id, 'get_subnet'), + self.driver.driver.networkconfigconfig[self.subnet_id] + ) + self.assertEqual(self.subnet_id, subnet.id) + subnet_again = self.driver.get_subnet(self.subnet_id) + self.assertEqual(subnet, subnet_again) + + def test_get_port(self): + port = self.driver.get_port(self.port_id) + self.assertEqual( + (self.port_id, 'get_port'), + self.driver.driver.networkconfigconfig[self.port_id] + ) + self.assertEqual(self.port_id, port.id) + port_again = self.driver.get_port(self.port_id) + self.assertEqual(port, port_again) + + def test_get_network_by_name(self): + network = self.driver.get_network_by_name(self.network_name) + self.assertEqual( + (self.network_name, 'get_network_by_name'), + self.driver.driver.networkconfigconfig[self.network_name] + ) + self.assertEqual(self.network_name, network.name) + network_again = self.driver.get_network_by_name(self.network_name) + self.assertEqual(network, network_again) + + def test_get_subnet_by_name(self): + subnet = self.driver.get_subnet_by_name(self.subnet_name) + self.assertEqual( + (self.subnet_name, 'get_subnet_by_name'), + self.driver.driver.networkconfigconfig[self.subnet_name] + ) + self.assertEqual(self.subnet_name, subnet.name) + subnet_again = self.driver.get_subnet_by_name(self.subnet_name) + self.assertEqual(subnet, subnet_again) + + def test_get_port_by_name(self): + port = self.driver.get_port_by_name(self.port_name) + self.assertEqual( + (self.port_name, 'get_port_by_name'), + self.driver.driver.networkconfigconfig[self.port_name] + ) + self.assertEqual(self.port_name, port.name) + port_again = self.driver.get_port_by_name(self.port_name) + self.assertEqual(port, port_again) + + def test_get_port_by_net_id_device_id(self): + port = self.driver.get_port_by_net_id_device_id( + self.network_id, self.device_id) + self.assertEqual( + (self.network_id, self.device_id, + 'get_port_by_net_id_device_id'), + self.driver.driver.networkconfigconfig[(self.network_id, + 
self.device_id)] + ) + self.assertEqual(self.network_id, port.network_id) + self.assertEqual(self.device_id, port.device_id) + port_again = self.driver.get_port_by_net_id_device_id( + self.network_id, self.device_id) + self.assertEqual(port, port_again) + + def test_get_security_group(self): + FAKE_SG_NAME = 'fake_sg_name' + result = self.driver.get_security_group(FAKE_SG_NAME) + + self.assertEqual((FAKE_SG_NAME, 'get_security_group'), + self.driver.driver.networkconfigconfig[FAKE_SG_NAME]) + self.assertTrue(uuidutils.is_uuid_like(result.id)) + + def test_plug_port(self): + self.driver.plug_port(self.amphora1, self.port) + self.assertEqual( + (self.amphora1, self.port, 'plug_port'), + self.driver.driver.networkconfigconfig[self.amphora1.id, + self.port.id] + ) + + def test_get_network_configs(self): + amp_config = self.driver.get_network_configs(self.load_balancer) + self.assertEqual( + (self.load_balancer, 'get_network_configs'), + self.driver.driver.networkconfigconfig[self.load_balancer.id] + ) + self.assertEqual(2, len(amp_config)) + self.assertEqual(self.amphora1, amp_config[self.amphora1.id].amphora) + self.assertEqual(self.amphora2, amp_config[self.amphora2.id].amphora) + + def test_get_qos_policy(self): + self.driver.get_qos_policy(self.qos_policy_id) + self.assertEqual( + (self.qos_policy_id, 'get_qos_policy'), + self.driver.driver.networkconfigconfig[self.qos_policy_id] + ) + + def test_apply_qos_on_port(self): + self.driver.apply_qos_on_port(self.qos_policy_id, self.vrrp_port_id) + self.assertEqual( + (self.qos_policy_id, self.vrrp_port_id, 'apply_qos_on_port'), + self.driver.driver.networkconfigconfig[self.qos_policy_id, + self.vrrp_port_id] + ) + + def test_plug_aap_port(self): + self.driver.plug_aap_port(self.load_balancer, self.vip, self.amphora1, + self.subnet) + self.assertEqual( + (self.load_balancer, self.vip, self.amphora1, self.subnet, + 'plug_aap_port'), + self.driver.driver.networkconfigconfig[self.amphora1.id, + self.vip.ip_address] + ) + + def test_unplug_aap(self): + self.driver.unplug_aap_port(self.vip, self.amphora1, self.subnet) + self.assertEqual( + (self.vip, self.amphora1, self.subnet, + 'unplug_aap_port'), + self.driver.driver.networkconfigconfig[self.amphora1.id, + self.vip.ip_address] + ) + + def test_delete_port(self): + PORT_ID = uuidutils.generate_uuid() + + self.driver.delete_port(PORT_ID) + + self.assertEqual((PORT_ID, 'delete_port'), + self.driver.driver.networkconfigconfig[PORT_ID]) + + def test_set_port_admin_state_up(self): + PORT_ID = uuidutils.generate_uuid() + + self.driver.set_port_admin_state_up(PORT_ID, False) + + self.assertEqual( + (PORT_ID, False, 'admin_down_port'), + self.driver.driver.networkconfigconfig[(PORT_ID, False)]) + + def test_create_port(self): + FAKE_NAME = 'fake_name' + IP_ADDRESS = '2001:db8::77' + NETWORK_ID = uuidutils.generate_uuid() + QOS_POLICY_ID = uuidutils.generate_uuid() + SUBNET_ID = uuidutils.generate_uuid() + FIXED_IPS = [{'ip_address': IP_ADDRESS, 'subnet_id': SUBNET_ID}, + {'subnet_id': SUBNET_ID}] + + # Test minimum + result = self.driver.create_port(NETWORK_ID) + + self.assertIsInstance(result, network_models.Port) + self.assertEqual(NETWORK_ID, result.network_id) + + # Test full parameters + result = self.driver.create_port( + NETWORK_ID, name=FAKE_NAME, fixed_ips=FIXED_IPS, + admin_state_up=False, qos_policy_id=QOS_POLICY_ID) + + self.assertIsInstance(result, network_models.Port) + self.assertEqual(NETWORK_ID, result.network_id) + self.assertEqual(FAKE_NAME, result.name) + self.assertEqual(IP_ADDRESS, 
result.fixed_ips[0].ip_address) + self.assertEqual(SUBNET_ID, result.fixed_ips[0].subnet_id) + self.assertEqual('198.51.100.56', result.fixed_ips[1].ip_address) + self.assertEqual(SUBNET_ID, result.fixed_ips[1].subnet_id) + self.assertEqual(QOS_POLICY_ID, result.qos_policy_id) + self.assertFalse(result.admin_state_up) + + def test_plug_fixed_ip(self): + self.driver.plug_fixed_ip(self.port_id, self.subnet_id, + self.ip_address) + self.assertEqual( + (self.port_id, self.subnet_id, self.ip_address, 'plug_fixed_ip'), + self.driver.driver.networkconfigconfig[ + self.port_id, self.subnet_id] + ) diff --git a/octavia/tests/unit/statistics/__init__.py b/octavia/tests/unit/statistics/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/statistics/drivers/__init__.py b/octavia/tests/unit/statistics/drivers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/statistics/drivers/test_logger.py b/octavia/tests/unit/statistics/drivers/test_logger.py new file mode 100644 index 0000000000..49b1faa5a1 --- /dev/null +++ b/octavia/tests/unit/statistics/drivers/test_logger.py @@ -0,0 +1,34 @@ +# Copyright 2018 GoDaddy +# Copyright (c) 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from oslo_utils import uuidutils + +from octavia.common import data_models +from octavia.statistics.drivers import logger +from octavia.tests.unit import base + + +class TestStatsUpdateLogger(base.TestCase): + def setUp(self): + super().setUp() + self.logger = logger.StatsLogger() + self.amphora_id = uuidutils.generate_uuid() + + @mock.patch('octavia.statistics.drivers.logger.LOG') + def test_update_stats(self, mock_log): + self.logger.update_stats([data_models.ListenerStatistics()]) + self.assertEqual(1, mock_log.info.call_count) diff --git a/octavia/tests/unit/statistics/drivers/test_update_db.py b/octavia/tests/unit/statistics/drivers/test_update_db.py new file mode 100644 index 0000000000..dffd764910 --- /dev/null +++ b/octavia/tests/unit/statistics/drivers/test_update_db.py @@ -0,0 +1,80 @@ +# Copyright 2018 GoDaddy +# Copyright (c) 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import random +from unittest import mock + +from oslo_utils import uuidutils + +from octavia.common import data_models +from octavia.statistics.drivers import update_db +from octavia.tests.unit import base + + +class TestStatsUpdateDb(base.TestCase): + def setUp(self): + super().setUp() + self.amphora_id = uuidutils.generate_uuid() + self.listener_id = uuidutils.generate_uuid() + + @mock.patch('octavia.db.repositories.ListenerStatisticsRepository') + @mock.patch('octavia.db.api.session') + def test_update_stats(self, mock_get_session, mock_listener_stats_repo): + bytes_in1 = random.randrange(1000000000) + bytes_out1 = random.randrange(1000000000) + active_conns1 = random.randrange(1000000000) + total_conns1 = random.randrange(1000000000) + request_errors1 = random.randrange(1000000000) + stats_1 = data_models.ListenerStatistics( + listener_id=self.listener_id, + amphora_id=self.amphora_id, + bytes_in=bytes_in1, + bytes_out=bytes_out1, + active_connections=active_conns1, + total_connections=total_conns1, + request_errors=request_errors1 + ) + bytes_in2 = random.randrange(1000000000) + bytes_out2 = random.randrange(1000000000) + active_conns2 = random.randrange(1000000000) + total_conns2 = random.randrange(1000000000) + request_errors2 = random.randrange(1000000000) + stats_2 = data_models.ListenerStatistics( + listener_id=self.listener_id, + amphora_id=self.amphora_id, + bytes_in=bytes_in2, + bytes_out=bytes_out2, + active_connections=active_conns2, + total_connections=total_conns2, + request_errors=request_errors2 + ) + + mock_session = mock_get_session().begin().__enter__() + + update_db.StatsUpdateDb().update_stats( + [stats_1, stats_2], deltas=False) + + mock_listener_stats_repo().replace.assert_has_calls([ + mock.call(mock_session, stats_1), + mock.call(mock_session, stats_2) + ]) + + update_db.StatsUpdateDb().update_stats( + [stats_1, stats_2], deltas=True) + + mock_listener_stats_repo().increment.assert_has_calls([ + mock.call(mock_session, stats_1), + mock.call(mock_session, stats_2) + ]) diff --git a/octavia/tests/unit/statistics/test_stats_base.py b/octavia/tests/unit/statistics/test_stats_base.py new file mode 100644 index 0000000000..89a4f824a6 --- /dev/null +++ b/octavia/tests/unit/statistics/test_stats_base.py @@ -0,0 +1,97 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2015 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
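The test_update_stats case above fixes the contract of the statistics DB driver: with deltas=False every sample is routed to the repository's replace() (absolute counters overwrite the stored row), while deltas=True routes samples to increment() (deltas are added onto the stored totals), all inside a single session transaction. A minimal sketch of a driver honoring that contract, using hypothetical names rather than the real StatsUpdateDb internals:

```python
# Hypothetical sketch of the deltas contract exercised by the test above.
class SketchStatsUpdateDb:
    def __init__(self, repo, session):
        self.repo = repo          # exposes replace() and increment()
        self.session = session    # provides begin() as a context manager

    def update_stats(self, listener_stats, deltas=False):
        # One transaction per batch; replace absolute samples, add deltas.
        with self.session.begin() as tx:
            for stats in listener_stats:
                if deltas:
                    self.repo.increment(tx, stats)
                else:
                    self.repo.replace(tx, stats)
```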
+import random +from unittest import mock + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import data_models +from octavia.statistics import stats_base +from octavia.tests.unit import base + +STATS_DRIVERS = ['stats_db', 'stats_logger'] + + +class TestStatsBase(base.TestCase): + + def setUp(self): + super().setUp() + + self.conf = oslo_fixture.Config(cfg.CONF) + self.conf.config(group="controller_worker", + statistics_drivers=STATS_DRIVERS) + self.amphora_id = uuidutils.generate_uuid() + self.listener_id = uuidutils.generate_uuid() + self.listener_stats = data_models.ListenerStatistics( + amphora_id=self.amphora_id, + listener_id=self.listener_id, + bytes_in=random.randrange(1000000000), + bytes_out=random.randrange(1000000000), + active_connections=random.randrange(1000000000), + total_connections=random.randrange(1000000000), + request_errors=random.randrange(1000000000)) + self.listener_stats_dict = { + self.listener_id: { + "request_errors": self.listener_stats.request_errors, + "active_connections": + self.listener_stats.active_connections, + "total_connections": self.listener_stats.total_connections, + "bytes_in": self.listener_stats.bytes_in, + "bytes_out": self.listener_stats.bytes_out, + } + } + + @mock.patch('octavia.statistics.drivers.update_db.StatsUpdateDb') + @mock.patch('octavia.statistics.drivers.logger.StatsLogger') + def test_update_stats(self, mock_stats_logger, mock_stats_db): + stats_base._STATS_HANDLERS = None + + # Test with update success + stats_base.update_stats_via_driver([self.listener_stats], deltas=True) + + mock_stats_db().update_stats.assert_called_once_with( + [self.listener_stats], deltas=True) + mock_stats_logger().update_stats.assert_called_once_with( + [self.listener_stats], deltas=True) + + # Test with update failure (should still run both drivers) + mock_stats_db.reset_mock() + mock_stats_logger.reset_mock() + mock_stats_db().update_stats.side_effect = Exception + mock_stats_logger().update_stats.side_effect = Exception + stats_base.update_stats_via_driver( + [self.listener_stats]) + + mock_stats_db().update_stats.assert_called_once_with( + [self.listener_stats], deltas=False) + mock_stats_logger().update_stats.assert_called_once_with( + [self.listener_stats], deltas=False) + + @mock.patch('octavia.statistics.drivers.update_db.StatsUpdateDb') + @mock.patch('octavia.statistics.drivers.logger.StatsLogger') + def test__get_stats_handlers(self, mock_stats_logger, mock_stats_db): + stats_base._STATS_HANDLERS = None + + # Test that this function implements a singleton + first_call_handlers = stats_base._get_stats_handlers() + second_call_handlers = stats_base._get_stats_handlers() + + self.assertEqual(first_call_handlers, second_call_handlers) + + # Drivers should only load once (this is a singleton) + mock_stats_db.assert_called_once_with() + mock_stats_logger.assert_called_once_with() diff --git a/octavia/tests/unit/test_opts.py b/octavia/tests/unit/test_opts.py new file mode 100644 index 0000000000..58cc964d4d --- /dev/null +++ b/octavia/tests/unit/test_opts.py @@ -0,0 +1,26 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from octavia import opts +import octavia.tests.unit.base as base + + +class TestOpts(base.TestCase): + + def setUp(self): + super().setUp() + + def test_list_opts(self): + opts_list = opts.list_opts()[0] + self.assertIn('DEFAULT', opts_list) diff --git a/octavia/tests/unit/test_version.py b/octavia/tests/unit/test_version.py new file mode 100644 index 0000000000..9866d16329 --- /dev/null +++ b/octavia/tests/unit/test_version.py @@ -0,0 +1,34 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +import octavia.tests.unit.base as base +from octavia import version + + +class TestVersion(base.TestCase): + + def setUp(self): + super().setUp() + + def test_vendor_str(self): + self.assertEqual("OpenStack Foundation", version.vendor_string()) + + def test_product_string(self): + self.assertEqual("OpenStack Octavia", version.product_string()) + + @mock.patch('pbr.version.VersionInfo.version_string', return_value='0.0.0') + def test_version_str(self, mock_pbr): + self.assertEqual('0.0.0', version.version_string_with_package()) diff --git a/octavia/tests/unit/volume/__init__.py b/octavia/tests/unit/volume/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/volume/drivers/__init__.py b/octavia/tests/unit/volume/drivers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/volume/drivers/noop_driver/__init__.py b/octavia/tests/unit/volume/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/volume/drivers/noop_driver/test_driver.py b/octavia/tests/unit/volume/drivers/noop_driver/test_driver.py new file mode 100644 index 0000000000..de6858cef7 --- /dev/null +++ b/octavia/tests/unit/volume/drivers/noop_driver/test_driver.py @@ -0,0 +1,55 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg +from oslo_utils import uuidutils + +from octavia.common import constants +import octavia.tests.unit.base as base +from octavia.volume.drivers.noop_driver import driver + +CONF = cfg.CONF + + +class TestNoopVolumeDriver(base.TestCase): + FAKE_UUID_1 = uuidutils.generate_uuid() + FAKE_UUID_2 = uuidutils.generate_uuid() + + def setUp(self): + super().setUp() + self.driver = driver.NoopVolumeDriver() + + self.image_id = self.FAKE_UUID_1 + self.volume_id = self.FAKE_UUID_2 + + def test_create_volume_from_image(self): + self.driver.create_volume_from_image(self.image_id) + self.assertEqual((self.image_id, None, 'create_volume_from_image'), + self.driver.driver.volumeconfig[( + self.image_id + )]) + + def test_create_volume_from_image_with_availability_zone(self): + az_name = "some_az" + az_data = {constants.VOLUME_ZONE: az_name} + self.driver.create_volume_from_image(self.image_id, az_data) + self.assertEqual((self.image_id, az_data, 'create_volume_from_image'), + self.driver.driver.volumeconfig[( + self.image_id + )]) + + def test_get_image_from_volume(self): + self.driver.get_image_from_volume(self.volume_id) + self.assertEqual((self.volume_id, 'get_image_from_volume'), + self.driver.driver.volumeconfig[( + self.volume_id + )]) diff --git a/octavia/tests/unit/volume/drivers/test_cinder_driver.py b/octavia/tests/unit/volume/drivers/test_cinder_driver.py new file mode 100644 index 0000000000..d32d1eb025 --- /dev/null +++ b/octavia/tests/unit/volume/drivers/test_cinder_driver.py @@ -0,0 +1,146 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from unittest import mock + +from cinderclient import exceptions as cinder_exceptions +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.common import exceptions +import octavia.tests.unit.base as base +import octavia.volume.drivers.cinder_driver as cinder_common + + +CONF = cfg.CONF + + +class TestCinderClient(base.TestCase): + + def setUp(self): + fake_uuid1 = uuidutils.generate_uuid() + fake_uuid2 = uuidutils.generate_uuid() + fake_uuid3 = uuidutils.generate_uuid() + + conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + self.conf = conf + + self.manager = cinder_common.VolumeManager() + self.manager.manager = mock.MagicMock() + + self.cinder_response = mock.Mock() + self.cinder_response.id = fake_uuid1 + + self.manager.manager.get.return_value.status = 'available' + self.manager.manager.create.return_value = self.cinder_response + self.manager.availability_zone_manager = mock.MagicMock() + self.image_id = fake_uuid2 + self.volume_id = fake_uuid3 + + super().setUp() + + def test_create_volume_from_image(self): + self.conf.config(group="controller_worker", + volume_driver='volume_cinder_driver') + self.conf.config(group="cinder", volume_create_retry_interval=0) + self.manager.create_volume_from_image(self.image_id) + self.manager.manager.create.assert_called_with( + size=16, + volume_type=None, + availability_zone=None, + imageRef=self.image_id) + + def test_create_volume_from_image_with_availability_zone(self): + self.conf.config(group="controller_worker", + volume_driver='volume_cinder_driver') + self.conf.config(group="cinder", volume_create_retry_interval=0) + self.conf.config(group="cinder", availability_zone="no_zone") + + az_name = "some_zone" + az_data = {constants.VOLUME_ZONE: az_name} + + self.manager.create_volume_from_image(self.image_id, az_data) + self.manager.manager.create.assert_called_with( + size=16, + volume_type=None, + availability_zone=az_name, + imageRef=self.image_id) + + def test_create_volume_from_image_with_availability_zone_from_conf(self): + az_name = "some_az" + self.conf.config(group="controller_worker", + volume_driver='volume_cinder_driver') + self.conf.config(group="cinder", volume_create_retry_interval=0) + self.conf.config(group="cinder", availability_zone=az_name) + + self.manager.create_volume_from_image(self.image_id) + self.manager.manager.create.assert_called_with( + size=16, + volume_type=None, + availability_zone=az_name, + imageRef=self.image_id) + + def test_create_volume_from_image_error(self): + self.conf.config(group="controller_worker", + volume_driver='volume_cinder_driver') + self.conf.config(group="cinder", volume_create_retry_interval=0) + self.manager.manager.get.return_value.status = 'error' + self.assertRaises(cinder_exceptions.ResourceInErrorState, + self.manager.create_volume_from_image, + self.image_id) + + def test_build_cinder_volume_timeout(self): + self.conf.config(group="controller_worker", + volume_driver='volume_cinder_driver') + self.conf.config(group="cinder", volume_create_timeout=0) + self.conf.config(group="cinder", volume_create_retry_interval=0) + self.manager.manager.get.return_value.status = 'build' + self.manager.create_volume_from_image.retry.sleep = mock.Mock() + self.assertRaises(cinder_exceptions.TimeoutException, + self.manager.create_volume_from_image, + self.image_id) + + def test_get_image_from_volume(self): + self.conf.config(group="controller_worker", + 
volume_driver='volume_cinder_driver') + self.conf.config(group="cinder", + volume_create_retry_interval=0) + self.manager.get_image_from_volume(self.volume_id) + self.manager.manager.get.assert_called_with( + self.volume_id) + + def test_get_image_from_volume_error(self): + self.conf.config(group="controller_worker", + volume_driver='volume_cinder_driver') + self.conf.config(group="cinder", + volume_create_retry_interval=0) + self.manager.manager.get.side_effect = [ + exceptions.VolumeGetException('test_exception')] + self.assertRaises(exceptions.VolumeGetException, + self.manager.get_image_from_volume, + self.volume_id) + + def test_validate_availability_zone(self): + az_name = "some_az" + mock_az = mock.Mock() + mock_az.zoneName = az_name + self.manager.availability_zone_manager.list.return_value = [mock_az] + self.manager.validate_availability_zone(az_name) + self.manager.availability_zone_manager.list.assert_called_with( + detailed=False) + + def test_validate_availability_zone_with_exception(self): + self.manager.availability_zone_manager.list.return_value = [] + self.assertRaises(exceptions.InvalidSubresource, + self.manager.validate_availability_zone, + "bogus") diff --git a/octavia/version.py b/octavia/version.py new file mode 100644 index 0000000000..03c98a68e7 --- /dev/null +++ b/octavia/version.py @@ -0,0 +1,32 @@ +# Copyright 2011-2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + +OCTAVIA_VENDOR = "OpenStack Foundation" +OCTAVIA_PRODUCT = "OpenStack Octavia" + +version_info = pbr.version.VersionInfo('octavia') + + +def vendor_string(): + return OCTAVIA_VENDOR + + +def product_string(): + return OCTAVIA_PRODUCT + + +def version_string_with_package(): + return version_info.version_string() diff --git a/octavia/volume/__init__.py b/octavia/volume/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/volume/drivers/__init__.py b/octavia/volume/drivers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/volume/drivers/cinder_driver.py b/octavia/volume/drivers/cinder_driver.py new file mode 100644 index 0000000000..ec681746a7 --- /dev/null +++ b/octavia/volume/drivers/cinder_driver.py @@ -0,0 +1,153 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
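The cinder driver that follows guards create_volume_from_image with tenacity's @retry(reraise=True, stop=stop_after_attempt(...)), and the timeout test earlier stubs the decorated function's retry.sleep attribute to avoid real waits. A standalone sketch of those tenacity semantics (illustrative only, not Octavia code):

```python
# tenacity retries the wrapped callable on any exception; reraise=True
# surfaces the original error (not RetryError) once the stop condition hits.
from tenacity import retry, stop_after_attempt

calls = []

@retry(reraise=True, stop=stop_after_attempt(3))
def flaky():
    calls.append(1)
    raise RuntimeError('boom')

try:
    flaky()
except RuntimeError:
    assert len(calls) == 3  # three attempts, then the original exception

# tenacity exposes its retry controller on the wrapped function; patching
# its sleep is how the unit test above skips the inter-attempt waits.
flaky.retry.sleep = lambda seconds: None
```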
+ +import time + +from cinderclient import exceptions as cinder_exceptions +from oslo_config import cfg +from oslo_log import log as logging +from tenacity import retry +from tenacity import stop_after_attempt + +from octavia.common import clients +from octavia.common import constants +from octavia.common import exceptions +from octavia.volume import volume_base + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + + +class VolumeManager(volume_base.VolumeBase): + '''Volume implementation for virtual machines via cinder.''' + + def __init__(self): + super().__init__() + # Must initialize cinder api + self._cinder_client = clients.CinderAuth.get_cinder_client( + service_name=CONF.cinder.service_name, + endpoint=CONF.cinder.endpoint, + region=CONF.cinder.region_name, + endpoint_type=CONF.cinder.endpoint_type, + insecure=CONF.cinder.insecure, + cacert=CONF.cinder.ca_certificates_file + ) + self.manager = self._cinder_client.volumes + self.availability_zone_manager = self._cinder_client.availability_zones + + @retry(reraise=True, + stop=stop_after_attempt(CONF.cinder.volume_create_max_retries)) + def create_volume_from_image(self, image_id, availability_zone=None): + """Create cinder volume + + :param image_id: ID of amphora image + :param availability_zone: Availability zone data dict + + :return volume id + """ + + if availability_zone: + az_name = availability_zone.get( + constants.VOLUME_ZONE, CONF.cinder.availability_zone) + else: + az_name = CONF.cinder.availability_zone + + volume = self.manager.create( + size=CONF.cinder.volume_size, + volume_type=CONF.cinder.volume_type, + availability_zone=az_name, + imageRef=image_id) + resource_status = self.manager.get(volume.id).status + + status = constants.CINDER_STATUS_AVAILABLE + start = int(time.time()) + + while resource_status != status: + time.sleep(CONF.cinder.volume_create_retry_interval) + instance_volume = self.manager.get(volume.id) + resource_status = instance_volume.status + if resource_status == constants.CINDER_STATUS_ERROR: + LOG.error('Error creating %s', instance_volume.id) + instance_volume.delete() + raise cinder_exceptions.ResourceInErrorState( + obj=volume, fault_msg='Cannot create volume') + if int(time.time()) - start >= CONF.cinder.volume_create_timeout: + LOG.error('Timed out waiting to create cinder volume %s', + instance_volume.id) + instance_volume.delete() + raise cinder_exceptions.TimeoutException( + obj=volume, action=constants.CINDER_ACTION_CREATE_VOLUME) + return volume.id + + def delete_volume(self, volume_id): + """Delete a cinder volume + + :param volume_id: ID of amphora boot volume + """ + LOG.debug('Deleting cinder volume %s', volume_id) + try: + instance_volume = self.manager.get(volume_id) + try: + instance_volume.delete() + LOG.debug("Deleted volume %s", volume_id) + except Exception as e: + LOG.exception("Error deleting cinder volume %s", + volume_id) + raise exceptions.VolumeDeleteException() from e + except cinder_exceptions.NotFound: + LOG.warning("Volume %s not found: assuming already deleted", + volume_id) + + def get_image_from_volume(self, volume_id): + """Get glance image from volume + + :param volume_id: ID of amphora boot volume + + :return image id + """ + image_id = None + LOG.debug('Get glance image for volume %s', volume_id) + try: + instance_volume = self.manager.get(volume_id) + except cinder_exceptions.NotFound as e: + LOG.exception("Volume %s not found", volume_id) + raise exceptions.VolumeGetException() from e + if hasattr(instance_volume, 
'volume_image_metadata'): + image_id = instance_volume.volume_image_metadata.get("image_id") + else: + LOG.error("Volume %s has no image metadata", volume_id) + image_id = None + return image_id + + def validate_availability_zone(self, availability_zone): + """Validates that an availability zone exists in cinder. + + :param availability_zone: Name of the availability zone to lookup. + :raises: NotFound + :returns: None + """ + try: + volume_zones = [ + a.zoneName for a in self.availability_zone_manager.list( + detailed=False)] + if availability_zone not in volume_zones: + LOG.info('Availability zone %s was not found in cinder. %s', + availability_zone, volume_zones) + raise exceptions.InvalidSubresource( + resource='Cinder availability zone', id=availability_zone) + except Exception as e: + LOG.exception('Cinder reported a failure while listing ' + 'availability zones: %s', str(e)) + raise diff --git a/octavia/volume/drivers/noop_driver/__init__.py b/octavia/volume/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/volume/drivers/noop_driver/driver.py b/octavia/volume/drivers/noop_driver/driver.py new file mode 100644 index 0000000000..32502d835e --- /dev/null +++ b/octavia/volume/drivers/noop_driver/driver.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_log import log as logging +from oslo_utils import uuidutils + +from octavia.volume import volume_base as driver_base + +LOG = logging.getLogger(__name__) + + +class NoopManager: + def __init__(self): + super().__init__() + self.volumeconfig = {} + + def create_volume_from_image(self, image_id, availability_zone=None): + LOG.debug("Volume %s no-op, image id %s, availability zone %s", + self.__class__.__name__, image_id, availability_zone) + self.volumeconfig[image_id] = (image_id, + availability_zone, + 'create_volume_from_image') + volume_id = uuidutils.generate_uuid() + return volume_id + + def delete_volume(self, volume_id): + LOG.debug("Volume %s no-op, volume id %s", + self.__class__.__name__, volume_id) + self.volumeconfig[volume_id] = (volume_id, 'delete') + + def get_image_from_volume(self, volume_id): + LOG.debug("Volume %s no-op, volume id %s", + self.__class__.__name__, volume_id) + self.volumeconfig[volume_id] = (volume_id, 'get_image_from_volume') + image_id = uuidutils.generate_uuid() + return image_id + + def validate_availability_zone(self, availability_zone): + LOG.debug("Volume %s no-op, validate_availability_zone name %s", + self.__class__.__name__, availability_zone) + self.volumeconfig[availability_zone] = ( + availability_zone, 'validate_availability_zone') + + +class NoopVolumeDriver(driver_base.VolumeBase): + def __init__(self): + super().__init__() + self.driver = NoopManager() + + def create_volume_from_image(self, image_id, availability_zone=None): + volume_id = self.driver.create_volume_from_image( + image_id, availability_zone) + return volume_id + + def delete_volume(self, volume_id): + self.driver.delete_volume(volume_id) + + def get_image_from_volume(self, volume_id): + image_id = self.driver.get_image_from_volume(volume_id) + return image_id + + def validate_availability_zone(self, availability_zone): + self.driver.validate_availability_zone(availability_zone) diff --git a/octavia/volume/volume_base.py b/octavia/volume/volume_base.py new file mode 100644 index 0000000000..f164c56671 --- /dev/null +++ b/octavia/volume/volume_base.py @@ -0,0 +1,54 @@ +# Copyright 2011-2019 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + + +class VolumeBase(metaclass=abc.ABCMeta): + + @abc.abstractmethod + def create_volume_from_image(self, image_id, availability_zone=None): + """Create volume for instance + + :param image_id: ID of amphora image + :param availability_zone: Availability zone data dict + + :return volume id + """ + + @abc.abstractmethod + def delete_volume(self, volume_id): + """Delete volume + + :param volume_id: ID of amphora volume + """ + + @abc.abstractmethod + def get_image_from_volume(self, volume_id): + """Get cinder volume + + :param volume_id: ID of amphora volume + + :return image id + """ + + @abc.abstractmethod + def validate_availability_zone(self, availability_zone): + """Validates that a volume availability zone exists. + + :param availability_zone: Name of the availability zone to lookup. 
+ :returns: None + :raises: NotFound + :raises: NotImplementedError + """ diff --git a/octavia/wsgi/__init__.py b/octavia/wsgi/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/wsgi/api.py b/octavia/wsgi/api.py new file mode 100644 index 0000000000..3434394598 --- /dev/null +++ b/octavia/wsgi/api.py @@ -0,0 +1,34 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""WSGI application entry-point for the Octavia API.""" + +from pathlib import Path +from sys import argv +import threading + +from octavia.api import app + +application = None +args = None + +# Our wsgi app will pull in the sys.argv if we don't pass an argv parameter, +# which means octavia will try to use the sphinx-build parameters for oslo +# config. Work around this while maintaining compatibility by passing the 'h' +# help parameter if we are "running" under sphinx-build. +if Path(argv[0]).name == "sphinx-build": + args = ['h'] + +lock = threading.Lock() +with lock: + if application is None: + application = app.setup_app(argv=args) diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml new file mode 100644 index 0000000000..bc1dc04ea8 --- /dev/null +++ b/playbooks/enable-fips.yaml @@ -0,0 +1,3 @@ +- hosts: all + roles: + - enable-fips diff --git a/playbooks/image-build/post.yaml b/playbooks/image-build/post.yaml new file mode 100644 index 0000000000..ae841eb6a0 --- /dev/null +++ b/playbooks/image-build/post.yaml @@ -0,0 +1,10 @@ +- hosts: all + name: Copy image + tasks: + - synchronize: + dest: "{{ zuul.executor.work_root }}/artifacts/" + mode: pull + src: "{{ ansible_user_dir }}/test-images" + verify_host: true + rsync_opts: + - "--exclude=/*/*/" \ No newline at end of file diff --git a/playbooks/image-build/run.yaml b/playbooks/image-build/run.yaml new file mode 100644 index 0000000000..5e02135827 --- /dev/null +++ b/playbooks/image-build/run.yaml @@ -0,0 +1,45 @@ +- hosts: all + tasks: + # Workaround for Red Hat like distros: SELinux prevents diskimage-builder + # from creating a RPM DB dir in a chroot on Centos 9 Stream + - name: Disable SELinux on Red Hat distributions + selinux: + state: disabled + become: yes + when: + - ansible_os_family == 'RedHat' + - name: Install required pip packages + pip: + name: "{{ item }}" + state: "latest" + virtualenv: /var/tmp/venv + virtualenv_command: python3 -m venv + register: install_packages + until: install_packages is success + retries: 5 + delay: 2 + become: yes + with_items: + - bindep + - diskimage-builder + - setuptools + - name: Install binary dependencies from diskimage-builder + include_role: + name: bindep + vars: + bindep_dir: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/diskimage-builder'].src_dir }}" + - name: Ensure artifacts/images directory exists + file: + path: '{{ ansible_user_dir }}/test-images' + state: directory + - name: Build an amphora image for publishing + shell: >- + . 
/var/tmp/venv/bin/activate && \ + ./diskimage-create.sh -o {{ ansible_user_dir }}/test-images/test-only-amphora-x64-haproxy-{{ amphora_os }}-{{ amphora_os_release }}.qcow2 \ + -i {{ amphora_os }} \ + -d {{ amphora_os_release }} \ + -s {{ amphora_image_size | default(2) }} + args: + chdir: "{{ ansible_user_dir }}/src/opendev.org/openstack/octavia/diskimage-create" + tags: + - skip_ansible_lint diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..2a38d6bc75 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["pbr>=6.1.1"] +build-backend = "pbr.build" diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder new file mode 100644 index 0000000000..e69de29bb2 diff --git a/releasenotes/notes/3rd-party-neutron-backends-deployment-82691be2bc72a786.yaml b/releasenotes/notes/3rd-party-neutron-backends-deployment-82691be2bc72a786.yaml new file mode 100644 index 0000000000..b2f0f2d855 --- /dev/null +++ b/releasenotes/notes/3rd-party-neutron-backends-deployment-82691be2bc72a786.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Added hooks to plugin.sh: `octavia_create_network_interface_device` and + `octavia_delete_network_interface_device`. If these functions are + defined, they are called during stack (respectively unstack) to create + (respectively delete) the management network interface. diff --git a/releasenotes/notes/Add-PROMETHEUS-listeners-1b3924680b409a1a.yaml b/releasenotes/notes/Add-PROMETHEUS-listeners-1b3924680b409a1a.yaml new file mode 100644 index 0000000000..f36df8717a --- /dev/null +++ b/releasenotes/notes/Add-PROMETHEUS-listeners-1b3924680b409a1a.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + Added a new PROMETHEUS listener that exposes a prometheus exporter + endpoint. +upgrade: + - | + PROMETHEUS listeners require an amphora image with HAProxy 2.0 or newer. +issues: + - | + PROMETHEUS listeners will not report information for UDP or SCTP listeners. diff --git a/releasenotes/notes/Add-SR-IOV-support-for-member-ports-b78d55469303f258.yaml b/releasenotes/notes/Add-SR-IOV-support-for-member-ports-b78d55469303f258.yaml new file mode 100644 index 0000000000..665daca332 --- /dev/null +++ b/releasenotes/notes/Add-SR-IOV-support-for-member-ports-b78d55469303f258.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Octavia Amphora based load balancers now support using SR-IOV virtual + functions (VF) on the member ports. +upgrade: + - | + You must update the amphora image to support the SR-IOV member port + feature. diff --git a/releasenotes/notes/Add-Stein-Prelude-7d8290b803db8c56.yaml b/releasenotes/notes/Add-Stein-Prelude-7d8290b803db8c56.yaml new file mode 100644 index 0000000000..d335c4d3bf --- /dev/null +++ b/releasenotes/notes/Add-Stein-Prelude-7d8290b803db8c56.yaml @@ -0,0 +1,23 @@ +--- +prelude: | + For the OpenStack Stein release, the Octavia team is excited to announce + support for: Octavia flavors, TLS client authentication, backend + re-encryption, and object tags. + + * Octavia flavors allow an operator to define "flavors" of load balancers, + such as "active-standby" or "single" using the amphora driver, that + configure the load balancer topology. The Amphora driver also supports + specifying the nova compute flavor to use for the load balancer amphora. + * TLS client authentication allows the listener to request a client + certificate from users connecting to the load balancer. This certificate + can then be checked against a CA certificate and optionally a certificate + revocation list. 
New HTTP header insertions allow passing client + certificate information to the backend members, while new L7 rules + allow you to take custom actions based on the content of the client + certificate. + * Backend re-encryption allows users to configure pools to initiate TLS + connections to the backend member servers. This enables load balancers + to authenticate and encrypt connections from the load balancer to the + backend member server. + * Object tags allow users to assign a list of strings to the load balancer + objects that can then be used for advanced API list filtering. diff --git a/releasenotes/notes/Add-TLS-client-auth-CA-certificate-6863f64a2fe70a6f.yaml b/releasenotes/notes/Add-TLS-client-auth-CA-certificate-6863f64a2fe70a6f.yaml new file mode 100644 index 0000000000..b1b76e1a54 --- /dev/null +++ b/releasenotes/notes/Add-TLS-client-auth-CA-certificate-6863f64a2fe70a6f.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + You can now specify a certificate authority certificate reference, on + listeners, for use with TLS client authentication. diff --git a/releasenotes/notes/Add-TLS-client-auth-CRL-d0722fd175bc2f51.yaml b/releasenotes/notes/Add-TLS-client-auth-CRL-d0722fd175bc2f51.yaml new file mode 100644 index 0000000000..1f8591cb29 --- /dev/null +++ b/releasenotes/notes/Add-TLS-client-auth-CRL-d0722fd175bc2f51.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + You can now provide a certificate revocation list reference for listeners + using TLS client authentication. +security: + - | + Note that the amphora provider currently only checks revocation against + the provided CRL file. Remote revocation lists and/or OCSP will not be + used by the amphora provider. diff --git a/releasenotes/notes/Add-TLS-client-auth-header-insertion-039debc7e6f06474.yaml b/releasenotes/notes/Add-TLS-client-auth-header-insertion-039debc7e6f06474.yaml new file mode 100644 index 0000000000..8e684e0383 --- /dev/null +++ b/releasenotes/notes/Add-TLS-client-auth-header-insertion-039debc7e6f06474.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + When using TLS client authentication on TERMINATED_HTTPS listeners, you can now insert the + following headers for backend members\: 'X-SSL-Client-Verify', 'X-SSL-Client-Has-Cert', + 'X-SSL-Client-DN', 'X-SSL-Client-CN', 'X-SSL-Issuer', 'X-SSL-Client-SHA1', + 'X-SSL-Client-Not-Before', 'X-SSL-Client-Not-After'. diff --git a/releasenotes/notes/Add-TLS-client-auth-option-15d868d1009fc130.yaml b/releasenotes/notes/Add-TLS-client-auth-option-15d868d1009fc130.yaml new file mode 100644 index 0000000000..babbf36955 --- /dev/null +++ b/releasenotes/notes/Add-TLS-client-auth-option-15d868d1009fc130.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + You can now enable TLS client authentication on listeners. diff --git a/releasenotes/notes/Add-UDP-protocol-support-9c011a23525092a1.yaml b/releasenotes/notes/Add-UDP-protocol-support-9c011a23525092a1.yaml new file mode 100644 index 0000000000..047c0a562f --- /dev/null +++ b/releasenotes/notes/Add-UDP-protocol-support-9c011a23525092a1.yaml @@ -0,0 +1,19 @@ +--- +features: + - Added UDP protocol support to listeners and pools. + - Adds a health monitor type of UDP-CONNECT that does a basic UDP port + connect. +issues: + - You cannot mix IPv4 UDP listeners with IPv6 members at this time. This is + being tracked in this story: + https://storyboard.openstack.org/#!/story/2003329 +upgrade: + - | + UDP protocol support requires an update to the amphora image to support + UDP protocol statistics reporting and UDP-CONNECT health monitoring. 
+other: + - | + Health monitors of type UDP-CONNECT may not work correctly if ICMP + unreachable is not enabled on the member server or is blocked by a security + rule. A member server may be marked as operating status ONLINE when it is + actually down. diff --git a/releasenotes/notes/Add-amphora-agent-config-update-API-298b31e6c0cd715c.yaml b/releasenotes/notes/Add-amphora-agent-config-update-API-298b31e6c0cd715c.yaml new file mode 100644 index 0000000000..b3959b183b --- /dev/null +++ b/releasenotes/notes/Add-amphora-agent-config-update-API-298b31e6c0cd715c.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + Octavia now has an administrative API that updates the amphora agent + configuration on running amphora. +upgrade: + - | + When the amphora agent configuration update API is called on an amphora + running a version of the amphora agent that does not support configuration + updates, an ERROR log message will be posted to the controller log file + indicating that the amphora does not support agent configuration updates. + In this case, the amphora image should be updated to a newer version. diff --git a/releasenotes/notes/Add-amphora-info-endpoint-e2e3b53ae5ab5a85.yaml b/releasenotes/notes/Add-amphora-info-endpoint-e2e3b53ae5ab5a85.yaml new file mode 100644 index 0000000000..5ae72bf1c1 --- /dev/null +++ b/releasenotes/notes/Add-amphora-info-endpoint-e2e3b53ae5ab5a85.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added a new endpoint /v2.0/octavia/amphorae to expose internal details + about amphorae. This endpoint is admin only. diff --git a/releasenotes/notes/Add-cached_zone-to-the-amphora-record-7c3231c2b5b96574.yaml b/releasenotes/notes/Add-cached_zone-to-the-amphora-record-7c3231c2b5b96574.yaml new file mode 100644 index 0000000000..053c75cecc --- /dev/null +++ b/releasenotes/notes/Add-cached_zone-to-the-amphora-record-7c3231c2b5b96574.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + The compute zone (if applicable) is now cached in the database and returned + in the Amphora API as `cached_zone`. Please note that this is only set at + the original time of provisioning, and could be stale for various reasons + (for example, if live-migrations have taken place due to maintenance). We + recommend it be used for reference only, unless you are absolutely certain + it is current in your environment. The source of truth is still the system + you use for compute. diff --git a/releasenotes/notes/Add-driver-agent-get-methods-b624a1342c3e6d0f.yaml b/releasenotes/notes/Add-driver-agent-get-methods-b624a1342c3e6d0f.yaml new file mode 100644 index 0000000000..77d9858fd3 --- /dev/null +++ b/releasenotes/notes/Add-driver-agent-get-methods-b624a1342c3e6d0f.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support for the driver agent to query for load balancer objects. diff --git a/releasenotes/notes/Add-log-offloading-233cd8612c0dd2b5.yaml b/releasenotes/notes/Add-log-offloading-233cd8612c0dd2b5.yaml new file mode 100644 index 0000000000..64c52cddbc --- /dev/null +++ b/releasenotes/notes/Add-log-offloading-233cd8612c0dd2b5.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + Octavia now supports Amphora log offloading. Operators can define syslog + targets for the Amphora administrative logs and for the tenant load + balancer flow logs. +issues: + - | + Amphorae are unable to provide tenant flow logs for UDP listeners. +upgrade: + - | + To enable log offloading, the amphora image needs to be updated. 
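The admin-only amphora endpoint introduced above lends itself to a quick illustration. A minimal sketch, assuming python-requests; the path and the `cached_zone` field come from the release notes, while the base URL, token, and `status` field are placeholders:

import requests

OCTAVIA_URL = "http://203.0.113.10:9876"   # placeholder endpoint
HEADERS = {"X-Auth-Token": "ADMIN_TOKEN"}  # placeholder admin token

# List amphorae and print the cached compute zone for each (admin only).
resp = requests.get(f"{OCTAVIA_URL}/v2.0/octavia/amphorae", headers=HEADERS)
resp.raise_for_status()
for amp in resp.json()["amphorae"]:
    print(amp["id"], amp.get("status"), amp.get("cached_zone"))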
diff --git a/releasenotes/notes/Add-pool-CA-and-CRL-bb467b17188ed022.yaml b/releasenotes/notes/Add-pool-CA-and-CRL-bb467b17188ed022.yaml new file mode 100644 index 0000000000..68dba05904 --- /dev/null +++ b/releasenotes/notes/Add-pool-CA-and-CRL-bb467b17188ed022.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + You can now specify a ca_tls_container_ref and crl_container_ref on pools + for validating backend pool members using TLS. diff --git a/releasenotes/notes/Add-pool-tls-client-auth-01d3b8acfb78ab14.yaml b/releasenotes/notes/Add-pool-tls-client-auth-01d3b8acfb78ab14.yaml new file mode 100644 index 0000000000..d57ee6f644 --- /dev/null +++ b/releasenotes/notes/Add-pool-tls-client-auth-01d3b8acfb78ab14.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + You can now specify a tls_container_ref on pools for TLS client + authentication to pool members. diff --git a/releasenotes/notes/Add-pool-tls_enabled-f189677c0e13c447.yaml b/releasenotes/notes/Add-pool-tls_enabled-f189677c0e13c447.yaml new file mode 100644 index 0000000000..5b1f599df5 --- /dev/null +++ b/releasenotes/notes/Add-pool-tls_enabled-f189677c0e13c447.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + You can now enable TLS backend re-encryption for connections to member + servers by enabling the tls_enabled option on pools. diff --git a/releasenotes/notes/Add-provider-agent-support-a735806c4da4c470.yaml b/releasenotes/notes/Add-provider-agent-support-a735806c4da4c470.yaml new file mode 100644 index 0000000000..f4a80aa6d5 --- /dev/null +++ b/releasenotes/notes/Add-provider-agent-support-a735806c4da4c470.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The Octavia driver-agent now supports starting provider driver agents. + Provider driver agents are long-running agent processes supporting + provider drivers. diff --git a/releasenotes/notes/Add-proxy-protocol-v2-90e4f5bf76138c69.yaml b/releasenotes/notes/Add-proxy-protocol-v2-90e4f5bf76138c69.yaml new file mode 100644 index 0000000000..985a6d6a6d --- /dev/null +++ b/releasenotes/notes/Add-proxy-protocol-v2-90e4f5bf76138c69.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for proxy protocol version 2. diff --git a/releasenotes/notes/Add-support-for-SR-IOV-VIPs-862858ec61e9955b.yaml b/releasenotes/notes/Add-support-for-SR-IOV-VIPs-862858ec61e9955b.yaml new file mode 100644 index 0000000000..7a0c78c889 --- /dev/null +++ b/releasenotes/notes/Add-support-for-SR-IOV-VIPs-862858ec61e9955b.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Octavia Amphora based load balancers now support using SR-IOV virtual + functions (VF) on the VIP port(s) of the load balancer. This is enabled + by using an Octavia Flavor that includes the 'sriov_vip': True setting. +upgrade: + - | + You must update the amphora image to support the SR-IOV VIP feature. diff --git a/releasenotes/notes/Added-RBAC-default-roles-and-scoping-0081627043f5c96d.yaml b/releasenotes/notes/Added-RBAC-default-roles-and-scoping-0081627043f5c96d.yaml new file mode 100644 index 0000000000..77f9b558cb --- /dev/null +++ b/releasenotes/notes/Added-RBAC-default-roles-and-scoping-0081627043f5c96d.yaml @@ -0,0 +1,24 @@ +--- +features: + - | + Added support for keystone default roles and system token scopes. +upgrade: + - | + Legacy Octavia Advanced RBAC policies will continue to function as before + as long as the [oslo_policy] enforce_scope = False and + enforce_new_defaults = False settings are present (this is the current + oslo.policy default). 
However, we highly recommend you update your + user roles to follow the new keystone default roles and start using scoped + tokens as appropriate. + See the `Octavia Policies + `_ + administration guide for more information. +deprecations: + - | + Legacy Octavia Advanced RBAC policies without the keystone default roles + and/or token scoping are deprecated as of the Wallaby release. + The oslo.policy project may change the default settings requiring the + keystone default roles and scoped tokens in a future release. Please see + the upgrade section in these release notes and the `Octavia Policies + `_ + administration guide for more information. diff --git a/releasenotes/notes/Adds-L7rule-support-for-TLS-client-authentication-22e3ae29aaf7fc26.yaml b/releasenotes/notes/Adds-L7rule-support-for-TLS-client-authentication-22e3ae29aaf7fc26.yaml new file mode 100644 index 0000000000..30d819591c --- /dev/null +++ b/releasenotes/notes/Adds-L7rule-support-for-TLS-client-authentication-22e3ae29aaf7fc26.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Adds the ability to define L7 rules based on TLS client authentication + information. The new L7 rules are\: "L7RULE_TYPE_SSL_CONN_HAS_CERT", + "L7RULE_TYPE_VERIFY_RESULT", and "L7RULE_TYPE_DN_FIELD". diff --git a/releasenotes/notes/Allow-configuration-of-listener-timeout-values-9a7600c4e21364e3.yaml b/releasenotes/notes/Allow-configuration-of-listener-timeout-values-9a7600c4e21364e3.yaml new file mode 100644 index 0000000000..8ba21006f1 --- /dev/null +++ b/releasenotes/notes/Allow-configuration-of-listener-timeout-values-9a7600c4e21364e3.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + Listeners have four new timeout settings: + + * `timeout_client_data`: Frontend client inactivity timeout + * `timeout_member_connect`: Backend member connection timeout + * `timeout_member_data`: Backend member inactivity timeout + * `timeout_tcp_inspect`: Time to wait for TCP packets for content inspection + + The value for all of these fields is expected to be in milliseconds. diff --git a/releasenotes/notes/Allow-members-to-be-set-as-backup-e68e46bc52f2fc1f.yaml b/releasenotes/notes/Allow-members-to-be-set-as-backup-e68e46bc52f2fc1f.yaml new file mode 100644 index 0000000000..b784dd8eae --- /dev/null +++ b/releasenotes/notes/Allow-members-to-be-set-as-backup-e68e46bc52f2fc1f.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Members have a new boolean option `backup`. When set to `true`, the member + will not receive traffic until all non-backup members are offline. Once + all non-backup members are offline, traffic will begin balancing between + the backup members. diff --git a/releasenotes/notes/Amphora-Failover-API-612090f761936254.yaml b/releasenotes/notes/Amphora-Failover-API-612090f761936254.yaml new file mode 100644 index 0000000000..2fe3821e4c --- /dev/null +++ b/releasenotes/notes/Amphora-Failover-API-612090f761936254.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Added the 'failover' sub-resource for the Amphora API. Each amphora can be + triggered to failover by sending a PUT (with an empty body) to the resource + ``/v2.0/octavia/amphorae/<amphora-id>/failover``. It will cause the amphora to be + recycled and replaced, in the same way as the health-triggered failover. 
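A rough sketch of the failover sub-resource described in the note above; the PUT-with-empty-body semantics come from the release note, while the base URL, token, and amphora ID are placeholders:

import requests

OCTAVIA_URL = "http://203.0.113.10:9876"   # placeholder endpoint
HEADERS = {"X-Auth-Token": "ADMIN_TOKEN"}  # placeholder admin token
amphora_id = "3f5e3a6b-0000-0000-0000-000000000000"  # hypothetical ID

# A PUT with an empty body triggers the amphora to be recycled and replaced.
resp = requests.put(
    f"{OCTAVIA_URL}/v2.0/octavia/amphorae/{amphora_id}/failover",
    headers=HEADERS)
print(resp.status_code)  # the request is accepted and processed asynchronously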
diff --git a/releasenotes/notes/Change-HTTPS-HealthMonitor-functionality-79240ef13e65cd88.yaml b/releasenotes/notes/Change-HTTPS-HealthMonitor-functionality-79240ef13e65cd88.yaml new file mode 100644 index 0000000000..dad31e76fd --- /dev/null +++ b/releasenotes/notes/Change-HTTPS-HealthMonitor-functionality-79240ef13e65cd88.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + New Health Monitor type "TLS-HELLO" to perform a simple TLS connection. +upgrade: + - | + If users have configured Health Monitors of type "HTTPS" and are expecting + a simple "TLS-HELLO" check, they will need to recreate their monitor with + the new "TLS-HELLO" type. +fixes: + - | + Health Monitor type "HTTPS" now correctly performs the configured check. + This is done with all certificate validation disabled, so it will not work + if backend members are performing client certificate validation. diff --git a/releasenotes/notes/Correct-naming-for-quota-resources-8e4309a839208cd1.yaml b/releasenotes/notes/Correct-naming-for-quota-resources-8e4309a839208cd1.yaml new file mode 100644 index 0000000000..46c72582a9 --- /dev/null +++ b/releasenotes/notes/Correct-naming-for-quota-resources-8e4309a839208cd1.yaml @@ -0,0 +1,7 @@ +--- +deprecations: + - | + The quota objects named `health_monitor` and `load_balancer` have been + renamed to `healthmonitor` and `loadbalancer`, respectively. The old names + are deprecated, and will be removed in the T cycle. + diff --git a/releasenotes/notes/Deprecate-user-data-99325dbe5361b536.yaml b/releasenotes/notes/Deprecate-user-data-99325dbe5361b536.yaml new file mode 100644 index 0000000000..a8f29fe5dd --- /dev/null +++ b/releasenotes/notes/Deprecate-user-data-99325dbe5361b536.yaml @@ -0,0 +1,7 @@ +--- +deprecations: + - | + The configuration option *user_data_config_drive* is deprecated. The nova + user_data option is too small to replace the normal file based config_drive + provisioning for cloud-init. This option has never been functional in + Octavia and will be removed to reduce confusion. diff --git a/releasenotes/notes/EnforceApplicationJSONContentType-65ad696565eac75c.yaml b/releasenotes/notes/EnforceApplicationJSONContentType-65ad696565eac75c.yaml new file mode 100644 index 0000000000..068709b8c6 --- /dev/null +++ b/releasenotes/notes/EnforceApplicationJSONContentType-65ad696565eac75c.yaml @@ -0,0 +1,12 @@ +--- +upgrade: + - | + The Octavia API will now check that the HTTP Accept header, if present, is + compatible with the application/json content type. If not, the user will + get a 406 (Not Acceptable) status code response. +fixes: + - | + The Octavia API will now check that the HTTP Accept header, if present, is + compatible with the application/json content type. If not, the user will + get a 406 (Not Acceptable) status code response. This change also ensures + that the API responses have a content type of application/json. diff --git a/releasenotes/notes/Fix-API-update-null-None-1b400962017a3d56.yaml b/releasenotes/notes/Fix-API-update-null-None-1b400962017a3d56.yaml new file mode 100644 index 0000000000..b77e203f46 --- /dev/null +++ b/releasenotes/notes/Fix-API-update-null-None-1b400962017a3d56.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed the API handling of None (JSON null) on object update calls. The + API will now either clear the value from the field or will reset the value + of the field to the API default. 
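The JSON-null and Accept-header behaviors above can be illustrated together in one sketch, assuming python-requests and placeholder URL, token, and load balancer ID:

import requests

OCTAVIA_URL = "http://203.0.113.10:9876"  # placeholder endpoint
HEADERS = {
    "X-Auth-Token": "USER_TOKEN",    # placeholder token
    "Accept": "application/json",    # an incompatible Accept header now yields 406
}
lb_id = "8a1f9a21-0000-0000-0000-000000000000"  # hypothetical ID

# Sending JSON null (Python None) for a field clears it or resets it
# to the API default, per the fix note above.
resp = requests.put(
    f"{OCTAVIA_URL}/v2/lbaas/loadbalancers/{lb_id}",
    headers=HEADERS,
    json={"loadbalancer": {"description": None}})
print(resp.status_code)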
diff --git a/releasenotes/notes/Fix-Amphora-Config-Update-06b649883c7a4f44.yaml b/releasenotes/notes/Fix-Amphora-Config-Update-06b649883c7a4f44.yaml new file mode 100644 index 0000000000..80b637b5b5 --- /dev/null +++ b/releasenotes/notes/Fix-Amphora-Config-Update-06b649883c7a4f44.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed a bug where the Amphora configuration update would only update the + Amphora agent configuration, but the health sender would not be updated + with the new controller IP list. diff --git a/releasenotes/notes/Fix-HM-DB-Rollback-no-connection-2664c4f7823ecaec.yaml b/releasenotes/notes/Fix-HM-DB-Rollback-no-connection-2664c4f7823ecaec.yaml new file mode 100644 index 0000000000..1d15db31c0 --- /dev/null +++ b/releasenotes/notes/Fix-HM-DB-Rollback-no-connection-2664c4f7823ecaec.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue with the health manager reporting an UnboundLocalError if + it gets an exception attempting to get a database connection. diff --git a/releasenotes/notes/Fix-Listener-Update-for-SRIOV-VIPs-8348b7fe0c02b9c4.yaml b/releasenotes/notes/Fix-Listener-Update-for-SRIOV-VIPs-8348b7fe0c02b9c4.yaml new file mode 100644 index 0000000000..1598881d83 --- /dev/null +++ b/releasenotes/notes/Fix-Listener-Update-for-SRIOV-VIPs-8348b7fe0c02b9c4.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed an issue updating listeners when using SR-IOV VIP ports. diff --git a/releasenotes/notes/Fix-SR-IOV-when-VIP-interface-is-used-for-members-adb150ece454ecff.yaml b/releasenotes/notes/Fix-SR-IOV-when-VIP-interface-is-used-for-members-adb150ece454ecff.yaml new file mode 100644 index 0000000000..7ba615467a --- /dev/null +++ b/releasenotes/notes/Fix-SR-IOV-when-VIP-interface-is-used-for-members-adb150ece454ecff.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a bug in the VIP SR-IOV implementation that would cause load balancer + members that use the SR-IOV VIP interface to not receive traffic. diff --git a/releasenotes/notes/Fix-UDP-Health-monitor-update-without-delay-c56240e59e15483f.yaml b/releasenotes/notes/Fix-UDP-Health-monitor-update-without-delay-c56240e59e15483f.yaml new file mode 100644 index 0000000000..40c7450492 --- /dev/null +++ b/releasenotes/notes/Fix-UDP-Health-monitor-update-without-delay-c56240e59e15483f.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed an error when updating a UDP Health Monitor with an empty "delay" parameter. diff --git a/releasenotes/notes/Fix-UDP-member-server-rebalance-74c67cb09c7c529a.yaml b/releasenotes/notes/Fix-UDP-member-server-rebalance-74c67cb09c7c529a.yaml new file mode 100644 index 0000000000..4ae0686d68 --- /dev/null +++ b/releasenotes/notes/Fix-UDP-member-server-rebalance-74c67cb09c7c529a.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + UDP load balancers will require a failover to fix the UDP rebalance issue + once the control plane is updated. +fixes: + - | + Fixed an issue where UDP listeners may not rebalance failed member servers + in a timely fashion. It could take up to five minutes for a failed + member server to be removed from existing flows. diff --git a/releasenotes/notes/Fix-allocate_and_associate-deadlock-3ff1464421c1d464.yaml b/releasenotes/notes/Fix-allocate_and_associate-deadlock-3ff1464421c1d464.yaml new file mode 100644 index 0000000000..8f2e3fabbc --- /dev/null +++ b/releasenotes/notes/Fix-allocate_and_associate-deadlock-3ff1464421c1d464.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixes a potential DB deadlock in allocate_and_associate found in testing. 
diff --git a/releasenotes/notes/Fix-disable-sshd-470ba6a09278df69.yaml b/releasenotes/notes/Fix-disable-sshd-470ba6a09278df69.yaml new file mode 100644 index 0000000000..693f8c6884 --- /dev/null +++ b/releasenotes/notes/Fix-disable-sshd-470ba6a09278df69.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a bug that caused SSH to not be disabled in amphora images created + with the "-n" flag. diff --git a/releasenotes/notes/Fix-duplicate-VIP-IP-error-reporting-253c88cca4fed73d.yaml b/releasenotes/notes/Fix-duplicate-VIP-IP-error-reporting-253c88cca4fed73d.yaml new file mode 100644 index 0000000000..d20caca52f --- /dev/null +++ b/releasenotes/notes/Fix-duplicate-VIP-IP-error-reporting-253c88cca4fed73d.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes the error reporting when a user requests a VIP IP address that is + already in use. diff --git a/releasenotes/notes/Fix-failover-for-SRIOV-VIPs-e2ab193c0de5eb1d.yaml b/releasenotes/notes/Fix-failover-for-SRIOV-VIPs-e2ab193c0de5eb1d.yaml new file mode 100644 index 0000000000..5744b6bbf0 --- /dev/null +++ b/releasenotes/notes/Fix-failover-for-SRIOV-VIPs-e2ab193c0de5eb1d.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed an issue when failing over load balancers using SR-IOV VIP ports. diff --git a/releasenotes/notes/Fix-failover-ip-addresses-exhausted-69110b2fa4683e1a.yaml b/releasenotes/notes/Fix-failover-ip-addresses-exhausted-69110b2fa4683e1a.yaml new file mode 100644 index 0000000000..692822f833 --- /dev/null +++ b/releasenotes/notes/Fix-failover-ip-addresses-exhausted-69110b2fa4683e1a.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue with load balancer failover, when the VIP subnet is out of + IP addresses, that could lead to the VIP being deallocated. diff --git a/releasenotes/notes/Fix-failover-revert-port-leak-d9879523506c6ff3.yaml b/releasenotes/notes/Fix-failover-revert-port-leak-d9879523506c6ff3.yaml new file mode 100644 index 0000000000..79610cebc4 --- /dev/null +++ b/releasenotes/notes/Fix-failover-revert-port-leak-d9879523506c6ff3.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed an issue where a neutron port could be abandoned when a failover + reverted. The issue was logged with "Failed to delete port", + "Resources may still be in use for a port intended for amphora", and + "Search for a port named octavia-lb-vrrp-". diff --git a/releasenotes/notes/Fix-healthcheck-text-plain-mime-type-134485abb8bcea0c.yaml b/releasenotes/notes/Fix-healthcheck-text-plain-mime-type-134485abb8bcea0c.yaml new file mode 100644 index 0000000000..8a71895386 --- /dev/null +++ b/releasenotes/notes/Fix-healthcheck-text-plain-mime-type-134485abb8bcea0c.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed the ability to use the 'text/plain' mime type with the healthcheck + endpoint. 
diff --git a/releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml b/releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml new file mode 100644 index 0000000000..7266316b84 --- /dev/null +++ b/releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed an issue where, when a load balancer was disabled, the Octavia + Health Manager kept failing over its amphorae. + diff --git a/releasenotes/notes/Fix-ifup-on-member-create-5b405d98eb036718.yaml b/releasenotes/notes/Fix-ifup-on-member-create-5b405d98eb036718.yaml new file mode 100644 index 0000000000..1304dd94bd --- /dev/null +++ b/releasenotes/notes/Fix-ifup-on-member-create-5b405d98eb036718.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed an issue creating members on networks with IPv6 subnets. diff --git a/releasenotes/notes/Fix-limit-pagination-less-or-equal-zero-93a33f1318ea34e5.yaml b/releasenotes/notes/Fix-limit-pagination-less-or-equal-zero-93a33f1318ea34e5.yaml new file mode 100644 index 0000000000..3ea3a967a3 --- /dev/null +++ b/releasenotes/notes/Fix-limit-pagination-less-or-equal-zero-93a33f1318ea34e5.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed an issue where a "limit" parameter less than or equal to 0 in a + request caused an error. Now resources are returned according to + pagination_max_limit, as expected. diff --git a/releasenotes/notes/Fix-listener-delete-causing-failover-251efdb79af24c0a.yaml b/releasenotes/notes/Fix-listener-delete-causing-failover-251efdb79af24c0a.yaml new file mode 100644 index 0000000000..60c99fae8f --- /dev/null +++ b/releasenotes/notes/Fix-listener-delete-causing-failover-251efdb79af24c0a.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where deleting the last listener from a load balancer could + trigger a failover. diff --git a/releasenotes/notes/Fix-noop-batch-member-update-issue-09b76787553e7752.yaml b/releasenotes/notes/Fix-noop-batch-member-update-issue-09b76787553e7752.yaml new file mode 100644 index 0000000000..39e45ecbe2 --- /dev/null +++ b/releasenotes/notes/Fix-noop-batch-member-update-issue-09b76787553e7752.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where batch member updates that don't have any changes + were not properly rolled back. diff --git a/releasenotes/notes/Fix-plug-vip-revert-abandoned-vrrp-port-efff14edce62ad75.yaml b/releasenotes/notes/Fix-plug-vip-revert-abandoned-vrrp-port-efff14edce62ad75.yaml new file mode 100644 index 0000000000..3de4e901d9 --- /dev/null +++ b/releasenotes/notes/Fix-plug-vip-revert-abandoned-vrrp-port-efff14edce62ad75.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue where, if we were unable to attach the base (VRRP) port to + an amphora instance, the revert would not clean up the port in neutron. diff --git a/releasenotes/notes/Fix-pool-alpn-older-haproxy-50514c1df4f77bcd.yaml b/releasenotes/notes/Fix-pool-alpn-older-haproxy-50514c1df4f77bcd.yaml new file mode 100644 index 0000000000..d7418ebfad --- /dev/null +++ b/releasenotes/notes/Fix-pool-alpn-older-haproxy-50514c1df4f77bcd.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed amphora driver pool ALPN compatibility with older amphora images. +upgrade: + - | + Support for new features, such as ALPN on pools, HTTP/2 on pools, + gRPC, and SCTP require an updated amphora image. 
diff --git a/releasenotes/notes/Handle-blank-cert-subjects-b660d403ce56b0b8.yaml b/releasenotes/notes/Handle-blank-cert-subjects-b660d403ce56b0b8.yaml new file mode 100644 index 0000000000..bccbd6db6d --- /dev/null +++ b/releasenotes/notes/Handle-blank-cert-subjects-b660d403ce56b0b8.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed an issue when using certificates with a blank subject or missing CN. diff --git a/releasenotes/notes/IPv6-support-953ef81ed8555fce.yaml b/releasenotes/notes/IPv6-support-953ef81ed8555fce.yaml new file mode 100644 index 0000000000..24c866b044 --- /dev/null +++ b/releasenotes/notes/IPv6-support-953ef81ed8555fce.yaml @@ -0,0 +1,7 @@ +--- +features: + - Adds support for IPv6 +upgrade: + - To support IPv6 a database migration and amphora image update are required. +fixes: + - Resolves an issue with subnets larger than /24 diff --git a/releasenotes/notes/Increase-TCP-buffer-memory-max-and-enable-mtu-black-hole-detection.-0640432a7202400f.yaml b/releasenotes/notes/Increase-TCP-buffer-memory-max-and-enable-mtu-black-hole-detection.-0640432a7202400f.yaml new file mode 100644 index 0000000000..f8793f469b --- /dev/null +++ b/releasenotes/notes/Increase-TCP-buffer-memory-max-and-enable-mtu-black-hole-detection.-0640432a7202400f.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Increased the TCP buffer memory maximum and enabled MTU ICMP black hole + detection. diff --git a/releasenotes/notes/Make-keystone-default-rules-the-default-RBAC-989c51ab2e319549.yaml b/releasenotes/notes/Make-keystone-default-rules-the-default-RBAC-989c51ab2e319549.yaml new file mode 100644 index 0000000000..102e3c4aa6 --- /dev/null +++ b/releasenotes/notes/Make-keystone-default-rules-the-default-RBAC-989c51ab2e319549.yaml @@ -0,0 +1,24 @@ +--- +upgrade: + - | + When upgrading, the default RBAC rules will switch from Octavia Advanced + RBAC to the keystone default roles. This means the load_balancer_* roles + will no longer have access to the load balancer API. To continue to use + the Octavia Advanced RBAC rules, please use the + octavia-advanced-rbac-policy.yaml override file provided. +critical: + - | + When upgrading, the default RBAC rules will switch from Octavia Advanced + RBAC to the keystone default roles. This means the load_balancer_* roles + will no longer have access to the load balancer API. To continue to use + the Octavia Advanced RBAC rules, please use the + octavia-advanced-rbac-policy.yaml override file provided. +security: + - | + When upgrading, the default RBAC rules will switch from Octavia Advanced + RBAC to the keystone default roles. This means the load_balancer_* roles + will no longer have access to the load balancer API. To continue to use + the Octavia Advanced RBAC rules, please use the + octavia-advanced-rbac-policy.yaml override file provided. Note: the + keystone default roles are less restrictive than the Octavia Advanced RBAC + rules and you will no longer have global observer or quota specific roles. diff --git a/releasenotes/notes/Octavia-flavors-2a96424c3d65c224.yaml b/releasenotes/notes/Octavia-flavors-2a96424c3d65c224.yaml new file mode 100644 index 0000000000..566bfe234f --- /dev/null +++ b/releasenotes/notes/Octavia-flavors-2a96424c3d65c224.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Octavia now has flavors support which allows the operator to define + named, custom configurations that users can select from when creating + a load balancer. 
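A minimal sketch of the flavors workflow described in the note above, assuming python-requests and placeholder URL and token; the `flavorprofiles`/`flavors` paths and the `loadbalancer_topology` key are believed to match the Octavia v2 API, and `flavor_data` is a JSON-encoded string:

import json
import requests

OCTAVIA_URL = "http://203.0.113.10:9876"   # placeholder endpoint
HEADERS = {"X-Auth-Token": "ADMIN_TOKEN"}  # placeholder admin token

# A flavor profile ties provider-specific data to a provider driver.
fp = requests.post(
    f"{OCTAVIA_URL}/v2.0/lbaas/flavorprofiles", headers=HEADERS,
    json={"flavorprofile": {
        "name": "active-standby",
        "provider_name": "amphora",
        "flavor_data": json.dumps(
            {"loadbalancer_topology": "ACTIVE_STANDBY"}),
    }}).json()["flavorprofile"]

# The user-visible flavor references the profile by ID.
requests.post(
    f"{OCTAVIA_URL}/v2.0/lbaas/flavors", headers=HEADERS,
    json={"flavor": {
        "name": "active-standby",
        "flavor_profile_id": fp["id"],
        "enabled": True,
    }})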
diff --git a/releasenotes/notes/Octavia-lib-transition-driver-agent-aeefef114898b8f5.yaml b/releasenotes/notes/Octavia-lib-transition-driver-agent-aeefef114898b8f5.yaml new file mode 100644 index 0000000000..62e66466bc --- /dev/null +++ b/releasenotes/notes/Octavia-lib-transition-driver-agent-aeefef114898b8f5.yaml @@ -0,0 +1,18 @@ +--- +features: + - | + The Stein release of Octavia introduces the octavia-lib python module. + This library enables provider drivers to integrate more easily with the + Octavia API by providing a shared set of coding objects and interfaces. +upgrade: + - | + The Stein release of Octavia adds the driver-agent controller process. + This process is deployed along with the Octavia API process and uses + unix domain sockets for communication between the provider drivers using + octavia-lib and the driver-agent. + When upgrading to Stein, operators should make sure that the + /var/run/octavia directory is available for the driver-agent with the + appropriate ownership and permissions for the driver-agent and API + processes to access it. The operator may need to make sure the driver-agent + process starts after installation. For example, a systemd service may need + to be created and enabled for it. diff --git a/releasenotes/notes/Remove-netaddr-requirement-0ce7f8605a86172a.yaml b/releasenotes/notes/Remove-netaddr-requirement-0ce7f8605a86172a.yaml new file mode 100644 index 0000000000..f3aa45ec5e --- /dev/null +++ b/releasenotes/notes/Remove-netaddr-requirement-0ce7f8605a86172a.yaml @@ -0,0 +1,5 @@ +--- +other: + - | + The netaddr python module has been removed as an Octavia requirement. It + has been replaced with the python standard library 'ipaddress' module. diff --git a/releasenotes/notes/Report-more-accurate-haproxy-statuses-7e995bb4c7cc0dd6.yaml b/releasenotes/notes/Report-more-accurate-haproxy-statuses-7e995bb4c7cc0dd6.yaml new file mode 100644 index 0000000000..59b27795a0 --- /dev/null +++ b/releasenotes/notes/Report-more-accurate-haproxy-statuses-7e995bb4c7cc0dd6.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Some versions of HAProxy incorrectly reported nodes in DRAIN status as + being UP, and Octavia code was written around this incorrect reporting. + This has been fixed in some versions of HAProxy and is now handled + properly in Octavia as well. Now it is possible for members to be in the + status DRAINING. Note that this is masked when statuses are forwarded to + neutron-lbaas in the eventstream, so no compatibility change is necessary. diff --git a/releasenotes/notes/Set-retry-defaults-to-prod-values-f3cc10d16baa716a.yaml b/releasenotes/notes/Set-retry-defaults-to-prod-values-f3cc10d16baa716a.yaml new file mode 100644 index 0000000000..b1a1430cc0 --- /dev/null +++ b/releasenotes/notes/Set-retry-defaults-to-prod-values-f3cc10d16baa716a.yaml @@ -0,0 +1,25 @@ +--- +upgrade: + - | + We have changed the [haproxy_amphora] connection_max_retries and + build_active_retries default values from 300 to 120. This means load + balancer builds will wait for ten minutes instead of twenty-five minutes + for nova to boot the virtual machine. + We feel these are more reasonable default values for most production + deployments and provide a better user experience. + If you are running nova in a nested virtualization environment, meaning + nova is booting VMs inside another VM, and you do not have nested + virtualization enabled in the bottom hypervisor, you may need to set these + values back up to 300. 
+other: + - | + We have changed the [haproxy_amphora] connection_max_retries and + build_active_retries default values from 300 to 120. This means load + balancer builds will wait for ten minutes instead of twenty-five minutes + for nova to boot the virtual machine. + We feel these are more reasonable default values for most production + deployments and provide a better user experience. + If you are running nova in a nested virtualization environment, meaning + nova is booting VMs inside another VM, and you do not have nested + virtualization enabled in the bottom hypervisor, you may need to set these + values back up to 300. diff --git a/releasenotes/notes/Support-PKCS12-certificate-objects-1c6e896be9d35977.yaml b/releasenotes/notes/Support-PKCS12-certificate-objects-1c6e896be9d35977.yaml new file mode 100644 index 0000000000..aca42bd879 --- /dev/null +++ b/releasenotes/notes/Support-PKCS12-certificate-objects-1c6e896be9d35977.yaml @@ -0,0 +1,21 @@ +--- +features: + - | + Users can now use a reference to a single PKCS12 bundle as their + `default_tls_container_ref` instead of a Barbican container with + individual secret objects. PKCS12 supports bundling a private key, + certificate, and intermediates. Private keys can no longer be passphrase + protected when using PKCS12 bundles. + No configuration change is necessary to enable this feature. Users may + simply begin using this. Any use of the old style containers will be + detected, and Octavia will automatically fall back to using the old + Barbican driver. + - | + Certificate bundles can now be stored in any backend Castellan supports, + and can be retrieved via a Castellan driver, even if Barbican is not + deployed. +security: + - | + Private keys can no longer be password protected, as PKCS12 does not + support storing a passphrase in an explicitly defined way. Note that this + is not noticeably less secure than storing a passphrase protected private + key in the same place as the passphrase, as was the case with Barbican. diff --git a/releasenotes/notes/UDP-listener-health-d8fdf64a32e022d4.yaml b/releasenotes/notes/UDP-listener-health-d8fdf64a32e022d4.yaml new file mode 100644 index 0000000000..6fb446e2a2 --- /dev/null +++ b/releasenotes/notes/UDP-listener-health-d8fdf64a32e022d4.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + To enable UDP listener monitoring when no pool is attached, the amphora + image needs to be updated and load balancers with UDP listeners need + to be failed over to the new image. diff --git a/releasenotes/notes/Updated-default-TLS-cipher-suites-7359fe87fc246618.yaml b/releasenotes/notes/Updated-default-TLS-cipher-suites-7359fe87fc246618.yaml new file mode 100644 index 0000000000..ae751bccf3 --- /dev/null +++ b/releasenotes/notes/Updated-default-TLS-cipher-suites-7359fe87fc246618.yaml @@ -0,0 +1,24 @@ +--- +upgrade: + - | + The default TLS cipher suite list has been updated to the current + 'intermediate' recommendations. Load balancers will need to be failed over + to use the new default list if the operator and user opted for the Octavia + default cipher list. +security: + - | + Updated the default TLS cipher suites based on current OWASP/Mozilla.org + recommendations for Intermediate compatibility. 
The new default list is\: + + - TLS_AES_128_GCM_SHA256 + - TLS_AES_256_GCM_SHA384 + - TLS_CHACHA20_POLY1305_SHA256 + - ECDHE-ECDSA-AES128-GCM-SHA256 + - ECDHE-RSA-AES128-GCM-SHA256 + - ECDHE-ECDSA-AES256-GCM-SHA384 + - ECDHE-RSA-AES256-GCM-SHA384 + - ECDHE-ECDSA-CHACHA20-POLY1305 + - ECDHE-RSA-CHACHA20-POLY1305 + - DHE-RSA-AES128-GCM-SHA256 + - DHE-RSA-AES256-GCM-SHA384 + - DHE-RSA-CHACHA20-POLY1305 diff --git a/releasenotes/notes/Use-Ubuntu-virtual-kernel-for-Amphora-a1e8af8bc7893011.yaml b/releasenotes/notes/Use-Ubuntu-virtual-kernel-for-Amphora-a1e8af8bc7893011.yaml new file mode 100644 index 0000000000..e2357593a6 --- /dev/null +++ b/releasenotes/notes/Use-Ubuntu-virtual-kernel-for-Amphora-a1e8af8bc7893011.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + The default kernel for the amphora image has switched from + linux-image-generic to linux-image-virtual, resulting in an image size + reduction of about 150MB. The linux-image-virtual kernel works with kvm, + qemu tcg, and Xen hypervisors among others. diff --git a/releasenotes/notes/Use-nftables-is-now-True-e1da3f92a4907b8c.yaml b/releasenotes/notes/Use-nftables-is-now-True-e1da3f92a4907b8c.yaml new file mode 100644 index 0000000000..2d4387825b --- /dev/null +++ b/releasenotes/notes/Use-nftables-is-now-True-e1da3f92a4907b8c.yaml @@ -0,0 +1,4 @@ +--- +other: + - | + Amphora images will now be built with nftables by default. diff --git a/releasenotes/notes/add-aarch64-amphora-image-support-7b0859f89d9092f8.yaml b/releasenotes/notes/add-aarch64-amphora-image-support-7b0859f89d9092f8.yaml new file mode 100644 index 0000000000..60b21b10a5 --- /dev/null +++ b/releasenotes/notes/add-aarch64-amphora-image-support-7b0859f89d9092f8.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added aarch64/arm64 amphora image support to the disk image create tool + and to the devstack plugin. diff --git a/releasenotes/notes/add-ability-setting-barbican-acls-85f36747d4284035.yaml b/releasenotes/notes/add-ability-setting-barbican-acls-85f36747d4284035.yaml new file mode 100644 index 0000000000..807f65399c --- /dev/null +++ b/releasenotes/notes/add-ability-setting-barbican-acls-85f36747d4284035.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Added ability for Octavia to automatically set Barbican ACLs on behalf of + the user. This enables users to create TLS-terminated listeners without + having to add the Octavia keystone user id to the ACL list. Octavia will + also automatically revoke access to secrets whenever load balancing + resources no longer require access to them. diff --git a/releasenotes/notes/add-ability-to-disable-tls-terminated-listeners-965ec7c1a8a9f732.yaml b/releasenotes/notes/add-ability-to-disable-tls-terminated-listeners-965ec7c1a8a9f732.yaml new file mode 100644 index 0000000000..4d819d273a --- /dev/null +++ b/releasenotes/notes/add-ability-to-disable-tls-terminated-listeners-965ec7c1a8a9f732.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add a config variable to disable creation of TLS Terminated listeners. diff --git a/releasenotes/notes/add-amphora-alpn-h2-support-dfa9a86b2c06f354.yaml b/releasenotes/notes/add-amphora-alpn-h2-support-dfa9a86b2c06f354.yaml new file mode 100644 index 0000000000..4f25df1343 --- /dev/null +++ b/releasenotes/notes/add-amphora-alpn-h2-support-dfa9a86b2c06f354.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added HTTP/2 over TLS support via ALPN protocol negotiation to the + amphora provider driver. Feature available in amphora images with HAProxy + 2.0 or newer. 
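A sketch of creating a TERMINATED_HTTPS listener that negotiates HTTP/2 via ALPN, as described in the note above; the `alpn_protocols` field is named in the release notes, while the URL, token, IDs, and certificate reference are placeholders:

import requests

OCTAVIA_URL = "http://203.0.113.10:9876"  # placeholder endpoint
HEADERS = {"X-Auth-Token": "USER_TOKEN"}  # placeholder token
lb_id = "8a1f9a21-0000-0000-0000-000000000000"            # hypothetical ID
cert_ref = "https://barbican.example.com/v1/secrets/UUID"  # hypothetical ref

resp = requests.post(
    f"{OCTAVIA_URL}/v2/lbaas/listeners", headers=HEADERS,
    json={"listener": {
        "loadbalancer_id": lb_id,
        "name": "https-h2",
        "protocol": "TERMINATED_HTTPS",
        "protocol_port": 443,
        "default_tls_container_ref": cert_ref,
        # Offer HTTP/2 first, fall back to HTTP/1.1.
        "alpn_protocols": ["h2", "http/1.1"],
    }})
print(resp.status_code)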
diff --git a/releasenotes/notes/add-amphora-delete-69badba140f7b228.yaml b/releasenotes/notes/add-amphora-delete-69badba140f7b228.yaml new file mode 100644 index 0000000000..d534dc786c --- /dev/null +++ b/releasenotes/notes/add-amphora-delete-69badba140f7b228.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added the ability to delete amphora that are not in use. diff --git a/releasenotes/notes/add-amphora-flavor-field-54d42da0381ced7f.yaml b/releasenotes/notes/add-amphora-flavor-field-54d42da0381ced7f.yaml new file mode 100644 index 0000000000..2975feed8e --- /dev/null +++ b/releasenotes/notes/add-amphora-flavor-field-54d42da0381ced7f.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The Amphora API can now return the field `compute_flavor`, which is the ID + of the compute instance flavor used to boot the amphora. diff --git a/releasenotes/notes/add-amphora-image-tag-capability-ba2ea034bc01ab48.yaml b/releasenotes/notes/add-amphora-image-tag-capability-ba2ea034bc01ab48.yaml new file mode 100644 index 0000000000..acb210c7e9 --- /dev/null +++ b/releasenotes/notes/add-amphora-image-tag-capability-ba2ea034bc01ab48.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Operators can now use the 'amp_image_tag' Octavia flavor capability when + using the amphora provider driver. This allows custom amphora images to be + used per-load balancer. If this is not defined in an Octavia flavor, the + amp_image_tag Octavia configuration file setting will continue to be used. diff --git a/releasenotes/notes/add-anti-affinity-policy-config-39df309fd12d443c.yaml b/releasenotes/notes/add-anti-affinity-policy-config-39df309fd12d443c.yaml new file mode 100644 index 0000000000..24345a9c16 --- /dev/null +++ b/releasenotes/notes/add-anti-affinity-policy-config-39df309fd12d443c.yaml @@ -0,0 +1,3 @@ +--- +features: + - Adds a new config parameter to specify the anti-affinity policy diff --git a/releasenotes/notes/add-api-tag-filtering-8bfb3c3b7cfd6afe.yaml b/releasenotes/notes/add-api-tag-filtering-8bfb3c3b7cfd6afe.yaml new file mode 100644 index 0000000000..4985ece2b8 --- /dev/null +++ b/releasenotes/notes/add-api-tag-filtering-8bfb3c3b7cfd6afe.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + You can now filter API queries by the object tag. diff --git a/releasenotes/notes/add-batch-member-update-capability-4923bd266a9b2b80.yaml b/releasenotes/notes/add-batch-member-update-capability-4923bd266a9b2b80.yaml new file mode 100644 index 0000000000..f4c3b53946 --- /dev/null +++ b/releasenotes/notes/add-batch-member-update-capability-4923bd266a9b2b80.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + It is now possible to completely update a pool's member list as a batch + operation. Using a PUT request on the base member endpoint of a pool, you + can specify a list of member objects and the service will perform any + necessary creates/deletes/updates as a single operation. diff --git a/releasenotes/notes/add-c9b9401b831efb25.yaml b/releasenotes/notes/add-c9b9401b831efb25.yaml new file mode 100644 index 0000000000..dfb2813cca --- /dev/null +++ b/releasenotes/notes/add-c9b9401b831efb25.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Introduced an image driver interface. Supported drivers are noop and + Glance. +upgrade: + - | + When the amphora provider driver is enabled, operators need to set the + option ``[controller_worker]/image_driver``. The default image driver is + ``image_glance_driver``. ``image_noop_driver`` can be used for testing. 
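The batch member update described above maps naturally to a single PUT on the pool's member endpoint. A minimal sketch with placeholder URL, token, pool ID, and member addresses; the `backup` flag comes from the earlier members note:

import requests

OCTAVIA_URL = "http://203.0.113.10:9876"  # placeholder endpoint
HEADERS = {"X-Auth-Token": "USER_TOKEN"}  # placeholder token
pool_id = "5a6f0f9b-0000-0000-0000-000000000000"  # hypothetical ID

# The service diffs this list against the pool's current members and
# performs the necessary creates/updates/deletes as one operation.
resp = requests.put(
    f"{OCTAVIA_URL}/v2/lbaas/pools/{pool_id}/members", headers=HEADERS,
    json={"members": [
        {"address": "192.0.2.11", "protocol_port": 80},
        {"address": "192.0.2.12", "protocol_port": 80},
        {"address": "192.0.2.13", "protocol_port": 80, "backup": True},
    ]})
print(resp.status_code)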
diff --git a/releasenotes/notes/add-compute-flavor-capability-ab202697a7fbdc3d.yaml b/releasenotes/notes/add-compute-flavor-capability-ab202697a7fbdc3d.yaml new file mode 100644 index 0000000000..aab61ac5b0 --- /dev/null +++ b/releasenotes/notes/add-compute-flavor-capability-ab202697a7fbdc3d.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Operators can now use the 'compute_flavor' Octavia flavor capability when + using the amphora provider driver. This allows custom compute driver + flavors to be used per-load balancer. If this is not defined in an + Octavia flavor, the amp_flavor_id Octavia configuration file setting + will continue to be used. diff --git a/releasenotes/notes/add-config-option-for-amp-timezone-6496a33a23d7520d.yaml b/releasenotes/notes/add-config-option-for-amp-timezone-6496a33a23d7520d.yaml new file mode 100644 index 0000000000..d33e0a76ad --- /dev/null +++ b/releasenotes/notes/add-config-option-for-amp-timezone-6496a33a23d7520d.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Configuration of the amphora's timezone is now possible using the new + configuration setting "amp_timezone" in the controller_worker options + group. diff --git a/releasenotes/notes/add-cpu-pinning-element-86617303b720d5a9.yaml b/releasenotes/notes/add-cpu-pinning-element-86617303b720d5a9.yaml new file mode 100644 index 0000000000..044ab5e5d3 --- /dev/null +++ b/releasenotes/notes/add-cpu-pinning-element-86617303b720d5a9.yaml @@ -0,0 +1,19 @@ +--- +features: + - | + The new "cpu-pinning" element optimizes the amphora image for better + vertical scaling. When an amphora flavor with multiple vCPUs is configured, + it will configure the kernel to isolate (isolcpus) + all vCPUs except the first one. + Furthermore, it uninstalls irqbalance and sets the IRQ affinity to the + first CPU. That way the other CPUs are free to be used by HAProxy + exclusively. A new customized TuneD profile applies some more tweaks + for improving network latency. + This new feature is disabled by default, but can be enabled by running + `diskimage-create.sh` with the `-m` option or setting the + `AMP_ENABLE_CPUPINNING` environment variable to 1 before running the script. +upgrade: + - | + Amphora vertical scaling optimizations require a new amphora image + build with the optional CPU pinning feature enabled in order + to become effective. diff --git a/releasenotes/notes/add-default-ciphers-2eb70b34290711be.yaml b/releasenotes/notes/add-default-ciphers-2eb70b34290711be.yaml new file mode 100644 index 0000000000..1016576133 --- /dev/null +++ b/releasenotes/notes/add-default-ciphers-2eb70b34290711be.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + HTTPS-terminated listeners can now be individually configured with an OpenSSL cipher string. + The default cipher string for new listeners can be specified with ``default_tls_ciphers`` + in ``octavia.conf``. The built-in default is OWASP's "Suite B" recommendation. (https://cheatsheetseries.owasp.org/cheatsheets/TLS_Cipher_String_Cheat_Sheet.html) + Existing listeners will be unaffected. \ No newline at end of file diff --git a/releasenotes/notes/add-event-notifications-aa9946d771308da5.yaml b/releasenotes/notes/add-event-notifications-aa9946d771308da5.yaml new file mode 100644 index 0000000000..bacc1ba294 --- /dev/null +++ b/releasenotes/notes/add-event-notifications-aa9946d771308da5.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + Octavia now supports oslo.message notifications for loadbalancer create, + delete, and update operations. 
+upgrade: + - | + A new option is provided in the oslo_messaging namespace to disable + event_notifications. +other: + - | + Admin documentation page has been added to explain the available events, + the notification format, and how to disable event notifications. diff --git a/releasenotes/notes/add-h2-alpn-protocol-to-default-e2d499d21a5d90d1.yaml b/releasenotes/notes/add-h2-alpn-protocol-to-default-e2d499d21a5d90d1.yaml new file mode 100644 index 0000000000..771e5d335b --- /dev/null +++ b/releasenotes/notes/add-h2-alpn-protocol-to-default-e2d499d21a5d90d1.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The HTTP/2 protocol is now added to the default ALPN protocol list for + listener and pools. diff --git a/releasenotes/notes/add-haproxy2-centos-063beb304409d141.yaml b/releasenotes/notes/add-haproxy2-centos-063beb304409d141.yaml new file mode 100644 index 0000000000..6ce22700b9 --- /dev/null +++ b/releasenotes/notes/add-haproxy2-centos-063beb304409d141.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + CentOS-based amphora images will now install HAProxy version 2.2 maintained + by CentOS NFV SIG. Other supported distributions (Ubuntu Bionic, RHEL 8) + remain untouched. diff --git a/releasenotes/notes/add-healthcheck-middleware-6c09150bddd3113f.yaml b/releasenotes/notes/add-healthcheck-middleware-6c09150bddd3113f.yaml new file mode 100644 index 0000000000..8c8c7c17d0 --- /dev/null +++ b/releasenotes/notes/add-healthcheck-middleware-6c09150bddd3113f.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added the oslo-middleware healthcheck app to the Octavia API. + Hitting /healthcheck will return a 200. This is enabled via the + [api_settings]healthcheck_enabled setting and is disabled by default. diff --git a/releasenotes/notes/add-hsts-support-91527398ba966115.yaml b/releasenotes/notes/add-hsts-support-91527398ba966115.yaml new file mode 100644 index 0000000000..be4f980be1 --- /dev/null +++ b/releasenotes/notes/add-hsts-support-91527398ba966115.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Added support for HTTP Strict Transport Security (HSTS) for TLS-terminated + listeners. The API for creating and updating listeners has been extended + by the optional fields `hsts_max_age`, `hsts_include_subdomains` and + `hsts_preload`. By default this feature is disabled. + In order to activate this feature the `hsts_max_age` + option needs to be set. diff --git a/releasenotes/notes/add-id-column-to-healthmonitor-a331934ad2cede87.yaml b/releasenotes/notes/add-id-column-to-healthmonitor-a331934ad2cede87.yaml new file mode 100644 index 0000000000..ca542fffd9 --- /dev/null +++ b/releasenotes/notes/add-id-column-to-healthmonitor-a331934ad2cede87.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - Added an `ID` column to the health_monitor table in Octavia, + whose value is the same as the `pool_id` column. + The database needs to be upgraded first, followed by upgrade + and restart of the API servers. diff --git a/releasenotes/notes/add-jobboard-based-controller-599279c7cc172e955.yaml b/releasenotes/notes/add-jobboard-based-controller-599279c7cc172e955.yaml new file mode 100644 index 0000000000..3c586c96eb --- /dev/null +++ b/releasenotes/notes/add-jobboard-based-controller-599279c7cc172e955.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Operators can now use the amphorav2 provider which uses a jobboard-based + controller. A jobboard controller solves the issue with resources stuck in + PENDING_* states by writing info about task states in a persistent backend + and monitoring job claims via jobboard. 
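The HSTS fields named in the note above can be set on an existing listener with a single update call. A sketch with placeholder URL, token, and listener ID; the three `hsts_*` field names come from the release note:

import requests

OCTAVIA_URL = "http://203.0.113.10:9876"  # placeholder endpoint
HEADERS = {"X-Auth-Token": "USER_TOKEN"}  # placeholder token
listener_id = "d2f8a6c1-0000-0000-0000-000000000000"  # hypothetical ID

# Setting hsts_max_age activates HSTS; the other two fields are optional.
resp = requests.put(
    f"{OCTAVIA_URL}/v2/lbaas/listeners/{listener_id}", headers=HEADERS,
    json={"listener": {
        "hsts_max_age": 31536000,          # one year, in seconds
        "hsts_include_subdomains": True,
        "hsts_preload": True,
    }})
print(resp.status_code)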
diff --git a/releasenotes/notes/add-l7policy-and-l7rule-to-quota-4b873c77f1e608e6.yaml b/releasenotes/notes/add-l7policy-and-l7rule-to-quota-4b873c77f1e608e6.yaml new file mode 100644 index 0000000000..0988ef5520 --- /dev/null +++ b/releasenotes/notes/add-l7policy-and-l7rule-to-quota-4b873c77f1e608e6.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add l7policy and l7rule to octavia quota. diff --git a/releasenotes/notes/add-lb-algorithm-source-ip-port-ff86433143e43136.yaml b/releasenotes/notes/add-lb-algorithm-source-ip-port-ff86433143e43136.yaml new file mode 100644 index 0000000000..b5b808a1c9 --- /dev/null +++ b/releasenotes/notes/add-lb-algorithm-source-ip-port-ff86433143e43136.yaml @@ -0,0 +1,16 @@ +--- +features: + - | + New Load Balancing algorithm SOURCE_IP_PORT has been added. + It is supported only by the OVN provider driver. +upgrade: + - | + All pools configured under the OVN provider driver are + automatically migrated to the SOURCE_IP_PORT algorithm. + Previously the algorithm was named ROUND_ROBIN, but in + fact it was not behaving like round robin. After + investigating, it was observed that core OVN actually + utilizes a 5-tuple hash/RSS hash in DPDK/kernel as the load + balancing algorithm. The 5-tuple hash covers Source IP, Destination + IP, Protocol, Source Port, and Destination Port. + To reflect this, the name was changed to SOURCE_IP_PORT. diff --git a/releasenotes/notes/add-listener-tls-alpn-support-3056fb01b418c88f.yaml b/releasenotes/notes/add-listener-tls-alpn-support-3056fb01b418c88f.yaml new file mode 100644 index 0000000000..f0b2ee19a1 --- /dev/null +++ b/releasenotes/notes/add-listener-tls-alpn-support-3056fb01b418c88f.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Added support for TLS extension Application Layer Protocol Negotiation + (ALPN) to TLS-terminated HTTPS load balancers. A new parameter + ``alpn_protocols`` was added to the Listener API. + - | + Octavia provider drivers can now offer HTTP/2 over TLS (protocol + negotiation via ALPN) to clients. diff --git a/releasenotes/notes/add-monitor-address-and-port-to-member-99fa2ee65e2b04b4.yaml b/releasenotes/notes/add-monitor-address-and-port-to-member-99fa2ee65e2b04b4.yaml new file mode 100644 index 0000000000..556310195f --- /dev/null +++ b/releasenotes/notes/add-monitor-address-and-port-to-member-99fa2ee65e2b04b4.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add monitor address and port to member diff --git a/releasenotes/notes/add-neutron-client-interface-info-06faaaad92886b8c.yaml b/releasenotes/notes/add-neutron-client-interface-info-06faaaad92886b8c.yaml new file mode 100644 index 0000000000..9a040f42fb --- /dev/null +++ b/releasenotes/notes/add-neutron-client-interface-info-06faaaad92886b8c.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed wrong endpoint information in neutron client configuration. diff --git a/releasenotes/notes/add-nftables-support-c86a89c420f6a42a.yaml b/releasenotes/notes/add-nftables-support-c86a89c420f6a42a.yaml new file mode 100644 index 0000000000..34a3ebfeda --- /dev/null +++ b/releasenotes/notes/add-nftables-support-c86a89c420f6a42a.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for nftables to the devstack plugin and the amphora. 
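The 5-tuple hash behavior behind SOURCE_IP_PORT can be shown with a toy sketch. This is not OVN's actual implementation (OVN relies on DPDK/kernel RSS hashing); it only illustrates why the same flow always lands on the same member while a new connection may not:

import hashlib

def pick_member(src_ip, dst_ip, proto, src_port, dst_port, members):
    """Map a flow's 5-tuple onto one member deterministically."""
    key = f"{src_ip}|{dst_ip}|{proto}|{src_port}|{dst_port}".encode()
    digest = int.from_bytes(hashlib.sha256(key).digest()[:8], "big")
    return members[digest % len(members)]

members = ["10.0.0.11", "10.0.0.12", "10.0.0.13"]
# Repeating the same 5-tuple always selects the same member; changing the
# source port (a new connection) may select another -- unlike round robin.
print(pick_member("198.51.100.7", "203.0.113.80", "tcp", 40312, 80, members))
print(pick_member("198.51.100.7", "203.0.113.80", "tcp", 40313, 80, members))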
diff --git a/releasenotes/notes/add-noop-amphora-stats-2cc64717f85eb9a8.yaml b/releasenotes/notes/add-noop-amphora-stats-2cc64717f85eb9a8.yaml new file mode 100644 index 0000000000..88aca6aeb1 --- /dev/null +++ b/releasenotes/notes/add-noop-amphora-stats-2cc64717f85eb9a8.yaml @@ -0,0 +1,5 @@ +--- +other: + - | + Add fake Amphora stats for when Octavia runs in noop mode / using + noop drivers. diff --git a/releasenotes/notes/add-noop-cert-manager-7018d3933a0ce9c6.yaml b/releasenotes/notes/add-noop-cert-manager-7018d3933a0ce9c6.yaml new file mode 100644 index 0000000000..9e1de08cdd --- /dev/null +++ b/releasenotes/notes/add-noop-cert-manager-7018d3933a0ce9c6.yaml @@ -0,0 +1,4 @@ +--- +other: + - | + Noop certificate manager was added. Now any Octavia certificate operations using noop drivers will be faster (as they won't be validated). diff --git a/releasenotes/notes/add-policy-json-support-38929bb1fb581a7a.yaml b/releasenotes/notes/add-policy-json-support-38929bb1fb581a7a.yaml new file mode 100644 index 0000000000..a6f89f039a --- /dev/null +++ b/releasenotes/notes/add-policy-json-support-38929bb1fb581a7a.yaml @@ -0,0 +1,6 @@ +--- +features: + - Policy.json enforcement in Octavia. + + * Enables verification of privileges on specific API commands for a specific + user role and project_id. diff --git a/releasenotes/notes/add-pool-tls-alpn-support-68cb94b828c9ba37.yaml b/releasenotes/notes/add-pool-tls-alpn-support-68cb94b828c9ba37.yaml new file mode 100644 index 0000000000..c764229c03 --- /dev/null +++ b/releasenotes/notes/add-pool-tls-alpn-support-68cb94b828c9ba37.yaml @@ -0,0 +1,16 @@ +--- +features: + - | + Added support for TLS extension Application Layer Protocol Negotiation + (ALPN) to TLS-enabled pools. A new parameter ``alpn_protocols`` was added + to the Pool API. + - | + Octavia provider drivers can now be extended to support HTTP/2 between + TLS-enabled pools and members. + - | + Added HTTP/2 over TLS support via ALPN protocol negotiation to the + amphora provider driver for TLS-enabled pools. + - | + The Octavia amphora driver now supports gRPC protocol when HTTP/2 is + enabled for TERMINATED_HTTPS listeners and TLS-enabled pools, and the + amphora image is using HAProxy 2.0 or newer. diff --git a/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml b/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml new file mode 100644 index 0000000000..c172c11f1b --- /dev/null +++ b/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added listener and pool protocol validation. Pools and listeners cannot be + combined arbitrarily; some constraints on the protocols are required. diff --git a/releasenotes/notes/add-ptvsd-debugger-33bb632bccf494bb.yaml b/releasenotes/notes/add-ptvsd-debugger-33bb632bccf494bb.yaml new file mode 100644 index 0000000000..32bed00750 --- /dev/null +++ b/releasenotes/notes/add-ptvsd-debugger-33bb632bccf494bb.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to debug with the Python Visual Studio Debugger engine + (ptvsd). diff --git a/releasenotes/notes/add-quota-support-fe63a52b6b903789.yaml b/releasenotes/notes/add-quota-support-fe63a52b6b903789.yaml new file mode 100644 index 0000000000..c854a1777f --- /dev/null +++ b/releasenotes/notes/add-quota-support-fe63a52b6b903789.yaml @@ -0,0 +1,3 @@ +--- +features: + - Adds quota support to the Octavia API. 
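Quota support, combined with the resource renames and the l7policy/l7rule quotas noted earlier, can be sketched as a single admin call; URL, token, project ID, and the chosen limits are placeholders:

import requests

OCTAVIA_URL = "http://203.0.113.10:9876"   # placeholder endpoint
HEADERS = {"X-Auth-Token": "ADMIN_TOKEN"}  # placeholder admin token
project_id = "b1c2d3e4f5a600000000000000000000"  # hypothetical project

# Uses the current resource names (loadbalancer/healthmonitor) plus the
# l7policy and l7rule quotas mentioned in the notes above.
resp = requests.put(
    f"{OCTAVIA_URL}/v2/lbaas/quotas/{project_id}", headers=HEADERS,
    json={"quota": {
        "loadbalancer": 10,
        "listener": 20,
        "pool": 20,
        "member": 100,
        "healthmonitor": 20,
        "l7policy": 50,
        "l7rule": 200,
    }})
print(resp.status_code)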
diff --git a/releasenotes/notes/add-retry-for-getting-amphora-7f96ec666403ea49.yaml b/releasenotes/notes/add-retry-for-getting-amphora-7f96ec666403ea49.yaml new file mode 100644 index 0000000000..7788bb7a76 --- /dev/null +++ b/releasenotes/notes/add-retry-for-getting-amphora-7f96ec666403ea49.yaml @@ -0,0 +1,4 @@ +fixes: + - | + Fixed an issue where load balancer creation was aborted due to an error when + getting the amphora VM. \ No newline at end of file diff --git a/releasenotes/notes/add-rh-flavors-support-for-amphora-agent-cd3e9f9f519b9ff2.yaml b/releasenotes/notes/add-rh-flavors-support-for-amphora-agent-cd3e9f9f519b9ff2.yaml new file mode 100644 index 0000000000..8bb1073ea7 --- /dev/null +++ b/releasenotes/notes/add-rh-flavors-support-for-amphora-agent-cd3e9f9f519b9ff2.yaml @@ -0,0 +1,18 @@ +--- +prelude: > + Amphora image support for RH Linux flavors. +features: + - The diskimage-create script supports different operating system flavors + such as Ubuntu (the default option), CentOS, Fedora and RHEL. Adaptations + were made to several elements to ensure all images are operational. + - The amphora-agent is now able to distinguish between operating systems and + choose the right course of action to manage files and networking on each + Linux flavor. +issues: + - To use CentOS, Fedora, or RHEL in your amphora image you must set + the user_group option, located in the [haproxy_amphora] section of the + octavia.conf file, to "haproxy". This will be made automatic in a + future version. +upgrade: + - agent_server_network_dir is now auto-detected for Ubuntu, CentOS, Fedora + and RHEL if one is not specified in the configuration file. diff --git a/releasenotes/notes/add-rhel-amphora-ab4a7bada2fa3eb7.yaml b/releasenotes/notes/add-rhel-amphora-ab4a7bada2fa3eb7.yaml new file mode 100644 index 0000000000..13df622659 --- /dev/null +++ b/releasenotes/notes/add-rhel-amphora-ab4a7bada2fa3eb7.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support to create RHEL 8 amphora images. diff --git a/releasenotes/notes/add-rockylinux-support-ac6e410b979e622e.yaml b/releasenotes/notes/add-rockylinux-support-ac6e410b979e622e.yaml new file mode 100644 index 0000000000..49cdf0cafe --- /dev/null +++ b/releasenotes/notes/add-rockylinux-support-ac6e410b979e622e.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Added support for Rocky Linux controllers in devstack. + - | + Added support for Rocky Linux amphora images. To enable it, users have to + build their amphora images with the ``OCTAVIA_AMP_BASE_OS=rocky`` and + ``OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9`` parameters. diff --git a/releasenotes/notes/add-sctp-support-in-amphora-driver-d6e60731029badf5.yaml b/releasenotes/notes/add-sctp-support-in-amphora-driver-d6e60731029badf5.yaml new file mode 100644 index 0000000000..4dc62b6bf9 --- /dev/null +++ b/releasenotes/notes/add-sctp-support-in-amphora-driver-d6e60731029badf5.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Add support for the SCTP protocol in the Amphora driver. Support for SCTP + listeners and pools is implemented using keepalived in the amphora. Support + for SCTP health monitors is provided by the amphora-health-checker script + and relies on an INIT/INIT-ACK/ABORT sequence of packets.
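A minimal octavia.conf sketch for the RH-family amphora images described in the add-rh-flavors note above, using the section and value given there:

    [haproxy_amphora]
    # Required for CentOS, Fedora and RHEL amphora images
    user_group = haproxy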
diff --git a/releasenotes/notes/add-shared-pools-and-l7-ef9edf01bb9058e0.yaml b/releasenotes/notes/add-shared-pools-and-l7-ef9edf01bb9058e0.yaml new file mode 100644 index 0000000000..717be4bce5 --- /dev/null +++ b/releasenotes/notes/add-shared-pools-and-l7-ef9edf01bb9058e0.yaml @@ -0,0 +1,26 @@ +--- +features: + - | + Adds support for Layer 7 switching and shared pools + features to Octavia. This supports the equivalent + feature added to Neutron LBaaS v2. + + * Layer 7 policies allow a tenant / user to define + actions the load balancer may take other than + routing requests to the default pool. + * Layer 7 rules control the logic behind whether a + given Layer 7 policy is followed. + * Works for HTTP and TERMINATED_HTTPS listeners. + * Shared pools allow listeners or Layer 7 + REDIRECT_TO_POOL policies to share back-end + pools. +upgrade: + - | + Upgrade requires a database migration. + + * Shared-pools introduces a new ``load_balancer_id`` + column into the ``pools`` table. + * ``pools.load_balancer_id`` column is populated + from ``listeners`` data using ETL in the migration. + * Two new tables are created to handle Layer 7 + switching. These are ``l7policy`` and ``l7rule``. diff --git a/releasenotes/notes/add-sizelimit-middleware-91dc6078522f81ec.yaml b/releasenotes/notes/add-sizelimit-middleware-91dc6078522f81ec.yaml new file mode 100644 index 0000000000..cbd8a796cb --- /dev/null +++ b/releasenotes/notes/add-sizelimit-middleware-91dc6078522f81ec.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - | + Octavia now uses the oslo middleware sizelimit module. It allows limiting + the size of incoming requests in the API. Admins may need to adjust the + ``[oslo_middleware].max_request_body_size`` setting to their needs. The + default value for ``max_request_body_size`` is 114688 bytes. diff --git a/releasenotes/notes/add-sos-element-5d6677471341e7f2.yaml b/releasenotes/notes/add-sos-element-5d6677471341e7f2.yaml new file mode 100644 index 0000000000..8ca8ccb19c --- /dev/null +++ b/releasenotes/notes/add-sos-element-5d6677471341e7f2.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add sos element to amphora images (Red Hat family only). diff --git a/releasenotes/notes/add-support-for-centos-8-e0730de5d20a48be.yaml b/releasenotes/notes/add-support-for-centos-8-e0730de5d20a48be.yaml new file mode 100644 index 0000000000..efc3fbf249 --- /dev/null +++ b/releasenotes/notes/add-support-for-centos-8-e0730de5d20a48be.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for CentOS 8 amphora images. diff --git a/releasenotes/notes/add-support-for-sctp-protocol-152444b211ab2188.yaml b/releasenotes/notes/add-support-for-sctp-protocol-152444b211ab2188.yaml new file mode 100644 index 0000000000..c3a2e166f1 --- /dev/null +++ b/releasenotes/notes/add-support-for-sctp-protocol-152444b211ab2188.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for the SCTP protocol. SCTP support has been added in the Octavia + API for listener, pool, and health-monitor resources. diff --git a/releasenotes/notes/add-systemd-support-5794252f02bce666.yaml b/releasenotes/notes/add-systemd-support-5794252f02bce666.yaml new file mode 100644 index 0000000000..27ca865f3f --- /dev/null +++ b/releasenotes/notes/add-systemd-support-5794252f02bce666.yaml @@ -0,0 +1,8 @@ +--- +features: + - Adds support for amphora images that use systemd. + - Add support for Ubuntu Xenial amphora images.
+deprecations: + - The "use_upstart" configuration option is now deprecated + because the amphora agent can now automatically discover + the init system in use in the amphora image. diff --git a/releasenotes/notes/add-upgrade-check-framework-cc440f3f440ba6d2.yaml b/releasenotes/notes/add-upgrade-check-framework-cc440f3f440ba6d2.yaml new file mode 100644 index 0000000000..b0922d6890 --- /dev/null +++ b/releasenotes/notes/add-upgrade-check-framework-cc440f3f440ba6d2.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + Added a new tool, ``octavia-status upgrade check``. + This framework allows adding various checks which can be run before an + Octavia upgrade to ensure the upgrade can be performed safely. +upgrade: + - | + Operators can now use the new CLI tool ``octavia-status upgrade check`` + to check whether an Octavia deployment can be safely upgraded from + the N-1 to the N release. diff --git a/releasenotes/notes/add-vip-acl-4a7e20d167fe4a49.yaml b/releasenotes/notes/add-vip-acl-4a7e20d167fe4a49.yaml new file mode 100644 index 0000000000..2f8a7e8942 --- /dev/null +++ b/releasenotes/notes/add-vip-acl-4a7e20d167fe4a49.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for VIP access control lists. Users can now limit incoming + traffic to a set of allowed CIDRs. diff --git a/releasenotes/notes/add-x-forwarded-proto-19a1d971cf43b795.yaml b/releasenotes/notes/add-x-forwarded-proto-19a1d971cf43b795.yaml new file mode 100644 index 0000000000..63e4128116 --- /dev/null +++ b/releasenotes/notes/add-x-forwarded-proto-19a1d971cf43b795.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for listener X-Forwarded-Proto header insertion. diff --git a/releasenotes/notes/add_API_reference-81d84d0c8598b764.yaml b/releasenotes/notes/add_API_reference-81d84d0c8598b764.yaml new file mode 100644 index 0000000000..14d14ede59 --- /dev/null +++ b/releasenotes/notes/add_API_reference-81d84d0c8598b764.yaml @@ -0,0 +1,5 @@ +--- +other: + - | + Octavia now has an up-to-date API reference for the Octavia v2 API. + It is available at: https://developer.openstack.org/api-ref/load-balancer/ diff --git a/releasenotes/notes/add_ability_to_disable_api_versions-253a8dc4253f0f56.yaml b/releasenotes/notes/add_ability_to_disable_api_versions-253a8dc4253f0f56.yaml new file mode 100644 index 0000000000..f8ff1af99d --- /dev/null +++ b/releasenotes/notes/add_ability_to_disable_api_versions-253a8dc4253f0f56.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add config variables to allow disabling either API version (v1 or v2.0). diff --git a/releasenotes/notes/add_api_audit-58dc16bff517eae7.yaml b/releasenotes/notes/add_api_audit-58dc16bff517eae7.yaml new file mode 100644 index 0000000000..04d749eb07 --- /dev/null +++ b/releasenotes/notes/add_api_audit-58dc16bff517eae7.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The Octavia API now supports Cloud Auditing Data Federation (CADF) + auditing. diff --git a/releasenotes/notes/add_provider_driver_support-7523f130dd5025af.yaml b/releasenotes/notes/add_provider_driver_support-7523f130dd5025af.yaml new file mode 100644 index 0000000000..50efedb09e --- /dev/null +++ b/releasenotes/notes/add_provider_driver_support-7523f130dd5025af.yaml @@ -0,0 +1,44 @@ +--- +features: + - | + Octavia now supports provider drivers. This allows third party load + balancing drivers to be integrated with the Octavia v2 API. Users select + the "provider" for a load balancer at creation time. + - | + There is now an API available to list enabled provider drivers.
+upgrade: + - | + Two new options are included with provider driver support. The + enabled_provider_drivers option defaults to "amphora, octavia" to support + existing Octavia load balancers. The default_provider_driver option + defaults to "amphora" for all new load balancers that do not specify a + provider at creation time. These defaults should cover most existing + deployments. + - | + The provider driver support requires a database migration and follows + Octavia standard rolling upgrade procedures; database migration followed + by rolling control plane upgrades. Existing load balancers with no + provider specified will be assigned "amphora" as part of the database + migration. +deprecations: + - | + The Octavia API handlers are now deprecated and replaced by the new + provider driver support. Octavia API handlers will remain in the code to + support the Octavia v1 API (used for neutron-lbaas). + - | + The "octavia" provider has been deprecated in favor of "amphora" to clarify + the provider driver supporting the load balancer. +other: + - | + A provider driver developer guide has been added to the documentation to + aid driver providers. + - | + An operator documentation page has been added to list known Octavia + provider drivers and provide links to those drivers. + Non-reference drivers, drivers other than the "amphora" driver, will be + outside of the octavia code repository but are dynamically loadable via + a well defined interface described in the provider driver developers + guide. + - | + Installed drivers need to be enabled for use in the Octavia + configuration file once you are ready to expose the driver to users. diff --git a/releasenotes/notes/add_tag_support-4735534f4066b9af.yaml b/releasenotes/notes/add_tag_support-4735534f4066b9af.yaml new file mode 100644 index 0000000000..d3b0b68123 --- /dev/null +++ b/releasenotes/notes/add_tag_support-4735534f4066b9af.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + Added a tags property for Octavia resources. It includes: + + * Load balancer + * Listener + * Member + * Pool + * L7rule + * L7policy + * Health Monitor diff --git a/releasenotes/notes/add_vip_sg_ids-feaeaf8b3301e267.yaml b/releasenotes/notes/add_vip_sg_ids-feaeaf8b3301e267.yaml new file mode 100644 index 0000000000..3a609d257b --- /dev/null +++ b/releasenotes/notes/add_vip_sg_ids-feaeaf8b3301e267.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add the ``vip_sg_ids`` parameter to the load-balancer POST API. It allows + setting a list of user-defined Neutron Security Groups on the VIP port of the + Load Balancer. diff --git a/releasenotes/notes/add_vip_sg_ids_amphora_driver-8b8078aa674ff60a.yaml b/releasenotes/notes/add_vip_sg_ids_amphora_driver-8b8078aa674ff60a.yaml new file mode 100644 index 0000000000..f495522e59 --- /dev/null +++ b/releasenotes/notes/add_vip_sg_ids_amphora_driver-8b8078aa674ff60a.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Add the vip_sg_ids parameter to the Amphora driver, a list of Neutron + Security Groups. When set, the Amphora driver applies the Security Groups to + the VIP port of the Load Balancer. It also doesn't set any Security Group + Rules related to the Listeners on these ports; however, it adds Security + Group Rules for VRRP and haproxy peers when needed. + This feature does not work with SR-IOV ports as Neutron does not support + Security Groups on these ports.
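A hedged sketch of the ``vip_sg_ids`` parameter from the two notes above, as a load balancer POST request body (all UUIDs are placeholders):

    POST /v2/lbaas/loadbalancers
    {
        "loadbalancer": {
            "name": "lb-with-custom-sgs",
            "vip_subnet_id": "<subnet-uuid>",
            "vip_sg_ids": ["<sg-uuid-1>", "<sg-uuid-2>"]
        }
    }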
diff --git a/releasenotes/notes/add_volume_avalability_zone-cc03ba448960bef4.yaml b/releasenotes/notes/add_volume_avalability_zone-cc03ba448960bef4.yaml new file mode 100644 index 0000000000..030a477da4 --- /dev/null +++ b/releasenotes/notes/add_volume_avalability_zone-cc03ba448960bef4.yaml @@ -0,0 +1,6 @@ +--- + +features: + - | + Added a volume availability zone to the availability zone profile for the + amphora backend, for creating load balancers with a specific volume availability zone. diff --git a/releasenotes/notes/additional-udp-healthcheck-types-2414a5edee9f5110.yaml b/releasenotes/notes/additional-udp-healthcheck-types-2414a5edee9f5110.yaml new file mode 100644 index 0000000000..2f3a4bfa89 --- /dev/null +++ b/releasenotes/notes/additional-udp-healthcheck-types-2414a5edee9f5110.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Two new types of health monitoring are now valid for UDP listeners. Both + ``HTTP`` and ``TCP`` check types can now be used. diff --git a/releasenotes/notes/admin-state-up-fix-4aa278eac67646ae.yaml b/releasenotes/notes/admin-state-up-fix-4aa278eac67646ae.yaml new file mode 100644 index 0000000000..a59816510b --- /dev/null +++ b/releasenotes/notes/admin-state-up-fix-4aa278eac67646ae.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - To fix the admin-state-up bug you must upgrade your + amphora image. +fixes: + - Fixes admin-state-up=False action for loadbalancer + and listener. diff --git a/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml b/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml new file mode 100644 index 0000000000..b869190d3d --- /dev/null +++ b/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml @@ -0,0 +1,16 @@ +--- +upgrade: + - | + After this upgrade, users will no longer be able to use network resources they + cannot see or "show" on load balancers. Operators can revert this behavior + by setting the "allow_invisible_resource_usage" configuration file setting + to ``True``. +security: + - | + Previously, if a user knew or could guess the UUID for a network resource, + they could create load balancer resources using that UUID. + Now the user must have permission to see or "show" the resource before it + can be used with a load balancer. This will be the new default, but + operators can disable this behavior by setting the configuration file + setting "allow_invisible_resource_usage" to ``True``. This issue falls + under the "Class C1" security issue as the user would require a valid UUID. diff --git a/releasenotes/notes/allow-operators-to-disable-ping-healthchecks-42fd8c3b88edaf35.yaml b/releasenotes/notes/allow-operators-to-disable-ping-healthchecks-42fd8c3b88edaf35.yaml new file mode 100644 index 0000000000..c51d273ca3 --- /dev/null +++ b/releasenotes/notes/allow-operators-to-disable-ping-healthchecks-42fd8c3b88edaf35.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Cloud deployers can set `api_settings.allow_ping_health_monitors = False` + in `octavia.conf` to disable the ability to create PING health monitors. diff --git a/releasenotes/notes/allow-vip-on-mgmt-net-d6c65d4ccb2a8f2c.yaml b/releasenotes/notes/allow-vip-on-mgmt-net-d6c65d4ccb2a8f2c.yaml new file mode 100644 index 0000000000..86f35f6304 --- /dev/null +++ b/releasenotes/notes/allow-vip-on-mgmt-net-d6c65d4ccb2a8f2c.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Allow the loadbalancer's VIP to be created on the same + network as the management interface.
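A sketch of the two operator settings mentioned in the notes above; the option names are taken verbatim from the notes, while the section for allow_invisible_resource_usage is an assumption:

    [api_settings]
    # Disallow creating PING health monitors
    allow_ping_health_monitors = False

    # Section assumed; revert to allowing use of invisible network resources
    [networking]
    allow_invisible_resource_usage = True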
diff --git a/releasenotes/notes/allowed_cidr-validation-for-additional_vips-175c32824cc7ee95.yaml b/releasenotes/notes/allowed_cidr-validation-for-additional_vips-175c32824cc7ee95.yaml new file mode 100644 index 0000000000..456d9d1806 --- /dev/null +++ b/releasenotes/notes/allowed_cidr-validation-for-additional_vips-175c32824cc7ee95.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + The validation for the allowed_cidr parameter only took into account the + IP version of the primary VIP. CIDRs which only matched the version of an + additional VIP were rejected. This is fixed and CIDRs are now matched + against the IP version of all VIPs. diff --git a/releasenotes/notes/amp-agent-py3-cert-upload-binary-74e0ab35c5a85c68.yaml b/releasenotes/notes/amp-agent-py3-cert-upload-binary-74e0ab35c5a85c68.yaml new file mode 100644 index 0000000000..cadc3311ba --- /dev/null +++ b/releasenotes/notes/amp-agent-py3-cert-upload-binary-74e0ab35c5a85c68.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + Any amphorae running a py3 based image must be recycled or else they will + eventually fail on certificate rotation. +fixes: + - | + Resolved broken certificate upload on py3 based amphora images. On a + housekeeping certificate rotation event, the amphora would clear out its + server certificate and return a 500, putting the amphora in ERROR status + and breaking further communication. See upgrade notes. diff --git a/releasenotes/notes/amp-az-1a0b4255c77fd1dc.yaml b/releasenotes/notes/amp-az-1a0b4255c77fd1dc.yaml new file mode 100644 index 0000000000..f247a7ee07 --- /dev/null +++ b/releasenotes/notes/amp-az-1a0b4255c77fd1dc.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added a configuration option that specifies the availability zone amphora + should be built in. diff --git a/releasenotes/notes/amphora-network-interface-management-d77bc9905ed997f6.yaml b/releasenotes/notes/amphora-network-interface-management-d77bc9905ed997f6.yaml new file mode 100644 index 0000000000..26b710d9a1 --- /dev/null +++ b/releasenotes/notes/amphora-network-interface-management-d77bc9905ed997f6.yaml @@ -0,0 +1,13 @@ +--- +deprecations: + - | + The ``[amphora_agent].agent_server_network_file`` configuration option is + now deprecated; the new Amphora network configuration tool introduced in + Xena does not support a single configuration file. +fixes: + - | + Amphora network configuration for the VIP interface and the pool member + interfaces is now applied with the amphora-interface tool. + amphora-interface uses pyroute2 low-level functions to configure the + interfaces instead of distribution-specific tools such as "network-scripts" + or "/etc/network/interfaces" files. diff --git a/releasenotes/notes/amphora-support-rsyslog-failover-f8bf00e0bf0fc27e.yaml b/releasenotes/notes/amphora-support-rsyslog-failover-f8bf00e0bf0fc27e.yaml new file mode 100644 index 0000000000..27a7a18e68 --- /dev/null +++ b/releasenotes/notes/amphora-support-rsyslog-failover-f8bf00e0bf0fc27e.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + The generated RSyslog configuration on the amphora now supports + RSyslog failover with TCP if multiple RSyslog servers are specified.
\ No newline at end of file diff --git a/releasenotes/notes/aodh-service-api-5c485a172d76fa1a.yaml b/releasenotes/notes/aodh-service-api-5c485a172d76fa1a.yaml new file mode 100644 index 0000000000..abf38994b9 --- /dev/null +++ b/releasenotes/notes/aodh-service-api-5c485a172d76fa1a.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The ``service`` role now has access to list members in + a pool; this is needed by Aodh to evaluate unhealthy + members in a pool when doing evaluations. diff --git a/releasenotes/notes/api-create-project-id-4bb984b24d56de2e.yaml b/releasenotes/notes/api-create-project-id-4bb984b24d56de2e.yaml new file mode 100644 index 0000000000..fa4c9b85ce --- /dev/null +++ b/releasenotes/notes/api-create-project-id-4bb984b24d56de2e.yaml @@ -0,0 +1,9 @@ +--- +deprecations: + - | + The project_id attribute of the POST method on the following objects + is now deprecated\: listener, pool, health monitor, and member. + These objects will use the parent load balancer's project_id. + Values passed into the project_id on those objects will be ignored + until the deprecation cycle has expired, at which point they will + cause an error. diff --git a/releasenotes/notes/auth-strategy-keystone-80b3780a18420b6c.yaml b/releasenotes/notes/auth-strategy-keystone-80b3780a18420b6c.yaml new file mode 100644 index 0000000000..ff4119a3ff --- /dev/null +++ b/releasenotes/notes/auth-strategy-keystone-80b3780a18420b6c.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + The configuration setting auth_strategy is now set to keystone by default. diff --git a/releasenotes/notes/auto_detect_haproxy_user_group-c220b6a2c8f1d589.yaml b/releasenotes/notes/auto_detect_haproxy_user_group-c220b6a2c8f1d589.yaml new file mode 100644 index 0000000000..b5e25694df --- /dev/null +++ b/releasenotes/notes/auto_detect_haproxy_user_group-c220b6a2c8f1d589.yaml @@ -0,0 +1,7 @@ +--- +features: + - The amphora haproxy user_group setting is now automatically detected for + Ubuntu, CentOS, Fedora, or RHEL based amphora. +deprecations: + - The haproxy user_group is no longer being used. It is now auto-detected for + Ubuntu, CentOS, Fedora and RHEL based amphora images. diff --git a/releasenotes/notes/automatic-vertical-scaling-optimization-9604d53d60ca7b85.yaml b/releasenotes/notes/automatic-vertical-scaling-optimization-9604d53d60ca7b85.yaml new file mode 100644 index 0000000000..7b4da2137a --- /dev/null +++ b/releasenotes/notes/automatic-vertical-scaling-optimization-9604d53d60ca7b85.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + The Amphora agent has been adjusted to complement the vertical scaling + optimizations implemented in the new cpu-pinning element. If the flavor + uses multiple vCPUs, it will + configure HAProxy automatically to pin each of its worker threads + to an individual CPU that was isolated by the element (all vCPUs starting + from the second one). diff --git a/releasenotes/notes/availability-zone-api-a28ff5e00bdcc69a.yaml b/releasenotes/notes/availability-zone-api-a28ff5e00bdcc69a.yaml new file mode 100644 index 0000000000..70ae88208f --- /dev/null +++ b/releasenotes/notes/availability-zone-api-a28ff5e00bdcc69a.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add an API for allowing administrators to manage Octavia Availability + Zones and Availability Zone Profiles, which behave nearly identically + to Flavors and Flavor Profiles.
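A possible CLI sketch for the availability zone API noted above (command and flag names assumed from python-octaviaclient; the zone data keys depend on the provider driver):

    openstack loadbalancer availabilityzoneprofile create \
        --name az1-profile \
        --provider amphora \
        --availability-zone-data '{"compute_zone": "az1"}'
    openstack loadbalancer availabilityzone create \
        --name az1 \
        --availabilityzoneprofile az1-profile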
diff --git a/releasenotes/notes/availability-zones-can-override-valid-vip-networks-5566aa4769c158dc.yaml b/releasenotes/notes/availability-zones-can-override-valid-vip-networks-5566aa4769c158dc.yaml new file mode 100644 index 0000000000..fd3dc73462 --- /dev/null +++ b/releasenotes/notes/availability-zones-can-override-valid-vip-networks-5566aa4769c158dc.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Availability zone profiles can now override the ``valid_vip_networks`` + configuration option. diff --git a/releasenotes/notes/bug-1797130-8c9bfa50d9b6c955.yaml b/releasenotes/notes/bug-1797130-8c9bfa50d9b6c955.yaml new file mode 100644 index 0000000000..0ce0a48178 --- /dev/null +++ b/releasenotes/notes/bug-1797130-8c9bfa50d9b6c955.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + Listener default timeouts can be set via config options in the haproxy_amphora section: + + * `timeout_client_data`: Frontend client inactivity timeout + * `timeout_member_connect`: Backend member connection timeout + * `timeout_member_data`: Backend member inactivity timeout + * `timeout_tcp_inspect`: Time to wait for TCP packets for content inspection + + The value for all of these options is expected to be in milliseconds. diff --git a/releasenotes/notes/build-rate-limiting-a2d2d4c9333a8f46.yaml b/releasenotes/notes/build-rate-limiting-a2d2d4c9333a8f46.yaml new file mode 100644 index 0000000000..0f12b93d67 --- /dev/null +++ b/releasenotes/notes/build-rate-limiting-a2d2d4c9333a8f46.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Octavia now has options to limit the amphora concurrent build rate. + This may be useful for deployments where nova can get overloaded. + Amphora builds will be prioritized in the following order: + failover, normal, spares pool builds. + See the configuration guide for more information: + https://docs.openstack.org/octavia/latest/configuration/configref.html#haproxy_amphora.build_rate_limit diff --git a/releasenotes/notes/catch_validation-27ffe48ca187c46f.yaml b/releasenotes/notes/catch_validation-27ffe48ca187c46f.yaml new file mode 100644 index 0000000000..73a2045ff0 --- /dev/null +++ b/releasenotes/notes/catch_validation-27ffe48ca187c46f.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + In order to avoid hitting the Neutron API hard + when a batch update creates many new members, we cache the + subnet validation results in the batch update members API call. + We also now validate only new members during batch member + updates, since the subnet ID is immutable. diff --git a/releasenotes/notes/cert-encrypted-ramfs-381ffe3d4a7392d7.yaml b/releasenotes/notes/cert-encrypted-ramfs-381ffe3d4a7392d7.yaml new file mode 100644 index 0000000000..51d2301f5b --- /dev/null +++ b/releasenotes/notes/cert-encrypted-ramfs-381ffe3d4a7392d7.yaml @@ -0,0 +1,12 @@ +--- +upgrade: + - To enable encrypted ramfs storage for certificates + and keys, you must upgrade your amphora image. +deprecations: + - Amphora with a terminated HTTPS load balancer can + no longer be rebooted. If they reboot, they will + trigger a failover of the amphora. +security: + - Certificate and key storage for terminated HTTPS + load balancers is now in an encrypted ramfs path + inside the amphora. diff --git a/releasenotes/notes/change-keystone-backend-config-d246b1e34015c86c.yaml b/releasenotes/notes/change-keystone-backend-config-d246b1e34015c86c.yaml new file mode 100644 index 0000000000..72360c4a5f --- /dev/null +++ b/releasenotes/notes/change-keystone-backend-config-d246b1e34015c86c.yaml @@ -0,0 +1,11 @@ +--- +prelude: > + Extended support for Keystone API v3.
+features: + - Octavia supports different Keystone APIs and chooses the authentication + mechanism based on the configuration specified in the "keystone_authtoken" + section of the octavia.conf file. +upgrade: + - The "keystone_authtoken_v3" section was removed from the configuration + file and all parameters are now stored in the "keystone_authtoken" section + of the configuration file. diff --git a/releasenotes/notes/correct-amp-client-auth-vulnerability-6803f4bac2508e4c.yaml b/releasenotes/notes/correct-amp-client-auth-vulnerability-6803f4bac2508e4c.yaml new file mode 100644 index 0000000000..e348b14877 --- /dev/null +++ b/releasenotes/notes/correct-amp-client-auth-vulnerability-6803f4bac2508e4c.yaml @@ -0,0 +1,5 @@ +--- +security: + - | + Correctly require two-way certificate authentication to connect to the + amphora agent API (CVE-2019-17134). diff --git a/releasenotes/notes/custom_eventstreamer_queue_url-7a98bd6a7e92e9de.yaml b/releasenotes/notes/custom_eventstreamer_queue_url-7a98bd6a7e92e9de.yaml new file mode 100644 index 0000000000..0bdec4d67a --- /dev/null +++ b/releasenotes/notes/custom_eventstreamer_queue_url-7a98bd6a7e92e9de.yaml @@ -0,0 +1,16 @@ +--- +features: + - | + In some environments (e.g. OSA) Neutron and Octavia use different queues + (at least different vhosts), and so if Octavia posts to the Octavia queue + and Neutron listens on the Neutron queue, the events will never make it + over. + + This adds a way to configure a custom queue for the event streamer, thus + allowing it to post messages to the Neutron queue if needed. +security: + - | + Depending on how the other queue is set up, additional passwords for the + other queue will be in the Octavia config file. Operators should take care + of setting up appropriate users with appropriate restrictions to the + topic(s) needed. diff --git a/releasenotes/notes/default-ubuntu-focal-6c4a94b8cfcfd995.yaml b/releasenotes/notes/default-ubuntu-focal-6c4a94b8cfcfd995.yaml new file mode 100644 index 0000000000..fd58f12bc4 --- /dev/null +++ b/releasenotes/notes/default-ubuntu-focal-6c4a94b8cfcfd995.yaml @@ -0,0 +1,4 @@ +--- +other: + - | + The diskimage-create.sh default for Ubuntu is now focal. diff --git a/releasenotes/notes/delete_amphora_health_row_on_amphora_revert-082f94459ecacaa2.yaml b/releasenotes/notes/delete_amphora_health_row_on_amphora_revert-082f94459ecacaa2.yaml new file mode 100644 index 0000000000..39b7266fb8 --- /dev/null +++ b/releasenotes/notes/delete_amphora_health_row_on_amphora_revert-082f94459ecacaa2.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Remove the record in the amphora_health table on revert. This is necessary + because the record in the amphora table for the corresponding amphora is also deleted. + It avoids false positive triggering of the failover threshold due to + orphaned records in the amphora_health table. diff --git a/releasenotes/notes/deleted-404-2cdd751e7afbe036.yaml b/releasenotes/notes/deleted-404-2cdd751e7afbe036.yaml new file mode 100644 index 0000000000..396017ef84 --- /dev/null +++ b/releasenotes/notes/deleted-404-2cdd751e7afbe036.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes the v2 API returning "DELETED" records until the amphora_expiry_age + timeout expired. The API will now immediately return a 404 HTTP status + code when deleted objects are requested. The API version has been raised + to v2.1 to reflect this change.
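Referring back to the listener timeout options in the bug-1797130 note above, a minimal octavia.conf sketch; the values shown are illustrative milliseconds, believed to match the shipped defaults:

    [haproxy_amphora]
    timeout_client_data = 50000
    timeout_member_connect = 5000
    timeout_member_data = 50000
    timeout_tcp_inspect = 0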
diff --git a/releasenotes/notes/deprecate-amp_ssh_key_name-e1041a64ed970a9e.yaml b/releasenotes/notes/deprecate-amp_ssh_key_name-e1041a64ed970a9e.yaml new file mode 100644 index 0000000000..8d335c5613 --- /dev/null +++ b/releasenotes/notes/deprecate-amp_ssh_key_name-e1041a64ed970a9e.yaml @@ -0,0 +1,17 @@ +--- +features: + - | + New option in diskimage-create.sh `-n` to completely disable sshd on the + amphora. +deprecations: + - | + Config option `amp_ssh_access_allowed` is deprecated, as it overlaps with + `amp_ssh_key_name` in functionality and is not needed. Simply leave the + variable `amp_ssh_key_name` blank and no ssh key will be installed. This + is the same result as using `amp_ssh_access_allowed = False`. +security: + - | + It is now possible to completely remove sshd from the amphora image, to + further lock down access and increase security. If this is set, providing + an `amp_ssh_key_name` in config will install the key, but ssh access will + not be possible as sshd will not be running. diff --git a/releasenotes/notes/deprecate-amphorav1-driver-eb2dca7da2dd0776.yaml b/releasenotes/notes/deprecate-amphorav1-driver-eb2dca7da2dd0776.yaml new file mode 100644 index 0000000000..90c0eb5924 --- /dev/null +++ b/releasenotes/notes/deprecate-amphorav1-driver-eb2dca7da2dd0776.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - | + The 'amphorav1' provider is deprecated and will be removed in a future + release. Use the 'amphora' provider (an alias for 'amphorav2') instead. diff --git a/releasenotes/notes/deprecate-json-formatted-policy-file-cc3dbf8b07c2638e.yaml b/releasenotes/notes/deprecate-json-formatted-policy-file-cc3dbf8b07c2638e.yaml new file mode 100644 index 0000000000..c9c5300045 --- /dev/null +++ b/releasenotes/notes/deprecate-json-formatted-policy-file-cc3dbf8b07c2638e.yaml @@ -0,0 +1,20 @@ +--- +upgrade: + - | + The default value of ``[oslo_policy] policy_file`` config option has + been changed from ``policy.json`` to ``policy.yaml``. + Operators who are utilizing customized or previously generated + static policy JSON files (which are not needed by default), should + generate new policy files or convert them in YAML format. Use the + `oslopolicy-convert-json-to-yaml + `_ + tool to convert a JSON to YAML formatted policy file in + backward compatible way. +deprecations: + - | + Use of JSON policy files was deprecated by the ``oslo.policy`` library + during the Victoria development cycle. As a result, this deprecation is + being noted in the Wallaby cycle with an anticipated future removal of support + by ``oslo.policy``. As such operators will need to convert to YAML policy + files. Please see the upgrade notes for details on migration of any + custom policy files. diff --git a/releasenotes/notes/deprecating-spares-pool-9f92787ec9809a78.yaml b/releasenotes/notes/deprecating-spares-pool-9f92787ec9809a78.yaml new file mode 100644 index 0000000000..4444450ccc --- /dev/null +++ b/releasenotes/notes/deprecating-spares-pool-9f92787ec9809a78.yaml @@ -0,0 +1,11 @@ +--- +deprecations: + - | + Spares pool support is deprecated, pending removal in the X release. Use of + the spares pool was originally recommended to increase provisioning speed, + but since Nova's server groups do not support adding existing VMs, Octavia + cannot support use of the spares pool with the Active-Standby topology. 
+ Since this is our recommended topology for production deployments, and + speed is less essential in development/testing environments (the only place + we could recommend the use of Single topology), the overhead of maintaining + spares pool support exceeds its theoretical usefulness. diff --git a/releasenotes/notes/disable-conntrack-for-tcp-01ef6948d99353c2.yaml b/releasenotes/notes/disable-conntrack-for-tcp-01ef6948d99353c2.yaml new file mode 100644 index 0000000000..2313fac9ec --- /dev/null +++ b/releasenotes/notes/disable-conntrack-for-tcp-01ef6948d99353c2.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Disable conntrack for TCP flows in the Amphora; it reduces memory usage for + HAProxy-based listeners and prevents some kernel warnings about dropped + packets. diff --git a/releasenotes/notes/disable_logging-3b50f388ee2b8127.yaml b/releasenotes/notes/disable_logging-3b50f388ee2b8127.yaml new file mode 100644 index 0000000000..c0d9f2d717 --- /dev/null +++ b/releasenotes/notes/disable_logging-3b50f388ee2b8127.yaml @@ -0,0 +1,18 @@ +--- +features: + - | + The new option `[haproxy_amphora]/connection_logging` will disable logging + of connection data if set to False, which can improve performance of the + load balancer and might aid compliance. +security: + - | + Disabling connection logging might make it more difficult to audit + systems for unauthorized access, from which IPs it originated, and + which assets were compromised. +other: + - | + As part of GDPR compliance, connection logs might be considered + personal data and might need to follow specific data retention policies. + Disabling connection logging might aid in making Octavia compliant by + preventing the output of such data. As always, consult with an expert + on compliance prior to making changes. diff --git a/releasenotes/notes/diskimage-create-add-file-extension-to-default-34eeaa97b788cc67.yaml b/releasenotes/notes/diskimage-create-add-file-extension-to-default-34eeaa97b788cc67.yaml new file mode 100644 index 0000000000..3449cf9f8d --- /dev/null +++ b/releasenotes/notes/diskimage-create-add-file-extension-to-default-34eeaa97b788cc67.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + The default for the output file has been changed in + diskimage-create.sh. It is now amphora-x64-haproxy.qcow2 instead of + amphora-x64-haproxy. +fixes: + - | + diskimage-create.sh used $AMP_OUTPUTFILENAME.$AMP_IMAGETYPE for + constructing the image file path when checking the file size, which was + not correct and caused a "No such file or directory" error. diff --git a/releasenotes/notes/diskimage-create-git-branch-9c44e7e3fa70a985.yaml b/releasenotes/notes/diskimage-create-git-branch-9c44e7e3fa70a985.yaml new file mode 100644 index 0000000000..8dcfe74194 --- /dev/null +++ b/releasenotes/notes/diskimage-create-git-branch-9c44e7e3fa70a985.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added an option to the diskimage-create.sh script to specify the Octavia + Git branch to build the image from. diff --git a/releasenotes/notes/do-not-serialize-full-loadbalancer-graph-on-get-amphora-network-configs-347a0a4340ee222b.yaml b/releasenotes/notes/do-not-serialize-full-loadbalancer-graph-on-get-amphora-network-configs-347a0a4340ee222b.yaml new file mode 100644 index 0000000000..6fce35c4df --- /dev/null +++ b/releasenotes/notes/do-not-serialize-full-loadbalancer-graph-on-get-amphora-network-configs-347a0a4340ee222b.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Ignore serialization of the loadbalancer class in GetAmphoraNetworkConfigs tasks.
+ This avoids storing the full graph in the jobboard details. It fixes cases + with jobboard enabled for huge LBs with ~2000+ resources in the graph. diff --git a/releasenotes/notes/documentation-migration-f72c6a1703a105b7.yaml b/releasenotes/notes/documentation-migration-f72c6a1703a105b7.yaml new file mode 100644 index 0000000000..fff33e53d8 --- /dev/null +++ b/releasenotes/notes/documentation-migration-f72c6a1703a105b7.yaml @@ -0,0 +1,7 @@ +--- +other: + - | + The Octavia project documentation has been reorganized as part of + the OpenStack documentation migration project. + The Octavia project documentation is now located at: + https://docs.openstack.org/octavia/latest/ diff --git a/releasenotes/notes/drop-python-2-7-a6b3f456bf6a3da7.yaml b/releasenotes/notes/drop-python-2-7-a6b3f456bf6a3da7.yaml new file mode 100644 index 0000000000..c716883bf4 --- /dev/null +++ b/releasenotes/notes/drop-python-2-7-a6b3f456bf6a3da7.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Python 2.7 support has been dropped. The minimum version of Python now + supported by Octavia is Python 3.6. diff --git a/releasenotes/notes/enable-keystone-on-api-b3ebb132ad5ab308.yaml b/releasenotes/notes/enable-keystone-on-api-b3ebb132ad5ab308.yaml new file mode 100644 index 0000000000..5299785693 --- /dev/null +++ b/releasenotes/notes/enable-keystone-on-api-b3ebb132ad5ab308.yaml @@ -0,0 +1,11 @@ +--- +prelude: > + Support for Keystone token authentication on the frontend Octavia API. +features: + - After setting "auth_strategy = keystone", all incoming requests to the Octavia + API will be verified with Keystone to ensure they were sent by an authenticated user. + By default that option is disabled because Neutron LBaaS v2 does not + support that functionality properly. +upgrade: + - This feature adds a new configuration value, "auth_strategy", which by default + is set to "noauth". diff --git a/releasenotes/notes/enable-mutable-configuration-1d7f62a133148767.yaml b/releasenotes/notes/enable-mutable-configuration-1d7f62a133148767.yaml new file mode 100644 index 0000000000..630955fe17 --- /dev/null +++ b/releasenotes/notes/enable-mutable-configuration-1d7f62a133148767.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + You can now update the running configuration of the Octavia control + plane processes by sending the parent process a "HUP" signal. + Note: The configuration item must support mutation. diff --git a/releasenotes/notes/encrypt-certs-and-keys-5175d7704d8df3ce.yaml b/releasenotes/notes/encrypt-certs-and-keys-5175d7704d8df3ce.yaml new file mode 100644 index 0000000000..abadce89ef --- /dev/null +++ b/releasenotes/notes/encrypt-certs-and-keys-5175d7704d8df3ce.yaml @@ -0,0 +1,15 @@ +--- +security: + - | + As a followup to the fix that resolved CVE-2018-16856, Octavia will now + encrypt certificates and keys used for secure communication with amphorae, + in its internal workflows. Octavia used to exclude debug-level log prints + for specific tasks and flows that were explicitly specified by name, a + method that is susceptible to code changes. +other: + - | + Added a new option named server_certs_key_passphrase under the certificates + section. The default value gets copied from an environment variable named + TLS_PASS_AMPS_DEFAULT. In a case where TLS_PASS_AMPS_DEFAULT is not set, + and the operator did not fill any other value directly, + 'insecure-key-do-not-use-this-key' will be used.
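A sketch of the passphrase option described in the encrypt-certs-and-keys note above; the placeholder must be replaced with a real secret, since the fallback value is intentionally insecure:

    [certificates]
    # Defaults to the TLS_PASS_AMPS_DEFAULT environment variable when unset
    server_certs_key_passphrase = <strong-passphrase>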
diff --git a/releasenotes/notes/ensure-rsyslog-config-is-reloaded-b4a25a98b661d0f1.yaml b/releasenotes/notes/ensure-rsyslog-config-is-reloaded-b4a25a98b661d0f1.yaml new file mode 100644 index 0000000000..762c3453eb --- /dev/null +++ b/releasenotes/notes/ensure-rsyslog-config-is-reloaded-b4a25a98b661d0f1.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Ensure that the provided rsyslog configuration file is used by rsyslog + by restarting the service; this fixes the log offloading feature on + distributions that start rsyslog before cloud-init. diff --git a/releasenotes/notes/ensure-rsyslog-reloaded-amphorav1-a4ec5127a459f3bf.yaml b/releasenotes/notes/ensure-rsyslog-reloaded-amphorav1-a4ec5127a459f3bf.yaml new file mode 100644 index 0000000000..9af01c1bf1 --- /dev/null +++ b/releasenotes/notes/ensure-rsyslog-reloaded-amphorav1-a4ec5127a459f3bf.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Ensure that the provided rsyslog configuration file is used by rsyslog in + the amphora by restarting the service when using the amphorav1 provider; this + fixes the log offloading feature on distributions that start rsyslog before + cloud-init. diff --git a/releasenotes/notes/etcd-backend-for-jobboard-a08ef7c37180e7c6.yaml b/releasenotes/notes/etcd-backend-for-jobboard-a08ef7c37180e7c6.yaml new file mode 100644 index 0000000000..95c3234cc2 --- /dev/null +++ b/releasenotes/notes/etcd-backend-for-jobboard-a08ef7c37180e7c6.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for the Jobboard Etcd backend in Taskflow. diff --git a/releasenotes/notes/extend_api_to_accept_qos_policy_id-128ab592a735f3b8.yaml b/releasenotes/notes/extend_api_to_accept_qos_policy_id-128ab592a735f3b8.yaml new file mode 100644 index 0000000000..3ce2a92bc2 --- /dev/null +++ b/releasenotes/notes/extend_api_to_accept_qos_policy_id-128ab592a735f3b8.yaml @@ -0,0 +1,5 @@ +--- +features: + - The Octavia API can now accept a neutron QoS Policy ID to support + QoS requirements on the Load Balancer VIP port when creating or updating a + load balancer. diff --git a/releasenotes/notes/failover-threshold-f5cdf2bbe8a64d6d.yaml b/releasenotes/notes/failover-threshold-f5cdf2bbe8a64d6d.yaml new file mode 100644 index 0000000000..8d940e0d74 --- /dev/null +++ b/releasenotes/notes/failover-threshold-f5cdf2bbe8a64d6d.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + A new configuration option ``failover_threshold`` can be set to limit the + number of amphorae simultaneously pending failover before halting the + automatic failover process. This should help prevent unwanted mass failover + events that can happen in cases like network interruption to an AZ or the + database becoming read-only. This feature is not enabled by default, and it + should be configured carefully based on the size of the environment. + For example, with 100 amphorae a good threshold might be 20 or 30, or + a value greater than the typical number of amphorae that would be expected + on a single host. diff --git a/releasenotes/notes/failover-vip-no-dhcp-50805c5321ebbb05.yaml b/releasenotes/notes/failover-vip-no-dhcp-50805c5321ebbb05.yaml new file mode 100644 index 0000000000..8e25e743aa --- /dev/null +++ b/releasenotes/notes/failover-vip-no-dhcp-50805c5321ebbb05.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue that caused failover to be unsuccessful if the VIP network + was not DHCP enabled.
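A sketch of the ``failover_threshold`` option from the note above; the [health_manager] section is an assumption, and the value should be sized to the deployment as the note describes:

    [health_manager]
    # Halt automatic failovers when this many amphorae are pending failover
    failover_threshold = 30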
diff --git a/releasenotes/notes/filter-bogus-exception-when-jobboard-disabled-6f1375463f5a71dc.yaml b/releasenotes/notes/filter-bogus-exception-when-jobboard-disabled-6f1375463f5a71dc.yaml new file mode 100644 index 0000000000..e5e535af50 --- /dev/null +++ b/releasenotes/notes/filter-bogus-exception-when-jobboard-disabled-6f1375463f5a71dc.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Added a filter to hide a bogus ComputeWaitTimeoutException exception when + creating an amphora when jobboard is disabled. This exception is part of + the flow when creating a load balancer or an amphora and should not be shown + to the user. diff --git a/releasenotes/notes/filter-out-private-information-from-taskflow-logs-0d8697140423b4d5.yaml b/releasenotes/notes/filter-out-private-information-from-taskflow-logs-0d8697140423b4d5.yaml new file mode 100644 index 0000000000..5cead5933e --- /dev/null +++ b/releasenotes/notes/filter-out-private-information-from-taskflow-logs-0d8697140423b4d5.yaml @@ -0,0 +1,12 @@ +--- +security: + - | + Filter out private information from the taskflow logs when ''INFO'' level + messages are enabled and when jobboard is enabled. Logs might have included + TLS certificates and private_key. By default, in Octavia only WARNING and + above messages are enabled in taskflow and jobboard is disabled. +fixes: + - | + The parameters of a taskflow Flow were logged in ''INFO'' level messages by + taskflow, it included TLS-enabled listeners and pools parameters, such as + certificates and private_key. diff --git a/releasenotes/notes/fix-API-list-performance-3b121deffbc3ce4a.yaml b/releasenotes/notes/fix-API-list-performance-3b121deffbc3ce4a.yaml new file mode 100644 index 0000000000..ae52db8d9f --- /dev/null +++ b/releasenotes/notes/fix-API-list-performance-3b121deffbc3ce4a.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a performance regression in the Octavia v2 API when using the + "list" APIs. diff --git a/releasenotes/notes/fix-IPv6-vip-079a3285f78686ee.yaml b/releasenotes/notes/fix-IPv6-vip-079a3285f78686ee.yaml new file mode 100644 index 0000000000..55605d25e4 --- /dev/null +++ b/releasenotes/notes/fix-IPv6-vip-079a3285f78686ee.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + To fix IPv6 VIP addresses, you must run the "octavia-db-manage upgrade + head" migration script. +fixes: + - | + Fully expanded IPv6 VIP addresses would fail to store with "Data too long + for column 'ip_address' at row 1". This patch includes a database migration + to fix this column. diff --git a/releasenotes/notes/fix-PING-health-monitor-bc38de57fa759ac0.yaml b/releasenotes/notes/fix-PING-health-monitor-bc38de57fa759ac0.yaml new file mode 100644 index 0000000000..ad1ebaf290 --- /dev/null +++ b/releasenotes/notes/fix-PING-health-monitor-bc38de57fa759ac0.yaml @@ -0,0 +1,17 @@ +--- +issues: + - | + Amphora images with HAProxy older than 1.6 (CentOS 7, etc.) will still + use health monitor type TCP when PING is selected by the user. +upgrade: + - | + Amphora will need to be updated to a new image with this version of the + agent and ping-wrapper.sh script prior to updating the Octavia controllers. + If a load balancer is using a health monitor of type PING with an + amphora image that has not been updated, the next configuration change to + the load balancer will cause it to go into an ERROR state until it is + failed over to an updated image. +fixes: + - | + Fixed an issue where health monitors of type PING were really doing a + TCP health check. 
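The fix-IPv6-vip note above calls for running the database migration; the command as given in the note:

    octavia-db-manage upgrade head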
diff --git a/releasenotes/notes/fix-SNI-single-process-879ffce5eaa6c1c3.yaml b/releasenotes/notes/fix-SNI-single-process-879ffce5eaa6c1c3.yaml new file mode 100644 index 0000000000..c41d4f968c --- /dev/null +++ b/releasenotes/notes/fix-SNI-single-process-879ffce5eaa6c1c3.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue where load balancers with more than one TLS enabled + listener, one or more SNI enabled, may load certificates from + other TLS enabled listeners for SNI use. diff --git a/releasenotes/notes/fix-active-standby-in-centos-4e47140e0e139de8.yaml b/releasenotes/notes/fix-active-standby-in-centos-4e47140e0e139de8.yaml new file mode 100644 index 0000000000..1da390c904 --- /dev/null +++ b/releasenotes/notes/fix-active-standby-in-centos-4e47140e0e139de8.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed duplicated IPv6 addresses in Active/Standby mode in CentOS amphorae. diff --git a/releasenotes/notes/fix-active_connection_retry_interval-config-option-name-6fce9ea68803540c.yaml b/releasenotes/notes/fix-active_connection_retry_interval-config-option-name-6fce9ea68803540c.yaml new file mode 100644 index 0000000000..f39446ed8e --- /dev/null +++ b/releasenotes/notes/fix-active_connection_retry_interval-config-option-name-6fce9ea68803540c.yaml @@ -0,0 +1,12 @@ +--- +upgrade: + - | + The ``[haproxy_amphora].active_connection_rety_interval`` configuration + option has been renamed to + ``[haproxy_amphora].active_connection_retry_interval``. An alias for the old + name is in place to maintain compatibility with old configuration files. +fixes: + - | + The ``[haproxy_amphora].active_connection_rety_interval`` configuration + option has been renamed to + ``[haproxy_amphora].active_connection_retry_interval``. diff --git a/releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml b/releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml new file mode 100644 index 0000000000..91b65a639d --- /dev/null +++ b/releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where members added to TLS-enabled pools would go to ERROR + provisioning status. diff --git a/releasenotes/notes/fix-amp-failover-missing-vrrp-port-9b5f13b9951b7edb.yaml b/releasenotes/notes/fix-amp-failover-missing-vrrp-port-9b5f13b9951b7edb.yaml new file mode 100644 index 0000000000..9605ac0286 --- /dev/null +++ b/releasenotes/notes/fix-amp-failover-missing-vrrp-port-9b5f13b9951b7edb.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue with failing over an amphora if the pair amphora in an + active/standby pair had a missing VRRP port in neutron. diff --git a/releasenotes/notes/fix-amphora-failover-amphorav2-b19a76ccfdc75245.yaml b/releasenotes/notes/fix-amphora-failover-amphorav2-b19a76ccfdc75245.yaml new file mode 100644 index 0000000000..207dc9b5c8 --- /dev/null +++ b/releasenotes/notes/fix-amphora-failover-amphorav2-b19a76ccfdc75245.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix an issue with amphorav2 driver, a failover of an amphora created an + amphora with an ERROR status. diff --git a/releasenotes/notes/fix-amphora-haproxy-count-b1b1df43a7150926.yaml b/releasenotes/notes/fix-amphora-haproxy-count-b1b1df43a7150926.yaml new file mode 100644 index 0000000000..568bdac818 --- /dev/null +++ b/releasenotes/notes/fix-amphora-haproxy-count-b1b1df43a7150926.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix amphora haproxy_count to return the number of + haproxy processes that are running. 
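A small sketch of the renamed option from the fix-active_connection_retry_interval note above (the value is illustrative; the old misspelled name remains usable as an alias):

    [haproxy_amphora]
    active_connection_retry_interval = 5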
diff --git a/releasenotes/notes/fix-amphora-to-support-centos-stream-9-e4c8599ae152d396.yaml b/releasenotes/notes/fix-amphora-to-support-centos-stream-9-e4c8599ae152d396.yaml new file mode 100644 index 0000000000..dae0aa4080 --- /dev/null +++ b/releasenotes/notes/fix-amphora-to-support-centos-stream-9-e4c8599ae152d396.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed issues when building an amphora image for CentOS Stream 9. diff --git a/releasenotes/notes/fix-amphora-to-support-rhel-9-b10091e81b48533a.yaml b/releasenotes/notes/fix-amphora-to-support-rhel-9-b10091e81b48533a.yaml new file mode 100644 index 0000000000..402665af8d --- /dev/null +++ b/releasenotes/notes/fix-amphora-to-support-rhel-9-b10091e81b48533a.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed issues when building an amphora image for RHEL 9. diff --git a/releasenotes/notes/fix-amphora-update-api-call-d90853d7f75304a4.yaml b/releasenotes/notes/fix-amphora-update-api-call-d90853d7f75304a4.yaml new file mode 100644 index 0000000000..ad9586a2a7 --- /dev/null +++ b/releasenotes/notes/fix-amphora-update-api-call-d90853d7f75304a4.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed a bug that prevented the amphora from being updated by the Amphora + Configure API call; the API call was successful but the internal flow for + updating it failed. diff --git a/releasenotes/notes/fix-amphorav2-failover-secgroup-c793de5e00b32653.yaml b/releasenotes/notes/fix-amphorav2-failover-secgroup-c793de5e00b32653.yaml new file mode 100644 index 0000000000..1e95f98539 --- /dev/null +++ b/releasenotes/notes/fix-amphorav2-failover-secgroup-c793de5e00b32653.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where an amphorav2 LB could not be reached after load balancer + failover. The LB security group was not set in the amphora port. diff --git a/releasenotes/notes/fix-api-listener-update-sni-containers-6595c52e2de1f621.yaml b/releasenotes/notes/fix-api-listener-update-sni-containers-6595c52e2de1f621.yaml new file mode 100644 index 0000000000..e8366f7ffd --- /dev/null +++ b/releasenotes/notes/fix-api-listener-update-sni-containers-6595c52e2de1f621.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed an issue where SNI containers were not being applied on + listener update API calls. + - | + Fixed an Octavia API validation on listener update where SNI containers + could be set on non-TERMINATED_HTTPS listeners. diff --git a/releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml b/releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml new file mode 100644 index 0000000000..313b32a9dc --- /dev/null +++ b/releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where some columns could not be used for sort keys in + API list calls. diff --git a/releasenotes/notes/fix-application-credential-tokens-with-barbican-3b7d13283206c124.yaml b/releasenotes/notes/fix-application-credential-tokens-with-barbican-3b7d13283206c124.yaml new file mode 100644 index 0000000000..b56ec73a7d --- /dev/null +++ b/releasenotes/notes/fix-application-credential-tokens-with-barbican-3b7d13283206c124.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix an authentication error with Barbican when creating a TERMINATED_HTTPS + listener with application credential tokens or trust IDs.
diff --git a/releasenotes/notes/fix-attributeerror-when-securitygroup-rule-has-protocol-none-9b7217c5477d01b6.yaml b/releasenotes/notes/fix-attributeerror-when-securitygroup-rule-has-protocol-none-9b7217c5477d01b6.yaml new file mode 100644 index 0000000000..9e6cd7f013 --- /dev/null +++ b/releasenotes/notes/fix-attributeerror-when-securitygroup-rule-has-protocol-none-9b7217c5477d01b6.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed potential AttributeError during listener update when a security group + rule had no protocol defined (i.e. it was null). diff --git a/releasenotes/notes/fix-audit-map-for-failover-e63390399da6841d.yaml b/releasenotes/notes/fix-audit-map-for-failover-e63390399da6841d.yaml new file mode 100644 index 0000000000..3de6b28f11 --- /dev/null +++ b/releasenotes/notes/fix-audit-map-for-failover-e63390399da6841d.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue in the CADF audit map file for failover actions that could + cause keystonemiddleware to raise an exception. diff --git a/releasenotes/notes/fix-bad-management-port-update-3fa157f74ee8c7b2.yaml b/releasenotes/notes/fix-bad-management-port-update-3fa157f74ee8c7b2.yaml new file mode 100644 index 0000000000..aa13d3ae75 --- /dev/null +++ b/releasenotes/notes/fix-bad-management-port-update-3fa157f74ee8c7b2.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed an issue where, when adding or deleting a member, Octavia might have + reconfigured the management port of the amphora by adding or removing + additional subnets. Octavia no longer updates the management port during + those tasks. diff --git a/releasenotes/notes/fix-barbican-client-verfiy-689be1b9389bd1d8.yaml b/releasenotes/notes/fix-barbican-client-verfiy-689be1b9389bd1d8.yaml new file mode 100644 index 0000000000..4943ac2350 --- /dev/null +++ b/releasenotes/notes/fix-barbican-client-verfiy-689be1b9389bd1d8.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where creating a listener failed when the barbican service + has TLS enabled. diff --git a/releasenotes/notes/fix-batch-member-update-race-condition-09b82e2cc3121e03.yaml b/releasenotes/notes/fix-batch-member-update-race-condition-09b82e2cc3121e03.yaml new file mode 100644 index 0000000000..80307992fc --- /dev/null +++ b/releasenotes/notes/fix-batch-member-update-race-condition-09b82e2cc3121e03.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a potential race condition in the member batch update API call; the + load balancers might not have been locked properly. diff --git a/releasenotes/notes/fix-certs-ramfs-race-561f355d13fc6d14.yaml b/releasenotes/notes/fix-certs-ramfs-race-561f355d13fc6d14.yaml new file mode 100644 index 0000000000..06d551a988 --- /dev/null +++ b/releasenotes/notes/fix-certs-ramfs-race-561f355d13fc6d14.yaml @@ -0,0 +1,14 @@ +--- +upgrade: + - | + A new amphora image is required to fix the potential certs-ramfs race + condition. +security: + - | + A race condition between the certs-ramfs and the amphora agent may lead + to tenant TLS content being stored on the amphora filesystem instead of + in the encrypted RAM filesystem. +fixes: + - | + Fixed a potential race condition with the certs-ramfs and amphora agent + services.
diff --git a/releasenotes/notes/fix-client-auth-single-process-749af7791454ff03.yaml b/releasenotes/notes/fix-client-auth-single-process-749af7791454ff03.yaml new file mode 100644 index 0000000000..60c15fe725 --- /dev/null +++ b/releasenotes/notes/fix-client-auth-single-process-749af7791454ff03.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue where load balancers with more than one TLS-enabled + listener, using client authentication and/or backend re-encryption, + may load incorrect certificates for the listener. diff --git a/releasenotes/notes/fix-computewait-with-persistence-d10223bfb48a0ded.yaml b/releasenotes/notes/fix-computewait-with-persistence-d10223bfb48a0ded.yaml new file mode 100644 index 0000000000..6275a02811 --- /dev/null +++ b/releasenotes/notes/fix-computewait-with-persistence-d10223bfb48a0ded.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix an issue that may have occurred when running the amphorav2 driver with + persistence: the ComputeActiveWait task was incorrectly executed twice on + different controllers. diff --git a/releasenotes/notes/fix-corrupted-global-server-state-file-325ab7c62e21ff14.yaml b/releasenotes/notes/fix-corrupted-global-server-state-file-325ab7c62e21ff14.yaml new file mode 100644 index 0000000000..e0cd761883 --- /dev/null +++ b/releasenotes/notes/fix-corrupted-global-server-state-file-325ab7c62e21ff14.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed a "corrupted global server state file" error in CentOS Stream 9 when + reloading the state of the servers after restarting haproxy. + It also fixes the recovery of the operational state of the servers in + haproxy after its restart. diff --git a/releasenotes/notes/fix-creating-fully-populated-load-balancer-ae57ffae5c017ac3.yaml b/releasenotes/notes/fix-creating-fully-populated-load-balancer-ae57ffae5c017ac3.yaml new file mode 100644 index 0000000000..ab0d7c6ac2 --- /dev/null +++ b/releasenotes/notes/fix-creating-fully-populated-load-balancer-ae57ffae5c017ac3.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes the creation of a fully populated load balancer with an L7 policy + whose type is not REDIRECT_POOL and a default_pool field. diff --git a/releasenotes/notes/fix-default-timeout-values-for-listeners-108c8048ba8beb9a.yaml b/releasenotes/notes/fix-default-timeout-values-for-listeners-108c8048ba8beb9a.yaml new file mode 100644 index 0000000000..cab6766799 --- /dev/null +++ b/releasenotes/notes/fix-default-timeout-values-for-listeners-108c8048ba8beb9a.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix the default value override for listener timeouts. Changes to the + default timeouts in the configuration file weren't correctly applied to the + default listener parameters. diff --git a/releasenotes/notes/fix-disable-udp-listener-status-3d34a5596e62da1c.yaml b/releasenotes/notes/fix-disable-udp-listener-status-3d34a5596e62da1c.yaml new file mode 100644 index 0000000000..f2cb52425c --- /dev/null +++ b/releasenotes/notes/fix-disable-udp-listener-status-3d34a5596e62da1c.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix the operating status for disabled UDP listeners. The operating status of + disabled UDP listeners is now OFFLINE instead of ONLINE; the behavior is now + similar to the behavior of HTTP/HTTPS/TCP/... listeners. 
diff --git a/releasenotes/notes/fix-disabled-udp-pool-3e84558f996017d5.yaml b/releasenotes/notes/fix-disabled-udp-pool-3e84558f996017d5.yaml new file mode 100644 index 0000000000..260de05a7b --- /dev/null +++ b/releasenotes/notes/fix-disabled-udp-pool-3e84558f996017d5.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix disabled UDP pools. Disabled UDP pools were marked as "OFFLINE" but the + requests were still forwarded to the members of the pool. diff --git a/releasenotes/notes/fix-drain-status-detection-b9395fa4fe8c936f.yaml b/releasenotes/notes/fix-drain-status-detection-b9395fa4fe8c936f.yaml new file mode 100644 index 0000000000..6f60a4624a --- /dev/null +++ b/releasenotes/notes/fix-drain-status-detection-b9395fa4fe8c936f.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Correctly detect the member operating status "drain" when querying status + data from HAProxy. diff --git a/releasenotes/notes/fix-driver-agent-decrement-quota-27486d9fa0bdeb89.yaml b/releasenotes/notes/fix-driver-agent-decrement-quota-27486d9fa0bdeb89.yaml new file mode 100644 index 0000000000..f6b67f5fba --- /dev/null +++ b/releasenotes/notes/fix-driver-agent-decrement-quota-27486d9fa0bdeb89.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue where provider drivers may not decrement the load + balancer objects quota on delete. diff --git a/releasenotes/notes/fix-driver-agent-graceful-shutdown-daff9ffaccb09a9e.yaml b/releasenotes/notes/fix-driver-agent-graceful-shutdown-daff9ffaccb09a9e.yaml new file mode 100644 index 0000000000..512cf269b8 --- /dev/null +++ b/releasenotes/notes/fix-driver-agent-graceful-shutdown-daff9ffaccb09a9e.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fix the shutdown of the driver-agent: the process might have been stuck + while waiting for threads to finish. Systemd would have killed the process + after a timeout, but some child processes might have leaked on the + controllers. diff --git a/releasenotes/notes/fix-driver-errors-81d33948288bf8cf.yaml b/releasenotes/notes/fix-driver-errors-81d33948288bf8cf.yaml new file mode 100644 index 0000000000..3976eea566 --- /dev/null +++ b/releasenotes/notes/fix-driver-errors-81d33948288bf8cf.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed an issue where the driver errors were not caught. diff --git a/releasenotes/notes/fix-dual-error-amp-failover-69e323892bad8254.yaml b/releasenotes/notes/fix-dual-error-amp-failover-69e323892bad8254.yaml new file mode 100644 index 0000000000..5240b7a186 --- /dev/null +++ b/releasenotes/notes/fix-dual-error-amp-failover-69e323892bad8254.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue that could cause load balancers with multiple amphorae + in a failed state to be unable to complete a failover. diff --git a/releasenotes/notes/fix-duplicate-sg-creation-0c502a5d2d8c276d.yaml b/releasenotes/notes/fix-duplicate-sg-creation-0c502a5d2d8c276d.yaml new file mode 100644 index 0000000000..5d0de32b58 --- /dev/null +++ b/releasenotes/notes/fix-duplicate-sg-creation-0c502a5d2d8c276d.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Fixes a load balancer creation failure when one of the listener ports + matches one of the Octavia-generated peer ports and allowed_cidrs is + explicitly set to 0.0.0.0/0 on the listener. The failure was caused by the + creation of two security group rules, one with remote_ip_prefix set to None + and one with remote_ip_prefix set to 0.0.0.0/0; neutron rejected the second + request because the security group rule already existed. 
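The duplicate security group rule note above boils down to normalizing the remote prefix before creating rules, since neutron treats remote_ip_prefix=None and the any-address CIDR as the same rule. A minimal sketch of that normalization; the helper names and rule tuples are illustrative, not Octavia's actual code:

```python
import ipaddress

def normalize_remote_prefix(cidr):
    """Map None and the any-address CIDR (0.0.0.0/0, ::/0) to one key.

    Neutron considers a rule with remote_ip_prefix=None equivalent to the
    any-address CIDR, so both must normalize to the same key to avoid
    creating a duplicate rule that neutron will reject.
    """
    if cidr is None:
        return None
    net = ipaddress.ip_network(cidr)
    return None if net.prefixlen == 0 else str(net)

def dedup_rules(rules):
    # rules: iterable of (protocol, port, remote_ip_prefix) tuples
    seen = set()
    for proto, port, cidr in rules:
        key = (proto, port, normalize_remote_prefix(cidr))
        if key not in seen:
            seen.add(key)
            yield proto, port, cidr

# The explicit 0.0.0.0/0 rule collapses into the implicit (None) one.
print(list(dedup_rules([("tcp", 1025, None), ("tcp", 1025, "0.0.0.0/0")])))
```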
diff --git a/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml b/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml new file mode 100644 index 0000000000..5c668cbacd --- /dev/null +++ b/releasenotes/notes/fix-empty-udp-pool-status-3171950628898468.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix an incorrect ``operating_status`` with empty UDP pools. A UDP pool + without any member is now ``ONLINE`` instead of ``OFFLINE``. diff --git a/releasenotes/notes/fix-enforced-selinux-on-centos-27842ca6afbb500c.yaml b/releasenotes/notes/fix-enforced-selinux-on-centos-27842ca6afbb500c.yaml new file mode 100644 index 0000000000..b82b581607 --- /dev/null +++ b/releasenotes/notes/fix-enforced-selinux-on-centos-27842ca6afbb500c.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Enable required SELinux booleans for CentOS or RHEL amphora images. diff --git a/releasenotes/notes/fix-error-amphora-agent-udp-pools-dual-stack-lb-b298ded551ac97e1.yaml b/releasenotes/notes/fix-error-amphora-agent-udp-pools-dual-stack-lb-b298ded551ac97e1.yaml new file mode 100644 index 0000000000..07090675f8 --- /dev/null +++ b/releasenotes/notes/fix-error-amphora-agent-udp-pools-dual-stack-lb-b298ded551ac97e1.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed a bug in the amphora-agent, an exception was triggered when an LB with + both IPv4 and IPv6 VIPs and with a UDP pool had only IPv4 members or + only IPv6 members. diff --git a/releasenotes/notes/fix-error-in-rsyslog-config-a316a7856e1a847a.yaml b/releasenotes/notes/fix-error-in-rsyslog-config-a316a7856e1a847a.yaml new file mode 100644 index 0000000000..3034815419 --- /dev/null +++ b/releasenotes/notes/fix-error-in-rsyslog-config-a316a7856e1a847a.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix an issue with the rsyslog configuration file in the Amphora when the + log offloading feature and the local log storage feature are both disabled. diff --git a/releasenotes/notes/fix-error-message-translation-in-HandleNetworkDelta-1f99354ee7a41c01.yaml b/releasenotes/notes/fix-error-message-translation-in-HandleNetworkDelta-1f99354ee7a41c01.yaml new file mode 100644 index 0000000000..9f583fca00 --- /dev/null +++ b/releasenotes/notes/fix-error-message-translation-in-HandleNetworkDelta-1f99354ee7a41c01.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an error message in the controller worker so that the message is + translated (i18n) before dynamic values are added. diff --git a/releasenotes/notes/fix-error-messages-ec817a66249e6666.yaml b/releasenotes/notes/fix-error-messages-ec817a66249e6666.yaml new file mode 100644 index 0000000000..c875f15765 --- /dev/null +++ b/releasenotes/notes/fix-error-messages-ec817a66249e6666.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Improves error messages returned to the user, such as errors for attempting + to add a second health monitor to a pool. diff --git a/releasenotes/notes/fix-error-on-delete-with-broken-amp-10d7f4e85754d7ee.yaml b/releasenotes/notes/fix-error-on-delete-with-broken-amp-10d7f4e85754d7ee.yaml new file mode 100644 index 0000000000..398aa4db98 --- /dev/null +++ b/releasenotes/notes/fix-error-on-delete-with-broken-amp-10d7f4e85754d7ee.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed a potential issue when deleting a load balancer with an amphora that + was not fully created: the deletion may have failed when deallocating the + VIP port, leaving the load balancer in ERROR state. 
diff --git a/releasenotes/notes/fix-error-with-duplicate-members-in-batch-update-610ffbbf949927d0.yaml b/releasenotes/notes/fix-error-with-duplicate-members-in-batch-update-610ffbbf949927d0.yaml new file mode 100644 index 0000000000..a79300c24f --- /dev/null +++ b/releasenotes/notes/fix-error-with-duplicate-members-in-batch-update-610ffbbf949927d0.yaml @@ -0,0 +1,10 @@ +--- +fixes: + - | + Added a validation step to the batch member API request that checks if a + member is included multiple times in the list of updated members; this + additional check prevents the load balancer from being stuck in + PENDING_UPDATE. Duplicate members in the batch member flow triggered an + exception in Taskflow. + The API now returns 400 (ValidationException) if a member appears more + than once in the body of the request. diff --git a/releasenotes/notes/fix-eth1-ip-flush-7fadda4bdca387b5.yaml b/releasenotes/notes/fix-eth1-ip-flush-7fadda4bdca387b5.yaml new file mode 100644 index 0000000000..76992ab444 --- /dev/null +++ b/releasenotes/notes/fix-eth1-ip-flush-7fadda4bdca387b5.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + To fix the issue with active/standby load balancers or single topology + load balancers with members on the VIP subnet, you need to update the + amphora image. +critical: + - | + Fixed a bug where active/standby load balancers and single topology + load balancers with members on the VIP subnet may fail. An updated + image is required to fix this bug. diff --git a/releasenotes/notes/fix-filtering-with-boolean-attributes-15df51820753a900.yaml b/releasenotes/notes/fix-filtering-with-boolean-attributes-15df51820753a900.yaml new file mode 100644 index 0000000000..b43101ee74 --- /dev/null +++ b/releasenotes/notes/fix-filtering-with-boolean-attributes-15df51820753a900.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue when filtering resources with a boolean attribute in the GET + calls in the Octavia API. diff --git a/releasenotes/notes/fix-full-graph-loadbalancer-creation-if-jobboard-is-disabled.yaml b/releasenotes/notes/fix-full-graph-loadbalancer-creation-if-jobboard-is-disabled.yaml new file mode 100644 index 0000000000..9325469e8f --- /dev/null +++ b/releasenotes/notes/fix-full-graph-loadbalancer-creation-if-jobboard-is-disabled.yaml @@ -0,0 +1,5 @@ +--- +fixes: +- | + Fixed a bug where a full graph of a load balancer was created without + listeners if jobboard_enabled=False. diff --git a/releasenotes/notes/fix-fully-populated-lb-with-listener-92a369ea8d57e8f5.yaml b/releasenotes/notes/fix-fully-populated-lb-with-listener-92a369ea8d57e8f5.yaml new file mode 100644 index 0000000000..7692505130 --- /dev/null +++ b/releasenotes/notes/fix-fully-populated-lb-with-listener-92a369ea8d57e8f5.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a bug that prevented Octavia from creating listeners with the + fully-populated load balancer API in SINGLE topology mode. diff --git a/releasenotes/notes/fix-fully-populated-with-allowed-cidrs-ad04ccf02bf9cbbc.yaml b/releasenotes/notes/fix-fully-populated-with-allowed-cidrs-ad04ccf02bf9cbbc.yaml new file mode 100644 index 0000000000..47aa3d5759 --- /dev/null +++ b/releasenotes/notes/fix-fully-populated-with-allowed-cidrs-ad04ccf02bf9cbbc.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed a bug when creating a load balancer and a listener with + ``allowed_cidrs`` with the fully-populated load balancer API: the call was + rejected because Octavia could not validate that the IP addresses of the + ``allowed_cidrs`` have the same family as the VIP address. 
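For the ``allowed_cidrs`` note just above, the address-family check in question can be expressed with the standard library alone. A minimal sketch; the function name is illustrative, not Octavia's actual validator:

```python
import ipaddress

def cidrs_match_vip_family(vip_address, allowed_cidrs):
    """Return True if every CIDR has the same IP version as the VIP."""
    vip_version = ipaddress.ip_address(vip_address).version
    return all(
        ipaddress.ip_network(cidr, strict=False).version == vip_version
        for cidr in allowed_cidrs
    )

# A v6 CIDR on a v4 VIP must be rejected; a matching family passes.
assert cidrs_match_vip_family("192.0.2.10", ["203.0.113.0/24"])
assert not cidrs_match_vip_family("192.0.2.10", ["2001:db8::/64"])
```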
diff --git a/releasenotes/notes/fix-garp-for-udp-listeners-6bf2ec8d491d1e1b.yaml b/releasenotes/notes/fix-garp-for-udp-listeners-6bf2ec8d491d1e1b.yaml new file mode 100644 index 0000000000..cd45eaf49f --- /dev/null +++ b/releasenotes/notes/fix-garp-for-udp-listeners-6bf2ec8d491d1e1b.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed an issue with SINGLE topology load balancers with UDP listeners: the + Amphora now sends a Gratuitous ARP packet when a UDP pool is added, which + makes the VIP address reachable more quickly after a failover or when + reusing a previously allocated IP address. diff --git a/releasenotes/notes/fix-haproxy-compatibility-about-server-state-file-df70e5ac859417e2.yaml b/releasenotes/notes/fix-haproxy-compatibility-about-server-state-file-df70e5ac859417e2.yaml new file mode 100644 index 0000000000..559c10e943 --- /dev/null +++ b/releasenotes/notes/fix-haproxy-compatibility-about-server-state-file-df70e5ac859417e2.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed a backwards compatibility issue with the feature that preserves HAProxy + server states between reloads. + HAProxy versions 1.5 and below do not support this feature, so Octavia + does not activate it on amphorae with those versions. diff --git a/releasenotes/notes/fix-haproxy-maxconn-with-disabled-listeners-fa89f762a94b8fe9.yaml b/releasenotes/notes/fix-haproxy-maxconn-with-disabled-listeners-fa89f762a94b8fe9.yaml new file mode 100644 index 0000000000..1642a97aa2 --- /dev/null +++ b/releasenotes/notes/fix-haproxy-maxconn-with-disabled-listeners-fa89f762a94b8fe9.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed the global number of concurrent connections in haproxy when disabling + listeners. The connection-limit of disabled listeners was used to compute + this value; disabled listeners are now skipped. diff --git a/releasenotes/notes/fix-haproxy-ssl-cache-size-5d5842100a87de54.yaml b/releasenotes/notes/fix-haproxy-ssl-cache-size-5d5842100a87de54.yaml new file mode 100644 index 0000000000..3a1c917482 --- /dev/null +++ b/releasenotes/notes/fix-haproxy-ssl-cache-size-5d5842100a87de54.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Reduce the value of tune.ssl.cachesize for HTTPS termination listeners to + prevent OOM during haproxy reload (LP: #2119987). diff --git a/releasenotes/notes/fix-health-check-db-outage-279b0bc1d0039312.yaml b/releasenotes/notes/fix-health-check-db-outage-279b0bc1d0039312.yaml new file mode 100644 index 0000000000..f816c0129c --- /dev/null +++ b/releasenotes/notes/fix-health-check-db-outage-279b0bc1d0039312.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed an issue where Octavia being unable to reach the database (all + database instances down) brought down all running load balancers. The + Health Manager is now more resilient to DB outages. diff --git a/releasenotes/notes/fix-health-monitor-info-retrieval-in-api-response-d3b2e02a3a966f60.yaml b/releasenotes/notes/fix-health-monitor-info-retrieval-in-api-response-d3b2e02a3a966f60.yaml new file mode 100644 index 0000000000..91a6732349 --- /dev/null +++ b/releasenotes/notes/fix-health-monitor-info-retrieval-in-api-response-d3b2e02a3a966f60.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + The response body of the LB API, when creating a new load + balancer, now correctly includes information about the health monitor. + Previously, this information was consistently null, even when a health + monitor was configured. 
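The database-outage note above hinges on retrying transient DB failures instead of failing once. A minimal sketch of that pattern with ``tenacity``, which Octavia uses for retries; the session helper and raw UPDATE are placeholders, not Octavia's actual code:

```python
import tenacity
from sqlalchemy import exc as sa_exc, text

# Keep retrying transient DB connection failures with capped exponential
# backoff, rather than marking resources DOWN/ERROR on the first failure.
@tenacity.retry(
    retry=tenacity.retry_if_exception_type(sa_exc.OperationalError),
    wait=tenacity.wait_exponential(multiplier=1, max=60),
    stop=tenacity.stop_after_delay(3600),  # illustrative cap
    reraise=True,
)
def update_provisioning_status(session_maker, lb_id, status):
    # Placeholder: open a session and persist the new status.
    with session_maker() as session:
        session.execute(
            text("UPDATE load_balancer SET provisioning_status = :s"
                 " WHERE id = :i"),
            {"s": status, "i": lb_id},
        )
        session.commit()
```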
diff --git a/releasenotes/notes/fix-health-monitor-to-error-revert-task-feb38ba7641a4892.yaml b/releasenotes/notes/fix-health-monitor-to-error-revert-task-feb38ba7641a4892.yaml new file mode 100644 index 0000000000..2ff75aa5fb --- /dev/null +++ b/releasenotes/notes/fix-health-monitor-to-error-revert-task-feb38ba7641a4892.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix a bug that prevented the provisioning_status of a health-monitor from + being set to ERROR when an error occurred while creating, updating or + deleting a health-monitor. \ No newline at end of file diff --git a/releasenotes/notes/fix-hm-operating-status-online-in-single-lb-call-214a7ca22937a877.yaml b/releasenotes/notes/fix-hm-operating-status-online-in-single-lb-call-214a7ca22937a877.yaml new file mode 100644 index 0000000000..5e5a892514 --- /dev/null +++ b/releasenotes/notes/fix-hm-operating-status-online-in-single-lb-call-214a7ca22937a877.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a bug that didn't set all the active load balancer Health Monitors + ONLINE in populated LB single-create calls. diff --git a/releasenotes/notes/fix-host-routes-with-amphorav2-and-persistence-54b99d651a4ee9c4.yaml b/releasenotes/notes/fix-host-routes-with-amphorav2-and-persistence-54b99d651a4ee9c4.yaml new file mode 100644 index 0000000000..24a859073e --- /dev/null +++ b/releasenotes/notes/fix-host-routes-with-amphorav2-and-persistence-54b99d651a4ee9c4.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix a serialization error when using host_routes in VIP subnets when + persistence in the amphorav2 driver is enabled. diff --git a/releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml b/releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml new file mode 100644 index 0000000000..0fd7aef9e6 --- /dev/null +++ b/releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix a bug that could interrupt resource creation when performing a graceful + shutdown of the housekeeping service and leave resources such as amphorae + in a BOOTING status. diff --git a/releasenotes/notes/fix-housekeeping-db-performance-b0d0fcfcce696314.yaml b/releasenotes/notes/fix-housekeeping-db-performance-b0d0fcfcce696314.yaml new file mode 100644 index 0000000000..1e09933afb --- /dev/null +++ b/releasenotes/notes/fix-housekeeping-db-performance-b0d0fcfcce696314.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed a performance issue where the Housekeeping service could + significantly and incrementally utilize CPU as more amphorae and load + balancers are created and/or marked as DELETED. diff --git a/releasenotes/notes/fix-http-https-healthmonitor-with-alpn-pools-82249b2b9a025068.yaml b/releasenotes/notes/fix-http-https-healthmonitor-with-alpn-pools-82249b2b9a025068.yaml new file mode 100644 index 0000000000..8db6b678e7 --- /dev/null +++ b/releasenotes/notes/fix-http-https-healthmonitor-with-alpn-pools-82249b2b9a025068.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed a bug with HTTP/HTTPS health-monitors on pools with ALPN protocols in + the amphora-driver. The health checks sent by haproxy were flagged as bad + requests by the backend servers. The haproxy configuration was updated to + use ALPN for the health checks too. 
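The revert-to-ERROR fix at the top of this group follows the usual Taskflow pattern: when a later task in the flow fails, Taskflow calls each completed task's ``revert`` method, which is where the resource must be marked ERROR. A minimal sketch under assumed names (the repository object and statuses are illustrative, not Octavia's actual code):

```python
from taskflow import task

class MarkHealthMonitorActiveInDB(task.Task):
    """Set ACTIVE on success; set ERROR if the flow is reverted."""

    def __init__(self, hm_repo, **kwargs):
        super().__init__(**kwargs)
        self.hm_repo = hm_repo  # assumed repository object

    def execute(self, health_mon):
        self.hm_repo.update(health_mon.id, provisioning_status="ACTIVE")

    def revert(self, health_mon, *args, **kwargs):
        # Invoked by Taskflow on flow failure; without this the
        # health monitor would be left in a PENDING_* status.
        self.hm_repo.update(health_mon.id, provisioning_status="ERROR")
```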
diff --git a/releasenotes/notes/fix-http-version-domain-name-hm-117ec59c63e18acd.yaml b/releasenotes/notes/fix-http-version-domain-name-hm-117ec59c63e18acd.yaml new file mode 100644 index 0000000000..6acaa636ba --- /dev/null +++ b/releasenotes/notes/fix-http-version-domain-name-hm-117ec59c63e18acd.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed an issue with HAProxy configuration files when using ``http_version`` + or ``domain_name`` attributes in ``healthmonitor``. The generated + configuration files used deprecated/removed features from HAProxy, the fix + updates the HAProxy templates to support recent releases. diff --git a/releasenotes/notes/fix-invalid-attribute-for-filtering-d2ddb95a1acbded2.yaml b/releasenotes/notes/fix-invalid-attribute-for-filtering-d2ddb95a1acbded2.yaml new file mode 100644 index 0000000000..8068d4fe1b --- /dev/null +++ b/releasenotes/notes/fix-invalid-attribute-for-filtering-d2ddb95a1acbded2.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed an error triggered when the deletion of the VIP security group fails. diff --git a/releasenotes/notes/fix-ip-rules-in-amphora-b74b7b616752c13b.yaml b/releasenotes/notes/fix-ip-rules-in-amphora-b74b7b616752c13b.yaml new file mode 100644 index 0000000000..02c0960d0e --- /dev/null +++ b/releasenotes/notes/fix-ip-rules-in-amphora-b74b7b616752c13b.yaml @@ -0,0 +1,11 @@ +--- +fixes: + - | + Fixed a bug that could have made the VIP port unreachable because of the + removal of some IP rules in the Amphora. It could have been triggered only + when sending a request from a subnet that is not the VIP subnet but that is + plugged as a member subnet. +upgrade: + - | + A patch that fixes an issue making the VIP port unreachable because of + missing IP rules requires an update of the Amphora image. diff --git a/releasenotes/notes/fix-ipv6-address-enclosed-in-brackets-c1cfc4717465ba09.yaml b/releasenotes/notes/fix-ipv6-address-enclosed-in-brackets-c1cfc4717465ba09.yaml new file mode 100644 index 0000000000..2d731ab3e2 --- /dev/null +++ b/releasenotes/notes/fix-ipv6-address-enclosed-in-brackets-c1cfc4717465ba09.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix a bug that prevented the operating_status of a health-monitor from + being set to ONLINE when IPv6 addresses were enclosed within square + brackets in ``controller_ip_port_list``. diff --git a/releasenotes/notes/fix-ipv6-interface-configuration-61b1bd7d2c962cea.yaml b/releasenotes/notes/fix-ipv6-interface-configuration-61b1bd7d2c962cea.yaml new file mode 100644 index 0000000000..718655ef60 --- /dev/null +++ b/releasenotes/notes/fix-ipv6-interface-configuration-61b1bd7d2c962cea.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix an issue with IPv6 members that could have been set to operating_status + ``ERROR`` just after being added. diff --git a/releasenotes/notes/fix-ipv6-session-persistence-failure-d649656a44fc3bbb.yaml b/releasenotes/notes/fix-ipv6-session-persistence-failure-d649656a44fc3bbb.yaml new file mode 100644 index 0000000000..90fabbfc72 --- /dev/null +++ b/releasenotes/notes/fix-ipv6-session-persistence-failure-d649656a44fc3bbb.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed an issue where session persistence based on source IP did not work + for IPv6 load balancers. Session persistence now functions properly for + IPv4, IPv6 and dual-stack load balancers. 
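For the bracketed-IPv6 note above, the underlying issue is splitting ``host:port`` entries such as ``[2001:db8::1]:5555`` where the host itself contains colons. A small sketch of tolerant parsing; ``split_host_port`` and the default port are illustrative, not the project's parser:

```python
def split_host_port(entry, default_port=5555):
    """Split 'host:port' where host may be a bracketed IPv6 address."""
    if entry.startswith("["):            # [2001:db8::1]:5555
        host, _, rest = entry[1:].partition("]")
        port = rest.lstrip(":") or str(default_port)
    elif entry.count(":") == 1:          # 192.0.2.1:5555
        host, _, port = entry.partition(":")
    else:                                # bare IPv6 or bare IPv4, no port
        host, port = entry, str(default_port)
    return host, int(port)

assert split_host_port("[2001:db8::1]:5555") == ("2001:db8::1", 5555)
assert split_host_port("192.0.2.1:5555") == ("192.0.2.1", 5555)
assert split_host_port("2001:db8::1") == ("2001:db8::1", 5555)
```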
diff --git a/releasenotes/notes/fix-ipv6-udp-health-message-ed94b35bbea396ec.yaml b/releasenotes/notes/fix-ipv6-udp-health-message-ed94b35bbea396ec.yaml new file mode 100644 index 0000000000..e7d9d93213 --- /dev/null +++ b/releasenotes/notes/fix-ipv6-udp-health-message-ed94b35bbea396ec.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Some IPv6 UDP members were incorrectly marked in ERROR status because of + a formatting issue while generating the health message in the amphora. diff --git a/releasenotes/notes/fix-l7rule-FILE_TYPE-EQUAL_TO-6e84773d6ab22c50.yaml b/releasenotes/notes/fix-l7rule-FILE_TYPE-EQUAL_TO-6e84773d6ab22c50.yaml new file mode 100644 index 0000000000..716e574a7f --- /dev/null +++ b/releasenotes/notes/fix-l7rule-FILE_TYPE-EQUAL_TO-6e84773d6ab22c50.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a bug when using a L7Rule with FILE_TYPE and EQUAL_TO comparison, + it never matched due to an issue with the generated HAProxy configuration. diff --git a/releasenotes/notes/fix-lb-error-failover-2c17afaa20c0c97f.yaml b/releasenotes/notes/fix-lb-error-failover-2c17afaa20c0c97f.yaml new file mode 100644 index 0000000000..0fe4c856e3 --- /dev/null +++ b/releasenotes/notes/fix-lb-error-failover-2c17afaa20c0c97f.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix load balancers that could not be failed over when in ERROR provisioning + status. diff --git a/releasenotes/notes/fix-lb-in-PENDING-on-DB-failure-1ffea71a86cd4ea9.yaml b/releasenotes/notes/fix-lb-in-PENDING-on-DB-failure-1ffea71a86cd4ea9.yaml new file mode 100644 index 0000000000..5d46a790da --- /dev/null +++ b/releasenotes/notes/fix-lb-in-PENDING-on-DB-failure-1ffea71a86cd4ea9.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed an issue with load balancers stuck in a ``PENDING_*`` state during + database outages. Now when a task fails in Octavia, it retries updating + the ``provisioning_status`` of the load balancer until the database is back + (or it gives up after a very long timeout, around 2h45). diff --git a/releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml b/releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml new file mode 100644 index 0000000000..ffd8b0dac0 --- /dev/null +++ b/releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where load balancers would go into ERROR when + setting data not visible to providers (e.g. tags). diff --git a/releasenotes/notes/fix-listener-API-update-with-null/None-fde2ffab82e783a4.yaml b/releasenotes/notes/fix-listener-API-update-with-null/None-fde2ffab82e783a4.yaml new file mode 100644 index 0000000000..3f51781667 --- /dev/null +++ b/releasenotes/notes/fix-listener-API-update-with-null/None-fde2ffab82e783a4.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed an issue where the listener API would accept null/None values for + fields that must have a valid value, such as connection-limit. + Now when a PUT call is made to one of these fields with null as the value, + the API resets the field to its default value. diff --git a/releasenotes/notes/fix-listener-MAX_TIMEOUT-4c4fdf804a96c34b.yaml b/releasenotes/notes/fix-listener-MAX_TIMEOUT-4c4fdf804a96c34b.yaml new file mode 100644 index 0000000000..d0b6fde838 --- /dev/null +++ b/releasenotes/notes/fix-listener-MAX_TIMEOUT-4c4fdf804a96c34b.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed MAX_TIMEOUT for the timeout_client_data, timeout_member_connect, + timeout_member_data and timeout_tcp_inspect listener API fields. 
The value was + reduced from 365 days to 24 days, so that it no longer exceeds the maximum + value of the database column type. diff --git a/releasenotes/notes/fix-listener-creation-allowing-pool-protocol-b9e9ef147f6eeaf4.yaml b/releasenotes/notes/fix-listener-creation-allowing-pool-protocol-b9e9ef147f6eeaf4.yaml new file mode 100644 index 0000000000..ad93faef12 --- /dev/null +++ b/releasenotes/notes/fix-listener-creation-allowing-pool-protocol-b9e9ef147f6eeaf4.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes a listener creation failure when the protocol used is PROXY or + PROXYV2, which are pool protocols and not listener protocols. diff --git a/releasenotes/notes/fix-listener-tls-versions-ciphers-clearing-4e987ee17d6b6e79.yaml b/releasenotes/notes/fix-listener-tls-versions-ciphers-clearing-4e987ee17d6b6e79.yaml new file mode 100644 index 0000000000..09a975fec3 --- /dev/null +++ b/releasenotes/notes/fix-listener-tls-versions-ciphers-clearing-4e987ee17d6b6e79.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed an issue where clearing listener TLS versions resulted in a + server-side error. + - | + Fixed an issue where clearing listener TLS versions and ciphers would + not apply the default values defined in the API configuration settings. diff --git a/releasenotes/notes/fix-listener-update-certs-failed-315c66f4806e76c8.yaml b/releasenotes/notes/fix-listener-update-certs-failed-315c66f4806e76c8.yaml new file mode 100644 index 0000000000..e87ea73883 --- /dev/null +++ b/releasenotes/notes/fix-listener-update-certs-failed-315c66f4806e76c8.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a failure when updating listener certificates. The fix ensures + that an existing certificate gets overwritten properly. diff --git a/releasenotes/notes/fix-lo-interface-amphora-netns-90fb9934026e1485.yaml b/releasenotes/notes/fix-lo-interface-amphora-netns-90fb9934026e1485.yaml new file mode 100644 index 0000000000..e0a2971409 --- /dev/null +++ b/releasenotes/notes/fix-lo-interface-amphora-netns-90fb9934026e1485.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed an issue with the ``lo`` interface in the ``amphora-haproxy`` network + namespace. The ``lo`` interface was down and prevented haproxy from + communicating with other haproxy processes (for persistent stick tables) on + configuration change. It delayed old haproxy worker cleanup and increased + memory consumption after reloading the configuration. diff --git a/releasenotes/notes/fix-loadbalancer-db-cleanup-61ee81a4fd597067.yaml b/releasenotes/notes/fix-loadbalancer-db-cleanup-61ee81a4fd597067.yaml new file mode 100644 index 0000000000..c893ddd8f8 --- /dev/null +++ b/releasenotes/notes/fix-loadbalancer-db-cleanup-61ee81a4fd597067.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix an issue that prevented the cleanup of load balancer entries in the + database by the Octavia housekeeper service. 
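A quick check of the arithmetic behind the MAX_TIMEOUT note above, assuming the timeouts are stored as milliseconds in a signed 32-bit integer column (the usual reason for this kind of cap):

```python
# Listener timeouts are expressed in milliseconds; a signed 32-bit INT
# column tops out at 2**31 - 1 = 2,147,483,647.
INT32_MAX = 2**31 - 1

def days_to_ms(days):
    return days * 24 * 60 * 60 * 1000

print(days_to_ms(365))  # 31536000000 -> overflows a 32-bit INT
print(days_to_ms(24))   #  2073600000 -> fits under INT32_MAX
assert days_to_ms(365) > INT32_MAX >= days_to_ms(24)
```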
diff --git a/releasenotes/notes/fix-loadbalancer-stuck-on-delete-da5950cf87fc8507.yaml b/releasenotes/notes/fix-loadbalancer-stuck-on-delete-da5950cf87fc8507.yaml new file mode 100644 index 0000000000..09129057fe --- /dev/null +++ b/releasenotes/notes/fix-loadbalancer-stuck-on-delete-da5950cf87fc8507.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed load balancers stuck in PENDING_DELETE when the TLS storage was + unavailable or returned an error. diff --git a/releasenotes/notes/fix-long-tasks-with-redis-keepalive-af18211334c14f54.yaml b/releasenotes/notes/fix-long-tasks-with-redis-keepalive-af18211334c14f54.yaml new file mode 100644 index 0000000000..5ca61f3c40 --- /dev/null +++ b/releasenotes/notes/fix-long-tasks-with-redis-keepalive-af18211334c14f54.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fix an issue with amphorav2 and persistence: some long tasks executed by a + controller might have been released in taskflow and rescheduled on another + controller. Octavia now ensures that a task is never released early by + using a keepalive mechanism to notify taskflow (and its redis backend) that + a job is still running. diff --git a/releasenotes/notes/fix-member-operating-status-on-reload-fe3688603bae8726.yaml b/releasenotes/notes/fix-member-operating-status-on-reload-fe3688603bae8726.yaml new file mode 100644 index 0000000000..1dab5c48d9 --- /dev/null +++ b/releasenotes/notes/fix-member-operating-status-on-reload-fe3688603bae8726.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue with members in ERROR operating status that may have been + updated briefly to ONLINE during a Load Balancer configuration change. diff --git a/releasenotes/notes/fix-missing-additional-vips-port_id-fd0708aa798744c9.yaml b/releasenotes/notes/fix-missing-additional-vips-port_id-fd0708aa798744c9.yaml new file mode 100644 index 0000000000..fae08b28bc --- /dev/null +++ b/releasenotes/notes/fix-missing-additional-vips-port_id-fd0708aa798744c9.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed missing `port_id` element when getting the `additional_vips` + parameter of a load balancer. diff --git a/releasenotes/notes/fix-missing-amphora-create-dependency-a954ded0d260d462.yaml b/releasenotes/notes/fix-missing-amphora-create-dependency-a954ded0d260d462.yaml new file mode 100644 index 0000000000..348bd6a14a --- /dev/null +++ b/releasenotes/notes/fix-missing-amphora-create-dependency-a954ded0d260d462.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where amphora load balancers fail to create when Nova + anti-affinity is enabled and topology is SINGLE. diff --git a/releasenotes/notes/fix-missing-cloud-guest-utils-rh-eb39a53502dc1e91.yaml b/releasenotes/notes/fix-missing-cloud-guest-utils-rh-eb39a53502dc1e91.yaml new file mode 100644 index 0000000000..dcba20f4ec --- /dev/null +++ b/releasenotes/notes/fix-missing-cloud-guest-utils-rh-eb39a53502dc1e91.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Add missing cloud-utils-growpart RPM to Red Hat based amphora images. diff --git a/releasenotes/notes/fix-missing-cronie-rh-bd31001338ddbb1e.yaml b/releasenotes/notes/fix-missing-cronie-rh-bd31001338ddbb1e.yaml new file mode 100644 index 0000000000..698f21b924 --- /dev/null +++ b/releasenotes/notes/fix-missing-cronie-rh-bd31001338ddbb1e.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Add missing cronie RPM to Red Hat based amphora images. 
diff --git a/releasenotes/notes/fix-missing-masquerade-rules-in-dualstack-lbs-94f97606c5804b36.yaml b/releasenotes/notes/fix-missing-masquerade-rules-in-dualstack-lbs-94f97606c5804b36.yaml new file mode 100644 index 0000000000..d80f076413 --- /dev/null +++ b/releasenotes/notes/fix-missing-masquerade-rules-in-dualstack-lbs-94f97606c5804b36.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed an issue when using UDP listeners in dual-stack (IPv4 and IPv6) load + balancers, some masquerade rules needed by UDP were not correctly set on the + member interfaces. diff --git a/releasenotes/notes/fix-multi-amp-down-failover-952618fb8d3d8ae6.yaml b/releasenotes/notes/fix-multi-amp-down-failover-952618fb8d3d8ae6.yaml new file mode 100644 index 0000000000..95d492a984 --- /dev/null +++ b/releasenotes/notes/fix-multi-amp-down-failover-952618fb8d3d8ae6.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue where if more than one amphora fails at the same time, + failover might not fully complete, leaving the load balancer in ERROR. diff --git a/releasenotes/notes/fix-network-interface-collision-939fd32587ea3344.yaml b/releasenotes/notes/fix-network-interface-collision-939fd32587ea3344.yaml new file mode 100644 index 0000000000..7dd8776dc4 --- /dev/null +++ b/releasenotes/notes/fix-network-interface-collision-939fd32587ea3344.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed a potential error when plugging a member from a new network after + deleting another member and unplugging its network. Octavia may have tried + to plug the new network to a new interface but with an already existing + name. + This fix requires an update of the Amphora image. diff --git a/releasenotes/notes/fix-neutron-overrides-710ed047ebf0c45c.yaml b/releasenotes/notes/fix-neutron-overrides-710ed047ebf0c45c.yaml new file mode 100644 index 0000000000..4713173391 --- /dev/null +++ b/releasenotes/notes/fix-neutron-overrides-710ed047ebf0c45c.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed a bug when the deprecated settings (``endpoint``, ``endpoint_type``, + ``ca_certificates_file``) are used in the ``[neutron]`` section of the + configuration file. The connection to the neutron service may have used + some settings from the ``[service_auth]`` section or used undefined + settings. diff --git a/releasenotes/notes/fix-nf-conntrack-max-value-in-amp-0e16eb50b42e7b58.yaml b/releasenotes/notes/fix-nf-conntrack-max-value-in-amp-0e16eb50b42e7b58.yaml new file mode 100644 index 0000000000..8ee949b65d --- /dev/null +++ b/releasenotes/notes/fix-nf-conntrack-max-value-in-amp-0e16eb50b42e7b58.yaml @@ -0,0 +1,15 @@ +--- +upgrade: + - | + The fix that updates the Netfilter Conntrack Sysfs variables requires + rebuilding the amphora image in order to be effective. +fixes: + - | + Netfilter Conntrack Sysfs variables net.netfilter.nf_conntrack_max and + nf_conntrack_expect_max get set to sensible values on the amphora now. + Previously, kernel default values were used, which were much too low for + the configured net.netfilter.nf_conntrack_buckets value. As a result, + packets could get dropped because the conntrack table got filled too + quickly. Note that this affects only UDP and SCTP protocol listeners. + Connection tracking is disabled for TCP-based connections on the + amphora including HTTP(S). 
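A sketch of how the conntrack sizing described in the note above is typically applied: derive nf_conntrack_max from the configured bucket count instead of keeping the low kernel default. The 4x ratio is a common heuristic and an assumption here, not necessarily the exact value Octavia chooses:

```python
from pathlib import Path

SYSCTL = Path("/proc/sys/net/netfilter")

def size_conntrack():
    """Scale conntrack limits to the configured hash bucket count."""
    buckets = int((SYSCTL / "nf_conntrack_buckets").read_text())
    # Illustrative ratio: allow several tracked connections per hash
    # bucket rather than keeping the (much lower) kernel default.
    (SYSCTL / "nf_conntrack_max").write_text(str(buckets * 4))
```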
diff --git a/releasenotes/notes/fix-nf_conntrack_buckets-sysctl-75ae6dbb9d052863.yaml b/releasenotes/notes/fix-nf_conntrack_buckets-sysctl-75ae6dbb9d052863.yaml new file mode 100644 index 0000000000..2ce22e5c9c --- /dev/null +++ b/releasenotes/notes/fix-nf_conntrack_buckets-sysctl-75ae6dbb9d052863.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix nf_conntrack_buckets sysctl in the Amphora, its value was incorrectly + set. diff --git a/releasenotes/notes/fix-no-resolvconf-rhel-dhclient-hook-36a1c3b1a3b03a3d.yaml b/releasenotes/notes/fix-no-resolvconf-rhel-dhclient-hook-36a1c3b1a3b03a3d.yaml new file mode 100644 index 0000000000..d1003aada8 --- /dev/null +++ b/releasenotes/notes/fix-no-resolvconf-rhel-dhclient-hook-36a1c3b1a3b03a3d.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where /etc/resolv.conf on RHEL-based amphorae was being + populated with DNS servers. diff --git a/releasenotes/notes/fix-nova-service_name-6bde4970047817f4.yaml b/releasenotes/notes/fix-nova-service_name-6bde4970047817f4.yaml new file mode 100644 index 0000000000..d25461c3a8 --- /dev/null +++ b/releasenotes/notes/fix-nova-service_name-6bde4970047817f4.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Now the ``[nova] service_name`` parameter is effectively used to find + the nova endpoint in keystone catalog. The parameter had no effect before + it was fixed. diff --git a/releasenotes/notes/fix-nr_open-limit-value-7f475c3e301a608d.yaml b/releasenotes/notes/fix-nr_open-limit-value-7f475c3e301a608d.yaml new file mode 100644 index 0000000000..5976cdc9bb --- /dev/null +++ b/releasenotes/notes/fix-nr_open-limit-value-7f475c3e301a608d.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Increase the limit value for nr_open and file-max in the amphora, the new + value is based on what HAProxy 2.x is expecting from the system with the + greatest maxconn value that Octavia can set. diff --git a/releasenotes/notes/fix-octavia-policies-8f3cb690f1fa4556.yaml b/releasenotes/notes/fix-octavia-policies-8f3cb690f1fa4556.yaml new file mode 100644 index 0000000000..b571dfb75c --- /dev/null +++ b/releasenotes/notes/fix-octavia-policies-8f3cb690f1fa4556.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed the policy of the legacy `admin` role so that it is still an admin + with sRBAC. + - | + Removed system scope policies; all the policies are now project scoped. diff --git a/releasenotes/notes/fix-octavia-status-amphorav2-038fe77a2189b99f.yaml b/releasenotes/notes/fix-octavia-status-amphorav2-038fe77a2189b99f.yaml new file mode 100644 index 0000000000..d20ffe5416 --- /dev/null +++ b/releasenotes/notes/fix-octavia-status-amphorav2-038fe77a2189b99f.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a bug in octavia-status which reported an incorrect status for the + *amphorav2* driver when using the default *amphora* alias. diff --git a/releasenotes/notes/fix-oslo-messaging-connection-leakage-aeb79474105ac116.yaml b/releasenotes/notes/fix-oslo-messaging-connection-leakage-aeb79474105ac116.yaml new file mode 100644 index 0000000000..9cfc750c61 --- /dev/null +++ b/releasenotes/notes/fix-oslo-messaging-connection-leakage-aeb79474105ac116.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a bug that caused an excessive number of RabbitMQ connections to be + opened. 
diff --git a/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml b/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml new file mode 100644 index 0000000000..0f883eab8f --- /dev/null +++ b/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + An amphora image update is recommended to pick up a workaround to an + HAProxy issue where it would fail to reload on configuration change should + the local peer name start with "-x". +fixes: + - | + Workaround an HAProxy issue where it would fail to reload on configuration + change should the local peer name start with "-x". diff --git a/releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml b/releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml new file mode 100644 index 0000000000..89298ddb4b --- /dev/null +++ b/releasenotes/notes/fix-persistence-granularity-default-value-540093bbf6518ed8.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Modified the default Keepalived LVS persistence granularity + configuration value so that it is IPv6-compatible. diff --git a/releasenotes/notes/fix-ping-hm-on-centos-stream-6624f19c8da86e22.yaml b/releasenotes/notes/fix-ping-hm-on-centos-stream-6624f19c8da86e22.yaml new file mode 100644 index 0000000000..e8b775b4b9 --- /dev/null +++ b/releasenotes/notes/fix-ping-hm-on-centos-stream-6624f19c8da86e22.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix an issue with PING health-monitors on CentOS Stream 8. Changes in + CentOS and systemd prevent an unprivileged user from sending ping requests + from a network namespace. diff --git a/releasenotes/notes/fix-ping-hm-with-haproxy-2.2-9b83777172fb8835.yaml b/releasenotes/notes/fix-ping-hm-with-haproxy-2.2-9b83777172fb8835.yaml new file mode 100644 index 0000000000..a16e0604e4 --- /dev/null +++ b/releasenotes/notes/fix-ping-hm-with-haproxy-2.2-9b83777172fb8835.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix PING health-monitors with recent haproxy releases (>=2.2), haproxy now + requires an additional "insecure-fork-wanted" option to authorize the + Octavia PING healthcheck. diff --git a/releasenotes/notes/fix-plugging-member-subnets-8560cd9403ff79a7.yaml b/releasenotes/notes/fix-plugging-member-subnets-8560cd9403ff79a7.yaml new file mode 100644 index 0000000000..fb5647a523 --- /dev/null +++ b/releasenotes/notes/fix-plugging-member-subnets-8560cd9403ff79a7.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed a bug where, when adding a member on a subnet that belongs to a + network with multiple subnets, an incorrect subnet may have been plugged + in the amphora. + - | + Fixed a bug where, when deleting the last member plugged on a network, the + port that was no longer used was not deleted. diff --git a/releasenotes/notes/fix-pool-crl-2cc6f2705f5b2009.yaml b/releasenotes/notes/fix-pool-crl-2cc6f2705f5b2009.yaml new file mode 100644 index 0000000000..6d17b98f1d --- /dev/null +++ b/releasenotes/notes/fix-pool-crl-2cc6f2705f5b2009.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where updating a CRL or client certificate on a pool would + cause the pool to go into ERROR. 
diff --git a/releasenotes/notes/fix-pool-prov-status-on-lb-single-create-897070aee0a42da6.yaml b/releasenotes/notes/fix-pool-prov-status-on-lb-single-create-897070aee0a42da6.yaml new file mode 100644 index 0000000000..c95b7883b1 --- /dev/null +++ b/releasenotes/notes/fix-pool-prov-status-on-lb-single-create-897070aee0a42da6.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a bug that didn't set the correct provisioning_status for unattached + pools when creating a fully-populated load balancer. diff --git a/releasenotes/notes/fix-potential-race-conditions-in-cascade-delete-1e100213445a17ec.yaml b/releasenotes/notes/fix-potential-race-conditions-in-cascade-delete-1e100213445a17ec.yaml new file mode 100644 index 0000000000..c331511da2 --- /dev/null +++ b/releasenotes/notes/fix-potential-race-conditions-in-cascade-delete-1e100213445a17ec.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fix a potential race condition during the cascade deletion of load + balancers. When deleting a load balancer with multiple listeners, the + security groups of the VIP port may have been updated many times + concurrently, creating a race condition. diff --git a/releasenotes/notes/fix-protocol-header-insertion-mismatch-e3aeb5f5fee0348b.yaml b/releasenotes/notes/fix-protocol-header-insertion-mismatch-e3aeb5f5fee0348b.yaml new file mode 100644 index 0000000000..25a268ea41 --- /dev/null +++ b/releasenotes/notes/fix-protocol-header-insertion-mismatch-e3aeb5f5fee0348b.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where the listener "insert_headers" parameter was accepted + for protocols that do not support header insertion. diff --git a/releasenotes/notes/fix-provider-capabilities-filtering-8bd12b2cf7b37a84.yaml b/releasenotes/notes/fix-provider-capabilities-filtering-8bd12b2cf7b37a84.yaml new file mode 100644 index 0000000000..017647afe3 --- /dev/null +++ b/releasenotes/notes/fix-provider-capabilities-filtering-8bd12b2cf7b37a84.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixes the ability to filter on the provider flavor capabilities API. diff --git a/releasenotes/notes/fix-provider-driver-utils-b75485785dcd6462.yaml b/releasenotes/notes/fix-provider-driver-utils-b75485785dcd6462.yaml new file mode 100644 index 0000000000..0e2e129c33 --- /dev/null +++ b/releasenotes/notes/fix-provider-driver-utils-b75485785dcd6462.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes the provider driver utils conversion of flavor_id in load balancer + conversion, sni_refs and L7 policies in listener conversion, and health + monitor in pool conversions. diff --git a/releasenotes/notes/fix-provisioning-status-on-errors-7f3736ef6e94d453.yaml b/releasenotes/notes/fix-provisioning-status-on-errors-7f3736ef6e94d453.yaml new file mode 100644 index 0000000000..f4688bb3e9 --- /dev/null +++ b/releasenotes/notes/fix-provisioning-status-on-errors-7f3736ef6e94d453.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Fix an issue with the provisioning status of a load balancer that was set + to ERROR too early when an error occurred, making the load balancer mutable + while the tasks for these resources were still being executed. + - | + Fix an issue that could set the provisioning status of a load balancer to a + PENDING_UPDATE state when an error occurred in the amphora failover flow. 
diff --git a/releasenotes/notes/fix-proxyv2-44a7627294922a8e.yaml b/releasenotes/notes/fix-proxyv2-44a7627294922a8e.yaml new file mode 100644 index 0000000000..84c3681930 --- /dev/null +++ b/releasenotes/notes/fix-proxyv2-44a7627294922a8e.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed a bug where pools with PROXYV2 would go into ERROR. diff --git a/releasenotes/notes/fix-qos-apply-after-failover-561abbd153ab88ee.yaml b/releasenotes/notes/fix-qos-apply-after-failover-561abbd153ab88ee.yaml new file mode 100644 index 0000000000..c4908fe33a --- /dev/null +++ b/releasenotes/notes/fix-qos-apply-after-failover-561abbd153ab88ee.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed a bug where, when updating a load balancer with a QoS policy after a + failover, Octavia attempted to update the VRRP ports of the deleted + amphorae, moving the provisioning status of the load balancer to ERROR. diff --git a/releasenotes/notes/fix-race-condiction-on-update-b5330c8fcf1800cd.yaml b/releasenotes/notes/fix-race-condiction-on-update-b5330c8fcf1800cd.yaml new file mode 100644 index 0000000000..0d07e64ae1 --- /dev/null +++ b/releasenotes/notes/fix-race-condiction-on-update-b5330c8fcf1800cd.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fix a potential race condition when updating a resource in the amphorav2 + worker. The worker was not waiting for the resource to be set to + PENDING_UPDATE, so the resource may have been updated with old data from the + database, resulting in a no-op update. diff --git a/releasenotes/notes/fix-race-condition-member-batch-update-1aed0e06004c5dad.yaml b/releasenotes/notes/fix-race-condition-member-batch-update-1aed0e06004c5dad.yaml new file mode 100644 index 0000000000..b2c5403e08 --- /dev/null +++ b/releasenotes/notes/fix-race-condition-member-batch-update-1aed0e06004c5dad.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed a race condition in the members batch update API call, the data + passed to the Octavia worker service may have been incorrect when quickly + sending successive API calls. The load balancer was then stuck in the + PENDING_UPDATE provisioning_status. diff --git a/releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml b/releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml new file mode 100644 index 0000000000..f1eacc91af --- /dev/null +++ b/releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed code that configured the CentOS/Red Hat amphora images to use the + correct names for the network 'ifcfg' files for static routes and + routing rules. It was using the wrong name for the routes file, + and did not support IPv6 in either file. For more information, see + https://storyboard.openstack.org/#!/story/2007051 diff --git a/releasenotes/notes/fix-reschedule-of-jobboard-tasks-929c066dea9267fd.yaml b/releasenotes/notes/fix-reschedule-of-jobboard-tasks-929c066dea9267fd.yaml new file mode 100644 index 0000000000..67f3c395a7 --- /dev/null +++ b/releasenotes/notes/fix-reschedule-of-jobboard-tasks-929c066dea9267fd.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix the rescheduling of taskflow tasks that have been resumed after being + interrupted. 
diff --git a/releasenotes/notes/fix-route-table-b2ec0aa7b92d2abc.yaml b/releasenotes/notes/fix-route-table-b2ec0aa7b92d2abc.yaml new file mode 100644 index 0000000000..7a49fbd6f8 --- /dev/null +++ b/releasenotes/notes/fix-route-table-b2ec0aa7b92d2abc.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue where VIP return traffic was always routed, if a gateway + was defined, through the gateway address even if it was local traffic. diff --git a/releasenotes/notes/fix-selinux-issue-with-lvs-masquerade.sh-ebbb89886148c70f.yaml b/releasenotes/notes/fix-selinux-issue-with-lvs-masquerade.sh-ebbb89886148c70f.yaml new file mode 100644 index 0000000000..4bcec13c8a --- /dev/null +++ b/releasenotes/notes/fix-selinux-issue-with-lvs-masquerade.sh-ebbb89886148c70f.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed an issue with SELinux and the lvs-masquerade.sh script on the amphora. + The script already runs with root permissions, so the use of sudo inside the + script is unneeded. diff --git a/releasenotes/notes/fix-selinux-tcp-hm-on-udp-pools-89c3b8db89e359ba.yaml b/releasenotes/notes/fix-selinux-tcp-hm-on-udp-pools-89c3b8db89e359ba.yaml new file mode 100644 index 0000000000..97ca282820 --- /dev/null +++ b/releasenotes/notes/fix-selinux-tcp-hm-on-udp-pools-89c3b8db89e359ba.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed SELinux issues with TCP-based health-monitors on UDP pools: some + specific monitoring ports were denied by SELinux. The Amphora image now + enables the ``keepalived_connect_any`` SELinux boolean that allows + connections to any ports. diff --git a/releasenotes/notes/fix-spare-amphora-check-and-creation-3adf939b45610155.yaml b/releasenotes/notes/fix-spare-amphora-check-and-creation-3adf939b45610155.yaml new file mode 100644 index 0000000000..53aadb8f08 --- /dev/null +++ b/releasenotes/notes/fix-spare-amphora-check-and-creation-3adf939b45610155.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed an issue that prevented spare amphorae from being created. diff --git a/releasenotes/notes/fix-subnet-host_routes-amphorav2-3c079c5a3bfa1b3d.yaml b/releasenotes/notes/fix-subnet-host_routes-amphorav2-3c079c5a3bfa1b3d.yaml new file mode 100644 index 0000000000..2e9b5b1eb7 --- /dev/null +++ b/releasenotes/notes/fix-subnet-host_routes-amphorav2-3c079c5a3bfa1b3d.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix load balancers that use customized host_routes in the VIP or the member + subnets in amphorav2. diff --git a/releasenotes/notes/fix-support-for-monitoring-address-and-port-in-udp-members-ff83395544f228cf.yaml b/releasenotes/notes/fix-support-for-monitoring-address-and-port-in-udp-members-ff83395544f228cf.yaml new file mode 100644 index 0000000000..5ab2672e86 --- /dev/null +++ b/releasenotes/notes/fix-support-for-monitoring-address-and-port-in-udp-members-ff83395544f228cf.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Add support for monitor_address and monitor_port attributes in UDP members. + Previously, monitor_address and monitor_port were ignored and address and + protocol_port attributes were used as monitoring address and port. diff --git a/releasenotes/notes/fix-timeout-dict-in-failover-tasks-537456e0fe1d7cb8.yaml b/releasenotes/notes/fix-timeout-dict-in-failover-tasks-537456e0fe1d7cb8.yaml new file mode 100644 index 0000000000..c343620438 --- /dev/null +++ b/releasenotes/notes/fix-timeout-dict-in-failover-tasks-537456e0fe1d7cb8.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Fix an issue when Octavia performs a failover of an ACTIVE-STANDBY load + balancer that has both amphorae missing. 
+ Some tasks in the controller took too long to time out because the + timeout values defined in + ``[haproxy_amphora].active_connection_max_retries`` and + ``[haproxy_amphora].active_connection_rety_interval`` were not used. diff --git a/releasenotes/notes/fix-timeout-dict-when-start-vrrp-278d4837702bd247.yaml b/releasenotes/notes/fix-timeout-dict-when-start-vrrp-278d4837702bd247.yaml new file mode 100644 index 0000000000..40d9f9012a --- /dev/null +++ b/releasenotes/notes/fix-timeout-dict-when-start-vrrp-278d4837702bd247.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed an excessively long timeout when attempting to start the VRRP service + in an unreachable amphora during a failover. A specific shorter timeout is + now used during failovers. diff --git a/releasenotes/notes/fix-tls-container-serialization-1cb83ad4c9eca3b8.yaml b/releasenotes/notes/fix-tls-container-serialization-1cb83ad4c9eca3b8.yaml new file mode 100644 index 0000000000..779d9fd097 --- /dev/null +++ b/releasenotes/notes/fix-tls-container-serialization-1cb83ad4c9eca3b8.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix a serialization issue when using TLSContainer with the amphorav2 driver + with persistence: a list of bytes type in the data model was not correctly + converted to serializable data. diff --git a/releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml b/releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml new file mode 100644 index 0000000000..8d233cf2f0 --- /dev/null +++ b/releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed an issue where TLS-enabled pools would fail to provision. diff --git a/releasenotes/notes/fix-tls-hello-healthmonitors-a4b98a80f6de8394.yaml b/releasenotes/notes/fix-tls-hello-healthmonitors-a4b98a80f6de8394.yaml new file mode 100644 index 0000000000..07b909109d --- /dev/null +++ b/releasenotes/notes/fix-tls-hello-healthmonitors-a4b98a80f6de8394.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed TLS-HELLO health-monitors in the amphora-driver. diff --git a/releasenotes/notes/fix-udp-and-sctp-listener-wrr-50de9dc0774a8ea1.yaml b/releasenotes/notes/fix-udp-and-sctp-listener-wrr-50de9dc0774a8ea1.yaml new file mode 100644 index 0000000000..97a98236a5 --- /dev/null +++ b/releasenotes/notes/fix-udp-and-sctp-listener-wrr-50de9dc0774a8ea1.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix weighted round-robin for UDP and SCTP listeners with keepalived and + lvs. The algorithm must be specified as 'wrr' in order for weighted + round-robin to work correctly, but was being set to 'rr'. diff --git a/releasenotes/notes/fix-udp-member-status-with-additional-vips-7511690a0c112b44.yaml b/releasenotes/notes/fix-udp-member-status-with-additional-vips-7511690a0c112b44.yaml new file mode 100644 index 0000000000..b69c7b59b9 --- /dev/null +++ b/releasenotes/notes/fix-udp-member-status-with-additional-vips-7511690a0c112b44.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed a bug with the status of the members of UDP pools in load balancers + with IPv4 and IPv6 VIPs. Some members may have been incorrectly reported as + DOWN by the Amphora. diff --git a/releasenotes/notes/fix-udp-members-status-ef3202849bfda29b.yaml b/releasenotes/notes/fix-udp-members-status-ef3202849bfda29b.yaml new file mode 100644 index 0000000000..d293fc0778 --- /dev/null +++ b/releasenotes/notes/fix-udp-members-status-ef3202849bfda29b.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix operating_status for pools and members that use the UDP protocol. 
+ The operating_status values are now consistent with the values of non-UDP + load balancers. diff --git a/releasenotes/notes/fix-udp-only-lbs-c4ca42106fc1e2bb.yaml b/releasenotes/notes/fix-udp-only-lbs-c4ca42106fc1e2bb.yaml new file mode 100644 index 0000000000..90e330895b --- /dev/null +++ b/releasenotes/notes/fix-udp-only-lbs-c4ca42106fc1e2bb.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where UDP only load balancers would not bring up the VIP + address. diff --git a/releasenotes/notes/fix-udp-server-status-bug-db4d3e38bcdf0554.yaml b/releasenotes/notes/fix-udp-server-status-bug-db4d3e38bcdf0554.yaml new file mode 100644 index 0000000000..c35f7d03e9 --- /dev/null +++ b/releasenotes/notes/fix-udp-server-status-bug-db4d3e38bcdf0554.yaml @@ -0,0 +1,12 @@ +--- +issues: + - | + When a load balancer with a UDP listener is updated, the listener service + is restarted, which causes an interruption of the flow of traffic during a + short period of time. This issue is caused by a keepalived bug + (https://github.com/acassen/keepalived/issues/1163) that was fixed in + keepalived 2.0.14, but this package is not yet provided by distributions. +fixes: + - | + Fix a bug that prevented UDP servers from being restored as members of a + pool after removing a health monitor resource. diff --git a/releasenotes/notes/fix-unlimited-connection-limit-48079688de033c1a.yaml b/releasenotes/notes/fix-unlimited-connection-limit-48079688de033c1a.yaml new file mode 100644 index 0000000000..23a01b930e --- /dev/null +++ b/releasenotes/notes/fix-unlimited-connection-limit-48079688de033c1a.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes a bug where unspecified or unlimited listener connection limit + settings would lead to a 2000 connection limit when using the + amphora/octavia driver. This was the compiled-in connection limit + in some HAProxy packages. diff --git a/releasenotes/notes/fix-unplugging-member-ports-262b35426e570edd.yaml b/releasenotes/notes/fix-unplugging-member-ports-262b35426e570edd.yaml new file mode 100644 index 0000000000..f0937c03c4 --- /dev/null +++ b/releasenotes/notes/fix-unplugging-member-ports-262b35426e570edd.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fix a bug that could have triggered a race condition when configuring a + member interface in the amphora. Due to a race condition, a network + interface might have been deleted from the amphora, leading to a loss of + connectivity. diff --git a/releasenotes/notes/fix-unset-for-tls_versions-tls_ciphers-in-pools-7534715ce28bd8cb.yaml b/releasenotes/notes/fix-unset-for-tls_versions-tls_ciphers-in-pools-7534715ce28bd8cb.yaml new file mode 100644 index 0000000000..f893ebde91 --- /dev/null +++ b/releasenotes/notes/fix-unset-for-tls_versions-tls_ciphers-in-pools-7534715ce28bd8cb.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix an issue when updating ``tls_versions`` and ``tls_ciphers`` in Pools + with empty (None) values; unsetting these parameters now resets them to + their default values. diff --git a/releasenotes/notes/fix-update-listener-ca-error-167464debc06cba2.yaml b/releasenotes/notes/fix-update-listener-ca-error-167464debc06cba2.yaml new file mode 100644 index 0000000000..25c0e32cee --- /dev/null +++ b/releasenotes/notes/fix-update-listener-ca-error-167464debc06cba2.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed "Could not retrieve certificate" error when updating/deleting the + client_ca_tls_container_ref field of a listener after a CA/CRL was deleted. 
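A sketch of the defaulting logic behind the connection-limit note above: when the API value is -1 (unlimited/unspecified), the haproxy template must receive an explicit large maxconn instead of falling back to the package's compiled-in default. The names and ceiling here are illustrative, not Octavia's actual constants:

```python
# -1 is the API's "unlimited" sentinel for connection_limit.
HAPROXY_MAX_CONN = 1_000_000  # illustrative ceiling

def effective_maxconn(connection_limit):
    """Map a listener's connection_limit to haproxy's maxconn."""
    if connection_limit is None or connection_limit == -1:
        # Without an explicit value haproxy falls back to its compiled-in
        # default (2000 in some packages), so always emit one.
        return HAPROXY_MAX_CONN
    return connection_limit

print(effective_maxconn(-1))    # 1000000
print(effective_maxconn(5000))  # 5000
```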
diff --git a/releasenotes/notes/fix-user_data_config_drive-b4ce8cc66fb21365.yaml b/releasenotes/notes/fix-user_data_config_drive-b4ce8cc66fb21365.yaml new file mode 100644 index 0000000000..acf73f00fd --- /dev/null +++ b/releasenotes/notes/fix-user_data_config_drive-b4ce8cc66fb21365.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix a python3 error that prevented the use of the + ``[controller_worker]/user_data_config_drive`` option when building + amphorae. diff --git a/releasenotes/notes/fix-verification-of-private-ca-signed-certificates-b9386a0d92627b03.yaml b/releasenotes/notes/fix-verification-of-private-ca-signed-certificates-b9386a0d92627b03.yaml new file mode 100644 index 0000000000..4c47eefc8a --- /dev/null +++ b/releasenotes/notes/fix-verification-of-private-ca-signed-certificates-b9386a0d92627b03.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix verification of certificates signed by a private CA when using Neutron + endpoints. diff --git a/releasenotes/notes/fix-vip-net-no-gw-b46c3ade1a748e69.yaml b/releasenotes/notes/fix-vip-net-no-gw-b46c3ade1a748e69.yaml new file mode 100644 index 0000000000..f0ce1e5ea8 --- /dev/null +++ b/releasenotes/notes/fix-vip-net-no-gw-b46c3ade1a748e69.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where the amphora would fail to bring up the VIP if the + VIP network did not have a gateway specified in neutron. diff --git a/releasenotes/notes/fix-vip-network-ip-availability-2e924f32abf01052.yaml b/releasenotes/notes/fix-vip-network-ip-availability-2e924f32abf01052.yaml new file mode 100644 index 0000000000..90e2f14a67 --- /dev/null +++ b/releasenotes/notes/fix-vip-network-ip-availability-2e924f32abf01052.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes an issue in the selection of vip-subnet-id on multi-subnet networks + by checking the IP availability of the subnets, ensuring that enough IPs + are available for the load balancer when creating a load balancer with + vip-network-id specified. diff --git a/releasenotes/notes/fix-vip-plug-centos-74c2fe7099964b08.yaml b/releasenotes/notes/fix-vip-plug-centos-74c2fe7099964b08.yaml new file mode 100644 index 0000000000..6995a546c3 --- /dev/null +++ b/releasenotes/notes/fix-vip-plug-centos-74c2fe7099964b08.yaml @@ -0,0 +1,3 @@ +--- +fixes: + - Fixed an error when plugging the VIP on CentOS-based amphorae. diff --git a/releasenotes/notes/fix-vip-qos-policy-extension-enabled-3e16e1c23a7d7ae5.yaml b/releasenotes/notes/fix-vip-qos-policy-extension-enabled-3e16e1c23a7d7ae5.yaml new file mode 100644 index 0000000000..3f210296d8 --- /dev/null +++ b/releasenotes/notes/fix-vip-qos-policy-extension-enabled-3e16e1c23a7d7ae5.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - Fixed an issue where trying to set a QoS policy on a VIP while the QoS + extension is disabled would bring the load balancer to ERROR. Should the + QoS extension be disabled, the API will now return HTTP 400 to the user. + - Fixed an issue where setting a QoS policy on the VIP would bring the load + balancer to ERROR when the QoS extension is enabled. diff --git a/releasenotes/notes/fix-worker-graceful-shutdown-c44b6797637aa1b3.yaml b/releasenotes/notes/fix-worker-graceful-shutdown-c44b6797637aa1b3.yaml new file mode 100644 index 0000000000..b2856462e6 --- /dev/null +++ b/releasenotes/notes/fix-worker-graceful-shutdown-c44b6797637aa1b3.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Fix a bug that could interrupt resource creation when performing a graceful + shutdown of the controller worker and leave resources in a + PENDING_CREATE/PENDING_UPDATE/PENDING_DELETE provisioning status.
If the + duration of an Octavia flow is greater than the 'graceful_shutdown_timeout' + configuration value, stopping the Octavia worker can still interrupt the + creation of resources. diff --git a/releasenotes/notes/fix_active_standby_ipv6-0317d5cd9e5d50e5.yaml b/releasenotes/notes/fix_active_standby_ipv6-0317d5cd9e5d50e5.yaml new file mode 100644 index 0000000000..3bd08e2271 --- /dev/null +++ b/releasenotes/notes/fix_active_standby_ipv6-0317d5cd9e5d50e5.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + To resolve the IPv6 VIP issues on active/standby load balancers, you + need to build a new amphora image. +fixes: + - | + Fixes issues using IPv6 VIP addresses with load balancers configured for + active/standby topology. This fix requires a new amphora image to be + built. diff --git a/releasenotes/notes/fixed-API-validation-for-L7-rules-and-session-cookies-cb88f3f1b90171f9.yaml b/releasenotes/notes/fixed-API-validation-for-L7-rules-and-session-cookies-cb88f3f1b90171f9.yaml new file mode 100644 index 0000000000..c8801cec95 --- /dev/null +++ b/releasenotes/notes/fixed-API-validation-for-L7-rules-and-session-cookies-cb88f3f1b90171f9.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Fixed validations in the L7 rule and session cookie APIs in order to + prevent authenticated and authorized users from injecting code into the + HAProxy configuration. CR and LF (\\r and \\n) are no longer allowed in L7 + rule keys and values. The session persistence cookie names must follow the + rules described in + https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie. diff --git a/releasenotes/notes/fixed-PENDING_UPDATE-on-provider-errors-40a03adc8ef82a54.yaml b/releasenotes/notes/fixed-PENDING_UPDATE-on-provider-errors-40a03adc8ef82a54.yaml new file mode 100644 index 0000000000..79cf904476 --- /dev/null +++ b/releasenotes/notes/fixed-PENDING_UPDATE-on-provider-errors-40a03adc8ef82a54.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix load balancers stuck in PENDING_UPDATE for some API calls (POST + /l7rule, PUT /pool) when a provider denied the call. diff --git a/releasenotes/notes/fixed-quota-error-messages-fe3ae81a43f93a17.yaml b/releasenotes/notes/fixed-quota-error-messages-fe3ae81a43f93a17.yaml new file mode 100644 index 0000000000..b3977b6b6a --- /dev/null +++ b/releasenotes/notes/fixed-quota-error-messages-fe3ae81a43f93a17.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed the format of log messages related to quota decrement errors. They + displayed unhelpful information; they now report the correct resource + type for which the error occurs. diff --git a/releasenotes/notes/fixed-spare-amphora-rotation-007ba406411a313d.yaml b/releasenotes/notes/fixed-spare-amphora-rotation-007ba406411a313d.yaml new file mode 100644 index 0000000000..9c337376f9 --- /dev/null +++ b/releasenotes/notes/fixed-spare-amphora-rotation-007ba406411a313d.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed a bug that prevented spare amphora rotation. diff --git a/releasenotes/notes/fixed-unstable-UDP-health-status-ba32690b83a9641b.yaml b/releasenotes/notes/fixed-unstable-UDP-health-status-ba32690b83a9641b.yaml new file mode 100644 index 0000000000..b93fedc52f --- /dev/null +++ b/releasenotes/notes/fixed-unstable-UDP-health-status-ba32690b83a9641b.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fix a potential invalid DOWN operating status for members of a UDP pool. + A race condition could have occurred when building the first heartbeat + message after adding a new member to a pool; this recently added member + could have been seen as DOWN.
diff --git a/releasenotes/notes/force-controlplane-amphora-communication-to-use-tls1.2-1c4adf72d2ce5a82.yaml b/releasenotes/notes/force-controlplane-amphora-communication-to-use-tls1.2-1c4adf72d2ce5a82.yaml new file mode 100644 index 0000000000..950e74871f --- /dev/null +++ b/releasenotes/notes/force-controlplane-amphora-communication-to-use-tls1.2-1c4adf72d2ce5a82.yaml @@ -0,0 +1,6 @@ +--- +security: + - | + Communication between the control-plane and the amphora-agent now uses + TLSv1.2 at minimum by default, and is configurable. The previous default of + SSLv2/3 is widely considered insecure. diff --git a/releasenotes/notes/get-all-unscoped-token-61da95856bc662e0.yaml b/releasenotes/notes/get-all-unscoped-token-61da95856bc662e0.yaml new file mode 100644 index 0000000000..b03537adb7 --- /dev/null +++ b/releasenotes/notes/get-all-unscoped-token-61da95856bc662e0.yaml @@ -0,0 +1,10 @@ +--- +security: + - | + If you are using the admin_or_owner-policy.yaml policy override file + you should upgrade your API processes to include the unscoped token fix. + The default policies are not affected by this issue. +fixes: + - | + Fixes an issue when using the admin_or_owner-policy.yaml policy override + file and unscoped tokens. diff --git a/releasenotes/notes/glance-tags-for-amphora-images-28bd9df1ed4b9ca3.yaml b/releasenotes/notes/glance-tags-for-amphora-images-28bd9df1ed4b9ca3.yaml new file mode 100644 index 0000000000..633c07153a --- /dev/null +++ b/releasenotes/notes/glance-tags-for-amphora-images-28bd9df1ed4b9ca3.yaml @@ -0,0 +1,9 @@ +--- +features: + - The Glance image containing the latest Amphora image can now be referenced + using a Glance tag. To use the feature, set amp_image_tag in + [controller_worker]. Note that amp_image_id should be unset for the new + feature to take effect. +upgrade: + - The amp_image_id option is deprecated and will be removed in one of the + next releases. Operators are advised to migrate to the new amp_image_tag + option. diff --git a/releasenotes/notes/glance_image_owner-42c92a12f91a62a6.yaml b/releasenotes/notes/glance_image_owner-42c92a12f91a62a6.yaml new file mode 100644 index 0000000000..58cefed486 --- /dev/null +++ b/releasenotes/notes/glance_image_owner-42c92a12f91a62a6.yaml @@ -0,0 +1,6 @@ +--- +security: + - Allows the operator to optionally restrict the amphora + glance image selection to a specific owner id. + This is a recommended security setting for clouds that + allow user-uploadable images. diff --git a/releasenotes/notes/handle_empty_db_lb_on_plug_vip_revert-5c24af124498b246.yaml b/releasenotes/notes/handle_empty_db_lb_on_plug_vip_revert-5c24af124498b246.yaml new file mode 100644 index 0000000000..cd03963765 --- /dev/null +++ b/releasenotes/notes/handle_empty_db_lb_on_plug_vip_revert-5c24af124498b246.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fix an error in the revert of the PlugVIPAmphora task when db_lb is not + defined and get_subnet raises a NotFound error. It could happen when + amphora creation failed by timeout and the VIP network was removed before + the revert; as a result, the revert failed with an exception.
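A minimal octavia.conf sketch for the Glance tag note above; the tag name ``amphora`` is an illustrative assumption, not a required value:

.. code-block:: ini

    [controller_worker]
    # Leave amp_image_id unset so the tag lookup takes effect
    # amp_image_id =
    amp_image_tag = amphora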
diff --git a/releasenotes/notes/haproxy-single-process-b17a3af3a97accea.yaml b/releasenotes/notes/haproxy-single-process-b17a3af3a97accea.yaml new file mode 100644 index 0000000000..332912a3b3 --- /dev/null +++ b/releasenotes/notes/haproxy-single-process-b17a3af3a97accea.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + A new amphora image is required to resolve the amphora memory issues when + a load balancer has multiple listeners and the amphora image uses + haproxy 1.8 or newer. +fixes: + - | + Fixed an issue with load balancers that have multiple listeners when using + an amphora image that contains HAProxy 1.8 or newer. An updated amphora + image is required to apply this fix. diff --git a/releasenotes/notes/healthcheck-cache-641f0a64e6f5856c.yaml b/releasenotes/notes/healthcheck-cache-641f0a64e6f5856c.yaml new file mode 100644 index 0000000000..767fe78156 --- /dev/null +++ b/releasenotes/notes/healthcheck-cache-641f0a64e6f5856c.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed the healthcheck endpoint always querying the backends by caching + results for a configurable time. The default is five seconds. diff --git a/releasenotes/notes/improve-error-message-failed-constraints-6ad10bd22cac523a.yaml b/releasenotes/notes/improve-error-message-failed-constraints-6ad10bd22cac523a.yaml new file mode 100644 index 0000000000..4768009216 --- /dev/null +++ b/releasenotes/notes/improve-error-message-failed-constraints-6ad10bd22cac523a.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + The Octavia API returned an unhelpful message when a constraint failed + while creating an object in the DB. The error now contains the name and the + value of the parameter that breaks the constraint. diff --git a/releasenotes/notes/improve-terminology-8ddacb4458c74d57.yaml b/releasenotes/notes/improve-terminology-8ddacb4458c74d57.yaml new file mode 100644 index 0000000000..1a8b5e774c --- /dev/null +++ b/releasenotes/notes/improve-terminology-8ddacb4458c74d57.yaml @@ -0,0 +1,9 @@ +--- +deprecations: + - | + Terminology such as ``blacklist`` has been replaced with more + inclusive words, such as ``prohibit list`` wherever possible. + + The configuration option ``tls_cipher_blacklist`` has been deprecated + and replaced with ``tls_cipher_prohibit_list``. It will be removed in a + future release. diff --git a/releasenotes/notes/improved-string-representation-of-db-model-1c4fe799186b4dea.yaml b/releasenotes/notes/improved-string-representation-of-db-model-1c4fe799186b4dea.yaml new file mode 100644 index 0000000000..5b50cc2706 --- /dev/null +++ b/releasenotes/notes/improved-string-representation-of-db-model-1c4fe799186b4dea.yaml @@ -0,0 +1,7 @@ +--- +other: + - | + The string representation of database model objects has been improved. + Calling str() on them will return a certain subset of fields and calling + repr() on them will return all fields. This is helpful for debugging, but + it may also change some of the log messages that Octavia emits. diff --git a/releasenotes/notes/input-validation-server_certs_key_passphrase-6a9dfc190c9deba8.yaml b/releasenotes/notes/input-validation-server_certs_key_passphrase-6a9dfc190c9deba8.yaml new file mode 100644 index 0000000000..6a4858e130 --- /dev/null +++ b/releasenotes/notes/input-validation-server_certs_key_passphrase-6a9dfc190c9deba8.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - The passphrase for the config option 'server_certs_key_passphrase' is used + as a Fernet key in Octavia and thus must be 32 base64(url)-compatible + characters long.
Octavia will now validate the passphrase length and + format. diff --git a/releasenotes/notes/jobboard-enable-option-5132e372c446d6de.yaml b/releasenotes/notes/jobboard-enable-option-5132e372c446d6de.yaml new file mode 100644 index 0000000000..715157ca43 --- /dev/null +++ b/releasenotes/notes/jobboard-enable-option-5132e372c446d6de.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Added a new configuration setting (``[task_flow]/jobboard_enabled``) to + enable/disable jobboard functionality in the amphorav2 provider. When + disabled, the amphorav2 provider behaves similarly to the amphora v1 + provider and does not require extra dependencies. Jobboard is disabled by + default while it remains an experimental feature. \ No newline at end of file diff --git a/releasenotes/notes/jobboard-redis-db-4a6206247270e996.yaml b/releasenotes/notes/jobboard-redis-db-4a6206247270e996.yaml new file mode 100644 index 0000000000..b8d16f7c07 --- /dev/null +++ b/releasenotes/notes/jobboard-redis-db-4a6206247270e996.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The new ``[task_flow] jobboard_redis_backend_db`` option has been added. + This option allows using a non-default Redis database as the backend. diff --git a/releasenotes/notes/jobboard-redis-driver-updates-caracal-49fd98c16e727910.yaml b/releasenotes/notes/jobboard-redis-driver-updates-caracal-49fd98c16e727910.yaml new file mode 100644 index 0000000000..b7b83cd27f --- /dev/null +++ b/releasenotes/notes/jobboard-redis-driver-updates-caracal-49fd98c16e727910.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + The new ``[task_flow] jobboard_backend_username`` option has been added, to + support the Redis ACL feature. + + - | + Previously, the Redis jobboard driver used only the first host in + ``[task_flow] jobboard_backend_hosts`` when connecting to Redis Sentinel. + Now the driver attempts the other hosts as fallbacks. diff --git a/releasenotes/notes/keepalived-race-with-haproxy-e402ef7f466871f6.yaml b/releasenotes/notes/keepalived-race-with-haproxy-e402ef7f466871f6.yaml new file mode 100644 index 0000000000..f9adacb73f --- /dev/null +++ b/releasenotes/notes/keepalived-race-with-haproxy-e402ef7f466871f6.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The keepalived improvements require the amphora image to be upgraded. +fixes: + - | + Improvements to the keepalived system used in active/standby topologies. + keepalived is now monitored for health by the amphora agent (previously + just by the init system) and a systemd race condition between keepalived + and haproxy has been resolved. diff --git a/releasenotes/notes/lb-delete-flow-refactor-cfb1bc621bbe92b4.yaml b/releasenotes/notes/lb-delete-flow-refactor-cfb1bc621bbe92b4.yaml new file mode 100644 index 0000000000..ac177d523b --- /dev/null +++ b/releasenotes/notes/lb-delete-flow-refactor-cfb1bc621bbe92b4.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Removes an unnecessary listener delete from the non-cascade load balancer + delete flow, thus speeding up load balancer deletion. diff --git a/releasenotes/notes/lb_flow_amp_vip-a83db5d84e17a26a.yaml b/releasenotes/notes/lb_flow_amp_vip-a83db5d84e17a26a.yaml new file mode 100644 index 0000000000..59d877c19c --- /dev/null +++ b/releasenotes/notes/lb_flow_amp_vip-a83db5d84e17a26a.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + This will speed up load balancer creation by allocating AAP ports in + parallel for load balancers with more than one amphora. As a side effect + the AAP driver will be simplified and thus easier to maintain.
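A hedged sketch combining the jobboard options from the notes above; the host addresses and values are illustrative assumptions:

.. code-block:: ini

    [task_flow]
    jobboard_enabled = True
    # With Redis Sentinel, hosts after the first are attempted as fallbacks
    jobboard_backend_hosts = 192.0.2.10,192.0.2.11
    # Supports the Redis ACL feature
    jobboard_backend_username = octavia
    # Use a non-default Redis database as the backend
    jobboard_redis_backend_db = 1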
diff --git a/releasenotes/notes/link-amphora-to-loadbalancer-earlier-ab3dddec48b8da96.yaml b/releasenotes/notes/link-amphora-to-loadbalancer-earlier-ab3dddec48b8da96.yaml new file mode 100644 index 0000000000..cbb1a58129 --- /dev/null +++ b/releasenotes/notes/link-amphora-to-loadbalancer-earlier-ab3dddec48b8da96.yaml @@ -0,0 +1,9 @@ +--- +other: + - | + Amphorae that are booting for a specific loadbalancer will now be linked to + that loadbalancer immediately upon creation. Previously this would not + happen until near the end of the process, leaving a gap during boot + during which it was difficult to understand which booting amphora belonged + to which loadbalancer. This was especially problematic when attempting to + troubleshoot loadbalancers that entered ERROR status due to boot issues. diff --git a/releasenotes/notes/load-balancer-expiry-age-a473f9147552f1b1.yaml b/releasenotes/notes/load-balancer-expiry-age-a473f9147552f1b1.yaml new file mode 100644 index 0000000000..23b51b4b34 --- /dev/null +++ b/releasenotes/notes/load-balancer-expiry-age-a473f9147552f1b1.yaml @@ -0,0 +1,10 @@ +--- + +features: + - Stale load balancer entries with DELETED provisioning_status are now + cleaned up by the housekeeping service if they are older than + `load_balancer_expiry_age`. +upgrade: + - New option `load_balancer_expiry_age` is added to the `house_keeping` + config section. It defines the load balancer expiry age in seconds; the + default value is 604800. diff --git a/releasenotes/notes/load-ipvs-before-setting-opts-c5b2f0871bc38c27.yaml b/releasenotes/notes/load-ipvs-before-setting-opts-c5b2f0871bc38c27.yaml new file mode 100644 index 0000000000..16f337f474 --- /dev/null +++ b/releasenotes/notes/load-ipvs-before-setting-opts-c5b2f0871bc38c27.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix the issue where nf_conntrack* option values are lost after rebooting the Amphora VM. + More details in `Story 2010795 `__ diff --git a/releasenotes/notes/make-amphora-cert-validity-configurable-7defc508b1174f89.yaml b/releasenotes/notes/make-amphora-cert-validity-configurable-7defc508b1174f89.yaml new file mode 100644 index 0000000000..d625e3c028 --- /dev/null +++ b/releasenotes/notes/make-amphora-cert-validity-configurable-7defc508b1174f89.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + The validity period for locally generated certificates used inside Amphora + is now configurable. See ``[certificates] cert_validity_time``. +security: + - | + The default validity time for Amphora certificates has been reduced from + two years to 30 days. diff --git a/releasenotes/notes/make-batch-member-call-additive-4785163e625fed1a.yaml b/releasenotes/notes/make-batch-member-call-additive-4785163e625fed1a.yaml new file mode 100644 index 0000000000..280be06c54 --- /dev/null +++ b/releasenotes/notes/make-batch-member-call-additive-4785163e625fed1a.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The batch member update resource can now be used additively by passing the + query parameter ``additive_only=True``. Existing members can be updated and + new members will be created, but missing members will not be deleted.
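For the ``load_balancer_expiry_age`` note above, a minimal sketch of the ``house_keeping`` section; the value shown is the documented default:

.. code-block:: ini

    [house_keeping]
    # Expiry age in seconds for load balancers in DELETED provisioning
    # status (604800 seconds = 7 days)
    load_balancer_expiry_age = 604800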
diff --git a/releasenotes/notes/make_task_flow_persistence_connection_secret-bda0d7157fa179ea.yaml b/releasenotes/notes/make_task_flow_persistence_connection_secret-bda0d7157fa179ea.yaml new file mode 100644 index 0000000000..646528677a --- /dev/null +++ b/releasenotes/notes/make_task_flow_persistence_connection_secret-bda0d7157fa179ea.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Make the ``[task_flow].persistence_connection`` configuration setting + secret, so it is not displayed when starting the Octavia applications in + debug mode. diff --git a/releasenotes/notes/members-subnet-ip-advertisements-af2264844079ef6b.yaml b/releasenotes/notes/members-subnet-ip-advertisements-af2264844079ef6b.yaml new file mode 100644 index 0000000000..2610ba8d20 --- /dev/null +++ b/releasenotes/notes/members-subnet-ip-advertisements-af2264844079ef6b.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + When plugging a new member subnet, the amphora sends an IP advertisement of + the newly allocated IP. This allows servers on the same L2 network to + flush the ARP entries of a previously allocated IP address. diff --git a/releasenotes/notes/min-tls-version-8e2856fb055ece2c.yaml b/releasenotes/notes/min-tls-version-8e2856fb055ece2c.yaml new file mode 100644 index 0000000000..2a45179f60 --- /dev/null +++ b/releasenotes/notes/min-tls-version-8e2856fb055ece2c.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added ``minimum_tls_version`` to ``octavia.conf``. Listeners, pools, and + the defaults for either will be blocked from using any lower TLS versions. + By default, there is no minimum version. diff --git a/releasenotes/notes/min-volume-size-6ea9c69182b325bd.yaml b/releasenotes/notes/min-volume-size-6ea9c69182b325bd.yaml new file mode 100644 index 0000000000..f685bbbfae --- /dev/null +++ b/releasenotes/notes/min-volume-size-6ea9c69182b325bd.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + The ``[cinder] volume_size`` option no longer accepts 0 or a negative value + and Octavia services may fail to start when these values are used. + These were accepted previously but caused an internal error when a volume + was actually created. diff --git a/releasenotes/notes/move-to-python-3.8-203249392fd1f1aa.yaml b/releasenotes/notes/move-to-python-3.8-203249392fd1f1aa.yaml new file mode 100644 index 0000000000..045b5242a3 --- /dev/null +++ b/releasenotes/notes/move-to-python-3.8-203249392fd1f1aa.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + Update Python base version from 3.6 to 3.8. As per the OpenStack Python + runtime versions policy, Python 3.8 will be the minimum Python version + in the Zed release cycle. diff --git a/releasenotes/notes/moving-api-config-variables-into-new-section-e1c20b77aaf5ea15.yaml b/releasenotes/notes/moving-api-config-variables-into-new-section-e1c20b77aaf5ea15.yaml new file mode 100644 index 0000000000..f07e0a83da --- /dev/null +++ b/releasenotes/notes/moving-api-config-variables-into-new-section-e1c20b77aaf5ea15.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + Several API-related variables are moving to their own section `api_settings`.
+ bind_host + bind_port + api_handler + allow_pagination + allow_sorting + pagination_max_limit + api_base_uri diff --git a/releasenotes/notes/mysql-persistence-driver-idle_timeout-23a481d304c3d283.yaml b/releasenotes/notes/mysql-persistence-driver-idle_timeout-23a481d304c3d283.yaml new file mode 100644 index 0000000000..8db904e0c5 --- /dev/null +++ b/releasenotes/notes/mysql-persistence-driver-idle_timeout-23a481d304c3d283.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Now the ``[database] connection_recycle_time`` option is also used by + connections in the MySQL persistence driver. diff --git a/releasenotes/notes/new-amphora-fields-fa3ffc5801b5e551.yaml b/releasenotes/notes/new-amphora-fields-fa3ffc5801b5e551.yaml new file mode 100644 index 0000000000..3106a284aa --- /dev/null +++ b/releasenotes/notes/new-amphora-fields-fa3ffc5801b5e551.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The Amphora API now returns the field `image_id`, which is the ID of the + glance image used to boot the amphora. diff --git a/releasenotes/notes/new-default_connection_limit-config-option-3ed9f0ed6ec2b514.yaml b/releasenotes/notes/new-default_connection_limit-config-option-3ed9f0ed6ec2b514.yaml new file mode 100644 index 0000000000..41837df966 --- /dev/null +++ b/releasenotes/notes/new-default_connection_limit-config-option-3ed9f0ed6ec2b514.yaml @@ -0,0 +1,18 @@ +--- +features: + - | + Add a new configuration option to define the default connection_limit for + new listeners that use the Amphora provider. The option is + [haproxy_amphora].default_connection_limit and its default value is 50,000. + This value is used when creating or setting a listener with -1 as the + connection_limit parameter, or when unsetting the connection_limit + parameter. +fixes: + - | + With haproxy 1.8.x releases, haproxy consumes much more memory in the + amphorae because of pre-allocated data structures. This amount of memory + depends on the maxconn parameters in its configuration file (which is + related to the connection_limit parameter in the Octavia API). + In the Amphora provider, the default connection_limit value -1 is + now converted to a maxconn of 50,000. It was previously 1,000,000 but that + value triggered some memory allocation issues when quickly performing + multiple configuration updates in a load balancer. diff --git a/releasenotes/notes/octavia-active-standby-cec5d2ad4fd214d8.yaml b/releasenotes/notes/octavia-active-standby-cec5d2ad4fd214d8.yaml new file mode 100644 index 0000000000..7bcfda55b2 --- /dev/null +++ b/releasenotes/notes/octavia-active-standby-cec5d2ad4fd214d8.yaml @@ -0,0 +1,19 @@ +--- +features: + - | + Active/Standby support for Octavia. + + * When enabled in the configuration file, Octavia + will boot an active and standby amphora for each + load balancer. + * Session persistence is maintained between the + active and standby amphora. + * Amphora failover is supported when active/standby + is enabled. Should the master or backup amphora + fail, the health manager will rebuild it. +upgrade: + - | + Upgrade requires a database migration. + + * Adds tables for active/standby. + * Updates load balancer, listener, and amphora tables. diff --git a/releasenotes/notes/octavia-v2-api-c32a62b37c2c8f6f.yaml b/releasenotes/notes/octavia-v2-api-c32a62b37c2c8f6f.yaml new file mode 100644 index 0000000000..7f96c51bd6 --- /dev/null +++ b/releasenotes/notes/octavia-v2-api-c32a62b37c2c8f6f.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Octavia now has a v2 API that can be used as a standalone endpoint.
+ The Octavia v2 API is fully backward compatible with the neutron-lbaas + v2 API and is a superset of the neutron-lbaas v2 API. + For more information see the Octavia API reference: + https://developer.openstack.org/api-ref/load-balancer/v2/index.html diff --git a/releasenotes/notes/octavia_castellan_config-995e65f129e3e983.yaml b/releasenotes/notes/octavia_castellan_config-995e65f129e3e983.yaml new file mode 100644 index 0000000000..5f5f4d1f1f --- /dev/null +++ b/releasenotes/notes/octavia_castellan_config-995e65f129e3e983.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Usage of ``castellan_cert_manager`` as cert_manager has been significantly + improved. Now you can define configuration options for castellan in + octavia.conf and they will be passed properly to the castellan backend. + This allows using the allowed castellan backends for certificate storage. diff --git a/releasenotes/notes/octavia_v2_RBAC-0eb2b51aa6278435.yaml b/releasenotes/notes/octavia_v2_RBAC-0eb2b51aa6278435.yaml new file mode 100644 index 0000000000..5c2586c117 --- /dev/null +++ b/releasenotes/notes/octavia_v2_RBAC-0eb2b51aa6278435.yaml @@ -0,0 +1,18 @@ +--- +features: + - | + The Octavia v2 API now supports Role Based Access Control (RBAC). + The default rules require users to have a load-balancer_* role to be + able to access the Octavia v2 API. This can be overridden with the + admin_or_owner-policy.json sample file provided. + See the `Octavia Policies + `_ + document for more information. +security: + - | + Note that while the Octavia v2 API now supports Role Based Access + Control (RBAC), the Octavia v1.0 API does not. The Octavia v1.0 API + should not be exposed publicly and should only be used internally + such as for the neutron-lbaas octavia driver. Publicly accessible + instances of the Octavia API should have the v1.0 API disabled via the + Octavia configuration file. diff --git a/releasenotes/notes/per-amphora-statistics-api-5479605c7f3adb12.yaml b/releasenotes/notes/per-amphora-statistics-api-5479605c7f3adb12.yaml new file mode 100644 index 0000000000..af7566db7a --- /dev/null +++ b/releasenotes/notes/per-amphora-statistics-api-5479605c7f3adb12.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds an administrator API to access per-amphora statistics. diff --git a/releasenotes/notes/pike-release-35a1d632ce854d4a.yaml b/releasenotes/notes/pike-release-35a1d632ce854d4a.yaml new file mode 100644 index 0000000000..3b7f58504c --- /dev/null +++ b/releasenotes/notes/pike-release-35a1d632ce854d4a.yaml @@ -0,0 +1,7 @@ +--- +prelude: > + For the OpenStack Pike release, the Octavia team is excited to announce + Octavia version 1.0.0 and introduce the Octavia v2 API. Octavia can now + be deployed without neutron-lbaas as a standalone endpoint. The + Octavia v2 API is fully backward compatible with the neutron-lbaas v2 API + and is a superset of the neutron-lbaas v2 API. diff --git a/releasenotes/notes/ping-healthcheck-selinux-e3b7d360c8503527.yaml b/releasenotes/notes/ping-healthcheck-selinux-e3b7d360c8503527.yaml new file mode 100644 index 0000000000..70043c4348 --- /dev/null +++ b/releasenotes/notes/ping-healthcheck-selinux-e3b7d360c8503527.yaml @@ -0,0 +1,6 @@ +--- +issues: + - | + When using a distribution with a recent SELinux release such as CentOS 8 + Stream, the PING health-monitor does not work because shell_exec_t calls + are denied by SELinux.
diff --git a/releasenotes/notes/pool-ciphers-73a347942e31788b.yaml b/releasenotes/notes/pool-ciphers-73a347942e31788b.yaml new file mode 100644 index 0000000000..38fceb9c50 --- /dev/null +++ b/releasenotes/notes/pool-ciphers-73a347942e31788b.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + TLS-enabled pools can now be individually configured with an OpenSSL cipher string. + The default cipher string for new pools can be specified with ``default_pools_ciphers`` + in ``octavia.conf``. The built-in default is OWASP's "Suite B" recommendation. (https://cheatsheetseries.owasp.org/cheatsheets/TLS_Cipher_String_Cheat_Sheet.html) + Existing pools will be unaffected. diff --git a/releasenotes/notes/pool-tls-versions-37f8036eb29ffeee.yaml b/releasenotes/notes/pool-tls-versions-37f8036eb29ffeee.yaml new file mode 100644 index 0000000000..6a71d22d86 --- /dev/null +++ b/releasenotes/notes/pool-tls-versions-37f8036eb29ffeee.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + TLS-enabled pools can now be configured to use only specified versions of + TLS. Default TLS versions for new pools can be set with + ``default_pool_tls_versions`` in ``octavia.conf``. Existing pools + will continue to use the old defaults. diff --git a/releasenotes/notes/provisioning_neutron_db_sync-c019d96a7b64fe20.yaml b/releasenotes/notes/provisioning_neutron_db_sync-c019d96a7b64fe20.yaml new file mode 100644 index 0000000000..d2bc404d62 --- /dev/null +++ b/releasenotes/notes/provisioning_neutron_db_sync-c019d96a7b64fe20.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - Added option 'sync_provisioning_status' to enable synchronizing provisioning status + of loadbalancers with the neutron-lbaas database. Enabling this option will queue one + additional message per amphora every heartbeat interval. +fixes: + - Resolved an issue that could cause provisioning status to become out of sync between + neutron-lbaas and octavia during high load. diff --git a/releasenotes/notes/py3-hmac-digest-81696f6b176e7ae4.yaml b/releasenotes/notes/py3-hmac-digest-81696f6b176e7ae4.yaml new file mode 100644 index 0000000000..0bde1a72c5 --- /dev/null +++ b/releasenotes/notes/py3-hmac-digest-81696f6b176e7ae4.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + The fix for the hmac.compare_digest on python3 requires you to upgrade + your health managers before updating the amphora image. The health + manager is compatible with older amphora images, but older controllers + will reject the health heartbeats from images with this fix. +fixes: + - | + Fixes an issue with hmac.compare_digest on python3 that could cause + health manager "calculated hmac not equal to msg hmac" errors. diff --git a/releasenotes/notes/recommend-haproxy-2.x-for-http2-697416c486e36840.yaml b/releasenotes/notes/recommend-haproxy-2.x-for-http2-697416c486e36840.yaml new file mode 100644 index 0000000000..fdcb4be66c --- /dev/null +++ b/releasenotes/notes/recommend-haproxy-2.x-for-http2-697416c486e36840.yaml @@ -0,0 +1,6 @@ +--- +other: + - | + Though the current HAProxy version 1.8 used in some distributions + supports HTTP/2, we highly recommend using HAProxy version 2.0 or newer + in the amphora image when using HTTP/2.
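A hedged sketch for the pool TLS notes above; the option names are taken from the notes, while placing them in the ``[api_settings]`` section and the cipher string itself are assumptions:

.. code-block:: ini

    [api_settings]
    # Section placement is an assumption; option names are from the notes
    default_pool_tls_versions = TLSv1.2,TLSv1.3
    default_pools_ciphers = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256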
diff --git a/releasenotes/notes/redis-sentinel-auth-and-ssl-be1888903d68922d.yaml b/releasenotes/notes/redis-sentinel-auth-and-ssl-be1888903d68922d.yaml new file mode 100644 index 0000000000..6ad12998d8 --- /dev/null +++ b/releasenotes/notes/redis-sentinel-auth-and-ssl-be1888903d68922d.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + The following options, to enable authentication in Redis Sentinel, have + been added. + + - ``[task_flow] jobboard_redis_sentinel_username`` + - ``[task_flow] jobboard_redis_sentinel_password`` + + - | + The new ``[task_flow] jobboard_redis_sentinel_ssl_options`` option has + been added. This option controls SSL settings for connections to Redis + Sentinel. diff --git a/releasenotes/notes/reduce-duration-failover-636032433984d911.yaml b/releasenotes/notes/reduce-duration-failover-636032433984d911.yaml new file mode 100644 index 0000000000..21d5718d6e --- /dev/null +++ b/releasenotes/notes/reduce-duration-failover-636032433984d911.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Reduce the duration of the failovers of ACTIVE_STANDBY load balancers. Many + updates of an unreachable amphora may have been attempted during a + failover; now, if an amphora is not reachable at the first update, the + remaining updates are skipped. diff --git a/releasenotes/notes/reduce-failover-duration-active-standby-amphora-in-error-3c1d75bc7d9b169f.yaml b/releasenotes/notes/reduce-failover-duration-active-standby-amphora-in-error-3c1d75bc7d9b169f.yaml new file mode 100644 index 0000000000..7c04516e99 --- /dev/null +++ b/releasenotes/notes/reduce-failover-duration-active-standby-amphora-in-error-3c1d75bc7d9b169f.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Reduce the duration of the failovers of ACTIVE_STANDBY load balancers when + both amphorae are unreachable. diff --git a/releasenotes/notes/refactor_failover_flow-9efcd854240f71ad.yaml b/releasenotes/notes/refactor_failover_flow-9efcd854240f71ad.yaml new file mode 100644 index 0000000000..5c5e078725 --- /dev/null +++ b/releasenotes/notes/refactor_failover_flow-9efcd854240f71ad.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + The failover improvements do not require an updated amphora image, + but updating existing amphorae will minimize the failover + outage time for standalone amphorae on subsequent failovers. +fixes: + - | + Significantly improved the reliability and performance of amphora + and load balancer failovers. This is especially true when the + Nova service is experiencing failures. diff --git a/releasenotes/notes/remove-agent_server_network_file-1c9ab712d6b05016.yaml b/releasenotes/notes/remove-agent_server_network_file-1c9ab712d6b05016.yaml new file mode 100644 index 0000000000..d6ded1fd24 --- /dev/null +++ b/releasenotes/notes/remove-agent_server_network_file-1c9ab712d6b05016.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + The deprecated ``[amphora_agent] agent_server_network_file`` option has + been removed. diff --git a/releasenotes/notes/remove-amp-ssh-access-allowed-e11dc011637b21dd.yaml b/releasenotes/notes/remove-amp-ssh-access-allowed-e11dc011637b21dd.yaml new file mode 100644 index 0000000000..4bb121a25e --- /dev/null +++ b/releasenotes/notes/remove-amp-ssh-access-allowed-e11dc011637b21dd.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + The option ``[controller_worker]/amp_ssh_access_allowed`` has been + deprecated since the Queens release and is now removed. This option was + superseded by the ``[controller_worker]/amp_ssh_key_name`` option.
diff --git a/releasenotes/notes/remove-amp_image_id-12a88bae6518455b.yaml b/releasenotes/notes/remove-amp_image_id-12a88bae6518455b.yaml new file mode 100644 index 0000000000..04216e8a76 --- /dev/null +++ b/releasenotes/notes/remove-amp_image_id-12a88bae6518455b.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + The option ``[controller_worker]/amp_image_id`` has been deprecated since + the Mitaka release and is now removed. This option was superseded by the + ``[controller_worker]/amp_image_tag`` option. diff --git a/releasenotes/notes/remove-bbq-unset-acl-e680020de6a9ad3d.yaml b/releasenotes/notes/remove-bbq-unset-acl-e680020de6a9ad3d.yaml new file mode 100644 index 0000000000..e063c36fd1 --- /dev/null +++ b/releasenotes/notes/remove-bbq-unset-acl-e680020de6a9ad3d.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Octavia will no longer automatically revoke access to secrets whenever + load balancing resources no longer require access to them. This may be + added in the future. diff --git a/releasenotes/notes/remove-default-role-from-keepalived-c879bad3a42a6b4a.yaml b/releasenotes/notes/remove-default-role-from-keepalived-c879bad3a42a6b4a.yaml new file mode 100644 index 0000000000..bfca76f83f --- /dev/null +++ b/releasenotes/notes/remove-default-role-from-keepalived-c879bad3a42a6b4a.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed a bug with the `nopreempt` option in keepalived. The option didn't + work properly because the default role of the `MASTER` amphora was set. + Removing the default roles from the configuration files fixed that issue. + Now, after a failover, the newly created amphora doesn't preempt the + `MASTER` role from the other amphora. diff --git a/releasenotes/notes/remove-deprecated-api-configs-3f5652f71610b05e.yaml b/releasenotes/notes/remove-deprecated-api-configs-3f5652f71610b05e.yaml new file mode 100644 index 0000000000..35ad12eb9c --- /dev/null +++ b/releasenotes/notes/remove-deprecated-api-configs-3f5652f71610b05e.yaml @@ -0,0 +1,23 @@ +--- +upgrade: + - | + The following configuration settings have reached the end of their + deprecation period and are now removed from the [default] section of + the configuration. These will only be available in the [api_settings] + section going forward. + + * [DEFAULT] bind_host + * [DEFAULT] bind_port + * [DEFAULT] auth_strategy + * [DEFAULT] api_handler +deprecations: + - | + The following configuration settings have reached the end of their + deprecation period and are now removed from the [default] section of + the configuration. These will only be available in the [api_settings] + section going forward. + + * [DEFAULT] bind_host + * [DEFAULT] bind_port + * [DEFAULT] auth_strategy + * [DEFAULT] api_handler diff --git a/releasenotes/notes/remove-deprecated-v1-resources-6360da3de27b74d3.yaml b/releasenotes/notes/remove-deprecated-v1-resources-6360da3de27b74d3.yaml new file mode 100644 index 0000000000..f981122fdc --- /dev/null +++ b/releasenotes/notes/remove-deprecated-v1-resources-6360da3de27b74d3.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + Octavia v1 API (used for integration with Neutron-LBaaS) has been removed. + If Neutron-LBaaS integration is still required, do not upgrade to this + version. +deprecations: + - | + Octavia v1 API deprecation is complete. All relevant code, tests, and docs + have been removed.
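For the ``[api_settings]`` migration above, a minimal sketch of the new section; the values are illustrative (9876 is the usual Octavia API port):

.. code-block:: ini

    [api_settings]
    bind_host = 0.0.0.0
    bind_port = 9876
    auth_strategy = keystone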
diff --git a/releasenotes/notes/remove-duplicated-cert_generator-option-83d18647dc1d2954.yaml b/releasenotes/notes/remove-duplicated-cert_generator-option-83d18647dc1d2954.yaml new file mode 100644 index 0000000000..308a58edf9 --- /dev/null +++ b/releasenotes/notes/remove-duplicated-cert_generator-option-83d18647dc1d2954.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - Removed the duplicated config option 'cert_generator' + in [controller_worker]. Operators should now set it + under [certificates]. diff --git a/releasenotes/notes/remove-forcal-support-2f7991f2d435876f.yaml b/releasenotes/notes/remove-forcal-support-2f7991f2d435876f.yaml new file mode 100644 index 0000000000..eb35e9271e --- /dev/null +++ b/releasenotes/notes/remove-forcal-support-2f7991f2d435876f.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + The diskimage-builder elements for the amphora image no longer support + Ubuntu Focal. diff --git a/releasenotes/notes/remove-health_update_driver-56c99ec977bca245.yaml b/releasenotes/notes/remove-health_update_driver-56c99ec977bca245.yaml new file mode 100644 index 0000000000..d14cc83fb1 --- /dev/null +++ b/releasenotes/notes/remove-health_update_driver-56c99ec977bca245.yaml @@ -0,0 +1,9 @@ +--- +upgrade: + - | + The deprecated ``[health_manager] health_update_driver`` option was + removed. + + - | + The deprecated ``[health_manager] stats_update_driver`` option has been + removed. Use the ``[controller_worker] statistics_drivers`` option instead. diff --git a/releasenotes/notes/remove-split-listeners-6a4ccdf66fe7b683.yaml b/releasenotes/notes/remove-split-listeners-6a4ccdf66fe7b683.yaml new file mode 100644 index 0000000000..6a3f6c4b9e --- /dev/null +++ b/releasenotes/notes/remove-split-listeners-6a4ccdf66fe7b683.yaml @@ -0,0 +1,6 @@ +--- +deprecations: + - | + Amphora load balancers now support single-process mode only. Split listener + configuration, which was used up to API version 0.5, has been + removed from the codebase. diff --git a/releasenotes/notes/remove-status_update_threads-85a8b0307a04c164.yaml b/releasenotes/notes/remove-status_update_threads-85a8b0307a04c164.yaml new file mode 100644 index 0000000000..2a68cdb2a9 --- /dev/null +++ b/releasenotes/notes/remove-status_update_threads-85a8b0307a04c164.yaml @@ -0,0 +1,6 @@ +--- +deprecations: + - | + The deprecated option ``status_update_threads`` has been removed; + ``health_update_threads`` and ``stats_update_threads`` should be used + instead. diff --git a/releasenotes/notes/remove-sysvinit-and-upstart-f1655e9d0c53e5cc.yaml b/releasenotes/notes/remove-sysvinit-and-upstart-f1655e9d0c53e5cc.yaml new file mode 100644 index 0000000000..a4e0f42507 --- /dev/null +++ b/releasenotes/notes/remove-sysvinit-and-upstart-f1655e9d0c53e5cc.yaml @@ -0,0 +1,13 @@ +--- +upgrade: + - | + SysVinit and Upstart are no longer supported as init systems in amphora + instances. The only supported init system is now systemd. + +deprecations: + - | + The following options have been deprecated and have no effect now. These + options were used by the Upstart support, which has been removed.
+ + - ``[haproxy_amphora] respawn_count`` + - ``[haproxy_amphora] respawn_interval`` diff --git a/releasenotes/notes/remove-tags-relationship-warnings-a3c0175135f6cd84.yaml b/releasenotes/notes/remove-tags-relationship-warnings-a3c0175135f6cd84.yaml new file mode 100644 index 0000000000..43ca3c98cd --- /dev/null +++ b/releasenotes/notes/remove-tags-relationship-warnings-a3c0175135f6cd84.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed SQLAlchemy warnings about the relationship between the Tags object + and the other Octavia resources. diff --git a/releasenotes/notes/remove-tenant_id-c0352efbfb3a54f9.yaml b/releasenotes/notes/remove-tenant_id-c0352efbfb3a54f9.yaml new file mode 100644 index 0000000000..83dccdcc82 --- /dev/null +++ b/releasenotes/notes/remove-tenant_id-c0352efbfb3a54f9.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - | + Finally completely remove tenant_id, as it was deprecated along with the + keystone v2 API in Mitaka, which means we're free of it in Pike! diff --git a/releasenotes/notes/remove-unused-amphora-flow-and-status-constants-a3b2c4d5e6f7a8b9.yaml b/releasenotes/notes/remove-unused-amphora-flow-and-status-constants-a3b2c4d5e6f7a8b9.yaml new file mode 100644 index 0000000000..354920e093 --- /dev/null +++ b/releasenotes/notes/remove-unused-amphora-flow-and-status-constants-a3b2c4d5e6f7a8b9.yaml @@ -0,0 +1,8 @@ +--- +other: + - | + Removed unused amphora-related code including the get_create_amphora_flow + function, MarkAmphoraReadyInDB task class, MARK_AMPHORA_READY_INDB + constant, and AMPHORA_READY status constant. Updated amphora deletion + logic to only allow deletion when amphora status is ERROR, which is the + correct behavior since AMPHORA_ALLOCATED amphorae should not be deletable. diff --git a/releasenotes/notes/remove-use_upstart-448eaf86a7a46c54.yaml b/releasenotes/notes/remove-use_upstart-448eaf86a7a46c54.yaml new file mode 100644 index 0000000000..57a1b8d995 --- /dev/null +++ b/releasenotes/notes/remove-use_upstart-448eaf86a7a46c54.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + The deprecated ``[haproxy_amphora] use_upstart`` option has been removed. diff --git a/releasenotes/notes/remove-wsgi-scripts-a66048263bd550c6.yaml b/releasenotes/notes/remove-wsgi-scripts-a66048263bd550c6.yaml new file mode 100644 index 0000000000..05101a078b --- /dev/null +++ b/releasenotes/notes/remove-wsgi-scripts-a66048263bd550c6.yaml @@ -0,0 +1,29 @@ +--- +features: + - | + A new module, ``octavia.wsgi``, has been added as a place to gather WSGI + ``application`` objects. This is intended to ease deployment by providing + a consistent location for these objects. For example, if using uWSGI then + instead of: + + .. code-block:: ini + + [uwsgi] + wsgi-file = /bin/octavia-wsgi + + You can now use: + + .. code-block:: ini + + [uwsgi] + module = octavia.wsgi.api:application + + This also simplifies deployment with other WSGI servers that expect module + paths such as gunicorn. +upgrade: + - | + The WSGI script ``octavia-wsgi`` has been removed. Deployment tooling + should instead reference the Python module path for the wsgi module in + Octavia, ``octavia.wsgi.api:application`` if their chosen WSGI server + supports this (gunicorn, uWSGI, etc.) or implement a .wsgi script + themselves if not (mod_wsgi). 
diff --git a/releasenotes/notes/remove_user_group_option-56ba749d0064a394.yaml b/releasenotes/notes/remove_user_group_option-56ba749d0064a394.yaml new file mode 100644 index 0000000000..d1e60be4f2 --- /dev/null +++ b/releasenotes/notes/remove_user_group_option-56ba749d0064a394.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - | + Finally completely remove the user_group option, as it was deprecated in + Pike. \ No newline at end of file diff --git a/releasenotes/notes/removed-neutronclient-43f62f25210c3392.yaml b/releasenotes/notes/removed-neutronclient-43f62f25210c3392.yaml new file mode 100644 index 0000000000..ea3096390c --- /dev/null +++ b/releasenotes/notes/removed-neutronclient-43f62f25210c3392.yaml @@ -0,0 +1,23 @@ +--- +upgrade: + - | + Authentication settings for Neutron should be added + directly to the [neutron] section of the configuration now. The exact + settings depend on the `auth_type` used. Refer to + https://docs.openstack.org/keystoneauth/latest/plugin-options.html + for a list of possible options. +deprecations: + - | + In a future release Octavia will no longer take the authentication + settings for Neutron from the [service_auth] section as a fallback. It + will require them to be in the [neutron] section. The *endpoint* option is + now deprecated and replaced by *endpoint_override*. Similarly, the + *endpoint_type* option has been renamed to *valid_interfaces*, and the + *ca_certificates_file* option to *cafile*. + Note that [service_auth] + settings will still be used for other services like Nova and Glance. +other: + - | + Replaced code that uses the deprecated python-neutronclient library with + code that uses openstacksdk and removed python-neutronclient as a + dependency. diff --git a/releasenotes/notes/removing-amphorav1-ff43992c07a2071d.yaml b/releasenotes/notes/removing-amphorav1-ff43992c07a2071d.yaml new file mode 100644 index 0000000000..6dfa25de09 --- /dev/null +++ b/releasenotes/notes/removing-amphorav1-ff43992c07a2071d.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The *amphorav1* provider was removed. Users who have kept using it are + advised to switch to the default *amphora* provider, which is an + alias for the *amphorav2* provider. +deprecations: + - | + The deprecated *amphorav1* provider was removed. The default + provider *amphora* is still an alias for the *amphorav2* provider. diff --git a/releasenotes/notes/render-disabled-members-to-make-statuses-consistent-69189f71da2e02e8.yaml b/releasenotes/notes/render-disabled-members-to-make-statuses-consistent-69189f71da2e02e8.yaml new file mode 100644 index 0000000000..8a7a5b72ea --- /dev/null +++ b/releasenotes/notes/render-disabled-members-to-make-statuses-consistent-69189f71da2e02e8.yaml @@ -0,0 +1,12 @@ +--- +fixes: + - | + Creating a member on a pool with no healthmonitor would sometimes briefly + update its operating status from `NO_MONITOR` to `OFFLINE` and back to + `NO_MONITOR` during the provisioning sequence. This flapping will no longer + occur. + - | + Members that are disabled via `admin_state_up=False` are now rendered in + the HAProxy configuration on the amphora as `disabled`. Previously they + were not rendered at all. This means that disabled members will now + appear in health messages, and will properly change status to OFFLINE.
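A hedged sketch of the new ``[neutron]`` section described above; the URL and file path are illustrative assumptions, and the full set of options depends on the keystoneauth ``auth_type`` plugin in use:

.. code-block:: ini

    [neutron]
    # Replaces the deprecated endpoint, endpoint_type and
    # ca_certificates_file options
    endpoint_override = http://neutron.example.com:9696
    valid_interfaces = internal
    cafile = /etc/ssl/certs/ca-bundle.pem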
diff --git a/releasenotes/notes/reserved-ips-7ef3a63ab0b6b28a.yaml b/releasenotes/notes/reserved-ips-7ef3a63ab0b6b28a.yaml new file mode 100644 index 0000000000..c75d02e7ff --- /dev/null +++ b/releasenotes/notes/reserved-ips-7ef3a63ab0b6b28a.yaml @@ -0,0 +1,6 @@ +--- +security: + - | + Adds a configuration option, "reserved_ips", that allows the operator to + block addresses from being used in load balancer members. The default + setting blocks the nova metadata service address. diff --git a/releasenotes/notes/same-port-listeners-41198368d470e821.yaml b/releasenotes/notes/same-port-listeners-41198368d470e821.yaml new file mode 100644 index 0000000000..ad1b3fb7c7 --- /dev/null +++ b/releasenotes/notes/same-port-listeners-41198368d470e821.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a bug which prevented the creation of listeners for different + protocols on the same port (e.g. TCP port 53 and UDP port 53). diff --git a/releasenotes/notes/separate-thread-pool-for-health-stats-update-c263c844075a7721.yaml b/releasenotes/notes/separate-thread-pool-for-health-stats-update-c263c844075a7721.yaml new file mode 100644 index 0000000000..1b4f752e22 --- /dev/null +++ b/releasenotes/notes/separate-thread-pool-for-health-stats-update-c263c844075a7721.yaml @@ -0,0 +1,10 @@ +--- +fixes: + - | + Add new parameters to specify the number of threads for updating amphora + health and stats. +deprecations: + - | + The `status_update_threads` config option for the health manager is + deprecated because it is replaced by `health_update_threads` and + `stats_update_threads`. diff --git a/releasenotes/notes/service-type-73efc939e48d5858.yaml b/releasenotes/notes/service-type-73efc939e48d5858.yaml new file mode 100644 index 0000000000..e9db18b03f --- /dev/null +++ b/releasenotes/notes/service-type-73efc939e48d5858.yaml @@ -0,0 +1,6 @@ +--- +other: + - | + Octavia will use the OpenStack service type 'load-balancer'. + For more information about service types, see the Octavia API reference: + https://developer.openstack.org/api-ref/load-balancer/v2/index.html#service-endpoints diff --git a/releasenotes/notes/spare-pool-removal-7d51eae592d05874.yaml b/releasenotes/notes/spare-pool-removal-7d51eae592d05874.yaml new file mode 100644 index 0000000000..d46dfd1546 --- /dev/null +++ b/releasenotes/notes/spare-pool-removal-7d51eae592d05874.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + The spare pool feature was removed after being deprecated in the Victoria + release. After an upgrade of the controllers, spare amphorae will be + automatically deleted by the Octavia health-manager service. diff --git a/releasenotes/notes/stats-update-drivers-interface-changes-c8f2bf3b02eec767.yaml b/releasenotes/notes/stats-update-drivers-interface-changes-c8f2bf3b02eec767.yaml new file mode 100644 index 0000000000..49d1feb839 --- /dev/null +++ b/releasenotes/notes/stats-update-drivers-interface-changes-c8f2bf3b02eec767.yaml @@ -0,0 +1,17 @@ +--- +features: + - | + Load balancer statistics can now be reported to multiple backend locations + simply by specifying multiple statistics drivers in config. +upgrade: + - | + The internal interface for loadbalancer statistics collection has moved. + When upgrading, see deprecation notes for the ``stats_update_driver`` + config option, as it will need to be moved and renamed. +deprecations: + - | + The option ``health_manager.health_update_driver`` has been deprecated as + it was never really used, so the driver layer was removed.
The option ``health_manager.stats_update_driver`` was moved and renamed + to ``controller_worker.statistics_drivers`` (note it is now plural). It + can now contain a list of multiple drivers for handling statistics. diff --git a/releasenotes/notes/statuses_alias-27559e3d74b9eaf0.yaml b/releasenotes/notes/statuses_alias-27559e3d74b9eaf0.yaml new file mode 100644 index 0000000000..216ff4c65f --- /dev/null +++ b/releasenotes/notes/statuses_alias-27559e3d74b9eaf0.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes a neutron-lbaas LBaaS v2 API compatibility issue when requesting a + load balancer status tree via '/statuses'. diff --git a/releasenotes/notes/stop-logging-amphora-cert-2e188675699d60d5.yaml b/releasenotes/notes/stop-logging-amphora-cert-2e188675699d60d5.yaml new file mode 100644 index 0000000000..f1c42f010e --- /dev/null +++ b/releasenotes/notes/stop-logging-amphora-cert-2e188675699d60d5.yaml @@ -0,0 +1,7 @@ +--- +security: + - | + Fixed debug-level logging of Amphora certificates for flows + such as 'octavia-create-amp-for-lb-subflow-octavia-generate-serverpem' + (triggered with loadbalancer failover) and + 'octavia-create-amp-for-lb-subflow-octavia-update-cert-expiration'. diff --git a/releasenotes/notes/support-additional-vips-on-lb-creation-efe0dfa517c667a0.yaml b/releasenotes/notes/support-additional-vips-on-lb-creation-efe0dfa517c667a0.yaml new file mode 100644 index 0000000000..43c523c72b --- /dev/null +++ b/releasenotes/notes/support-additional-vips-on-lb-creation-efe0dfa517c667a0.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + It is now possible to create a loadbalancer with more than one VIP. There + is a new structure ``additional_vips`` in the create body, which allows a + subnet, and optionally an IP, to be specified. All VIP subnets must be part + of the same network. +upgrade: + - | + To support multi-VIP loadbalancers, a new amphora image must be built. It is + safe to upload the new image before the upgrade, as it is fully backwards + compatible. diff --git a/releasenotes/notes/support-az-on-lb-create-562dcf639bb272ea.yaml b/releasenotes/notes/support-az-on-lb-create-562dcf639bb272ea.yaml new file mode 100644 index 0000000000..da92ce23dd --- /dev/null +++ b/releasenotes/notes/support-az-on-lb-create-562dcf639bb272ea.yaml @@ -0,0 +1,15 @@ +--- +features: + - | + The load balancer create command now accepts an availability_zone argument. + With the amphora driver this will create a load balancer in the targeted + compute availability_zone in nova. + + When using spare pools, it will create spares in each AZ. For the amphora + driver, if no ``[nova] availability_zone`` is configured and availability + zones are used, results may be slightly unpredictable. + + Note (for the ``amphora`` driver): if it is possible for an amphora to + change availability zone after initial creation (not typically possible + without outside intervention), this may affect the ability of this feature + to function properly. diff --git a/releasenotes/notes/support-http-health-check-with-host-header-e2cf1f2a98d4114f.yaml b/releasenotes/notes/support-http-health-check-with-host-header-e2cf1f2a98d4114f.yaml new file mode 100644 index 0000000000..1bfe2c41a3 --- /dev/null +++ b/releasenotes/notes/support-http-health-check-with-host-header-e2cf1f2a98d4114f.yaml @@ -0,0 +1,5 @@ +--- +features: + - Extend the Octavia Health Monitor API with two new fields ``http_version`` + and ``domain_name`` to support HTTP health checks, which will inject the + domain name into the HTTP Host header.
diff --git a/releasenotes/notes/support-networks-without-dhcp-3458a063333ab7a8.yaml b/releasenotes/notes/support-networks-without-dhcp-3458a063333ab7a8.yaml new file mode 100644 index 0000000000..287c516e90 --- /dev/null +++ b/releasenotes/notes/support-networks-without-dhcp-3458a063333ab7a8.yaml @@ -0,0 +1,5 @@ +--- +features: + - Adds support for networks that do not have DHCP services enabled. +upgrade: + - To support networks without DHCP you must upgrade your amphora image. diff --git a/releasenotes/notes/support-oslo_middleware-http_proxy_to_wsgi-928c6fc5ec3d421c.yaml b/releasenotes/notes/support-oslo_middleware-http_proxy_to_wsgi-928c6fc5ec3d421c.yaml new file mode 100644 index 0000000000..2bfcdcf39f --- /dev/null +++ b/releasenotes/notes/support-oslo_middleware-http_proxy_to_wsgi-928c6fc5ec3d421c.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Now supports ``oslo_middleware http_proxy_to_wsgi``, which will set up the + request URL correctly in the case that there is a proxy (for example, a + loadbalancer such as HAProxy) in front of the Octavia API. It is off by + default and can be enabled by setting ``enable_proxy_headers_parsing=True`` + in the ``[oslo_middleware]`` section of ``octavia.conf``. diff --git a/releasenotes/notes/support-pkcs7-intermediate-ca-bundles-279c12bad974bff7.yaml b/releasenotes/notes/support-pkcs7-intermediate-ca-bundles-279c12bad974bff7.yaml new file mode 100644 index 0000000000..493be007ac --- /dev/null +++ b/releasenotes/notes/support-pkcs7-intermediate-ca-bundles-279c12bad974bff7.yaml @@ -0,0 +1,6 @@ +--- +features: + - Adds support for PKCS7 PEM or DER encoded intermediate certificate bundles + for TERMINATED_HTTPS listeners. +fixes: + - Resolves an issue with using encrypted TLS private keys. diff --git a/releasenotes/notes/support-proxy-protocol-cc5991175a110619.yaml b/releasenotes/notes/support-proxy-protocol-cc5991175a110619.yaml new file mode 100644 index 0000000000..f18fad1a38 --- /dev/null +++ b/releasenotes/notes/support-proxy-protocol-cc5991175a110619.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add support for the PROXY protocol for LBaaS pools in Octavia. diff --git a/releasenotes/notes/support-redirect-http-code-1c2e87ef7fda12e97.yaml b/releasenotes/notes/support-redirect-http-code-1c2e87ef7fda12e97.yaml new file mode 100644 index 0000000000..ec23d53833 --- /dev/null +++ b/releasenotes/notes/support-redirect-http-code-1c2e87ef7fda12e97.yaml @@ -0,0 +1,6 @@ +--- +features: + - The Octavia L7Policy API can now accept a new option `redirect_http_code` + for the L7Policy actions `REDIRECT_URL` or `REDIRECT_PREFIX`; HTTP + requests to the associated listener will then be redirected with the + configured HTTP response code. diff --git a/releasenotes/notes/support-redirect-prefix-7f8b289aee04fe99.yaml b/releasenotes/notes/support-redirect-prefix-7f8b289aee04fe99.yaml new file mode 100644 index 0000000000..bd6980e98a --- /dev/null +++ b/releasenotes/notes/support-redirect-prefix-7f8b289aee04fe99.yaml @@ -0,0 +1,3 @@ +--- +features: + - Support the REDIRECT_PREFIX action for L7Policy. diff --git a/releasenotes/notes/support-remote-debugging-fcb52df4a59c1467.yaml b/releasenotes/notes/support-remote-debugging-fcb52df4a59c1467.yaml new file mode 100644 index 0000000000..c19d6d06bd --- /dev/null +++ b/releasenotes/notes/support-remote-debugging-fcb52df4a59c1467.yaml @@ -0,0 +1,4 @@ +--- +features: + - Support remote debugging with PyDev. Please refer to the Contributor + documentation section to find more details.
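For the ``http_proxy_to_wsgi`` note above, the documented toggle in ``octavia.conf``:

.. code-block:: ini

    [oslo_middleware]
    # Off by default; enable when the API sits behind a proxy such as HAProxy
    enable_proxy_headers_parsing = True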
diff --git a/releasenotes/notes/support-rotating-server_certs_key_passphrase-c74a67ae5e169447.yaml b/releasenotes/notes/support-rotating-server_certs_key_passphrase-c74a67ae5e169447.yaml new file mode 100644 index 0000000000..54edd2859b --- /dev/null +++ b/releasenotes/notes/support-rotating-server_certs_key_passphrase-c74a67ae5e169447.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + Added support for multiple Fernet keys in the ``[certificates]/server_certs_key_passphrase`` + configuration option by changing it to a ListOpt. The first key is used for + encryption and the remaining keys are used for decryption, adding support + for rotating the passphrase. +upgrade: + - | + The ``[certificates]/server_certs_key_passphrase`` configuration option is + now a ListOpt so multiple keys can be specified; the first key is used for + encryption and the remaining keys are used for decryption, adding support + for rotating the passphrase. diff --git a/releasenotes/notes/support-wsgi-deployment-56013fef7172e982.yaml b/releasenotes/notes/support-wsgi-deployment-56013fef7172e982.yaml new file mode 100644 index 0000000000..49fcdd53db --- /dev/null +++ b/releasenotes/notes/support-wsgi-deployment-56013fef7172e982.yaml @@ -0,0 +1,3 @@ +--- +features: + - Octavia API now supports WSGI deployment. diff --git a/releasenotes/notes/switch-default-amphora-provider-7e17f90d7d4b2ee7.yaml b/releasenotes/notes/switch-default-amphora-provider-7e17f90d7d4b2ee7.yaml new file mode 100644 index 0000000000..caa2f3bca1 --- /dev/null +++ b/releasenotes/notes/switch-default-amphora-provider-7e17f90d7d4b2ee7.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + The AmphoraV2 provider introduced in earlier releases is now the default + Amphora provider for Octavia. The alias `amphorav1` selects the previous + provider version, and `amphorav2` is now the same as `amphora`. By default, + jobboard (flow persistence storage) is not enabled; it is configurable via + the `jobboard_enabled` option in the `task_flow` section. \ No newline at end of file diff --git a/releasenotes/notes/switch-taskflow-engine-parallel-8bf743eca15a0253.yaml b/releasenotes/notes/switch-taskflow-engine-parallel-8bf743eca15a0253.yaml new file mode 100644 index 0000000000..1b48a433c4 --- /dev/null +++ b/releasenotes/notes/switch-taskflow-engine-parallel-8bf743eca15a0253.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The default TaskFlow engine is now set to 'parallel' instead of 'serial'. + The parallel engine schedules tasks onto different threads to allow for + running non-dependent tasks simultaneously. This has the benefit of + accelerating the execution of some Octavia Amphora flows, such as + provisioning of active-standby amphora loadbalancers. Operators can revert + to the previous default 'serial' engine type by setting the configuration + option [task_flow]/engine = serial diff --git a/releasenotes/notes/switch-to-live-drivers-cbae7c60eafa0f3e.yaml b/releasenotes/notes/switch-to-live-drivers-cbae7c60eafa0f3e.yaml new file mode 100644 index 0000000000..f7b2731aa9 --- /dev/null +++ b/releasenotes/notes/switch-to-live-drivers-cbae7c60eafa0f3e.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + Most of the default drivers have been switched from noop drivers to live + drivers. Volume and distributor remain set to noop drivers as those are + experimental features. Operators do not need to make configuration changes.
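A hedged ``octavia.conf`` sketch tying together two of the notes above: passphrase rotation via the ``server_certs_key_passphrase`` ListOpt and the TaskFlow engine choice. The key values below are placeholders only; real deployments must supply their own passphrases that meet the length requirements documented for the option:

    [certificates]
    # The first key encrypts new data; the remaining keys are only tried
    # for decryption, which enables rotation. Placeholder values shown.
    server_certs_key_passphrase = new-32-character-passphrase-0002,old-32-character-passphrase-0001

    [task_flow]
    # 'parallel' is the new default engine; set 'serial' to restore the
    # previous behavior.
    engine = parallel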
diff --git a/releasenotes/notes/tls-cipher-prohibit-list-b5a23ca38149f3b8.yaml b/releasenotes/notes/tls-cipher-prohibit-list-b5a23ca38149f3b8.yaml new file mode 100644 index 0000000000..4b0fc90a89 --- /dev/null +++ b/releasenotes/notes/tls-cipher-prohibit-list-b5a23ca38149f3b8.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added ``tls_cipher_prohibit_list`` to ``octavia.conf``. Listeners, pools, + and the default values for either will be blocked from using any of these + ciphers. By default, no ciphers are prohibited. diff --git a/releasenotes/notes/tls-versions-listeners-59cecde77e0238a0.yaml b/releasenotes/notes/tls-versions-listeners-59cecde77e0238a0.yaml new file mode 100644 index 0000000000..947d55ab94 --- /dev/null +++ b/releasenotes/notes/tls-versions-listeners-59cecde77e0238a0.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + HTTPS-terminated listeners can now be configured to use only specified + versions of TLS. Default TLS versions for new listeners can be set with + ``default_listener_tls_versions`` in ``octavia.conf``. Existing listeners + will continue to use the old defaults. +upgrade: + - | + HTTPS-terminated listeners will now only allow TLS1.2 and TLS1.3 by + default. If no TLS versions are specified at listener create time, the + listener will only accept TLS1.2 and TLS1.3 connections. Previously, TLS + listeners would accept any TLS version. Existing listeners will not be + changed. diff --git a/releasenotes/notes/tweak-ssl-cachesize-6893851feed43975.yaml b/releasenotes/notes/tweak-ssl-cachesize-6893851feed43975.yaml new file mode 100644 index 0000000000..373befe0ac --- /dev/null +++ b/releasenotes/notes/tweak-ssl-cachesize-6893851feed43975.yaml @@ -0,0 +1,9 @@ +--- +other: + - | + When an HTTPS termination listener is configured, Octavia will tweak the + HAProxy `tune.ssl.cachesize` setting to use about half of the available + memory (free + buffers + cached) on the amphora minus the memory needed + for network sockets based on the global max connections setting. + This allows better reuse of existing SSL sessions and + helps lower the number of computationally expensive SSL handshakes. diff --git a/releasenotes/notes/udp-delay-based-on-correct-setting-6a60856de2927ccd.yaml b/releasenotes/notes/udp-delay-based-on-correct-setting-6a60856de2927ccd.yaml new file mode 100644 index 0000000000..2d735aae10 --- /dev/null +++ b/releasenotes/notes/udp-delay-based-on-correct-setting-6a60856de2927ccd.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + The delay between checks on UDP health monitors was using the incorrect + config value ``timeout`` when it should have used ``delay``. diff --git a/releasenotes/notes/update-diskimage-create-rhel-centos-defaults-2be19c634f10506f.yaml b/releasenotes/notes/update-diskimage-create-rhel-centos-defaults-2be19c634f10506f.yaml new file mode 100644 index 0000000000..2231fd7bfe --- /dev/null +++ b/releasenotes/notes/update-diskimage-create-rhel-centos-defaults-2be19c634f10506f.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + diskimage-create now defaults to distribution release 9 when selecting RHEL + as the base OS and to release 9-stream when selecting CentOS.
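A sketch of the TLS hardening options from the two notes above, assuming both options live in the ``[api_settings]`` section and that versions use the ``TLSv1.x`` notation; the cipher names are illustrative OpenSSL cipher strings. Check the generated ``octavia.conf`` for the exact section and supported values:

    [api_settings]
    # Block listeners, pools, and the defaults from using these ciphers.
    tls_cipher_prohibit_list = DES-CBC3-SHA,RC4-SHA
    # Default TLS versions applied to newly created listeners.
    default_listener_tls_versions = TLSv1.2,TLSv1.3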
diff --git a/releasenotes/notes/update-ubuntu-amphora-image-default-to-jammy-fad22bfb80a13f2b.yaml b/releasenotes/notes/update-ubuntu-amphora-image-default-to-jammy-fad22bfb80a13f2b.yaml new file mode 100644 index 0000000000..efe2fb64b7 --- /dev/null +++ b/releasenotes/notes/update-ubuntu-amphora-image-default-to-jammy-fad22bfb80a13f2b.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + ``diskimage-create.sh`` has been updated to build Ubuntu Jammy (22.04) + amphora images by default. diff --git a/releasenotes/notes/update-ubuntu-amphora-image-default-to-noble-d2733d4bcc31fec9.yaml b/releasenotes/notes/update-ubuntu-amphora-image-default-to-noble-d2733d4bcc31fec9.yaml new file mode 100644 index 0000000000..b6042abad5 --- /dev/null +++ b/releasenotes/notes/update-ubuntu-amphora-image-default-to-noble-d2733d4bcc31fec9.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + ``diskimage-create.sh`` has been updated to build Ubuntu Noble (24.04) + amphora images by default. diff --git a/releasenotes/notes/use-dib-distribution-mirror-400d96c1a7df9862.yaml b/releasenotes/notes/use-dib-distribution-mirror-400d96c1a7df9862.yaml new file mode 100644 index 0000000000..bd4b94a5c7 --- /dev/null +++ b/releasenotes/notes/use-dib-distribution-mirror-400d96c1a7df9862.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + The diskimage-create script now supports generic download mirrors via the + DIB_DISTRIBUTION_MIRROR environment variable, replacing the existing + distribution-specific elements. +upgrade: + - | + For the diskimage-create script, the BASE_OS_MIRROR environment variable + was renamed to DIB_DISTRIBUTION_MIRROR. +deprecations: + - | + These custom distribution mirror elements for the diskimage-create script + were removed: apt-mirror, centos-mirror, fedora-mirror. diff --git a/releasenotes/notes/use-nohz-full-to-improve-latency-9d5acd7333f7e462.yaml b/releasenotes/notes/use-nohz-full-to-improve-latency-9d5acd7333f7e462.yaml new file mode 100644 index 0000000000..b3dfa23c5a --- /dev/null +++ b/releasenotes/notes/use-nohz-full-to-improve-latency-9d5acd7333f7e462.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + The cpu-pinning element for the amphora image sets the kernel bootarg + nohz_full=1-N to enable + full dynticks on all CPUs except the first one (on single CPU images this + will have no effect). This should reduce kernel noise on those CPUs to a + minimum and reduce latency. +upgrade: + - | + In order for the full dynticks optimization to become effective, a new + amphora image needs to be built with the new optional CPU pinning + feature enabled. diff --git a/releasenotes/notes/validate-access-to-vip_subnet_id-48fc92b45529cafd.yaml b/releasenotes/notes/validate-access-to-vip_subnet_id-48fc92b45529cafd.yaml new file mode 100644 index 0000000000..30637a3a45 --- /dev/null +++ b/releasenotes/notes/validate-access-to-vip_subnet_id-48fc92b45529cafd.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix a bug that allowed a user to create a load balancer on a + ``vip_subnet_id`` that belongs to another user by using the subnet UUID. diff --git a/releasenotes/notes/validate-protocols-for-l7policies-83d678171f13136a.yaml b/releasenotes/notes/validate-protocols-for-l7policies-83d678171f13136a.yaml new file mode 100644 index 0000000000..114daaa3df --- /dev/null +++ b/releasenotes/notes/validate-protocols-for-l7policies-83d678171f13136a.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Validate that the creation of L7 policies is compatible with the protocol + of the listener in the Amphora driver.
L7 policies are allowed for + Terminated HTTPS or HTTP protocol listeners, but not for HTTPS, TCP, or UDP + protocol listeners. diff --git a/releasenotes/notes/validate-same-ip-protocol-in-udp-lb-2813b545131097ec.yaml b/releasenotes/notes/validate-same-ip-protocol-in-udp-lb-2813b545131097ec.yaml new file mode 100644 index 0000000000..8d7ae40649 --- /dev/null +++ b/releasenotes/notes/validate-same-ip-protocol-in-udp-lb-2813b545131097ec.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Adding a member with a different IP protocol version than the VIP IP protocol + version in a UDP load balancer caused a crash in the amphora. A validation + step in the amphora driver now prevents mixing IP protocol versions in UDP + load balancers. diff --git a/releasenotes/notes/validate-tls-versions-and-alpn-protocols-d880b14848394e7d.yaml b/releasenotes/notes/validate-tls-versions-and-alpn-protocols-d880b14848394e7d.yaml new file mode 100644 index 0000000000..602e52f936 --- /dev/null +++ b/releasenotes/notes/validate-tls-versions-and-alpn-protocols-d880b14848394e7d.yaml @@ -0,0 +1,15 @@ +--- +fixes: + - | + The following options now accept only values that are actually supported; + usage of an unsupported value may cause Octavia services to fail to start. + Previously, unsupported values were accepted and caused failures during + actual resource creation. + + - ``[api_settings] default_listener_tls_versions`` + - ``[api_settings] default_pool_tls_versions`` + - ``[api_settings] default_listener_alpn_protocols`` + - ``[api_settings] default_pool_alpn_protocols`` + + See the ``octavia.conf`` file generated by the ``oslo-config-generator`` + tool to find supported values; the ALPN defaults are sketched below. diff --git a/releasenotes/notes/validate-url_path-value-in-requests-3eb3adedcd696433.yaml b/releasenotes/notes/validate-url_path-value-in-requests-3eb3adedcd696433.yaml new file mode 100644 index 0000000000..8d010bfddb --- /dev/null +++ b/releasenotes/notes/validate-url_path-value-in-requests-3eb3adedcd696433.yaml @@ -0,0 +1,7 @@ +--- +issues: + - | + Fixed a configuration issue that allowed authenticated and authorized + users to inject code into the HAProxy configuration using API requests. + The Octavia API no longer accepts unencoded whitespace characters in url_path + values in update requests for health monitors. diff --git a/releasenotes/notes/validate-vip-network-params-57662cc3a99f80e5.yaml b/releasenotes/notes/validate-vip-network-params-57662cc3a99f80e5.yaml new file mode 100644 index 0000000000..e56e96e0b5 --- /dev/null +++ b/releasenotes/notes/validate-vip-network-params-57662cc3a99f80e5.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Add a validation step in the Octavia Amphora driver to ensure that the + port_security_enabled parameter is set on the VIP network. diff --git a/releasenotes/notes/vip-port-project-id-bbb26b657b08365e.yaml b/releasenotes/notes/vip-port-project-id-bbb26b657b08365e.yaml new file mode 100644 index 0000000000..1b2711d7a6 --- /dev/null +++ b/releasenotes/notes/vip-port-project-id-bbb26b657b08365e.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Neutron LBaaS was assigning the user's project-id to the VIP port it created, + thus allowing the user to attach Floating-IPs to the VIP port. Octavia, + on the other hand, was assigning the Octavia project-id to the port, making + it impossible for the user to attach a Floating IP. This patch brings + Octavia's behavior in line with Neutron LBaaS and assigns the user's + project-id to the VIP port created by Octavia.
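Returning to the ``[api_settings]`` validation note earlier in this hunk, a sketch of the ALPN defaults it lists; ``h2`` and ``http/1.1`` are standard ALPN protocol identifiers and are assumed to be among the supported values:

    [api_settings]
    # Defaults negotiated for new listeners and pools via ALPN.
    default_listener_alpn_protocols = h2,http/1.1
    default_pool_alpn_protocols = h2,http/1.1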
diff --git a/releasenotes/notes/volume-based-amphora-9a1899634f5244b0.yaml b/releasenotes/notes/volume-based-amphora-9a1899634f5244b0.yaml new file mode 100644 index 0000000000..d5037ee342 --- /dev/null +++ b/releasenotes/notes/volume-based-amphora-9a1899634f5244b0.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + Allow creation of volume-based amphorae. + Many production deployments use volume-based instances for greater flexibility. + Octavia will create a volume and attach it to the amphora. + + New settings: + * `volume_driver`: Whether to use a volume driver (cinder) to create volume-backed amphorae. + * `volume_size`: Size of the root volume for the amphora instance when using Cinder. + * `volume_type`: Type of volume for the amphora root disk. + * `volume_create_retry_interval`: Interval to wait between checks that the volume has reached the available state. + * `volume_create_timeout`: Timeout for the volume to be successfully created. + * `volume_create_max_retries`: Maximum number of retries to create the volume. diff --git a/releasenotes/notes/workaround-for-haproxy-crash-on-reload-813859171a6ac023.yaml b/releasenotes/notes/workaround-for-haproxy-crash-on-reload-813859171a6ac023.yaml new file mode 100644 index 0000000000..845f7c23a3 --- /dev/null +++ b/releasenotes/notes/workaround-for-haproxy-crash-on-reload-813859171a6ac023.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Added a workaround that prevents the listener PUT API call from failing if + haproxy crashes during a reload. The amphora-agent ensures that in case of + crashes, haproxy is correctly restarted and ready to accept incoming + requests (see https://bugs.launchpad.net/octavia/+bug/2054666) diff --git a/releasenotes/notes/zombie_amp-1b435eb66643dab8.yaml b/releasenotes/notes/zombie_amp-1b435eb66643dab8.yaml new file mode 100644 index 0000000000..9c174e4eac --- /dev/null +++ b/releasenotes/notes/zombie_amp-1b435eb66643dab8.yaml @@ -0,0 +1,12 @@ +--- +fixes: + - | + Octavia will now automatically delete zombie amphorae in Nova + when they are detected. Zombie amphorae are amphorae that + report health messages but appear DELETED in Octavia's + database. +other: + - | + Processing zombie amphorae is already expensive, and this adds + another step that could increase the load on the Octavia Health + Manager, especially during periods of Nova API slowness. diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst new file mode 100644 index 0000000000..2c9a36fae4 --- /dev/null +++ b/releasenotes/source/2023.1.rst @@ -0,0 +1,6 @@ +=========================== +2023.1 Series Release Notes +=========================== + +.. release-notes:: + :branch: unmaintained/2023.1 diff --git a/releasenotes/source/2023.2.rst b/releasenotes/source/2023.2.rst new file mode 100644 index 0000000000..a4838d7d0e --- /dev/null +++ b/releasenotes/source/2023.2.rst @@ -0,0 +1,6 @@ +=========================== +2023.2 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2023.2 diff --git a/releasenotes/source/2024.1.rst b/releasenotes/source/2024.1.rst new file mode 100644 index 0000000000..4977a4f1a0 --- /dev/null +++ b/releasenotes/source/2024.1.rst @@ -0,0 +1,6 @@ +=========================== +2024.1 Series Release Notes +=========================== + +.. 
release-notes:: + :branch: stable/2024.1 diff --git a/releasenotes/source/2024.2.rst b/releasenotes/source/2024.2.rst new file mode 100644 index 0000000000..aaebcbc8c3 --- /dev/null +++ b/releasenotes/source/2024.2.rst @@ -0,0 +1,6 @@ +=========================== +2024.2 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2024.2 diff --git a/releasenotes/source/2025.1.rst b/releasenotes/source/2025.1.rst new file mode 100644 index 0000000000..3add0e53aa --- /dev/null +++ b/releasenotes/source/2025.1.rst @@ -0,0 +1,6 @@ +=========================== +2025.1 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2025.1 diff --git a/releasenotes/source/2025.2.rst b/releasenotes/source/2025.2.rst new file mode 100644 index 0000000000..4dae18d869 --- /dev/null +++ b/releasenotes/source/2025.2.rst @@ -0,0 +1,6 @@ +=========================== +2025.2 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2025.2 diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder new file mode 100644 index 0000000000..e69de29bb2 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder new file mode 100644 index 0000000000..e69de29bb2 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py new file mode 100644 index 0000000000..c88b39272a --- /dev/null +++ b/releasenotes/source/conf.py @@ -0,0 +1,258 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Octavia Release Notes documentation build configuration file, created +# by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'openstackdocstheme', + 'reno.sphinxext', +] + +# openstackdocstheme options +openstackdocs_repo_name = 'openstack/octavia' +openstackdocs_bug_project = 'octavia' +openstackdocs_bug_tag = 'doc' + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. 
+# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +copyright = '2015, Octavia Developers' + +# Release notes are version independent. +# The short X.Y version. + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'native' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'openstackdocs' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. 
+# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'OctaviaReleaseNotesdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, +# documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'OctaviaReleaseNotes.tex', + 'Octavia Release Notes Documentation', + 'Octavia Developers', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'octaviareleasenotes', 'Octavia Release Notes ' + 'Documentation', ['Octavia Developers'], 1) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'OctaviaReleaseNotes', 'Octavia Release Notes ' + 'Documentation', + 'Octavia Developers', 'OctaviaReleaseNotes', + 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + +# -- Options for Internationalization output ------------------------------ +locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst new file mode 100644 index 0000000000..bd39ff0e3e --- /dev/null +++ b/releasenotes/source/index.rst @@ -0,0 +1,43 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. 
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +===================== +Octavia Release Notes +===================== + +.. toctree:: + :maxdepth: 1 + + unreleased + 2025.2 + 2025.1 + 2024.2 + 2024.1 + 2023.2 + 2023.1 + zed + yoga + xena + wallaby + victoria + ussuri + train + stein + rocky + queens + pike + ocata + newton + mitaka + liberty + diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst new file mode 100644 index 0000000000..36217be844 --- /dev/null +++ b/releasenotes/source/liberty.rst @@ -0,0 +1,6 @@ +============================== + Liberty Series Release Notes +============================== + +.. release-notes:: + :branch: origin/stable/liberty diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po new file mode 100644 index 0000000000..7f5c4bb4c2 --- /dev/null +++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po @@ -0,0 +1,2075 @@ +# Andi Chandler , 2017. #zanata +# Andi Chandler , 2018. #zanata +# Andi Chandler , 2020. #zanata +# Andi Chandler , 2022. #zanata +# Andi Chandler , 2023. #zanata +# Andi Chandler , 2024. #zanata +msgid "" +msgstr "" +"Project-Id-Version: octavia\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-07-10 12:52+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2024-10-01 01:24+0000\n" +"Last-Translator: Andi Chandler \n" +"Language-Team: English (United Kingdom)\n" +"Language: en_GB\n" +"X-Generator: Zanata 4.3.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" + +msgid "0.10.0" +msgstr "0.10.0" + +msgid "0.10.0-37" +msgstr "0.10.0-37" + +msgid "0.5.2-23" +msgstr "0.5.2-23" + +msgid "0.8.0" +msgstr "0.8.0" + +msgid "0.9.0" +msgstr "0.9.0" + +msgid "1.0.0" +msgstr "1.0.0" + +msgid "1.0.2" +msgstr "1.0.2" + +msgid "1.0.4" +msgstr "1.0.4" + +msgid "1.0.5-3" +msgstr "1.0.5-3" + +msgid "10.0.0" +msgstr "10.0.0" + +msgid "10.1.0" +msgstr "10.1.0" + +msgid "10.1.1" +msgstr "10.1.1" + +msgid "11.0.0" +msgstr "11.0.0" + +msgid "11.0.1" +msgstr "11.0.1" + +msgid "11.0.2" +msgstr "11.0.2" + +msgid "11.0.3" +msgstr "11.0.3" + +msgid "12.0.0" +msgstr "12.0.0" + +msgid "12.0.1" +msgstr "12.0.1" + +msgid "13.0.0" +msgstr "13.0.0" + +msgid "14.0.0" +msgstr "14.0.0" + +msgid "2.0.0" +msgstr "2.0.0" + +msgid "2.0.2" +msgstr "2.0.2" + +msgid "2.0.3" +msgstr "2.0.3" + +msgid "2.0.4" +msgstr "2.0.4" + +msgid "2.1.0" +msgstr "2.1.0" + +msgid "2.1.1" +msgstr "2.1.1" + +msgid "2.1.2" +msgstr "2.1.2" + +msgid "2.1.2-11" +msgstr "2.1.2-11" + +msgid "2023.1 Series Release Notes" +msgstr "2023.1 Series Release Notes" + +msgid "2023.2 Series Release Notes" +msgstr "2023.2 Series Release Notes" + +msgid "2024.1 Series Release Notes" +msgstr "2024.1 Series Release Notes" + +msgid "3.0.0" +msgstr "3.0.0" + +msgid "3.0.1" +msgstr "3.0.1" + +msgid "3.0.2" +msgstr "3.0.2" + +msgid "3.1.0" +msgstr "3.1.0" + +msgid "3.1.1" +msgstr "3.1.1" + +msgid "3.2.0" +msgstr "3.2.0" + +msgid "3.2.1" +msgstr "3.2.1" + +msgid "3.2.2" +msgstr "3.2.2" + +msgid "3.2.2-5" +msgstr "3.2.2-5" + +msgid "4.0.0" +msgstr "4.0.0" + +msgid "4.0.1" 
+msgstr "4.0.1" + +msgid "4.1.0" +msgstr "4.1.0" + +msgid "4.1.1" +msgstr "4.1.1" + +msgid "4.1.2" +msgstr "4.1.2" + +msgid "4.1.4" +msgstr "4.1.4" + +msgid "4.1.4-5" +msgstr "4.1.4-5" + +msgid "5.0.0" +msgstr "5.0.0" + +msgid "5.0.1" +msgstr "5.0.1" + +msgid "5.0.2" +msgstr "5.0.2" + +msgid "5.0.3" +msgstr "5.0.3" + +msgid "5.1.0" +msgstr "5.1.0" + +msgid "5.1.1" +msgstr "5.1.1" + +msgid "5.1.2" +msgstr "5.1.2" + +msgid "5.1.2-37" +msgstr "5.1.2-37" + +msgid "6.0.0" +msgstr "6.0.0" + +msgid "6.0.1" +msgstr "6.0.1" + +msgid "6.1.0" +msgstr "6.1.0" + +msgid "6.2.0" +msgstr "6.2.0" + +msgid "6.2.1" +msgstr "6.2.1" + +msgid "6.2.2" +msgstr "6.2.2" + +msgid "6.2.2-39" +msgstr "6.2.2-39" + +msgid "7.0.0" +msgstr "7.0.0" + +msgid "7.1.0" +msgstr "7.1.0" + +msgid "7.1.1" +msgstr "7.1.1" + +msgid "7.1.2" +msgstr "7.1.2" + +msgid "7.1.2-38" +msgstr "7.1.2-38" + +msgid "8.0.0" +msgstr "8.0.0" + +msgid "8.0.1" +msgstr "8.0.1" + +msgid "8.0.1-89" +msgstr "8.0.1-89" + +msgid "9.0.0" +msgstr "9.0.0" + +msgid "9.0.1" +msgstr "9.0.1" + +msgid "9.1.0" +msgstr "9.1.0" + +msgid "9.1.0-40" +msgstr "9.1.0-40" + +msgid "" +"A new amphora image is required to fix the potential certs-ramfs race " +"condition." +msgstr "" +"A new amphora image is required to fix the potential certs-ramfs race " +"condition." + +msgid "" +"A new amphora image is required to resolve the amphora memory issues when a " +"load balancer has multiple listeners and the amphora image uses haproxy 1.8 " +"or newer." +msgstr "" +"A new amphora image is required to resolve the amphora memory issues when a " +"load balancer has multiple listeners and the amphora image uses HAProxy 1.8 " +"or newer." + +msgid "" +"A new option is provided in the oslo_messaging namespace to disable " +"event_notifications." +msgstr "" +"A new option is provided in the oslo_messaging namespace to disable " +"event_notifications." + +msgid "" +"A patch that fixes an issue making the VIP port unreachable because of " +"missing IP rules requires an update of the Amphora image." +msgstr "" +"A patch that fixes an issue making the VIP port unreachable because of " +"missing IP rules requires an update of the Amphora image." + +msgid "" +"A provider driver developer guide has been added to the documentation to aid " +"driver providers." +msgstr "" +"A provider driver developer guide has been added to the documentation to aid " +"driver providers." + +msgid "" +"A race condition between the certs-ramfs and the amphora agent may lead to " +"tenant TLS content being stored on the amphora filesystem instead of in the " +"encrypted RAM filesystem." +msgstr "" +"A race condition between the certs-ramfs and the amphora agent may lead to " +"tenant TLS content being stored on the amphora filesystem instead of in the " +"encrypted RAM filesystem." + +msgid "Active/Standby support for Octavia." +msgstr "Active/Standby support for Octavia." + +msgid "Add a config variable to disable creation of TLS Terminated listeners." +msgstr "Add a config variable to disable creation of TLS Terminated listeners." + +msgid "" +"Add a new configuration option to define the default connection_limit for " +"new listeners that use the Amphora provider. The option is [haproxy_amphora]." +"default_connection_limit and its default value is 50,000. This value is used " +"when creating or setting a listener with -1 as connection_limit parameter, " +"or when unsetting connection_limit parameter." 
+msgstr "" +"Add a new configuration option to define the default connection_limit for " +"new listeners that use the Amphora provider. The option is [haproxy_amphora]." +"default_connection_limit and its default value is 50,000. This value is used " +"when creating or setting a listener with -1 as connection_limit parameter, " +"or when unsetting connection_limit parameter." + +msgid "" +"Add a validation step in the Octavia Amphora driver to ensure that the " +"port_security_enabled parameter is set on the VIP network." +msgstr "" +"Add a validation step in the Octavia Amphora driver to ensure that the " +"port_security_enabled parameter is set on the VIP network." + +msgid "" +"Add an API for allowing administrators to manage Octavia Availability Zones " +"and Availability Zone Profiles, which behave nearly identically to Flavors " +"and Flavor Profiles." +msgstr "" +"Add an API for allowing administrators to manage Octavia Availability Zones " +"and Availability Zone Profiles, which behave nearly identically to Flavours " +"and Flavour Profiles." + +msgid "" +"Add config variables to allow disabling either API version (v1 or v2.0)." +msgstr "" +"Add config variables to allow disabling either API version (v1 or v2.0)." + +msgid "" +"Add fake Amphora stats for when Octavia runs in noop mode / using noop " +"drivers." +msgstr "" +"Add fake Amphora stats for when Octavia runs in noop mode / using noop " +"drivers." + +msgid "Add l7policy and l7rule to octavia quota." +msgstr "Add l7policy and l7rule to Octavia quota." + +msgid "" +"Add listener and pool protocol validation. The pool and listener can't be " +"combined arbitrarily. We need some constraints on the protocol side." +msgstr "" +"Add listener and pool protocol validation. The pool and listener can't be " +"combined arbitrarily. We need some constraints on the protocol side." + +msgid "Add missing cloud-utils-growpart RPM to Red Hat based amphora images." +msgstr "Add missing cloud-utils-growpart RPM to Red Hat-based amphora images." + +msgid "Add missing cronie RPM to Red Hat based amphora images." +msgstr "Add missing cronie RPM to Red Hat-based amphora images." + +msgid "Add monitor address and port to member" +msgstr "Add monitor address and port to member" + +msgid "" +"Add new parameters to specify the number of threads for updating amphora " +"health and stats." +msgstr "" +"Add new parameters to specify the number of threads for updating amphora " +"health and stats." + +msgid "Add sos element to amphora images (Red Hat family only)." +msgstr "Add sos element to amphora images (Red Hat family only)." + +msgid "Add support PROXY protocol for lbaas pool in octavia" +msgstr "Add support PROXY protocol for LBaaS pool in Octavia" + +msgid "" +"Add support for SCTP protocol. SCTP support has been added in the Octavia " +"API for listener, pool, and health-monitor resources." +msgstr "" +"Add support for SCTP protocol. SCTP support has been added in the Octavia " +"API for listener, pool, and health-monitor resources." + +msgid "Add support for Ubuntu Xenial amphora images." +msgstr "Add support for Ubuntu Xenial Amphora images." + +msgid "" +"Add support for monitor_address and monitor_port attributes in UDP members. " +"Previously, monitor_address and monitor_port were ignored and address and " +"protocol_port attributes were used as monitoring address and port." +msgstr "" +"Add support for monitor_address and monitor_port attributes in UDP members. 
" +"Previously, monitor_address and monitor_port were ignored and address and " +"protocol_port attributes were used as monitoring address and port." + +msgid "" +"Add support for the SCTP protocol in the Amphora driver. Support for SCTP " +"listeners and pools is implemented using keepalived in the amphora. Support " +"for SCTP health monitors is provided by the amphora-health-checker script " +"and relies on an INIT/INIT-ACK/ABORT sequence of packets." +msgstr "" +"Add support for the SCTP protocol in the Amphora driver. Support for SCTP " +"listeners and pools is implemented using keepalived in the amphora. Support " +"for SCTP health monitors is provided by the amphora-health-checker script " +"and relies on an INIT/INIT-ACK/ABORT sequence of packets." + +msgid "" +"Added HTTP/2 over TLS support via ALPN protocol negotiation to the amphora " +"provider driver for TLS-enabled pools." +msgstr "" +"Added HTTP/2 over TLS support via ALPN protocol negotiation to the amphora " +"provider driver for TLS-enabled pools." + +msgid "" +"Added HTTP/2 over TLS support via ALPN protocol negotiation to the amphora " +"provider driver. Feature available in amphora images with HAProxy 2.0 or " +"newer." +msgstr "" +"Added HTTP/2 over TLS support via ALPN protocol negotiation to the amphora " +"provider driver. Feature available in amphora images with HAProxy 2.0 or " +"newer." + +msgid "Added UDP protocol support to listeners and pools." +msgstr "Added UDP protocol support to listeners and pools." + +msgid "" +"Added ``minimum_tls_version`` to ``octavia.conf``. Listeners, pools, and " +"the defaults for either will be blocked from using any lower TLS versions. " +"By default, there is no minumum version." +msgstr "" +"Added ``minimum_tls_version`` to ``octavia.conf``. Listeners, pools, and " +"the defaults for either will be blocked from using any lower TLS versions. " +"By default, there is no minumum version." + +msgid "" +"Added ``tls_cipher_prohibit_list`` to ``octavia.conf``. Listeners, pools, " +"and the default values for either will be blocked from using any of these " +"ciphers. By default, no ciphers are prohibited." +msgstr "" +"Added ``tls_cipher_prohibit_list`` to ``octavia.conf``. Listeners, pools, " +"and the default values for either will be blocked from using any of these " +"ciphers. By default, no ciphers are prohibited." + +msgid "" +"Added a configuration option that specifies the availability zone amphora " +"should be built in." +msgstr "" +"Added a configuration option that specifies the availability zone amphora " +"should be built in." + +msgid "" +"Added a filter to hide a bogus ComputeWaitTimeoutException exception when " +"creating an amphora when jobboard is disabled. This exception is part of the " +"flow when creating a load balancer or an amphora and should not be shown to " +"the user." +msgstr "" +"Added a filter to hide a bogus ComputeWaitTimeoutException exception when " +"creating an Amphora when job board is disabled. This exception is part of " +"the flow when creating a load balancer or an amphora and should not be shown " +"to the user." + +msgid "" +"Added a new PROMETHEUS listener that exposes a prometheus exporter endpoint." +msgstr "" +"Added a new PROMETHEUS listener that exposes a Prometheus exporter endpoint." + +msgid "" +"Added a new configuration setting (``[task_flow]/jobboard_enabled``) to " +"enable/disable jobboard functionality in the amphorav2 provider. 
When " +"disabled, the amphorav2 provider behaves similarly to the amphora v1 " +"provider and does not require extra dependencies. The default setting is " +"jobboard disabled while jobboard remains an experimental feature." +msgstr "" +"Added a new configuration setting (``[task_flow]/jobboard_enabled``) to " +"enable/disable jobboard functionality in the amphorav2 provider. When " +"disabled, the amphorav2 provider behaves similarly to the amphora v1 " +"provider and does not require extra dependencies. The default setting is " +"jobboard disabled while jobboard remains an experimental feature." + +msgid "" +"Added a new endpoint /v2.0/octavia/amphorae to expose internal details about " +"amphorae. This endpoint is admin only." +msgstr "" +"Added a new endpoint /v2.0/octavia/amphorae to expose internal details about " +"amphorae. This endpoint is admin only." + +msgid "" +"Added a new option named server_certs_key_passphrase under the certificates " +"section. The default value gets copied from an environment variable named " +"TLS_PASS_AMPS_DEFAULT. In a case where TLS_PASS_AMPS_DEFAULT is not set, and " +"the operator did not fill any other value directly, 'insecure-key-do-not-use-" +"this-key' will be used." +msgstr "" +"Added a new option named server_certs_key_passphrase under the certificates " +"section. The default value gets copied from an environment variable named " +"TLS_PASS_AMPS_DEFAULT. In a case where TLS_PASS_AMPS_DEFAULT is not set, and " +"the operator did not fill any other value directly, 'insecure-key-do-not-use-" +"this-key' will be used." + +msgid "" +"Added aarch64/arm64 amphora image support to the disk image create tool and " +"to the devstack plugin." +msgstr "" +"Added aarch64/arm64 amphora image support to the disk image creation tool " +"and to the devstack plugin." + +msgid "" +"Added ability for Octavia to automatically set Barbican ACLs on behalf of " +"the user. Such enables users to create TLS-terminated listeners without " +"having to add the Octavia keystone user id to the ACL list. Octavia will " +"also automatically revoke access to secrets whenever load balancing " +"resources no longer require access to them." +msgstr "" +"Added ability for Octavia to automatically set Barbican ACLs on behalf of " +"the user. Such enables users to create TLS-terminated listeners without " +"having to add the Octavia Keystone user id to the ACL list. Octavia will " +"also automatically revoke access to secrets whenever load balancing " +"resources no longer require access to them." + +msgid "" +"Added an option to the diskimage-create.sh script to specify the Octavia Git " +"branch to build the image from." +msgstr "" +"Added an option to the diskimage-create.sh script to specify the Octavia Git " +"branch to build the image from." + +msgid "" +"Added hook to plugin.sh: `octavia_create_network_interface_device` and " +"`octavia_delete_network_interface_device`. For each of these functions, if " +"they are defined during stack (respectively unstack), they are called to " +"create (respectively delete) the management network interface." +msgstr "" +"Added hook to plugin.sh: `octavia_create_network_interface_device` and " +"`octavia_delete_network_interface_device`. For each of these functions, if " +"they are defined during stack (respectively unstack), they are called to " +"create (respectively delete) the management network interface." + +msgid "" +"Added new tool ``octavia-status upgrade check``. 
This framework allows " +"adding various checks which can be run before a Octavia upgrade to ensure if " +"the upgrade can be performed safely." +msgstr "" +"Added new tool ``octavia-status upgrade check``. This framework allows " +"adding various checks which can be run before a Octavia upgrade to ensure if " +"the upgrade can be performed safely." + +msgid "" +"Added option 'sync_provisioning_status' to enable synchronizing provisioning " +"status of loadbalancers with the neutron-lbaas database. Enabling this " +"option will queue one additional message per amphora every heartbeat " +"interval." +msgstr "" +"Added option 'sync_provisioning_status' to enable synchronising provisioning " +"status of load balancers with the neutron-lbaas database. Enabling this " +"option will queue one additional message per amphora every heartbeat " +"interval." + +msgid "Added support for CentOS 8 amphora images." +msgstr "Added support for CentOS 8 amphora images." + +msgid "" +"Added support for TLS extension Application Layer Protocol Negotiation " +"(ALPN) to TLS-enabled pools. A new parameter ``alpn_protocols`` was added to " +"the Pool API." +msgstr "" +"Added support for TLS extension Application Layer Protocol Negotiation " +"(ALPN) to TLS-enabled pools. A new parameter ``alpn_protocols`` was added to " +"the Pool API." + +msgid "" +"Added support for TLS extension Application Layer Protocol Negotiation " +"(ALPN) to TLS-terminated HTTPS load balancers. A new parameter " +"``alpn_protocols`` was added to the Listener API." +msgstr "" +"Added support for TLS extension Application Layer Protocol Negotiation " +"(ALPN) to TLS-terminated HTTPS load balancers. A new parameter " +"``alpn_protocols`` was added to the Listener API." + +msgid "Added support for keystone default roles and system token scopes." +msgstr "Added support for Keystone default roles and system token scopes." + +msgid "Added support for nftables to the devstack plugin and the amphora." +msgstr "Added support for nftables to the devstack plugin and the amphora." + +msgid "Added support for proxy protocol version 2." +msgstr "Added support for proxy protocol version 2." + +msgid "" +"Added support to VIP access control list. Users can now limit incoming " +"traffic to a set of allowed CIDRs." +msgstr "" +"Added support to VIP access control list. Users can now limit incoming " +"traffic to a set of allowed CIDRs." + +msgid "Added support to create RHEL 8 amphora images." +msgstr "Added support to create RHEL 8 amphora images." + +msgid "" +"Added support to debug with the Python Visual Studio Debugger engine (ptvsd)." +msgstr "" +"Added support to debug with the Python Visual Studio Debugger engine (ptvsd)." + +msgid "Added tags property for Octavia resources. It includes:" +msgstr "Added tags property for Octavia resources. It includes:" + +msgid "" +"Added the 'failover' sub-resource for the Amphora API. Each amphora can be " +"triggered to failover by sending a PUT (with an empty body) to the resource " +"``/v2.0/octavia/amphorae//failover``. It will cause the amphora to be " +"recycled and replaced, in the same way as the health-triggered failover." +msgstr "" +"Added the 'failover' sub-resource for the Amphora API. Each amphora can be " +"triggered to failover by sending a PUT (with an empty body) to the resource " +"``/v2.0/octavia/amphorae//failover``. It will cause the amphora to be " +"recycled and replaced, in the same way as the health-triggered failover." 
+ +msgid "Added the ability to delete amphora that are not in use." +msgstr "Added the ability to delete amphora that are not in use." + +msgid "" +"Added the oslo-middleware healthcheck app to the Octavia API. Hitting /" +"healthcheck will return a 200. This is enabled via the " +"[api_settings]healthcheck_enabled setting and is disabled by default." +msgstr "" +"Added the oslo-middleware healthcheck app to the Octavia API. Hitting /" +"healthcheck will return a 200. This is enabled via the " +"[api_settings]healthcheck_enabled setting and is disabled by default." + +msgid "" +"Adding `ID` column to the health_monitor table in Octavia, whose value is " +"same as the `pool_id` column. The database needs to be upgraded first, " +"followed by upgrade and restart of the API servers." +msgstr "" +"Adding `ID` column to the health_monitor table in Octavia, whose value is " +"same as the `pool_id` column. The database needs to be upgraded first, " +"followed by upgrade and restart of the API servers." + +msgid "" +"Adding a member with different IP protocol version than the VIP IP protocol " +"version in a UDP load balancer caused a crash in the amphora. A validation " +"step in the amphora driver now prevents mixing IP protocol versions in UDP " +"load balancers." +msgstr "" +"Adding a member with different IP protocol version than the VIP IP protocol " +"version in a UDP load balancer caused a crash in the amphora. A validation " +"step in the amphora driver now prevents mixing IP protocol versions in UDP " +"load balancers." + +msgid "Adding support for the listener X-Forwarded-Proto header insertion." +msgstr "Adding support for the listener X-Forwarded-Proto header insertion." + +msgid "" +"Adds a configuration option, \"reserved_ips\" that allows the operator to " +"block addresses from being used in load balancer members. The default " +"setting blocks the nova metadata service address." +msgstr "" +"Adds a configuration option, \"reserved_ips\" that allows the operator to " +"block addresses from being used in load balancer members. The default " +"setting blocks the Nova metadata service address." + +msgid "" +"Adds a health monitor type of UDP-CONNECT that does a basic UDP port connect." +msgstr "" +"Adds a health monitor type of UDP-CONNECT that does a basic UDP port connect." + +msgid "Adds a new config parameter to specify the anti-affinity policy" +msgstr "Adds a new config parameter to specify the anti-affinity policy" + +msgid "Adds an administrator API to access per-amphora statistics." +msgstr "Adds an administrator API to access per-amphora statistics." + +msgid "Adds quota support to the Octavia API." +msgstr "Adds quota support to the Octavia API." + +msgid "Adds support for IPv6" +msgstr "Adds support for IPv6" + +msgid "" +"Adds support for Layer 7 switching and shared pools features to Octavia. " +"This supports the equivalent feature added to Neutron LBaaS v2." +msgstr "" +"Adds support for Layer 7 switching and shared pools features to Octavia. " +"This supports the equivalent feature added to Neutron LBaaS v2." + +msgid "" +"Adds support for PKCS7 PEM or DER encoded intermediate certificate bundles " +"for TERMINATED_HTTPS listeners." +msgstr "" +"Adds support for PKCS7 PEM or DER encoded intermediate certificate bundles " +"for TERMINATED_HTTPS listeners." + +msgid "Adds support for amphora images that use systemd." +msgstr "Adds support for Amphora images that use systemd." + +msgid "Adds support for networks that do not have DHCP services enabled." 
+msgstr "Adds support for networks that do not have DHCP services enabled." + +msgid "Adds support for the driver agent to query for load balancer objects." +msgstr "Adds support for the driver agent to query for load balancer objects." + +msgid "Adds tables for active/standby." +msgstr "Adds tables for active/standby." + +msgid "" +"Adds the ability to define L7 rules based on TLS client authentication " +"information. The new L7 rules are\\: \"L7RULE_TYPE_SSL_CONN_HAS_CERT\", " +"\"L7RULE_TYPE_VERIFY_RESULT\", and \"L7RULE_TYPE_DN_FIELD\"." +msgstr "" +"Adds the ability to define L7 rules based on TLS client authentication " +"information. The new L7 rules are\\: \"L7RULE_TYPE_SSL_CONN_HAS_CERT\", " +"\"L7RULE_TYPE_VERIFY_RESULT\", and \"L7RULE_TYPE_DN_FIELD\"." + +msgid "" +"After setting \"auth_strategy = keystone\" all incoming requests to Octavia " +"API will be verified using Keystone are they send by authenticated person. " +"By default that option is disabled because Neutron LBaaS v2 is not " +"supporting that functionality properly." +msgstr "" +"After setting \"auth_strategy = keystone\" all incoming requests to Octavia " +"API will be verified using Keystone are they send by authenticated person. " +"By default that option is disabled because Neutron LBaaS v2 is not " +"supporting that functionality properly." + +msgid "" +"After this upgrade, users will no longer be able use network resources they " +"cannot see or \"show\" on load balancers. Operators can revert this behavior " +"by setting the \"allow_invisible_reourece_usage\" configuration file setting " +"to ``True``." +msgstr "" +"After this upgrade, users will no longer be able use network resources they " +"cannot see or \"show\" on load balancers. Operators can revert this " +"behaviour by setting the \"allow_invisible_reourece_usage\" configuration " +"file setting to ``True``." + +msgid "" +"After this upgrade, users will no longer be able use network resources they " +"cannot see or \"show\" on load balancers. Operators can revert this behavior " +"by setting the \"allow_invisible_resource_usage\" configuration file setting " +"to ``True``." +msgstr "" +"After this upgrade, users will no longer be able use network resources they " +"cannot see or \"show\" on load balancers. Operators can revert this " +"behaviour by setting the \"allow_invisible_resource_usage\" configuration " +"file setting to ``True``." + +msgid "" +"All pools configured under OVN provider driver are automatically migrated to " +"SOURCE_IP_PORT algorithm. Previously algorithm was named as ROUND_ROBIN, but " +"in fact it was not working like ROUND_ROBIN. After investigating, it was " +"observed that core OVN actually utilizes a 5 Tuple Hash/RSS Hash in DPDK/" +"Kernel as a Load Balancing algorithm. The 5 Tuple Hash has Source IP, " +"Destination IP, Protocol, Source Port, Destination Port. To reflect this the " +"name was changed to SOURCE_IP_PORT." +msgstr "" +"All pools configured under OVN provider driver are automatically migrated to " +"SOURCE_IP_PORT algorithm. Previously algorithm was named as ROUND_ROBIN, but " +"in fact it was not working like ROUND_ROBIN. After investigating, it was " +"observed that core OVN actually utilises a 5 Tuple Hash/RSS Hash in DPDK/" +"Kernel as a Load Balancing algorithm. The 5 Tuple Hash has Source IP, " +"Destination IP, Protocol, Source Port, Destination Port. To reflect this the " +"name was changed to SOURCE_IP_PORT." + +msgid "" +"Allow creation of volume based amphora. 
Many deploy production use volume " +"based instances because of more flexibility. Octavia will create volume and " +"attach this to the amphora." +msgstr "" +"Allow creation of volume based amphora. Many deploy production use volume " +"based instances because of more flexibility. Octavia will create volume and " +"attach this to the amphora." + +msgid "" +"Allow the loadbalancer's VIP to be created on the same network as the " +"management interface." +msgstr "" +"Allow the load balancer's VIP to be created on the same network as the " +"management interface." + +msgid "" +"Allows the operator to optionally restrict the amphora glance image " +"selection to a specific owner id. This is a recommended security setting for " +"clouds that allow user uploadable images." +msgstr "" +"Allows the operator to optionally restrict the amphora glance image " +"selection to a specific owner id. This is a recommended security setting for " +"clouds that allow user uploadable images." + +msgid "" +"Amphora API now can return the field `compute_flavor` which is the ID of the " +"compute instance flavor used to boot the amphora." +msgstr "" +"Amphora API now can return the field `compute_flavor` which is the ID of the " +"compute instance flavour used to boot the amphora." + +msgid "" +"Amphora API now returns the field `image_id` which is the ID of the glance " +"image used to boot the amphora." +msgstr "" +"Amphora API now returns the field `image_id` which is the ID of the glance " +"image used to boot the amphora." + +msgid "" +"Amphora failover is supported when active/standby is enabled. Should the " +"master or backup amphora fail, the health manager will rebuild it." +msgstr "" +"Amphora failover is supported when active/standby is enabled. Should the " +"master or backup Amphora fail, the health manager will rebuild it." + +msgid "Amphora image support for RH Linux flavors." +msgstr "Amphora image support for RH Linux flavours." + +msgid "" +"Amphora images with HAProxy older than 1.6 (CentOS 7, etc.) will still use " +"health monitor type TCP when PING is selected by the user." +msgstr "" +"Amphora images with HAProxy older than 1.6 (CentOS 7, etc.) will still use " +"health monitor type TCP when PING is selected by the user." + +msgid "" +"Amphora network configuration for the VIP interface and the pool member " +"interfaces are now applied with the amphora-interface tool. amphora-" +"interface uses pyroute2 low-level functions to configure the interfaces " +"instead of distribution-specific tools such as \"network-scripts\" or \"/etc/" +"network/interfaces\" files." +msgstr "" +"Amphora network configuration for the VIP interface and the pool member " +"interfaces are now applied with the amphora-interface tool. amphora-" +"interface uses pyroute2 low-level functions to configure the interfaces " +"instead of distribution-specific tools such as \"network-scripts\" or \"/etc/" +"network/interfaces\" files." + +msgid "" +"Amphora will need to be updated to a new image with this version of the " +"agent and ping-wrapper.sh script prior to updating the Octavia controllers. " +"If a load balancer is using a health monitor of type PING with an amphora " +"image that has not been updated, the next configuration change to the load " +"balancer will cause it to go into an ERROR state until it is failed over to " +"an updated image." +msgstr "" +"Amphora will need to be updated to a new image with this version of the " +"agent and ping-wrapper.sh script prior to updating the Octavia controllers. 
" +"If a load balancer is using a health monitor of type PING with an Amphora " +"image that has not been updated, the next configuration change to the load " +"balancer will cause it to go into an ERROR state until it is failed over to " +"an updated image." + +msgid "" +"Amphora with a terminated HTTPS load balancer can no longer be rebooted. If " +"they reboot, they will trigger a failover of the amphora." +msgstr "" +"Amphora with a terminated HTTPS load balancer can no longer be rebooted. If " +"they reboot, they will trigger a failover of the Amphora." + +msgid "Amphorae are unable to provide tenant flow logs for UDP listeners." +msgstr "Amphorae are unable to provide tenant flow logs for UDP listeners." + +msgid "" +"Amphorae that are booting for a specific loadbalancer will now be linked to " +"that loadbalancer immediately upon creation. Previously this would not " +"happen until near the end of the process, leaving a gap during booting " +"during which is was difficult to understand which booting amphora belonged " +"to which loadbalancer. This was especially problematic when attempting to " +"troubleshoot loadbalancers that entered ERROR status due to boot issues." +msgstr "" +"Amphorae that are booting for a specific loadbalancer will now be linked to " +"that loadbalancer immediately upon creation. Previously this would not " +"happen until near the end of the process, leaving a gap during booting " +"during which is was difficult to understand which booting amphora belonged " +"to which loadbalancer. This was especially problematic when attempting to " +"troubleshoot loadbalancers that entered ERROR status due to boot issues." + +msgid "" +"An amphora image update is recommended to pick up a workaround to an HAProxy " +"issue where it would fail to reload on configuration change should the local " +"peer name start with \"-x\"." +msgstr "" +"An amphora image update is recommended to pick up a workaround to an HAProxy " +"issue where it would fail to reload on configuration change should the local " +"peer name start with \"-x\"." + +msgid "" +"An operator documentation page has been added to list known Octavia provider " +"drivers and provide links to those drivers. Non-reference drivers, drivers " +"other than the \"amphora\" driver, will be outside of the octavia code " +"repository but are dynamically loadable via a well defined interface " +"described in the provider driver developers guide." +msgstr "" +"An operator documentation page has been added to list known Octavia provider " +"drivers and provide links to those drivers. Non-reference drivers, drivers " +"other than the \"amphora\" driver, will be outside of the Octavia code " +"repository but are dynamically loadable via a well defined interface " +"described in the provider driver developers guide." + +msgid "" +"As a followup to the fix that resolved CVE-2018-16856, Octavia will now " +"encrypt certificates and keys used for secure communication with amphorae, " +"in its internal workflows. Octavia used to exclude debug-level log prints " +"for specific tasks and flows that were explicitly specified by name, a " +"method that is susceptive to code changes." +msgstr "" +"As a followup to the fix that resolved CVE-2018-16856, Octavia will now " +"encrypt certificates and keys used for secure communication with amphorae, " +"in its internal workflows. 
Octavia used to exclude debug-level log prints " +"for specific tasks and flows that were explicitly specified by name, a " +"method that is susceptible to code changes." + +msgid "" +"As part of GDPR compliance, connection logs might be considered personal " +"data and might need to follow specific data retention policies. Disabling " +"connection logging might aid in making Octavia compliant by preventing the " +"output of such data. As always, consult with an expert on compliance prior " +"to making changes." +msgstr "" +"As part of GDPR compliance, connection logs might be considered personal " +"data and might need to follow specific data retention policies. Disabling " +"connection logging might aid in making Octavia compliant by preventing the " +"output of such data. As always, consult with an expert on compliance prior " +"to making changes." + +msgid "" +"Availability zone profiles can now override the ``valid_vip_networks`` " +"configuration option." +msgstr "" +"Availability zone profiles can now override the ``valid_vip_networks`` " +"configuration option." + +msgid "" +"Backend re-encryption allows users to configure pools to initiate TLS " +"connections to the backend member servers. This enables load balancers to " +"authenticate and encrypt connections from the load balancer to the backend " +"member server." +msgstr "" +"Backend re-encryption allows users to configure pools to initiate TLS " +"connections to the backend member servers. This enables load balancers to " +"authenticate and encrypt connections from the load balancer to the backend " +"member server." + +msgid "Bug Fixes" +msgstr "Bug Fixes" + +msgid "" +"Certificate and key storage for terminated HTTPS load balancers is now in an " +"encrypted ramfs path inside the amphora." +msgstr "" +"Certificate and key storage for terminated HTTPS load balancers is now in an " +"encrypted ramfs path inside the Amphora." + +msgid "" +"Certificate bundles can now be stored in any backend Castellan supports, and " +"can be retrieved via a Castellan driver, even if Barbican is not deployed." +msgstr "" +"Certificate bundles can now be stored in any backend Castellan supports, and " +"can be retrieved via a Castellan driver, even if Barbican is not deployed." + +msgid "" +"Cloud deployers can set `api_settings.allow_ping_health_monitors = False` in " +"`octavia.conf` to disable the ability to create PING health monitors." +msgstr "" +"Cloud deployers can set `api_settings.allow_ping_health_monitors = False` in " +"`octavia.conf` to disable the ability to create PING health monitors." + +msgid "" +"Communication between the control-plane and the amphora-agent now uses " +"minimum TLSv1.2 by default, and is configurable. The previous default of " +"SSLv2/3 is widely considered insecure." +msgstr "" +"Communication between the control-plane and the amphora-agent now uses " +"minimum TLSv1.2 by default, and is configurable. The previous default of " +"SSLv2/3 is widely considered insecure." + +msgid "" +"Config option `amp_ssh_access_allowed` is deprecated, as it overlaps with " +"`amp_ssh_key_name` in functionality and is not needed. Simply leave the " +"variable `amp_ssh_key_name` blank and no ssh key will be installed. This is " +"the same result as using `amp_ssh_access_allowed = False`." +msgstr "" +"Config option `amp_ssh_access_allowed` is deprecated, as it overlaps with " +"`amp_ssh_key_name` in functionality and is not needed. Simply leave the " +"variable `amp_ssh_key_name` blank and no ssh key will be installed. 
This is " +"the same result as using `amp_ssh_access_allowed = False`." + +msgid "" +"Creating a member on a pool with no healthmonitor would sometimes briefly " +"update their operating status from `NO_MONITOR` to `OFFLINE` and back to " +"`NO_MONITOR` during the provisioning sequence. This flapping will no longer " +"occur." +msgstr "" +"Creating a member on a pool with no healthmonitor would sometimes briefly " +"update their operating status from `NO_MONITOR` to `OFFLINE` and back to " +"`NO_MONITOR` during the provisioning sequence. This flapping will no longer " +"occur." + +msgid "Current Series Release Notes" +msgstr "Current Series Release Notes" + +msgid "" +"Depending on how the other queue is set up additional passwords for the " +"other queue will be in the Octavia config file. Operators should take care " +"of setting up appropriate users with appropriate restrictions to the " +"topic(s) needed." +msgstr "" +"Depending on how the other queue is set up additional passwords for the " +"other queue will be in the Octavia config file. Operators should take care " +"of setting up appropriate users with appropriate restrictions to the " +"topic(s) needed." + +msgid "Deprecation Notes" +msgstr "Deprecation Notes" + +msgid "" +"Disabling connection logging might make it more difficult to audit systems " +"for unauthorized access, from which IPs it originated, and which assets were " +"compromised." +msgstr "" +"Disabling connection logging might make it more difficult to audit systems " +"for unauthorised access, from which IPs it originated, and which assets were " +"compromised." + +msgid "Extended support for Keystone API v3." +msgstr "Extended support for Keystone API v3." + +msgid "" +"Finally completely remove tenant_id, as it was deprecated along with the " +"keystone v2 API in Mitaka, which means we're free of it in Pike!" +msgstr "" +"Finally completely remove tenant_id, as it was deprecated along with the " +"keystone v2 API in Mitaka, which means we're free of it in Pike!" + +msgid "" +"Fixed an issue that caused failover to unsuccessful if the vip network was " +"not DHCP enabled." +msgstr "" +"Fixed an issue that caused failover to unsuccessful if the VIP network was " +"not DHCP enabled." + +msgid "" +"Fixed an issue where health monitors of type PING were really doing a TCP " +"health check." +msgstr "" +"Fixed an issue where health monitors of type PING were really doing a TCP " +"health check." + +msgid "" +"Fixed an issue where the amphora would fail to bring up the VIP if the VIP " +"network did not have a gateway specified in neutron." +msgstr "" +"Fixed an issue where the amphora would fail to bring up the VIP if the VIP " +"network did not have a gateway specified in Neutron." + +msgid "" +"Fixes a bug where unspecified or unlimited listener connection limit " +"settings would lead to a 2000 connection limit when using the amphora/" +"octavia driver. This was the compiled in connection limit in some HAproxy " +"packages." +msgstr "" +"Fixes a bug where unspecified or unlimited listener connection limit " +"settings would lead to a 2000 connection limit when using the Amphora/" +"Octavia driver. This was the compiled in connection limit in some HAproxy " +"packages." + +msgid "" +"Fixes a neutron-lbaas LBaaS v2 API compatibility issue when requesting a " +"load balancer status tree via '/statuses'." +msgstr "" +"Fixes a neutron-lbaas LBaaS v2 API compatibility issue when requesting a " +"load balancer status tree via '/statuses'." 
+ +msgid "Fixes admin-state-up=False action for loadbalancer and listener." +msgstr "Fixes admin-state-up=False action for load balancer and listener." + +msgid "" +"Fixes an issue where VIP return traffic was always routed, if a gateway was " +"defined, through the gateway address even if it was local traffic." +msgstr "" +"Fixes an issue where VIP return traffic was always routed, if a gateway was " +"defined, through the gateway address even if it was local traffic." + +msgid "" +"Fixes an issue where if more than one amphora fails at the same time, " +"failover might not fully complete, leaving the load balancer in ERROR." +msgstr "" +"Fixes an issue where if more than one Amphora fails at the same time, " +"failover might not fully complete, leaving the load balancer in ERROR." + +msgid "" +"Fixes an issue with hmac.compare_digest on python3 that could cause health " +"manager \"calculated hmac not equal to msg hmac\" errors." +msgstr "" +"Fixes an issue with hmac.compare_digest on python3 that could cause health " +"manager \"calculated hmac not equal to msg hmac\" errors." + +msgid "" +"Fixes the v2 API returning \"DELETED\" records until the amphora_expiry_age " +"timeout expired. The API will now immediately return a 404 HTTP status code " +"when deleted objects are requested. The API version has been raised to v2.1 " +"to reflect this change." +msgstr "" +"Fixes the v2 API returning \"DELETED\" records until the amphora_expiry_age " +"timeout expired. The API will now immediately return a 404 HTTP status code " +"when deleted objects are requested. The API version has been raised to v2.1 " +"to reflect this change." + +msgid "" +"For the OpenStack Pike release, the Octavia team is excited to announce " +"Octavia version 1.0.0 and introduce the Octavia v2 API. Octavia can now be " +"deployed without neutron-lbaas as a standalone endpoint. The Octavia v2 API " +"is fully backward compatible with the neutron-lbaas v2 API and is a superset " +"of the neutron-lbaas v2 API." +msgstr "" +"For the OpenStack Pike release, the Octavia team is excited to announce " +"Octavia version 1.0.0 and introduce the Octavia v2 API. Octavia can now be " +"deployed without neutron-lbaas as a standalone endpoint. The Octavia v2 API " +"is fully backward compatible with the neutron-lbaas v2 API and is a superset " +"of the neutron-lbaas v2 API." + +msgid "" +"For the diskimage-create script, the BASE_OS_MIRROR environment variable was " +"renamed to DIB_DISTRIBUTION_MIRROR" +msgstr "" +"For the diskimage-create script, the BASE_OS_MIRROR environment variable was " +"renamed to DIB_DISTRIBUTION_MIRROR" + +msgid "" +"From configuration file section \"keystone_authtoken_v3\" was removed and " +"all parameters are stored in \"keystone_authtoken\" section of configuration " +"file." +msgstr "" +"From configuration file section \"keystone_authtoken_v3\" was removed and " +"all parameters are stored in \"keystone_authtoken\" section of configuration " +"file." + +msgid "" +"Glance image containing the latest Amphora image can now be referenced using " +"a Glance tag. To use the feature, set amp_image_tag in [controller_worker]. " +"Note that amp_image_id should be unset for the new feature to take into " +"effect." +msgstr "" +"Glance image containing the latest Amphora image can now be referenced using " +"a Glance tag. To use the feature, set amp_image_tag in [controller_worker]. " +"Note that amp_image_id should be unset for the new feature to take into " +"effect." 
+ +msgid "" +"Health Monitor type \"HTTPS\" now correctly performs the configured check. " +"This is done with all certificate validation disabled, so it will not work " +"if backend members are performing client certificate validation." +msgstr "" +"Health Monitor type \"HTTPS\" now correctly performs the configured check. " +"This is done with all certificate validation disabled, so it will not work " +"if backend members are performing client certificate validation." + +msgid "" +"If users have configured Health Monitors of type \"HTTPS\" and are expecting " +"a simple \"TLS-HELLO\" check, they will need to recreate their monitor with " +"the new \"TLS-HELLO\" type." +msgstr "" +"If users have configured Health Monitors of type \"HTTPS\" and are expecting " +"a simple \"TLS-HELLO\" check, they will need to recreate their monitor with " +"the new \"TLS-HELLO\" type." + +msgid "" +"Improvements to the keepalived system used in active/standby topologies. " +"keepalived is now monitored for health by the amphora agent (previously just " +"by the init system) and a systemd race condition between keepalived and " +"haproxy have been resolved." +msgstr "" +"Improvements to the keepalived system used in active/standby topologies. " +"keepalived is now monitored for health by the amphora agent (previously just " +"by the init system) and a systemd race condition between keepalived and " +"HAProxy have been resolved." + +msgid "" +"Improves error messages returned to the user, such as errors for attempting " +"to add a second health monitor to a pool." +msgstr "" +"Improves error messages returned to the user, such as errors for attempting " +"to add a second health monitor to a pool." + +msgid "" +"In some enviornments (e.g. OSA) Neutron and Octavia use different queues (at " +"least different vhosts) and so if Octavia posts to the Octavia queue and " +"Neutron listens on the Neutron queue the events will never make it over." +msgstr "" +"In some environments (e.g. OSA) Neutron and Octavia use different queues (at " +"least different vhosts) and so if Octavia posts to the Octavia queue and " +"Neutron listens on the Neutron queue the events will never make it over." + +msgid "" +"Installed drivers need to be enabled for use in the Octavia configuration " +"file once you are ready to expose the driver to users." +msgstr "" +"Installed drivers need to be enabled for use in the Octavia configuration " +"file once you are ready to expose the driver to users." + +msgid "" +"It is now possible to completely remove sshd from the amphora image, to " +"further lock down access and increase security. If this is set, providing an " +"`amp_ssh_key_name` in config will install the key, but ssh access will not " +"be possible as sshd will not be running." +msgstr "" +"It is now possible to completely remove SSHd from the Amphora image, to " +"further lock down access and increase security. If this is set, providing an " +"`amp_ssh_key_name` in config will install the key, but ssh access will not " +"be possible as SSHd will not be running." + +msgid "" +"It is now possible to completely update a pool's member list as a batch " +"operation. Using a PUT request on the base member endpoint of a pool, you " +"can specify a list of member objects and the service will perform any " +"necessary creates/deletes/updates as a single operation." +msgstr "" +"It is now possible to completely update a pool's member list as a batch " +"operation. 
Using a PUT request on the base member endpoint of a pool, you "
+"can specify a list of member objects and the service will perform any "
+"necessary creates/deletes/updates as a single operation."
+
+msgid "Known Issues"
+msgstr "Known Issues"
+
+msgid ""
+"Layer 7 policies allow a tenant / user to define actions the load balancer "
+"may take other than routing requests to the default pool."
+msgstr ""
+"Layer 7 policies allow a tenant / user to define actions the load balancer "
+"may take other than routing requests to the default pool."
+
+msgid ""
+"Layer 7 rules control the logic behind whether a given Layer 7 policy is "
+"followed."
+msgstr ""
+"Layer 7 rules control the logic behind whether a given Layer 7 policy is "
+"followed."
+
+msgid "Liberty Series Release Notes"
+msgstr "Liberty Series Release Notes"
+
+msgid "Listeners have four new timeout settings:"
+msgstr "Listeners have four new timeout settings:"
+
+msgid ""
+"Members have a new boolean option `backup`. When set to `true`, the member "
+"will not receive traffic until all non-backup members are offline. Once all "
+"non-backup members are offline, traffic will begin balancing between the "
+"backup members."
+msgstr ""
+"Members have a new boolean option `backup`. When set to `true`, the member "
+"will not receive traffic until all non-backup members are offline. Once all "
+"non-backup members are offline, traffic will begin balancing between the "
+"backup members."
+
+msgid ""
+"Members that are disabled via `admin_state_up=False` are now rendered in the "
+"HAProxy configuration on the amphora as `disabled`. Previously they were not "
+"rendered at all. This means that disabled members will now appear in health "
+"messages, and will properly change status to OFFLINE."
+msgstr ""
+"Members that are disabled via `admin_state_up=False` are now rendered in the "
+"HAProxy configuration on the Amphora as `disabled`. Previously they were not "
+"rendered at all. This means that disabled members will now appear in health "
+"messages, and will properly change status to OFFLINE."
+
+msgid "Mitaka Series Release Notes"
+msgstr "Mitaka Series Release Notes"
+
+msgid ""
+"Neutron LBaaS was assigning the VIP port it created the user's project-id, "
+"thus allowing the user to attach Floating-IPs to the VIP port. Octavia, on "
+"the other hand, was assigning the Octavia project-id to the port, making it "
+"impossible for the user to attach a Floating IP. This patch brings Octavia's "
+"behavior in line with Neutron LBaaS and assigns the user's project-id to the "
+"VIP port created by Octavia."
+msgstr ""
+"Neutron LBaaS assigned the user's project-id to the VIP port it created, "
+"thus allowing the user to attach Floating-IPs to the VIP port. Octavia, on "
+"the other hand, was assigning the Octavia project-id to the port, making it "
+"impossible for the user to attach a Floating IP. This patch brings Octavia's "
+"behaviour in line with Neutron LBaaS and assigns the user's project-id to "
+"the VIP port created by Octavia."
+
+msgid "New Features"
+msgstr "New Features"
+
+msgid ""
+"New Health Monitor type \"TLS-HELLO\" to perform a simple TLS connection."
+msgstr ""
+"New Health Monitor type \"TLS-HELLO\" to perform a simple TLS connection."
+
+msgid ""
+"New option `load_balancer_expiry_age` is added to the `house_keeping` config "
+"section. It defines load balancer expiry age in seconds, the default value "
+"is 604800."
+msgstr "" +"New option `load_balancer_expiry_age` is added to the `house_keeping` config " +"section. It defines load balancer expiry age in seconds, the default value " +"is 604800." + +msgid "" +"New option in diskimage-create.sh `-n` to completely disable sshd on the " +"amphora." +msgstr "" +"New option in diskimage-create.sh `-n` to completely disable SSHd on the " +"Amphora image." + +msgid "Newton Series Release Notes" +msgstr "Newton Series Release Notes" + +msgid "" +"Note that while the Octavia v2 API now supports Role Bassed Access Control " +"(RBAC), the Octavia v1.0 API does not. The Octavia v1.0 API should not be " +"exposed publicly and should only be used internally such as for the neutron-" +"lbaas octavia driver. Publicly accessible instances of the Octavia API " +"should have the v1.0 API disabled via the Octavia configuration file." +msgstr "" +"Note that while the Octavia v2 API now supports Role Based Access Control " +"(RBAC), the Octavia v1.0 API does not. The Octavia v1.0 API should not be " +"exposed publicly and should only be used internally such as for the neutron-" +"lbaas Octavia driver. Publicly accessible instances of the Octavia API " +"should have the v1.0 API disabled via the Octavia configuration file." + +msgid "" +"Now Octavia API can accept the QoS Policy id from neutron to support the QoS " +"requirements towards Load Balancer VIP port when create/update load balancer." +msgstr "" +"Now Octavia API can accept the QoS Policy id from neutron to support the QoS " +"requirements towards Load Balancer VIP port when create/update load balancer." + +msgid "Ocata Series Release Notes" +msgstr "Ocata Series Release Notes" + +msgid "Octavia API now supports WSGI deplyment." +msgstr "Octavia API now supports WSGI deployment." + +msgid "Octavia Release Notes" +msgstr "Octavia Release Notes" + +msgid "" +"Octavia now has a v2 API that can be used as a standalone endpoint. The " +"Octavia v2 API is fully backward compatible with the neutron-lbaas v2 API " +"and is a superset of the neutron-lbaas v2 API. For more information see the " +"Octavia API reference: https://developer.openstack.org/api-ref/load-balancer/" +"v2/index.html" +msgstr "" +"Octavia now has a v2 API that can be used as a standalone endpoint. The " +"Octavia v2 API is fully backward compatible with the neutron-lbaas v2 API " +"and is a superset of the neutron-lbaas v2 API. For more information see the " +"Octavia API reference: https://developer.openstack.org/api-ref/load-balancer/" +"v2/index.html" + +msgid "" +"Octavia now has an up to date API reference for the Octavia v2 API. It is " +"available at: https://developer.openstack.org/api-ref/load-balancer/" +msgstr "" +"Octavia now has an up to date API reference for the Octavia v2 API. It is " +"available at: https://developer.openstack.org/api-ref/load-balancer/" + +msgid "" +"Octavia now has options to limit the amphora concurrent build rate. This may " +"be useful for deployments where nova can get overloaded. Amphora builds will " +"be prioritized in the following order: failover, normal, spares pool builds. " +"See the configuration guide for more information: https://docs.openstack.org/" +"octavia/latest/configuration/configref.html#haproxy_amphora.build_rate_limit" +msgstr "" +"Octavia now has options to limit the Amphora concurrent build rate. This may " +"be useful for deployments where Nova can get overloaded. Amphora builds will " +"be prioritised in the following order: failover, normal, spares pool builds. 
" +"See the configuration guide for more information: https://docs.openstack.org/" +"octavia/latest/configuration/configref.html#haproxy_amphora.build_rate_limit" + +msgid "" +"Octavia now supports provider drivers. This allows third party load " +"balancing drivers to be integrated with the Octavia v2 API. Users select the " +"\"provider\" for a load balancer at creation time." +msgstr "" +"Octavia now supports provider drivers. This allows third party load " +"balancing drivers to be integrated with the Octavia v2 API. Users select the " +"\"provider\" for a load balancer at creation time." + +msgid "" +"Octavia supports different Keystone APIs and choose authentication mechanism " +"based on configuration specified in \"keystone_authtoken\" section of " +"octavia.conf file." +msgstr "" +"Octavia supports different Keystone APIs and choose authentication mechanism " +"based on configuration specified in \"keystone_authtoken\" section of " +"octavia.conf file." + +msgid "" +"Octavia will use the OpenStack service type 'load-balancer'. For more " +"information about service types, see the Octavia API reference: https://" +"developer.openstack.org/api-ref/load-balancer/v2/index.html#service-endpoints" +msgstr "" +"Octavia will use the OpenStack service type 'load-balancer'. For more " +"information about service types, see the Octavia API reference: https://" +"developer.openstack.org/api-ref/load-balancer/v2/index.html#service-endpoints" + +msgid "Other Notes" +msgstr "Other Notes" + +msgid "Pike Series Release Notes" +msgstr "Pike Series Release Notes" + +msgid "" +"Policy.json enforcement in Octavia. * Enables verification of privileges on " +"specific API command for a specific user role and project_id." +msgstr "" +"Policy.json enforcement in Octavia. * Enables verification of privileges on " +"specific API command for a specific user role and project_id." + +msgid "Prelude" +msgstr "Prelude" + +msgid "" +"Private keys can no longer be password protected, as PKCS12 does not support " +"storing a passphrase in an explicitly defined way. Note that this is not " +"noticeably less secure than storing a passphrase protected private key in " +"the same place as the passphrase, as was the case with Barbican." +msgstr "" +"Private keys can no longer be password protected, as PKCS12 does not support " +"storing a passphrase in an explicitly defined way. Note that this is not " +"noticeably less secure than storing a passphrase protected private key in " +"the same place as the passphrase, as was the case with Barbican." + +msgid "" +"Provider of \"octavia\" has been deprecated in favor of \"amphora\" to " +"clarify the provider driver supporting the load balancer." +msgstr "" +"Provider of \"octavia\" has been deprecated in favour of \"amphora\" to " +"clarify the provider driver supporting the load balancer." + +msgid "Queens Series Release Notes" +msgstr "Queens Series Release Notes" + +msgid "" +"Remove duplicated config option 'cert_generator' in [controller_worker]. " +"Operators now should set it under [certificates]." +msgstr "" +"Remove duplicated config option 'cert_generator' in [controller_worker]. " +"Operators now should set it under [certificates]." + +msgid "" +"Resolved an issue that could cause provisioning status to become out of sync " +"between neutron-lbaas and octavia during high load." +msgstr "" +"Resolved an issue that could cause provisioning status to become out of sync " +"between neutron-lbaas and Octavia during high load." 
+ +msgid "Resolves an issue with subnets larger than /24" +msgstr "Resolves an issue with subnets larger than /24" + +msgid "Resolves an issue with using encrypted TLS private keys." +msgstr "Resolves an issue with using encrypted TLS private keys." + +msgid "Rocky Series Release Notes" +msgstr "Rocky Series Release Notes" + +msgid "Security Issues" +msgstr "Security Issues" + +msgid "" +"Session persistence is maintained between the active and standby amphora." +msgstr "" +"Session persistence is maintained between the active and standby amphora." + +msgid "" +"Several API related variables are moving to their own section " +"`api_settings`. bind_host bind_port api_handler allow_pagination " +"allow_sorting pagination_max_limit api_base_uri" +msgstr "" +"Several API related variables are moving to their own section " +"`api_settings`. bind_host bind_port api_handler allow_pagination " +"allow_sorting pagination_max_limit api_base_uri" + +msgid "" +"Shared pools allow listeners or Layer 7 REDIRECT_TO_POOL policies to share " +"back-end pools." +msgstr "" +"Shared pools allow listeners or Layer 7 REDIRECT_TO_POOL policies to share " +"back-end pools." + +msgid "" +"Shared-pools introduces a new ``load_balancer_id`` column into the ``pools`` " +"table." +msgstr "" +"Shared-pools introduces a new ``load_balancer_id`` column into the ``pools`` " +"table." + +msgid "" +"Some versions of HAProxy incorrectly reported nodes in DRAIN status as being " +"UP, and Octavia code was written around this incorrect reporting. This has " +"been fixed in some versions of HAProxy and is now handled properly in " +"Octavia as well. Now it is possible for members to be in the status " +"DRAINING. Note that this is masked when statuses are forwarded to neutron-" +"lbaas in the eventstream, so no compatibility change is necessary." +msgstr "" +"Some versions of HAProxy incorrectly reported nodes in DRAIN status as being " +"UP, and Octavia code was written around this incorrect reporting. This has " +"been fixed in some versions of HAProxy and is now handled properly in " +"Octavia as well. Now it is possible for members to be in the status " +"DRAINING. Note that this is masked when statuses are forwarded to neutron-" +"lbaas in the event stream, so no compatibility change is necessary." + +msgid "" +"Stale load balancer entries with DELETED provisioning_status are now cleaned-" +"up by housekeeper after if they are older than `load_balancer_expiry_age`." +msgstr "" +"Stale load balancer entries with DELETED provisioning_status are now cleaned-" +"up by housekeeper after if they are older than `load_balancer_expiry_age`." + +msgid "Start using reno to manage release notes." +msgstr "Start using Reno to manage release notes." + +msgid "Stein Series Release Notes" +msgstr "Stein Series Release Notes" + +msgid "Support for Keystone token authentication on frontend Octavia API." +msgstr "Support for Keystone token authentication on frontend Octavia API." + +msgid "" +"The \"use_upstart\" configuration option is now deprecated because the " +"amphora agent can now automatically discover the init system in use in the " +"amphora image." +msgstr "" +"The \"use_upstart\" configuration option is now deprecated because the " +"amphora agent can now automatically discover the init system in use in the " +"Amphora image." + +msgid "" +"The Octavia API handlers are now deprecated and replaced by the new provider " +"driver support. 
Octavia API handlers will remain in the code to support the " +"Octavia v1 API (used for neutron-lbaas)." +msgstr "" +"The Octavia API handlers are now deprecated and replaced by the new provider " +"driver support. Octavia API handlers will remain in the code to support the " +"Octavia v1 API (used for neutron-lbaas)." + +msgid "" +"The Octavia project documentation has been reorganized as part of the " +"OpenStack documentation migration project. The Octavia project documentation " +"is now located at: https://docs.openstack.org/octavia/latest/" +msgstr "" +"The Octavia project documentation has been reorganised as part of the " +"OpenStack documentation migration project. The Octavia project documentation " +"is now located at: https://docs.openstack.org/octavia/latest/" + +msgid "" +"The Octavia v2 API now supports Role Based Access Control (RBAC). The " +"default rules require users to have a load-balancer_* role to be able to " +"access the Octavia v2 API. This can be overriden with the admin_or_owner-" +"policy.json sample file provided. See the `Octavia Policies `_ document for more " +"information." +msgstr "" +"The Octavia v2 API now supports Role Based Access Control (RBAC). The " +"default rules require users to have a load-balancer_* role to be able to " +"access the Octavia v2 API. This can be overridden with the admin_or_owner-" +"policy.json sample file provided. See the `Octavia Policies `_ document for more " +"information." + +msgid "" +"The amphora haproxy user_group setting is now automatically detected for " +"Ubuntu, CentOS, Fedora, or RHEL based amphora." +msgstr "" +"The Amphora HAProxy user_group setting is now automatically detected for " +"Ubuntu, CentOS, Fedora, or RHEL based Amphora." + +msgid "" +"The amphora-agent is now able to distinguish between operating systems and " +"choose the right course of action to manage files and networking on each " +"Linux flavor." +msgstr "" +"The Amphora-agent is now able to distinguish between operating systems and " +"choose the right course of action to manage files and networking on each " +"Linux flavour." + +msgid "" +"The compute zone (if applicable) is now cached in the database and returned " +"in the Amphora API as `cached_zone`. Please note that this is only set at " +"the original time of provisioning, and could be stale for various reasons " +"(for example, if live-migrations have taken place due to maintenances). We " +"recommend it be used for reference only, unless you are absolutey certain it " +"is current in your environment. The source of truth is still the system you " +"use for compute." +msgstr "" +"The compute zone (if applicable) is now cached in the database and returned " +"in the Amphora API as `cached_zone`. Please note that this is only set at " +"the original time of provisioning, and could be stale for various reasons " +"(for example, if live-migrations have taken place due to maintenances). We " +"recommend it be used for reference only, unless you are absolutely certain " +"it is current in your environment. The source of truth is still the system " +"you use for compute." + +msgid "" +"The configuration setting auth_strategy is now set to keystone by default." +msgstr "" +"The configuration setting auth_strategy is now set to Keystone by default." 
+ +msgid "" +"The diskimage-create script now supports generic download mirrors via the " +"DIB_DISTRIBUTION_MIRROR environment variable, replacing the existing " +"distribution-specific elements" +msgstr "" +"The diskimage-create script now supports generic download mirrors via the " +"DIB_DISTRIBUTION_MIRROR environment variable, replacing the existing " +"distribution-specific elements" + +msgid "" +"The diskimage-create script supports different operating system flavors such " +"as Ubuntu (the default option), CentOS, Fedora and RHEL. Adaptations were " +"made to several elements to ensure all images are operational." +msgstr "" +"The diskimage-create script supports different operating system flavours " +"such as Ubuntu (the default option), CentOS, Fedora and RHEL. Adaptations " +"were made to several elements to ensure all images are operational." + +msgid "" +"The fix for the hmac.compare_digest on python3 requires you to upgrade your " +"health managers before updating the amphora image. The health manager is " +"compatible with older amphora images, but older controllers will reject the " +"health heartbeats from images with this fix." +msgstr "" +"The fix for the hmac.compare_digest on python3 requires you to upgrade your " +"health managers before updating the amphora image. The health manager is " +"compatible with older amphora images, but older controllers will reject the " +"health heartbeats from images with this fix." + +msgid "The keepalived improvements require the amphora image to be upgraded." +msgstr "The keepalived improvements require the amphora image to be upgraded." + +msgid "" +"The new option `[haproxy_amphora]/connection_logging` will disable logging " +"of connection data if set to False which can improve performance of the load " +"balancer and might aid compliance." +msgstr "" +"The new option `[haproxy_amphora]/connection_logging` will disable logging " +"of connection data if set to False which can improve performance of the load " +"balancer and might aid compliance." + +msgid "" +"The option ``[controller_worker]/amp_image_id`` has been deprecated since " +"Mitaka release and is now removed. This option was superseded by " +"``[controller_worker]/amp_image_tag`` option." +msgstr "" +"The option ``[controller_worker]/amp_image_id`` has been deprecated since " +"the Mitaka release and is now removed. This option was superseded by " +"``[controller_worker]/amp_image_tag`` option." + +msgid "" +"The project_id attribute of the POST method on the following objects is now " +"deprecated\\: listener, pool, health monitor, and member. These objects will " +"use the parent load balancer's project_id. Values passed into the project_id " +"on those objects will be ignored until the deprecation cycle has expired, at " +"which point they will cause an error." +msgstr "" +"The project_id attribute of the POST method on the following objects is now " +"deprecated\\: listener, pool, health monitor, and member. These objects will " +"use the parent load balancer's project_id. Values passed into the project_id " +"on those objects will be ignored until the deprecation cycle has expired, at " +"which point they will cause an error." + +msgid "" +"The provider driver support requires a database migration and follows " +"Octavia standard rolling upgrade procedures; database migration followed by " +"rolling control plane upgrades. Existing load balancers with no provider " +"specified will be assigned \"amphora\" as part of the database migration." 
+msgstr "" +"The provider driver support requires a database migration and follows " +"Octavia standard rolling upgrade procedures; database migration followed by " +"rolling control plane upgrades. Existing load balancers with no provider " +"specified will be assigned \"amphora\" as part of the database migration." + +msgid "" +"The quota objects named `health_monitor` and `load_balancer` have been " +"renamed to `healthmonitor` and `loadbalancer`, respectively. The old names " +"are deprecated, and will be removed in the T cycle." +msgstr "" +"The quota objects named `health_monitor` and `load_balancer` have been " +"renamed to `healthmonitor` and `loadbalancer`, respectively. The old names " +"are deprecated, and will be removed in the T cycle." + +msgid "The value for all of these fields is expected to be in milliseconds." +msgstr "The value for all of these fields is expected to be in milliseconds." + +msgid "There is now an API available to list enabled provider drivers." +msgstr "There is now an API available to list enabled provider drivers." + +msgid "" +"These custom distribution mirror elements for the diskimage-script were " +"removed: apt-mirror, centos-mirror, fedora-mirror" +msgstr "" +"These custom distribution mirror elements for the diskimage-script were " +"removed: apt-mirror, centos-mirror, fedora-mirror" + +msgid "" +"This adds a way to configure a custom queue for the event streamer thus " +"allowing to post messages to the Neutron queue if needed." +msgstr "" +"This adds a way to configure a custom queue for the event streamer thus " +"allowing to post messages to the Neutron queue if needed." + +msgid "" +"This feature add new configuration value \"auth_strategy\" which by default " +"is set for \"noauth\"." +msgstr "" +"This feature add new configuration value \"auth_strategy\" which by default " +"is set for \"noauth\"." + +msgid "To enable log offloading, the amphora image needs to be updated." +msgstr "To enable log offloading, the Amphora image needs to be updated." + +msgid "" +"To enabled encrypted ramfs storage for certificates and keys, you must " +"upgrade your amphora image." +msgstr "" +"To enabled encrypted ramfs storage for certificates and keys, you must " +"upgrade your amphora image." + +msgid "" +"To fix IPv6 VIP addresses, you must run the \"octavia-db-manage upgrade " +"head\" migration script." +msgstr "" +"To fix IPv6 VIP addresses, you must run the \"octavia-db-manage upgrade " +"head\" migration script." + +msgid "To fix the admin-state-up bug you must upgrade your amphora image." +msgstr "To fix the admin-state-up bug you must upgrade your Amphora image." + +msgid "" +"To fix the issue with active/standby load balancers or single topology load " +"balancers with members on the VIP subnet, you need to update the amphora " +"image." +msgstr "" +"To fix the issue with active/standby load balancers or single topology load " +"balancers with members on the VIP subnet, you need to update the Amphora " +"image." + +msgid "" +"To resolve the IPv6 VIP issues on active/standby load balancers you need to " +"build a new amphora image." +msgstr "" +"To resolve the IPv6 VIP issues on active/standby load balancers you need to " +"build a new Amphora image." + +msgid "" +"To support IPv6 a databse migration and amphora image update are required." +msgstr "" +"To support IPv6 a database migration and Amphora image update are required." + +msgid "" +"To support multi-VIP loadbalancers, a new amphora image must be built. 
It is " +"safe to upload the new image before the upgrade, as it is fully backwards " +"compatible." +msgstr "" +"To support multi-VIP load balancers, a new amphora image must be built. It " +"is safe to upload the new image before the upgrade, as it is fully backwards " +"compatible." + +msgid "To support networks without DHCP you must upgrade your amphora image." +msgstr "To support networks without DHCP you must upgrade your Amphora image." + +msgid "" +"To use CentOS, Fedora, or RHEL in your amphora image you must set the " +"user_group option, located in the [haproxy_amphora] section of the octavia." +"conf file to \"haproxy\". This will be made automatic in a future version." +msgstr "" +"To use CentOS, Fedora, or RHEL in your amphora image you must set the " +"user_group option, located in the [haproxy_amphora] section of the octavia." +"conf file to \"haproxy\". This will be made automatic in a future version." + +msgid "Train Series Release Notes" +msgstr "Train Series Release Notes" + +msgid "" +"Two new options are included with provider driver support. The " +"enabled_provider_drivers option defaults to \"amphora, octavia\" to support " +"existing Octavia load balancers. The default_provider_driver option defaults " +"to \"amphora\" for all new load balancers that do not specify a provider at " +"creation time. These defaults should cover most existing deployments." +msgstr "" +"Two new options are included with provider driver support. The " +"enabled_provider_drivers option defaults to \"amphora, octavia\" to support " +"existing Octavia load balancers. The default_provider_driver option defaults " +"to \"amphora\" for all new load balancers that do not specify a provider at " +"creation time. These defaults should cover most existing deployments." + +msgid "" +"Two new tables are created to handle Layer 7 switching. These are " +"``l7policy`` and ``l7rule``." +msgstr "" +"Two new tables are created to handle Layer 7 switching. These are " +"``l7policy`` and ``l7rule``." + +msgid "" +"Two new types of healthmonitoring are now valid for UDP listeners. Both " +"``HTTP`` and ``TCP`` check types can now be used." +msgstr "" +"Two new types of healthmonitoring are now valid for UDP listeners. Both " +"``HTTP`` and ``TCP`` check types can now be used." + +msgid "" +"UDP protocol support requires an update to the amphora image to support UDP " +"protocol statistics reporting and UDP-CONNECT health monitoring." +msgstr "" +"UDP protocol support requires an update to the amphora image to support UDP " +"protocol statistics reporting and UDP-CONNECT health monitoring." + +msgid "" +"Update Python base version from 3.6 to 3.8. As per Openstack Python runtime " +"versions policy Python 3.8 will be the the minimum Python version in the Zed " +"release cycle." +msgstr "" +"Update Python base version from 3.6 to 3.8. As per the OpenStack Python " +"runtime versions policy Python 3.8 will be the minimum Python version in the " +"Zed release cycle." + +msgid "Updates load balancer, listener, and amphora tables." +msgstr "Updates load balancer, listener, and Amphora tables." + +msgid "Upgrade Notes" +msgstr "Upgrade Notes" + +msgid "Upgrade requires a database migration." +msgstr "Upgrade requires a database migration." + +msgid "" +"Usage of ``castellan_cert_manager`` as cert_manager has been significantly " +"improved. Now you can define configuration options for castellan in octavia." +"conf and they will be passed properly to castellan beckend. 
This allows to "
+"use allowed castellan backends as for certificate storage."
+msgstr ""
+"Usage of ``castellan_cert_manager`` as cert_manager has been significantly "
+"improved. Now you can define configuration options for Castellan in octavia."
+"conf and they will be passed properly to the Castellan backend. This allows "
+"any supported Castellan backend to be used for certificate storage."
+
+msgid ""
+"Use of JSON policy files was deprecated by the ``oslo.policy`` library "
+"during the Victoria development cycle. As a result, this deprecation is "
+"being noted in the Wallaby cycle with an anticipated future removal of "
+"support by ``oslo.policy``. As such operators will need to convert to YAML "
+"policy files. Please see the upgrade notes for details on migration of any "
+"custom policy files."
+msgstr ""
+"The use of JSON policy files was deprecated by the ``oslo.policy`` library "
+"during the Victoria development cycle. As a result, this deprecation is "
+"being noted in the Wallaby cycle with an anticipated future removal of "
+"support by ``oslo.policy``. As such operators will need to convert to YAML "
+"policy files. Please see the upgrade notes for details on the migration of "
+"any custom policy files."
+
+msgid ""
+"Users can now use a reference to a single PKCS12 bundle as their "
+"`default_tls_container_ref` instead of a Barbican container with individual "
+"secret objects. PKCS12 supports bundling a private key, certificate, and "
+"intermediates. Private keys can no longer be passphrase protected when using "
+"PKCS12 bundles. No configuration change is necessary to enable this feature. "
+"Users may simply begin using this. Any use of the old style containers will "
+"be detected and automatically fall back to using the old Barbican driver."
+msgstr ""
+"Users can now use a reference to a single PKCS12 bundle as their "
+"`default_tls_container_ref` instead of a Barbican container with individual "
+"secret objects. PKCS12 supports bundling a private key, certificate, and "
+"intermediates. Private keys can no longer be passphrase protected when using "
+"PKCS12 bundles. No configuration change is necessary to enable this feature. "
+"Users may simply begin using this. Any use of the old style containers will "
+"be detected and automatically fall back to using the old Barbican driver."
+
+msgid "Ussuri Series Release Notes"
+msgstr "Ussuri Series Release Notes"
+
+msgid ""
+"Validate that the creation of L7 policies is compatible with the protocol of "
+"the listener in the Amphora driver. L7 policies are allowed for Terminated "
+"HTTPS or HTTP protocol listeners, but not for HTTPS, TCP or UDP protocols "
+"listeners."
+msgstr ""
+"Validate that the creation of L7 policies is compatible with the protocol of "
+"the listener in the Amphora driver. L7 policies are allowed for Terminated "
+"HTTPS or HTTP protocol listeners, but not for HTTPS, TCP or UDP protocol "
+"listeners."
+
+msgid "Victoria Series Release Notes"
+msgstr "Victoria Series Release Notes"
+
+msgid "Wallaby Series Release Notes"
+msgstr "Wallaby Series Release Notes"
+
+msgid ""
+"When enabled in the configuration file, Octavia will boot an active and "
+"standby amphora for each load balancer."
+msgstr ""
+"When enabled in the configuration file, Octavia will boot an active and "
+"standby Amphora for each load balancer."
+ +msgid "" +"When the amphora agent configuration update API is called on an amphora " +"running a version of the amphora agent that does not support configuration " +"updates, an ERROR log message will be posted to the controller log file " +"indicating that the amphora does not support agent configuration updates. In " +"this case, the amphora image should be updated to a newer version." +msgstr "" +"When the amphora agent configuration update API is called on an amphora " +"running a version of the amphora agent that does not support configuration " +"updates, an ERROR log message will be posted to the controller log file " +"indicating that the amphora does not support agent configuration updates. In " +"this case, the amphora image should be updated to a newer version." + +msgid "" +"Workaround an HAProxy issue where it would fail to reload on configuration " +"change should the local peer name start with \"-x\"." +msgstr "" +"Workaround an HAProxy issue where it would fail to reload on configuration " +"change should the local peer name start with \"-x\"." + +msgid "Works for HTTP and TERMINATED_HTTPS listeners." +msgstr "Works for HTTP and TERMINATED_HTTPS listeners." + +msgid "Xena Series Release Notes" +msgstr "Xena Series Release Notes" + +msgid "Yoga Series Release Notes" +msgstr "Yoga Series Release Notes" + +msgid "" +"You can now enable TLS backend re-encryption for connections to member " +"servers by enabling tls_enabled option on pools." +msgstr "" +"You can now enable TLS backend re-encryption for connections to member " +"servers by enabling tls_enabled option on pools." + +msgid "You can now enable TLS client authentication on listeners." +msgstr "You can now enable TLS client authentication on listeners." + +msgid "You can now filter API queries by the object tag." +msgstr "You can now filter API queries by the object tag." + +msgid "" +"You can now provide a certificate revocation list reference for listeners " +"using TLS client authentication." +msgstr "" +"You can now provide a certificate revocation list reference for listeners " +"using TLS client authentication." + +msgid "" +"You can now specify a ca_tls_container_ref and crl_container_ref on pools " +"for validating backend pool members using TLS." +msgstr "" +"You can now specify a ca_tls_container_ref and crl_container_ref on pools " +"for validating backend pool members using TLS." + +msgid "" +"You can now specify a certificate authority certificate reference, on " +"listeners, for use with TLS client authentication." +msgstr "" +"You can now specify a certificate authority certificate reference, on " +"listeners, for use with TLS client authentication." + +msgid "" +"You can now specify a tls_container_ref on pools for TLS client " +"authentication to pool members." +msgstr "" +"You can now specify a tls_container_ref on pools for TLS client " +"authentication to pool members." + +msgid "" +"You can now update the running configuration of the Octavia control plane " +"processes by sending the parent process a \"HUP\" signal. Note: The " +"configuration item must support mutation." +msgstr "" +"You can now update the running configuration of the Octavia control plane " +"processes by sending the parent process a \"HUP\" signal. Note: The " +"configuration item must support mutation." + +msgid "" +"You cannot mix IPv4 UDP listeners with IPv6 members at this time. 
This is " +"being tracked with this story https://storyboard.openstack.org/#!/" +"story/2003329" +msgstr "" +"You cannot mix IPv4 UDP listeners with IPv6 members at this time. This is " +"being tracked with this story https://storyboard.openstack.org/#!/" +"story/2003329" + +msgid "You must update the amphora image to support the SR-IOV VIP feature." +msgstr "You must update the amphora image to support the SR-IOV VIP feature." + +msgid "Zed Series Release Notes" +msgstr "Zed Series Release Notes" + +msgid "[DEFAULT] api_handler" +msgstr "[DEFAULT] api_handler" + +msgid "[DEFAULT] auth_strategy" +msgstr "[DEFAULT] auth_strategy" + +msgid "[DEFAULT] bind_host" +msgstr "[DEFAULT] bind_host" + +msgid "[DEFAULT] bind_port" +msgstr "[DEFAULT] bind_port" + +msgid "``[haproxy_amphora] respawn_count``" +msgstr "``[haproxy_amphora] respawn_count``" + +msgid "``[haproxy_amphora] respawn_interval``" +msgstr "``[haproxy_amphora] respawn_interval``" + +msgid "``[task_flow] jobboard_redis_sentinel_password``" +msgstr "``[task_flow] jobboard_redis_sentinel_password``" + +msgid "``[task_flow] jobboard_redis_sentinel_username``" +msgstr "``[task_flow] jobboard_redis_sentinel_username``" + +msgid "" +"``diskimage-create.sh`` has been updated to build Ubuntu Jammy (22.04) " +"amphora images per default." +msgstr "" +"``diskimage-create.sh`` has been updated to build Ubuntu Jammy (22.04) " +"amphora images per default." + +msgid "" +"``pools.load_balancer_id`` column is populated from ``listeners`` data using " +"ETL in the migration." +msgstr "" +"``pools.load_balancer_id`` column is populated from ``listeners`` data using " +"ETL in the migration." + +msgid "" +"`status_update_threads` config option for healthmanager is deprecated " +"because it is replaced as `health_update_threads` and `stats_update_threads`." +msgstr "" +"`status_update_threads` config option for healthmanager is deprecated " +"because it is replaced as `health_update_threads` and `stats_update_threads`." + +msgid "`timeout_client_data`: Frontend client inactivity timeout" +msgstr "`timeout_client_data`: Frontend client inactivity timeout" + +msgid "`timeout_member_connect`: Backend member connection timeout" +msgstr "`timeout_member_connect`: Backend member connection timeout" + +msgid "`timeout_member_data`: Backend member inactivity timeout" +msgstr "`timeout_member_data`: Backend member inactivity timeout" + +msgid "" +"`timeout_tcp_inspect`: Time to wait for TCP packets for content inspection" +msgstr "" +"`timeout_tcp_inspect`: Time to wait for TCP packets for content inspection" + +msgid "" +"agent_server_network_dir is now auto-detected for Ubuntu, CentOS, Fedora and " +"RHEL if one is not specified in the configuration file." +msgstr "" +"agent_server_network_dir is now auto-detected for Ubuntu, CentOS, Fedora and " +"RHEL if one is not specified in the configuration file." + +msgid "" +"amp_image_id option is deprecated and will be removed in one of the next " +"releases. Operators are adviced to migrate to the new amp_image_tag option." +msgstr "" +"amp_image_id option is deprecated and will be removed in one of the next " +"releases. Operators are advised to migrate to the new amp_image_tag option." + +msgid "" +"diskimage-create defaults now to distribution release 9 when selecting RHEL " +"as base OS and to release 9-stream when selecting CentOS as base OS." +msgstr "" +"diskimage-create defaults now to distribution release 9 when selecting RHEL " +"as base OS and to release 9-stream when selecting CentOS as base OS." 
+
+msgid ""
+"diskimage-create.sh used $AMP_OUTPUTFILENAME.$AMP_IMAGETYPE for constructing "
+"the image file path when checking the file size, which was not correct and "
+"caused an \"No such file or directory\" error."
+msgstr ""
+"diskimage-create.sh used $AMP_OUTPUTFILENAME.$AMP_IMAGETYPE for constructing "
+"the image file path when checking the file size, which was not correct and "
+"caused a \"No such file or directory\" error."
+
+msgid ""
+"haproxy user_group is no longer being used. it is now auto-detected for "
+"Ubuntu, CentOS, Fedora and RHEL based amphora images."
+msgstr ""
+"HAProxy user_group is no longer being used. It is now auto-detected for "
+"Ubuntu, CentOS, Fedora and RHEL based Amphora images."
diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst
new file mode 100644
index 0000000000..2f641558e3
--- /dev/null
+++ b/releasenotes/source/mitaka.rst
@@ -0,0 +1,6 @@
+==============================
+ Mitaka Series Release Notes
+==============================
+
+.. release-notes::
+   :branch: origin/stable/mitaka
diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst
new file mode 100644
index 0000000000..97036ed251
--- /dev/null
+++ b/releasenotes/source/newton.rst
@@ -0,0 +1,6 @@
+===================================
+ Newton Series Release Notes
+===================================
+
+.. release-notes::
+   :branch: origin/stable/newton
diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst
new file mode 100644
index 0000000000..ebe62f42e1
--- /dev/null
+++ b/releasenotes/source/ocata.rst
@@ -0,0 +1,6 @@
+===================================
+ Ocata Series Release Notes
+===================================
+
+.. release-notes::
+   :branch: origin/stable/ocata
diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst
new file mode 100644
index 0000000000..e43bfc0ce1
--- /dev/null
+++ b/releasenotes/source/pike.rst
@@ -0,0 +1,6 @@
+===================================
+ Pike Series Release Notes
+===================================
+
+.. release-notes::
+   :branch: stable/pike
diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst
new file mode 100644
index 0000000000..36ac6160ca
--- /dev/null
+++ b/releasenotes/source/queens.rst
@@ -0,0 +1,6 @@
+===================================
+ Queens Series Release Notes
+===================================
+
+.. release-notes::
+   :branch: stable/queens
diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst
new file mode 100644
index 0000000000..40dd517b75
--- /dev/null
+++ b/releasenotes/source/rocky.rst
@@ -0,0 +1,6 @@
+===================================
+ Rocky Series Release Notes
+===================================
+
+.. release-notes::
+   :branch: stable/rocky
diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst
new file mode 100644
index 0000000000..efaceb667b
--- /dev/null
+++ b/releasenotes/source/stein.rst
@@ -0,0 +1,6 @@
+===================================
+ Stein Series Release Notes
+===================================
+
+.. release-notes::
+   :branch: stable/stein
diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst
new file mode 100644
index 0000000000..583900393c
--- /dev/null
+++ b/releasenotes/source/train.rst
@@ -0,0 +1,6 @@
+==========================
+Train Series Release Notes
+==========================
+
+.. release-notes::
+   :branch: stable/train
diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst
new file mode 100644
index 0000000000..cd22aabccc
--- /dev/null
+++ b/releasenotes/source/unreleased.rst
@@ -0,0 +1,5 @@
+==============================
+ Current Series Release Notes
+==============================
+
+.. release-notes::
diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst
new file mode 100644
index 0000000000..e21e50e0c6
--- /dev/null
+++ b/releasenotes/source/ussuri.rst
@@ -0,0 +1,6 @@
+===========================
+Ussuri Series Release Notes
+===========================
+
+.. release-notes::
+   :branch: stable/ussuri
diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst
new file mode 100644
index 0000000000..8ce9334198
--- /dev/null
+++ b/releasenotes/source/victoria.rst
@@ -0,0 +1,6 @@
+=============================
+Victoria Series Release Notes
+=============================
+
+.. release-notes::
+   :branch: unmaintained/victoria
diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst
new file mode 100644
index 0000000000..bcf35c5f80
--- /dev/null
+++ b/releasenotes/source/wallaby.rst
@@ -0,0 +1,6 @@
+============================
+Wallaby Series Release Notes
+============================
+
+.. release-notes::
+   :branch: unmaintained/wallaby
diff --git a/releasenotes/source/xena.rst b/releasenotes/source/xena.rst
new file mode 100644
index 0000000000..d19eda4886
--- /dev/null
+++ b/releasenotes/source/xena.rst
@@ -0,0 +1,6 @@
+=========================
+Xena Series Release Notes
+=========================
+
+.. release-notes::
+   :branch: unmaintained/xena
diff --git a/releasenotes/source/yoga.rst b/releasenotes/source/yoga.rst
new file mode 100644
index 0000000000..43cafdea89
--- /dev/null
+++ b/releasenotes/source/yoga.rst
@@ -0,0 +1,6 @@
+=========================
+Yoga Series Release Notes
+=========================
+
+.. release-notes::
+   :branch: unmaintained/yoga
diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst
new file mode 100644
index 0000000000..6cc2b1554c
--- /dev/null
+++ b/releasenotes/source/zed.rst
@@ -0,0 +1,6 @@
+========================
+Zed Series Release Notes
+========================
+
+.. release-notes::
+   :branch: unmaintained/zed
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000..c3a1b9cd57
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,54 @@
+# Requirements lower bounds listed here are our best effort to keep them up to
+# date, but we do not test them, so there is no guarantee they are all correct.
+# If you find any incorrect lower bounds, let us know or propose a fix.
+ +alembic>=0.9.6 # MIT +cotyledon>=1.3.0 # Apache-2.0 +pecan>=1.3.2 # BSD +pbr>=3.1.1 # Apache-2.0 +SQLAlchemy>=1.2.19 # MIT +SQLAlchemy-Utils>=0.30.11 +futurist>=1.2.0 # Apache-2.0 +requests>=2.23.0 # Apache-2.0 +rfc3986>=1.2.0 # Apache-2.0 +keystoneauth1>=3.4.0 # Apache-2.0 +keystonemiddleware>=9.5.0 # Apache-2.0 +WebOb>=1.8.2 # MIT +stevedore>=1.20.0 # Apache-2.0 +openstacksdk>=0.103.0 # Apache-2.0 +oslo.config>=6.8.0 # Apache-2.0 +oslo.context>=2.22.0 # Apache-2.0 +oslo.db[mysql]>=8.4.0 # Apache-2.0 +oslo.i18n>=3.20.0 # Apache-2.0 +oslo.log>=4.3.0 # Apache-2.0 +oslo.messaging>=14.1.0 # Apache-2.0 +oslo.middleware>=4.0.1 # Apache-2.0 +oslo.policy>=4.5.0 # Apache-2.0 +oslo.reports>=1.18.0 # Apache-2.0 +oslo.serialization>=2.28.1 # Apache-2.0 +oslo.upgradecheck>=1.3.0 # Apache-2.0 +oslo.utils>=4.7.0 # Apache-2.0 +psutil>=5.7.1 # BSD +pyasn1!=0.2.3,>=0.1.8 # BSD +pyasn1-modules>=0.0.6 # BSD +python-barbicanclient>=4.5.2 # Apache-2.0 +python-glanceclient>=2.8.0 # Apache-2.0 +python-novaclient>=9.1.0 # Apache-2.0 +python-cinderclient>=3.3.0 # Apache-2.0 +WSME>=0.8.0 # MIT +Jinja2>=2.10 # BSD License (3 clause) +taskflow>=5.9.0 # Apache-2.0 +castellan>=0.16.0 # Apache-2.0 +tenacity>=5.0.4 # Apache-2.0 +distro>=1.2.0 # Apache-2.0 +jsonschema>=3.2.0 # MIT +octavia-lib>=3.8.0 # Apache-2.0 +setproctitle>=1.1.10 # BSD +python-dateutil>=2.7.0 # BSD + +#for the amphora api +Flask!=0.11,>=0.10 # BSD +cryptography>=42.0.0 # BSD/Apache-2.0 +pyroute2>=0.5.14;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2) +gunicorn>=19.9.0 # MIT +Werkzeug>=0.14.1 # BSD License diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000..2f4e59e437 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,121 @@ +[metadata] +name = octavia +summary = OpenStack Octavia Scalable Load Balancer as a Service +description_file = + README.rst +author = OpenStack +author_email = openstack-discuss@lists.openstack.org +home_page = https://docs.openstack.org/octavia/latest/ +python_requires = >=3.10 +classifier = + Development Status :: 5 - Production/Stable + Environment :: OpenStack + Intended Audience :: Developers + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 + Programming Language :: Python :: 3.12 + +[files] +packages = + octavia +data_files = + share/octavia = + LICENSE + README.rst + share/octavia/diskimage-create = + diskimage-create/diskimage-create.sh + diskimage-create/image-tests.sh + diskimage-create/README.rst + diskimage-create/requirements.txt + diskimage-create/test-requirements.txt + diskimage-create/tox.ini + diskimage-create/version.txt + +[entry_points] +console_scripts = + octavia-api = octavia.cmd.api:main + octavia-worker = octavia.cmd.octavia_worker:main + octavia-health-manager = octavia.cmd.health_manager:main + octavia-housekeeping = octavia.cmd.house_keeping:main + octavia-db-manage = octavia.db.migration.cli:main + octavia-driver-agent = octavia.cmd.driver_agent:main + amphora-agent = octavia.cmd.agent:main + haproxy-vrrp-check = octavia.cmd.haproxy_vrrp_check:main + octavia-status = octavia.cmd.status:main + amphora-health-checker = octavia.cmd.health_checker:main + amphora-interface = octavia.cmd.interface:main + prometheus-proxy = octavia.cmd.prometheus_proxy:main +octavia.api.drivers = + noop_driver = 
octavia.api.drivers.noop_driver.driver:NoopProviderDriver + noop_driver-alt = octavia.api.drivers.noop_driver.driver:NoopProviderDriver + amphora = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver + amphorav2 = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver + # octavia is an alias for backward compatibility + octavia = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver +octavia.amphora.drivers = + amphora_noop_driver = octavia.amphorae.drivers.noop_driver.driver:NoopAmphoraLoadBalancerDriver + amphora_haproxy_rest_driver = octavia.amphorae.drivers.haproxy.rest_api_driver:HaproxyAmphoraLoadBalancerDriver +octavia.statistics.drivers = + stats_logger = octavia.statistics.drivers.logger:StatsLogger + stats_db = octavia.statistics.drivers.update_db:StatsUpdateDb +octavia.amphora.udp_api_server = + keepalived_lvs = octavia.amphorae.backends.agent.api_server.keepalivedlvs:KeepalivedLvs +octavia.compute.drivers = + compute_noop_driver = octavia.compute.drivers.noop_driver.driver:NoopComputeDriver + compute_nova_driver = octavia.compute.drivers.nova_driver:VirtualMachineManager +octavia.driver_agent.provider_agents = + noop_agent = octavia.api.drivers.noop_driver.agent:noop_provider_agent +octavia.network.drivers = + network_noop_driver = octavia.network.drivers.noop_driver.driver:NoopNetworkDriver + allowed_address_pairs_driver = octavia.network.drivers.neutron.allowed_address_pairs:AllowedAddressPairsDriver +octavia.volume.drivers = + volume_noop_driver = octavia.volume.drivers.noop_driver.driver:NoopVolumeDriver + volume_cinder_driver = octavia.volume.drivers.cinder_driver:VolumeManager +octavia.image.drivers = + image_noop_driver = octavia.image.drivers.noop_driver.driver:NoopImageDriver + image_glance_driver = octavia.image.drivers.glance_driver:ImageManager +octavia.distributor.drivers = + distributor_noop_driver = octavia.distributor.drivers.noop_driver.driver:NoopDistributorDriver + single_VIP_amphora = octavia.distributor.drivers.single_VIP_amphora.driver:SingleVIPAmpDistributorDriver +octavia.cert_generator = + local_cert_generator = octavia.certificates.generator.local:LocalCertGenerator +octavia.cert_manager = + local_cert_manager = octavia.certificates.manager.local:LocalCertManager + barbican_cert_manager = octavia.certificates.manager.barbican:BarbicanCertManager + castellan_cert_manager = octavia.certificates.manager.castellan_mgr:CastellanCertManager + noop_cert_manager = octavia.certificates.manager.noop:NoopCertManager +octavia.barbican_auth = + barbican_acl_auth = octavia.certificates.common.auth.barbican_acl:BarbicanACLAuth +octavia.plugins = + hot_plug_plugin = octavia.controller.worker.v2.controller_worker:ControllerWorker +octavia.worker.jobboard_driver = + redis_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:RedisTaskFlowDriver + zookeeper_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:ZookeeperTaskFlowDriver + etcd_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:EtcdTaskFlowDriver +oslo.config.opts = + octavia = octavia.opts:list_opts +oslo.config.opts.defaults = + octavia = octavia.common.config:set_lib_defaults +oslo.policy.policies = + octavia = octavia.policies:list_rules +oslo.policy.enforcer = + octavia = octavia.common.policy:get_no_context_enforcer +oslo.middleware.healthcheck = + octavia_db_check = octavia.api.healthcheck.healthcheck_plugins:OctaviaDBHealthcheck + +[extras] +# Required in case of AmphoraV2 redis jobboard is used +redis = 
+ redis>=3.4.0 +# Required in case of AmphoraV2 zookeeper jobboard is used +zookeeper = + kazoo>=2.6.0 # Apache-2.0 +# Required by Etcd jobboard +etcd = + etcd3gw>=2.4.1 # Apache-2.0 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000..cd35c3c35b --- /dev/null +++ b/setup.py @@ -0,0 +1,20 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import setuptools + +setuptools.setup( + setup_requires=['pbr>=2.0.0'], + pbr=True) diff --git a/specs-tests/__init__.py b/specs-tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/specs-tests/test_titles.py b/specs-tests/test_titles.py new file mode 100644 index 0000000000..64c804c0b1 --- /dev/null +++ b/specs-tests/test_titles.py @@ -0,0 +1,113 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import glob + +import docutils.core +from docutils.parsers import rst +from docutils.parsers.rst import directives +from docutils.parsers.rst import roles +import testtools + + +class FakeDirective(rst.Directive): + has_content = True + + def run(self): + return [] + + +def fake_role(name, rawtext, text, lineno, inliner, + options=None, content=None): + return [], [] + + +directives.register_directive('nwdiag', FakeDirective) +directives.register_directive('actdiag', FakeDirective) +directives.register_directive('graphviz', FakeDirective) +roles.register_local_role('doc', fake_role) + + +class TestTitles(testtools.TestCase): + def _get_title(self, section_tree): + section = { + 'subtitles': [], + } + for node in section_tree: + if node.tagname == 'title': + section['name'] = node.rawsource + elif node.tagname == 'section': + subsection = self._get_title(node) + section['subtitles'].append(subsection['name']) + return section + + def _get_titles(self, spec): + titles = {} + for node in spec: + if node.tagname == 'section': + section = self._get_title(node) + titles[section['name']] = section['subtitles'] + return titles + + def _check_titles(self, titles): + self.assertEqual(7, len(titles)) + + problem = 'Problem description' + self.assertIn(problem, titles) + self.assertEqual(0, len(titles[problem])) + + proposed = 'Proposed change' + self.assertIn(proposed, titles) + self.assertIn('Alternatives', titles[proposed]) + self.assertIn('Data model impact', titles[proposed]) + self.assertIn('REST API impact', titles[proposed]) + self.assertIn('Security impact', titles[proposed]) + self.assertIn('Notifications impact', titles[proposed]) + self.assertIn('Other end user impact', titles[proposed]) + self.assertIn('Performance Impact', titles[proposed]) + self.assertIn('Other deployer impact', titles[proposed]) + self.assertIn('Developer impact', titles[proposed]) + + impl = 'Implementation' + self.assertIn(impl, titles) + self.assertEqual(2, len(titles[impl])) + self.assertIn('Assignee(s)', titles[impl]) + self.assertIn('Work Items', titles[impl]) + + deps = 'Dependencies' + self.assertIn(deps, titles) + self.assertEqual(0, len(titles[deps])) + + testing = 'Testing' + self.assertIn(testing, titles) + self.assertEqual(0, len(titles[testing])) + + docs = 'Documentation Impact' + self.assertIn(docs, titles) + self.assertEqual(0, len(titles[docs])) + + refs = 'References' + self.assertIn(refs, titles) + self.assertEqual(0, len(titles[refs])) + + def test_template(self): + files = set(glob.glob('specs/*.rst') + glob.glob('specs/*/*')) + files = files - set(glob.glob('specs/*/*.dot')) + files = files - set(glob.glob('specs/*/*.diag')) + for filename in files: + self.assertTrue(filename.endswith(".rst"), + "spec's file must use 'rst' extension.") + with open(filename) as f: + data = f.read() + spec = docutils.core.publish_doctree(data) + titles = self._get_titles(spec) + self._check_titles(titles) diff --git a/specs/example.dot b/specs/example.dot new file mode 100644 index 0000000000..6eabe0d2f7 --- /dev/null +++ b/specs/example.dot @@ -0,0 +1,34 @@ +/* This work is licensed under a Creative Commons Attribution 3.0 + * Unported License. 
+ * + * http://creativecommons.org/licenses/by/3.0/legalcode + */ +digraph G { + label="Sample Graph" + + subgraph cluster_0 { + style=filled; + color=lightgrey; + node [style=filled,color=white]; + a0 -> a1 -> a2 -> a3; + label = "process #1"; + } + + subgraph cluster_1 { + node [style=filled]; + b0 -> b1 -> b2 -> b3; + label = "process #2"; + color=blue + } + + start -> a0; + start -> b0; + a1 -> b3; + b2 -> a3; + a3 -> a0; + a3 -> end; + b3 -> end; + + start [shape=Mdiamond]; + end [shape=Msquare]; +} diff --git a/specs/skeleton.rst b/specs/skeleton.rst new file mode 100644 index 0000000000..ba77613de8 --- /dev/null +++ b/specs/skeleton.rst @@ -0,0 +1,82 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +========================================== +Title of your blueprint +========================================== + + +Problem description +=================== + + +Proposed change +=============== + + +Alternatives +------------ + + +Data model impact +----------------- + + +REST API impact +--------------- + + +Security impact +--------------- + + +Notifications impact +-------------------- + + +Other end user impact +--------------------- + + +Performance Impact +------------------ + + +Other deployer impact +--------------------- + + +Developer impact +---------------- + + +Implementation +============== + +Assignee(s) +----------- + + +Work Items +---------- + + +Dependencies +============ + + +Testing +======= + + +Documentation Impact +==================== + + +References +========== + + diff --git a/specs/template.rst b/specs/template.rst new file mode 100644 index 0000000000..cff7a9f4ad --- /dev/null +++ b/specs/template.rst @@ -0,0 +1,376 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +========================================== +Example Spec - The title of your blueprint +========================================== + +Include the URL of your launchpad blueprint: + +https://blueprints.launchpad.net/octavia/+spec/example + +Introduction paragraph -- why are we doing anything? A single paragraph of +prose that operators can understand. + +Some notes about using this template: + +* Your spec should be in ReSTructured text, like this template. + +* Please wrap text at 80 columns. + +* The filename in the git repository should match the launchpad URL, for + example a URL of: https://blueprints.launchpad.net/octavia/+spec/awesome-thing + should be named awesome-thing.rst + +* Please do not delete any of the sections in this template. If you have + nothing to say for a whole section, just write: None + +* For help with syntax, see http://sphinx-doc.org/rest.html + +* To test out your formatting, build the docs using tox, or see: + http://rst.ninjs.org + +* If you would like to provide a diagram with your spec, text representations + are preferred. http://asciiflow.com/ is a very nice tool to assist with + making ascii diagrams. These are described below. + For more complicated diagrams that need "real" graphics, yet still should + be in the git revision control system, GraphViz .dot files are acceptable. + If you require an image (screenshot) for your BP, attaching that to the BP + and checking it in is also accepted. However, text representations are + preferred. 
+
+* Diagram examples
+
+asciiflow::
+
+  +----------+     +-----------+     +----------+
+  |    A     |     |     B     |     |    C     |
+  |          +-----+           +-----+          |
+  +----------+     +-----------+     +----------+
+
+graphviz (external file)
+
+.. graphviz:: example.dot
+
+
+Problem description
+===================
+
+A detailed description of the problem:
+
+* For a new feature this might be use cases. Ensure you are clear about the
+  actors in each use case: End User vs Deployer
+
+* For a major reworking of something existing it would describe the
+  problems in that feature that are being addressed.
+
+
+Proposed change
+===============
+
+Here is where you cover the change you propose to make in detail. How do you
+propose to solve this problem?
+
+If this is one part of a larger effort make it clear where this piece ends. In
+other words, what's the scope of this effort?
+
+Alternatives
+------------
+
+What other ways could we do this thing? Why aren't we using those? This doesn't
+have to be a full literature review, but it should demonstrate that thought has
+been put into why the proposed solution is an appropriate one.
+
+Data model impact
+-----------------
+
+Changes which require modifications to the data model often have a wider impact
+on the system. The community often has strong opinions on how the data model
+should be evolved, from both a functional and performance perspective. It is
+therefore important to capture and gain agreement as early as possible on any
+proposed changes to the data model.
+
+Questions which need to be addressed by this section include:
+
+* What new data objects and/or database schema changes is this going to
+  require?
+
+* What database migrations will accompany this change?
+
+* How will the initial set of new data objects be generated? For example, if
+  you need to take into account existing instances or modify other existing
+  data, describe how that will work.
+
+REST API impact
+---------------
+
+Octavia includes several internal APIs (all of which should be versioned).
+In defining how API(s) are affected by this change, make sure to
+clearly indicate which API(s) specifically are being altered, which version
+of the API(s) are being altered, and other pertinent details as described
+below.
+
+While we are not using Neutron's attribute map facility since Octavia is
+not Neutron, following the tried-and-true guidelines Neutron uses around
+API changes is a good idea, including defining attribute map tables. For
+reference:
+
+For each API resource to be implemented using Neutron's attribute map
+facility (see the neutron.api.v2.attributes), describe the resource
+collection and specify the name, type, and other essential details of
+each new or modified attribute. A table similar to the following may
+be used:
+
++----------+-------+---------+---------+------------+--------------+
+|Attribute |Type   |Access   |Default  |Validation/ |Description   |
+|Name      |       |         |Value    |Conversion  |              |
++==========+=======+=========+=========+============+==============+
+|id        |string |RO, all  |generated|N/A         |identity      |
+|          |(UUID) |         |         |            |              |
++----------+-------+---------+---------+------------+--------------+
+|name      |string |RW, all  |''       |string      |human-readable|
+|          |       |         |         |            |name          |
++----------+-------+---------+---------+------------+--------------+
+|color     |string |RW, admin|'red'    |'red',      |color         |
+|          |       |         |         |'yellow', or|indicating    |
+|          |       |         |         |'green'     |state         |
++----------+-------+---------+---------+------------+--------------+
+
+
+Here is another example of the same table using csv-table:
+
+
+.. csv-table:: CSVTable
+   :header: Attribute Name,Type,Access,Default Value,Validation Conversion,Description
+
+   id,string (UUID),"RO, all",generated,N/A,identity
+   name,string,"RW, all","''",string,human-readable name
+   color,string,"RW, admin",red,"'red', 'yellow' or 'green'",color indicating state
+
+
+Each API method that is added or changed and does not use
+Neutron's attribute map facility should have the following:
+
+* Specification for the method
+
+  * A description of what the method does suitable for use in
+    user documentation
+
+  * Method type (POST/PUT/GET/DELETE)
+
+  * Normal http response code(s)
+
+  * Expected error http response code(s)
+
+    * A description for each possible error code should be included
+      describing semantic errors which can cause it such as
+      inconsistent parameters supplied to the method, or when an
+      instance is not in an appropriate state for the request to
+      succeed. Errors caused by syntactic problems covered by the JSON
+      schema definition do not need to be included.
+
+  * URL for the resource
+
+  * Parameters which can be passed via the url
+
+  * JSON schema definition for the body data if allowed
+
+  * JSON schema definition for the response data if any
+
+* Example use case including typical API samples for both data supplied
+  by the caller and the response
+
+* Discuss any API policy changes, and discuss what things a deployer needs to
+  think about when defining their API policy. This is in reference to the
+  policy.yaml file.
+
+Note that the schema should be defined as restrictively as
+possible. Parameters which are required should be marked as such and
+only under exceptional circumstances should additional parameters
+which are not defined in the schema be permitted (e.g.
+additionalProperties should be False).
+
+Reuse of existing predefined parameter types such as regexps for
+passwords and user defined names is highly encouraged.
+
+Security impact
+---------------
+
+Describe any potential security impact on the system. Some of the items to
+consider include:
+
+* Does this change touch sensitive data such as tokens, keys, or user data?
+
+* Does this change alter the API in a way that may impact security, such as
+  a new way to access sensitive information or a new way to log in?
+
+* Does this change involve cryptography or hashing?
+
+* Does this change require the use of sudo or any elevated privileges?
+
+* Does this change involve using or parsing user-provided data? This could
+  be directly at the API level or indirectly such as changes to a cache layer.
+
+* Can this change enable a resource exhaustion attack, such as allowing a
+  single API interaction to consume significant server resources? Some examples
+  of this include launching subprocesses for each connection, or entity
+  expansion attacks in XML.
+
+For more detailed guidance, please see the OpenStack Security Guidelines as
+a reference (https://wiki.openstack.org/wiki/Security/Guidelines). These
+guidelines are a work in progress and are designed to help you identify
+security best practices. For further information, feel free to reach out
+to the OpenStack Security Group at openstack-security@lists.openstack.org.
+
+Notifications impact
+--------------------
+
+Please specify any changes to notifications. Be that an extra notification,
+changes to an existing notification, or removing a notification.
+
+Other end user impact
+---------------------
+
+Aside from the API, are there other ways a user will interact with this
+feature? Keep in mind that 'user' in this context could mean either tenant or
+operator.
+
+* Does this change have an impact on openstacksdk? What does the user
+  interface there look like?
+
+Performance Impact
+------------------
+
+Describe any potential performance impact on the system, for example
+how often will new code be called, and is there a major change to the calling
+pattern of existing code.
+
+Examples of things to consider here include:
+
+* A periodic task might look like a small addition, but if it calls conductor
+  or another service, the load is multiplied by the number of nodes in the
+  system.
+
+* A small change in a utility function or a commonly used decorator can have a
+  large impact on performance.
+
+* Calls which result in database queries (whether direct or via conductor)
+  can have a profound impact on performance when called in critical sections
+  of the code.
+
+* Will the change include any locking, and if so what considerations are there
+  on holding the lock?
+
+Other deployer impact
+---------------------
+
+Discuss things that will affect how you deploy and configure OpenStack
+that have not already been mentioned, such as:
+
+* What config options are being added? Should they be more generic than
+  proposed (for example a flag that other hypervisor drivers might want to
+  implement as well)? Are the default values ones which will work well in
+  real deployments?
+
+* Is this a change that takes immediate effect after it's merged, or is it
+  something that has to be explicitly enabled?
+
+* If this change is a new binary, how would it be deployed?
+
+* Please state anything that those doing continuous deployment, or those
+  upgrading from the previous release, need to be aware of. Also describe
+  any plans to deprecate configuration values or features. For example, if we
+  change the directory name that instances are stored in, how do we handle
+  instance directories created before the change landed? Do we move them? Do
+  we have a special case in the code? Do we assume that the operator will
+  recreate all the instances in their cloud?
+
+Developer impact
+----------------
+
+Discuss things that will affect other developers working on OpenStack,
+such as:
+
+* If the blueprint proposes a change to the API, discussion of how other
+  plugins would implement the feature is required.
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Who is leading the writing of the code? Or is this a blueprint where you're
+throwing it out there to see who picks it up?
+
+If more than one person is working on the implementation, please designate the
+primary author and contact.
+
+Primary assignee:
+
+
+Other contributors:
+
+
+Work Items
+----------
+
+Work items or tasks -- break the feature up into the things that need to be
+done to implement it. Those parts might end up being done by different people,
+but we're mostly trying to understand the timeline for implementation.
+
+
+Dependencies
+============
+
+* Include specific references to specs and/or blueprints in octavia, or in
+  other projects, that this one either depends on or is related to.
+
+* If this requires functionality of another project that is not currently used
+  by Octavia, document that fact.
+
+* Does this feature require any new library dependencies or code otherwise not
+  included in OpenStack? Or does it depend on a specific version of a library?
+
+
+Testing
+=======
+
+Please discuss how the change will be tested. We especially want to know what
+tempest tests will be added.
+It is assumed that unit test coverage will be added, so that does not need
+to be mentioned explicitly; but discussion of why you think unit tests are
+sufficient and we don't need to add more tempest tests would need to be
+included.
+
+Is this untestable in gate given current limitations (specific hardware /
+software configurations available)? If so, are there mitigation plans (3rd
+party testing, gate enhancements, etc.)?
+
+
+Documentation Impact
+====================
+
+What is the impact on the docs team of this change? Some changes might require
+donating resources to the docs team to have the documentation updated. Don't
+repeat details discussed above, but please reference them here.
+
+
+References
+==========
+
+Please add any useful references here. You are not required to have any
+references. Moreover, this specification should still make sense when your
+references are unavailable. Examples of what you could include are:
+
+* Links to mailing list or IRC discussions
+
+* Links to notes from a summit session
+
+* Links to relevant research, if appropriate
+
+* Related specifications as appropriate (e.g. link any vendor documentation)
+
+* Anything else you feel it is worthwhile to refer to
diff --git a/specs/version0.5/amphora-driver-interface.rst b/specs/version0.5/amphora-driver-interface.rst
new file mode 100644
index 0000000000..04704afdee
--- /dev/null
+++ b/specs/version0.5/amphora-driver-interface.rst
@@ -0,0 +1,287 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+==========================================
+Amphora Driver Interface
+==========================================
+https://blueprints.launchpad.net/octavia/+spec/amphora-driver-interface
+
+This blueprint describes how a driver will interface with the controller.
+It will describe the base class and other classes required. It will not
+describe the REST interface needed to talk to an amphora nor
+how health information or statistics are gathered from the amphora.
+
+
+Problem description
+===================
+The controller needs to talk through a driver to the amphora to allow
+for custom APIs and custom rendering of configuration data for
+different amphora implementations.
+
+The controller will heavily utilize taskflow [2] to accomplish its goals,
+so drivers are strongly encouraged to use taskflow to organize their
+work, too.
+
+
+Proposed change
+===============
+Establish a base class to model the desired functionality:
+
+.. code:: python
+
+    class AmphoraLoadBalancerDriver(object):
+
+        def update(self, listener, vip):
+            """updates the amphora with a new configuration
+
+            for the listener on the vip.
+            """
+            raise NotImplementedError
+
+        def stop(self, listener, vip):
+            """stops the listener on the vip."""
+            return None
+
+        def start(self, listener, vip):
+            """starts the listener on the vip."""
+            return None
+
+        def delete(self, listener, vip):
+            """deletes the listener on the vip."""
+            raise NotImplementedError
+
+        def get_info(self, amphora):
+            """Get detailed information about an amphora
+
+            Returns information about the amphora, e.g. {"Rest Interface":
+            "1.0", "Amphorae": "1.0", "packages": {"ha proxy": "1.5"},
+            "network-interfaces": {"eth0": {"ip": ...}}. Some information
+            might come from querying the amphora.
+            """
+            raise NotImplementedError
+
+        def get_diagnostics(self, amphora):
+            """OPTIONAL - Run diagnostics
+
+            Run some expensive self tests to determine if the amphora and
+            the LBs are healthy. The idea is that those tests are triggered
+            less frequently than the heartbeat.
+            """
+            raise NotImplementedError
+
+        def finalize_amphora(self, amphora):
+            """OPTIONAL - called once an amphora has been built but before
+
+            any listeners are configured. This is a hook for drivers who need
+            to do additional work before an amphora becomes ready to accept
+            listeners. Please keep in mind that the amphora might be kept in
+            an offline pool after this call.
+            """
+            pass
+
+        def post_network_plug(self, amphora, port):
+            """OPTIONAL - called after adding a compute instance to a network.
+
+            This will perform any necessary actions to allow for connectivity
+            for that network on that instance.
+
+            port is an instance of octavia.network.data_models.Port. It
+            contains information about the port, subnet, and network that
+            was just plugged.
+            """
+
+        def post_vip_plug(self, load_balancer, amphorae_network_config):
+            """OPTIONAL - called after plug_vip method of the network driver.
+
+            This is to do any additional work needed on the amphorae to plug
+            the vip, such as bringing up interfaces.
+
+            amphorae_network_config is a dictionary of objects that include
+            network specific information about each amphora's connections.
+            """
+
+        def start_health_check(self, health_mixin):
+            """start health check
+
+            :param health_mixin: health mixin object
+            :type health_mixin: object
+
+            Starts the listener process and calls HealthMixin to update
+            database information.
+            """
+            pass
+
+        def stop_health_check(self):
+            """stop health check
+
+            Stops the listener process and calls HealthMixin to update
+            database information.
+            """
+            pass
+
+The referenced listener is a listener object and vip a vip as described
+in our model. The model is detached from the DB so the driver can't write
+to the DB. Because our initial goal is to render a whole config, no special
+methods for adding nodes, health monitors, etc. are supported at this
+juncture. These might be added in later versions.
+
+No method for obtaining logs has been added. This will be done in a
+future blueprint.
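+
+For illustration only (this sketch is not part of the specification), a
+minimal driver satisfying this interface could simply log the calls it
+receives. The class name, the logging setup, and the ``listener.id`` and
+``vip.ip_address`` attributes below are assumptions based on the model
+described above:
+
+.. code:: python
+
+    import logging
+
+    LOG = logging.getLogger(__name__)
+
+
+    class LoggingNoopAmphoraDriver(AmphoraLoadBalancerDriver):
+        """Hypothetical driver that only records the calls it receives."""
+
+        def update(self, listener, vip):
+            # A real driver would render and push a new configuration here.
+            LOG.debug("update: listener %s on vip %s",
+                      listener.id, vip.ip_address)
+
+        def delete(self, listener, vip):
+            # A real driver would remove the listener configuration here.
+            LOG.debug("delete: listener %s on vip %s",
+                      listener.id, vip.ip_address)
+
+        def get_info(self, amphora):
+            # Static data for the sketch; a real driver would query the
+            # amphora for this information.
+            return {"Rest Interface": "1.0", "Amphorae": "1.0",
+                    "packages": {"ha proxy": "1.5"}}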
+
+
+Exception Model
+---------------
+
+The driver is expected to raise the following well defined exceptions:
+
+* NotImplementedError - this functionality is not implemented/not supported
+* AmphoraDriverError - a super class for all other exceptions and the catch
+  all if no specific exception can be found
+
+  * NotFoundError - this amphora couldn't be found or was deleted by Nova
+  * InfoException - gathering information about this amphora failed
+  * NetworkConfigException - gathering network information failed
+  * UnauthorizedException - the driver can't access the amphora
+  * TimeOutException - contacting the amphora timed out
+  * UnavailableException - the amphora is temporarily unavailable
+  * SuspendFailed - this load balancer couldn't be suspended
+  * EnableFailed - this load balancer couldn't be enabled
+  * DeleteFailed - this load balancer couldn't be deleted
+  * ProvisioningErrors - those are errors which happen during provisioning
+
+    * ListenerProvisioningError - could not provision Listener
+    * LoadBalancerProvisioningError - could not provision LoadBalancer
+    * HealthMonitorProvisioningError - could not provision HealthMonitor
+    * NodeProvisioningError - could not provision Node
+
+
+
+
+Health and Stat Mixin
+---------------------
+It has been suggested to gather health and statistics information
+via UDP packets emitted from the amphora. This requires each driver
+to spin up a thread to listen on a UDP port and then hand the
+information to a controller mixin, which makes sense of it.
+
+Here is the mixin definition:
+
+.. code:: python
+
+    class HealthMixIn(object):
+        def update_health(self, health):
+            # map: {"amphora-status": HEALTHY,
+            #       "loadbalancers": {"loadbalancer-id":
+            #           {"loadbalancer-status": HEALTHY,
+            #            "listeners": {"listener-id":
+            #                {"listener-status": HEALTHY,
+            #                 "nodes": {"node-id": HEALTHY, ...}}, ...}, ...}}
+            # Only items whose health has changed need to be submitted.
+            # awesome update code
+            pass
+
+    class StatsMixIn(object):
+        def update_stats(self, stats):
+            # uses map {"loadbalancer-id": {"listener-id":
+            #     {"bytes_in": 123, "bytes_out": 123,
+            #      "active_connections": 123, "total_connections": 123}, ...}
+            # Elements are named to keep it extensible for future versions.
+            # awesome update code and code to send to ceilometer
+            pass
+
+Things a good driver should do:
+-------------------------------
+
+ * Non-blocking IO - throw an appropriate exception instead
+   of waiting forever; use timeouts on sockets
+ * We might employ a circuit breaker to insulate driver
+   problems from controller problems [1]
+ * Use appropriate logging
+ * Use the preferred threading model
+
+This will be demonstrated in the Noop-driver code.
+
+
+Alternatives
+------------
+Require all amphorae to implement a common REST interface
+and use that as the integration point.
+
+
+Data model impact
+-----------------
+None
+
+
+REST API impact
+---------------
+None
+
+
+Security impact
+---------------
+None
+
+
+Notifications impact
+--------------------
+None - since initial version
+
+
+Other end user impact
+---------------------
+None
+
+
+Performance Impact
+------------------
+Minimal
+
+
+Other deployer impact
+---------------------
+Deployers need to make sure to bundle compatible
+versions of the amphora, driver, and controller.
+
+
+Developer impact
+----------------
+Need to write towards this clean interface.
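+
+For example, the non-blocking IO guidance above could translate into code
+along the following lines. This is a sketch only: the helper function, the
+timeout value, and the local TimeOutException stand-in are illustrative and
+not part of this specification.
+
+.. code:: python
+
+    import socket
+
+
+    class TimeOutException(Exception):
+        """Stand-in for the TimeOutException in the exception model above."""
+
+
+    def query_amphora(address, port, payload, timeout=5.0):
+        """Send one request to an amphora, failing fast instead of blocking."""
+        try:
+            with socket.create_connection((address, port),
+                                          timeout=timeout) as sock:
+                sock.sendall(payload)
+                return sock.recv(4096)
+        except socket.timeout as exc:
+            # Surface a well defined driver exception instead of hanging.
+            raise TimeOutException() from exc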
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+German Eichberger
+
+Work Items
+----------
+* Write abstract interface
+* Write Noop driver
+* Write tests
+
+
+Dependencies
+============
+None
+
+
+Testing
+=======
+* Unit tests with tox and Noop-Driver
+* tempest tests with Noop-Driver
+
+
+Documentation Impact
+====================
+None - we won't document the interface for 0.5. If that changes,
+we need to write interface documentation so
+3rd-party drivers know what we expect.
+
+
+References
+==========
+[1] https://martinfowler.com/bliki/CircuitBreaker.html
+[2] https://docs.openstack.org/taskflow/latest/
diff --git a/specs/version0.5/amphora-manager-interface.rst b/specs/version0.5/amphora-manager-interface.rst
new file mode 100644
index 0000000000..9f21a4e4e4
--- /dev/null
+++ b/specs/version0.5/amphora-manager-interface.rst
@@ -0,0 +1,206 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+==========================================
+Compute Driver Interface
+==========================================
+https://blueprints.launchpad.net/octavia/+spec/compute-driver-interface
+
+This blueprint describes how a driver will interface with Nova to
+manage the creation and deletion of amphora instances. It will
+describe the base class and other classes required to create, delete,
+manage the execution state, and query the status of amphorae.
+
+Problem description
+===================
+The controller needs to be able to create, delete, and monitor the
+status of amphora instances. The amphorae may be virtual machines,
+containers, bare-metal servers, or dedicated hardware load balancers.
+This interface should hide the implementation details of the amphorae
+from the caller to the maximum extent possible.
+
+This interface will provide means to specify:
+
+ - type (VM, Container, bare metal)
+ - flavor (provides means to specify memory and storage capacity)
+ - what else?
+
+Proposed change
+===============
+Establish an abstract base class to model the desired functionality:
+
+.. code:: python
+
+    class AmphoraComputeDriver(object):
+
+        def build(self, amphora_type=VM, amphora_flavor=None,
+                  image_id=None, keys=None, sec_groups=None,
+                  network_ids=None, config_drive_files=None,
+                  user_data=None):
+            """build a new amphora.
+
+            :param amphora_type: The type of amphora to create. For
+                version 0.5, only VM is supported. In the future this
+                may support Container, BareMetal, and HWLoadBalancer.
+            :param amphora_flavor: Optionally specify a flavor. The
+                interpretation of this parameter will depend upon the
+                amphora type and may not be applicable to all types.
+            :param image_id: ID of the base image for a VM amphora
+            :param keys: Optionally specify a list of ssh public keys
+            :param sec_groups: Optionally specify a list of security
+                groups
+            :param network_ids: A list of network_ids to attach to
+                the amphora
+            :param config_drive_files: A dict of files to overwrite on
+                the server upon boot. Keys are file names (i.e. /etc/passwd)
+                and values are the file contents (either as a string or as
+                a file-like object). A maximum of five entries is allowed,
+                and each file must be 10k or less.
+            :param user_data: user data to pass to be exposed by the
+                metadata server; this can be a file-like object or a
+                string
+
+            :returns: The id of the new instance.
+ + """ + + raise NotImplementedError + + def delete(self, amphora_id): + """ delete the specified amphora + """ + + raise NotImplementedError + + def status(self, amphora_id): + + """ Check whether the specified amphora is up + + :param amphora_id: the ID of the desired amphora + :returns: the nova response from the amphora + """ + raise NotImplementedError + + def get_amphora(self, amphora_name = None, amphora_id = None): + """ Try to find an amphora given its name or id + + :param amphora_name: the name of the desired amphora + :param amphora_id: the id of the desired amphora + :returns: the amphora object + """ + raise NotImplementedError + +Exception Model +--------------- + +The driver is expected to raise the following well defined exceptions: + +* NotImplementedError - this functionality is not implemented/not supported +* AmphoraComputeError - a super class for all other exceptions and the catch + all if no specific exception can be found + + * AmphoraBuildError - An amphora of the specified type could + not be built + * DeleteFailed - this amphora couldn't be deleted + +* InstanceNotFoundError - an instance matching the desired criteria + could not be found +* NotSuspendedError - resume() attempted on an instance that was not suspended + + + +Things a good driver should do: +------------------------------- + + * Non blocking operations - If an operation will take a long time to execute, + perform it asynchronously. The definition of "a long time" is open to + interpretation, but a common UX guideline is 200 ms + * We might employ a circuit breaker to insulate driver + problems from controller problems [1] + * Use appropriate logging + * Use the preferred threading model + +This will be demonstrated in the Noop-driver code. + + +Alternatives +------------ + + +Data model impact +----------------- +None + + +REST API impact +--------------- +None + + +Security impact +--------------- +None + + +Notifications impact +-------------------- +None - since initial version + + +Other end user impact +--------------------- +None + + +Performance Impact +------------------ +Minimal + + +Other deployer impact +--------------------- +Deployers need to make sure to bundle the compatible +versions of amphora, driver, controller -- + + +Developer impact +---------------- +Need to write towards this clean interface. + + +Implementation +============== + +Assignee(s) +----------- +Al Miller + +Work Items +---------- +* Write abstract interface +* Write Noop driver +* Write tests + + +Dependencies +============ +None + + +Testing +======= +* Unit tests with tox and Noop-Driver +* tempest tests with Noop-Driver + + +Documentation Impact +==================== +None - this is an internal interface and need not be externally +documented. + + +References +========== +[1] http://martinfowler.com/bliki/CircuitBreaker.html diff --git a/specs/version0.5/base-image.rst b/specs/version0.5/base-image.rst new file mode 100644 index 0000000000..017f9d63c8 --- /dev/null +++ b/specs/version0.5/base-image.rst @@ -0,0 +1,181 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +========================================== +Octavia Base Image +========================================== + +Launchpad blueprint: + +https://blueprints.launchpad.net/octavia/+spec/base-image + +Octavia is an operator-grade reference implementation for Load Balancing as a +Service (LBaaS) for OpenStack. 
The component of Octavia that does the load
+balancing is known as amphora. Amphora may be a virtual machine, may be a
+container, or may run on bare metal. Creating images for bare metal amphora
+installs is outside the scope of this 0.5 specification but may be added in a
+future release.
+
+Amphora will need a base image that can be deployed by Octavia to provide load
+balancing.
+
+
+Problem description
+===================
+
+Octavia needs a method for generating base images to be deployed as load
+balancing entities.
+
+Proposed change
+===============
+
+Leverage the OpenStack diskimage-builder project [1] tools to provide a script
+that builds qcow2 images or a tar file suitable for use in creating containers.
+This script will be modeled after the OpenStack Sahara [2] project's
+diskimage-create.sh script.
+
+This script and associated elements will build Amphora images. Initial support
+will be with an Ubuntu OS and HAProxy. The script will be able to use Fedora
+or CentOS as a base OS, but these will not initially be tested or supported.
+As the project progresses and/or the diskimage-builder project adds support
+for additional base OS options, they may become available for Amphora images.
+This does not mean that they are necessarily supported or tested.
+
+The script will use environment variables to customize the build beyond the
+Octavia project defaults, such as adding elements.
+
+The initial supported and tested image will be created using the
+diskimage-create.sh defaults (no command line parameters or environment
+variables set). As the project progresses we may add additional supported
+configurations.
+
+Command syntax:
+
+.. line-block::
+
+    $ diskimage-create.sh
+        [-a i386 | **amd64** | armhf ]
+        [-b **haproxy** ]
+        [-c **~/.cache/image-create** | <cache directory> ]
+        [-h]
+        [-i **ubuntu** | fedora | centos ]
+        [-o **amphora-x64-haproxy** | <image file name> ]
+        [-r]
+        [-s **5** | <size in gigabytes> ]
+        [-t **qcow2** | tar ]
+        [-w <working directory> ]
+    '-a' is the architecture type for the image (default: amd64)
+    '-b' is the backend type (default: haproxy)
+    '-c' is the path to the cache directory (default: ~/.cache/image-create)
+    '-h' display help message
+    '-i' is the base OS (default: ubuntu)
+    '-o' is the output image file name
+    '-r' enable the root account in the generated image (default: disabled)
+    '-s' is the image size to produce in gigabytes (default: 5)
+    '-t' is the image type (default: qcow2)
+    '-w' working directory for image building (default: .)
+
+
+.. line-block::
+
+    Environment variables supported by the script:
+    DIB_DISTRIBUTION_MIRROR - URL to a mirror for the base OS selected (-i).
+    DIB_REPO_PATH - Path to the diskimage-builder repository (default: ../../diskimage-builder)
+    ELEMENTS_REPO_PATH - Path to the tripleo-image-elements repository (default: ../../tripleo-image-elements)
+    DIB_ELEMENTS - Override the elements used to build the image
+    DIB_LOCAL_ELEMENTS - Elements to add to the build (requires DIB_LOCAL_ELEMENTS_PATH to be specified)
+    DIB_LOCAL_ELEMENTS_PATH - Path to the local elements directory
+
+.. topic:: Container support
+
+   The Docker command line required to import a tar file created with this
+   script is [3]:
+
+.. code:: bash
+
+    $ docker import - image:amphora-x64-haproxy < amphora-x64-haproxy.tar
+
+Alternatives
+------------
+
+Deployers can manually create an image or container, but they would need to
+make sure the required components are included.
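+
+As a purely illustrative example of the environment-variable customization
+described above (not part of this specification), an automation job could
+drive the script from Python roughly like this; the mirror URL, element
+name, and paths are made-up values, not project defaults:
+
+.. code:: python
+
+    import os
+    import subprocess
+
+    env = dict(os.environ)
+    # Use a local package mirror and pull in one extra element.
+    env["DIB_DISTRIBUTION_MIRROR"] = "http://mirror.example.com/ubuntu"
+    env["DIB_LOCAL_ELEMENTS"] = "example-element"
+    env["DIB_LOCAL_ELEMENTS_PATH"] = "/opt/example-elements"
+
+    # Build the default qcow2 image with an explicit name and size (GB).
+    subprocess.check_call(
+        ["./diskimage-create.sh", "-o", "amphora-x64-haproxy", "-s", "5"],
+        env=env)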
+ +Data model impact +----------------- +None + +REST API impact +--------------- +None + +Security impact +--------------- +None + +Notifications impact +-------------------- +None + +Other end user impact +--------------------- +None + +Performance Impact +------------------ +None + +Other deployer impact +--------------------- +This script will make creating an Octavia Amphora image or container simple. + +Developer impact +---------------- +None + +Implementation +============== + +Assignee(s) +----------- +Michael Johnson + +Work Items +---------- +1. Write diskimage-create.sh script based on Sahara project's script. + +2. Identify the list of packages required for Octavia Amphora. + +3. Create required elements not provided by the diskimage-builder project. + +4. Create unit tests + +Dependencies +============ + +This script will depend on the OpenStack diskimage-builder project. + +Testing +======= + +Initial testing will be completed using the default settings for the +diskimage-create.sh tool. + +* Unit tests with tox + * Validate that the image is the correct size and mounts via loopback + * Check that a valid kernel is installed + * Check that HAProxy and all required packages are installed +* tempest tests + +Documentation Impact +==================== + + +References +========== +.. line-block:: + [1] https://github.com/openstack/diskimage-builder + [2] https://github.com/openstack/sahara-image-elements + [3] https://github.com/openstack/diskimage-builder/blob/master/docs/docker.md diff --git a/specs/version0.5/component-design.rst b/specs/version0.5/component-design.rst new file mode 100644 index 0000000000..d44d3c0596 --- /dev/null +++ b/specs/version0.5/component-design.rst @@ -0,0 +1,110 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +============================================= +Octavia v0.5 master component design document +============================================= + + +Problem description +=================== +We need to define the various components that will make up Octavia v0.5. + +Proposed change +=============== +This is the first functional release of Octavia, incorporating a scalable +service delivery layer, but not yet concerned with a scalable command and +control layer. + +See doc/source/design/version0.5 for a detailed description of the v0.5 +component design. + +Alternatives +------------ +We're open to suggestions, but note that later designs already discussed on the +mailing list will incorporate several features of this design. + +Data model impact +----------------- +Octavia 0.5 introduces the main data model which will also be used in +subsequent releases. + + +REST API impact +--------------- +None + + +Security impact +--------------- +The only sensitive data used in Octavia 0.5 are the TLS private keys used with +TERMINATED_HTTPS functionality. However, the back-end storage aspect of these +secrets will be handled by Barbican. + +Octavia amphorae will also need to keep copies of these secrets locally in +order to facilitate seamless service restarts. These local stores should be +made on a memory filesystem. + + +Notifications impact +-------------------- +None + + +Other end user impact +--------------------- +None + + +Performance Impact +------------------ +None + + +Other deployer impact +--------------------- +Operator API and UI may need to be changed as a result of this specification. 
+
+
+Developer impact
+----------------
+None beyond implementing the spec. :)
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+Lots of us will be working on this!
+
+
+Work Items
+----------
+Again, lots of things to be done here.
+
+
+Dependencies
+============
+Barbican
+
+
+Testing
+=======
+A lot of new tests will need to be written to test the separate components,
+their interfaces, and likely failure scenarios.
+
+
+Documentation Impact
+====================
+This specification largely defines the documentation of the component design.
+
+Component design is becoming a part of the project standard documentation.
+
+
+References
+==========
+Mailing list discussion of similar designs earlier this year
diff --git a/specs/version0.5/controller-worker.rst b/specs/version0.5/controller-worker.rst
new file mode 100644
index 0000000000..ab063eb6e3
--- /dev/null
+++ b/specs/version0.5/controller-worker.rst
@@ -0,0 +1,285 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+==================================
+Controller Worker (deploy-worker)
+==================================
+
+Launchpad blueprint:
+
+https://blueprints.launchpad.net/octavia/+spec/controller-worker
+
+Octavia is an operator-grade reference implementation for Load Balancing as a
+Service (LBaaS) for OpenStack. The component of Octavia that does the load
+balancing is known as Amphora.
+
+The component of Octavia that provides command and control of the Amphora is
+the Octavia controller.
+
+Problem description
+===================
+
+Components of the Octavia controller require a shared library that provides
+the orchestration of create/update/delete actions for Octavia objects such as
+load balancers and listeners.
+
+It is expected that this library will be used by the Queue Consumer to service
+API requests, by the Housekeeping Manager to manage the spare Amphora pool,
+and by the Health Manager to fail over failed objects.
+
+Proposed change
+===============
+
+The Controller Worker will be implemented as a class that provides methods to
+facilitate the create/update/delete actions. This class will be responsible
+for managing the number of simultaneous operations being executed by
+coordinating through the Octavia database.
+
+The Controller Worker will provide a base class that sets up and initializes
+the TaskFlow engines required to complete the action. Users of the library
+will then call the appropriate method for the action. These methods set up
+and launch the appropriate flow. Each flow will be contained in a separate
+class for code reuse and supportability.
+
+The Controller Worker library will provide the following methods:
+
+.. code:: python
+
+    def create_amphora(self):
+        """Creates an Amphora.
+
+        :returns: amphora_id
+        """
+        raise NotImplementedError
+
+    def delete_amphora(self, amphora_id):
+        """Deletes an existing Amphora.
+
+        :param amphora_id: ID of the amphora to delete
+        :returns: None
+        :raises AmphoraNotFound: The referenced Amphora was not found
+        """
+        raise NotImplementedError
+
+    def create_load_balancer(self, load_balancer_id):
+        """Creates a load balancer by allocating Amphorae.
+
+        :param load_balancer_id: ID of the load balancer to create
+        :returns: None
+        :raises NoSuitableAmphora: Unable to allocate an Amphora.
+        """
+        raise NotImplementedError
+
+    def update_load_balancer(self, load_balancer_id, load_balancer_updates):
+        """Updates a load balancer.
+ + :param load_balancer_id: ID of the load balancer to update + :param load_balancer_updates: Dict containing updated load balancer + attributes + :returns: None + :raises LBNotFound: The referenced load balancer was not found + """ + raise NotImplementedError + + def delete_load_balancer(self, load_balancer_id): + """Deletes a load balancer by de-allocating Amphorae. + + :param load_balancer_id: ID of the load balancer to delete + :returns: None + :raises LBNotFound: The referenced load balancer was not found + """ + raise NotImplementedError + + def create_listener(self, listener_id): + """Creates a listener. + + :param listener_id: ID of the listener to create + :returns: None + :raises NoSuitableLB: Unable to find the load balancer + """ + raise NotImplementedError + + def update_listener(self, listener_id, listener_updates): + """Updates a listener. + + :param listener_id: ID of the listener to update + :param listener_updates: Dict containing updated listener attributes + :returns: None + :raises ListenerNotFound: The referenced listener was not found + """ + raise NotImplementedError + + def delete_listener(self, listener_id): + """Deletes a listener. + + :param listener_id: ID of the listener to delete + :returns: None + :raises ListenerNotFound: The referenced listener was not found + """ + raise NotImplementedError + + def create_pool(self, pool_id): + """Creates a node pool. + + :param pool_id: ID of the pool to create + :returns: None + :raises NoSuitableLB: Unable to find the load balancer + """ + raise NotImplementedError + + def update_pool(self, pool_id, pool_updates): + """Updates a node pool. + + :param pool_id: ID of the pool to update + :param pool_updates: Dict containing updated pool attributes + :returns: None + :raises PoolNotFound: The referenced pool was not found + """ + raise NotImplementedError + + def delete_pool(self, pool_id): + """Deletes a node pool. + + :param pool_id: ID of the pool to delete + :returns: None + :raises PoolNotFound: The referenced pool was not found + """ + raise NotImplementedError + + def create_health_monitor(self, health_monitor_id): + """Creates a health monitor. + + :param health_monitor_id: ID of the health monitor to create + :returns: None + :raises NoSuitablePool: Unable to find the node pool + """ + raise NotImplementedError + + def update_health_monitor(self, health_monitor_id, health_monitor_updates): + """Updates a health monitor. + + :param health_monitor_id: ID of the health monitor to update + :param health_monitor_updates: Dict containing updated health monitor + attributes + :returns: None + :raises HMNotFound: The referenced health monitor was not found + """ + raise NotImplementedError + + def delete_health_monitor(self, health_monitor_id): + """Deletes a health monitor. + + :param health_monitor_id: ID of the health monitor to delete + :returns: None + :raises HMNotFound: The referenced health monitor was not found + """ + raise NotImplementedError + + def create_member(self, member_id): + """Creates a pool member. + + :param member_id: ID of the member to create + :returns: None + :raises NoSuitablePool: Unable to find the node pool + """ + raise NotImplementedError + + def update_member(self, member_id, member_updates): + """Updates a pool member. 
+
+ :param member_id: ID of the member to update
+ :param member_updates: Dict containing updated member attributes
+ :returns: None
+ :raises MemberNotFound: The referenced member was not found
+ """
+ raise NotImplementedError
+
+ def delete_member(self, member_id):
+ """Deletes a pool member.
+
+ :param member_id: ID of the member to delete
+ :returns: None
+ :raises MemberNotFound: The referenced member was not found
+ """
+ raise NotImplementedError
+
+ def failover_amphora(self, amphora_id):
+ """Fails over an Amphora.
+
+ :param amphora_id: ID of the amphora to fail over
+ :returns: None
+ :raises AmphoraNotFound: The referenced Amphora was not found
+ """
+ raise NotImplementedError
+
+Alternatives
+------------
+This code could be included in the Queue Consumer component of the controller.
+However, this would not allow the library to be shared with other components
+of the controller, such as the Health Manager.
+
+Data model impact
+-----------------
+
+
+REST API impact
+---------------
+None
+
+Security impact
+---------------
+
+
+Notifications impact
+--------------------
+
+
+Other end user impact
+---------------------
+
+
+Performance Impact
+------------------
+
+
+Other deployer impact
+---------------------
+
+
+Developer impact
+----------------
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+Michael Johnson
+
+Work Items
+----------
+
+
+Dependencies
+============
+https://blueprints.launchpad.net/octavia/+spec/amphora-driver-interface
+https://blueprints.launchpad.net/octavia/+spec/neutron-network-driver
+https://blueprints.launchpad.net/octavia/+spec/nova-compute-driver
+
+Testing
+=======
+Unit tests
+
+Documentation Impact
+====================
+None
+
+References
+==========
+https://blueprints.launchpad.net/octavia/+spec/health-manager
+https://blueprints.launchpad.net/octavia/+spec/housekeeping-manager
+https://blueprints.launchpad.net/octavia/+spec/queue-consumer
diff --git a/specs/version0.5/controller.dot b/specs/version0.5/controller.dot
new file mode 100644
index 0000000000..576fd34bbc
--- /dev/null
+++ b/specs/version0.5/controller.dot
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2014 Hewlett-Packard Development Company, L.P.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License. You may obtain
+ * a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+*/
+digraph G {
+
+
+ subgraph cluster0 {
+ style=filled;
+ color=gray75;
+
+
+ label = "Controller";
+
+
+ queue [label="Queue\nConsumer", fontcolor=white, color=forestgreen, style=filled];
+ health [label="Health\nManager", fontcolor=white, color=forestgreen, style=filled];
+ house [label="Housekeeping\n(Spares/Cleanup)\nManager", fontcolor=white, color=forestgreen, style=filled];
+ ctrl [label="Controller\nWorker", fontcolor=white, color=forestgreen, style=filled, shape=hexagon];
+ proxy [label="Services\nProxy", fontcolor=white, color=forestgreen, style=filled];
+
+
+ subgraph cluster1 {
+ style=filled;
+ color=gray90;
+ fontcolor=black;
+
+
+ label = "Amphora Driver";
+ msg [label="Message\nHandler", fontcolor=white, color=forestgreen, style=filled];
+ config [label="Config\nHandler", fontcolor=white, color=forestgreen, style=filled];
+ stats [label="Stats\nHandler", fontcolor=white, color=forestgreen, style=filled];
+ log [label="Log\nHandler", fontcolor=black, color=forestgreen, style=dashed];
+ }
+
+
+ health -> msg;
+ }
+
+
+ db [label="Database", fontcolor=white, color=dodgerblue, style=filled];
+ api [label="APIs", fontcolor=white, color=forestgreen, style=filled];
+ oslo [label="Oslo\nMessaging", fontcolor=white, color=dodgerblue, style=filled];
+ nova [label="Nova", fontcolor=white, color=dodgerblue, style=filled];
+ neutron [label="Neutron", fontcolor=white, color=dodgerblue, style=filled];
+ cert [label="Certificate\nLibrary", fontcolor=white, color=dodgerblue, style=filled];
+ bbq [label="Barbican", fontcolor=white, color=dodgerblue, style=filled];
+ swift [label="SWIFT", fontcolor=white, color=dodgerblue, style=filled];
+ ceilo [label="Ceilometer", fontcolor=white, color=dodgerblue, style=filled];
+ amp [label="Amphorae", fontcolor=black, color=coral2, style=filled];
+
+
+ ctrl -> queue [dir="both"];
+ db -> api -> oslo -> queue [dir="both"];
+ db -> ctrl [dir="both"];
+ db -> queue [dir="both"];
+ db -> health [dir="both"];
+ db -> house [dir="both"];
+ db -> msg [dir="both"];
+ nova -> ctrl [dir="both"];
+ nova -> house [dir="both"];
+ neutron -> ctrl [dir="both"];
+ neutron -> house [dir="both"];
+ proxy -> swift [dir="both"];
+ proxy -> amp [dir="both"];
+ cert -> ctrl [dir="both"];
+ cert -> bbq [dir="both"];
+ stats -> ceilo [dir="both"];
+ msg -> amp [ltail=cluster1];
+}
diff --git a/specs/version0.5/controller.rst b/specs/version0.5/controller.rst
new file mode 100644
index 0000000000..deb60a1004
--- /dev/null
+++ b/specs/version0.5/controller.rst
@@ -0,0 +1,251 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+==================
+Octavia Controller
+==================
+
+Launchpad blueprint:
+
+https://blueprints.launchpad.net/octavia/+spec/controller
+
+Octavia is an operator-grade reference implementation for Load Balancing as a
+Service (LBaaS) for OpenStack. The component of Octavia that does the load
+balancing is known as Amphora.
+
+The component of Octavia that provides command and control of the Amphora is
+the Octavia controller.
+
+Problem description
+===================
+
+Octavia requires a controller component that provides the following
+capabilities:
+
+* Processing Amphora configuration updates and making them available to the
+ Amphora driver
+* Providing certificate information to the Amphora driver
+* Deploying Amphora instances
+* Managing the Amphora spares pool
+* Cleaning up Amphora instances that are no longer needed
+* Monitoring the health of Amphora instances
+* Processing alerts and messages from the Amphora (for example, "member down")
+* Respecting colocation / apolocation / flavor requirements of the Amphora
+* Processing statistical data from the Amphora, including communicating with
+ metering services, such as Ceilometer
+ (https://blueprints.launchpad.net/ceilometer/+spec/ceilometer-meter-lbaas)
+* Responding to API requests sent by the API processes
+* Proxying Amphora data to other OpenStack services such as Swift for log file
+ archival
+
+Proposed change
+===============
+
+The Octavia controller will consist of the following components:
+
+* Amphora Driver
+* Queue Consumer
+* Certificate Library
+* Compute Driver
+* Controller Worker
+* Health Manager
+* Housekeeping Manager
+* Network Driver
+* Services Proxy
+
+ .. graphviz:: controller.dot
+
+The manager and proxy components should be implemented as independent
+processes to provide a level of autonomy to these controller functions.
+
+The highly available database will provide the persistent "brain" for the
+Octavia controller. Octavia controller processes will share state and
+information about the Amphora, load balancers, and listeners via the database.
+It is expected that the Octavia controller and Amphora driver will directly
+interact with the database, but the Amphorae will never directly access the
+database.
+
+By using a highly available database, Octavia controllers themselves do not
+directly keep any stateful information on Amphorae. Because of this, Amphorae
+are not assigned to any specific controller. Any controller is able to service
+monitoring, heartbeat, API, and other requests coming to or from Amphorae.
+
+**Amphora Driver**
+
+The Amphora driver abstracts the backend implementation of an Amphora. The
+controller will interact with Amphora via the Amphora driver. This interface
+is defined in the amphora-driver-interface specification.
+
+**Queue Consumer**
+
+The Queue Consumer is event-driven and tasked with servicing requests from the
+API components via Oslo messaging. It is also the primary lifecycle
+management component for Amphora.
+
+To service requests, the Queue Consumer will spawn a Controller Worker
+process. Spawning a separate process makes sure that the Queue Consumer can
+continue to service API requests while the longer-running deployment process
+is progressing.
+
+Messages received via Oslo messaging will include the load balancer ID,
+requested action, and configuration update data. Passing the configuration
+update data via Oslo messaging allows the deploy worker to roll back to a
+"last known good" configuration should there be a problem with the
+configuration update. The spawned worker will use this information to access
+the Octavia database to gather any additional details that may be required to
+complete the requested action.
+
+**Compute Driver**
+
+The Compute Driver abstracts the implementation of instantiating the virtual
+machine, container, appliance, or device that the Amphora will run in.
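+
+To make the abstraction concrete, here is a minimal sketch of the kind of
+interface such a driver could expose (the method names are illustrative
+assumptions; the authoritative definition belongs to the
+compute-driver-interface specification):
+
+.. code:: python
+
+    import abc
+
+
+    class AbstractComputeDriver(abc.ABC):
+        """Hypothetical minimal surface for managing Amphora instances."""
+
+        @abc.abstractmethod
+        def build(self, name, amphora_flavor, image_id, key_name,
+                  sec_groups, network_ids, config_drive_files=None):
+            """Create an instance for an Amphora; return its compute id."""
+
+        @abc.abstractmethod
+        def get_amphora(self, compute_id):
+            """Retrieve instance details as an Amphora data model."""
+
+        @abc.abstractmethod
+        def delete(self, compute_id):
+            """Remove the instance backing an Amphora."""
+
+        @abc.abstractmethod
+        def status(self, compute_id):
+            """Return the up/down status of the instance."""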
+
+**Controller Worker**
+
+The Controller Worker is spawned from the Queue Consumer or the Health
+Manager. It interfaces with the compute driver (in some deployment scenarios),
+network driver, and Amphora driver to activate Amphora instances,
+load balancers, and listeners.
+
+When a request for a new instance or a failover is received, the Controller
+Worker will be responsible for connecting the appropriate networking ports to
+the Amphora via the network driver and triggering a configuration push via the
+Amphora driver. This will include validating that the targeted Amphora has the
+required networks plumbed.
+
+The Amphora configured by the Controller Worker may be an existing Amphora
+instance, a new Amphora from the spares pool, or a newly created Amphora.
+This determination will be made based on the apolocation requirements of
+the load balancer, the load balancer count on the existing Amphora, and
+the availability of ready spare Amphorae in the spares pool.
+
+The Controller Worker will be responsible for passing in the required metadata
+via config drive when deploying an Amphora. This metadata will include:
+a list of controller IP addresses, the controller certificate authority
+certificate, and the Amphora certificate and key file.
+
+The main flow of the Controller Worker is described in the
+amphora-lifecycle-management specification as the Activate Amphora sequence.
+
+**Certificate Library**
+
+The Certificate Library provides an abstraction for workers to access security
+data stored in OpenStack Barbican from the Amphora Driver. It will provide a
+short-term (one minute) cache of the security contents to facilitate the
+efficient startup of a large number of listeners sharing security content.
+
+**Health Manager**
+
+The Health Manager is tasked with checking for missing or unhealthy Amphorae
+based on the health records stored in the highly available database. The
+amphora-lifecycle-management specification details the health monitoring
+sequence.
+
+The Health Manager will have a separate thread that checks the Amphora
+heartbeat timestamps on a configurable interval to see whether an Amphora has
+failed to provide a heartbeat within the required amount of time, which is
+another configurable setting. Should an Amphora fail to report a heartbeat in
+the configured interval, the Health Manager will initiate a failover of the
+Amphora by spawning a deploy worker and will update the status of the listener
+in the database.
+
+The Health Manager will have to be aware of the load balancer associated with
+the failed listener to decide if it needs to fail over additional listeners to
+migrate the failed listener to a new Amphora.
+
+**Housekeeping Manager**
+
+The Housekeeping Manager will manage the spare Amphora pool and the teardown
+of Amphorae that are no longer needed. On a configurable interval the
+Housekeeping Manager will check the Octavia database to identify the required
+cleanup and maintenance actions. The amphora-lifecycle-management
+specification details the Create, Spare, and Delete Amphora sequences the
+Housekeeping Manager will follow.
+
+The operator can specify a number of Amphora instances to be held in a spares
+pool. Building Amphora instances can take a long time, so the Housekeeping
+Manager will spawn threads to manage the number of Amphorae in the spares pool.
+
+The Housekeeping Manager will interface with the compute driver,
+network driver, and the Certificate Manager to accomplish the create
+and delete actions.
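+
+For illustration only, the spare-pool maintenance described above could reduce
+to a loop of roughly this shape (the repository and driver method names are
+assumptions, not part of this spec):
+
+.. code:: python
+
+    import time
+
+    def maintain_spare_pool(repo, compute_driver, conf):
+        """Periodically top up the spare Amphora pool."""
+        while True:
+            # Count the spares that are booted and ready to be assigned.
+            ready = repo.count_ready_spare_amphorae()
+            deficit = conf.spare_amphora_pool_size - ready
+            # Builds are slow, so real code would hand each one off to a
+            # worker thread rather than block the housekeeping loop.
+            for _ in range(max(deficit, 0)):
+                compute_driver.build_spare_amphora()
+            time.sleep(conf.housekeeping_interval)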
+ +**Network Driver** + +The Network Driver abstracts the implementation of connecting an Amphora to +the required networks. + +**Services Proxy** + +The Services Proxy enables Amphora to reach other cloud services directly over +the Load Balancer Network where the controller may need to provide +authentication tokens on behalf of the Amphora, such as when archiving load +balancer traffic logs into customer swift containers. + + +Alternatives +------------ + + +Data model impact +----------------- + + +REST API impact +--------------- + + +Security impact +--------------- + + +Notifications impact +-------------------- + + +Other end user impact +--------------------- + + +Performance Impact +------------------ + + +Other deployer impact +--------------------- + + +Developer impact +---------------- + + +Implementation +============== + +Assignee(s) +----------- +Michael Johnson + +Work Items +---------- + + +Dependencies +============ + + +Testing +======= + + +Documentation Impact +==================== + + +References +========== + +| Amphora lifecycle management: https://review.opendev.org/#/c/130424/ +| LBaaS metering: +| https://blueprints.launchpad.net/ceilometer/+spec/ceilometer-meter-lbaas diff --git a/specs/version0.5/haproxy-amphora-api.rst b/specs/version0.5/haproxy-amphora-api.rst new file mode 100644 index 0000000000..305c0ff15e --- /dev/null +++ b/specs/version0.5/haproxy-amphora-api.rst @@ -0,0 +1,105 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +=================== +HAProxy Amphora API +=================== + +https://blueprints.launchpad.net/octavia/+spec/appliance-api + +The reference implementation of Octavia is going to make use of an haproxy- +based amphora. As such, there will be an haproxy reference driver that speaks +a well-defined protocol to the haproxy-based amphora. This document is meant +to be a foundation of this interface, outlining in sufficient detail the +various commands that will definitely be necessary. This design should be +iterated upon as necessary going forward. + +Problem description +=================== +This API specification is necessary in order to fully develop the haproxy +reference driver, both to ensure this interface is well documented, and so that +different people can work on different parts of bringing Octavia to fruition. + +Proposed change +=============== +Note that this spec does not yet attempt to define the following, though these +may follow shortly after this initial spec is approved: +* Method for bi-directional authentication between driver and amphora. +* Bootstrapping process of amphora +* Transition process from "spare" to "active" amphora and other amphora +lifecycle transitions + +This spec does attempt to provide an initial foundation for the following: +* RESTful interface exposed on amphora management + +Alternatives +------------ +None + +Data model impact +----------------- +None (yet) + +REST API impact +--------------- +Please note that the proposed changes in this spec do NOT affect either the +publicly-exposed user or operator APIs, nor really anything above the +haproxy reference driver. + +Please see doc/main/api/haproxy-amphora-api.rst + +Security impact +--------------- +None yet, though bi-directional authentication between driver and amphora needs +to be addressed. 
+
+Notifications impact
+--------------------
+None
+
+Other end user impact
+---------------------
+None
+
+Performance Impact
+------------------
+None
+
+Other deployer impact
+---------------------
+None
+
+Developer impact
+----------------
+None
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+stephen-balukoff
+david-lenwell
+
+Work Items
+----------
+
+Dependencies
+============
+haproxy reference driver
+
+Testing
+=======
+Unit tests
+
+Documentation Impact
+====================
+None
+
+References
+==========
+None
+
diff --git a/specs/version0.5/housekeeping-manager-interface.rst b/specs/version0.5/housekeeping-manager-interface.rst
new file mode 100644
index 0000000000..d201857b12
--- /dev/null
+++ b/specs/version0.5/housekeeping-manager-interface.rst
@@ -0,0 +1,201 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+==========================================
+Housekeeping Manager Specification
+==========================================
+https://blueprints.launchpad.net/octavia/+spec/housekeeping-manager
+
+Problem description
+===================
+
+The Housekeeping Manager will manage the spare amphora pool and the
+teardown of amphorae that are no longer needed. On a configurable
+interval the Housekeeping Manager will check the Octavia database to
+identify the cleanup and maintenance actions required. The
+amphora-lifecycle-management specification details the Create and
+Deactivate amphora sequences the Housekeeping Manager will follow.
+
+
+Proposed change
+===============
+The housekeeping manager will run as a daemon process that will
+perform the following actions:
+
+* Read the following from the configuration file
+
+ * housekeeping_interval: The time (in seconds) that the
+ housekeeping manager will sleep before running its checks
+ again.
+ * spare_amphora_pool_size: The desired number of spare amphorae.
+ * maximum_deploying_amphora_count: The maximum number of amphorae
+ that may be deployed simultaneously.
+ * maximum_preserved_amphora_count: How many deactivated amphorae to
+ preserve. 0 means delete, 1 or greater means keep up to that many
+ amphorae for future diagnostics. Only amphorae in the ERROR and
+ PRESERVE states are eligible to be preserved. TODO: Right now
+ there is no PRESERVE state; for this to work, we would need to
+ define one in the amphora spec.
+ * preservation_scheme
+
+ * "keep": keep all preserved amphorae
+ * "cycle": maintain a queue of preserved amphorae, deleting the
+ oldest one when a new amphora is preserved.
+
+ * preservation_method: Preservation must take into account the
+ possibility that amphorae instantiated in the future may reuse MAC
+ addresses.
+
+ * "unplug": Disconnect the virtual NICs from the amphora
+ * "snapshot": Take a snapshot of the amphora, then stop it
+
+* Get the spare pool size
+
+ * Log the spare pool size
+ * If the spare pool size is less than the spare pool target
+ capacity, initiate creation of an appropriate number of amphorae.
+
+* Obtain the list of deactivated amphorae and schedule their removal.
+ If maximum_preserved_amphora_count > 0, and there are fewer than
+ that many amphorae in the preserved pool, preserve the amphora.
+ After the preserved pool size reaches maximum_preserved_amphora_count,
+ use preservation_scheme to determine whether to keep newly failed
+ amphorae.
+
+* Sleep for the time specified by housekeeping_interval.
+
+* Return to the top
+
+Establish a base class to model the desired functionality:
+
+.. code:: python
+
+
+    class HousekeepingManager(object):
+
+        """Class to manage the spare amphora pool. This class should do
+        very little actual work; its main job is to monitor the spare pool
+        and schedule creation of new amphorae and removal of used amphorae.
+        By default, used amphorae will be deleted, but they may optionally
+        be preserved for future analysis.
+        """
+
+        def get_spare_amphora_size(self):
+            """Return the target capacity of the spare pool."""
+            raise NotImplementedError
+
+        def get_ready_spare_amphora_count(self):
+            """Return the number of available amphorae in the spare pool."""
+            raise NotImplementedError
+
+        def create_amphora(self, num_to_create=1):
+            """Schedule the creation of the specified number of amphorae
+            to be added to the spare pool.
+            """
+            raise NotImplementedError
+
+        def remove_amphora(self, amphora_ids):
+            """Schedule the removal of the amphorae specified by
+            amphora_ids.
+            """
+            raise NotImplementedError
+
+Exception Model
+---------------
+
+The manager is expected to raise or pass along the following
+well-defined exceptions:
+
+* NotImplementedError - this functionality is not implemented/not supported
+* AmphoraDriverError - a super class for all other exceptions and the catch
+ all if no specific exception can be found
+
+ * NotFoundError - this amphora couldn't be found / was deleted by nova
+ * UnauthorizedException - the driver can't access the amphora
+ * UnavailableException - the amphora is temporarily unavailable
+ * DeleteFailed - this load balancer couldn't be deleted
+
+Alternatives
+------------
+
+Data model impact
+-----------------
+
+Requires the addition of the housekeeping_interval,
+spare_amphora_pool_size, maximum_deploying_amphora_count,
+maximum_preserved_amphora_count, preservation_scheme, and
+preservation_method options to the config.
+
+
+REST API impact
+---------------
+
+None.
+
+Security impact
+---------------
+
+Must follow standard practices for database access.
+
+Notifications impact
+--------------------
+
+Other deployer impact
+---------------------
+
+Other end user impact
+---------------------
+
+There should be no end-user-visible impact.
+
+Performance Impact
+------------------
+
+The housekeeping_interval and spare_amphora_pool_size parameters will be
+adjustable by the operator in order to balance resource usage against
+performance.
+
+
+Developer impact
+----------------
+
+Developers of other modules need to be aware that amphorae may be
+created, deleted, or saved for diagnosis by this daemon.
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+Al Miller
+
+Work Items
+----------
+* Write abstract interface
+* Write Noop driver
+* Write tests
+
+
+Dependencies
+============
+Amphora driver
+Config manager
+
+
+
+Testing
+=======
+* Unit tests with tox and Noop-Driver
+* Tempest tests with Noop-Driver
+
+
+Documentation Impact
+====================
+None - we won't document the interface for 0.5. If that changes,
+we will need to write interface documentation so
+third-party drivers know what we expect.
+
+
+References
+==========
diff --git a/specs/version0.5/network-driver-interface.rst b/specs/version0.5/network-driver-interface.rst
new file mode 100644
index 0000000000..e238faf82e
--- /dev/null
+++ b/specs/version0.5/network-driver-interface.rst
@@ -0,0 +1,330 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+========================
+Network Driver Interface
+========================
+
+Launchpad blueprint:
+
+https://blueprints.launchpad.net/octavia/+spec/network-driver-interface
+
+We need a generic interface for creating networking resources. This allows
+implementations supporting different networking infrastructures to accomplish
+frontend and backend connectivity.
+
+Problem description
+===================
+
+There is a need to define a generic interface for a networking service. An
+Octavia controller should not know what networking infrastructure is being used
+underneath. It should only know an interface. This interface is needed to
+support differing networking infrastructures.
+
+
+Proposed change
+===============
+In order to make the network driver as generically functional as possible, it
+is broken down into methods that Octavia will need at a high level to
+accomplish frontend and backend connectivity. The idea is that implementing
+these methods may require multiple requests to the networking service to
+accomplish the end result. The interface is meant to promote stateless
+implementations and to suffer no issues when run in parallel.
+
+In the future we would like to create a common module that implementations of
+this interface can call to set up a TaskFlow engine to promote using a common
+TaskFlow configuration. That, however, can be left until this has had time
+to mature.
+
+Existing data model:
+
+* class VIP
+ * load_balancer_id
+ * ip_address
+ * network_id - (neutron subnet)
+ * port_id - (neutron port)
+
+* class Amphora
+ * load_balancer_id
+ * compute_id
+ * lb_network_ip
+ * status
+ * vrrp_ip - if an active/passive topology, this is the ip where the vrrp
+ communication between peers happens
+ * ha_ip - this is the highly available IP. In an active/passive topology
+ it most likely exists on the MASTER amphora and on failure
+ it will be raised on the BACKUP amphora. In an active/active
+ topology it may exist on both amphorae. In the end, it is up
+ to the amphora driver to decide how to use this.
+
+New data models:
+
+* class Interface
+ * id
+ * network_id - (neutron subnet)
+ * amphora_id
+ * fixed_ips
+
+* class Delta
+ * amphora_id
+ * compute_id
+ * add_nics
+ * delete_nics
+
+* class Network
+ * id
+ * name
+ * subnets - (list of subnet ids)
+ * tenant_id
+ * admin_state_up
+ * provider_network_type
+ * provider_physical_network
+ * provider_segmentation_id
+ * router_external
+ * mtu
+
+* class Subnet
+ * id
+ * name
+ * network_id
+ * tenant_id
+ * gateway_ip
+ * cidr
+ * ip_version
+
+* class Port
+ * id
+ * name
+ * device_id
+ * device_owner
+ * mac_address
+ * network_id
+ * status
+ * tenant_id
+ * admin_state_up
+ * fixed_ips - list of FixedIP objects
+
+* FixedIP
+ * subnet_id
+ * ip_address
+
+* AmphoraNetworkConfig
+ * amphora - Amphora object
+ * vip_subnet - Subnet object
+ * vip_port - Port object
+ * vrrp_subnet - Subnet object
+ * vrrp_port - Port object
+ * ha_subnet - Subnet object
+ * ha_port - Port object
+
+New Exceptions defined in the octavia.network package:
+
+* NetworkException - Base Exception
+* PlugVIPException
+* UnplugVIPException
+* PluggedVIPNotFound
+* AllocateVIPException
+* DeallocateVIPException
+* PlugNetworkException
+* UnplugNetworkException
+* VIPInUse
+* PortNotFound
+* SubnetNotFound
+* NetworkNotFound
+* AmphoraNotFound
+
+
+This class defines the methods for a fully functional network driver.
+Implementations of this interface can expect a rollback to occur if any of +the non-nullipotent methods raise an exception. + +class AbstractNetworkDriver + +* plug_vip(loadbalancer, vip) + + * Sets up the routing of traffic from the vip to the load balancer and its + amphorae. + * loadbalancer - instance of data_models.LoadBalancer + + * this is to keep the parameters as generic as possible so different + implementations can use different properties of a load balancer. In + the future we may want to just take in a list of amphora compute + ids and the vip data model. + + * vip = instance of a VIP + * returns list of Amphora + * raises PlugVIPException, PortNotFound + +* unplug_vip(loadbalancer, vip) + + * Removes the routing of traffic from the vip to the load balancer and its + amphorae. + * loadbalancer = instance of a data_models.LoadBalancer + * vip = instance of a VIP + * returns None + * raises UnplugVIPException, PluggedVIPNotFound + +* allocate_vip(loadbalancer) + + * Allocates a virtual ip and reserves it for later use as the frontend + connection of a load balancer. + * loadbalancer = instance of a data_models.LoadBalancer + * returns VIP instance + * raises AllocateVIPException, PortNotFound, SubnetNotFound + +* deallocate_vip(vip) + + * Removes any resources that reserved this virtual ip. + * vip = VIP instance + * returns None + * raises DeallocateVIPException, VIPInUse + +* plug_network(compute_id, network_id, ip_address=None) + + * Connects an existing amphora to an existing network. + * compute_id = id of an amphora in the compute service + * network_id = id of the network to attach + * ip_address = ip address to attempt to be assigned to interface + * returns Interface instance + * raises PlugNetworkException, AmphoraNotFound, NetworkNotFound + +* unplug_network(compute_id, network_id, ip_address=None) + + * Disconnects an existing amphora from an existing network. If ip_address + is not specified then all interfaces on that network will be unplugged. 
+ * compute_id = id of an amphora in the compute service to unplug
+ * network_id = id of the network to unplug the amphora from
+ * ip_address = ip address of interface to unplug
+ * returns None
+ * raises UnplugNetworkException, AmphoraNotFound, NetworkNotFound,
+ NetworkException
+
+* get_plugged_networks(compute_id):
+
+ * Retrieves the current plugged networking configuration
+ * compute_id = id of an amphora in the compute service
+ * returns = list of Interface instances
+
+* update_vip(loadbalancer):
+
+ * Hook for the driver to update the VIP information based on the state
+ of the passed in loadbalancer
+ * loadbalancer: instance of a data_models.LoadBalancer
+
+* get_network(network_id):
+
+ * Retrieves the network from network_id
+ * network_id = id of a network to retrieve
+ * returns = Network data model
+ * raises NetworkException, NetworkNotFound
+
+* get_subnet(subnet_id):
+
+ * Retrieves the subnet from subnet_id
+ * subnet_id = id of a subnet to retrieve
+ * returns = Subnet data model
+ * raises NetworkException, SubnetNotFound
+
+* get_port(port_id):
+
+ * Retrieves the port from port_id
+ * port_id = id of a port to retrieve
+ * returns = Port data model
+ * raises NetworkException, PortNotFound
+
+* failover_preparation(amphora):
+
+ * Prepare an amphora for failover
+ * amphora = amphora data model
+ * returns = None
+ * raises PortNotFound
+
+Alternatives
+------------
+
+* Straight Neutron Interface (networks, subnets, ports, floatingips)
+* Straight Nova-Network Interface (network, fixed_ips, floatingips)
+
+Data model impact
+-----------------
+
+* The Interface data model defined above will just be a class. We may later
+ decide that it needs to be stored in the database, but we can optimize on
+ that in a later review if needed.
+
+REST API impact
+---------------
+
+None
+
+Security impact
+---------------
+
+None
+
+Notifications impact
+--------------------
+
+None
+
+Other end user impact
+---------------------
+
+None
+
+Performance Impact
+------------------
+
+None
+
+Other deployer impact
+---------------------
+
+Need a service account to own the resources these methods create.
+
+Developer impact
+----------------
+
+This will create an interface through which other code will create
+network resources.
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+brandon-logan
+
+Work Items
+----------
+
+Define interface
+
+
+Dependencies
+============
+
+None
+
+
+Testing
+=======
+
+None
+
+
+Documentation Impact
+====================
+
+Just docstrings on methods.
+
+
+References
+==========
+
+None
diff --git a/specs/version0.5/nova-compute-driver.rst b/specs/version0.5/nova-compute-driver.rst
new file mode 100644
index 0000000000..4aa70116a7
--- /dev/null
+++ b/specs/version0.5/nova-compute-driver.rst
@@ -0,0 +1,110 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+===================
+Nova Compute Driver
+===================
+
+Blueprint: https://blueprints.launchpad.net/octavia/+spec/nova-compute-driver
+
+Octavia needs to interact with nova for creation of VMs for this version. This
+spec will flesh out all the methods described in the compute-driver-interface
+with nova VM-specific commands.
+
+Problem description
+===================
+This spec details operations for creating, updating, and modifying amphora that
+will hold the actual load balancer. It will utilize the nova client Python API
+version 3 for the nova-specific requests and commands.
+
+Proposed change
+===============
+Expose nova operations
+
+- Build: Will need to build a virtual machine according to configuration
+ parameters
+
+ - Will leverage the nova client ServerManager method "create" to build a
+ server
+
+- Get: Will need to retrieve details of the virtual machine from nova
+
+ - Will leverage the nova client ServerManager method "get" to retrieve a
+ server, and return an amphora object
+
+- Delete: Will need to remove a virtual machine
+
+ - Will leverage the nova client ServerManager method "delete" for removal of
+ server
+
+- Status: Will need to retrieve the status of the virtual machine
+
+ - Will leverage the aforementioned get call to retrieve status of the server
+
+Alternatives
+------------
+None
+
+Data model impact
+-----------------
+Add fields to existing Amphora object
+
+REST API impact
+---------------
+None
+
+Security impact
+---------------
+None
+
+Notifications impact
+--------------------
+None
+
+Other end user impact
+---------------------
+None
+
+Performance Impact
+------------------
+None
+
+Other deployer impact
+---------------------
+None
+
+Developer impact
+----------------
+Will need a nova service account and necessary credentials stored in config
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+trevor-vardeman
+
+Work Items
+----------
+Expose nova operations
+
+Dependencies
+============
+compute-driver-interface
+
+Testing
+=======
+Unit tests
+Functional tests
+
+Documentation Impact
+====================
+None
+
+References
+==========
+https://blueprints.launchpad.net/octavia/+spec/nova-compute-driver
+https://docs.openstack.org/python-novaclient/latest/reference/api/index.html
diff --git a/specs/version0.5/operator-api.rst b/specs/version0.5/operator-api.rst
new file mode 100644
index 0000000000..4c2d7c3a56
--- /dev/null
+++ b/specs/version0.5/operator-api.rst
@@ -0,0 +1,464 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+==========================================
+Octavia Operator API Foundation
+==========================================
+
+https://blueprints.launchpad.net/octavia/+spec/operator-api
+
+Octavia needs the foundation of the Operator API created. This spec is not
+meant to address every piece of functionality needed in the operator API, only
+to create a solid foundation to iterate on in the future.
+
+Problem description
+===================
+This is needed because this will be the mechanism to actually communicate with
+Octavia. Doing CRUD operations on all entities will be needed ASAP so that the
+system can be thoroughly tested.
+
+Proposed change
+===============
+Expose Pecan resources
+- Defined explicitly below in the REST API Impact
+
+Create WSME types
+- These will be responsible for request validation and deserialization, and
+also response serialization
+
+Set up paste deploy
+- This will be used in the future to interact with keystone and other
+middleware; however, at first this will not have any authentication, so
+tenant_ids will just have to be made-up UUIDs.
+
+Create a handler interface and a noop logging implementation
+- A handler interface will be created. This abstraction layer is needed
+because calling the controller in the resource layer will work for 0.5, but
+1.0 will send it off to a queue.
With this abstraction layer we can easily +swap out a 0.5 controller with a 1.0 controller. + +Call database repositories +- Most if not all resources will make a call to the database + +Call handler +- Only create, update, and delete operations should call the handler + +Alternatives +------------ +None + +Data model impact +----------------- +Will need to add some methods to the database repository + +REST API impact +--------------- +Exposed Resources and Methods + +POST /loadbalancers +* Successful Status Code - 202 +* JSON Request Body Attributes +** vip - another JSON object with one required attribute from the following +*** net_port_id - uuid +*** subnet_id - uuid +*** floating_ip_id - uuid +*** floating_ip_network_id - uuid +** tenant_id - string - optional - default "0" * 36 (for now) +** name - string - optional - default null +** description - string - optional - default null +** enabled - boolean - optional - default true +* JSON Response Body Attributes +** id - uuid +** vip - another JSON object +*** net_port_id - uuid +*** subnet_id - uuid +*** floating_ip_id - uuid +*** floating_ip_network_id - uuid +** tenant_id - string +** name - string +** description - string +** enabled - boolean +** provisioning_status - string enum - (ACTIVE, PENDING_CREATE, PENDING_UPDATE, +PENDING_DELETE, DELETED, ERROR) +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +PUT /loadbalancers/{lb_id} +* Successful Status Code - 202 +* JSON Request Body Attributes +** name - string +** description - string +** enabled - boolean +* JSON Response Body Attributes +** id - uuid +** vip - another JSON object +*** net_port_id - uuid +*** subnet_id - uuid +*** floating_ip_id - uuid +*** floating_ip_network_id - uuid +** tenant_id - string +** name - string +** description - string +** enabled - boolean +** provisioning_status - string enum - (ACTIVE, PENDING_CREATE, PENDING_UPDATE, +PENDING_DELETE, DELETED, ERROR) +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +DELETE /loadbalancers/{lb_id} +* Successful Status Code - 202 +* No response or request body + +GET /loadbalancers/{lb_id} +* Successful Status Code - 200 +* JSON Response Body Attributes +** id - uuid +** vip - another JSON object +*** net_port_id - uuid +*** subnet_id - uuid +*** floating_ip_id - uuid +*** floating_ip_network_id - uuid +** tenant_id - string +** name - string +** description - string +** enabled - boolean +** provisioning_status - string enum - (ACTIVE, PENDING_CREATE, PENDING_UPDATE, +PENDING_DELETE, DELETED, ERROR) +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +GET /loadbalancers?tenant_id +* Successful Status Code - 200 +* tenant_id is an optional query parameter to filter by tenant_id +* returns a list of load balancers + + +POST /loadbalancers/{lb_id}/listeners +* Successful Status Code - 202 +* JSON Request Body Attributes +** protocol - string enum - (TCP, HTTP, HTTPS) - required +** protocol_port - integer - required +** connection_limit - integer - optional +** default_tls_container_id - uuid - optional +** tenant_id - string - optional - default "0" * 36 (for now) +** name - string - optional - default null +** description - string - optional - default null +** enabled - boolean - optional - default true +* JSON Response Body Attributes +** id - uuid +** protocol - string enum - (TCP, HTTP, HTTPS) +** protocol_port - integer +** connection_limit - integer +** default_tls_container_id - uuid +** tenant_id - string - optional +** name - string - 
optional +** description - string - optional +** enabled - boolean - optional +** provisioning_status - string enum - (ACTIVE, PENDING_CREATE, PENDING_UPDATE, +PENDING_DELETE, DELETED, ERROR) +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +PUT /loadbalancers/{lb_id}/listeners/{listener_id} +* Successful Status Code - 202 +* JSON Request Body Attributes +** protocol - string enum +** protocol_port - integer +** connection_limit - integer +** default_tls_container_id - uuid +** name - string +** description - string +** enabled - boolean +* JSON Response Body Attributes +** id - uuid +** protocol - string enum - (TCP, HTTP, HTTPS) +** protocol_port - integer +** connection_limit - integer +** default_tls_container_id - uuid +** tenant_id - string - optional +** name - string - optional +** description - string - optional +** enabled - boolean - optional +** provisioning_status - string enum - (ACTIVE, PENDING_CREATE, PENDING_UPDATE, +PENDING_DELETE, DELETED, ERROR) +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +DELETE /loadbalancers/{lb_id}/listeners/{listener_id} +* Successful Status Code - 202 +* No response or request body + +GET /loadbalancers/{lb_id}/listeners/{listener_id} +* Successful Status Code - 200 +* JSON Response Body Attributes +** id - uuid +** protocol - string enum - (TCP, HTTP, HTTPS) +** protocol_port - integer +** connection_limit - integer +** default_tls_container_id - uuid +** tenant_id - string - optional +** name - string - optional +** description - string - optional +** enabled - boolean - optional +** provisioning_status - string enum - (ACTIVE, PENDING_CREATE, PENDING_UPDATE, +PENDING_DELETE, DELETED, ERROR) +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +GET /loadbalancers/{lb_id}/listeners +* Successful Status Code - 200 +* A list of listeners on load balancer lb_id + + +POST /loadbalancers/{lb_id}/listeners/{listener_id}/pools +* Successful Status Code - 202 +* JSON Request Body Attributes +** protocol - string enum - (TCP, HTTP, HTTPS) - required +** lb_algorithm - string enum - (ROUND_ROBIN, LEAST_CONNECTIONS, +RANDOM) - required +** session_persistence - JSON object - optional +*** type - string enum - (SOURCE_IP, HTTP_COOKIE) - required +*** cookie_name - string - required for HTTP_COOKIE type +** tenant_id - string - optional - default "0" * 36 (for now) +** name - string - optional - default null +** description - string - optional - default null +** enabled - boolean - optional - default true +* JSON Response Body Attributes +** id - uuid +** protocol - string enum - (TCP, HTTP, HTTPS) +** lb_algorithm - string enum - (ROUND_ROBIN, LEAST_CONNECTIONS, RANDOM) +** session_persistence - JSON object +*** type - string enum - (SOURCE_IP, HTTP_COOKIE) +*** cookie_name - string +** name - string +** description - string +** enabled - boolean +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +PUT /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id} +* Successful Status Code - 202 +* JSON Request Body Attributes +** protocol - string enum - (TCP, HTTP, HTTPS) +** lb_algorithm - string enum - (ROUND_ROBIN, LEAST_CONNECTIONS, RANDOM) +** session_persistence - JSON object +*** type - string enum - (SOURCE_IP, HTTP_COOKIE) +*** cookie_name - string +** name - string +** description - string +** enabled - boolean +* JSON Response Body Attributes +** id - uuid +** protocol - string enum - (TCP, HTTP, HTTPS) +** lb_algorithm - string enum - (ROUND_ROBIN, 
LEAST_CONNECTIONS, RANDOM) +** session_persistence - JSON object +*** type - string enum - (SOURCE_IP, HTTP_COOKIE) +*** cookie_name - string +** name - string +** description - string +** enabled - boolean +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +DELETE /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id} +* Successful Status Code - 202 +No request or response body + +GET /loadbalancers/{lb_id}/listeners/{listener_id}/pools/{pool_id} +* Successful Status Code - 200 +* JSON Response Body Attributes +** id - uuid +** protocol - string enum - (TCP, HTTP, HTTPS) +** lb_algorithm - string enum - (ROUND_ROBIN, LEAST_CONNECTIONS, RANDOM) +** session_persistence - JSON object +*** type - string enum - (SOURCE_IP, HTTP_COOKIE) +*** cookie_name - string +** name - string +** description - string +** enabled - boolean +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +GET /loadbalancers/{lb_id}/listeners/{listener_id}/pools +* Successful Status Code - 200 +* Returns a list of pools + + +POST /loadbalancers/{lb_id}/listeners/{listener_id}/ +pools/{pool_id}/healthmonitor +* Successful Status Code - 202 +* JSON Request Body Attributes +** type - string enum - (HTTP, HTTPS, TCP) - required +** delay - integer - required +** timeout - integer - required +** fall_threshold - integer - required +** rise_threshold - integer - required +** http_method - string enum - (GET, POST, PUT, DELETE) - required for HTTP(S) +** url_path - string - required for HTTP(S) +** expected_codes - comma delimited string - required for HTTP(S) +** enabled - boolean - required - default true +* JSON Response Body Attributes +** type - string enum - (HTTP, HTTPS, TCP) +** delay - integer +** timeout - integer +** fall_threshold - integer +** rise_threshold - integer +** http_method - string enum - (GET, POST, PUT, DELETE) +** url_path - string +** expected_codes - comma delimited string +** enabled - boolean + +PUT /loadbalancers/{lb_id}/listeners/{listener_id}/ +pools/{pool_id}/healthmonitor +* Successful Status Code - 202 +* JSON Request Body Attributes +** type - string enum - (HTTP, HTTPS, TCP) +** delay - integer +** timeout - integer +** fall_threshold - integer +** rise_threshold - integer +** http_method - string enum - (GET, POST, PUT, DELETE) +** url_path - string +** expected_codes - comma delimited string +** enabled - boolean +* JSON Response Body Attributes +** type - string enum - (HTTP, HTTPS, TCP) +** delay - integer +** timeout - integer +** fall_threshold - integer +** rise_threshold - integer +** http_method - string enum - (GET, POST, PUT, DELETE) +** url_path - string +** expected_codes - comma delimited string +** enabled - boolean + +DELETE /loadbalancers/{lb_id}/listeners/{listener_id}/ +pools/{pool_id}/healthmonitor +* Successful Status Code - 202 +No request or response body + +GET /loadbalancers/{lb_id}/listeners/{listener_id}/ +pools/{pool_id}/healthmonitor +* Successful Status Code - 200 +* JSON Response Body Attributes +** type - string enum - (HTTP, HTTPS, TCP) +** delay - integer +** timeout - integer +** fall_threshold - integer +** rise_threshold - integer +** http_method - string enum - (GET, POST, PUT, DELETE) +** url_path - string +** expected_codes - comma delimited string +** enabled - boolean + + +POST /loadbalancers/{lb_id}/listeners/{listener_id}/ +pools/{pool_id}/members +* Successful Status Code - 202 +* JSON Request Body Attributes +** ip_address - IP Address - required +** protocol_port - integer - required +** 
weight - integer - optional +** subnet_id - uuid - optional +** tenant_id - string - optional - default "0" * 36 (for now) +** enabled - boolean - optional - default true +* JSON Response Body Attributes +** id - uuid +** ip_address - IP Address +** protocol_port - integer +** weight - integer +** subnet_id - uuid +** tenant_id - string +** enabled - boolean +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +PUT /loadbalancers/{lb_id}/listeners/{listener_id}/ +pools/{pool_id}/members/{member_id} +* Successful Status Code - 202 +* JSON Request Body Attributes +** protocol_port - integer - required +** weight - integer - optional +** enabled - boolean - optional - default true +* JSON Response Body Attributes +** id - uuid +** ip_address - IP Address +** protocol_port - integer +** weight - integer +** subnet_id - uuid +** tenant_id - string +** enabled - boolean +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +DELETE /loadbalancers/{lb_id}/listeners/{listener_id}/ +pools/{pool_id}/members/{member_id} +* Successful Status Code - 202 +No request or response body + +GET /loadbalancers/{lb_id}/listeners/{listener_id}/ +pools/{pool_id}/members/{member_id} +* Successful Status Code - 200 +* JSON Response Body Attributes +** id - uuid +** ip_address - IP Address +** protocol_port - integer +** weight - integer +** subnet_id - uuid +** tenant_id - string +** enabled - boolean +** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR) + +GET /loadbalancers/{lb_id}/listeners/{listener_id}/ +pools/{pool_id}/members +* Successful Status Code - 200 +Returns a list of members + +Security impact +--------------- +No authentication with keystone + +Notifications impact +-------------------- +None + +Other end user impact +--------------------- +Not ready for end user + +Performance Impact +------------------ +None + +Other deployer impact +--------------------- +None + +Developer impact +---------------- +None + +Implementation +============== + +Assignee(s) +----------- +brandon-logan + +Work Items +---------- +Expose Pecan resources +Create WSME types +Setup paste deploy +Create a handler interface and a noop logging implementation +Call database repositories +Call handler + +Dependencies +============ +db-repositories + +Testing +======= +Unit tests + +Documentation Impact +==================== +None + +References +========== +None + diff --git a/specs/version0.5/queue-consumer.rst b/specs/version0.5/queue-consumer.rst new file mode 100644 index 0000000000..7641247188 --- /dev/null +++ b/specs/version0.5/queue-consumer.rst @@ -0,0 +1,189 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +============== +Queue Consumer +============== +https://blueprints.launchpad.net/octavia/+spec/queue-consumer + +This blueprint describes how Oslo messages are consumed, processed and +delegated from the API-controller queue to the controller worker component of +Octavia. The component that is responsible for these activities is called the +Queue Consumer. + +Problem description +=================== +Oslo messages need to be consumed by the controller and delegated to the proper +controller worker. Something needs to interface with the API-controller queue +and spawn the controller workers. That "something" is what we are calling the +Queue Consumer. 
+
+Proposed change
+===============
+The major component of the Queue Consumer will be a class that acts as a
+consumer of Oslo messages. It will be responsible for configuring and starting
+a server that is then able to receive messages. There will be a one-to-one
+mapping between API methods and consumer methods (see code snippet below).
+Corresponding controller workers will be spawned depending on which consumer
+methods are called.
+
+The threading will be handled by Oslo messaging using the 'eventlet' executor.
+Using the 'eventlet' executor will allow for message throttling and removes
+the need for the controller workers to manage threads. The benefit of using the
+'eventlet' executor is that the Queue Consumer will not have to spawn threads
+at all, since every message received will be in its own thread already. This
+means that the Queue Consumer doesn't spawn a controller worker; rather, it
+just starts the execution of the deploy code.
+
+An 'oslo_messaging' configuration section will need to be added to octavia.conf
+for Oslo messaging options. For the Queue Consumer, the 'rpc_thread_pool_size'
+config option will need to be added. This option will determine how many
+consumer threads will be able to read from the queue at any given time (per
+consumer instance) and serve as a throttling mechanism for message consumption.
+For example, if 'rpc_thread_pool_size' is set to 1, then only one
+controller worker will be able to conduct work. When that controller worker
+completes its task, a new message can be consumed and a new controller
+worker flow started.
+
+Below are the planned interface methods for the queue consumer. The Queue
+Consumer will be listening on the **OCTAVIA_PROV** (short for octavia
+provisioning) topic. The *context* parameter will be supplied along with an
+identifier such as a load balancer id, listener id, etc. relevant to the
+particular interface method. The *context* parameter is a dictionary and is
+reserved for metadata. For example, the Neutron LBaaS agent leverages this
+parameter to send additional request information. Additionally, update methods
+include a *\*_updates* parameter that includes the changes that need to be
+made. Thus, the controller workers responsible for the update actions will
+need to query the database to retrieve the old state and combine it with the
+updates to provision appropriately. If a rollback or exception occurs, the
+controller worker will only need to update the provisioning status to **ERROR**
+and will not need to worry about making database changes to attributes of the
+object being updated.
+
+.. code:: python
+
+    def create_load_balancer(self, context, load_balancer_id):
+        pass
+
+    def update_load_balancer(self, context, load_balancer_updates,
+                             load_balancer_id):
+        pass
+
+    def delete_load_balancer(self, context, load_balancer_id):
+        pass
+
+    def create_listener(self, context, listener_id):
+        pass
+
+    def update_listener(self, context, listener_updates, listener_id):
+        pass
+
+    def delete_listener(self, context, listener_id):
+        pass
+
+    def create_pool(self, context, pool_id):
+        pass
+
+    def update_pool(self, context, pool_updates, pool_id):
+        pass
+
+    def delete_pool(self, context, pool_id):
+        pass
+
+    def create_health_monitor(self, context, health_monitor_id):
+        pass
+
+    def update_health_monitor(self, context, health_monitor_updates,
+                              health_monitor_id):
+        pass
+
+    def delete_health_monitor(self, context, health_monitor_id):
+        pass
+
+    def create_member(self, context, member_id):
+        pass
+
+    def update_member(self, context, member_updates, member_id):
+        pass
+
+    def delete_member(self, context, member_id):
+        pass
+
+Alternatives
+------------
+There are a variety of ways to consume from Oslo messaging. For example,
+instead of having a single consumer on the controller, we could have multiple
+consumers (e.g. one for CREATE messages, one for UPDATE messages, etc.).
+However, since we merely need something to pass messages off to controller
+workers, other options are overkill.
+
+Data model impact
+-----------------
+While there is no direct data model impact, it is worth noting that the API
+will not be persisting updates to the database. Rather, delta updates will pass
+from the user all the way to the controller worker. Thus, when the controller
+worker successfully completes the prescribed action, only then will it persist
+the updates to the database. No API changes are necessary for create and update
+actions.
+
+REST API impact
+---------------
+None
+
+Security impact
+---------------
+None
+
+Notifications impact
+--------------------
+None
+
+Other end user impact
+---------------------
+None
+
+Performance Impact
+------------------
+The only performance-related item is queue throttling. This is done by design
+so that operators can safely throttle incoming messages depending on their
+specific needs.
+
+Other deployer impact
+---------------------
+Configuration options will need to be added to octavia.conf. Please see above
+for more details.
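+
+For orientation, the options above come together in server wiring along these
+lines (the consumer class name and overall module layout are assumptions, not
+part of this spec; the calls shown are standard Oslo messaging usage):
+
+.. code:: python
+
+    import oslo_messaging as messaging
+    from oslo_config import cfg
+
+    def start_consumer():
+        # Transport options, including the throttling-related
+        # rpc_thread_pool_size, are read from octavia.conf.
+        transport = messaging.get_transport(cfg.CONF)
+        target = messaging.Target(topic='OCTAVIA_PROV')
+        # QueueConsumer would expose the endpoint methods listed above.
+        server = messaging.get_rpc_server(transport, target,
+                                          [QueueConsumer()],
+                                          executor='eventlet')
+        server.start()
+        server.wait()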
+ +Developer impact +---------------- +None + +Implementation +============== + +Assignee(s) +----------- +jorge-miramontes + +Work Items +---------- +- Implement consumer class +- Add executable queue-consumer.py to bin directory + +Dependencies +============ +https://blueprints.launchpad.net/octavia/+spec/controller-worker + +Testing +======= +Unit tests + +Documentation Impact +==================== +None + +References +========== +None diff --git a/specs/version0.5/tls-data-security-1.diag b/specs/version0.5/tls-data-security-1.diag new file mode 100644 index 0000000000..6af9c12cc2 --- /dev/null +++ b/specs/version0.5/tls-data-security-1.diag @@ -0,0 +1,16 @@ +seqdiag { + span_height = 10; + === If Certificate is pre-stored in Barbican === + User => Octavia [label="Create LB with TLS (passing tls_certificate_id)", note="HTTPS", return="202/400/401"] { + Octavia => Barbican [label="Fetch Certificate Container", note="HTTPS", return="Certificate Data"]; + } + === If Certificate is passed directly to Octavia === + User => Octavia [label="Create LB with TLS (passing tls_certificate, tls_private_key, etc)", note="HTTPS", return=" +202/400/401"] { + Octavia => Barbican [label="Store Secrets / Certificate Container", note="HTTPS", return="tls_certificate_id"]; + } + Octavia -> Octavia [label="Store tls_certificate_id"]; + === After certificate handling, in both cases === + Octavia -> Octavia [label="Fetch Amphora from Spare Pool"]; + Octavia => "Amphora API" [label="Configure Amphora", note="HTTPS", return="Update LB Status"]; +} diff --git a/specs/version0.5/tls-data-security-2.diag b/specs/version0.5/tls-data-security-2.diag new file mode 100644 index 0000000000..632b5c25e8 --- /dev/null +++ b/specs/version0.5/tls-data-security-2.diag @@ -0,0 +1,34 @@ +seqdiag { + span_height = 10; + activation = none; + Barbican; + === In Octavia === + Octavia -> Octavia [label="Get a new cert/key from CertGenerator"]; + Octavia -> "Compute Driver" [label="Create new Amphora"] { + "Compute Driver" -> Nova [label="Create instance", note="Ref Impl, ConfigDrive: Octavia Controller certificate and IP, and a generated/signed cert + private key"]; + } + Octavia => "Compute Driver" [label="Wait for Amphora Ready"]; + loop { + "Compute Driver" => Nova [label="Poll for ACTIVE Amphora", note="Ref Impl", return="Amphora Management IP"]; + } + Octavia -> Octavia [label="Store Amphora IP"]; + Octavia => "Amp Driver" [label="Run Amphora Self-Test", return="PASS/FAIL"] { + "Amp Driver" -> "Amp Driver" [label="Poll DB until first Heartbeat arrives", note="Ref Impl"]; + "Amp Driver" => "Amphora API" [label="Run Self-Test", note="Ref Impl"] { + === If Self-test passes === + Octavia -> Octavia [label="Add Amphora to standby pool"]; + === If Self-test fails === + Octavia -> Octavia [label="Delete Amphora"]; + } + } + + === In the Amphora (Ref Impl) === + Amphora -> Amphora [label="Start Services (API, Heartbeat)"]; + "Amp Heartbeat" -> "Amp Driver" [label="Announce", note="UDP"] { + "Amp Driver" -> "Amp Driver" [label="Verify Amphora by Signed UDP Heartbeat"]; + === If Verification fails === + "Amp Driver" -> "Amp Driver" [label="Log and Ignore"]; + === If Verification succeeds === + "Amp Driver" -> "Health Manager" [label="Store Heartbeat"]; + } +} diff --git a/specs/version0.5/tls-data-security.rst b/specs/version0.5/tls-data-security.rst new file mode 100644 index 0000000000..0e9ca1b272 --- /dev/null +++ b/specs/version0.5/tls-data-security.rst @@ -0,0 +1,167 @@ +.. 
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+==============================
+TLS Data Security and Barbican
+==============================
+Launchpad blueprint:
+
+https://blueprints.launchpad.net/octavia/+spec/tls-data-security
+
+Octavia will have some need of secure storage for TLS-related data. This BP
+is intended to identify all of the data that needs secure storage, or any
+other interaction that will require the use of Barbican or another secure
+solution.
+
+Problem description
+===================
+1. Octavia will support TLS termination (including SNI), which will require
+us to store and retrieve certificates and private keys from a secure
+repository.
+
+2. Octavia will communicate with its Amphorae using TLS, so each Amphora
+will need a certificate for the controller to validate.
+
+3. Octavia will need TLS data for exposing its own API via HTTPS.
+
+Proposed change
+===============
+The initial supported implementation for TLS-related functions will be
+Barbican, but the interface will be generic such that other implementations
+could be created later.
+
+.. Note:: a sequence diagram describing the communication between the User,
+   Octavia, Barbican and the Amphora API was removed; the diagram is
+   still available in the `documentation of older stable branches
+   `_.
+
+
+1. Create a CertificateManager interface for storing and retrieving
+certificate and private key pairs (and intermediate certs / private key
+passphrase). Users will pass their TLS data to Octavia in the form of a
+certificate_id, which is a reference to their data in some secure service.
+Octavia will store that certificate_id for each Listener/SNI and will
+retrieve the data when necessary. (Barbican specific: users will need to add
+Octavia's user account as an authorized user on the Container and all
+Secrets [1] so we can fetch the data on their behalf.)
+
+We will need to validate the certificate data (including key and
+intermediates) when we initially receive it, and will assume that it remains
+unchanged for the lifespan of the LB (in Barbican the data is immutable, so
+this is a safe assumption; it is unclear how well this will work for other
+services). In the case of invalid TLS data, we will reject the request with a
+400 (if it is an initial create) or else put the LB into ERROR status (if it
+happens on a failover event or during some other non-interactive scenario).
+
+.. Note:: a sequence diagram describing the communication between the Octavia
+   components was removed; the diagram is still available in the
+   `documentation of older stable branches
+   `_.
+
+
+2. Create a CertificateGenerator interface to generate certificates from
+CSRs. When the controller creates an Amphora, it will generate a private key
+and a CSR, generate a signed certificate from the CSR, and include the
+private key and signed certificate in a ConfigDrive for the new Amphora. It
+will also include a copy of the Controller's certificate on the ConfigDrive.
+All future communications with the Amphora will do certificate validation
+based on these certificates. For the Amphora, this will be based on our
+(private) certificate authority and the CN of the Amphora's cert matching the
+ID of the Amphora. For the Controller, the cert should be a complete match
+with the version provided.
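+
+A minimal sketch of what the two interfaces might look like follows; the
+method names and signatures here are illustrative assumptions, not final:
+
+.. code:: python
+
+    import abc
+
+
+    class CertificateManager(abc.ABC):
+        """Stores and retrieves certificate/private-key bundles."""
+
+        @abc.abstractmethod
+        def store_cert(self, certificate, private_key, intermediates=None,
+                       private_key_passphrase=None):
+            """Persist TLS data in the backend; return a certificate_id."""
+
+        @abc.abstractmethod
+        def get_cert(self, certificate_id):
+            """Fetch the TLS data referenced by certificate_id."""
+
+        @abc.abstractmethod
+        def delete_cert(self, certificate_id):
+            """Remove the TLS data from the backend."""
+
+
+    class CertificateGenerator(abc.ABC):
+        """Signs certificates generated from CSRs."""
+
+        @abc.abstractmethod
+        def sign_cert(self, csr, validity):
+            """Return a certificate signed by our (private) CA."""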
+
+(The CertificateManager and CertificateGenerator interfaces are separate
+because while Barbican can perform both functions, future implementations
+may need to use two distinct services to achieve both.)
+
+3. The key/cert for the main Octavia API/controller should be maintained
+manually by the server operators using whatever configuration management
+they choose. We should not need to use a specific external repo for this.
+The trusted CA Cert will also need to be retrieved from Barbican and manually
+loaded in the config.
+
+Alternatives
+------------
+We could skip the interface and just use Barbican directly, but that would be
+diverging from what seems to be the accepted OpenStack model for Secret Store
+integration.
+
+We could also store everything locally or in the DB, but that isn't a real
+option for production systems because it is incredibly insecure (though there
+will be a "dummy driver" that operates this way for development purposes).
+
+Data model impact
+-----------------
+Nothing new; the models for this should already be in place. Some of the
+columns/classes might need to be renamed more generically (currently there is
+a tls_container_id column, which would become tls_certificate_id to be more
+generic).
+
+REST API impact
+---------------
+None
+
+Security impact
+---------------
+Using Barbican is considered secure.
+
+Notifications impact
+--------------------
+None
+
+Other end user impact
+---------------------
+None
+
+Performance Impact
+------------------
+Adding an external touchpoint (a certificate signing service) to the Amphora
+spin-up workflow will increase the average time for readying an Amphora. This
+shouldn't be a huge problem if the standby-pool size is sufficient for the
+particular deployment.
+
+Other deployer impact
+---------------------
+None
+
+Developer impact
+----------------
+None
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+Adam Harwell (adam-harwell)
+
+Work Items
+----------
+1. Create CertificateManager interface.
+
+2. Create CertificateGenerator interface.
+
+3. Create BarbicanCertificateManager implementation.
+
+4. Create BarbicanCertificateGenerator implementation.
+
+5. Create unit tests!
+
+Dependencies
+============
+This work will depend on the OpenStack Barbican project, including some
+features that are still only at the blueprint stage.
+
+Testing
+=======
+Unit tests will be written for the new interfaces and for the Barbican
+implementations.
+
+Documentation Impact
+====================
+Documentation changes will be primarily internal.
+
+References
+==========
+.. line-block::
+    [1] https://review.opendev.org/#/c/127353/
+    [2] https://review.opendev.org/#/c/129048/
diff --git a/specs/version0.8/active_passive_loadbalancer.rst b/specs/version0.8/active_passive_loadbalancer.rst
new file mode 100644
index 0000000000..edbf10294a
--- /dev/null
+++ b/specs/version0.8/active_passive_loadbalancer.rst
@@ -0,0 +1,389 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+=======================================
+Active-Standby Amphora Setup using VRRP
+=======================================
+
+https://blueprints.launchpad.net/octavia/+spec/activepassiveamphora
+
+This blueprint describes how Octavia implements its Active/Standby
+solution. It describes the high-level topology and the proposed code
+changes from the currently supported Single topology to realize the
+highly available loadbalancer scenario.
+ +Problem description +=================== + +A tenant should be able to start high availability loadbalancer(s) for the +tenant's backend services as follows: + +* The operator should be able to configure an Active/Standby topology through + an octavia configuration file, which the loadbalancer shall support. An + Active/Standby topology shall be supported by Octavia in addition to the + Single topology that is currently supported. + +* In Active/Standby, two Amphorae shall host a replicated configuration of the + load balancing services. Both amphorae will also deploy a Virtual Router + Redundancy Protocol (VRRP) implementation [2]. + +* Upon failure of the master amphora, the backup one shall seamlessly take over + the load balancing functions. After the master amphora changes to a healthy + status, the backup amphora shall give up the load balancing functions to the + master again (see [2] section 3 for details on master election protocol). + +* Fail-overs shall be seamless to end-users and fail-over time should be + minimized. + +* The following diagram illustrates the Active/Standby topology. + +asciiflow:: + + +--------+ + | Tenant | + |Service | + | (1) | + +--------+ +-----------+ + | +--------+ +----+ Master +----+ + | | Tenant | |VIP | Amphora |IP1 | + | |Service | +--+-+-----+-----+-+--+ + | | (M) | | |MGMT |VRRP | | + | +--------+ | | IP | IP1 | | + | | Tenant | +--+--++----+ | + | | Network | | | | +-----------------+ Floating +---------+ + v-v-------------^----+---v-^----v-^-+ Router | IP | | + ^---------------+----v-^---+------+-+Floating <-> VIP <----------+ Internet| + | Management | | | | | | | | + | (MGMT) | | | | +-----------------+ +---------+ + | Network | +--+--++----+ | + | Paired |MGMT |VRRP | | + | | | IP | IP2 | | + +-----------+ | +-----+-----+ | + | Octavia | ++---+ Backup +-+--+ + |Controller | |VIP | Amphora |IP2 | + | (s) | +----+-----------+----+ + +-----------+ + +* The newly introduced VRRP IPs shall communicate on the same tenant network + (see security impact for more details). + +* The existing Haproxy Jinja configuration template shall include "peer" + setup for state synchronization over the VRRP IP addresses. + +* The VRRP IP addresses shall work with both IPv4 and IPv6. + +Proposed change +=============== + +The Active/Standby loadbalancers require the following high level changes: + +* Add support of VRRP in the amphora base image through Keepalived. + +* Extend the controller worker to be able to spawn N amphorae associated with + the same loadbalancer on N different compute nodes (This takes into account + future work on Active/Active topology). The amphorae shall be allowed to + use the VIP through "allow address pairing". These amphorae shall replicate + the same listeners, and pools configuration. Note: topology is a property + of a load balancer and not of one of its amphorae. + +* Extend the amphora driver interface, the amphora REST driver, and Jinja + configuration templates for the newly introduced VRRP service [4]. + +* Develop a Keepalived driver. + +* Extend the network driver to become aware of the different loadbalancer + topologies and add support of network creation. The network driver shall + also pair the different amphorae in a given topology to the same VIP address. + +* Extend the controller worker to build the right flow/sub-flows according to + the given topology. The controller worker is also responsible of creating + the correct stores needed by other flow/sub-flows. 
+
+* Extend the Octavia configuration and Operator API to support the
+  Active/Standby topology.
+
+* MINOR: Extend the Health Manager to be aware of the role of the amphora
+  (Master/Backup) [9]. If the health manager decides to spawn a new amphora
+  to replace an unhealthy one (while a backup amphora is already in service),
+  it must replicate the same VRRP priorities, IDs, and authentication
+  credentials to keep the loadbalancer in its appropriate configuration.
+  Listeners associated with this load balancer shall be put in a DEGRADED
+  provisioning state.
+
+Alternatives
+------------
+
+We could use heartbeats as an alternative to VRRP, which is also a widely
+adopted solution. Heartbeats better suit redundant file servers, filesystems,
+and databases than network services such as routers, firewalls, and
+loadbalancers. Willy Tarreau, the creator of HAProxy, provides a detailed
+view on the major differences between heartbeats and VRRP in [5].
+
+Data model impact
+-----------------
+
+The data model of the Octavia database shall be impacted as follows (a sketch
+of a possible migration follows this list):
+
+* A new column in the load_balancer table shall indicate its topology. The
+  topology field takes one of the values SINGLE or ACTIVE/STANDBY.
+
+* A new column in the amphora table shall indicate an amphora's role in the
+  topology. If the topology is SINGLE, the amphora role shall be STANDALONE.
+  If the topology is ACTIVE/STANDBY, the amphora role shall be either MASTER
+  or BACKUP. This role field will also be of use for the Active/Active
+  topology.
+
+* New value tables for the loadbalancer topology and the amphora roles.
+
+* New columns in the amphora table shall indicate the VRRP priority, the VRRP
+  ID, and the VRRP interface of the amphora.
+
+* A new column in the listener table shall indicate the TCP port used for
+  listener internal data synchronization.
+
+* VRRP groups define the common VRRP configurations for all listeners on an
+  amphora. A new table shall hold each VRRP group's main configuration
+  primitives, including at least VRRP authentication information, role,
+  priority, and advertisement interval. Each Active/Standby loadbalancer
+  defines one and only one VRRP group.
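+
+The following is a minimal sketch of what an Alembic migration for these
+changes might look like; the exact column types and nullability are
+assumptions to be settled during implementation:
+
+.. code:: python
+
+    """Sketch of a migration for the Active/Standby data model changes."""
+
+    import sqlalchemy as sa
+    from alembic import op
+
+
+    def upgrade():
+        # Topology is a property of the load balancer, not of an amphora.
+        op.add_column(
+            'load_balancer',
+            sa.Column('topology', sa.String(36), nullable=True))
+        # Role and VRRP settings are per amphora.
+        op.add_column(
+            'amphora', sa.Column('role', sa.String(36), nullable=True))
+        op.add_column(
+            'amphora', sa.Column('vrrp_priority', sa.Integer, nullable=True))
+        op.add_column(
+            'amphora', sa.Column('vrrp_id', sa.Integer, nullable=True))
+        op.add_column(
+            'amphora',
+            sa.Column('vrrp_interface', sa.String(16), nullable=True))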
+
+REST API impact
+---------------
+
+**Changes to the amphora API: see [11]**
+
+PUT /listeners/{amphora_id}/{listener_id}/haproxy
+
+PUT /vrrp/upload
+
+PUT /vrrp/{action}
+
+GET /interface/{ip_addr}
+
+**Changes to the operator API: see [10]**
+
+POST /loadbalancers
+* Successful Status Code - 202
+* JSON Request Body Attributes
+** vip - another JSON object with one required attribute from the following
+*** net_port_id - uuid
+*** subnet_id - uuid
+*** floating_ip_id - uuid
+*** floating_ip_network_id - uuid
+** tenant_id - string - optional - default "0" * 36 (for now)
+** name - string - optional - default null
+** description - string - optional - default null
+** enabled - boolean - optional - default true
+* JSON Response Body Attributes
+** id - uuid
+** vip - another JSON object
+*** net_port_id - uuid
+*** subnet_id - uuid
+*** floating_ip_id - uuid
+*** floating_ip_network_id - uuid
+** tenant_id - string
+** name - string
+** description - string
+** enabled - boolean
+** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
+PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
+** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)
+** **topology - string enum - (SINGLE, ACTIVE_STANDBY)**
+
+PUT /loadbalancers/{lb_id}
+* Successful Status Code - 202
+* JSON Request Body Attributes
+** name - string
+** description - string
+** enabled - boolean
+* JSON Response Body Attributes
+** id - uuid
+** vip - another JSON object
+*** net_port_id - uuid
+*** subnet_id - uuid
+*** floating_ip_id - uuid
+*** floating_ip_network_id - uuid
+** tenant_id - string
+** name - string
+** description - string
+** enabled - boolean
+** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
+PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
+** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)
+** **topology - string enum - (SINGLE, ACTIVE_STANDBY)**
+
+GET /loadbalancers/{lb_id}
+* Successful Status Code - 200
+* JSON Response Body Attributes
+** id - uuid
+** vip - another JSON object
+*** net_port_id - uuid
+*** subnet_id - uuid
+*** floating_ip_id - uuid
+*** floating_ip_network_id - uuid
+** tenant_id - string
+** name - string
+** description - string
+** enabled - boolean
+** provisioning_status - string enum - (ACTIVE, PENDING_CREATE,
+PENDING_UPDATE, PENDING_DELETE, DELETED, ERROR)
+** operating_status - string enum - (ONLINE, OFFLINE, DEGRADED, ERROR)
+** **topology - string enum - (SINGLE, ACTIVE_STANDBY)**
+
+Security impact
+---------------
+
+* The VRRP driver must automatically add a security group rule to the
+  amphora's security group to allow VRRP traffic (Protocol number 112) on the
+  same tenant subnet.
+
+* The VRRP driver shall automatically add a security group rule to allow
+  Authentication Header traffic (Protocol number 51).
+
+* The VRRP driver shall support authentication-type MD5.
+
+* The HAProxy driver must be updated to automatically add a security group
+  rule that allows multiple peers to synchronize their states.
+
+* Currently HAProxy **does not** support peer authentication, and state sync
+  messages are in plaintext.
+
+* At this point, VRRP shall communicate on the same tenant network. The
+  rationale is to fail over based on the same network conditions that the
+  tenant's traffic experiences. Also, VRRP traffic and sync messages shall
+  naturally inherit the same protections applied to the tenant network. This
+  may cause spurious fail-overs if the tenant network is under unplanned,
+  heavy traffic.
+  This is still better than failing over while the master is actually serving
+  the tenant's traffic, or not failing over at all if the master has failed
+  services. Additionally, Keepalived shall check the health of the HAProxy
+  service.
+
+* In later steps, the following shall be taken into account:
+
+  * Tenant quotas and supported topologies.
+  * Protection of VRRP traffic, HAProxy state sync, router IDs, and
+    passphrases in both packets and DB.
+
+Notifications impact
+--------------------
+
+None.
+
+Other end user impact
+---------------------
+
+* The operator shall be able to specify the loadbalancer topology in the
+  Octavia configuration file (used by default).
+
+Performance Impact
+------------------
+
+The Active/Standby topology can consume up to twice the resources (storage,
+network, compute) required by the Single topology. Nevertheless, only a
+single amphora shall be active (i.e., serving end users) at any point in
+time. If the Master amphora is healthy, the backup one shall remain idle
+until it stops receiving VRRP advertisements from the master.
+
+VRRP requires executing health checks in the amphorae at a fine-grained
+period. The health checks shall be as lightweight as possible, such that VRRP
+is able to execute all check scripts within a predefined interval. If the
+check scripts fail to run within this predefined interval, VRRP may become
+unstable and may incorrectly alternate the amphora roles between MASTER and
+BACKUP.
+
+Other deployer impact
+---------------------
+
+* An amphora_topology config option shall be added. The controller worker
+  shall change its taskflow behavior according to the requirements of the
+  different topologies (see the sketch after the Documentation Impact
+  section).
+
+* By default, the amphora_topology is SINGLE, and the ACTIVE/STANDBY topology
+  shall be enabled/requested explicitly by operators.
+
+* The Keepalived version deployed in the amphora image must be newer than
+  1.2.8 to support unicast VRRP mode.
+
+Developer impact
+----------------
+
+None.
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Sherif Abdelwahab (abdelwas)
+
+Work Items
+----------
+
+* Amphora image update to include Keepalived.
+
+* Data model updates.
+
+* Control Worker extensions.
+
+* Keepalived driver.
+
+* Update Network driver.
+
+* Security rules.
+
+* Update Amphora REST APIs and Jinja Configurations.
+
+* Update Octavia Operator APIs.
+
+
+Dependencies
+============
+
+The Keepalived version deployed in the amphora image must be newer than 1.2.8
+to support unicast VRRP mode.
+
+
+Testing
+=======
+
+* Unit tests with tox.
+* Functional tests with tox.
+
+
+Documentation Impact
+====================
+
+* Description of the different supported topologies: Single, Active/Standby.
+* Octavia configuration file changes to enable the Active/Standby topology.
+* CLI changes to enable the Active/Standby topology.
+* Changes shall be introduced to the amphora APIs: see [11].
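+
+To illustrate the amphora_topology switch described under "Other deployer
+impact", the following is a minimal sketch of how the controller worker might
+choose the cluster composition; the value names and VRRP priorities here are
+illustrative assumptions, not settled design:
+
+.. code:: python
+
+    SINGLE = 'SINGLE'
+    ACTIVE_STANDBY = 'ACTIVE_STANDBY'
+
+
+    def amphorae_for_topology(amphora_topology):
+        """Return (role, vrrp_priority) pairs for the amphorae to spawn."""
+        if amphora_topology == ACTIVE_STANDBY:
+            # The master advertises with the higher VRRP priority; both
+            # amphorae share the VIP via Neutron allowed address pairs.
+            return [('MASTER', 100), ('BACKUP', 90)]
+        # Default: a single standalone amphora, no VRRP configuration.
+        return [('STANDALONE', None)]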
+
+
+References
+==========
+
+[1] Implementing High Availability Instances with Neutron using VRRP
+http://goo.gl/eP71g7
+
+[2] RFC 3768: Virtual Router Redundancy Protocol (VRRP)
+
+[3] https://review.opendev.org/#/c/38230/
+
+[4] http://www.keepalived.org/LVS-NAT-Keepalived-HOWTO.html
+
+[5] http://www.formilux.org/archives/haproxy/1003/3259.html
+
+[6] https://blueprints.launchpad.net/octavia/+spec/base-image
+
+[7] https://blueprints.launchpad.net/octavia/+spec/controller-worker
+
+[8] https://blueprints.launchpad.net/octavia/+spec/amphora-driver-interface
+
+[9] https://blueprints.launchpad.net/octavia/+spec/controller
+
+[10] https://blueprints.launchpad.net/octavia/+spec/operator-api
+
+[11] doc/main/api/haproxy-amphora-api.rst
diff --git a/specs/version0.8/use_glance_tag_to_refer_to_image.rst b/specs/version0.8/use_glance_tag_to_refer_to_image.rst
new file mode 100644
index 0000000000..38c3516e7e
--- /dev/null
+++ b/specs/version0.8/use_glance_tag_to_refer_to_image.rst
@@ -0,0 +1,147 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+===============================================================
+Allow to use Glance image tag to refer to desired Amphora image
+===============================================================
+
+https://blueprints.launchpad.net/octavia/+spec/use-glance-tags-to-manage-image
+
+Currently, Octavia lets the operator define the Glance image ID to be used to
+boot new Amphorae. This spec suggests another way to define the desired
+image: using the Glance tagging mechanism.
+
+
+Problem description
+===================
+
+The need to hardcode the image ID in the service configuration file has
+drawbacks.
+
+Specifically, when an updated image is uploaded into Glance, the operator is
+required to orchestrate a configuration file update on all Octavia nodes and
+then restart all Octavia workers to apply the change. This is both complex
+and error-prone.
+
+
+Proposed change
+===============
+
+This spec suggests an alternative way to configure the desired Glance image
+to be used by Octavia: the Glance image tagging feature.
+
+Glance allows an image to be tagged with an arbitrary string value.
+
+With the proposed change, the Octavia operator will be able to tell Octavia
+to use an image with the specified tag. Octavia will then talk to Glance to
+determine the exact image ID that is marked with the tag before booting a new
+Amphora.
+
+
+Alternatives
+------------
+
+Alternatively, we could make Nova talk to Glance to determine the desired
+image ID based on the tag provided by Octavia. This approach is not supported
+by the Nova community because they don't want to impose the complexity on
+their code base.
+
+Another alternative is to use the image name instead of its ID. Nova is
+capable of fetching the right image from Glance by name as long as the name
+is unique. This is not optimal in the case where the operator does not want
+to remove the old Amphora image right after a new image is uploaded (for
+example, if the operator wants to test the new image before cleaning up the
+old one).
+
+Data model impact
+-----------------
+
+None.
+
+REST API impact
+---------------
+
+None.
+
+Security impact
+---------------
+
+Image tags should be managed by the same user that owns the images
+themselves.
+
+Notifications impact
+--------------------
+
+None.
+
+Other end user impact
+---------------------
+
+The proposed change should not break the existing mechanism.
+To achieve that, the new mechanism will be guarded with a new configuration
+option that will store the desired Glance tag.
+
+Performance Impact
+------------------
+
+If the feature is used, Octavia will need to reach out to Glance before
+booting a new Amphora. The performance impact is well isolated and is not
+expected to be significant.
+
+Other deployer impact
+---------------------
+
+The change couples Octavia with Glance. This should not be an issue, since
+there are no use cases for running Octavia without Glance installed.
+
+The new feature deprecates the amp_image_id option. Operators that still use
+the old image referencing mechanism will be advised to switch to the new
+option.
+
+Eventually, the old mechanism will be removed from the tree.
+
+Developer impact
+----------------
+
+None.
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Primary assignee:
+  ihrachys (Ihar Hrachyshka)
+
+Work Items
+----------
+
+* introduce glanceclient integration into the Nova compute driver
+* introduce a new configuration option to store the Glance tag
+* introduce DevStack plugin support to configure the feature
+* provide documentation for the new feature
+
+
+Dependencies
+============
+
+None.
+
+Testing
+=======
+
+Unit tests will be written to cover the feature.
+
+The Octavia DevStack plugin will be switched to using the new Glance image
+referencing mechanism. Tempest tests will be implemented to test the new
+feature.
+
+
+Documentation Impact
+====================
+
+The new feature should be documented in operator-visible guides.
+
+
+References
+==========
+
diff --git a/specs/version0.9/active-active-distributor.rst b/specs/version0.9/active-active-distributor.rst
new file mode 100644
index 0000000000..1cd4130801
--- /dev/null
+++ b/specs/version0.9/active-active-distributor.rst
@@ -0,0 +1,837 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+=================================================
+Distributor for Active-Active, N+1 Amphorae Setup
+=================================================
+
+.. attention::
+  Please review the active-active topology blueprint first (
+  :doc:`active-active-topology` )
+
+https://blueprints.launchpad.net/octavia/+spec/active-active-topology
+
+Problem description
+===================
+
+This blueprint describes how Octavia implements a *Distributor* to support
+the *active-active* loadbalancer (LB) solution, as described in the blueprint
+linked above. It presents the high-level Distributor design and suggests
+high-level code changes to the current code base to realize this design.
+
+In a nutshell, in an *active-active* topology, an *Amphora Cluster* of two
+or more active Amphorae collectively provides the loadbalancing service.
+It is designed as a two-step loadbalancing process: first, a lightweight
+*distribution* of VIP traffic over an Amphora Cluster; then, full-featured
+loadbalancing of traffic over the back-end members. Since a single
+loadbalancing service, which is addressable by a single VIP address, is
+served by several Amphorae at the same time, there is a need to distribute
+incoming requests among these Amphorae -- that is the role of the
+*Distributor*.
+
+This blueprint uses terminology defined in the Octavia glossary when
+available, and defines new terms to describe new components and features as
+necessary.
+
+.. 
_P2: + + **Note:** Items marked with [`P2`_] refer to lower priority features to be + designed / implemented only after initial release. + + +Proposed change +=============== + +* Octavia shall implement a Distributor to support the active-active + topology. + +* The operator should be able to select and configure the Distributor + (e.g., through an Octavia configuration file or [`P2`_] through a flavor + framework). + +* Octavia shall support a pluggable design for the Distributor, allowing + different implementations. In particular, the Distributor shall be + abstracted through a *driver*, similarly to the current support of + Amphora implementations. + +* Octavia shall support different provisioning types for the Distributor; + including VM-based (the default, similar to current Amphorae), + [`P2`_] container-based, and [`P2`_] external (vendor-specific) hardware. + +* The operator shall be able to configure the distribution policies, + including affinity and availability (see below for details). + + +Architecture +------------ + +High-level Topology Description +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* The following diagram illustrates the Distributor's role in an active-active + topology: + +:: + + + Front-End Back-End + Internet Networks Networks + (world) (tenants) (tenants) + ║ A B C A B C + ┌──╨───┐floating IP ║ ║ ║ ┌────────┬──────────┬────┐ ║ ║ ║ + │ ├─ to VIP ──►╢◄──────║───────║──┤f.e. IPs│ Amphorae │b.e.├►╜ ║ ║ + │ │ LB A ║ ║ ║ └──┬─────┤ of │ IPs│ ║ ║ + │ │ ║ ║ ║ │VIP A│ Tenant A ├────┘ ║ ║ + │ GW │ ║ ║ ║ └─────┴──────────┘ ║ ║ + │Router│floating IP ║ ║ ║ ┌────────┬──────────┬────┐ ║ ║ + │ ├─ to VIP ───║──────►╟◄──────║──┤f.e. IPs│ Amphorae │b.e.├──►╜ ║ + │ │ LB B ║ ║ ║ └──┬─────┤ of │ IPs│ ║ + │ │ ║ ║ ║ │VIP B│ Tenant B ├────┘ ║ + │ │ ║ ║ ║ └─────┴──────────┘ ║ + │ │floating IP ║ ║ ║ ┌────────┬──────────┬────┐ ║ + │ ├─ to VIP ───║───────║──────►╢◄─┤f.e. IPs│ Amphorae │b.e.├────►╜ + └──────┘ LB C ║ ║ ║ └──┬─────┤ of │ IPs│ + ║ ║ ║ │VIP C│ Tenant C ├────┘ + arp─►╢ arp─►╢ arp─►╢ └─────┴──────────┘ + ┌─┴─┐ ║┌─┴─┐ ║┌─┴─┐ ║ + │VIP│┌►╜│VIP│┌►╜│VIP│┌►╜ + ├───┴┴┐ ├───┴┴┐ ├───┴┴┐ + │IP A │ │IP B │ │IP C │ + ┌┴─────┴─┴─────┴─┴─────┴┐ + │ │ + │ Distributor │ + │ (multi-tenant) │ + └───────────────────────┘ + + +* In the above diagram, several tenants (A, B, C, ...) share the + Distributor, yet the Amphorae, and the front- and back-end (tenant) + networks are not shared between tenants. (See also "Distributor Sharing" + below.) Note that in the initial code implementing the distributor, the + distributor will not be shared between tenants, until tests verifying the + security of a shared distributor can be implemented. + +* The Distributor acts as a (one-legged) router, listening on each + load balancer's VIP and forwarding to one of its Amphorae. + +* Each load balancer's VIP is advertised and answered by the Distributor. + An ``arp`` request for any of the VIP addresses is answered by the + Distributor, hence any traffic sent for each VIP is received by the + Distributor (and forwarded to an appropriate Amphora). + +* ARP is disabled on all the Amphorae for the VIP interface. + +* The Distributor distributes the traffic of each VIP to an Amphora in the + corresponding load balancer Cluster. + +* An example of high-level data flow: + + 1. Internet clients access a tenant service through an externally visible + floating-IP (IPv4 or IPv6). + + 2. The GW router maps the floating IP into a loadbalancer's internal VIP on + the tenant's front-end network. + + 3. 
(1st packet to VIP only) the GW sends an ``arp`` request on the VIP
+     (tenant front-end) network. The Distributor answers the ``arp`` request
+     with its own MAC address on this network (all the Amphorae on the
+     network can serve the VIP, but do not answer the ``arp``).
+
+  4. The GW router forwards the client request to the Distributor.
+
+  5. The Distributor forwards the packet to one of the Amphorae on the
+     tenant's front-end network (distributed according to some policy,
+     as described below), without changing the destination IP (i.e., still
+     using the VIP).
+
+  6. The Amphora accepts the packet and continues the flow on the tenant's
+     back-end network as for other Octavia loadbalancer topologies (non
+     active-active).
+
+  7. The outgoing response packets from the Amphora are forwarded directly
+     to the GW router (that is, they do not pass through the Distributor).
+
+Affinity of Flows to Amphorae
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Affinity is required to make sure related packets are forwarded to the
+  same Amphora. At a minimum, since TCP connections are terminated at the
+  Amphora, all packets that belong to the same flow must be sent to the
+  same Amphora. Enhanced affinity levels can be used to make sure that flows
+  with similar attributes are always sent to the same Amphora; this may be
+  desired to achieve better performance (see discussion below).
+
+- [`P2`_] The Distributor shall support different modes of client-to-Amphora
+  affinity. The operator should be able to select and configure the desired
+  affinity level.
+
+- Since the Distributor is L3 and the "heavy lifting" is expected to be
+  done by the Amphorae, this specification proposes implementing two
+  practical affinity alternatives. Other affinity alternatives may be
+  implemented at a later time.
+
+  *Source IP and source port*
+    In this mode, the Distributor must always send packets from the same
+    combination of Source IP and Source port to the same Amphora. Since
+    the Target IP and Target Port are fixed per Listener, this mode implies
+    that all packets from the same TCP flow are sent to the same Amphora.
+    This is the minimal affinity mode, as without it TCP connections will
+    break.
+
+    *Note*: related flows (e.g., parallel client calls from the same HTML
+    page) will typically be distributed to different Amphorae; however,
+    these should still be routed to the same back-end. This could be
+    guaranteed by using cookies and/or by synchronizing the stick-tables.
+    Also, the Amphorae in the Cluster could be configured to use the same
+    hashing parameters (avoiding any random seed) to ensure they all make
+    similar decisions.
+
+  *Source IP* (default)
+    In this mode, the Distributor must always send packets from the same
+    source IP to the same Amphora, regardless of port. This mode allows TLS
+    session reuse (e.g., through session IDs), where an abbreviated
+    handshake can be used to improve latency and computation time.
+
+    The main disadvantage of sending all traffic from the same source IP to
+    the same Amphora is that it might lead to poor load distribution for
+    large workloads that have the same source IP (e.g., a workload behind a
+    single NAT or proxy).
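+
+    As an illustration only, the difference between the two affinity modes
+    boils down to which fields feed the hash; a minimal sketch (helper and
+    mode names are hypothetical):
+
+    .. code:: python
+
+        import hashlib
+
+        def pick_bucket(src_ip, src_port, n_buckets, mode='SOURCE_IP'):
+            """Map a flow to one of n_buckets Amphorae."""
+            if mode == 'SOURCE_IP':
+                key = src_ip                       # all ports stick together
+            else:                                  # 'SOURCE_IP_AND_PORT'
+                key = '%s:%s' % (src_ip, src_port)
+            digest = hashlib.md5(key.encode()).digest()
+            return int.from_bytes(digest[:4], 'big') % n_buckets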
+
+    **Note on TLS implications**:
+    In some (typical) TLS sessions, the additional load incurred for each new
+    session is significantly larger than the load incurred for each new
+    request or connection on the same session; namely, the total load on each
+    Amphora will be more affected by the number of different source IPs it
+    serves than by the number of connections. Moreover, since the total load
+    on the Cluster incurred by all the connections depends on the level of
+    session reuse, spreading a single source IP over multiple Amphorae
+    *increases* the overall load on the Cluster. Thus, a Distributor that
+    uniformly spreads traffic without affinity per source IP (e.g., uses
+    per-flow affinity only) might cause an increase in overall load on the
+    Cluster that is proportional to the number of Amphorae. For example, in a
+    scale-out scenario (where a new Amphora is spawned to share the total
+    load), moving some flows to the new Amphora might increase the overall
+    Cluster load, negating the benefit of scaling out.
+
+    Session reuse helps with the certificate exchange phase. Improvements
+    in performance with the certificate exchange depend on the type of keys
+    used, and are greatest with RSA. Session reuse may be less important with
+    other schemes; shared TLS session tickets are another mechanism that may
+    circumvent the problem; also, upcoming versions of HAProxy may be able
+    to obviate this problem by synchronizing TLS state between Amphorae
+    (similar to the stick-table protocol).
+
+- Per the agreement at the Mitaka mid-cycle, the default affinity shall be
+  based on source IP only, and a consistent hashing function (see below)
+  shall be used to distribute flows in a predictable manner; however,
+  abstraction will be used to allow other implementations at a later time.
+
+Forwarding with OVS and OpenFlow Rules
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* The reference implementation of the Distributor shall use OVS for
+  forwarding and configure the Distributor through OpenFlow rules.
+
+  - OpenFlow rules can be implemented by a software switch (e.g., OVS) that
+    can run on a VM; thus, it can be created and managed by Octavia similarly
+    to the creation and management of Amphora VMs.
+
+  - OpenFlow rules are supported by several HW switches, so the same
+    control plane can be used for both SW and HW implementations.
+
+* Outline of Rules
+
+  - A ``group`` with the ``select`` method is used to distribute IP traffic
+    over multiple Amphorae. There is one ``bucket`` per Amphora -- adding
+    an Amphora adds a new ``bucket`` and deleting an Amphora removes the
+    corresponding ``bucket``.
+
+  - The ``select`` method supports (OpenFlow v1.5) hash-based selection
+    of the ``bucket``. The hash can be set up to use different fields,
+    including source IP only (default) or source IP and source port.
+
+  - All buckets route traffic back on the in-port (i.e., no forwarding
+    between ports). This ensures that the same front-end network is used
+    (i.e., the Distributor does not route between front-end networks;
+    therefore, it does not mix traffic of different tenants).
+
+  - Each ``bucket`` action re-writes the outgoing packets: it re-writes the
+    destination MAC to that of the specific Amphora and the source MAC to
+    that of the Distributor interface (together, these MAC re-writes provide
+    L3 routing functionality).
+
+    *Note:* alternative re-write rules can be used to support other
+    forwarding mechanisms.
+
+  - OpenFlow rules are also used to answer ``arp`` requests on the VIP.
+    ``arp`` requests for each VIP are captured, re-written as ``arp``
+    replies with the MAC address of the particular front-end interface, and
+    sent back on the in-port. Again, there is no routing between interfaces.
+
+* Handling Amphora failure
+
+  - The initial implementation will assume a fixed size for each cluster (no
+    elasticity). The hashing will be "consistent" by virtue of never
+    changing the number of ``buckets``. If the cluster size is changed on
+    the fly (there should not be an API to do so), then there are no
+    guarantees on shuffling.
+
+  - If an Amphora fails then remapping cannot be avoided -- all flows of
+    the failed Amphora must be remapped to a different one. Rather than
+    mapping these flows to other active Amphorae in the cluster, the
+    reference implementation will map all flows to the cluster's *standby*
+    Amphora (i.e., the "+1" Amphora in this "N+1" cluster). This ensures
+    that the cluster size does not change. The only change in the OpenFlow
+    rules would be to replace the MAC of the failed Amphora with that of the
+    standby Amphora.
+
+  - This implementation is very similar to Active-Standby fail-over. There
+    will be a standby Amphora that can serve traffic in case of failure.
+    The differences from Active-Standby are that a single Amphora acts as a
+    standby for multiple ones; fail-over re-routing is handled through the
+    Distributor (rather than by VRRP); and a whole cluster of Amphorae is
+    active concurrently, to enable support of large workloads.
+
+  - The Health Manager will trigger re-creation of a failed Amphora. Once
+    the Amphora is ready, it becomes the new *standby* (no changes to
+    OpenFlow rules).
+
+  - [`P2`_] Handle concurrent failure of more than a single Amphora.
+
+* Handling Distributor failover
+
+  - To handle the event of a Distributor failover caused by a catastrophic
+    failure of a Distributor, and in order to preserve the client-to-Amphora
+    affinity when the Distributor is replaced, the Amphora registration
+    process with the Distributor should preserve positional information.
+    This should ensure that when a new Distributor is created, Amphorae will
+    be assigned to the same buckets to which they were previously assigned.
+
+  - In the reference implementation, we propose making the Distributor API
+    return the complete list of Amphora MAC addresses with positional
+    information each time an Amphora is registered or unregistered.
+
+Specific proposed changes
+-------------------------
+
+**Note:** These are changes on top of the changes described in the
+"Active-Active, N+1 Amphorae Setup" blueprint (see
+https://blueprints.launchpad.net/octavia/+spec/active-active-topology)
+
+* Create a flow for the creation of an Amphora cluster with N active Amphorae
+  and one extra standby Amphora. Set up the Amphora roles accordingly.
+
+* Support the creation, connection, and configuration of the various
+  networks and interfaces as described in the `high-level topology` diagram.
+  The Distributor shall have a separate interface for each loadbalancer and
+  shall not allow any routing between different ports. In particular, when
+  a loadbalancer is created, the Distributor should:
+
+  - Attach the Distributor to the loadbalancer's front-end network by
+    adding a VIP port to the Distributor (the LB VIP Neutron port).
+
+  - Configure OpenFlow rules: create a group with the desired cluster size
+    and with the given Amphora MACs; create rules to answer ``arp``
+    requests for the VIP address.
+
+  **Notes:**
+  [`P2`_] It is desirable that the Distributor be considered as a router by
+  Neutron (to handle port security, network forwarding without ``arp``
+  spoofing, etc.). This may require changes to Neutron and may also mean
+  that Octavia will be a privileged user of Neutron.
+
+  The Distributor needs to support IPv6 NDP.
+
+  [`P2`_] If the Distributor is implemented as a container then hot-plugging
+  a port for each VIP might not be possible.
+
+  If DVR is used then routing rules must be used to forward external
+  traffic to the Distributor rather than relying on ``arp``. In particular,
+  DVR messes up ``noarp`` settings.
+
+* Support Amphora failure recovery
+
+  - Modify the HM and failure recovery flows to add tasks to notify the ACM
+    when ACTIVE-ACTIVE topology is in use. If an active Amphora fails then
+    it needs to be decommissioned on the Distributor and replaced with
+    the standby.
+
+  - Failed Amphorae should be recreated as a standby (in the new
+    IN_CLUSTER_STANDBY role). The standby Amphora should also be monitored
+    and recovered on failure.
+
+* Distributor driver and Distributor image
+
+  - The Distributor should be supported similarly to an amphora; namely, it
+    should have its own abstract driver.
+
+  - The Distributor image (for the reference implementation) should include
+    a recent version of OVS (>1.5) that supports hash-based bucket
+    selection. As is done for Amphorae, the Distributor image should be
+    installed with public keys to allow secure configuration by the Octavia
+    controller.
+
+  - The reference implementation shall spawn a new Distributor VM as needed.
+    It shall monitor its health and handle recovery using heartbeats sent to
+    the health monitor, in a similar fashion to how this is done presently
+    with Amphorae. [`P2`_] Spawn a new Distributor if the number of VIPs
+    exceeds a given limit (to limit the number of Neutron ports attached to
+    one Distributor). [`P2`_] Add configuration options and/or an Operator
+    API to allow an operator to request a dedicated Distributor for a VIP
+    (or per tenant).
+
+* Define a REST API for Distributor configuration (no SSH API).
+  See below for details.
+
+* Create the data model for the Distributor.
+
+Alternatives
+------------
+
+TBD
+
+Data model impact
+-----------------
+
+Add table ``distributor`` with the following columns:
+
+* id ``(sa.String(36), nullable=False)``
+  ID of Distributor instance.
+
+* compute_id ``(sa.String(36), nullable=True)``
+  ID of compute node running the Distributor.
+
+* lb_network_ip ``(sa.String(64), nullable=True)``
+  IP of Distributor on management network.
+
+* status ``(sa.String(36), nullable=True)``
+  Provisioning status.
+
+* vip_port_ids (list of ``sa.String(36)``)
+  List of Neutron port IDs.
+  New VIFs may be plugged into the Distributor when a new LB is created. We
+  may need to store the Neutron port IDs in order to support
+  fail-over from one Distributor instance to another.
+
+Add table ``distributor_health`` with the following columns:
+
+* distributor_id ``(sa.String(36), nullable=False)``
+  ID of Distributor instance.
+
+* last_update ``(sa.DateTime, nullable=False)``
+  Last time a distributor heartbeat was received by a health monitor.
+
+* busy ``(sa.Boolean, nullable=False)``
+  Field indicating a create / delete or other action is being conducted on
+  the distributor instance (i.e., to prevent a race condition when multiple
+  health managers are in use).
+
+Add table ``amphora_registration`` with the following columns.
+This table describes which Amphorae are registered with which Distributors,
+and in which order:
+
+* lb_id ``(sa.String(36), nullable=False)``
+  ID of load balancer.
+
+* distributor_id ``(sa.String(36), nullable=False)``
+  ID of Distributor instance.
+
+* amphora_id ``(sa.String(36), nullable=False)``
+  ID of Amphora instance.
+
+* position ``(sa.Integer, nullable=True)``
+  Order in which Amphorae are registered with the Distributor.
+
+REST API impact
+---------------
+The Distributor will be running its own REST API server. This API will be
+secured using two-way SSL authentication, and will use certificate rotation
+in the same way this is done with Amphorae today.
+
+The following API calls will be addressed.
+
+1. Post VIP Plug
+
+   Adding a VIP network interface to the Distributor involves tasks which run
+   outside the Distributor itself. Once these are complete, the Distributor
+   must be configured to use the new interface. This is a REST call, similar
+   to what is currently done for Amphorae when connecting to a new member
+   network.
+
+   `lb_id`
+     An identifier for the particular loadbalancer/VIP. Used for subsequent
+     register/unregister of Amphorae.
+
+   `vip_address`
+     The IP of the VIP (that is, the IP for which to answer ``arp``
+     requests).
+
+   `subnet_cidr`
+     Netmask for the VIP's subnet.
+
+   `gateway`
+     Gateway that outbound packets from the VIP IP address should use.
+
+   `mac_address`
+     MAC address of the new interface corresponding to the VIP.
+
+   `vrrp_ip`
+     In the case of an HA Distributor, this contains the IP address that will
+     be used in setting up the allowed address pairs relationship. (See
+     Amphora VIP plugging under the ACTIVE-STANDBY topology for an example
+     of how this is used.)
+
+   `host_routes`
+     List of routes that should be added when the VIP is plugged.
+
+   `alg_extras`
+     Extra arguments related to the algorithm that will be used to distribute
+     requests to the Amphorae that are part of this load balancer
+     configuration. This consists of an algorithm name and an affinity type.
+     In the initial release of ACTIVE-ACTIVE, the only valid algorithm will
+     be *hash*, and the affinity type may be ``Source_IP`` or [`P2`_]
+     ``Source_IP_AND_port``.
+
+2. Pre VIP unplug
+
+   Removing a VIP network interface will involve several tasks on the
+   Distributor to gracefully roll back the OVS configuration and other
+   details that were set up when the VIP was plugged in.
+
+   `lb_id`
+     ID of the VIP's loadbalancer that will be unplugged.
+
+3. Register Amphorae
+
+   This adds Amphorae to the configuration for a given load balancer. The
+   Distributor should respond with a new list of all Amphorae registered with
+   the Distributor, with positional information.
+
+   `lb_id`
+     ID of the loadbalancer with which the Amphorae will be registered.
+
+   `amphorae`
+     List of Amphora MAC addresses and an (optional) position argument
+     specifying where they should be registered.
+
+4. Unregister Amphorae
+
+   This removes Amphorae from the configuration for a given load balancer.
+   The Distributor should respond with a new list of all Amphorae registered
+   with the Distributor, with positional information.
+
+   `lb_id`
+     ID of the loadbalancer from which the Amphorae will be unregistered.
+
+   `amphorae`
+     List of Amphora MAC addresses that should be unregistered from the
+     Distributor.
+
+Security impact
+---------------
+
+The Distributor is designed to be multi-tenant by default. (Note that the
+first reference implementation will not be multi-tenant until tests can be
+developed to verify the security of a multi-tenant reference distributor.)
+Although each tenant has its own front-end network, the Distributor is
+connected to all of them, which might allow leaks between these networks. The
+rationale for this is twofold: first, the Distributor should be considered a
+trusted infrastructure component; second, all traffic is external traffic
+before it reaches the Amphora. Note that the GW router has exactly the same
+attributes; in other words, logically, we can consider the Distributor to be
+an extension of the GW (or even use the GW HW to implement the Distributor).
+
+This approach might not be considered secure enough for some cases, such as
+when LBaaS is used for internal tier-to-tier communication inside a tenant
+network. Some tenants may want their loadbalancer's VIP to remain private and
+their front-end network to be isolated. In these cases, in order to
+accomplish active-active for this tenant, we would need separate dedicated
+Distributor instance(s).
+
+Notifications impact
+--------------------
+
+Other end user impact
+---------------------
+
+Performance Impact
+------------------
+
+Other deployer impact
+---------------------
+
+Developer impact
+----------------
+
+Further Discussion
+------------------
+
+.. Note::
+   This section captures some background, ideas, concerns, and remarks that
+   were raised by various people. Some of the items here can be considered
+   for future/alternative design, and some will hopefully make their way
+   into, yet to be written, related blueprints (e.g., auto-scaled topology).
+
+[`P2`_] Handling changes in Cluster size (manual or auto-scaled)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- The Distributor shall support different mechanisms for preserving the
+  affinity of flows to Amphorae following a *change in the size* of the
+  Amphora Cluster.
+
+- The goal is to minimize shuffling of the client-to-Amphora mapping during
+  cluster size changes:
+
+  * When an Amphora is removed from the Cluster (e.g., due to failure or a
+    scale-down action), all its flows are broken; however, flows to other
+    Amphorae should not be affected. Also, if a drain method is used to empty
+    the Amphora of client flows (in the case of a graceful removal), this
+    should prevent disruption.
+
+  * When an Amphora is *added* to the Cluster (e.g., recovery of a failed
+    Amphora), some new flows should be distributed to the new Amphora;
+    however, most flows should still go to the same Amphora they were
+    distributed to before the new Amphora was added. For example, if the
+    affinity of flows to Amphorae is per source IP and a new Amphora was
+    just added, then the Distributor should forward packets from this IP to
+    only one of two Amphorae: either the same Amphora as before, or the
+    Amphora that was added.
+
+    Using a simple hash to maintain affinity does not meet this goal.
+
+    For example, suppose we maintain affinity (for a fixed cluster size)
+    using a hash (for randomizing key distribution) as
+    `chosen_amphora_id = hash(sourceIP # port) mod number_of_amphorae`.
+    When a new Amphora is added or removed, the number of Amphorae changes;
+    thus, a different Amphora will be chosen for most flows.
+
+- Below are a couple of ways to tackle this shuffling problem.
+
+  *Consistent Hashing*
+    Consistent hashing is a hashing mechanism (regardless of whether the key
+    is based on IP or IP/port) that preserves most hash mappings during
+    changes in the size of the Amphora Cluster.
+    In particular, for a cluster with N Amphorae that grows to N+1 Amphorae,
+    a consistent hashing function ensures that, with high probability, only
+    1/N of input flows will be re-hashed (more precisely, K/N of the K keys
+    will be rehashed). Note that, even with consistent hashing, some flows
+    will be remapped, and there is only a statistical bound on the number of
+    remapped flows.
+
+    The "classic" consistent hashing algorithm maps both server IDs and
+    keys to hash values and selects for each key the server with the
+    closest hash value to the key's hash value. Lookup generally requires
+    O(log N) to search for the "closest" server. Achieving good
+    distribution requires multiple hashes per server (~10s) -- although
+    these can be pre-computed, there is an ~10s*N memory footprint. Other
+    algorithms (e.g., Google's Maglev) have better performance, but provide
+    weaker guarantees.
+
+    There are several consistent hashing libraries available. None are
+    supported in OVS.
+
+    * Ketama https://github.com/RJ/ketama
+
+    * OpenStack Swift https://docs.openstack.org/swift/latest/ring.html#ring
+
+    * Amazon Dynamo
+      http://www.allthingsdistributed.com/files/amazon-dynamo-sosp2007.pdf
+
+    We should also strongly consider making any consistent hashing algorithm
+    we develop available to all OpenStack components by making it part of an
+    Oslo library.
+
+  *Rendezvous hashing*
+    This method provides similar properties to Consistent Hashing (i.e., a
+    hashing function that remaps only 1/N of keys when a cluster with N
+    Amphorae grows to N+1 Amphorae).
+
+    For each server ID, the algorithm concatenates the key and server ID and
+    computes a hash. The server with the largest hash is chosen. This
+    approach requires O(N) for each lookup, but is much simpler to
+    implement and has virtually no memory footprint. Through search-tree
+    encoding of the server IDs it is possible to achieve O(log N) lookup,
+    but implementation is harder and distribution is not as good. Another
+    feature is that more than one server can be chosen (e.g., the two
+    largest values) to handle larger loads -- not directly useful for the
+    Distributor use case.
+
+  *Hybrid, Permutation-based approach*
+    This is an alternative implementation of consistent hashing that may be
+    simpler to implement. Keys are hashed to a set of buckets; each bucket
+    is pre-mapped to a random permutation of the server IDs. Lookup is by
+    computing a hash of the key to obtain a bucket and then going over the
+    permutation, selecting the first server. If a server is marked as
+    "down", the next server in the list is chosen. This approach is similar
+    to Rendezvous hashing if each key is directly pre-mapped to a random
+    permutation (and, like it, allows more than one server selection). If
+    the number of failed servers is small then lookup is about O(1); memory
+    is O(N * #buckets), where the granularity of distribution is improved by
+    increasing the number of buckets. The permutation-based approach is
+    useful to support clusters of fixed size that need to handle a few
+    nodes going down and then coming back up. If there is an assumption on
+    the number of failures then memory can be reduced to O(max_failures *
+    #buckets). This approach seems to suit the Distributor Active-Active
+    use case for non-elastic workloads.
+
+- Flow tracking is required, even with the above hash functions, to handle
+  the (relatively few) remapped flows. If an existing flow is remapped, its
+  TCP connection would break.
+  This is acceptable when an Amphora goes down and its flows are remapped to
+  a new one. On the other hand, it may be unacceptable when an Amphora is
+  added to the cluster and 1/N of existing flows are remapped. The
+  Distributor may support different modes, as follows.
+
+  *None / Stateless*
+    In this mode, the Distributor applies its most recent forwarding rules,
+    regardless of previous state. Some existing flows might be remapped to a
+    different Amphora and would be broken. The client would have to recover
+    and establish a connection with the new Amphora (it would still be
+    mapped to the same back-end, if possible). Combined with consistent (or
+    similar) hashing, this may be good enough for many web applications
+    that are built for failure anyway, and can restore their state upon
+    reconnect.
+
+  *Full flow Tracking*
+    In this mode, the Distributor tracks existing flows to provide full
+    affinity, i.e., only new flows can be remapped to different Amphorae.
+    Linux connection tracking may be used (e.g., through IPTables or
+    through OpenFlow); however, this might not scale well. Alternatively,
+    the Distributor can use an independent mechanism similar to HAProxy
+    stick-tables to track the flows. Note that the Distributor only needs
+    to track the mapping per source IP and source port (unlike Linux
+    connection tracking, which follows the TCP state and related
+    connections).
+
+  *Use Ryu*
+    Ryu is a well-supported and well-tested Python framework for issuing
+    OpenFlow commands. Especially since Neutron recently moved to using it
+    for many of the things it does, using it in the Distributor might make
+    sense for Octavia as well.
+
+Forwarding Data-path Implementation Alternatives
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The current design uses L2 forwarding based only on L3 parameters and uses
+Direct Return routing (one-legged). The rationale behind this approach is
+to keep the Distributor as light as possible and have the Amphorae do the
+bulk of the work. This allows one (or a few) Distributor instance(s) to
+serve all traffic, even for very large workloads. Other approaches are
+possible.
+
+2-legged Router
+_______________
+
+- The Distributor acts as a router, being in-path in both directions.
+
+- New network between the Distributor and the Amphorae -- only the
+  Distributor is on the VIP subnet.
+
+- No need to use MAC forwarding -- use routing rules.
+
+LVS
+___
+
+Use LVS for the Distributor.
+
+DNS
+___
+
+Use DNS for the Distributor.
+
+- Use DNS to map to particular Amphorae. Distribution will be of the
+  domain name rather than the VIP.
+
+- No problem with per-flow affinity, as the client will use the same IP for
+  an entire TCP connection.
+
+- Need a different public IP for each Amphora (no VIP).
+
+Pure SDN
+________
+
+- Implement the OpenFlow rules directly in the network, without a
+  Distributor instance.
+
+- If the network infrastructure supports this, then the Distributor can
+  become more robust and very lightweight, making it practical to have a
+  dedicated Distributor per VIP (only the rules will be dedicated, as the
+  network and SDN controller are shared resources).
+
+Distributor Sharing
+^^^^^^^^^^^^^^^^^^^
+
+- The initial implementation of the Distributor will not be shared between
+  tenants until tests can be written to verify the security of this
+  solution.
+
+- The implementation should support different Distributor sharing and
+  cardinality configurations. This includes single-shared Distributor,
+  multiple-dedicated Distributors, and multiple-shared Distributors.
+  In particular, an abstraction layer should be used, and the data-model
+  should include an association between the load balancer and the
+  Distributor.
+
+- A shared Distributor uses the least amount of resources, but may not
+  meet isolation requirements (performance and/or security) or might
+  become a bottleneck.
+
+Distributor High-Availability
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- The Distributor should be highly-available (as this is one of the
+  motivations for the active-active topology). Once the initial
+  active-active functionality is delivered, developing a highly available
+  Distributor should take a high priority.
+
+- A mechanism similar to the VRRP used on ACTIVE-STANDBY topology Amphorae
+  can be used.
+
+- Since the Distributor is stateless (for fixed cluster sizes and if no
+  connection tracking is used), it is possible to set up an active-active
+  configuration and advertise more than one Distributor (e.g., for ECMP).
+
+- As a first step, the initial implementation will use a single
+  Distributor instance (i.e., it will not be highly-available). The
+  Health Manager will monitor the Distributor health and initiate
+  recovery if needed.
+
+- The implementation should support plugging-in a hardware-based
+  implementation of the Distributor that may have its own
+  high-availability support.
+
+- In order to preserve client-to-Amphora affinity in the case of a
+  failover, a VRRP-like HA Distributor has several options. We could
+  potentially push Amphora registrations to the standby Distributor with
+  the position arguments specified, in order to guarantee the active and
+  standby Distributors always have the same configuration. Or, we could
+  invent and utilize a synchronization protocol between the active and
+  standby Distributors. This will be explored and decided when an HA
+  Distributor specification is written and approved.
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Work Items
+----------
+
+Dependencies
+============
+
+
+Testing
+=======
+
+* Unit tests with tox.
+* Functional tests with tox.
+
+
+Documentation Impact
+====================
+
+
+References
+==========
+
+https://blueprints.launchpad.net/octavia/+spec/base-image
+https://blueprints.launchpad.net/octavia/+spec/controller-worker
+https://blueprints.launchpad.net/octavia/+spec/amphora-driver-interface
+https://blueprints.launchpad.net/octavia/+spec/controller
+https://blueprints.launchpad.net/octavia/+spec/operator-api
+:doc:`../../api/haproxy-amphora-api`
+https://blueprints.launchpad.net/octavia/+spec/active-active-topology
diff --git a/specs/version0.9/active-active-topology.rst b/specs/version0.9/active-active-topology.rst
new file mode 100644
index 0000000000..28f3cc6532
--- /dev/null
+++ b/specs/version0.9/active-active-topology.rst
@@ -0,0 +1,635 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+
+=================================
+Active-Active, N+1 Amphorae Setup
+=================================
+
+https://blueprints.launchpad.net/octavia/+spec/active-active-topology
+
+Problem description
+===================
+
+This blueprint describes how Octavia implements an *active-active*
+loadbalancer (LB) solution that is highly-available through redundant
+Amphorae. It presents the high-level service topology and suggests
+high-level code changes to the current code base to realize this scenario.
+In a nutshell, an *Amphora Cluster* of two or more active Amphorae
+collectively provides the loadbalancing service.
+
+The Amphora Cluster shall be managed by an *Amphora Cluster Manager* (ACM).
+The ACM shall provide an abstraction that allows different types of
+active-active features (e.g., failure recovery, elasticity, etc.). The
+initial implementation shall not rely on external services, but the
+abstraction shall allow for interaction with external ACMs (to be developed
+later).
+
+This blueprint uses terminology defined in the Octavia glossary when
+available, and defines new terms to describe new components and features
+as necessary.
+
+.. _P2:
+
+  **Note:** Items marked with [`P2`_] refer to lower priority features to
+  be designed / implemented only after initial release.
+
+
+Proposed change
+===============
+
+A tenant should be able to start a highly-available loadbalancer for the
+tenant's backend services as follows:
+
+* The operator should be able to configure an active-active topology
+  through an Octavia configuration file (a sketch follows this list) or
+  [`P2`_] through a Neutron flavor, which the loadbalancer shall support.
+  Octavia shall support active-active topologies in addition to the
+  topologies that it currently supports.
+
+* In an active-active topology, a cluster of two or more amphorae shall
+  host a replicated configuration of the load-balancing services. Octavia
+  will manage this *Amphora Cluster* as a highly-available service using a
+  pool of active resources.
+
+* The Amphora Cluster shall provide the load-balancing services and
+  support the configurations that are supported by a single Amphora
+  topology, including L7 load-balancing, SSL termination, etc.
+
+* The active-active topology shall support various Amphora types and
+  implementations, including virtual machines, [`P2`_] containers, and
+  bare-metal servers.
+
+* The operator should be able to configure the high-availability
+  requirements for the active-active load-balancing services. The operator
+  shall be able to specify the number of healthy Amphorae that must exist
+  in the load-balancing Amphora Cluster. If the number of healthy Amphorae
+  drops below the desired number, Octavia shall automatically and
+  seamlessly create and configure a new Amphora and add it to the Amphora
+  Cluster. [`P2`_] The operator should further be able to require that the
+  Amphora Cluster be allocated on separate physical resources.
+
+* An Amphora Cluster will collectively act to serve as a single logical
+  loadbalancer as defined in the Octavia glossary. Octavia will seamlessly
+  distribute incoming external traffic among the Amphorae in the Amphora
+  Cluster. To that end, Octavia will employ a *Distributor* component that
+  will forward external traffic towards the managed amphora instances.
+  Conceptually, the Distributor provides an extra level of load-balancing
+  for an active-active Octavia application, albeit a simplified one.
+  Octavia should be able to support several Distributor implementations
+  (e.g., software-based and hardware-based) and different affinity models
+  (at minimum, flow-affinity should be supported to allow TCP connectivity
+  between clients and Amphorae).
+
+* The detailed design of the Distributor component will be described in a
+  separate document (see "Distributor for Active-Active, N+1 Amphorae
+  Setup", active-active-distributor.rst).
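+
+Purely as an illustration of the operator-facing configuration, an
+octavia.conf sketch might look as follows; ``loadbalancer_topology`` is an
+existing option, ``ACTIVE_ACTIVE`` is the new value this spec proposes,
+and the commented cluster-size options are hypothetical names to be
+settled during implementation::
+
+    [controller_worker]
+    loadbalancer_topology = ACTIVE_ACTIVE
+    # Hypothetical Amphora Cluster knobs (illustrative names only):
+    # cluster_desired_size = 3
+    # cluster_min_size = 2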
+ + +High-level Topology Description +------------------------------- + +Single Tenant +~~~~~~~~~~~~~ + +* The following diagram illustrates the active-active topology: + +:: + + Front-End Back-End + Internet Network Network + (world) (tenant) (tenant) + ║ ║ ║ + ┌─╨────┐ floating IP ║ ║ ┌────────┐ + │Router│ to LB VIP ║ ┌────┬─────────┬────┐ ║ │ Tenant │ + │ GW ├──────────────►╫◄─┤ IP │ Amphora │ IP ├─►╫◄─┤Service │ + └──────┘ ║ └┬───┤ (1) │back│ ║ │ (1) │ + ║ │VIP├─┬──────┬┴────┘ ║ └────────┘ + ║ └───┘ │ MGMT │ ║ ┌────────┐ + ╓◄───────────────────║─────────┤ IP │ ║ │ Tenant │ + ║ ┌─────────┬────┐ ║ └──────┘ ╟◄─┤Service │ + ║ │ Distri- │ IP├►╢ ║ │ (2) │ + ║ │ butor ├───┬┘ ║ ┌────┬─────────┬────┐ ║ └────────┘ + ║ └─┬──────┬┤VIP│ ╟◄─┤ IP │ Amphora │ IP ├─►╢ ┌────────┐ + ║ │ MGMT │└─┬─┘ ║ └┬───┤ (2) │back│ ║ │ Tenant │ + ╟◄────┤ IP │ └arp►╢ │VIP├─┬──────┬┴────┘ ╟◄─┤Service │ + ║ └──────┘ ║ └───┘ │ MGMT │ ║ │ (3) │ + ╟◄───────────────────║─────────┤ IP │ ║ └────────┘ + ║ ┌───────────────┐ ║ └──────┘ ║ + ║ │ Octavia LBaaS │ ║ ••• ║ • + ╟◄─┤ Controller │ ║ ┌────┬─────────┬────┐ ║ • + ║ └┬─────────────┬┘ ╙◄─┤ IP │ Amphora │ IP ├─►╢ + ║ │ Amphora │ └┬───┤ (k) │back│ ║ ┌────────┐ + ║ │ Cluster Mgr.│ │VIP├─┬──────┬┴────┘ ║ │ Tenant │ + ║ └─────────────┘ └───┘ │ MGMT │ ╙◄─┤Service │ + ╟◄─────────────────────────────┤ IP │ │ (m) │ + ║ └──────┘ └────────┘ + ║ + Management Amphora Cluster Back-end Pool + Network 1..k 1..m + +* An example of high-level data-flow: + + 1. Internet clients access a tenant service through an externally visible + floating-IP (IPv4 or IPv6). + + 2. If IPv4, a gateway router maps the floating IP into a loadbalancer's + internal VIP on the tenant's front-end network. + + 3. The (multi-tenant) Distributor receives incoming requests to the + loadbalancer's VIP. It acts as a one-legged direct return LB, + answering ``arp`` requests for the loadbalancer's VIP (see Distributor + spec.). + + 4. The Distributor distributes incoming connections over the tenant's + Amphora Cluster, by forwarding each new connection opened with a + loadbalancer's VIP to a front-end MAC address of an Amphora in the + Amphora Cluster (layer-2 forwarding). *Note*: the Distributor may + implement other forwarding schemes to support more complex routing + mechanisms, such as DVR (see Distributor spec.). + + 5. An Amphora receives the connection and accepts traffic addressed to + the loadbalancer's VIP. The front-end IPs of the Amphorae are + allocated on the tenant's front-end network. Each Amphora accepts VIP + traffic, but does not answer ``arp`` request for the VIP address. + + 6. The Amphora load-balances the incoming connections to the back-end + pool of tenant servers, by forwarding each external request to a + member on the tenant network. The Amphora also performs SSL + termination if configured. + + 7. Outgoing traffic traverses from the back-end pool members, through + the Amphora and directly to the gateway (i.e., not through the + Distributor). 
+ +Multi-tenant Support +~~~~~~~~~~~~~~~~~~~~ + +* The following diagram illustrates the active-active topology with + multiple tenants: + +:: + + Front-End Back-End + Internet Networks Networks + (world) (tenant) (tenant) + ║ B A A + ║ floating IP ║ ║ ║ ┌────────┐ + ┌─╨────┐ to LB VIP A ║ ║ ┌────┬─────────┬────┐ ║ │Tenant A│ + │Router├───────────────║─►╫◄─┤A IP│ Amphora │A IP├─►╫◄─┤Service │ + │ GW ├──────────────►╢ ║ └┬───┤ (1) │back│ ║ │ (1) │ + └──────┘ floating IP ║ ║ │VIP├─┬──────┬┴────┘ ║ └────────┘ + to LB VIP B ║ ║ └───┘ │ MGMT │ ║ ┌────────┐ + ╓◄───────────────────║──║─────────┤ IP │ ║ │Tenant A│ + ║ ║ ║ └──────┘ ╟◄─┤Service │ + M B A ┌────┬─────────┬────┐ ║ │ (2) │ + ║ ║ ╟◄─┤A IP│ Amphora │A IP├─►╢ └────────┘ + ║ ║ ║ └┬───┤ (2) │back│ ║ ┌────────┐ + ║ ║ ║ │VIP├─┬──────┬┴────┘ ║ │Tenant A│ + ║ ║ ║ └───┘ │ MGMT │ ╟◄─┤Service │ + ╟◄───────────────────║──║─────────┤ IP │ ║ │ (3) │ + ║ ║ ║ └──────┘ ║ └────────┘ + ║ B A ••• B • + ║ ┌─────────┬────┐ ║ ║ ┌────┬─────────┬────┐ ║ • + ║ │ │IP A├─╢─►╫◄─┤A IP│ Amphora │A IP├─►╢ ┌────────┐ + ║ │ ├───┬┘ ║ ║ └┬───┤ (k) │back│ ║ │Tenant A│ + ║ │ Distri- │VIP├─arp►╜ │VIP├─┬──────┬┴────┘ ╙◄─┤Service │ + ║ │ butor ├───┘ ║ └───┘ │ MGMT │ │ (m) │ + ╟◄─ │ │ ─────║────────────┤ IP │ └────────┘ + ║ │ ├────┐ ║ └──────┘ + ║ │ │IP B├►╢ tenant A + ║ │ ├───┬┘ ║ = = = = = = = = = = = = = = = = = = = = = + ║ │ │VIP│ ║ ┌────┬─────────┬────┐ B tenant B + ║ └─┬──────┬┴─┬─┘ ╟◄────┤B IP│ Amphora │B IP├─►╢ ┌────────┐ + ║ │ MGMT │ └arp►╢ └┬───┤ (1) │back│ ║ │Tenant B│ + ╟◄────┤ IP │ ║ │VIP├─┬──────┬┴────┘ ╟◄─┤Service │ + ║ └──────┘ ║ └───┘ │ MGMT │ ║ │ (1) │ + ╟◄───────────────────║────────────┤ IP │ ║ └────────┘ + ║ ┌───────────────┐ ║ └──────┘ ║ + M │ Octavia LBaaS │ B ••• B • + ╟◄─┤ Controller │ ║ ┌────┬─────────┬────┐ ║ • + ║ └┬─────────────┬┘ ╙◄────┤B IP│ Amphora │B IP├─►╢ + ║ │ Amphora │ └┬───┤ (q) │back│ ║ ┌────────┐ + ║ │ Cluster Mgr.│ │VIP├─┬──────┬┴────┘ ║ │Tenant B│ + ║ └─────────────┘ └───┘ │ MGMT │ ╙◄─┤Service │ + ╟◄────────────────────────────────┤ IP │ │ (r) │ + ║ └──────┘ └────────┘ + ║ + Management Amphora Clusters Back-end Pool + Network A(1..k), B(1..q) A(1..m),B(1..r) + + +* Both tenants A and B share the Distributor, but each has a different + front-end network. The Distributor listens on both loadbalancers' VIPs + and forwards to either A's or B's Amphorae. + +* The Amphorae and the back-end (tenant) networks are not shared between + tenants. + + +Problem Details +--------------- + +* Octavia should support different Distributor implementations, similar + to its support for different Amphora types. The operator should be able + to configure different types of algorithms for the Distributor. All + algorithms should provide flow-affinity to allow TLS termination at the + amphora. See :doc:`active-active-distributor` for details. + +* Octavia controller shall seamlessly configure any newly created Amphora + ([`P2`_] including peer state synchronization, such as sticky-tables, if + needed) and shall reconfigure the other solution components (e.g., + Neutron) as needed. The controller shall further manage all Amphora + life-cycle events. + +* Since it is impractical at scale for peer state synchronization to occur + between all Amphorae part of a single load balancer, Amphorae that are all + part of a single load balancer configuration need to be divided into smaller + peer groups (consisting of 2 or 3 Amphorae) with which they should + synchronize state information. 
+
+
+Required changes
+----------------
+
+The active-active loadbalancers require the following high-level changes:
+
+
+Amphora related changes
+~~~~~~~~~~~~~~~~~~~~~~~
+
+* Updated Amphora image to support active-active topology. The front-end
+  still has both a unique IP (to allow direct addressing on the front-end
+  network) and a VIP; however, it should not answer ARP requests for the
+  VIP address (all Amphorae in a single Amphora Cluster concurrently serve
+  the same VIP). Amphorae should continue to have a management IP on the
+  LB Network so Octavia can configure them. Amphorae should also generally
+  support hot-plugging interfaces into back-end tenant networks as they do
+  in the current implementation. [`P2`_] Finally, the Amphora
+  configuration may need to be changed to randomize the member list, in
+  order to prevent synchronized decisions by all Amphorae in the Amphora
+  Cluster.
+
+* Extend the data model to support active-active Amphorae. This is
+  somewhat similar to active-passive (VRRP) support. Each Amphora needs to
+  store its IP and port on its front-end network (similar to ha_ip and
+  ha_port_id in the current model) and its role should indicate it is in a
+  cluster.
+
+  The provisioning status should be interpreted as referring to an Amphora
+  only and not the load-balancing service. The status of the load balancer
+  should correspond to the number of ``ONLINE`` Amphorae in the Cluster.
+  If all Amphorae are ``ONLINE``, the load balancer is also ``ONLINE``. If
+  a small number of Amphorae are not ``ONLINE``, then the load balancer is
+  ``DEGRADED``. If enough Amphorae are not ``ONLINE`` (past a threshold),
+  then the load balancer is ``DOWN`` (see the sketch at the end of this
+  section).
+
+* Rework some of the controller worker flows to support creation and
+  deletion of Amphorae by the ACM in an asynchronous manner. The compute
+  node may be created/deleted independently of the corresponding Amphora
+  flow, triggered as events by the ACM logic (e.g., node update). The
+  flows do not need much change (beyond those implied by the changes in
+  the data model), since the post-creation/pre-deletion configuration of
+  each Amphora is unchanged. This is also similar to the failure recovery
+  flow, where a recovery flow is triggered asynchronously.
+
+* Create a flow (or task) for the controller worker for (de-)registration
+  of Amphorae with the Distributor. The Distributor has to be aware of the
+  current ``ONLINE`` Amphorae, to which it can forward traffic. [`P2`_]
+  The Distributor can do very basic monitoring of the Amphorae health
+  (primarily to make sure network connectivity between the Distributor and
+  Amphorae is working). Monitoring pool member health will remain the
+  purview of the pool health monitors.
+
+* All the Amphorae in the Amphora Cluster shall replicate the same
+  listeners, pools, and TLS configuration, as they do now. We assume all
+  Amphorae in the Amphora Cluster can perform exactly the same
+  load-balancing decisions and can be treated as equivalent by the
+  Distributor (except for affinity considerations).
+
+* Extend the Amphora (REST) API and/or *Plug VIP* task to allow disabling
+  of ``arp`` on the VIP.
+
+* In order to prevent losing session_persistence data in the event of an
+  Amphora failure, the Amphorae will need to be configured to share
+  session_persistence data (via stick tables) with a subset of other
+  Amphorae that are part of the same load balancer configuration (i.e., a
+  peer group).
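+
+As a non-normative sketch of the ``ONLINE``/``DEGRADED``/``DOWN`` mapping
+described in the data-model bullet above (the threshold handling and names
+here are illustrative, not part of the data model)::
+
+    def cluster_operating_status(n_online, desired_size, down_threshold):
+        # Every Amphora in the Cluster is ONLINE: so is the load balancer.
+        if n_online >= desired_size:
+            return 'ONLINE'
+        # Too many Amphorae lost (past the operator threshold): DOWN.
+        if n_online <= down_threshold:
+            return 'DOWN'
+        # A small number of Amphorae lost: DEGRADED.
+        return 'DEGRADED'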
+
+
+Amphora Cluster Manager driver for the active-active topology (*new*)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* Add an active-active topology to the topology types.
+
+* Add a new driver to support creation/deletion of an Amphora Cluster via
+  an ACM. This will re-use existing controller-worker flows as much as
+  possible. The reference ACM will call the existing drivers to create
+  compute nodes for the Amphorae and configure them.
+
+* The ACM shall orchestrate creation and deletion of Amphora instances to
+  meet the availability requirements. Amphora failover will utilize the
+  existing health monitor flows, with hooks to notify the ACM when the
+  ACTIVE-ACTIVE topology is used. [`P2`_] The ACM shall handle graceful
+  amphora removal via draining (delay actual removal until existing
+  connections are terminated or some timeout has passed).
+
+* Change the flow of LB creation. The ACM driver shall create an Amphora
+  Cluster instance for each new loadbalancer. It should maintain the
+  desired number of Amphorae in the Cluster and meet the high-availability
+  configuration given by the operator. *Note*: basic functionality is
+  already supported by the Health Manager; it may be enough to support a
+  fixed or dynamic cluster size. In any case, existing flows to manage the
+  Amphora life cycle will be re-used in the reference ACM driver.
+
+* The ACM shall be responsible for providing health, performance, and
+  life-cycle management at the Cluster level rather than at the Amphora
+  level. Maintaining the loadbalancer status (as described above) by some
+  function of the collective status of all Amphorae in the Cluster is one
+  example. Other examples include tracking configuration changes,
+  providing Cluster statistics, monitoring and maintaining compute nodes
+  for the Cluster, etc. The ACM abstraction would also support pluggable
+  ACM implementations that may provide more advanced capabilities (e.g.,
+  elasticity, AZ-aware availability, etc.). The reference ACM driver will
+  re-use existing components and/or code which currently handle health,
+  life-cycle, etc. management for other load balancer topologies.
+
+* New data model for an Amphora Cluster which has a one-to-one mapping
+  with the loadbalancer. This defines the common properties of the Amphora
+  Cluster (e.g., id, min. size, desired size, etc.) and additional
+  properties for the specific implementation.
+
+* Add configuration file options to support configuration of an
+  active-active Amphora Cluster. Add default configuration. [`P2`_] Add
+  Operator API.
+
+* Add or update documentation for new components added and new or changed
+  functionality.
+
+* Communication between the ACM and Distributors should be secured using
+  two-way SSL certificate authentication, in much the same way this is
+  accomplished between other Octavia controller components and Amphorae
+  today.
+
+Network driver changes
+~~~~~~~~~~~~~~~~~~~~~~
+
+* Support the creation, connection, and configuration of the various
+  networks and interfaces as described in the 'high-level topology'
+  diagram.
+
+* Adding a new loadbalancer requires attaching the Distributor to the
+  loadbalancer's front-end network, adding a VIP port to the Distributor,
+  and configuring the Distributor to answer ``arp`` requests for the VIP.
+  The Distributor shall have a separate interface for each loadbalancer
+  and shall not allow any routing between different ports; in particular,
+  Amphorae of different tenants must not be able to communicate with each
+  other.
+  In the reference implementation, this will be accomplished by using
+  separate OVS bridges per load balancer.
+
+* Adding a new Amphora requires attaching it to the front-end and back-end
+  networks (similar to the current implementation), adding the VIP (but
+  with ``arp`` disabled), and registering the Amphora with the
+  Distributor. The tenant's front-end and back-end networks must allow
+  attachment of dynamically created Amphorae by involving the ACM (e.g.,
+  when the health monitor replaces a failed Amphora). ([`P2`_] extend the
+  LBaaS API to allow specifying an address range for new Amphorae usage,
+  e.g., a subnet pool).
+
+
+Amphora health-monitoring support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* Modify the Health Manager to manage the health of an Amphora Cluster
+  through the ACM; namely, forward Amphora health change events to the
+  ACM, so it can decide when the Amphora Cluster is considered to be in a
+  healthy state. This should be done in addition to managing the health of
+  each Amphora. [`P2`_] Monitor the Amphorae also on their front-end
+  network (i.e., from the Distributor).
+
+
+Distributor support
+~~~~~~~~~~~~~~~~~~~
+
+* **Note:** as mentioned above, the detailed design of the Distributor
+  component is described in a separate document. Some design
+  considerations are highlighted below.
+
+* The Distributor should be supported similarly to an Amphora; namely, it
+  should have its own abstract driver (a non-normative sketch follows this
+  section).
+
+* For a reference implementation, add support for a Distributor image.
+
+* Define a REST API for Distributor configuration (no SSH API). The API
+  shall support:
+
+  - Add and remove a VIP (loadbalancer) and specify distribution
+    parameters (e.g., affinity, algorithm, etc.).
+
+  - Registration and de-registration of Amphorae.
+
+  - Status
+
+  - [`P2`_] Macro-level stats
+
+* Spawn Distributors (if using on-demand Distributor compute nodes) and/or
+  attach to existing ones as needed. Manage health and life-cycle of the
+  Distributor(s). Create, connect, and configure Distributor networks as
+  necessary.
+
+* Create a data model for the Distributor.
+
+* Add a Distributor driver and flows to (re-)configure the Distributor on
+  creation/destruction of a new loadbalancer (add/remove loadbalancer VIP)
+  and [`P2`_] configure the distribution algorithm for the loadbalancer's
+  Amphora Cluster.
+
+* Add flows to Octavia to (re-)configure the Distributor on adding/removing
+  Amphorae from the Amphora Cluster.
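+
+As a non-normative illustration of the driver abstraction mentioned above
+(the actual interface is defined in the separate Distributor design
+document; these names are placeholders only)::
+
+    class DistributorDriver(object):
+        """Sketch of the abstraction the (de-)registration flows call."""
+
+        def register_amphorae(self, vip, amphorae):
+            # Ask the Distributor to start forwarding VIP traffic to the
+            # front-end addresses of these ONLINE Amphorae.
+            raise NotImplementedError
+
+        def unregister_amphorae(self, vip, amphorae):
+            # Stop forwarding traffic to Amphorae leaving the Cluster.
+            raise NotImplementedError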
+
+
+Packaging
+~~~~~~~~~
+
+* Extend Octavia installation scripts to create an image for the
+  Distributor.
+
+
+Alternatives
+------------
+
+* Use external services to manage the cluster directly.
+  This utilizes functionality that already exists in OpenStack (e.g.,
+  Heat and Ceilometer) rather than replicating it. This approach would
+  also benefit from future extensions to these services. On the other
+  hand, this adds undesirable dependencies on other projects (and their
+  corresponding teams), complicates handling of failures, and requires
+  defensive coding around service calls. Furthermore, these services
+  cannot handle the LB-specific control configuration.
+
+* Implement a nested Octavia.
+  Use another layer of Octavia to distribute traffic across the Amphora
+  Cluster (i.e., the Amphorae in the Cluster are back-end members of
+  another Octavia instance). This approach has the potential to provide
+  greater flexibility (e.g., provide NAT and/or more complex distribution
+  algorithms). It also potentially reuses existing code. However, we do
+  not want the Distributor to proxy connections, so HA-Proxy cannot be
+  used. Furthermore, this approach might significantly increase the
+  overhead of the solution.
+
+
+Data model impact
+-----------------
+
+* loadbalancer table
+
+  - `cluster_id`: associated Amphora Cluster (no changes to table, 1-1
+    relationship from Cluster data-model)
+
+* lb_topology table
+
+  - new value: ``ACTIVE_ACTIVE``
+
+* amphora_role table
+
+  - new value: ``IN_CLUSTER``
+
+* Distributor table (*new*): Distributor information, similar to Amphora.
+  See :doc:`active-active-distributor`
+
+* Cluster table (*new*): an extension to loadbalancer (i.e., one-to-one
+  mapping to load-balancer)
+
+  - `id` (primary key)
+
+  - `cluster_name`: identifier of the Cluster instance for the Amphora
+    Cluster Manager
+
+  - `desired_size`: required number of Amphorae in the Cluster. Octavia
+    will create this many active-active Amphorae in the Amphora Cluster.
+
+  - `min_size`: the number of ``ACTIVE`` Amphorae in the Cluster must be
+    above this number for the Amphora Cluster status to be ``ACTIVE``
+
+  - `cooldown`: cooldown period between successive add/remove Amphora
+    operations (to avoid thrashing)
+
+  - `load_balancer_id`: 1:1 relationship to loadbalancer
+
+  - `distributor_id`: N:1 relationship to Distributor. Support multiple
+    Distributors
+
+  - `provisioning_status`
+
+  - `operating_status`
+
+  - `enabled`
+
+  - `cluster_type`: type of Amphora Cluster implementation
+
+
+REST API impact
+---------------
+
+* Distributor REST API -- This is a new internal API that will be secured
+  via two-way SSL certificate authentication. See
+  :doc:`active-active-distributor`
+
+* Amphora REST API -- support configuration of disabling ``arp`` on the
+  VIP.
+
+* [`P2`_] LBaaS API -- support configuration of desired availability,
+  perhaps by selecting a flavor (e.g., gold is a minimum of 4 Amphorae,
+  platinum is a minimum of 10 Amphorae).
+
+* Operator API --
+
+  - Topology to use
+
+  - Cluster type
+
+  - Default availability parameters for the Amphora Cluster
+
+
+Security impact
+---------------
+
+* See :doc:`active-active-distributor` for Distributor related security
+  impact.
+
+
+Notifications impact
+--------------------
+
+None.
+
+
+Other end user impact
+---------------------
+
+None.
+
+
+Performance Impact
+------------------
+
+ACTIVE-ACTIVE should be able to deliver significantly higher performance
+than the SINGLE or ACTIVE-STANDBY topologies. It will consume more
+resources to deliver this higher performance.
+
+
+Other deployer impact
+---------------------
+
+The reference ACM becomes a new process that is part of the Octavia control
+components (like the controller worker, health monitor and housekeeper). If
+the reference implementation is used, a new Distributor image will need to
+be created and stored in glance much the same way the Amphora image is
+created and stored today.
+
+Developer impact
+----------------
+
+None.
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+@TODO
+
+
+Work Items
+----------
+
+@TODO
+
+
+Dependencies
+============
+
+@TODO
+
+
+Testing
+=======
+
+* Unit tests with tox.
+* Functional tests with tox.
+* Scenario tests.
+
+
+Documentation Impact
+====================
+
+Need to document all new APIs and API changes, the new ACTIVE-ACTIVE
+topology design and features, and new instructions for operators seeking
+to deploy Octavia with ACTIVE-ACTIVE topology.
+
+
+References
+==========
+
+https://blueprints.launchpad.net/octavia/+spec/base-image
+https://blueprints.launchpad.net/octavia/+spec/controller-worker
+https://blueprints.launchpad.net/octavia/+spec/amphora-driver-interface
+https://blueprints.launchpad.net/octavia/+spec/controller
+https://blueprints.launchpad.net/octavia/+spec/operator-api
+:doc:`../../api/haproxy-amphora-api`
+https://blueprints.launchpad.net/octavia/+spec/active-active-topology
diff --git a/specs/version0.9/stats_api.rst b/specs/version0.9/stats_api.rst
new file mode 100644
index 0000000000..519eb34027
--- /dev/null
+++ b/specs/version0.9/stats_api.rst
@@ -0,0 +1,167 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+=============================================
+Add statistics gathering API for loadbalancer
+=============================================
+
+https://blueprints.launchpad.net/octavia/+spec/stats-support
+
+Problem description
+===================
+Currently, Octavia does not support the gathering of loadbalancer
+statistics. This causes inconsistencies between the Octavia and
+Neutron-LBaaS APIs. Another problem is that the listener statistics
+returned by the Octavia API reflect only the first record for the listener
+in the Octavia database; since we now support more topologies than SINGLE,
+this needs to be fixed too.
+
+Proposed change
+===============
+Add one more statistic, 'request_errors', to indicate the number of
+request errors for each listener; we can get this data from the haproxy
+stat 'ereq'.
+
+Add a new module 'stats' to octavia.common with a class 'StatsMixin' to
+do the actual statistics calculation for both listener and loadbalancer.
+Make the mixin class a new base class for
+octavia.api.v1.controllers.listener_statistics.ListenerStatisticsController,
+to make sure we get correct stats from the Octavia API (a sketch of the
+aggregation follows this section).
+
+Add a new module 'loadbalancer_statistics' to octavia.api.v1.controllers
+with a class LoadbalancerStatisticsController to provide a new REST API
+for gathering statistics at the loadbalancer level.
+
+Use the event stream to serialize the statistics messages from Octavia to
+neutron-lbaas via oslo_messaging, to keep consistency with the
+neutron-lbaas API.
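+
+A minimal sketch of the aggregation the 'StatsMixin' class would perform
+(the field names follow this spec; the function shape is illustrative
+only)::
+
+    STAT_FIELDS = ('bytes_in', 'bytes_out', 'active_connections',
+                   'total_connections', 'request_errors')
+
+    def aggregate_stats(records):
+        # Sum every per-amphora record for a listener (or every listener
+        # total for a loadbalancer) instead of reading a single row.
+        totals = dict.fromkeys(STAT_FIELDS, 0)
+        for record in records:
+            for field in STAT_FIELDS:
+                totals[field] += record[field]
+        return totals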
+
+Alternatives
+------------
+Update the 'stats' method in neutron-lbaas for the octavia driver,
+allowing neutron-lbaas to get stats from Octavia through a REST API
+request, to keep consistency with the neutron-lbaas API.
+
+Data model impact
+-----------------
+One new column for table listener_statistics will be introduced to
+represent request errors:
+
+  +--------------------+-------------+------+-----+---------+-------+
+  | Field              | Type        | Null | Key | Default | Extra |
+  +--------------------+-------------+------+-----+---------+-------+
+  | request_errors     | bigint(20)  | NO   |     | NULL    |       |
+  +--------------------+-------------+------+-----+---------+-------+
+
+REST API impact
+---------------
+
+Add 'request_errors' in the response of list listener statistics:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**Example List listener statistics: JSON response**
+
+.. code::
+
+    {
+        "listener": {
+            "bytes_in": 0,
+            "bytes_out": 0,
+            "active_connections": 0,
+            "total_connections": 0,
+            "request_errors": 0
+        }
+    }
+
+Add a new API to list loadbalancer statistics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Lists loadbalancer statistics.
+
+  +----------------+------------------------------------------------+
+  | Request Type   | ``GET``                                        |
+  +----------------+------------------------------------------------+
+  | Endpoint       | ``URL/v1/loadbalancers/{lb_id}/stats``         |
+  +----------------+---------+--------------------------------------+
+  |                | Success | 200                                  |
+  | Response Codes +---------+--------------------------------------+
+  |                | Error   | 401, 404, 500                        |
+  +----------------+---------+--------------------------------------+
+
+**Example List loadbalancer statistics: JSON response**
+
+.. code::
+
+    {
+        "loadbalancer": {
+            "bytes_in": 0,
+            "bytes_out": 0,
+            "active_connections": 0,
+            "total_connections": 0,
+            "request_errors": 0,
+            "listeners": [{
+                "id": "uuid",
+                "bytes_in": 0,
+                "bytes_out": 0,
+                "active_connections": 0,
+                "total_connections": 0,
+                "request_errors": 0
+            }]
+        }
+    }
+
+Security impact
+---------------
+None
+
+Notifications impact
+--------------------
+None
+
+Other end user impact
+---------------------
+None
+
+Performance Impact
+------------------
+None
+
+Other deployer impact
+---------------------
+None
+
+Developer impact
+----------------
+None
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+li, chen
+
+Work Items
+----------
+* Extend current stats collection for listeners on each amphora
+* Add module 'stats'
+* Add new API for gathering statistics at the loadbalancer level
+* Update stats to the neutron database
+
+Dependencies
+============
+None
+
+Testing
+=======
+Functional tests with tox.
+
+Documentation Impact
+====================
+Changes shall be introduced to the octavia APIs: see [1]
+
+References
+==========
+[1] https://docs.openstack.org/api-ref/load-balancer/v1/octaviaapi.html
diff --git a/specs/version1.0/flavors.rst b/specs/version1.0/flavors.rst
new file mode 100644
index 0000000000..e740b52d2a
--- /dev/null
+++ b/specs/version1.0/flavors.rst
@@ -0,0 +1,340 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+=================================
+Provider Flavor Framework
+=================================
+
+https://blueprints.launchpad.net/octavia/+spec/octavia-lbaas-flavors
+
+A Provider Flavor framework provides a mechanism for providers to specify
+capabilities that are not currently handled via the octavia api. It allows
+operators to enable capabilities that may be unique to a particular
+provider or simply not yet available within octavia. If a feature is
+common, it is highly encouraged that it be implemented via the standard
+Octavia api. In addition, operators can configure different flavors from a
+maintained list of provider capabilities. This framework enables providers
+to bring new features to market quickly and gives operators an easy way to
+consume them.
+
+
+Problem description
+===================
+
+Flavors are used in various services for specifying service capabilities
+and other parameters. Having the ability to create loadbalancers with
+various capabilities (such as HA, throughput, or DDoS protection) gives
+users a way to better plan their LB services and benefit from LBaaS
+functions which are not part of the Octavia API. Since Octavia will become
+the new OpenStack LBaaS API, a new flavors API should be developed inside
+Octavia.
+
+As of now, Octavia does not support multiple providers.
+The ability to define different LBaaS providers is a mandatory feature for
+Octavia to be the OpenStack LBaaS API. Therefore, this spec depends on
+adding multi-provider support to Octavia. Service providers will be
+configured via the Octavia configuration file.
+
+It's important to mention that adding the flavors capability to Octavia is
+not actually dependent on the work for the LBaaS API spinout, from Neutron
+to Octavia, being completed. This capability can be added to Octavia but
+not actually used until the API spinout is complete and Octavia becomes
+the official OpenStack LBaaS API.
+
+This spec is based on two existing specs from neutron:
+
+`Service Flavor Framework
+`__
+`Flavor framework - Templates and meta-data
+`__
+
+However, this is a spec for the first and basic flavors support. The
+following capabilities are not part of this spec:
+
+* Providing parameterized metainfo templates for provider profiles.
+* Providing meta data for a specific LBaaS object as part of its creation.
+
+
+Proposed change
+===============
+
+The Provider Flavor framework enables the ability to create distinct
+provider flavor profiles of supported parameters. Operators will have the
+ability to query the provider driver interface for a list of supported
+parameters. Operators can view the said list by provider and create
+flavors by selecting one or many parameters from the list. The parameters
+that will be used to enable specific functionality will be JSON in transit
+and at rest. This JSON payload is assigned to a provider and a flavor name
+(an illustration follows the workflow list below). Flavor names can be,
+for example (but not limited to), dev, stage, prod or bronze, silver,
+gold. A provider can have many flavor names, and a flavor name can be used
+by only one provider. Each provider/flavor pair is assigned a group of
+meta-parameters and forms a flavor profile. Users then have the option of
+selecting from any of the existing flavors and submitting the selected
+flavor upon the creation of the load balancer. The flavor name or id is
+submitted when creating a load balancer.
+
+The proposal is to add LBaaS service flavoring to Octavia.
+This will include the following aspects:
+
+* Adding a new flavors API to the Octavia API
+* Adding flavors models to Octavia
+* Adding flavors db tables to the Octavia database
+* Adding a DB migration for the new DB objects
+* Ensuring backwards compatibility for loadbalancer objects which were
+  created before flavors support. This covers both cases: when the
+  loadbalancer was created before multi-provider support and when the
+  loadbalancer was created with a certain provider.
+* Adding default entries to the DB tables representing the default Octavia
+  flavor and default Octavia provider profile.
+* Adding a "default" flavor to the devstack plugin.
+
+A sample use case of the operator flavor workflow would be the following:
+
+* The operator queries the provider capabilities
+* The operator creates a flavor profile
+* The flavor profile is validated with the provider driver
+* The flavor profile is stored in the octavia db
+* The end user creates a lb with the flavor
+* The profile is validated against the driver once again, upon every
+  lb-create
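+
+Purely as an illustration, a flavor profile's ``metadata`` payload might
+look like the following; the keys are hypothetical examples of
+provider-advertised capabilities, not definitions made by this spec::
+
+    {
+        "loadbalancer_topology": "ACTIVE_STANDBY",
+        "compute_flavor": "m1.amphora"
+    }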
+
+
+Alternatives
+------------
+
+An alternative is patchset-5 within this very same spec. While the concept
+is the same, the design is different. The differences to note with
+patchset-5 are primarily in the data schemas. With patchset-5, the
+metadata that is passed to the load balancer has a one-to-one relationship
+with the provider. Also, key/value pairs are stored in JSON as opposed to
+normalized tables, and a list of provider-supported capabilities is not
+maintained. That said, this alternative design is an option.
+
+
+Data model impact
+-----------------
+
+DB table 'flavor_profile' introduced to represent the profile that is
+created when combining a provider with a flavor.
+
+  +--------------------+--------------+------+---------+----------+
+  | Field              | Type         | Null | Key     | Default  |
+  +--------------------+--------------+------+---------+----------+
+  | id                 | varchar(36)  | NO   | PK      | generated|
+  +--------------------+--------------+------+---------+----------+
+  | provider_name      | varchar(255) | NO   |         |          |
+  +--------------------+--------------+------+---------+----------+
+  | metadata           | varchar(4096)| NO   |         |          |
+  +--------------------+--------------+------+---------+----------+
+
+.. note:: The provider_name is the name the driver is advertised as
+          via setuptools entry points. This will be validated when
+          the operator uploads the flavor profile and the metadata
+          is validated.
+
+DB table 'flavor' introduced to represent flavors.
+
+  +--------------------+--------------+------+-----+----------+
+  | Field              | Type         | Null | Key | Default  |
+  +--------------------+--------------+------+-----+----------+
+  | id                 | varchar(36)  | NO   | PK  | generated|
+  +--------------------+--------------+------+-----+----------+
+  | name               | varchar(255) | NO   | UK  |          |
+  +--------------------+--------------+------+-----+----------+
+  | description        | varchar(255) | YES  |     | NULL     |
+  +--------------------+--------------+------+-----+----------+
+  | enabled            | tinyint(1)   | NO   |     | True     |
+  +--------------------+--------------+------+-----+----------+
+  | flavor_profile_id  | varchar(36)  | NO   | FK  |          |
+  +--------------------+--------------+------+-----+----------+
+
+
+DB table attribute 'load_balancer.flavor_id' introduced to link a
+flavor to a load_balancer.
+ + +--------------------+--------------+------+-----+----------+ + | Field | Type | Null | Key | Default | + +--------------------+--------------+------+-----+----------+ + | flavor_id | varchar(36) | YES | FK1 | NULL | + +--------------------+--------------+------+-----+----------+ + + +REST API impact +--------------- + +FLAVOR(/flavors) + ++-----------------+-------+---------+---------+------------+-----------------+ +|Attribute |Type |Access |Default |Validation/ |Description | +|Name | | |Value |Conversion | | ++=================+=======+=========+=========+============+=================+ +|id |string |RO, admin|generated|N/A |identity | +| |(UUID) | | | | | ++-----------------+-------+---------+---------+------------+-----------------+ +|name |string |RO, admin|'' |string |human-readable | +| | | | | |name | ++-----------------+-------+---------+---------+------------+-----------------+ +|description |string |RO, admin|'' |string |human-readable | +| | | | | |description | ++-----------------+-------+---------+---------+------------+-----------------+ +|enabled |bool |RO, admin|true |bool |toggle | +| | | | | | | ++-----------------+-------+---------+---------+------------+-----------------+ +|flavor_profile_id|string |RO, admin| |string |human-readable | +| | | | | |flavor_profile_id| ++-----------------+-------+---------+---------+------------+-----------------+ + +FLAVOR PROFILE(/flavorprofiles) + ++-----------------+--------+---------+---------+------------+---------------+ +|Attribute |Type |Access |Default |Validation/ |Description | +|Name | | |Value |Conversion | | ++=================+========+=========+=========+============+===============+ +|id |string |admin |generated|N/A |identity | +| |(UUID) | | | | | ++-----------------+--------+---------+---------+------------+---------------+ +|name |string |admin |'' |string |human-readable | +| | | | | |name | ++-----------------+--------+---------+---------+------------+---------------+ +|provider-id |string |admin |'' |string |human-readable | +| | | | | |provider-id | ++-----------------+--------+---------+---------+------------+---------------+ +|metadata |string |admin |{} |json |flavor meta | +| | | | | |parameters | ++-----------------+--------+---------+---------+------------+---------------+ + +Security impact +--------------- + +The policy.json will be updated to allow all users to query the flavor +listing and request details about a specific flavor entry, with the +exception of flavor metadata. All other REST points for +create/update/delete operations will be admin only. Additionally, the CRUD +operations for Provider Profiles will be restricted to administrators. + + +Notifications impact +-------------------- + +N/A + +Other end user impact +--------------------- + +An existing LB cannot be updated with a different flavor profile. A flavor +profile can only be applied upon the creation of the LB. The flavor +profile will be immutable. + +Performance Impact +------------------ + +There will be a minimal overhead incurred when the logical representation is +scheduled onto the actual backend. Once the backend is selected, direct +communications will occur via driver calls. + +IPv6 impact +----------- + +None + +Other deployer impact +--------------------- + +The deployer will need to craft flavor configurations that they wish to expose +to their users. During migration the existing provider configurations will be +converted into basic flavor types. 
Once migrated, the deployer will have the opportunity to modify
+the flavor definitions.
+
+Developer impact
+----------------
+
+The expected developer impact should be minimal, as the framework only
+impacts the initial scheduling of the logical service onto a backend. The
+driver implementations should remain unchanged except for the addition of
+the metainfo call.
+
+Community impact
+----------------
+
+This proposal allows operators to offer services beyond those
+directly implemented, and to do so in a way that does not increase
+community maintenance or burden.
+
+Provider driver impact
+----------------------
+
+The provider driver should have the following abilities:
+
+* Provide an interface to describe the available supported metadata options
+* Provide an interface to validate the flavor metadata
+* Be able to accept the flavor metadata parameters
+* Exception handling for non-supported metadata
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+* Evgeny Fedoruk (evgenyf)
+* Carlos Puga (cpuga)
+
+Work Items
+----------
+* Implement the new models
+* Implement the REST API Extension (including tests)
+* Implement a migration script for existing deployments
+* Add client API support
+* Add policies to the Octavia RBAC system
+
+Dependencies
+============
+
+Depends on provider support and provider drivers that support the
+validation interface and accept the flavor profile metadata.
+
+Testing
+=======
+
+Tempest Tests
+
+Tempest testing including new API and scenario tests to validate the new
+entities.
+
+Functional Tests
+
+Functional tests will need to be created to cover the API and database
+changes.
+
+API Tests
+
+The new API extensions will be tested using functional tests.
+
+Documentation Impact
+====================
+
+User Documentation
+
+User documentation will need to be included to describe to users how to
+use flavors when building their logical topology.
+
+Operator Documentation
+
+Operator documentation will need to be created to detail how to manage
+Flavors, Providers and their respective Profiles.
+
+Developer Documentation
+
+Provider driver implementation documentation will need to be updated
+to cover the new interfaces expected of provider drivers and the structure
+of the metadata provided to the driver.
+
+API Reference
+
+The API reference documentation will need to be updated for the new API
+extensions.
+
+References
+==========
+[1] https://docs.openstack.org/api-ref/load-balancer/v2/index.html
diff --git a/specs/version1.0/health_ip_port.rst b/specs/version1.0/health_ip_port.rst
new file mode 100644
index 0000000000..93ac4ca47f
--- /dev/null
+++ b/specs/version1.0/health_ip_port.rst
@@ -0,0 +1,186 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+==========================================
+LBaaS Alternative Monitoring IP/Port
+==========================================
+
+https://blueprints.launchpad.net/octavia/+spec/lbaas-health-monitoring-port
+
+Currently, the health monitor IP address/port pair is derived from a load
+balancer pool member's address and protocol port. In some use cases it
+would be desirable to monitor the health of a load-balanced pool member on
+an IP address/port pair different from the member's address and protocol
+port. This is currently not possible.
+
+Problem description
+===================
+
+The use case where this would be desirable is when the End User is making
+the health monitor application on the member available on an IP/port that
+is mutually exclusive to the IP/port of the application that is being load
+balanced on the member. The End User would find this advantageous when
+attempting to limit access to health diagnostic information by not
+allowing it to be served over the main ingress IP/port of their
+application.
+
+Beyond limiting access to any health APIs, it allows End Users to design
+different methods of health monitoring, such as creating distinct daemons
+responsible for the health of their hosts' applications.
+
+Proposed change
+===============
+
+The creation of a pool member would now allow the specification of an IP
+address and port to monitor health. The process used to assess the health
+of pool members would then use this new IP address and port to diagnose
+the member.
+
+If a health monitor IP address or port is not specified, the default
+behavior would be to use the IP address and port specified by the member.
+
+There would likely need to be some Horizon changes to support this
+feature; however, by maintaining the old behavior as the default, we will
+not create a strong dependency.
+
+Alternatives
+------------
+
+An alternative is to not allow this functionality, and force all End Users
+to ensure their health checks are available over the member's load
+balanced IP address and protocol port.
+
+As stated in the *Problem description*, this would force End Users to
+provide additional security around their health diagnostic information so
+that they do not expose it to unintended audiences. Pushing this
+requirement on the End User is a heavier burden and limits the
+configuration options for the load-balanced applications they run on
+OpenStack.
+
+Data model impact
+-----------------
+
+The Member data model would gain two new fields called monitor_port
+and monitor_address. These two fields would store the port and IP
+address, respectively, that the monitor will query for the health of the
+load balancer's listener's pool member.
+
+It is important to have the default behavior fall back on the address and
+protocol port of the member, as this will allow any migrations to not
+break existing deployments of OpenStack.
+
+Any Member data models without this new feature would have the fields
+default to the value of null to signify that Octavia's LBaaS service
+should use the member's address and protocol port to assess health status.
+
+REST API impact
+---------------
+
+There are three API calls that will need to be modified, only slightly, to
+facilitate this change.
+
+.. csv-table:: Octavia LBaaS APIs
+   :header: "Method", "URI"
+   :widths: 15, 30
+
+   "POST", "/v2.0/lbaas/pools/{pool_id}/members"
+   "PUT", "/v2.0/lbaas/pools/{pool_id}/members/{member_id}"
+   "GET", "/v2.0/lbaas/pools/{pool_id}/members/{member_id}"
+
+The POST and PUT calls will need two additional fields added to their JSON
+body data for the request and the JSON response data.
+
+The GET call will need two additional fields as well; however, they would
+only be added to the JSON response data.
+
+The fields to be added to each are (an illustrative request follows the
+table):
+
+.. csv-table:: Added Fields
+   :header: "Attribute Name", "Type", "Access", "Default Value", "Validation Conversion", "Description"
+
+   monitor_port,int,"RW, all",null,int,health check port (optional)
+   monitor_address,string,"RW, all",null,types.IPAddressType(),health check IP address (optional)
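+
+An illustrative member-create request using these fields (all values are
+examples only)::
+
+    POST /v2.0/lbaas/pools/{pool_id}/members
+    {
+        "member": {
+            "address": "10.0.0.10",
+            "protocol_port": 80,
+            "monitor_address": "10.0.0.11",
+            "monitor_port": 8080
+        }
+    }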
+
+Security impact
+---------------
+
+None
+
+Notifications impact
+--------------------
+
+None
+
+Other end user impact
+---------------------
+
+None
+
+Performance Impact
+------------------
+
+None
+
+Other deployer impact
+---------------------
+
+None
+
+Developer impact
+----------------
+
+Other plugins do not have to implement this feature, as it is optional due
+to the default behavior. If they decide to implement this feature, they
+would just need to supply the monitor address and port in their POSTs and
+PUTs to the member APIs.
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Primary assignee:
+    a.amerine
+
+Other contributors:
+    None
+
+Work Items
+----------
+
+- Alter the Member Data Model
+- Alter Pool Member APIs
+- Update API reference documentation to reflect changes
+- Write or Alter Unit, Functional, and Tempest Tests to verify new
+  functionality
+
+
+Dependencies
+============
+
+None
+
+
+Testing
+=======
+
+Integration tests can be written to verify functionality. Generally, it
+should only require an existing OpenStack deployment that is running LBaaS
+to verify health checks.
+
+
+Documentation Impact
+====================
+
+The REST API impact will need to be addressed in documentation so that
+developers moving forward know about the feature and can use it.
+
+References
+==========
+
+- Octavia Roadmap Considerations: Health monitoring on alternate IPs
+  and/or ports (https://wiki.openstack.org/wiki/Octavia/Roadmap)
+- RFE Port based HealthMonitor in neutron_lbaas
+  (https://launchpad.net/bugs/1541579)
diff --git a/specs/version1.0/n-lbaas-api-parity.rst b/specs/version1.0/n-lbaas-api-parity.rst
new file mode 100644
index 0000000000..d4fbb087af
--- /dev/null
+++ b/specs/version1.0/n-lbaas-api-parity.rst
@@ -0,0 +1,118 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+========================================
+Align octavia API With Neutron LBaaS API
+========================================
+
+
+Problem description
+===================
+For the octavia API to truly be standalone, it needs to have capability
+parity with Neutron LBaaS's API. Neutron LBaaS has the luxury of
+piggy-backing off of Neutron's API. This gives Neutron LBaaS's API
+resources many capabilities for free. This document is meant to enumerate
+those capabilities that the octavia API does not possess at the time of
+this writing.
+
+Proposed change
+===============
+Complete the tasks enumerated in the `Work Items`_ section.
+
+Alternatives
+------------
+* Do nothing and keep the status quo
+
+Data model impact
+-----------------
+There will be some minor data model changes to octavia in support of this
+change.
+
+REST API impact
+---------------
+This change will have significant impact on the octavia API.
+
+Security impact
+---------------
+This change will improve octavia security by adding keystone
+authentication.
+
+Notifications impact
+--------------------
+No expected change.
+
+Other end user impact
+---------------------
+Users will be able to use the new octavia API endpoint for LBaaS.
+
+Performance Impact
+------------------
+This change may slightly improve performance by reducing the number of
+software layers requests will traverse before responding to the request.
+
+Other deployer impact
+---------------------
+Over time the neutron-lbaas package will be deprecated and deployers will
+only require octavia for LBaaS.
+
+Developer impact
+----------------
+This will simplify LBaaS development by reducing the number of databases
+as well as repositories that require updating for LBaaS enhancements.
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+blogan
+diltram
+johnsom
+rm_you
+dougwig
+
+Work Items
+----------
+Implement the following API capabilities:
+
+* Keystone Authentication
+* Policy Engine
+* Pagination
+* Quotas
+* Filtering lists by query parameter
+* Fields by query parameter
+* Add the same root API endpoints as n-lbaas
+* Support "provider" option in the API to select a driver to spin up a
+  load balancer.
+* API Handler layer to become the same as the n-lbaas driver layer and
+  allow multiple handlers/drivers.
+* Neutron LBaaS V2 driver to octavia API Handler shim layer
+
+Implement the following additional features that n-lbaas maintains:
+
+* OSC extension via a new repository 'python-octaviaclient'
+
+
+Other features to be considered:
+
+* Notifications for resource creating, updating, and deleting.
+* Flavors
+* Agent namespace driver or some lightweight functional driver.
+* Testing octavia with all of the above
+* REST API Microversioning
+
+Dependencies
+============
+None
+
+Testing
+=======
+API tests from neutron-lbaas will be used to validate the new octavia API.
+
+Documentation Impact
+====================
+The octavia api reference will need to be updated.
+
+References
+==========
diff --git a/specs/version1.0/vip-qos-policy-application.rst b/specs/version1.0/vip-qos-policy-application.rst
new file mode 100644
index 0000000000..c9fa0465a8
--- /dev/null
+++ b/specs/version1.0/vip-qos-policy-application.rst
@@ -0,0 +1,186 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+==========================
+Vip QoS Policy Application
+==========================
+
+
+Problem description
+===================
+In real deployments, the bandwidth of a VIP should be limited, because the
+upstream network resource is provided by an ISP or another organization;
+it is not free. The OpenStack provider or its users pay for limited
+bandwidth; for example, users may buy 50 Mbps of bandwidth from an ISP for
+an OpenStack environment to access the Internet. That bandwidth is also
+used by connections from outside of OpenStack to the servers inside it,
+and those servers sit behind a LoadBalancer VIP. We cannot offer the whole
+bandwidth to those servers, as there may also be VMs that want to access
+the external network. So we should be able to apply a bandwidth limitation
+to the VIP port.
+
+Also, if most of the upstream network resource has been used up, we still
+want the backend servers behind the loadbalancer to be accessible and
+stable. A minimum bandwidth guarantee is needed for this scenario.
+
+As for richer QoS functions, in reality we cannot limit our users or
+deployers to the default loadbalancing drivers, such as the haproxy driver
+and the Octavia driver. They may be more concerned about other
+fields/functions related to QoS, like DSCP markings, and could integrate
+third-party drivers that support these fields.
+
+Proposed change
+===============
+This spec introduces the Neutron QoS function to meet the requirements.
+Currently, there are 3 ports (at least) in a loadbalancer created by
+Octavia. One is from the lb-mgmt-net; the others are from the vip-subnet,
+called "loadbalancer-LOADBALANCER_ID" and
+"octavia-lb-vrrp-LOADBALANCER_ID". The former is the VIP port; the latter
+is for VRRP HA, and it will set "allowed_address_pairs" toward the VIP
+fixed_ip. The QoS policy should focus on the attached port
+"octavia-lb-vrrp-LOADBALANCER_ID".
+
+We could apply the Neutron QoS policy to the
+"octavia-lb-vrrp-LOADBALANCER_ID" ports, whether the topology is
+active-active or standalone.
+
+There are the following changes:
+
+* Extend a new column named "qos_policy_id" in the vip table.
+* Extend the Octavia API; we need to pass the vip-qos-policy-id which was
+  created in Neutron into LoadBalancer creation/update.
+* Apply the QoS policy on the vip port in the Loadbalancer workflow.
+
+Alternatives
+------------
+We accept the QoS parameters and implement the QoS function on our own.
+
+Data model impact
+-----------------
+In this spec, the QoS function will be provided by Neutron, so Octavia
+should know the relationship between QoS policies and the vip ports of
+Loadbalancers. There will be some minor data model changes to Octavia in
+support of this change.
+
+* vip table
+
+  - `qos_policy_id`: associate the QoS policy id with the vip port.
+
+REST API impact
+---------------
+
+Proposed attribute::
+
+    EXTEND_FIELDS = {
+        'vip_qos_policy_id':{'allow_post': True, 'allow_put': True,
+                             'validate': {'type:uuid': None},
+                             'is_visible': True,
+                             'default': None}
+    }
+
+The definition in Octavia is like::
+
+    vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType())
+
+Some samples of Loadbalancer creation/update follow. Users are allowed to
+pass "vip_qos_policy_id".
+
+Create/Update Loadbalancer Request::
+
+    POST/PUT /v2.0/lbaas/loadbalancers
+    {
+        "loadbalancer": {
+            "name": "loadbalancer1",
+            "description": "simple lb",
+            "project_id": "b7c1a69e88bf4b21a8148f787aef2081",
+            "tenant_id": "b7c1a69e88bf4b21a8148f787aef2081",
+            "vip_subnet_id": "013d3059-87a4-45a5-91e9-d721068ae0b2",
+            "vip_address": "10.0.0.4",
+            "admin_state_up": true,
+            "flavor": "a7ae5d5a-d855-4f9a-b187-af66b53f4d04",
+            "vip_qos_policy_id": "b61f8b45-e888-4056-94f0-e3d5af96211f"
+        }
+    }
+
+    Response:
+    {
+        "loadbalancer": {
+            "admin_state_up": true,
+            "description": "simple lb",
+            "id": "a36c20d0-18e9-42ce-88fd-82a35977ee8c",
+            "listeners": [],
+            "name": "loadbalancer1",
+            "operating_status": "ONLINE",
+            "provisioning_status": "ACTIVE",
+            "project_id": "b7c1a69e88bf4b21a8148f787aef2081",
+            "tenant_id": "b7c1a69e88bf4b21a8148f787aef2081",
+            "vip_address": "10.0.0.4",
+            "vip_subnet_id": "013d3059-87a4-45a5-91e9-d721068ae0b2",
+            "flavor": "a7ae5d5a-d855-4f9a-b187-af66b53f4d04",
+            "provider": "sample_provider",
+            "pools": [],
+            "vip_qos_policy_id": "b61f8b45-e888-4056-94f0-e3d5af96211f"
+        }
+    }
+
+
+Security impact
+---------------
+None
+
+Notifications impact
+--------------------
+No expected change.
+
+Other end user impact
+---------------------
+Users will be able to specify a qos_policy when creating/updating
+Loadbalancers.
+
+Performance Impact
+------------------
+* Loadbalancer creation will take slightly longer, as we need to validate
+  the input QoS policy.
+* The QoS policy on the Neutron side will affect the network performance
+  based on the different types of QoS rules.
+
+Other deployer impact
+---------------------
+None
+
+Developer impact
+----------------
+TBD.
+

Implementation
==============

Assignee(s)
-----------
zhaobo
reedip

Work Items
----------
* Add the DB model and extend the table column.
* Extend the Octavia V2 API to accept a QoS policy.
* Add QoS application logic into the Loadbalancer workflow.
* Add API validation code to validate access/existence of the qos_policy
  created in Neutron.
* Add UTs to Octavia.
* Add API tests.
* Update CLI to accept QoS fields.
* Documentation work.

Dependencies
============
None

Testing
=======
Unit tests, functional tests, API tests, and scenario tests are necessary.

Documentation Impact
====================
The Octavia API reference will need to be updated.

References
==========
diff --git a/specs/version1.1/active-active-l3-distributor.rst b/specs/version1.1/active-active-l3-distributor.rst new file mode 100644 index 0000000000..50e34a3aa8 --- /dev/null +++ b/specs/version1.1/active-active-l3-distributor.rst @@ -0,0 +1,666 @@
..
  This work is licensed under a Creative Commons Attribution 3.0 Unported
  License.

  http://creativecommons.org/licenses/by/3.0/legalcode

===================================================
Distributor for L3 Active-Active, N+1 Amphora Setup
===================================================
.. attention::
   Please review the active-active topology blueprint first
   (:doc:`../version0.9/active-active-topology`).

https://blueprints.launchpad.net/octavia/+spec/l3-active-active

Problem description
===================

This blueprint describes an *L3 active-active* distributor implementation to
support the Octavia *active-active-topology*. The *L3 active-active*
distributor will leverage the capabilities of a layer 3 Clos network fabric
in order to distribute traffic to an *Amphora Cluster* of one or more
amphoras. Specifically, the *L3 active-active* distributor design will
leverage Equal Cost Multipath Load Sharing (ECMP) with anycast routing to
achieve traffic distribution across the *Amphora Cluster*. In this reference
implementation, the BGP routing protocol will be used to inject anycast
routes into the L3 fabric.

In order to scale a single VIP address across multiple active amphoras, it is
required to have a *distributor* to balance the traffic. By leveraging the
existing capabilities of a modern L3 network, we can use the network itself
as the *distributor*. This approach has several advantages, which include:

* Traffic will be routed via the best path to the destination amphora. There
  is no need to add an additional hop (*distributor*) between the network and
  the amphora.

* The *distributor* is not in the data path and simply becomes a function of
  the L3 network.

* The performance and scale of the *distributor* are those of the L3 network
  itself.

* Native support for both IPv4 and IPv6, without customized logic for each
  address family.

.. _P2:

   **Note:** Items marked with [`P2`_] refer to lower priority features to be
   designed / implemented only after initial release.

Proposed change
===============

* Octavia shall implement the *L3 active-active* distributor through a
  pluggable driver.

* The distributor control plane function (*bgp speaker*) will run inside the
  amphora and leverage the existing amphora lifecycle manager.

* Each amphora will run a *bgp speaker* in the default namespace in order to
  announce the anycast VIP into the L3 fabric. BGP peering and announcements
  will occur over the lb-mgmt-net network.
The anycast VIP will get advertised + as a /32 or /128 route with a next-hop of the front-end IP assigned to the + amphora instance. The front-end network IPs must be directly routable from + the L3 fabric, such as in the provider networking model. + +* Octavia shall implement the ability to specify an anycast VIP/subnet and + front-end subnet (provider network) when creating a new load balancer. The + amphora will have ports on three networks (anycast, front-end, management). + The anycast VIP will get configured on the loopback interface inside the + *amphora-haproxy* network namespace. + +* The operator shall be able to define a *bgp peer profile*, which includes the + required metadata for the amphora to establish a bgp peering session with + the L3 fabric. The bgp peering information will be passed into the + amphora-agent configuration file via config drive during boot. The amphora + will use the bgp peering information to establish a BGP peer and announce its + anycast VIP. + +* [`P2`_] Add the option to allow the *bgp speaker* to run on a dedicated + amphora instance that is not running the software load balancer (HAProxy). In + this model a dedicated *bgp speaker* could advertise anycast VIPs for one or + more amphoras. Each BGP speaker (peer) can only announce a single next-hop + route for an anycast VIP. In order to perform ECMP load sharing, multiple + dedicated amphoras running bgp speakers will be required, each of them would + then announce a different next-hop address for the anycast VIP. Each next-hop + address is the front-end (provider network) IP of an amphora instance running + the software load balancer. + +* [`P2`_] The *Amphora Cluster* will provide resilient flow handling in order + to handle ECMP group flow remapping events and support amphora connection + draining. + +* [`P2`_] Support Floating IPs (FIPs). In order to support FIPs the existing + Neutron *floatingips* API would need to be extended. This will be described + in more detail in a separate spec in the Neutron project. + +Architecture +------------ + +High-level Topology Description +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The below diagram shows the interaction between 2 .. n amphora instances from +each tenant and how they interact with the L3 network distributor. + +:: + + Management Front-End + Internet Network Networks + (World) ║ (provider) + ║ ║ ┌─────────────────────────────┐ ║ + ║ ║ │ Amphora of Tenant A │ ║ + ┌──╨──────────┐ ║ ┌────┬┴──────────┬──────────────────┴┬───╨┐ + │ │ ╠══════╡MGMT│ns: default│ns: amphora-haproxy│f.e.│ + │ │ ║ │ IP ├-----------┼-------------------┤ IP │ + │ │ ║ └────┤ BGP │ Anycast VIP ├───╥┘ + │ │ ║ │ Speaker │ (loopback) │ ║ + │ │ ║ └───────────┴──────────────╥────┘ ║ + │ │ ║ | ║ ║ + │ │ ║ | ║ ║ + │ │ Peering Session 1..* | ║ ║ + │ │---------------------------+ ║ ║ + │ │ {anycast VIP}/32 next-hop {f.e. IP} ║ ║ + │ │ ║ ║ ║ + │ │ ║ ┌─────────────────────────╨───┐ ║ + │ │ ║ │ Amphora of Tenant B │ ║ + │ │ ║ ┌────┬┴──────────┬──────────────────┴┬───╨┐ + │ ╞════════╬══════╡MGMT│ns: default│ns: amphora-haproxy│f.e.│ + │ │ ║ │ IP ├-----------┼-------------------┤ IP │ + │ │ ║ └────┤ BGP │ Anycast VIP ├───╥┘ + │ │ ║ │ Speaker │ (loopback) │ ║ + │ │ ║ └───────────┴──────────────╥────┘ ║ + │ Distributor │ ║ | ║ ║ + │ (L3 Network)│ ║ | ║ ║ + │ │ Peering Session 1..* | ║ ║ + │ │---------------------------+ ║ ║ + │ │ {anycast VIP}/32 next-hop {f.e. 
IP} ║ ║ + │ │ ║ ║ ║ + │ │ ║ ┌─────────────────────────╨───┐ ║ + │ │ ║ │ Amphora of Tenant C │ ║ + │ │ ║ ┌────┬┴──────────┬──────────────────┴┬───╨┐ + │ │ ╚══════╡MGMT│ns: default│ns: amphora-haproxy│f.e.│ + │ │ │ IP ├-----------┼-------------------┤ IP │ + │ │ └────┤ BGP │ Anycast VIP ├────┘ + │ │ │ Speaker │ (loopback) │ + │ │ └───────────┴──────────────╥────┘ + │ │ | ║ + │ │ | ║ + │ │ Peering Session 1..* | ║ + │ │---------------------------+ ║ + │ │ {anycast VIP}/32 next-hop {f.e. IP} ║ + │ │ ║ + │ ╞═══════════════════════════════════════════════Anycast + └─────────────┘ 1..* Network + +* Whenever a new active-active amphora is instantiated it will create BGP + peering session(s) over the lb-mgmt-net to the L3 fabric. The BGP peer will + need to have a neighbor definition in order to allow the peering sessions + from the amphoras. In order to ease configuration, a neighbor statement + allowing peers from the entire lb-mgmt-net IP prefix range can be defined: + ``neighbor 10.10.10.0/24`` + +* The BGP peer IP can either be a route reflector (RR) or any other network + device that will redistribute routes learned from the amphora BGP speaker. + In order to help scaling, it is possible to peer with the ToR switch based on + the rack the amphora instance is provisioned in. The configuration can be + simplified by creating an ``anycast loopback interface`` on each ToR switch, + which will provide a consistent BGP peer IP regardless of which rack or + hypervisor is hosting the amphora instance. + +* Once a peering session is established between an amphora and the L3 fabric, + the amphora will need to announce its anycast VIP with a next-hop address of + its front-end network IP. The front-end network IP (provider) must be + routable and reachable from the L3 network in order to be used. + +* In order to leverage ECMP for distributing traffic across multiple amphoras, + multiple equal-cost routes must be installed into the network for the anycast + VIP. This requires the L3 network to have ``Multipath BGP`` enabled, so BGP + installs multiple paths and does not select a single best path. + +* After the amphoras in a cluster are initialized there will be an ECMP group + with multiple equal-cost routes for the anycast VIP. The data flow for + traffic is highlighted below: + + 1. Traffic will ingress into the L3 network fabric with a destination IP + address of the anycast VIP. + + 2. If this is a new flow, the flow will get hashed to one of the next-hop + addresses in the ECMP group. + + 3. The packet will get sent to the front-end IP address of the amphora + instance that was selected from the above step. + + 4. The amphora will accept the packet and send it to the back-end server + over the front-end network or a directly attached back-end (tenant) + network attached to the amphora. + + 5. The amphora will receive the response from the back-end server and + forward it on to the next-hop gateway of front-end (provider) network + using the anycast VIP as the source IP address. + + 6. All subsequent packets belonging to the same flow will get routed + through the same path. + +* Adding or removing members to a L3 active-active amphora cluster will result + in flow remapping, as different paths will be selected due to rehashing. It + is recommended to enable the ``resilient hashing`` feature on ECMP groups in + order to minimize flow remapping. 
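As an illustration of the announcement mechanism described above, here is a
minimal sketch of a bgp speaker announcing an anycast VIP with a front-end
next-hop, using the os-ken (formerly Ryu) ``BGPSpeaker`` API; all addresses
and AS numbers are placeholders, and this spec does not mandate a particular
BGP speaker implementation.

.. code-block:: python

    # Hedged sketch: announce and withdraw an anycast VIP route from an
    # amphora. Addresses, AS numbers and the ToR/route-reflector peer IP
    # are illustrative placeholders.
    from os_ken.services.protocols.bgp.bgpspeaker import BGPSpeaker

    # The speaker runs in the default namespace and peers over lb-mgmt-net.
    speaker = BGPSpeaker(as_number=64512, router_id='10.10.10.5')

    # Peer with the ToR switch (or route reflector) on the lb-mgmt-net.
    speaker.neighbor_add(address='10.10.10.1', remote_as=64512)

    # Announce the anycast VIP as a /32 with the amphora front-end IP as
    # the next-hop; the L3 fabric combines such routes into an ECMP group.
    speaker.prefix_add(prefix='203.0.113.10/32', next_hop='192.0.2.5')

    # On listener stop or unregister, withdraw the route again.
    speaker.prefix_del(prefix='203.0.113.10/32')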
+ +Distributor (BGP Speaker) Lifecycle +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The below diagram shows the interaction between an amphora instance that is +serving as a distributor and the L3 network. In this example we are peering +with the ToR switch in order to disseminate anycast VIP routes into the +L3 network. + +:: + + +------------------------------------------------+ + | Initialize Distributor on Amphora | + +------------------------------------------------+ + | | + | +---------------+ +---------------+ | + | |1 | |4 | | + | | Amphora | | Ready to | | + | | (boot) | | announce | | + | | | | VIP(s) | | + | +-------+-------+ +-------+-------+ | + | | ^ | + | | | | + | | | | + | | | | + | | | | + | v | | + | +-------+-------+ +-------+-------+ | + | |2 | |3 Establish | | + | | Read Config | | BGP connection| | + | | Drive +----------->+ to ToR(s) | | + | | (BGP Config) | | (BGP Speaker) | | + | +---------------+ +---------------+ | + | | + +------------------------------------------------+ + + +------------------------------------------------+ + | Register AMP to Distributor or Listener Start | + +------------------------------------------------+ + | | + | +---------------+ +---------------+ | + | |5 | |8 | | + | | Amphora | | Amphora | | + | | BGP Speaker | | (Receives VIP | | + | |(Announce VIP) | | Traffic) | | + | +-------+-------+ +-------+-------+ | + | | ^ | + | | | | + | |BGP Peering | | + | |Session(s) | | + | | | | + | v | | + | +-------+-------+ +-------+-------+ | + | |6 | |7 | | + | | ToR(s) | | L3 Fabric | | + | |(Injects Route +----------->+ Accepts Route | | + | | into Fabric) | | (ECMP) | | + | +---------------+ +---------------+ | + | | + +------------------------------------------------+ + + +------------------------------------------------+ + | Unregister AMP to Distributor or Listener Stop | + +------------------------------------------------+ + | | + | +---------------+ +---------------+ | + | |9 | |12 | | + | | Amphora | | Amphora | | + | | BGP Speaker | |(No longer sent| | + | |(Withdraw VIP) | | VIP traffic) | | + | +-------+-------+ +-------+-------+ | + | | ^ | + | | | | + | |BGP Peering | | + | |Session(s) | | + | | | | + | v | | + | +-------+-------+ +-------+-------+ | + | |10 | |11 | | + | | ToR(s) | | L3 Fabric | | + | |(Removes Route +----------->+ Removes Route | | + | | from Fabric) | | (ECMP) | | + | +---------------+ +---------------+ | + | | + +------------------------------------------------+ + +1. The amphora gets created and is booted. In this example, the amphora will + perform both the load balancing (HAProxy) and L3 Distributor function + (BGP Speaker). + +2. The amphora will read in the BGP configuration information from the config + drive and configure the BGP Speaker to peer with the ToR switch. + +3. The BGP Speaker process will start and establish a BGP peering session with + the ToR switch. + +4. Once the BGP peering session is active, the amphora is ready to advertise + its anycast VIP into the network with a next-hop of its front-end IP + address. + +5. The BGP speaker will communicate using the BGP protocol and send a BGP + "announce" message to the ToR switch in order to announce a VIP route. If + the amphora is serving as both a load balancer and distributor the + announcement will happen on listener start. Otherwise the announce will + happen on a register amphora request to the distributor. + +6. The ToR switch will learn this new route and advertise it into the L3 + fabric. 
At this point the L3 fabric will know of the new VIP route and how
   to reach it (via the ToR that just announced it).

7. The L3 fabric will create an ECMP group if it has received multiple route
   advertisements for the same anycast VIP. This will result in a single VIP
   address with multiple next-hop addresses.

8. Once the route is accepted by the L3 fabric, traffic will get distributed
   to the recently registered amphora (HAProxy).

9. The BGP speaker will communicate using the BGP protocol and send a BGP
   "withdraw" message to the ToR switch in order to withdraw a VIP route. If
   the amphora is serving as both a load balancer and distributor the
   withdrawal will happen on listener stop. Otherwise the withdraw will
   happen on an unregister amphora request to the distributor.

10. The ToR switch will tell the L3 fabric over BGP that the anycast VIP
    route for the amphora being unregistered is no longer valid.

11. The L3 fabric will remove the VIP route whose next-hop address points to
    the amphora (HAProxy) being unregistered. It will keep all other existing
    VIP routes to other amphora (HAProxy) instances until they are explicitly
    unregistered.

12. Once the route is removed, the amphora (HAProxy) will no longer receive
    any traffic for the VIP.

Alternatives
------------
TBD

Data model impact
-----------------
Add the following columns to the existing ``vip`` table:

* distributor_id ``(String(36) , nullable=True)``
  ID of the distributor responsible for distributing traffic for the
  corresponding VIP.

Add table ``distributor`` with the following columns:

* id ``(String(36) , nullable=False)``
  ID of Distributor instance.

* distributor_type ``(String(36) , nullable=False)``
  Type of distributor ``L3_BGP``.

* status ``(String(36) , nullable=True)``
  Provisioning status.

Update the existing table ``amphora``. An amphora can now serve as a
distributor, lb, or both. The vrrp_* columns will be renamed to frontend_* in
order to make the purpose of this interface more apparent and to better
represent other use cases besides active/standby. A sketch of the
corresponding migration follows the list below.

* load_balancer_id ``(String(36) , nullable=True)``
  This will be set to null if this amphora is a dedicated distributor and
  should not run HAProxy.

* service_type ``(String(36) , nullable=True)``
  New field added to the amphora table in order to describe the type of
  amphora. This field is used to describe the function (service) the amphora
  provides. For example, if this is a dedicated distributor the service type
  would be set to "distributor".

* frontend_ip ``(String(64) , nullable=True)``
  New name for the former vrrp_ip field. This is the primary IP address
  inside the amphora-haproxy namespace used for L3 communication to back-end
  members.

* frontend_subnet_id ``(String(36) , nullable=True)``
  New field added to the amphora table, which is the neutron subnet id of
  the front-end network connected to the amphora.

* frontend_port_id ``(String(36) , nullable=True)``
  New name for the former vrrp_port_id field. This represents the neutron
  port ID of a port attached to the front-end network. It should no longer
  be assumed that the front-end subnet is the same as the VIP subnet.

* frontend_interface ``(String(16) , nullable=True)``
  New name for the former vrrp_interface field.

* frontend_id ``(Integer , nullable=True)``
  New name for the former vrrp_id field.

* frontend_priority ``(Integer , nullable=True)``
  New name for the former vrrp_priority field.
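As referenced above, a minimal sketch of the Alembic migration implied by
these column changes follows; the revision identifiers are placeholders and
only a representative subset of the columns is shown.

.. code-block:: python

    # Hedged sketch of the ``amphora`` table changes described above.
    import sqlalchemy as sa
    from alembic import op

    revision = 'xxxxxxxxxxxx'       # placeholder
    down_revision = 'yyyyyyyyyyyy'  # placeholder


    def upgrade():
        # Rename vrrp_* columns to frontend_* to reflect their wider role.
        op.alter_column('amphora', 'vrrp_ip',
                        new_column_name='frontend_ip',
                        existing_type=sa.String(64))
        op.alter_column('amphora', 'vrrp_port_id',
                        new_column_name='frontend_port_id',
                        existing_type=sa.String(36))

        # New columns for the distributor role.
        op.add_column('amphora',
                      sa.Column('service_type', sa.String(36),
                                nullable=True))
        op.add_column('amphora',
                      sa.Column('frontend_subnet_id', sa.String(36),
                                nullable=True))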
+ +Use existing table ``amphora_health`` with the following columns: + +* amphora_id ``(String(36) , nullable=False)`` + ID of amphora instance running lb and/or implementing distributor function. + +* last_update ``(DateTime , nullable=False)`` + Last time amphora heartbeat was received by a health monitor. + +* busy ``(Boolean , nullable=False)`` + Field indicating a create / delete or other action is being conducted on + the amphora instance (ie. to prevent a race condition when multiple health + managers are in use). + +Add table ``amphora_registration`` with the below columns. This table +determines the role of the amphora. The amphora can be dedicated as a +distributor, load balancer, or perform a combined role of load balancing and +distributor. A distributor amphora can be registered to multiple load +balancers. + +* amphora_id ``(String(36) , nullable=False)`` + ID of Amphora instance. + +* load_balancer_id ``(String(36) , nullable=False)`` + ID of load balancer. + +* distributor_id ``(String(36) , nullable=True)`` + ID of Distributor instance. + +Add table ``distributor_l3_bgp_speaker`` with the following columns: + +* id ``(String(36) , nullable=False)`` + ID of the BGP Speaker. + +* ip_version ``(Integer , nullable=False)`` + Protocol version of the BGP speaker. IP version ``4`` or ``6``. + +* local_as ``(Integer , nullable=False)`` + Local AS number for the BGP speaker. + +Add table ``distributor_l3_bgp_peer`` with the following columns: + +* id ``(String(36) , nullable=False)`` + ID of the BGP peer. + +* peer_ip ``(String(64) , nullable=False)`` + The IP address of the BGP neighbor. + +* remote_as ``(Integer , nullable=False)`` + Remote AS of the BGP peer. + +* auth_type ``(String(16) , nullable=True)`` + Authentication type, such as ``md5``. An additional parameter will need to + be set in the octavia configuration file by the admin to set the md5 + authentication password that will be used with the md5 auth type. + +* ttl_hops ``(Integer , nullable=True)`` + Number of hops between speaker and peer for ttl security ``1-254``. + +* hold_time ``(Integer , nullable=True)`` + Amount of time in seconds that can elapse between messages from peer. + +* keepalive_interval ``(Integer , nullable=True)`` + How often to send keep alive packets in seconds. + +Add table ``distributor_l3_bgp_peer_registration`` with the following columns: + +* distributor_l3_bgp_speaker_id ``(String(36) , nullable=False)`` + ID of the BGP Speaker. + +* distributor_l3_bgp_peer_id ``(String(36) , nullable=False)`` + ID of the BGP peer. + +Add table ``distributor_l3_amphora_bgp_speaker_registration`` with the +following columns: + +* distributor_l3_bgp_speaker_id ``(String(36) , nullable=False)`` + ID of the BGP Speaker. + +* amphora_id ``(String(36) , nullable=False)`` + ID of amphora instance that the BGP speaker will run on. + +Add table ``distributor_l3_amphora_vip_registration`` with the following +columns: + +* amphora_id ``(String(36) , nullable=False)`` + ID of the distributor amphora instance. + +* load_balancer_id ``(String(36) , nullable=False)`` + The ID of the load balancer. This will be used to get the VIP IP address. + +* nexthop_ip ``(String(64) , nullable=False)`` + The amphora front-end network IP used to handle VIP traffic. This is the + next-hop address that will be advertised for the VIP. This does not have to + be an IP address of an amphora, as it could be external such as for UDP + load balancing. 
+

* distributor_l3_bgp_peer_id ``(String(36) , nullable=True)``
  The BGP peer we will announce the anycast VIP to. If not specified, we will
  announce over all peers.

REST API impact
---------------

* Octavia API -- Allow the user to specify a separate VIP/subnet and
  front-end subnet (provider network) when creating a new load balancer.
  Currently the user can only specify the VIP subnet, which results in both
  the VIP and front-end network being on the same subnet.

* Extended Amphora API -- The L3 BGP distributor driver will call the
  extended amphora API in order to implement the control plane (BGP) and
  advertise new anycast VIP routes into the network.

The below extended amphora API calls will be implemented for amphoras running
as a dedicated distributor:

1. ``Register Amphora``

   This call will result in the BGP speaker announcing the anycast VIP into
   the L3 network with a next-hop of the front-end IP of the amphora being
   registered. Prior to this call, the load balancing amphora will have to
   configure the anycast VIP on the loopback interface inside the
   amphora-haproxy namespace.

   - amphora_id
     ID of the amphora running the load balancer to register.

   - vip_ip
     The VIP IP address.

   - nexthop_ip
     The amphora's front-end network IP address used to handle anycast VIP
     traffic.

   - peer_id
     ID of the peer that will be used to announce the anycast VIP. If not
     specified, the VIP will be announced across all peers.

2. ``Unregister Amphora``

   The BGP speaker will withdraw the anycast VIP route for the specified
   amphora from the L3 network. After the route is withdrawn, the anycast VIP
   IP will be removed from the loopback interface on the load balancing
   amphora.

   - amphora_id
     ID of the amphora running the load balancer to unregister.

   - vip_ip
     The VIP IP address.

   - nexthop_ip
     The amphora's front-end network IP address used to handle anycast VIP
     traffic.

   - peer_id
     ID of the peer that will be used to withdraw the anycast VIP. If not
     specified, the route will be withdrawn from all peers.

3. ``List Amphora``

   Will return a list of all amphora IDs and their anycast VIP routes
   currently being advertised by the BGP speaker.

4. [`P2`_] ``Drain Amphora``

   All new flows will get redirected to other members of the cluster and
   existing flows will be drained. Once the active flows have been drained,
   the BGP speaker will withdraw the anycast VIP route from the L3 network
   and unconfigure the VIP from the lo interface.

5. [`P2`_] ``Register VIP``

   This call will be used for registering anycast routes for non-amphora
   endpoints, such as for UDP load balancing.

   - vip_ip
     The VIP IP address.

   - nexthop_ip
     The next-hop network IP address used to handle anycast VIP traffic.

   - peer_id
     ID of the peer that will be used to announce the anycast VIP. If not
     specified, the route will be announced from all peers.

6. [`P2`_] ``Unregister VIP``

   This call will be used for unregistering anycast routes for non-amphora
   endpoints, such as for UDP load balancing.

   - vip_ip
     The VIP IP address.

   - nexthop_ip
     The next-hop network IP address used to handle anycast VIP traffic.

   - peer_id
     ID of the peer that will be used to withdraw the anycast VIP. If not
     specified, the route will be withdrawn from all peers.

7. [`P2`_] ``List VIP``

   Will return a list of all non-amphora anycast VIP routes currently being
   advertised by the BGP speaker.
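To make the register contract concrete, the following is a minimal sketch of
a driver-side ``Register Amphora`` call against the extended amphora API; the
endpoint path, port, certificate paths and payload layout are assumptions for
illustration, as this spec does not define the wire format.

.. code-block:: python

    # Hedged sketch: register a load balancing amphora with a dedicated
    # distributor amphora. URL, port and payload keys are illustrative.
    import requests


    def register_amphora(distributor_ip, amphora_id, vip_ip, nexthop_ip,
                         peer_id=None):
        payload = {
            'amphora_id': amphora_id,
            'vip_ip': vip_ip,
            'nexthop_ip': nexthop_ip,
        }
        if peer_id:
            # When omitted, the VIP is announced across all peers.
            payload['peer_id'] = peer_id
        resp = requests.post(
            f'https://{distributor_ip}:9443/1.0/distributor/register',
            json=payload,
            cert=('/etc/octavia/client.pem', '/etc/octavia/client.key'),
            verify='/etc/octavia/server_ca.pem',
            timeout=10,
        )
        resp.raise_for_status()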
+

Security impact
---------------
The distributor inherently supports multi-tenancy, as it is simply providing
traffic distribution across multiple amphoras. Network isolation on a per
tenant basis is handled by the amphoras themselves, as they service only a
single tenant. Further isolation can be provided by defining separate anycast
network(s) on a per tenant basis. Firewall or ACL policies can then be built
around these prefixes.

To further enhance BGP security, route-maps, prefix-lists, and communities
can be used to control which routes a particular BGP peer is allowed to
advertise into the L3 network. MD5 passwords and GTSM can provide additional
security by keeping unauthorized BGP peers out of the L3 network.

Notifications impact
--------------------

Other end user impact
---------------------

Performance Impact
------------------

Other deployer impact
---------------------

Developer impact
----------------

Implementation
==============

Assignee(s)
-----------

Work Items
----------

Dependencies
============

Testing
=======

* Unit tests with tox.
* Functional tests with tox.

Documentation Impact
====================
The API-Ref documentation will need to be updated for load balancer create.
An additional optional parameter frontend_network_id will be added. If set,
this parameter will result in the primary interface inside the
amphora-haproxy namespace getting created on the specified network. Default
behavior is to provision this interface on the VIP subnet.

References
==========
* `Active-Active Topology
  `_
diff --git a/specs/version1.1/enable-provider-driver.rst b/specs/version1.1/enable-provider-driver.rst new file mode 100644 index 0000000000..e1b9d0908b --- /dev/null +++ b/specs/version1.1/enable-provider-driver.rst @@ -0,0 +1,1828 @@
==============================
Enable Provider Driver Support
==============================
.. contents:: Specification Table of Contents
   :depth: 4
   :backlinks: none

https://storyboard.openstack.org/#!/story/1655768

Provider drivers are implementations that give Octavia operators a choice of
which load balancing systems to use in their Octavia deployment. Currently,
the default Octavia driver is the only one available. Operators may want to
employ other load balancing implementations, including hardware appliances,
in addition to the default Octavia driver.

Problem description
===================
Neutron LBaaS v2 supports a *provider* parameter, giving LBaaS users a way to
direct LBaaS requests to a specific backend driver. The Octavia API includes
a *provider* parameter as well, but currently supports only one provider, the
Octavia driver. Adding support for other drivers is needed. With this in
place, operators can configure load balancers using multiple providers,
either the Octavia default or others.

Proposed change
===============
Available drivers will be enabled by entries in the Octavia configuration
file. Drivers will be loaded via stevedore and Octavia will communicate with
drivers through a standard class interface defined below. Most driver
functions will be asynchronous to Octavia, and Octavia will provide a library
of functions that give drivers a way to update status and statistics.
Functions that are synchronous are noted below.

Octavia API functions not listed here will continue to be handled by the
Octavia API and will not call into the driver. Examples would be show, list,
and quota requests.
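As a sketch of how the Octavia API could resolve a user-supplied provider
name to a driver instance through stevedore (the exact call site is an
implementation detail, not fixed by this spec):

.. code-block:: python

    # Hedged sketch: load the provider driver named by the "provider"
    # parameter from the "octavia.api.drivers" entry-point namespace.
    from stevedore import driver as stevedore_driver


    def get_driver(provider_name):
        # invoke_on_load=True instantiates the driver class on load.
        manager = stevedore_driver.DriverManager(
            namespace='octavia.api.drivers',
            name=provider_name,
            invoke_on_load=True,
        )
        return manager.driver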
+ +Driver Entry Points +------------------- + +Provider drivers will be loaded via +`stevedore `_. Drivers will +have an entry point defined in their setup tools configuration using the +Octavia driver namespace "octavia.api.drivers". This entry point name will +be used to enable the driver in the Octavia configuration file and as the +"provider" parameter users specify when creating a load balancer. An example +for the octavia reference driver would be: + +.. code-block:: python + + octavia = octavia.api.drivers.octavia.driver:OctaviaDriver + +Octavia Provider Driver API +--------------------------- + +Provider drivers will be expected to support the full interface described +by the Octavia API, currently v2.0. If a driver does not implement an API +function, drivers should fail a request by raising a ``NotImplementedError`` +exception. If a driver implements a function but does not support a particular +option passed in by the caller, the driver should raise an +``UnsupportedOptionError``. + +It is recommended that drivers use the +`jsonschema `_ package or +`voluptuous `_ to validate the +request against the current driver capabilities. + +See the `Exception Model`_ below for more details. + +.. note:: Driver developers should refer to the official `Octavia API reference ` document for details of the fields and expected outcome of these calls. + +Load balancer +^^^^^^^^^^^^^ + +* **create** + + Creates a load balancer. + + Octavia will pass in the load balancer object with all requested settings. + + The load balancer will be in the ``PENDING_CREATE`` provisioning_status and + ``OFFLINE`` operating_status when it is passed to the driver. The driver + will be responsible for updating the provisioning status of the load + balancer to either ``ACTIVE`` if successfully created, or ``ERROR`` if not + created. + + The Octavia API will accept and do basic API validation of the create + request from the user. The load balancer python object representing the + request body will be passed to the driver create method as it was received + and validated with the following exceptions: + + 1. The provider will be removed as this is used for driver selection. + 2. The flavor will be expanded from the provided ID to be the full + dictionary representing the flavor metadata. + + **Load balancer object** + + As of the writing of this specification the create load balancer object may + contain the following: + + +-----------------+--------+-----------------------------------------------+ + | Name | Type | Description | + +=================+========+===============================================+ + | admin_state_up | bool | Admin state: True if up, False if down. | + +-----------------+--------+-----------------------------------------------+ + | description | string | A human-readable description for the resource.| + +-----------------+--------+-----------------------------------------------+ + | flavor | dict | The flavor keys and values. | + +-----------------+--------+-----------------------------------------------+ + | listeners | list | A list of `Listener objects`_. | + +-----------------+--------+-----------------------------------------------+ + | loadbalancer_id | string | ID of load balancer to create. | + +-----------------+--------+-----------------------------------------------+ + | name | string | Human-readable name of the resource. | + +-----------------+--------+-----------------------------------------------+ + | pools | list | A list of `Pool object`_. 
| + +-----------------+--------+-----------------------------------------------+ + | project_id | string | ID of the project owning this resource. | + +-----------------+--------+-----------------------------------------------+ + | vip_address | string | The IP address of the Virtual IP (VIP). | + +-----------------+--------+-----------------------------------------------+ + | vip_network_id | string | The ID of the network for the VIP. | + +-----------------+--------+-----------------------------------------------+ + | vip_port_id | string | The ID of the VIP port. | + +-----------------+--------+-----------------------------------------------+ + |vip_qos_policy_id| string | The ID of the qos policy for the VIP. | + +-----------------+--------+-----------------------------------------------+ + | vip_subnet_id | string | The ID of the subnet for the VIP. | + +-----------------+--------+-----------------------------------------------+ + + The driver is expected to validate that the driver supports the request + and raise an exception if the request cannot be accepted. + + **VIP port creation** + + Some provider drivers will want to create the Neutron port for the VIP, and + others will want Octavia to create the port instead. In order to support both + use cases, the create_vip_port() method will ask provider drivers to create + a VIP port. If the driver expects Octavia to create the port, the driver + will raise a NotImplementedError exception. Octavia will call this function + before calling loadbalancer_create() in order to determine if it should + create the VIP port. Octavia will call create_vip_port() with a loadbalancer + ID and a partially defined VIP dictionary. Provider drivers that support + port creation will create the port and return a fully populated VIP + dictionary. + + **VIP dictionary** + + +-----------------+--------+-----------------------------------------------+ + | Name | Type | Description | + +=================+========+===============================================+ + | project_id | string | ID of the project owning this resource. | + +-----------------+--------+-----------------------------------------------+ + | vip_address | string | The IP address of the Virtual IP (VIP). | + +-----------------+--------+-----------------------------------------------+ + | vip_network_id | string | The ID of the network for the VIP. | + +-----------------+--------+-----------------------------------------------+ + | vip_port_id | string | The ID of the VIP port. | + +-----------------+--------+-----------------------------------------------+ + |vip_qos_policy_id| string | The ID of the qos policy for the VIP. | + +-----------------+--------+-----------------------------------------------+ + | vip_subnet_id | string | The ID of the subnet for the VIP. | + +-----------------+--------+-----------------------------------------------+ + + *Creating a Fully Populated Load Balancer* + + If the "listener" option is specified, the provider driver will iterate + through the list and create all of the child objects in addition to + creating the load balancer instance. + +* **delete** + + Removes an existing load balancer. + + Octavia will pass in the load balancer object and cascade boolean as + parameters. + + The load balancer will be in the ``PENDING_DELETE`` provisioning_status when + it is passed to the driver. The driver will notify Octavia that the delete + was successful by setting the provisioning_status to ``DELETED``. 
If the + delete failed, the driver will update the provisioning_status to ``ERROR``. + + The API includes an option for cascade delete. When cascade is set to + True, the provider driver will delete all child objects of the load balancer. + +* **failover** + + Performs a failover of a load balancer. + + Octavia will pass in the load balancer ID as a parameter. + + The load balancer will be in the ``PENDING_UPDATE`` provisioning_status when + it is passed to the driver. The driver will update the provisioning_status + of the load balancer to either ``ACTIVE`` if successfully failed over, or + ``ERROR`` if not failed over. + + Failover can mean different things in the context of a provider driver. For + example, the Octavia driver replaces the current amphora(s) with another + amphora. For another provider driver, failover may mean failing over from + an active system to a standby system. + +* **update** + + Modifies an existing load balancer using the values supplied in the load + balancer object. + + Octavia will pass in the original load balancer object which is the baseline + for the update, and a load balancer object with the fields to be updated. + + As of the writing of this specification the update load balancer object may + contain the following: + + +-----------------+--------+-----------------------------------------------+ + | Name | Type | Description | + +=================+========+===============================================+ + | admin_state_up | bool | Admin state: True if up, False if down. | + +-----------------+--------+-----------------------------------------------+ + | description | string | A human-readable description for the resource.| + +-----------------+--------+-----------------------------------------------+ + | loadbalancer_id | string | ID of load balancer to update. | + +-----------------+--------+-----------------------------------------------+ + | name | string | Human-readable name of the resource. | + +-----------------+--------+-----------------------------------------------+ + |vip_qos_policy_id| string | The ID of the qos policy for the VIP. | + +-----------------+--------+-----------------------------------------------+ + + The load balancer will be in the ``PENDING_UPDATE`` provisioning_status when + it is passed to the driver. The driver will update the provisioning_status + of the load balancer to either ``ACTIVE`` if successfully updated, or + ``ERROR`` if the update was not successful. + + The driver is expected to validate that the driver supports the request. + The method will then return or raise an exception if the request cannot be + accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + + def create_vip_port(self, loadbalancer_id, vip_dictionary): + """Creates a port for a load balancer VIP. + + If the driver supports creating VIP ports, the driver will create a + VIP port and return the vip_dictionary populated with the vip_port_id. + If the driver does not support port creation, the driver will raise + a NotImplementedError. + + :param: loadbalancer_id (string): ID of loadbalancer. + :param: vip_dictionary (dict): The VIP dictionary. + :returns: VIP dictionary with vip_port_id. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support creating + VIP ports. + """ + raise NotImplementedError() + + def loadbalancer_create(self, loadbalancer): + """Creates a new load balancer. 
+ + :param loadbalancer (object): The load balancer object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support create. + :raises UnsupportedOptionError: The driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def loadbalancer_delete(self, loadbalancer, cascade=False): + """Deletes a load balancer. + + :param loadbalancer (object): The load balancer object. + :param cascade (bool): If True, deletes all child objects (listeners, + pools, etc.) in addition to the load balancer. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + """ + raise NotImplementedError() + + def loadbalancer_failover(self, loadbalancer_id): + """Performs a fail over of a load balancer. + + :param loadbalancer_id (string): ID of the load balancer to failover. + :return: Nothing if the failover request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises: NotImplementedError if driver does not support request. + """ + raise NotImplementedError() + + def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): + """Updates a load balancer. + + :param old_loadbalancer (object): The baseline load balancer object. + :param new_loadbalancer (object): The updated load balancer object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support request. + :raises UnsupportedOptionError: The driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Listener +^^^^^^^^ + +* **create** + + Creates a listener for a load balancer. + + Octavia will pass in the listener object with all requested settings. + + The listener will be in the ``PENDING_CREATE`` provisioning_status and + ``OFFLINE`` operating_status when it is passed to the driver. The driver + will be responsible for updating the provisioning status of the listener + to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. + + The Octavia API will accept and do basic API validation of the create + request from the user. The listener python object representing the + request body will be passed to the driver create method as it was received + and validated with the following exceptions: + + 1. The project_id will be removed, if present, as this field is now + deprecated. The listener will inherit the project_id from the parent + load balancer. + 2. The default_tls_container_ref will be expanded and provided to the driver + in pkcs12 format. + 3. The sni_container_refs will be expanded and provided to the driver in + pkcs12 format. + + .. _Listener objects: + + **Listener object** + + As of the writing of this specification the create listener object may + contain the following: + + +----------------------------+--------+-------------------------------------+ + | Name | Type | Description | + +============================+========+=====================================+ + | admin_state_up | bool | Admin state: True if up, False if | + | | | down. | + +----------------------------+--------+-------------------------------------+ + | connection_limit | int | The max number of connections | + | | | permitted for this listener. 
Default| + | | | is -1, which is infinite | + | | | connections. | + +----------------------------+--------+-------------------------------------+ + | default_pool | object | A `Pool object`_. | + +----------------------------+--------+-------------------------------------+ + | default_pool_id | string | The ID of the pool used by the | + | | | listener if no L7 policies match. | + +----------------------------+--------+-------------------------------------+ + | default_tls_container_data | dict | A `TLS container`_ dict. | + +----------------------------+--------+-------------------------------------+ + | default_tls_container_refs | string | The reference to the secrets | + | | | container. | + +----------------------------+--------+-------------------------------------+ + | description | string | A human-readable description for the| + | | | listener. | + +----------------------------+--------+-------------------------------------+ + | insert_headers | dict | A dictionary of optional headers to | + | | | insert into the request before it is| + | | | sent to the backend member. See | + | | | `Supported HTTP Header Insertions`_.| + | | | Keys and values are specified as | + | | | strings. | + +----------------------------+--------+-------------------------------------+ + | l7policies | list | A list of `L7policy objects`_. | + +----------------------------+--------+-------------------------------------+ + | listener_id | string | ID of listener to create. | + +----------------------------+--------+-------------------------------------+ + | loadbalancer_id | string | ID of load balancer. | + +----------------------------+--------+-------------------------------------+ + | name | string | Human-readable name of the listener.| + +----------------------------+--------+-------------------------------------+ + | protocol | string | Protocol type: One of HTTP, HTTPS, | + | | | TCP, or TERMINATED_HTTPS. | + +----------------------------+--------+-------------------------------------+ + | protocol_port | int | Protocol port number. | + +----------------------------+--------+-------------------------------------+ + | sni_container_data | list | A list of `TLS container`_ dict. | + +----------------------------+--------+-------------------------------------+ + | sni_container_refs | list | A list of references to the SNI | + | | | secrets containers. | + +----------------------------+--------+-------------------------------------+ + | timeout_client_data | int | Frontend client inactivity timeout | + | | | in milliseconds. | + +----------------------------+--------+-------------------------------------+ + | timeout_member_connect | int | Backend member connection timeout in| + | | | milliseconds. | + +----------------------------+--------+-------------------------------------+ + | timeout_member_data | int | Backend member inactivity timeout in| + | | | milliseconds. | + +----------------------------+--------+-------------------------------------+ + | timeout_tcp_inspect | int | Time, in milliseconds, to wait for | + | | | additional TCP packets for content | + | | | inspection. | + +----------------------------+--------+-------------------------------------+ + + .. 
_TLS container: + + As of the writing of this specification the TLS container dictionary + contains the following: + + +---------------+--------+------------------------------------------------+ + | Key | Type | Description | + +===============+========+================================================+ + | certificate | string | The PEM encoded certificate. | + +---------------+--------+------------------------------------------------+ + | intermediates | List | A list of intermediate PEM certificates. | + +---------------+--------+------------------------------------------------+ + | primary_cn | string | The primary common name of the certificate. | + +---------------+--------+------------------------------------------------+ + | private_key | string | The PEM encoded private key. | + +---------------+--------+------------------------------------------------+ + + .. _Supported HTTP Header Insertions: + + As of the writing of this specification the Supported HTTP Header Insertions + are: + + +-------------------+------+------------------------------------------------+ + | Key | Type | Description | + +===================+======+================================================+ + | X-Forwarded-For | bool | When True a X-Forwarded-For header is inserted | + | | | into the request to the backend member that | + | | | specifies the client IP address. | + +-------------------+------+------------------------------------------------+ + | X-Forwarded-Port | int | A X-Forwarded-Port header is inserted into the | + | | | request to the backend member that specifies | + | | | the integer provided. Typically this is used to| + | | | indicate the port the client connected to on | + | | | the load balancer. | + +-------------------+------+------------------------------------------------+ + + *Creating a Fully Populated Listener* + + If the "default_pool" or "l7policies" option is specified, the provider + driver will create all of the child objects in addition to creating the + listener instance. + +* **delete** + + Deletes an existing listener. + + Octavia will pass the listener object as a parameter. + + The listener will be in the ``PENDING_DELETE`` provisioning_status when + it is passed to the driver. The driver will notify Octavia that the delete + was successful by setting the provisioning_status to ``DELETED``. If the + delete failed, the driver will update the provisioning_status to ``ERROR``. + +* **update** + + Modifies an existing listener using the values supplied in the listener + object. + + Octavia will pass in the original listener object which is the baseline for + the update, and a listener object with the fields to be updated. + + As of the writing of this specification the update listener object may + contain the following: + + +----------------------------+--------+-------------------------------------+ + | Name | Type | Description | + +============================+========+=====================================+ + | admin_state_up | bool | Admin state: True if up, False if | + | | | down. | + +----------------------------+--------+-------------------------------------+ + | connection_limit | int | The max number of connections | + | | | permitted for this listener. Default| + | | | is -1, which is infinite | + | | | connections. | + +----------------------------+--------+-------------------------------------+ + | default_pool_id | string | The ID of the pool used by the | + | | | listener if no L7 policies match. 
| + +----------------------------+--------+-------------------------------------+ + | default_tls_container_data | dict | A `TLS container`_ dict. | + +----------------------------+--------+-------------------------------------+ + | default_tls_container_refs | string | The reference to the secrets | + | | | container. | + +----------------------------+--------+-------------------------------------+ + | description | string | A human-readable description for | + | | | the listener. | + +----------------------------+--------+-------------------------------------+ + | insert_headers | dict | A dictionary of optional headers to | + | | | insert into the request before it is| + | | | sent to the backend member. See | + | | | `Supported HTTP Header Insertions`_.| + | | | Keys and values are specified as | + | | | strings. | + +----------------------------+--------+-------------------------------------+ + | listener_id | string | ID of listener to update. | + +----------------------------+--------+-------------------------------------+ + | name | string | Human-readable name of the listener.| + +----------------------------+--------+-------------------------------------+ + | sni_container_data | list | A list of `TLS container`_ dict. | + +----------------------------+--------+-------------------------------------+ + | sni_container_refs | list | A list of references to the SNI | + | | | secrets containers. | + +----------------------------+--------+-------------------------------------+ + | timeout_client_data | int | Frontend client inactivity timeout | + | | | in milliseconds. | + +----------------------------+--------+-------------------------------------+ + | timeout_member_connect | int | Backend member connection timeout in| + | | | milliseconds. | + +----------------------------+--------+-------------------------------------+ + | timeout_member_data | int | Backend member inactivity timeout in| + | | | milliseconds. | + +----------------------------+--------+-------------------------------------+ + | timeout_tcp_inspect | int | Time, in milliseconds, to wait for | + | | | additional TCP packets for content | + | | | inspection. | + +----------------------------+--------+-------------------------------------+ + + The listener will be in the ``PENDING_UPDATE`` provisioning_status when + it is passed to the driver. The driver will update the provisioning_status + of the listener to either ``ACTIVE`` if successfully updated, or ``ERROR`` + if the update was not successful. + + The driver is expected to validate that the driver supports the request. + The method will then return or raise an exception if the request cannot be + accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + def listener_create(self, listener): + """Creates a new listener. + + :param listener (object): The listener object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def listener_delete(self, listener): + """Deletes a listener. + + :param listener (object): The listener object. + :return: Nothing if the delete request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. 
+ """ + raise NotImplementedError() + + def listener_update(self, old_listener, new_listener): + """Updates a listener. + + :param old_listener (object): The baseline listener object. + :param new_listener (object): The updated listener object. + :return: Nothing if the update request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + +Pool +^^^^ + +* **create** + + Creates a pool for a load balancer. + + Octavia will pass in the pool object with all requested settings. + + The pool will be in the ``PENDING_CREATE`` provisioning_status and + ``OFFLINE`` operating_status when it is passed to the driver. The driver + will be responsible for updating the provisioning status of the pool + to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created. + + The Octavia API will accept and do basic API validation of the create + request from the user. The pool python object representing the request + body will be passed to the driver create method as it was received and + validated with the following exceptions: + + 1. The project_id will be removed, if present, as this field is now + deprecated. The listener will inherit the project_id from the parent + load balancer. + + .. _Pool object: + + **Pool object** + + As of the writing of this specification the create pool object may + contain the following: + + +-----------------------+--------+------------------------------------------+ + | Name | Type | Description | + +=======================+========+==========================================+ + | admin_state_up | bool | Admin state: True if up, False if down. | + +-----------------------+--------+------------------------------------------+ + | description | string | A human-readable description for the | + | | | pool. | + +-----------------------+--------+------------------------------------------+ + | healthmonitor | object | A `Healthmonitor object`_. | + +-----------------------+--------+------------------------------------------+ + | lb_algorithm | string | Load balancing algorithm: One of | + | | | ROUND_ROBIN, LEAST_CONNECTIONS, or | + | | | SOURCE_IP. | + +-----------------------+--------+------------------------------------------+ + | loadbalancer_id | string | ID of load balancer. | + +-----------------------+--------+------------------------------------------+ + | listener_id | string | ID of listener. | + +-----------------------+--------+------------------------------------------+ + | members | list | A list of `Member objects`_. | + +-----------------------+--------+------------------------------------------+ + | name | string | Human-readable name of the pool. | + +-----------------------+--------+------------------------------------------+ + | pool_id | string | ID of pool to create. | + +-----------------------+--------+------------------------------------------+ + | protocol | string | Protocol type: One of HTTP, HTTPS, | + | | | PROXY, or TCP. 
| + +-----------------------+--------+------------------------------------------+ + | session_persistence | dict | Defines session persistence as one of | + | | | {'type': <'HTTP_COOKIE' | 'SOURCE_IP'>} | + | | | OR | + | | | {'type': 'APP_COOKIE', | + | | | 'cookie_name': } | + +-----------------------+--------+------------------------------------------+ + +* **delete** + + Removes an existing pool and all of its members. + + Octavia will pass the pool object as a parameter. + + The pool will be in the ``PENDING_DELETE`` provisioning_status when + it is passed to the driver. The driver will notify Octavia that the delete + was successful by setting the provisioning_status to ``DELETED``. If the + delete failed, the driver will update the provisioning_status to ``ERROR``. + +* **update** + + Modifies an existing pool using the values supplied in the pool object. + + Octavia will pass in the original pool object which is the baseline for the + update, and a pool object with the fields to be updated. + + As of the writing of this specification the update pool object may + contain the following: + + +-----------------------+--------+------------------------------------------+ + | Name | Type | Description | + +=======================+========+==========================================+ + | admin_state_up | bool | Admin state: True if up, False if down. | + +-----------------------+--------+------------------------------------------+ + | description | string | A human-readable description for the | + | | | pool. | + +-----------------------+--------+------------------------------------------+ + | lb_algorithm | string | Load balancing algorithm: One of | + | | | ROUND_ROBIN, LEAST_CONNECTIONS, or | + | | | SOURCE_IP. | + +-----------------------+--------+------------------------------------------+ + | name | string | Human-readable name of the pool. | + +-----------------------+--------+------------------------------------------+ + | pool_id | string | ID of pool to update. | + +-----------------------+--------+------------------------------------------+ + | session_persistence | dict | Defines session persistence as one of | + | | | {'type': <'HTTP_COOKIE' | 'SOURCE_IP'>} | + | | | OR | + | | | {'type': 'APP_COOKIE', | + | | | 'cookie_name': } | + +-----------------------+--------+------------------------------------------+ + + The pool will be in the ``PENDING_UPDATE`` provisioning_status when it is + passed to the driver. The driver will update the provisioning_status of the + pool to either ``ACTIVE`` if successfully updated, or ``ERROR`` if the + update was not successful. + + The driver is expected to validate that the driver supports the request. + The method will then return or raise an exception if the request cannot be + accepted. + +**Abstract class definition** + +.. code-block:: python + + class Driver(object): + def pool_create(self, pool): + """Creates a new pool. + + :param pool (object): The pool object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: if driver does not support request. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + + def pool_delete(self, pool): + """Deletes a pool and its members. + + :param pool (object): The pool object. + :return: Nothing if the create request was accepted. + :raises DriverError: An unexpected error occurred in the driver. 
+            :raises NotImplementedError: if driver does not support request.
+            """
+            raise NotImplementedError()
+
+        def pool_update(self, old_pool, new_pool):
+            """Updates a pool.
+
+            :param old_pool (object): The baseline pool object.
+            :param new_pool (object): The updated pool object.
+            :return: Nothing if the update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+Member
+^^^^^^
+
+* **create**
+
+  Creates a member for a pool.
+
+  Octavia will pass in the member object with all requested settings.
+
+  The member will be in the ``PENDING_CREATE`` provisioning_status and
+  ``OFFLINE`` operating_status when it is passed to the driver. The driver
+  will be responsible for updating the provisioning status of the member
+  to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.
+
+  The Octavia API will accept and do basic API validation of the create
+  request from the user. The member python object representing the
+  request body will be passed to the driver create method as it was received
+  and validated with the following exceptions:
+
+  1. The project_id will be removed, if present, as this field is now
+     deprecated. The member will inherit the project_id from the parent
+     load balancer.
+
+  .. _Member objects:
+
+  **Member object**
+
+  As of the writing of this specification the create member object may
+  contain the following:
+
+  +-----------------------+--------+------------------------------------------+
+  | Name                  | Type   | Description                              |
+  +=======================+========+==========================================+
+  | address               | string | The IP address of the backend member to  |
+  |                       |        | receive traffic from the load balancer.  |
+  +-----------------------+--------+------------------------------------------+
+  | admin_state_up        | bool   | Admin state: True if up, False if down.  |
+  +-----------------------+--------+------------------------------------------+
+  | backup                | bool   | Is the member a backup? Backup members   |
+  |                       |        | only receive traffic when all non-backup |
+  |                       |        | members are down.                        |
+  +-----------------------+--------+------------------------------------------+
+  | member_id             | string | ID of member to create.                  |
+  +-----------------------+--------+------------------------------------------+
+  | monitor_address       | string | An alternate IP address used for health  |
+  |                       |        | monitoring a backend member.             |
+  +-----------------------+--------+------------------------------------------+
+  | monitor_port          | int    | An alternate protocol port used for      |
+  |                       |        | health monitoring a backend member.      |
+  +-----------------------+--------+------------------------------------------+
+  | name                  | string | Human-readable name of the member.       |
+  +-----------------------+--------+------------------------------------------+
+  | pool_id               | string | ID of pool.                              |
+  +-----------------------+--------+------------------------------------------+
+  | protocol_port         | int    | The port on which the backend member     |
+  |                       |        | listens for traffic.                     |
+  +-----------------------+--------+------------------------------------------+
+  | subnet_id             | string | Subnet ID.                               
|
+  +-----------------------+--------+------------------------------------------+
+  | weight                | int    | The weight of a member determines the    |
+  |                       |        | portion of requests or connections it    |
+  |                       |        | services compared to the other members of|
+  |                       |        | the pool. For example, a member with a   |
+  |                       |        | weight of 10 receives five times as many |
+  |                       |        | requests as a member with a weight of 2. |
+  |                       |        | A value of 0 means the member does not   |
+  |                       |        | receive new connections but continues to |
+  |                       |        | service existing connections. A valid    |
+  |                       |        | value is from 0 to 256. Default is 1.    |
+  +-----------------------+--------+------------------------------------------+
+
+* **delete**
+
+  Removes a pool member.
+
+  Octavia will pass the member object as a parameter.
+
+  The member will be in the ``PENDING_DELETE`` provisioning_status when
+  it is passed to the driver. The driver will notify Octavia that the delete
+  was successful by setting the provisioning_status to ``DELETED``. If the
+  delete failed, the driver will update the provisioning_status to ``ERROR``.
+
+* **update**
+
+  Modifies an existing member using the values supplied in the member object.
+
+  Octavia will pass in the original member object which is the baseline for the
+  update, and a member object with the fields to be updated.
+
+  As of the writing of this specification the update member object may contain
+  the following:
+
+  +-----------------------+--------+------------------------------------------+
+  | Name                  | Type   | Description                              |
+  +=======================+========+==========================================+
+  | admin_state_up        | bool   | Admin state: True if up, False if down.  |
+  +-----------------------+--------+------------------------------------------+
+  | backup                | bool   | Is the member a backup? Backup members   |
+  |                       |        | only receive traffic when all non-backup |
+  |                       |        | members are down.                        |
+  +-----------------------+--------+------------------------------------------+
+  | member_id             | string | ID of member to update.                  |
+  +-----------------------+--------+------------------------------------------+
+  | monitor_address       | string | An alternate IP address used for health  |
+  |                       |        | monitoring a backend member.             |
+  +-----------------------+--------+------------------------------------------+
+  | monitor_port          | int    | An alternate protocol port used for      |
+  |                       |        | health monitoring a backend member.      |
+  +-----------------------+--------+------------------------------------------+
+  | name                  | string | Human-readable name of the member.       |
+  +-----------------------+--------+------------------------------------------+
+  | weight                | int    | The weight of a member determines the    |
+  |                       |        | portion of requests or connections it    |
+  |                       |        | services compared to the other members of|
+  |                       |        | the pool. For example, a member with a   |
+  |                       |        | weight of 10 receives five times as many |
+  |                       |        | requests as a member with a weight of 2. |
+  |                       |        | A value of 0 means the member does not   |
+  |                       |        | receive new connections but continues to |
+  |                       |        | service existing connections. A valid    |
+  |                       |        | value is from 0 to 256. Default is 1.    |
+  +-----------------------+--------+------------------------------------------+
+
+  The member will be in the ``PENDING_UPDATE`` provisioning_status when
+  it is passed to the driver. The driver will update the provisioning_status
+  of the member to either ``ACTIVE`` if successfully updated, or ``ERROR``
+  if the update was not successful.
+
+  The driver is expected to validate that the driver supports the request.
+  The method will then return or raise an exception if the request cannot be
+  accepted.
+
+* **batch update**
+
+  Set the state of members for a pool in one API call. This may include
+  creating new members, deleting old members, and updating existing members.
+  Existing members are matched based on address/port combination.
+
+  For example, assume a pool currently has two members. These members have the
+  following address/port combinations: '192.0.2.15:80' and '192.0.2.16:80'.
+  Now assume a PUT request is made that includes members with address/port
+  combinations: '192.0.2.16:80' and '192.0.2.17:80'. The member '192.0.2.15:80'
+  will be deleted because it was not in the request. The member '192.0.2.16:80'
+  will be updated to match the request data for that member, because it was
+  matched. The member '192.0.2.17:80' will be created, because no such member
+  existed.
+
+  The members will be in the ``PENDING_CREATE``, ``PENDING_UPDATE``, or
+  ``PENDING_DELETE`` provisioning_status when they are passed to the driver.
+  The driver will update the provisioning_status of the members to either
+  ``ACTIVE`` or ``DELETED`` if successfully updated, or ``ERROR``
+  if the update was not successful.
+
+  The batch update method will supply a list of `Member objects`_.
+  Existing members not in this list should be deleted,
+  existing members in the list should be updated,
+  and members in the list that do not already exist should be created.
+
+**Abstract class definition**
+
+.. code-block:: python
+
+    class Driver(object):
+        def member_create(self, member):
+            """Creates a new member for a pool.
+
+            :param member (object): The member object.
+            :return: Nothing if the create request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+        def member_delete(self, member):
+            """Deletes a pool member.
+
+            :param member (object): The member object.
+            :return: Nothing if the delete request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            """
+            raise NotImplementedError()
+
+        def member_update(self, old_member, new_member):
+            """Updates a pool member.
+
+            :param old_member (object): The baseline member object.
+            :param new_member (object): The updated member object.
+            :return: Nothing if the update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+        def member_batch_update(self, members):
+            """Creates, updates, or deletes a set of pool members.
+
+            :param members (list): List of member objects.
+            :return: Nothing if the batch update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+Health Monitor
+^^^^^^^^^^^^^^
+
+* **create**
+
+  Creates a health monitor on a pool.
+
+  Octavia will pass in the health monitor object with all requested settings.
+
+  The health monitor will be in the ``PENDING_CREATE`` provisioning_status and
+  ``OFFLINE`` operating_status when it is passed to the driver. The driver
+  will be responsible for updating the provisioning status of the health
+  monitor to either ``ACTIVE`` if successfully created, or ``ERROR`` if not
+  created.
+
+  The Octavia API will accept and do basic API validation of the create
+  request from the user. The healthmonitor python object representing the
+  request body will be passed to the driver create method as it was received
+  and validated with the following exceptions:
+
+  1. The project_id will be removed, if present, as this field is now
+     deprecated. The health monitor will inherit the project_id from the
+     parent load balancer.
+
+  .. _Healthmonitor object:
+
+  **Healthmonitor object**
+
+  +-----------------------+--------+------------------------------------------+
+  | Name                  | Type   | Description                              |
+  +=======================+========+==========================================+
+  | admin_state_up        | bool   | Admin state: True if up, False if down.  |
+  +-----------------------+--------+------------------------------------------+
+  | delay                 | int    | The interval, in seconds, between health |
+  |                       |        | checks.                                  |
+  +-----------------------+--------+------------------------------------------+
+  | expected_codes        | string | The expected HTTP status codes to get    |
+  |                       |        | from a successful health check. This may |
+  |                       |        | be a single value, a list, or a range.   |
+  +-----------------------+--------+------------------------------------------+
+  | healthmonitor_id      | string | ID of health monitor to create.          |
+  +-----------------------+--------+------------------------------------------+
+  | http_method           | string | The HTTP method that the health monitor  |
+  |                       |        | uses for requests. One of CONNECT,       |
+  |                       |        | DELETE, GET, HEAD, OPTIONS, PATCH, POST, |
+  |                       |        | PUT, or TRACE.                           |
+  +-----------------------+--------+------------------------------------------+
+  | max_retries           | int    | The number of successful checks before   |
+  |                       |        | changing the operating status of the     |
+  |                       |        | member to ONLINE.                        |
+  +-----------------------+--------+------------------------------------------+
+  | max_retries_down      | int    | The number of allowed check failures     |
+  |                       |        | before changing the operating status of  |
+  |                       |        | the member to ERROR. A valid value is    |
+  |                       |        | from 1 to 10.                            |
+  +-----------------------+--------+------------------------------------------+
+  | name                  | string | Human-readable name of the monitor.      |
+  +-----------------------+--------+------------------------------------------+
+  | pool_id               | string | The pool to monitor.                     |
+  +-----------------------+--------+------------------------------------------+
+  | timeout               | int    | The time, in seconds, after which a      |
+  |                       |        | health check times out. This value must  |
+  |                       |        | be less than the delay value.            |
+  +-----------------------+--------+------------------------------------------+
+  | type                  | string | The type of health monitor. One of HTTP, |
+  |                       |        | HTTPS, PING, TCP, or TLS-HELLO.          |
+  +-----------------------+--------+------------------------------------------+
+  | url_path              | string | The HTTP URL path of the request sent by |
+  |                       |        | the monitor to test the health of a      |
+  |                       |        | backend member. Must be a string that    |
+  |                       |        | begins with a forward slash (/).         |
+  +-----------------------+--------+------------------------------------------+
+
+* **delete**
+
+  Deletes an existing health monitor.
+
+  Octavia will pass in the health monitor object as a parameter.
+
+  The health monitor will be in the ``PENDING_DELETE`` provisioning_status
+  when it is passed to the driver. The driver will notify Octavia that the
+  delete was successful by setting the provisioning_status to ``DELETED``.
+  If the delete failed, the driver will update the provisioning_status to
+  ``ERROR``.
+
+* **update**
+
+  Modifies an existing health monitor using the values supplied in the
+  health monitor object.
+
+  Octavia will pass in the original health monitor object which is the baseline
+  for the update, and a health monitor object with the fields to be updated.
+
+  As of the writing of this specification the update health monitor object may
+  contain the following:
+
+  +-----------------------+--------+------------------------------------------+
+  | Name                  | Type   | Description                              |
+  +=======================+========+==========================================+
+  | admin_state_up        | bool   | Admin state: True if up, False if down.  |
+  +-----------------------+--------+------------------------------------------+
+  | delay                 | int    | The interval, in seconds, between health |
+  |                       |        | checks.                                  |
+  +-----------------------+--------+------------------------------------------+
+  | expected_codes        | string | The expected HTTP status codes to get    |
+  |                       |        | from a successful health check. This may |
+  |                       |        | be a single value, a list, or a range.   |
+  +-----------------------+--------+------------------------------------------+
+  | healthmonitor_id      | string | ID of health monitor to update.          |
+  +-----------------------+--------+------------------------------------------+
+  | http_method           | string | The HTTP method that the health monitor  |
+  |                       |        | uses for requests. One of CONNECT,       |
+  |                       |        | DELETE, GET, HEAD, OPTIONS, PATCH, POST, |
+  |                       |        | PUT, or TRACE.                           |
+  +-----------------------+--------+------------------------------------------+
+  | max_retries           | int    | The number of successful checks before   |
+  |                       |        | changing the operating status of the     |
+  |                       |        | member to ONLINE.                        |
+  +-----------------------+--------+------------------------------------------+
+  | max_retries_down      | int    | The number of allowed check failures     |
+  |                       |        | before changing the operating status of  |
+  |                       |        | the member to ERROR. A valid value is    |
+  |                       |        | from 1 to 10.                            |
+  +-----------------------+--------+------------------------------------------+
+  | name                  | string | Human-readable name of the monitor.      |
+  +-----------------------+--------+------------------------------------------+
+  | timeout               | int    | The time, in seconds, after which a      |
+  |                       |        | health check times out. This value must  |
+  |                       |        | be less than the delay value.            |
+  +-----------------------+--------+------------------------------------------+
+  | url_path              | string | The HTTP URL path of the request sent by |
+  |                       |        | the monitor to test the health of a      |
+  |                       |        | backend member. Must be a string that    |
+  |                       |        | begins with a forward slash (/).         |
+  +-----------------------+--------+------------------------------------------+
+
+  The health monitor will be in the ``PENDING_UPDATE`` provisioning_status
+  when it is passed to the driver. The driver will update the
+  provisioning_status of the health monitor to either ``ACTIVE`` if
+  successfully updated, or ``ERROR`` if the update was not successful.
+
+  The driver is expected to validate that the driver supports the request.
+  The method will then return or raise an exception if the request cannot be
+  accepted.
+
+**Abstract class definition**
+
+..
code-block:: python
+
+    class Driver(object):
+        def health_monitor_create(self, healthmonitor):
+            """Creates a new health monitor.
+
+            :param healthmonitor (object): The health monitor object.
+            :return: Nothing if the create request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+        def health_monitor_delete(self, healthmonitor):
+            """Deletes a health monitor.
+
+            :param healthmonitor (object): The health monitor object.
+            :return: Nothing if the delete request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            """
+            raise NotImplementedError()
+
+        def health_monitor_update(self, old_healthmonitor, new_healthmonitor):
+            """Updates a health monitor.
+
+            :param old_healthmonitor (object): The baseline health monitor
+              object.
+            :param new_healthmonitor (object): The updated health monitor
+              object.
+            :return: Nothing if the update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+L7 Policy
+^^^^^^^^^
+
+* **create**
+
+  Creates an L7 policy.
+
+  Octavia will pass in the L7 policy object with all requested settings.
+
+  The L7 policy will be in the ``PENDING_CREATE`` provisioning_status and
+  ``OFFLINE`` operating_status when it is passed to the driver. The driver
+  will be responsible for updating the provisioning status of the L7 policy
+  to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.
+
+  The Octavia API will accept and do basic API validation of the create
+  request from the user. The l7policy python object representing the
+  request body will be passed to the driver create method as it was received
+  and validated with the following exceptions:
+
+  1. The project_id will be removed, if present, as this field is now
+     deprecated. The l7policy will inherit the project_id from the parent
+     load balancer.
+
+  .. _L7policy objects:
+
+  **L7policy object**
+
+  As of the writing of this specification the create l7policy object may
+  contain the following:
+
+  +-----------------------+--------+------------------------------------------+
+  | Name                  | Type   | Description                              |
+  +=======================+========+==========================================+
+  | action                | string | The L7 policy action. One of             |
+  |                       |        | REDIRECT_TO_POOL, REDIRECT_TO_URL, or    |
+  |                       |        | REJECT.                                  |
+  +-----------------------+--------+------------------------------------------+
+  | admin_state_up        | bool   | Admin state: True if up, False if down.  |
+  +-----------------------+--------+------------------------------------------+
+  | description           | string | A human-readable description for the     |
+  |                       |        | L7 policy.                               |
+  +-----------------------+--------+------------------------------------------+
+  | l7policy_id           | string | The ID of the L7 policy.                 |
+  +-----------------------+--------+------------------------------------------+
+  | listener_id           | string | The ID of the listener.                  |
+  +-----------------------+--------+------------------------------------------+
+  | name                  | string | Human-readable name of the L7 policy.    
| + +-----------------------+--------+------------------------------------------+ + | position | int | The position of this policy on the | + | | | listener. Positions start at 1. | + +-----------------------+--------+------------------------------------------+ + | redirect_pool_id | string | Requests matching this policy will be | + | | | redirected to the pool with this ID. | + | | | Only valid if action is REDIRECT_TO_POOL.| + +-----------------------+--------+------------------------------------------+ + | redirect_url | string | Requests matching this policy will be | + | | | redirected to this URL. Only valid if | + | | | action is REDIRECT_TO_URL. | + +-----------------------+--------+------------------------------------------+ + | rules | list | A list of l7rule objects. | + +-----------------------+--------+------------------------------------------+ + + *Creating a Fully Populated L7 policy* + + If the "rules" option is specified, the provider driver will create all of + the child objects in addition to creating the L7 policy instance. + +* **delete** + + Deletes an existing L7 policy. + + Octavia will pass in the L7 policy object as a parameter. + + The l7policy will be in the ``PENDING_DELETE`` provisioning_status when + it is passed to the driver. The driver will notify Octavia that the delete + was successful by setting the provisioning_status to ``DELETED``. If the + delete failed, the driver will update the provisioning_status to ``ERROR``. + +* **update** + + Modifies an existing L7 policy using the values supplied in the l7policy + object. + + Octavia will pass in the original L7 policy object which is the baseline for + the update, and an L7 policy object with the fields to be updated. + + As of the writing of this specification the update L7 policy object may + contain the following: + + +-----------------------+--------+------------------------------------------+ + | Name | Type | Description | + +=======================+========+==========================================+ + | action | string | The L7 policy action. One of | + | | | REDIRECT_TO_POOL, REDIRECT_TO_URL, or | + | | | REJECT. | + +-----------------------+--------+------------------------------------------+ + | admin_state_up | bool | Admin state: True if up, False if down. | + +-----------------------+--------+------------------------------------------+ + | description | string | A human-readable description for the | + | | | L7 policy. | + +-----------------------+--------+------------------------------------------+ + | l7policy_id | string | The ID of the L7 policy. | + +-----------------------+--------+------------------------------------------+ + | name | string | Human-readable name of the L7 policy. | + +-----------------------+--------+------------------------------------------+ + | position | int | The position of this policy on the | + | | | listener. Positions start at 1. | + +-----------------------+--------+------------------------------------------+ + | redirect_pool_id | string | Requests matching this policy will be | + | | | redirected to the pool with this ID. | + | | | Only valid if action is REDIRECT_TO_POOL.| + +-----------------------+--------+------------------------------------------+ + | redirect_url | string | Requests matching this policy will be | + | | | redirected to this URL. Only valid if | + | | | action is REDIRECT_TO_URL. 
|
+  +-----------------------+--------+------------------------------------------+
+
+  The L7 policy will be in the ``PENDING_UPDATE`` provisioning_status when
+  it is passed to the driver. The driver will update the provisioning_status
+  of the L7 policy to either ``ACTIVE`` if successfully updated, or ``ERROR``
+  if the update was not successful.
+
+  The driver is expected to validate that the driver supports the request.
+  The method will then return or raise an exception if the request cannot be
+  accepted.
+
+**Abstract class definition**
+
+.. code-block:: python
+
+    class Driver(object):
+        def l7policy_create(self, l7policy):
+            """Creates a new L7 policy.
+
+            :param l7policy (object): The l7policy object.
+            :return: Nothing if the create request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+        def l7policy_delete(self, l7policy):
+            """Deletes an L7 policy.
+
+            :param l7policy (object): The l7policy object.
+            :return: Nothing if the delete request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            """
+            raise NotImplementedError()
+
+        def l7policy_update(self, old_l7policy, new_l7policy):
+            """Updates an L7 policy.
+
+            :param old_l7policy (object): The baseline l7policy object.
+            :param new_l7policy (object): The updated l7policy object.
+            :return: Nothing if the update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+L7 Rule
+^^^^^^^
+
+* **create**
+
+  Creates a new L7 rule for an existing L7 policy.
+
+  Octavia will pass in the L7 rule object with all requested settings.
+
+  The L7 rule will be in the ``PENDING_CREATE`` provisioning_status and
+  ``OFFLINE`` operating_status when it is passed to the driver. The driver
+  will be responsible for updating the provisioning status of the L7 rule
+  to either ``ACTIVE`` if successfully created, or ``ERROR`` if not created.
+
+  The Octavia API will accept and do basic API validation of the create
+  request from the user. The l7rule python object representing the
+  request body will be passed to the driver create method as it was received
+  and validated with the following exceptions:
+
+  1. The project_id will be removed, if present, as this field is now
+     deprecated. The l7rule will inherit the project_id from the parent
+     load balancer.
+
+  .. _L7rule objects:
+
+  **L7rule object**
+
+  As of the writing of this specification the create l7rule object may
+  contain the following:
+
+  +-----------------------+--------+------------------------------------------+
+  | Name                  | Type   | Description                              |
+  +=======================+========+==========================================+
+  | admin_state_up        | bool   | Admin state: True if up, False if down.  |
+  +-----------------------+--------+------------------------------------------+
+  | compare_type          | string | The comparison type for the L7 rule. One |
+  |                       |        | of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, |
+  |                       |        | or STARTS_WITH.                          
| + +-----------------------+--------+------------------------------------------+ + | invert | bool | When True the logic of the rule is | + | | | inverted. For example, with invert True, | + | | | equal to would become not equal to. | + +-----------------------+--------+------------------------------------------+ + | key | string | The key to use for the comparison. For | + | | | example, the name of the cookie to | + | | | evaluate. | + +-----------------------+--------+------------------------------------------+ + | l7policy_id | string | The ID of the L7 policy. | + +-----------------------+--------+------------------------------------------+ + | l7rule_id | string | The ID of the L7 rule. | + +-----------------------+--------+------------------------------------------+ + | type | string | The L7 rule type. One of COOKIE, | + | | | FILE_TYPE, HEADER, HOST_NAME, or PATH. | + +-----------------------+--------+------------------------------------------+ + | value | string | The value to use for the comparison. For | + | | | example, the file type to compare. | + +-----------------------+--------+------------------------------------------+ + +* **delete** + + Deletes an existing L7 rule. + + Octavia will pass in the L7 rule object as a parameter. + + The L7 rule will be in the ``PENDING_DELETE`` provisioning_status when + it is passed to the driver. The driver will notify Octavia that the delete + was successful by setting the provisioning_status to ``DELETED``. If the + delete failed, the driver will update the provisioning_status to ``ERROR``. + +* **update** + + Modifies an existing L7 rule using the values supplied in the l7rule object. + + Octavia will pass in the original L7 rule object which is the baseline for + the update, and an L7 rule object with the fields to be updated. + + As of the writing of this specification the update L7 rule object may + contain the following: + + +-----------------------+--------+------------------------------------------+ + | Name | Type | Description | + +=======================+========+==========================================+ + | admin_state_up | bool | Admin state: True if up, False if down. | + +-----------------------+--------+------------------------------------------+ + | compare_type | string | The comparison type for the L7 rule. One | + | | | of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, | + | | | or STARTS_WITH. | + +-----------------------+--------+------------------------------------------+ + | invert | bool | When True the logic of the rule is | + | | | inverted. For example, with invert True, | + | | | equal to would become not equal to. | + +-----------------------+--------+------------------------------------------+ + | key | string | The key to use for the comparison. For | + | | | example, the name of the cookie to | + | | | evaluate. | + +-----------------------+--------+------------------------------------------+ + | l7rule_id | string | The ID of the L7 rule. | + +-----------------------+--------+------------------------------------------+ + | type | string | The L7 rule type. One of COOKIE, | + | | | FILE_TYPE, HEADER, HOST_NAME, or PATH. | + +-----------------------+--------+------------------------------------------+ + | value | string | The value to use for the comparison. For | + | | | example, the file type to compare. | + +-----------------------+--------+------------------------------------------+ + + The L7 rule will be in the ``PENDING_UPDATE`` provisioning_status when + it is passed to the driver. 
The driver will update the provisioning_status
+  of the L7 rule to either ``ACTIVE`` if successfully updated, or ``ERROR``
+  if the update was not successful.
+
+  The driver is expected to validate that the driver supports the request.
+  The method will then return or raise an exception if the request cannot be
+  accepted.
+
+**Abstract class definition**
+
+.. code-block:: python
+
+    class Driver(object):
+        def l7rule_create(self, l7rule):
+            """Creates a new L7 rule.
+
+            :param l7rule (object): The L7 rule object.
+            :return: Nothing if the create request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+        def l7rule_delete(self, l7rule):
+            """Deletes an L7 rule.
+
+            :param l7rule (object): The L7 rule object.
+            :return: Nothing if the delete request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            """
+            raise NotImplementedError()
+
+        def l7rule_update(self, old_l7rule, new_l7rule):
+            """Updates an L7 rule.
+
+            :param old_l7rule (object): The baseline L7 rule object.
+            :param new_l7rule (object): The updated L7 rule object.
+            :return: Nothing if the update request was accepted.
+            :raises DriverError: An unexpected error occurred in the driver.
+            :raises NotImplementedError: if driver does not support request.
+            :raises UnsupportedOptionError: if driver does not
+              support one of the configuration options.
+            """
+            raise NotImplementedError()
+
+Flavor
+^^^^^^
+
+Octavia flavors are defined in a separate specification (see References below).
+Support for flavors will be provided through two provider driver interfaces,
+one to query supported flavor metadata keys and another to validate that a
+flavor is supported. Both functions are synchronous.
+
+* **get_supported_flavor_metadata**
+
+  Retrieves a dictionary of supported flavor keys and their descriptions.
+
+  .. code-block:: python
+
+    {"topology": "The load balancer topology for the flavor. One of: SINGLE, ACTIVE_STANDBY",
+     "compute_flavor": "The compute driver flavor to use for the load balancer instances"}
+
+* **validate_flavor**
+
+  Validates that the driver supports the flavor metadata dictionary.
+
+  The validate_flavor method will be passed a flavor metadata dictionary that
+  the driver will validate. This is used when an operator uploads a new flavor
+  that applies to the driver.
+
+  The validate_flavor method will either return or raise an
+  ``UnsupportedOptionError`` exception.
+
+The following are the interface definitions for flavor support:
+
+.. code-block:: python
+
+    def get_supported_flavor_metadata():
+        """Returns a dictionary of flavor metadata keys supported by this driver.
+
+        The returned dictionary will include key/value pairs, 'name' and
+        'description'.
+
+        :returns: The flavor metadata dictionary
+        :raises DriverError: An unexpected error occurred in the driver.
+        :raises NotImplementedError: The driver does not support flavors.
+        """
+        raise NotImplementedError()
+
+.. code-block:: python
+
+    def validate_flavor(flavor_metadata):
+        """Validates if driver can support flavor as defined in flavor_metadata.
+
+        :param flavor_metadata (dict): Dictionary with flavor metadata.
+        :return: Nothing if the flavor is valid and supported.
+        :raises DriverError: An unexpected error occurred in the driver.
+        :raises NotImplementedError: The driver does not support flavors.
+        :raises UnsupportedOptionError: if driver does not
+          support one of the configuration options.
+        """
+        raise NotImplementedError()
+
+Exception Model
+^^^^^^^^^^^^^^^
+
+DriverError
+"""""""""""
+
+This is a catch-all exception that drivers can return if there is an
+unexpected error. An example might be a delete call for a load balancer the
+driver does not recognize. This exception includes two strings: the user fault
+string and the optional operator fault string. The user fault string,
+"user_fault_string", will be provided to the API requester. The operator fault
+string, "operator_fault_string", will be logged in the Octavia API log file
+for the operator to use when debugging.
+
+.. code-block:: python
+
+    class DriverError(Exception):
+        user_fault_string = _("An unknown driver error occurred.")
+        operator_fault_string = _("An unknown driver error occurred.")
+
+        def __init__(self, *args, **kwargs):
+            self.user_fault_string = kwargs.pop('user_fault_string',
+                                                self.user_fault_string)
+            self.operator_fault_string = kwargs.pop('operator_fault_string',
+                                                    self.operator_fault_string)
+
+            super(DriverError, self).__init__(*args, **kwargs)
+
+NotImplementedError
+"""""""""""""""""""
+
+Driver implementations may not support all operations, and are free to reject
+a request. If the driver does not implement an API function, the driver will
+raise a NotImplementedError exception.
+
+.. code-block:: python
+
+    class NotImplementedError(Exception):
+        user_fault_string = _("A feature is not implemented by this driver.")
+        operator_fault_string = _("A feature is not implemented by this driver.")
+
+        def __init__(self, *args, **kwargs):
+            self.user_fault_string = kwargs.pop('user_fault_string',
+                                                self.user_fault_string)
+            self.operator_fault_string = kwargs.pop('operator_fault_string',
+                                                    self.operator_fault_string)
+
+            super(NotImplementedError, self).__init__(*args, **kwargs)
+
+UnsupportedOptionError
+""""""""""""""""""""""
+
+Provider drivers will validate that they can complete the request -- that all
+options are supported by the driver. If the request fails validation, drivers
+will raise an UnsupportedOptionError exception. For example, if a driver does
+not support a flavor passed as an option to load balancer create(), the driver
+will raise an UnsupportedOptionError and include a message parameter providing
+an explanation of the failure.
+
+.. code-block:: python
+
+    class UnsupportedOptionError(Exception):
+        user_fault_string = _("A specified option is not supported by this driver.")
+        operator_fault_string = _("A specified option is not supported by this driver.")
+
+        def __init__(self, *args, **kwargs):
+            self.user_fault_string = kwargs.pop('user_fault_string',
+                                                self.user_fault_string)
+            self.operator_fault_string = kwargs.pop('operator_fault_string',
+                                                    self.operator_fault_string)
+
+            super(UnsupportedOptionError, self).__init__(*args, **kwargs)
+
+
+Driver Support Library
+----------------------
+
+Provider drivers need support for updating provisioning status, operating
+status, and statistics. Drivers will not directly use database operations,
+and instead will call back to Octavia using a new API.
+
+.. warning::
+
+  The methods listed here are the only callable methods for drivers.
+  All other interfaces are not considered stable or safe for drivers to
+  access.
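+
+For example, rather than writing to the database, a driver that has finished
+creating a load balancer would report the result through this library, as in
+the sketch below (the import path is an assumption; the
+``update_loadbalancer_status`` function and the status dictionary format are
+the ones defined in the following sections):
+
+.. code-block:: python
+
+    # Sketch only: a driver reporting a successful create back to Octavia
+    # through the support library; the import path is an assumption.
+    from octavia.api.drivers import driver_lib
+
+    def on_loadbalancer_created(loadbalancer_id):
+        driver_lib.update_loadbalancer_status(
+            {"loadbalancers": [{"id": loadbalancer_id,
+                                "provisioning_status": "ACTIVE",
+                                "operating_status": "ONLINE"}],
+             "healthmonitors": [], "l7policies": [], "l7rules": [],
+             "listeners": [], "members": [], "pools": []})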
+
+Update provisioning and operating status API
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The update status API defined below can be used by provider drivers
+to update the provisioning and/or operating status of Octavia resources
+(load balancer, listener, pool, member, health monitor, L7 policy, or L7
+rule).
+
+For the following status API, valid values for the provisioning status
+and operating status parameters are as defined by the Octavia status codes. If
+an existing object is not included in the input parameter, the status remains
+unchanged.
+
+provisioning_status: the status associated with the lifecycle of the
+resource. See the provisioning status codes in the `Octavia API Reference
+<https://docs.openstack.org/api-ref/load-balancer/v2/index.html>`_.
+
+operating_status: the observed status of the resource. See the operating
+status codes in the `Octavia API Reference
+<https://docs.openstack.org/api-ref/load-balancer/v2/index.html>`_.
+
+The dictionary takes this form:
+
+.. code-block:: python
+
+    { "loadbalancers": [{"id": "123",
+                         "provisioning_status": "ACTIVE",
+                         "operating_status": "ONLINE"},...],
+      "healthmonitors": [],
+      "l7policies": [],
+      "l7rules": [],
+      "listeners": [],
+      "members": [],
+      "pools": []
+    }
+
+.. code-block:: python
+
+    def update_loadbalancer_status(status):
+        """Update load balancer status.
+
+        :param status (dict): dictionary defining the provisioning status and
+            operating status for load balancer objects, including pools,
+            members, listeners, L7 policies, and L7 rules.
+        :raises: UpdateStatusError
+        :returns: None
+        """
+
+Update statistics API
+^^^^^^^^^^^^^^^^^^^^^
+
+Provider drivers can update statistics for listeners using the following API.
+Similar to the status function above, a single dictionary
+with multiple listener statistics is used to update statistics in a single
+call. If an existing listener is not included, the statistics for that object
+will remain unchanged.
+
+The general form of the input dictionary is a list of listener statistics:
+
+.. code-block:: python
+
+    { "listeners": [{"id": "123",
+                     "active_connections": 12,
+                     "bytes_in": 238908,
+                     "bytes_out": 290234,
+                     "request_errors": 0,
+                     "total_connections": 3530},...]
+    }
+
+.. code-block:: python
+
+    def update_listener_statistics(statistics):
+        """Update listener statistics.
+
+        :param statistics (dict): Statistics for listeners:
+            id (string): ID of the listener.
+            active_connections (int): Number of currently active connections.
+            bytes_in (int): Total bytes received.
+            bytes_out (int): Total bytes sent.
+            request_errors (int): Total requests not fulfilled.
+            total_connections (int): The total connections handled.
+        :raises: UpdateStatisticsError
+        :returns: None
+        """
+
+Get Resource Support
+^^^^^^^^^^^^^^^^^^^^
+
+Provider drivers may need to get information about an Octavia resource.
+As an example of its use, a provider driver may need to sync with Octavia,
+and therefore needs to fetch all of the Octavia resources it is responsible
+for managing. Provider drivers can use the existing Octavia API to get these
+resources. See the `Octavia API Reference
+<https://docs.openstack.org/api-ref/load-balancer/v2/index.html>`_.
+
+API Exception Model
+^^^^^^^^^^^^^^^^^^^
+
+The driver support API will include two exceptions, one for each of the
+two API groups:
+
+* UpdateStatusError
+* UpdateStatisticsError
+
+Each exception class will include a message field that describes the error and
+references to the failed record if available.
+
+..
code-block:: python
+
+    class UpdateStatusError(Exception):
+        fault_string = _("The status update had an unknown error.")
+        status_object = None
+        status_object_id = None
+        status_record = None
+
+        def __init__(self, *args, **kwargs):
+            self.fault_string = kwargs.pop('fault_string',
+                                           self.fault_string)
+            self.status_object = kwargs.pop('status_object', None)
+            self.status_object_id = kwargs.pop('status_object_id', None)
+            self.status_record = kwargs.pop('status_record', None)
+
+            super(UpdateStatusError, self).__init__(*args, **kwargs)
+
+    class UpdateStatisticsError(Exception):
+        fault_string = _("The statistics update had an unknown error.")
+        stats_object = None
+        stats_object_id = None
+        stats_record = None
+
+        def __init__(self, *args, **kwargs):
+            self.fault_string = kwargs.pop('fault_string',
+                                           self.fault_string)
+            self.stats_object = kwargs.pop('stats_object', None)
+            self.stats_object_id = kwargs.pop('stats_object_id', None)
+            self.stats_record = kwargs.pop('stats_record', None)
+
+            super(UpdateStatisticsError, self).__init__(*args, **kwargs)
+
+
+Alternatives
+------------
+**Driver Support Library**
+
+An alternative to this library is a REST interface that drivers use directly.
+A REST implementation can still be used within the library, but wrapping it
+in an API simplifies the programming interface.
+
+Data model impact
+-----------------
+None, the required data model changes are already present.
+
+REST API impact
+---------------
+None, the required REST API changes are already present.
+
+Security impact
+---------------
+None.
+
+Notifications impact
+--------------------
+None.
+
+Other end user impact
+---------------------
+Users will be able to direct requests to specific backends using the *provider*
+parameter. Users may want to understand the availability of provider drivers,
+and can use Octavia APIs to do so.
+
+Performance Impact
+------------------
+The performance impact on Octavia should be minimal. Driver requests will need
+to be scheduled, and Octavia will process driver callbacks through a REST
+interface. As provider drivers are loaded by Octavia, calls into drivers are
+through direct interfaces.
+
+Other deployer impact
+---------------------
+Minimal configuration is needed to support provider drivers. The work required
+is adding a driver name to Octavia's configuration file, and installing
+provider drivers supplied by third parties.
+
+Developer impact
+----------------
+The proposal defines interaction between Octavia and backend drivers, so no
+developer impact is expected.
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Work Items
+----------
+
+* Implement loading drivers defined in the Octavia configuration.
+* Implement scheduling requests to drivers.
+* Implement validating flavors with provider drivers.
+* Implement getting and testing flavors with provider drivers.
+* Implement a no-op driver for testing.
+* Implement driver support library functions:
+
+  * Update status functions
+  * Update statistics functions
+
+* Migrate the existing Octavia reference driver to use this interface.
+
+Dependencies
+============
+* Octavia API:
+  https://docs.openstack.org/api-ref/load-balancer/
+* Flavors:
+  https://docs.openstack.org/octavia/latest/contributor/specs/version1.0/flavors.html
+
+Testing
+=======
+Tempest tests should be added for testing:
+
+* Scheduling: test that Octavia effectively schedules to drivers besides
+  the default driver.
+* Request validation: test request validation API.
+* Flavor profile validation: test flavor validation.
+* Flavor queries: test flavor queries.
+* Statistics updates
+
+Functional API tests should be updated to test the provider API.
+
+Documentation Impact
+====================
+A driver developer guide should be created.
+
+References
+==========
+Octavia API
+  https://docs.openstack.org/api-ref/load-balancer/v2/index.html
+
+Octavia Flavors Specification
+  https://docs.openstack.org/octavia/latest/contributor/specs/version1.0/flavors.html
diff --git a/specs/version1.1/udp_support.rst b/specs/version1.1/udp_support.rst
new file mode 100644
index 0000000000..f54cbd54c3
--- /dev/null
+++ b/specs/version1.1/udp_support.rst
@@ -0,0 +1,370 @@
+..
+  This work is licensed under a Creative Commons Attribution 3.0 Unported
+  License.
+
+  http://creativecommons.org/licenses/by/3.0/legalcode
+
+===========
+UDP Support
+===========
+
+https://storyboard.openstack.org/#!/story/1657091
+
+Problem description
+===================
+Currently, the default driver of Octavia (haproxy) only supports TCP, HTTP,
+HTTPS, and TERMINATED_HTTPS. We need support for load balancing UDP.
+
+For some use-cases, UDP load balancing support is useful. One such case is
+real-time media streaming applications, which are based on RTSP [#foot1]_.
+
+For the Internet of Things (IoT) [#foot2]_, there are many services or
+applications that use UDP as their transmission protocol. For example:
+CoAP [#foot3]_ (Constrained Application Protocol),
+DDS [#foot4]_ (Data Distribution Service) for Real-Time systems, and the
+Thread [#foot5]_ protocol.
+
+Applications with a high demand for real-time communication (like video
+chatting) run on RUDP [#foot6]_ (Reliable User Datagram Protocol),
+RTP [#foot7]_ (Real-time Transport Protocol) and UDT [#foot8]_
+(UDP-based Data Transfer Protocol). These protocols are also based on UDP.
+
+There is no option in the API for these protocols, which Layer 4 UDP load
+balancing would provide. This means that customers lack a way to support these
+services, which may be running on VM instances in an OpenStack environment.
+
+
+Proposed change
+===============
+This spec extends the LBaaSv2 API to support `UDP` as a protocol in Listener
+and Pool resource requests.
+
+It will require a new load balancing engine to support this feature, as the
+current haproxy engine only supports TCP based protocols. If users want a load
+balancer which supports both TCP and UDP, this need cannot be met by launching
+haproxy-based amphora instances. It is a good time to extend Octavia to
+support more load balancing scenarios. This spec introduces how
+LVS [#foot9]_ can work with haproxy for UDP load balancing. The reason for
+choosing LVS is that we can easily integrate it with the existing
+``keepalived`` service. That means we can configure LVS via ``keepalived``, and
+check member health as well.
+
+For the current service VM driver implementation, haproxy runs in the
+amphora-haproxy namespace in an amphora instance. So we also need to configure
+``keepalived`` in the same namespace for UDP cases, even in SINGLE topology.
+For ACTIVE_STANDBY, ``keepalived`` will serve two purposes: UDP and VRRP.
+So, one instance of ``keepalived`` must be bound in the namespace, along with
+the LVS instance it configures.
+
+The main idea is to use ``keepalived`` to configure and manage LVS [#foot10]_.
+We also need to check the members' statuses with
+``keepalived`` instead of ``haproxy``, so there must be a different workflow
+in Octavia resources and deployment topologies.
The simplest implementation is
+LVS in NAT mode, so we will only support this mode to start. If possible
+we will add other modes in the future.
+
+Currently, a single ``keepalived`` instance can support multiple virtual server
+configurations, but to minimize the impact of reconfiguration on the existing
+listeners, we should not refresh all of the ``keepalived`` configuration
+files and restart the instances, because that would block the traffic of all
+listeners if the LVS configuration maintained by ``keepalived`` is removed.
+This spec proposes that each listener will have its own ``keepalived`` process,
+but that process won't contain a VRRP instance, just the configuration
+of the virtual server and real servers. That means if the load balancer service
+is running with the ACTIVE_STANDBY topology, each amphora instance will run
+multiple ``keepalived`` instances, the count being N+1 (where N is the UDP
+``Listener`` count, and +1 is the VRRP instance for HA). The existing
+``keepalived`` will be used, but each "UDP Listener keepalived process" will
+need to be controlled by a health check of the main VRRP keepalived process.
+Then the VIP could be moved to the BACKUP amphora instance in the
+ACTIVE_STANDBY topology if there is any issue with these UDP keepalived
+processes. The health check will simply reflect whether the keepalived
+processes are alive.
+
+The workflow for this feature contains:
+
+1. Add a new ``keepalived`` jinja template to support LVS configuration.
+2. Add ``netcat`` into dib-elements for supporting all platforms.
+3. Extend the ability of the amphora agent to run ``keepalived`` with LVS
+   configuration in amphora instances, including the init configuration, such
+   as systemd, sysvinit and upstart.
+4. Enhance the session persistence to work with UDP and enable/disable the
+   "One-Packet-Scheduling" option.
+5. Update the database to allow listeners to support both ``tcp`` and ``udp``
+   on the same port, add ``udp`` as a valid protocol and
+   ``ONE_PACKET_SCHEDULING`` as a valid session_persistence_type in the
+   database.
+6. Set up validation code for supported features of UDP load balancing (such
+   as session persistence, types of health monitors, load balancing
+   algorithms, number of L7 policies allowed, etc).
+7. Extend the existing LBaaSv2 API in Octavia to allow ``udp`` parameters in
+   the ``Listener`` resource.
+8. Extend the load balancer/listener flows to support UDP load balancers in
+   the supported topologies.
+
+Alternatives
+------------
+Introduce a new UDP driver based on LVS or other load balancer engines. Then
+find a way to close the gap with the current Octavia data models, which have a
+strong relationship with HTTP, which is based on TCP.
+
+Provide a new driver provider framework to change the amphorae backend from
+haproxy to some other load balancer engine. For example, if we introduce an
+LVS driver, we may only support the simple L7 functions with LVS, as it is a
+risk to change the provider from existing haproxy-based amphora instances to
+LVS ones. If possible, we need to limit the API to not support fields or
+resources if the backend driver is LVS, such as "insert_headers" in Listener,
+L7Policies, L7Rules, etc., a series of fields/resources related to Layer 7.
+All of this is to match the real capabilities of the backend. That means all
+of the L7 resource configuration will be ignored or translated to LVS
+configuration if the backend is LVS. For other load balancer engines which
+support UDP, such as f5/nginx, we may also need to do this.
+
+Combining the two load balancer engines gives a simple reference
+implementation: LVS only supports Layer 4 load balancing, while haproxy
+provides the more specific and detailed Layer 7 functionality. For other
+engines like f5/nginx, Octavia can directly pass the UDP parameters to the
+backend. This works well for the community reference implementation, and
+Octavia may then support more powerful and complex load balancing solutions.
+
+Data model impact
+-----------------
+There should not be any data model changes; this spec just allows a user to
+input the ``udp`` protocol to create/update the ``Listener`` and ``Pool``
+resources. So here, just extend ``SUPPORTED_PROTOCOLS`` to add the value
+``PROTOCOL_UDP``.
+
+.. code-block:: python
+
+    SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP,
+                           PROTOCOL_TERMINATED_HTTPS, PROTOCOL_PROXY,
+                           PROTOCOL_UDP)
+
+Also add a record to the table ``protocol`` for ``PROTOCOL_UDP``.
+
+As LVS only operates at Layer 4, there are some conflicts with the current
+Octavia data models. There are some limitations below:
+
+1. No L7 policies allowed.
+2. For session persistence, this spec will introduce ``persistence_timeout``
+   (sec) and ``persistence_granularity`` (subnet mask) [#foot11]_ in the
+   virtual server configuration. The function will be based on LVS. With no
+   session persistence specified, LVS will be configured with a
+   persistence_timeout of 0. There are two valid session persistence options
+   for UDP (if session persistence is specified), ``SOURCE_IP`` and
+   ``ONE_PACKET_SCHEDULING``.
+3. Introduce a 'UDP_CONNECT' type for UDP in ``healthmonitor``, which, for
+   simplicity, only checks that the UDP port is open using the ``nc`` command.
+   For the current ``healthmonitor`` API, we need to make clear how its
+   options map to the keepalived LVS configuration, as in the mapping below:
+
+   +---------------------+--------------------------+-------------------------+
+   | Option Mapping      | Healthmonitor            | Keepalived LVS          |
+   | Healthmonitor->LVS  | Description              | Description             |
+   +=====================+==========================+=========================+
+   |                     | Set the time in seconds, | Delay timer for service |
+   | delay -> delay_loop | between sending probes   | polling.                |
+   |                     | to members.              |                         |
+   +---------------------+--------------------------+-------------------------+
+   | max_retries_down -> | Set the number of allowed| Number of retries       |
+   | retry               | check failures before    | before fail.            |
+   |                     | changing the operating   |                         |
+   |                     | status of the member to  |                         |
+   |                     | ERROR.                   |                         |
+   +---------------------+--------------------------+-------------------------+
+   | timeout ->          | Set the maximum time, in | Delay before retry      |
+   | delay_before_retry  | seconds, that a monitor  | (default 1 unless       |
+   |                     | waits to connect before  | otherwise specified).   |
+   |                     | it times out. This value |                         |
+   |                     | must be less than the    |                         |
+   |                     | delay value.             |                         |
+   +---------------------+--------------------------+-------------------------+
+
+4. For UDP load balancing, we can support the same algorithms at first, such
+   as SOURCE_IP (sh), ROUND_ROBIN (rr) and LEAST_CONNECTIONS (lc).
+
+REST API impact
+---------------
+
+* Allow the ``protocol`` fields to accept ``udp``.
+* Allow the ``healthmonitor.type`` field to accept UDP type values.
+* Add some fields to ``session_persistence`` that are specific to UDP through
+  the ``SOURCE_IP`` type and a new type ``ONE_PACKET_SCHEDULING``.
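+
+As a rough illustration of this API impact, the request validation might
+reject L7-only options on UDP resources as in the sketch below (the function
+and constant names here are assumptions for illustration, not actual Octavia
+validation code):
+
+.. code-block:: python
+
+    # Sketch: reject L7-only options on UDP listeners at the API layer.
+    L7_ONLY_FIELDS = ('insert_headers',)
+
+    def validate_udp_listener(listener_dict):
+        if listener_dict.get('protocol') == 'UDP':
+            for field in L7_ONLY_FIELDS:
+                if listener_dict.get(field):
+                    raise ValueError(
+                        '%s is not allowed for UDP listeners' % field)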
+
+Create/Update Listener Request::
+
+    POST/PUT /v2.0/lbaas/listeners
+
+    {
+        "listener": {
+            "admin_state_up": true,
+            "connection_limit": 100,
+            "description": "listener one",
+            "loadbalancer_id": "a36c20d0-18e9-42ce-88fd-82a35977ee8c",
+            "name": "listener1",
+            "protocol": "UDP",
+            "protocol_port": 18000
+        }
+    }
+
+.. note:: It is the same as the current relationships, where one ``listener``
+   will have only one default ``pool`` for UDP. A ``loadbalancer`` can
+   have multiple ``listeners`` for UDP load balancing on different ports.
+
+Create/Update Pool Request
+
+``SOURCE_IP`` type case::
+
+    POST/PUT /v2.0/lbaas/pools
+
+    {
+        "pool": {
+            "admin_state_up": true,
+            "description": "simple pool",
+            "lb_algorithm": "ROUND_ROBIN",
+            "name": "my-pool",
+            "protocol": "UDP",
+            "session_persistence": {
+                "type": "SOURCE_IP",
+                "persistence_timeout": 60,
+                "persistence_granularity": "255.255.0.0"
+            },
+            "listener_id": "39de4d56-d663-46e5-85a1-5b9d5fa17829"
+        }
+    }
+
+``ONE_PACKET_SCHEDULING`` type case::
+
+    POST/PUT /v2.0/lbaas/pools
+
+    {
+        "pool": {
+            "admin_state_up": true,
+            "description": "simple pool",
+            "lb_algorithm": "ROUND_ROBIN",
+            "name": "my-pool",
+            "protocol": "UDP",
+            "session_persistence": {
+                "type": "ONE_PACKET_SCHEDULING"
+            },
+            "listener_id": "39de4d56-d663-46e5-85a1-5b9d5fa17829"
+        }
+    }
+
+.. note:: The validation for UDP will only allow setting the specific
+   fields which are associated with UDP. For example, a user cannot set the
+   ``protocol`` to "udp" and also set ``insert_headers`` in the same request.
+
+Create/Update Health Monitor Request::
+
+    POST/PUT /v2.0/lbaas/healthmonitors
+
+    {
+        "healthmonitor": {
+            "name": "Good health monitor",
+            "admin_state_up": true,
+            "pool_id": "c5e9e801-0473-463b-a017-90c8e5237bb3",
+            "delay": 10,
+            "max_retries": 4,
+            "max_retries_down": 4,
+            "timeout": 5,
+            "type": "UDP_CONNECT"
+        }
+    }
+
+.. note:: We do not allow creating a ``healthmonitor`` with any other L7
+   parameters, like "http_method", "url_path" and "expected_code", if the
+   associated ``pool`` supports UDP. The "max_retries" option differs from
+   the keepalived/LVS API description, so its default value is the same as
+   the value of "max_retries_down" if the user specified one. In general,
+   "max_retries" overrides "max_retries_down".
+
+Security impact
+---------------
+A neutron security group rule must be added to the existing security group to
+allow UDP traffic. The security impact is minimal, as keepalived/LVS will be
+running in the tenant traffic network namespace.
+
+Notifications impact
+--------------------
+No expected change.
+
+Other end user impact
+---------------------
+Users will be able to pass "UDP" to create/update Listener/Pool resources for
+a UDP load balancer.
+
+Performance Impact
+------------------
+* If the enabled driver is LVS, it will have good performance for L4 load
+  balancing, but lack any L7 functionality.
+* As this spec introduces LVS and haproxy working together, if users update the
+  ``Listener`` or ``Pool`` resources in a ``LoadBalancer`` instance frequently,
+  the load balancing functionality may be delayed for a while as the
+  UDP-related LVS configuration is refreshed.
+* As we need to add a keepalived monitoring process for each UDP listener, the
+  RAM usage of amphora VM instances must be considered.
+
+Other deployer impact
+---------------------
+No expected change.
+
+Developer impact
+----------------
+No expected change.
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+zhaobo
+
+
+Work Items
+----------
+* Add/extend startup script templates for keepalived processes, including
+  configuration.
+* Extend the existing amphora agent and driver so they can generate and
+  control LVS via ``keepalived`` in amphora instances.
+* Extend the existing Octavia V2 API to accept the ``udp`` parameter in the
+  ``Listener`` and ``Pool`` resources.
+* Extend the LoadBalancer/Listener flows to support UDP load balancers in
+  the supported topologies.
+* Extend the Octavia V2 API to accept the UDP fields.
+* Add the required logic to the haproxy amphora agent and to the affected
+  resource workflows in Octavia.
+* Add API validation code to validate the fields in the UDP cases.
+* Add unit tests to Octavia.
+* Add API functional tests.
+* Add scenario tests to the octavia tempest plugin.
+* Update the CLI and octavia-dashboard to support UDP field input.
+* Documentation work.
+
+Dependencies
+============
+None
+
+Testing
+=======
+Unit tests, functional tests, API tests and scenario tests are necessary.
+
+Documentation Impact
+====================
+The Octavia API reference will need to be updated. The load balancing
+cookbook should also be updated, making the differences in ``healthmonitor``
+behavior for the UDP cases clear.
+
+References
+==========
+
+.. [#foot1] https://en.wikipedia.org/wiki/Real_Time_Streaming_Protocol
+.. [#foot2] https://en.wikipedia.org/wiki/Internet_of_things
+.. [#foot3] https://en.wikipedia.org/wiki/Constrained_Application_Protocol
+.. [#foot4] https://en.wikipedia.org/wiki/Data_Distribution_Service
+.. [#foot5] https://en.wikipedia.org/wiki/Thread_(network_protocol)
+.. [#foot6] https://en.wikipedia.org/wiki/Reliable_User_Datagram_Protocol
+.. [#foot7] https://de.wikipedia.org/wiki/Real-Time_Transport_Protocol
+.. [#foot8] https://en.wikipedia.org/wiki/UDP-based_Data_Transfer_Protocol
+.. [#foot9] http://www.linuxvirtualserver.org/
+.. [#foot10] https://github.com/acassen/keepalived/blob/master/doc/keepalived.conf.SYNOPSIS#L559
+.. [#foot11] http://www.linuxvirtualserver.org/docs/persistence.html
diff --git a/specs/version14.0/SRIOV.rst b/specs/version14.0/SRIOV.rst
new file mode 100644
index 0000000000..9dcf93334b
--- /dev/null
+++ b/specs/version14.0/SRIOV.rst
@@ -0,0 +1,220 @@
+..
+    This work is licensed under a Creative Commons Attribution 3.0 Unported
+    License.
+
+    http://creativecommons.org/licenses/by/3.0/legalcode
+
+==========================================
+Support SR-IOV network ports in Octavia
+==========================================
+
+The maximum performance of Octavia Amphora based load balancers is often
+limited by the Software Defined Networking (SDN) used in the OpenStack
+deployment. There are users that want very high connection rates and high
+bandwidth through their load balancers.
+
+This specification describes how we can add Single Root I/O Virtualization
+(SR-IOV) support to Octavia Amphora load balancers.
+
+Problem description
+===================
+
+* Users would like to use SR-IOV VFs for the VIP and member ports on their
+  Amphora based load balancers for improved maximum performance and reduced
+  latency. Initial testing showed a 9% increase in bandwidth and a 70% drop
+  in latency through the load balancer when using SR-IOV.
+
+* Users are overflowing tap interfaces with bursty "thundering herd" traffic
+  such that packets are unable to make it into the Amphora instance.
+
+Proposed change
+===============
+
+Since Octavia hot plugs the network interfaces into the Amphora instances,
+the first piece of work will be documenting how to configure nova to
+properly place the Amphorae on hosts with the required hardware and
+networks. There is some existing documentation for this in the nova guide,
+but we should summarize it with a focus on Amphora.
+
+This documentation will include how to configure host aggregates, the
+compute flavor, and the Octavia flavor to properly schedule the Amphora
+instances.
+
+In general, the SR-IOV ports will be handled the same way ports are handled
+with the AAP driver, including registering the VIP as an AAP address. Even
+though this is technically not required for SR-IOV ports, it makes sure the
+address is allocated in neutron. Only the base VRRP ports will allocate an
+SR-IOV VF, as the AAP port will be "unbound" with a vnic_type of "normal".
+
+The create load balancer flow will be enhanced to create the base VRRP port
+using an SR-IOV VF if the Octavia flavor has SRIOV_VIP set to true. If
+placement/nova scheduling fails to find an appropriate host, or the SR-IOV
+VF port fails to plug into the Amphora, additional logging may be required,
+but the normal revert flows should continue to handle the error situation
+and mark the load balancer with provisioning status ERROR.
+
+The building of the listener create and update flows will need to be
+updated to include extra tasks that configure nftables inside the Amphora,
+replacing the functionality of the neutron security groups that is lost
+when using SR-IOV ports.
+
+The Amphora agent will need to be enhanced with a new "security group"
+endpoint and the ability to configure the Amphora nftables. The nftables
+rules will be added as stateless rules, meaning conntrack will not be
+enabled. The load balancing engines are already managing state for the
+flows, so there is no reason to also have state management in the firewall.
+
+I am proposing we only support nftables inside the Amphora, as most
+distributions are moving away from iptables towards nftables.
+
+Alternatives
+------------
+
+There are two obvious alternatives:
+
+* Do nothing and continue to rely on SDN performance.
+
+* Use provider networks to remove some of the overhead of the SDN.
+
+It is not clear that SDN performance can improve to a level that would meet
+the needs of Octavia Amphora load balancers, and provider networks still
+have some overhead and limitations depending on how they are implemented
+(tap interfaces, etc.).
+
+Data model impact
+-----------------
+
+The load balancer and member objects will be expanded to include the vnic
+type for the ports.
+
+REST API impact
+---------------
+
+The Octavia API will be expanded to include the vnic type used for the VIP
+and member ports. The field will either be "normal" for OVS/OVN ports or
+"direct" for SR-IOV ports. This field will use the same terminology as
+neutron.
+
+The Amphora API will need to be expanded to have a security group endpoint.
+This endpoint will accept POST calls that contain the allowed_cidrs,
+protocol, and port information required to configure the appropriate
+nftables rules.
+
+When this endpoint is called, the amphora agent will flush the current
+tables and build up a fresh table. There will be chains for the VIP, VRRP,
+and member ports. This will be implemented using the python nftables
+bindings.
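+
+A minimal sketch of what the agent-side handling could look like with the
+``nftables`` Python bindings is shown below. The table, chain, and function
+names are hypothetical, and the real endpoint will manage separate chains
+per port as described above; this only illustrates the flush-and-rebuild
+approach with stateless (no conntrack) rules.
+
+.. code-block:: python
+
+    import nftables
+
+    def apply_security_group(allowed_cidrs, protocol, port):
+        """Rebuild a stateless nftables ruleset for an amphora port."""
+        commands = [
+            {"add": {"table": {"family": "inet", "name": "amphora_sg"}}},
+            # Flush and rebuild so repeated updates stay idempotent.
+            {"flush": {"table": {"family": "inet", "name": "amphora_sg"}}},
+            {"add": {"chain": {"family": "inet", "table": "amphora_sg",
+                               "name": "vip", "type": "filter",
+                               "hook": "input", "prio": -300,
+                               "policy": "drop"}}},
+        ]
+        for cidr in allowed_cidrs:
+            addr, prefix_len = cidr.split('/')
+            commands.append({"add": {"rule": {
+                "family": "inet", "table": "amphora_sg", "chain": "vip",
+                "expr": [
+                    # Stateless match on source prefix, no ct state rules.
+                    {"match": {"op": "==",
+                               "left": {"payload": {"protocol": "ip",
+                                                    "field": "saddr"}},
+                               "right": {"prefix": {
+                                   "addr": addr,
+                                   "len": int(prefix_len)}}}},
+                    {"match": {"op": "==",
+                               "left": {"payload": {"protocol": protocol,
+                                                    "field": "dport"}},
+                               "right": port}},
+                    {"accept": None},
+                ]}}})
+        nft = nftables.Nftables()
+        rc, output, error = nft.json_cmd({"nftables": commands})
+        if rc != 0:
+            raise RuntimeError(f"nftables update failed: {error}")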
+
+Security impact
+---------------
+
+Neutron security groups do not work on SR-IOV ports, so the amphora agent
+will need to manage nftables for the SR-IOV ports.
+
+There is no current use case where Octavia would need TRUST mode VFs, so
+this specification does not include any discussion of enabling TRUST on VFs
+used by the Octavia amphora driver. The amphora will treat TRUST VFs as if
+they were not TRUST enabled.
+
+Notifications impact
+--------------------
+
+None
+
+Other end user impact
+---------------------
+
+End users will need to select the appropriate Octavia flavor at load
+balancer creation time. They will also need to specify the proper network
+that matches the network(s) defined in the compute and Octavia flavors.
+
+Performance Impact
+------------------
+
+This proposal is specifically about improving data plane performance.
+
+I would expect little change to the provisioning time, or possibly a faster
+provisioning time, when using SR-IOV ports, as it should require fewer API
+calls to Neutron.
+
+Other deployer impact
+---------------------
+
+If deployers want SR-IOV interface support at deployment time, they will
+need to configure the required compute host aggregates, compute flavors,
+and an Octavia flavor supporting the SR-IOV enabled hosts and networks.
+
+We also recommend that the FDB L2 agent be enabled, when needed, so that
+virtual ports on the same compute host can communicate with the SR-IOV
+ports.
+
+The Amphora images will now require the nftables and python3-nftables
+packages.
+
+Developer impact
+----------------
+
+There should be minimal developer impact as it is enhancing existing flows.
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Primary assignee:
+  johnsom
+
+Work Items
+----------
+
+1. Document the required host aggregates, compute flavor, and Octavia
+   flavor.
+2. Update the load balancer "create" flow creation to use the SR-IOV tasks
+   when creating the VRRP base ports.
+3. Update the load balancer data model to store the port vnic type.
+4. Expand the load balancer API to include the vnic type used for the VIP.
+5. Update the listener create/update flows to add the extra tasks to
+   configure the nftables inside the Amphora.
+6. Add a security group endpoint to the Amphora agent to allow configuring
+   and updating the nftables inside the Amphora.
+7. Add any necessary logging and error handling should nova fail to attach
+   SR-IOV ports.
+8. Add the required unit and functional tests for the new code.
+9. Add the required tempest tests to cover the usage scenarios (pending igb
+   driver support in the PTI platforms).
+
+Dependencies
+============
+
+None
+
+Testing
+=======
+
+Currently this feature cannot be fully tested in the OpenDev gates, as it
+requires an SR-IOV capable NIC in the test system.
+
+There will be unit and functional test coverage.
+
+Recently qemu has added a virtual device, the "igb" device, that is capable
+of emulating an SR-IOV device. Versions of qemu and the associated libraries
+that include this new device are not yet shipping in any distribution
+supported by OpenStack.
+
+When the "igb" device becomes available, we should be able to run scenario
+tests with SR-IOV VIP and member ports.
+
+Performance testing will be out of scope, because the OpenDev testing
+environment does not contain SR-IOV capable NICs and is not set up for data
+plane performance testing.
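+
+For illustration, a deployer might expose this capability through an
+Octavia flavor along the following lines. The ``sriov_vip`` and
+``compute_flavor`` capability keys are assumptions for this sketch; the
+final key names are an implementation detail::
+
+    openstack loadbalancer flavorprofile create --name amphora-sriov \
+        --provider amphora \
+        --flavor-data '{"sriov_vip": true, "compute_flavor": "<flavor-id>"}'
+    openstack loadbalancer flavor create --name sriov \
+        --flavorprofile amphora-sriov --enable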
+
+Documentation Impact
+====================
+
+An administrative document will need to be created that describes the
+process required to set up a compute flavor and an Octavia flavor for
+SR-IOV devices.
+
+References
+==========
+
+* https://docs.openstack.org/neutron/latest/admin/config-sriov.html
+
+* https://docs.openstack.org/nova/latest/reference/scheduler-hints-vs-flavor-extra-specs.html
+
+* https://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/granular-resource-requests.html
+
+* https://www.qemu.org/docs/master/system/devices/igb.html
diff --git a/specs/version15.0/custom-security-groups-for-VIP-ports.rst b/specs/version15.0/custom-security-groups-for-VIP-ports.rst
new file mode 100644
index 0000000000..0ab4b26766
--- /dev/null
+++ b/specs/version15.0/custom-security-groups-for-VIP-ports.rst
@@ -0,0 +1,203 @@
+..
+    This work is licensed under a Creative Commons Attribution 3.0 Unported
+    License.
+
+    http://creativecommons.org/licenses/by/3.0/legalcode
+
+================================================
+Support for Custom Security Groups for VIP Ports
+================================================
+
+This specification describes how Octavia can allow users to provide their
+own Neutron Security Groups for the VIP Port of a load balancer.
+
+
+Problem description
+===================
+
+Many users have requested a method for customizing the security groups of
+the VIP ports of a load balancer in Octavia. There are several benefits to
+using custom security groups:
+
+* Allowing incoming connections only from specific remote group IDs.
+
+* Having a unique API (the networking Security Groups API) to configure the
+  network security for all of a user's resources.
+
+Note: This specification is not about Security Groups for the member ports;
+that feature could be the subject of another spec.
+
+
+Proposed change
+===============
+
+A user will be able to provide a ``vip_sg_ids`` parameter when creating a
+load balancer.
+
+This parameter will be optional and will default to None. When set, it
+contains a list of Neutron Security Group IDs. When it is not set, the
+behavior of the VIP port does not change.
+In this document, these security groups are called Custom security
+groups, as opposed to the existing Octavia-managed security groups.
+
+If the parameter is set, Octavia would apply these Custom security
+groups to the VIP and Amphora ports (known as VRRP ports internally). Then
+Octavia would create and manage a security group (the Octavia-managed
+security group) with rules for its internal communication (haproxy peering,
+VRRP communication). Thus the VIP port would have more than one Neutron
+security group.
+
+Octavia would not manage any rules based on the port or the protocol of the
+listeners; for each new listener, the user would have to add their own
+rules to their Custom security groups.
+
+
+Alternatives
+------------
+
+An alternative method would be to implement an ``allowed_remote_group_ids``
+parameter when creating a load balancer. Users would then have a feature
+that covers the first point described in "Problem description".
+
+
+Data model impact
+-----------------
+
+This feature requires some changes in the data model. A new table
+``VipSecurityGroup`` is added; it contains:
+
+* ``load_balancer_id``: the UUID of the load balancer (which also
+  represents a Vip)
+
+* ``sg_id``: the UUID of a Custom Security Group
+
+A load balancer (identified by its ID) or a VIP is linked to one or more
+Custom Security Groups.
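+
+As a minimal sketch, the new table could look like the following SQLAlchemy
+model. The names follow the attributes above, while the column sizes, the
+foreign key target and the composite primary key are assumptions rather
+than the final schema:
+
+.. code-block:: python
+
+    import sqlalchemy as sa
+    from sqlalchemy.orm import declarative_base
+
+    Base = declarative_base()
+
+
+    class VipSecurityGroup(Base):
+        """Associates a load balancer VIP with a Custom Security Group."""
+
+        __tablename__ = 'vip_security_group'
+
+        # Composite primary key: one row per (load balancer, security group)
+        # pair, so a VIP can be linked to multiple Custom Security Groups.
+        load_balancer_id = sa.Column(
+            sa.String(36), sa.ForeignKey('vip.load_balancer_id'),
+            primary_key=True)
+        sg_id = sa.Column(sa.String(36), primary_key=True)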
+
+It also requires an update of the data model in octavia-lib.
+
+
+REST API impact
+---------------
+
+The POST /v2/lbaas/loadbalancers endpoint is updated to accept an optional
+``vip_sg_ids`` parameter (a list of UUIDs that represent Custom Security
+Groups).
+
+If the parameter is set, Octavia checks that the Custom security groups
+exist and that the user is allowed to use them; then Octavia creates new
+VIPSecurityGroup objects with these new parameters.
+
+The PUT /v2/lbaas/loadbalancers endpoint is also updated, allowing the list
+of Custom Security Groups to be updated.
+
+The ``vip_sg_ids`` parameter is also added to the reply of the GET method.
+
+Using ``vip_sg_ids`` is incompatible with some existing features in
+Octavia, like ``allowed_cidrs`` in the listeners. Setting ``allowed_cidrs``
+on a load balancer with ``vip_sg_ids`` should be denied, as should updating
+the ``vip_sg_ids`` of a load balancer that includes listeners with
+``allowed_cidrs``.
+
+``vip_sg_ids`` is also incompatible with SR-IOV enabled load balancers and
+other provider drivers.
+
+
+Security impact
+---------------
+
+When this feature is enabled, Octavia no longer handles the security of the
+VIP port; the users are responsible for the configuration of the Custom
+Security Groups.
+
+An RBAC policy is added to Octavia, so an administrator can limit access to
+this feature to a specific role.
+
+
+Notifications impact
+--------------------
+
+None.
+
+
+Other end user impact
+---------------------
+
+The impact for the end users is that they are responsible for allowing the
+incoming traffic to their load balancer. The creation of a new listener
+would require at least two API calls: one for creating the listener in
+Octavia and one for adding a new security group rule to the Custom security
+group.
+
+
+Performance Impact
+------------------
+
+Performance could be impacted if the user adds too many rules to the
+Custom security group, but this issue is outside the scope of Octavia.
+
+
+Other deployer impact
+---------------------
+
+None.
+
+
+Developer impact
+----------------
+
+Impact is minimal: a few changes in the API and in the DB, and only a few
+new conditionals in the allowed_address_pairs module.
+
+It could have a more significant impact if this feature is added to the
+octavia-dashboard.
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Primary assignee:
+  gthiemonge
+
+
+Work Items
+----------
+
+1. Update the data model of the VIP port in octavia_lib and octavia.
+2. Update the API to handle the new ``vip_sg_ids`` parameter.
+3. Update the allowed_address_pairs module to handle this new feature.
+4. Update the api-ref and the user guide.
+5. Add the required unit and functional tests.
+6. Add support to python-octaviaclient and openstacksdk.
+7. Add tempest tests for this feature.
+
+
+Dependencies
+============
+
+None.
+
+
+Testing
+=======
+
+The feature can easily be tested with tempest tests:
+
+- create a load balancer with its Custom security groups, and check that it
+  is reachable
+- update the list of Custom security groups, and check that the
+  connectivity to the load balancer is affected accordingly.
+
+
+Documentation Impact
+====================
+
+The feature will be included in the cookbook.
+The api-ref and feature matrix will also be updated.
+
+
+References
+==========
+
+None.
diff --git a/specs/version15.0/rate_limiting.rst b/specs/version15.0/rate_limiting.rst
new file mode 100644
index 0000000000..6306258212
--- /dev/null
+++ b/specs/version15.0/rate_limiting.rst
@@ -0,0 +1,244 @@
+..
+    This work is licensed under a Creative Commons Attribution 3.0 Unported
+    License.
+
+    http://creativecommons.org/licenses/by/3.0/legalcode
+
+============================================
+Support for traffic rate limiting in Octavia
+============================================
+Rate limiting is an essential technique for managing the traffic that is
+handled by a load balancer and for ensuring fairness and system stability.
+
+Problem description
+===================
+Without rate limiting, malicious clients and bots may be able to attack a
+server by flooding it with traffic or requests. Rate limiting can help to
+limit the amount of resources that single clients can allocate on the
+server side and can therefore help to mitigate DoS attacks.
+
+Octavia already allows limiting the number of concurrent connections by
+using the ``connection_limit`` option when configuring a listener. This
+option will continue to exist and will work independently of this new rate
+limiting feature.
+
+Proposed change
+===============
+Both the data model and the REST API need to be extended.
+The concept of *rate limit policies* and *rate limit rules* allows managing
+rules for rate limiting and applying them to listeners. This document
+refers to them as policies and rules for simplicity.
+
+A policy consists of one or more rules.
+Each policy defines an ``action`` that specifies the rate limiting method
+that should be used.
+Rules within a policy will be combined using a logical AND operation.
+That means all rules within a policy need to be violated before rate
+limiting gets applied. Multiple policies on a single listener logically OR
+each other.
+
+Rate limiting can be implemented in various ways using different metrics
+for different protocols. Hence, this specification tries to be as flexible
+as possible while keeping the API simple. Drivers may choose to
+implement only a subset of the possible configuration variants,
+or even none of them.
+The algorithm used for rate limiting is considered an implementation detail
+of the driver and out of the scope of this document.
+
+Alternatives
+------------
+Rate limiting for all request based protocols (HTTP protocols) could be
+done by extending the L7 policy API and by managing rules as L7 rules.
+
+Rate limiting for all TCP based protocols could be supported
+and configured using the listener API.
+
+Splitting the configuration between two different APIs may confuse users,
+however. Using a separate API for rate limiting seems like the cleaner
+approach.
+
+Data model impact
+-----------------
+A new ``RateLimitPolicy`` model class contains data about policies.
+Its attributes are:
+
+* ``id`` (string)
+* ``name`` (string)
+* ``description`` (string)
+* ``rules`` (``RateLimitRule``\s)
+* ``action`` (string)
+* ``listener_id`` (string)
+* ``listener`` (string)
+* ``enabled`` (boolean)
+* ``provisioning_status`` (string)
+* ``operating_status`` (string)
+* ``project_id`` (string)
+* ``created_at`` (DateTime)
+* ``updated_at`` (DateTime)
+* ``tags`` (string)
+
+The ``rules`` attribute forms a
+one-to-many relationship with a new ``RateLimitRule`` model class.
+``action`` defines the rate limiting method. Its possible values,
+implemented as a Python enum, are
+``DENY`` (respond with HTTP 429),
+``REJECT`` (close the connection with no response),
+``SILENT_DROP`` (like ``REJECT``, but without client notification) and
+``QUEUE`` (queue new requests, "leaky bucket").
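+
+A sketch of how these values might be defined is shown below, together with
+the rule metrics described in the next section. The module location and
+exact member spellings are illustrative only:
+
+.. code-block:: python
+
+    import enum
+
+
+    class RateLimitAction(enum.Enum):
+        """Rate limiting method applied once a policy's rules trigger."""
+        DENY = 'DENY'                # respond with HTTP 429
+        REJECT = 'REJECT'            # close the connection, no response
+        SILENT_DROP = 'SILENT_DROP'  # like REJECT, no client notification
+        QUEUE = 'QUEUE'              # queue new requests ("leaky bucket")
+
+
+    class RateLimitMetric(enum.Enum):
+        """Metric measured per client over the configured interval."""
+        REQUESTS = 'REQUESTS'
+        REQUESTS_PER_URL = 'REQUESTS_PER_URL'
+        KBYTES = 'KBYTES'
+        PACKETS = 'PACKETS'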
+The existing ``Listener`` model class gets a new
+one-to-many relationship with the
+``RateLimitPolicy`` model class using a new ``rate_limit_policies``
+attribute. That means a listener may have multiple policies, but a policy
+can be linked to only one listener.
+
+The new ``RateLimitRule`` model class defines a specific
+rate limiting rule. Its attributes are:
+
+* ``id`` (string)
+* ``name`` (string)
+* ``project_id`` (string)
+* ``metric`` (string)
+* ``threshold`` (integer)
+* ``interval`` (integer, defaults to 30)
+* ``urls`` (ScalarListType)
+* ``provisioning_status`` (string)
+* ``operating_status`` (string)
+* ``tags`` (string)
+
+Possible values of ``metric`` are ``REQUESTS``, ``REQUESTS_PER_URL``,
+``KBYTES`` and ``PACKETS``.
+``interval`` denotes the time interval in seconds in
+which the metric gets measured for each client.
+``threshold`` defines the threshold at which the rate gets limited.
+The ``urls`` field defines the URL paths for the specific rule and is
+ignored if ``metric`` is not ``REQUESTS_PER_URL``.
+
+REST API impact
+---------------
+If not stated otherwise, the attributes in the responses match the ones in
+the data model. Relationships will be shown using the IDs of the related
+objects.
+
+Listener
+~~~~~~~~
+The listener API gets a new ``rate_limit_policies`` (Optional) attribute.
+Valid values are ``null`` (the default) or a list of policy IDs.
+
+Rate Limit Policy
+~~~~~~~~~~~~~~~~~
+The request of the ``POST /v2/lbaas/ratelimitpolicies``
+and ``PUT /v2/lbaas/ratelimitpolicies/{policy_id}`` methods of the
+``Rate Limit Policy`` API takes the attributes
+``name`` (Optional), ``description`` (Optional), ``listener_id``,
+``action``, ``enabled`` (Optional), ``project_id`` (Optional) and
+``tags`` (Optional).
+The response contains all attributes in the data model.
+The ``GET /v2/lbaas/ratelimitpolicies`` method supports the attributes
+``project_id`` (Optional) and ``fields`` (Optional).
+The response is a list of policies filtered by the optional ``project_id``
+and containing the desired ``fields`` (or all of them).
+The endpoint ``/v2/lbaas/ratelimitpolicies/{policy_id}`` supports the
+``GET`` and ``DELETE`` methods.
+
+Rate Limit Rule
+~~~~~~~~~~~~~~~
+The ``GET /v2/lbaas/ratelimitpolicies/{policy_id}/rules``
+method behaves like the GET method for the policy, but for rules.
+The ``POST /v2/lbaas/ratelimitpolicies/{policy_id}/rules`` method accepts
+the request attributes ``listener_id``,
+``project_id`` (Optional),
+``metric``, ``threshold``, ``interval`` (Optional), ``urls`` (Optional) and
+``tags`` (Optional).
+The ``GET /v2/lbaas/ratelimitpolicies/{policy_id}/rules/{rule_id}`` request
+accepts an optional ``fields`` attribute.
+The ``PUT /v2/lbaas/ratelimitpolicies/{policy_id}/rules/{rule_id}``
+method accepts
+the request attributes ``project_id`` (Optional),
+``metric``, ``threshold``, ``interval`` (Optional), ``urls`` (Optional) and
+``tags`` (Optional).
+The ``DELETE /v2/lbaas/ratelimitpolicies/{policy_id}/rules/{rule_id}``
+method has no response body.
+
+Security impact
+---------------
+None.
+
+Notifications impact
+--------------------
+None.
+
+Other end user impact
+---------------------
+None.
+
+Performance Impact
+------------------
+Rate limiting is an optional feature and has no performance impact in a
+default configuration. Depending on the complexity of the rules and the
+implementation, some processing overhead may impact performance.
+In the ACTIVE/STANDBY topology, some additional network overhead for the
+synchronization of request statistics (i.e. stick tables for Amphorae) is
+to be expected.
+
+Overall, however, fairness and performance can improve when using rate
+limiting.
+
+Other deployer impact
+---------------------
+Deployers might want to review the RAM setting of the Nova flavor
+that is used for the load balancers. Rate limiting will require some
+additional memory on Amphorae, depending on the number of rules and
+the interval setting.
+
+Developer impact
+----------------
+Driver developers are impacted by the extended API and data model, which
+allow them to implement the new feature in future versions.
+
+Implementation
+==============
+The reference implementation using the Amphora driver will use HAProxy's
+own rate limiting capabilities. In addition to limiting the number of
+HTTP requests, it will also be possible to limit the number of HTTP
+requests by URL path [#haproxy-url-path]_.
+The sliding window rate limiting algorithm will be
+used [#haproxy-four-examples]_.
+
+Rate limiting based on the TCP protocol is not part of the
+initial implementation, but might be added in a future version.
+This could be done using ``nftables`` rules [#nftables]_.
+
+Assignee(s)
+-----------
+Primary assignee:
+  Tom Weininger
+
+Work Items
+----------
+#. Adjust the API documentation
+#. Create user documentation
+#. Implement HTTP rate limiting in the Amphora driver
+#. Implement HTTP by URL rate limiting in the Amphora driver
+#. Implement unit tests
+
+Dependencies
+============
+None.
+
+Testing
+=======
+Testing should focus on the API changes and on the verification and
+correctness of the generated HAProxy configuration.
+
+Documentation Impact
+====================
+API and user documentation will need to be extended.
+
+References
+==========
+
+.. [#haproxy-four-examples] https://www.haproxy.com/blog/four-examples-of-haproxy-rate-limiting
+.. [#nftables] https://wiki.nftables.org/wiki-nftables/index.php/Meters
+.. [#haproxy-url-path] https://www.haproxy.com/documentation/haproxy-configuration-tutorials/traffic-policing/#rate-limit-http-requests-by-url-path
diff --git a/specs/version15.0/resize.rst b/specs/version15.0/resize.rst
new file mode 100644
index 0000000000..14f1f505f1
--- /dev/null
+++ b/specs/version15.0/resize.rst
@@ -0,0 +1,149 @@
+======================
+Load balancer resizing
+======================
+Link to blueprint: https://blueprints.launchpad.net/octavia/+spec/octavia-resize-loadbalancer
+
+This spec describes the functionality for resizing load balancers. The main
+aim of this new feature is to enable users to change the flavor of a load
+balancer directly from the API.
+
+Problem description
+===================
+Today, users can't easily change the flavor. They have to recreate their
+load balancer with the new flavor and migrate their configuration, such as
+L7 rules, listeners, etc.
+This can be very tedious for a user who wants to quickly resize their load
+balancer, and it can be especially complicated to script.
+
+Proposed change
+===============
+The proposed change is to add an endpoint that allows load balancer
+resizing. It would also make it easy to cancel a resize in progress and
+return to the previous flavor.
+
+To achieve this, the endpoint will launch a workflow to initiate a failover
+with the new flavor ID. This will involve patching the
+`get_failover_LB_flow` to add the `flavorId` parameter. At the end of the
+workflow the `flavor_id` will be updated in the `loadbalancer` table.
+
+A check will be added before the start of the failover to prevent migration
+to a flavor profile topology different from the original one. For example,
+a user cannot migrate from a standalone flavor profile to an active/standby
+one (a sketch of this check is shown before the references below).
+
+If a problem occurs during resizing, the load balancer status will be set
+to ERROR. The flavor will remain the same in the database, allowing the
+user to perform a failover or retry the same call.
+
+Alternatives
+------------
+ * Rebuild the VM of the load balancer with the new compute flavor.
+ * Use the "backup" and "restore" functionality.
+
+
+Data model impact
+-----------------
+None
+
+
+REST API impact
+---------------
+Add one endpoint in `/v2.0/lbaas/loadbalancers`.
+
+To call this endpoint, the user must have the `load-balancer:write` role.
+
+Start a resize of a load balancer::
+
+    PUT /v2.0/lbaas/loadbalancers/{loadbalancer_id}/resize
+
+    {
+        "new_flavor_id": "6d425a5e-429f-4848-b240-ab31c6d211e4"
+    }
+
+.. list-table:: Response code
+   :widths: 15 50
+   :header-rows: 1
+
+   * - Code
+     - Description
+   * - 202 Accepted
+     - Resize starting
+   * - 400 Bad request
+     - Resize object is invalid
+   * - 401 Unauthorized
+     - X-Auth-Token is invalid
+   * - 403 Forbidden
+     - X-Auth-Token is valid, but the associated project does not have
+       the appropriate role/scope
+   * - 404 Not Found
+     - Load balancer not found
+
+Security impact
+---------------
+None
+
+Notifications impact
+--------------------
+Add a notification to announce a load balancer resize.
+
+Other end user impact
+---------------------
+Add one command to the CLI client to launch a resize::
+
+    openstack loadbalancer resize --flavor <flavor_id> <load_balancer_id>
+
+Add functions for resizing to the `openstacksdk`.
+
+Performance Impact
+------------------
+None
+
+
+Other deployer impact
+---------------------
+None
+
+
+Developer impact
+----------------
+None
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+TBD
+
+Work Items
+----------
+ - Create the endpoint
+ - Patch the `get_failover_LB_flow` to add the `flavorId` parameter
+ - Add unit tests
+ - Add API functional tests
+ - Add tempest tests
+ - Update the Octavia CLI and openstacksdk
+ - Write documentation
+
+Dependencies
+============
+None
+
+Testing
+=======
+Tempest tests should be added for testing this new feature:
+
+ - Create a load balancer
+ - Try to resize it
+
+Documentation Impact
+====================
+ - A user guide to explain how the feature works.
+ - Add a note on the fact that some flavor changes can cause data plane
+   downtime. Similarly, going from a newer image tag to an older one may
+   cause failures or features to be disabled.
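+
+As referenced above, a minimal sketch of the pre-failover topology check
+follows. The function name and flavor metadata layout are hypothetical; the
+real check would live in the resize workflow, and ``loadbalancer_topology``
+is assumed to be the flavor capability that encodes the topology:
+
+.. code-block:: python
+
+    def validate_resize_flavor(current_flavor_data, new_flavor_data):
+        """Reject a resize that would change the load balancer topology."""
+        current_topology = current_flavor_data.get(
+            'loadbalancer_topology', 'SINGLE')
+        new_topology = new_flavor_data.get(
+            'loadbalancer_topology', 'SINGLE')
+        # e.g. a standalone (SINGLE) load balancer cannot be resized to an
+        # ACTIVE_STANDBY flavor, and vice versa.
+        if current_topology != new_topology:
+            raise ValueError(
+                'Resizing cannot change the load balancer topology '
+                f'({current_topology} -> {new_topology}).')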
+ +References +========== +None diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000000..f6c5463a06 --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,18 @@ +hacking>=6.1.0,<6.2.0 # Apache-2.0 +requests-mock>=1.2.0 # Apache-2.0 +coverage!=4.4,>=4.0 # Apache-2.0 +fixtures>=3.0.0 # Apache-2.0/BSD +flake8-import-order>=0.18.0,<0.19.0 # LGPLv3 +oslotest>=3.2.0 # Apache-2.0 +pylint>=2.5.3,<4.0.0 # GPLv2 +stestr>=2.0.0 # Apache-2.0 +testrepository>=0.0.18 # Apache-2.0/BSD +testtools>=2.2.0 # MIT +testresources>=2.0.0 # Apache-2.0/BSD +testscenarios>=0.4 # Apache-2.0/BSD +doc8>=0.6.0 # Apache-2.0 +bandit!=1.6.0,>=1.1.0 # Apache-2.0 +# Required for pep8 - doc8 tests +sphinx>=2.0.0,!=2.1.0 # BSD +bashate>=0.5.1 # Apache-2.0 +WebTest>=2.0.26 # MIT diff --git a/tools/__init__.py b/tools/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tools/check_unit_test_structure.sh b/tools/check_unit_test_structure.sh new file mode 100755 index 0000000000..c7f226f257 --- /dev/null +++ b/tools/check_unit_test_structure.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +# This script identifies the unit test modules that do not correspond +# directly with a module in the code tree. See TESTING.rst for the +# intended structure. + +octavia_path=$(cd "$(dirname "$0")/.." && pwd) +base_test_path=octavia/tests/unit +test_path=$octavia_path/$base_test_path + +test_files=$(find ${test_path} -iname 'test_*.py') + +ignore_regexes=( + "^amphorae/drivers/haproxy/test_rest_api_driver_0_5.py$" + "^amphorae/drivers/haproxy/test_rest_api_driver_1_0.py$" + "^controller/worker/v2/tasks/test_database_tasks_quota.py$" +) + +error_count=0 +ignore_count=0 +total_count=0 +for test_file in ${test_files[@]}; do + relative_path=${test_file#$test_path/} + expected_path=$(dirname $octavia_path/octavia/$relative_path) + test_filename=$(basename "$test_file") + expected_filename=${test_filename#test_} + # Module filename (e.g. foo/bar.py -> foo/test_bar.py) + filename=$expected_path/$expected_filename + # Package dir (e.g. foo/ -> test_foo.py) + package_dir=${filename%.py} + if [ -d "$package_dir" ]; then + echo "Package dir: $base_test_path/$relative_path" + fi + if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then + for ignore_regex in ${ignore_regexes[@]}; do + if [[ "$relative_path" =~ $ignore_regex ]]; then + ignore_count=$((ignore_count + 1)) + continue 2 + fi + done + echo "Unexpected test file: $base_test_path/$relative_path" + error_count=$((error_count + 1)) + fi + total_count=$((total_count + 1)) +done + +if [ "$ignore_count" -ne 0 ]; then + echo "$ignore_count unmatched test modules were ignored" +fi + +if [ "$error_count" -eq 0 ]; then + echo 'Success! All test modules match targets in the code tree.' + exit 0 +else + echo "Failure! $error_count of $total_count test modules do not match targets in the code tree." + exit 1 +fi diff --git a/tools/coding-checks.sh b/tools/coding-checks.sh new file mode 100755 index 0000000000..0750a28126 --- /dev/null +++ b/tools/coding-checks.sh @@ -0,0 +1,66 @@ +#!/bin/sh +# This script is copied from neutron and adapted for octavia. +set -eu + +usage () { + echo "Usage: $0 [OPTION]..." + echo "Run octavia's coding check(s)" + echo "" + echo " -Y, --pylint [] Run pylint check on the entire octavia module or just files changed in basecommit (e.g. 
HEAD~1)" + echo " -h, --help Print this usage message" + echo + exit 0 +} + +join_args() { + if [ -z "$scriptargs" ]; then + scriptargs="$opt" + else + scriptargs="$scriptargs $opt" + fi +} + +process_options () { + i=1 + while [ $i -le $# ]; do + eval opt=\$$i + case $opt in + -h|--help) usage;; + -Y|--pylint) pylint=1;; + *) join_args;; + esac + i=$((i+1)) + done +} + +run_pylint () { + local target="${scriptargs:-all}" + + if [ "$target" = "all" ]; then + files="octavia" + else + case "$target" in + *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");; + *) echo "$target is an unrecognized basecommit"; exit 1;; + esac + fi + + echo "Running pylint..." + echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." + if [ -n "${files}" ]; then + pylint -j 0 --max-nested-blocks 7 --rcfile=.pylintrc --output-format=colorized ${files} + else + echo "No python changes in this commit, pylint check not required." + exit 0 + fi +} + +scriptargs= +pylint=1 + +process_options $@ + +if [ $pylint -eq 1 ]; then + run_pylint + exit 0 +fi diff --git a/tools/create_flow_docs.py b/tools/create_flow_docs.py new file mode 100755 index 0000000000..7c7ef4bbad --- /dev/null +++ b/tools/create_flow_docs.py @@ -0,0 +1,220 @@ +#!/usr/bin/env python +# Copyright 2016 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+#
+
+import argparse
+import importlib
+import os
+from unittest.mock import patch
+
+import graphviz
+from taskflow import engines
+
+from octavia.api.drivers import utils
+from octavia.common import constants
+from octavia.common import rpc
+from octavia.tests.common import data_model_helpers as dmh
+
+
+def main():
+    arg_parser = argparse.ArgumentParser(
+        description='Generate graphviz representations of the '
+                    'Octavia TaskFlow flows.')
+    arg_parser.add_argument('-f', '--flow-list', required=True,
+                            help='Path to flow list file')
+    arg_parser.add_argument('-o', '--output-directory', required=True,
+                            help='Path to the output directory')
+    args = arg_parser.parse_args()
+    generate(args.flow_list, args.output_directory)
+
+
+def generate(flow_list, output_directory):
+    # Create the diagrams
+    base_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+                             os.path.pardir)
+    diagram_list = []
+    with open(os.path.join(base_path, flow_list)) as flowlist:
+        for row in flowlist:
+            if row.startswith('#'):
+                continue
+            current_tuple = tuple(row.strip().split(' '))
+            current_class = getattr(importlib.import_module(current_tuple[0]),
+                                    current_tuple[1])
+            current_instance = current_class()
+            get_flow_method = getattr(current_instance, current_tuple[2])
+            if (current_tuple[1] == 'AmphoraFlows' and
+                    current_tuple[2] == 'get_failover_amphora_flow'):
+                amp1 = dmh.generate_amphora()
+                amp2 = dmh.generate_amphora()
+                lb = dmh.generate_load_balancer(amphorae=[amp1, amp2])
+                if 'v2' in current_tuple[0]:
+                    lb = utils.lb_dict_to_provider_dict(lb.to_dict())
+                    amp1 = amp1.to_dict()
+                current_engine = engines.load(
+                    get_flow_method(amp1, 2))
+            elif (current_tuple[1] == 'LoadBalancerFlows' and
+                  current_tuple[2] == 'get_create_load_balancer_flow'):
+                # Stub out RPC so the flow can load outside a deployment.
+                class fake_notifier:
+                    def prepare(self):
+                        pass
+                rpc.NOTIFIER = fake_notifier()
+                rpc.TRANSPORT = "fake"
+                rpc.NOTIFICATION_TRANSPORT = "fake"
+                current_engine = engines.load(
+                    get_flow_method(
+                        constants.TOPOLOGY_ACTIVE_STANDBY))
+            elif (current_tuple[1] == 'LoadBalancerFlows' and
+                  current_tuple[2] == 'get_delete_load_balancer_flow'):
+                lb = dmh.generate_load_balancer()
+                if 'v2' in current_tuple[0]:
+                    lb = utils.lb_dict_to_provider_dict(lb.to_dict())
+                    delete_flow = get_flow_method(lb)
+                else:
+                    delete_flow, store = get_flow_method(lb)
+                current_engine = engines.load(delete_flow)
+            elif (current_tuple[1] == 'LoadBalancerFlows' and
+                  current_tuple[2] ==
+                  'get_cascade_delete_load_balancer_flow'):
+                listeners = [{constants.LISTENER_ID:
+                              '368dffc7-7440-4ee0-aca5-11052d001b05'},
+                             {constants.LISTENER_ID:
+                              'd9c45ec4-9dbe-491b-9f21-6886562348bf'}]
+                pools = [{constants.POOL_ID:
+                          '6886a40b-1f2a-41a3-9ece-5c51845a7ac4'},
+                         {constants.POOL_ID:
+                          '08ada7a2-3eff-42c6-bdd8-b6f2ecd73358'}]
+                lb = dmh.generate_load_balancer()
+                with patch('octavia.db.repositories.AmphoraRepository.'
+                           'get_amphorae_ids_on_lb',
+                           return_value=[
+                               'a9aa2b0b-0442-471e-b400-e04847e3ef1f']):
+                    with patch('octavia.db.repositories.'
+ 'AmphoraMemberPortRepository.get_port_ids', + return_value=[ + '6e03e9ad-726a-46ee-90e0-1cad753ba1b0']): + if 'v2' in current_tuple[0]: + lb = utils.lb_dict_to_provider_dict(lb.to_dict()) + delete_flow = get_flow_method(lb, listeners, pools) + else: + delete_flow, store = get_flow_method(lb) + current_engine = engines.load(delete_flow) + elif (current_tuple[1] == 'LoadBalancerFlows' and + current_tuple[2] == 'get_failover_LB_flow'): + amp1 = dmh.generate_amphora() + amp2 = dmh.generate_amphora() + lb = dmh.generate_load_balancer( + amphorae=[amp1, amp2], + topology=constants.TOPOLOGY_ACTIVE_STANDBY) + if 'v2' in current_tuple[0]: + lb = utils.lb_dict_to_provider_dict(lb.to_dict()) + flavor = {constants.LOADBALANCER_TOPOLOGY: + constants.TOPOLOGY_ACTIVE_STANDBY} + lb[constants.FLAVOR] = flavor + amp1 = amp1.to_dict() + amp2 = amp2.to_dict() + current_engine = engines.load( + get_flow_method([amp1, amp2], lb)) + elif (current_tuple[1] == 'MemberFlows' and + current_tuple[2] == 'get_batch_update_members_flow'): + current_engine = engines.load( + get_flow_method([], [], [])) + else: + current_engine = engines.load(get_flow_method()) + current_engine.compile() + # We need to render svg and not dot here so we can scale + # the image in the restructured text page + src = graphviz.Source(current_engine.compilation. + execution_graph.export_to_dot()) + src.format = 'svg' + src.render(filename=current_tuple[1] + '-' + current_tuple[2], + directory=os.path.join(base_path, output_directory), + cleanup=True) + diagram_list.append((current_tuple[1], current_tuple[2])) + + # Create the class docs + diagram_list = sorted(diagram_list, key=getDiagKey) + class_tracker = None + current_doc_file = None + for doc_tuple in diagram_list: + # If we are still working on the same class, append + if doc_tuple[0] == class_tracker: + current_doc_file.write('\n') + current_doc_file.write(doc_tuple[1] + '\n') + current_doc_file.write('-' * len(doc_tuple[1]) + '\n') + current_doc_file.write('\n') + current_doc_file.write('.. only:: html\n') + current_doc_file.write('\n') + current_doc_file.write(' .. image:: ' + doc_tuple[0] + + '-' + doc_tuple[1] + '.svg\n') + current_doc_file.write(' :width: 660px\n') + current_doc_file.write(' :target: ../../../_images/' + + doc_tuple[0] + + '-' + doc_tuple[1] + '.svg\n') + current_doc_file.write('\n') + current_doc_file.write('.. only:: latex\n') + current_doc_file.write('\n') + current_doc_file.write(' .. image:: ' + doc_tuple[0] + + '-' + doc_tuple[1] + '.svg\n') + current_doc_file.write(' :width: 660px\n') + + # First or new class, create the file + else: + if current_doc_file is not None: + current_doc_file.close() + current_doc_file = open(os.path.join( + base_path, output_directory, doc_tuple[0] + '.rst'), 'w+') + class_tracker = doc_tuple[0] + + file_title = constants.FLOW_DOC_TITLES.get(doc_tuple[0], + 'Unknown Flows') + + current_doc_file.write('=' * len(file_title) + '\n') + current_doc_file.write(file_title + '\n') + current_doc_file.write('=' * len(file_title) + '\n') + current_doc_file.write('\n') + current_doc_file.write('.. contents::\n') + current_doc_file.write(' :depth: 2\n') + current_doc_file.write(' :backlinks: top\n') + current_doc_file.write('\n') + current_doc_file.write('.. 
only:: html\n') + current_doc_file.write('\n') + current_doc_file.write(' Click on any flow to view full size.\n') + current_doc_file.write('\n') + current_doc_file.write(doc_tuple[1] + '\n') + current_doc_file.write('-' * len(doc_tuple[1]) + '\n') + current_doc_file.write('\n') + current_doc_file.write('.. only:: html\n') + current_doc_file.write('\n') + current_doc_file.write(' .. image:: ' + doc_tuple[0] + + '-' + doc_tuple[1] + '.svg\n') + current_doc_file.write(' :width: 660px\n') + current_doc_file.write(' :target: ../../../_images/' + + doc_tuple[0] + + '-' + doc_tuple[1] + '.svg\n') + current_doc_file.write('\n') + current_doc_file.write('.. only:: latex\n') + current_doc_file.write('\n') + current_doc_file.write(' .. image:: ' + doc_tuple[0] + + '-' + doc_tuple[1] + '.svg\n') + current_doc_file.write(' :width: 660px\n') + + current_doc_file.close() + + +def getDiagKey(item): + return item[0] + '-' + item[1] + + +if __name__ == "__main__": + main() diff --git a/tools/flow-list-v2.txt b/tools/flow-list-v2.txt new file mode 100644 index 0000000000..49710ee3a6 --- /dev/null +++ b/tools/flow-list-v2.txt @@ -0,0 +1,31 @@ +# List of TaskFlow flows that should be documented +# Some flows are used by other flows, so just list the primary flows here +# Format: +# module class flow +octavia.controller.worker.v2.flows.amphora_flows AmphoraFlows get_failover_amphora_flow +octavia.controller.worker.v2.flows.amphora_flows AmphoraFlows cert_rotate_amphora_flow +octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_create_load_balancer_flow +octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_delete_load_balancer_flow +octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_cascade_delete_load_balancer_flow +octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_update_load_balancer_flow +octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_failover_LB_flow +octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_create_listener_flow +octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_create_all_listeners_flow +octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_delete_listener_flow +octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_update_listener_flow +octavia.controller.worker.v2.flows.pool_flows PoolFlows get_create_pool_flow +octavia.controller.worker.v2.flows.pool_flows PoolFlows get_delete_pool_flow +octavia.controller.worker.v2.flows.pool_flows PoolFlows get_update_pool_flow +octavia.controller.worker.v2.flows.member_flows MemberFlows get_create_member_flow +octavia.controller.worker.v2.flows.member_flows MemberFlows get_delete_member_flow +octavia.controller.worker.v2.flows.member_flows MemberFlows get_update_member_flow +octavia.controller.worker.v2.flows.member_flows MemberFlows get_batch_update_members_flow +octavia.controller.worker.v2.flows.health_monitor_flows HealthMonitorFlows get_create_health_monitor_flow +octavia.controller.worker.v2.flows.health_monitor_flows HealthMonitorFlows get_delete_health_monitor_flow +octavia.controller.worker.v2.flows.health_monitor_flows HealthMonitorFlows get_update_health_monitor_flow +octavia.controller.worker.v2.flows.l7policy_flows L7PolicyFlows get_create_l7policy_flow +octavia.controller.worker.v2.flows.l7policy_flows L7PolicyFlows get_delete_l7policy_flow +octavia.controller.worker.v2.flows.l7policy_flows L7PolicyFlows get_update_l7policy_flow 
+octavia.controller.worker.v2.flows.l7rule_flows L7RuleFlows get_create_l7rule_flow +octavia.controller.worker.v2.flows.l7rule_flows L7RuleFlows get_delete_l7rule_flow +octavia.controller.worker.v2.flows.l7rule_flows L7RuleFlows get_update_l7rule_flow diff --git a/tools/misc-sanity-checks.sh b/tools/misc-sanity-checks.sh new file mode 100755 index 0000000000..3f2ab2eb8d --- /dev/null +++ b/tools/misc-sanity-checks.sh @@ -0,0 +1,42 @@ +#! /bin/sh + +# Copyright (C) 2014 VA Linux Systems Japan K.K. +# Copyright (C) 2014 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +TMPDIR=`mktemp -d /tmp/${0##*/}.XXXXXX` || exit 1 +export TMPDIR +trap "rm -rf $TMPDIR" EXIT + +FAILURES=$TMPDIR/failures + + +check_pot_files_errors () { + find octavia -type f -regex '.*\.pot?' \ + -print0|xargs -0 -n 1 --no-run-if-empty msgfmt \ + --check-format -o /dev/null + if [ "$?" -ne 0 ]; then + echo "PO files syntax is not correct!" >>$FAILURES + fi +} + +# Add your checks here... +check_pot_files_errors + +# Fail, if there are emitted failures +if [ -f $FAILURES ]; then + cat $FAILURES + exit 1 +fi diff --git a/tools/pkcs7_to_pem.py b/tools/pkcs7_to_pem.py new file mode 100755 index 0000000000..df0515811b --- /dev/null +++ b/tools/pkcs7_to_pem.py @@ -0,0 +1,104 @@ +#!/usr/bin/python +# +# Copyright 2016 IBM. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Converts a PKCS7 certificate bundle in DER or PEM format into +# a sequence of PEM-encoded certificates. + +import base64 +import sys + +from cryptography.hazmat import backends +from cryptography.hazmat.primitives import serialization +from cryptography import x509 +from pyasn1.codec.der import decoder as der_decoder +from pyasn1.codec.der import encoder as der_encoder +from pyasn1_modules import rfc2315 + + +PKCS7_BEG = """-----BEGIN PKCS7-----""" +PKCS7_END = """-----END PKCS7-----""" + + +# Based on pyasn1-modules.pem.readPemBlocksFromFile, but eliminates the need +# to operate on a file handle. 
+def _read_pem_blocks(data, *markers): + stSpam, stHam, stDump = 0, 1, 2 + + startMarkers = dict(map(lambda x: (x[1], x[0]), + enumerate(map(lambda x: x[0], markers)))) + stopMarkers = dict(map(lambda x: (x[1], x[0]), + enumerate(map(lambda x: x[1], markers)))) + idx = -1 + state = stSpam + data = data.decode('utf-8') + for certLine in data.replace('\r', '').split('\n'): + if not certLine: + break + certLine = certLine.strip() + if state == stSpam: + if certLine in startMarkers: + certLines = [] + idx = startMarkers[certLine] + state = stHam + continue + if state == stHam: + if certLine in stopMarkers and stopMarkers[certLine] == idx: + state = stDump + else: + certLines.append(certLine) + if state == stDump: + yield b''.join([ + base64.b64decode(x) for x in certLines]) + state = stSpam + + +def _process_pkcs7_substrate(substrate): + contentInfo, _ = der_decoder.decode(substrate, + asn1Spec=rfc2315.ContentInfo()) + + contentType = contentInfo.getComponentByName('contentType') + + if contentType != rfc2315.signedData: + raise Exception + + content, _ = der_decoder.decode( + contentInfo.getComponentByName('content'), + asn1Spec=rfc2315.SignedData()) + + for blob in content.getComponentByName('certificates'): + cert = x509.load_der_x509_certificate(der_encoder.encode(blob), + backends.default_backend()) + print(cert.public_bytes( + encoding=serialization.Encoding.PEM).decode( + 'unicode_escape'), end='') + + +# Main program code +if len(sys.argv) != 1: + print('Usage: cat | %s' % sys.argv[0]) + sys.exit(-1) + +# Need to read in binary bytes in case DER encoding of PKCS7 bundle +data = sys.stdin.buffer.read() + +# Look for PEM encoding +if PKCS7_BEG in str(data): + for substrate in _read_pem_blocks(data, (PKCS7_BEG, PKCS7_END)): + _process_pkcs7_substrate(substrate) + +# If no PEM encoding, assume this is DER encoded and try to decode +else: + _process_pkcs7_substrate(data) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000..50ac1e3585 --- /dev/null +++ b/tox.ini @@ -0,0 +1,214 @@ +[tox] +minversion = 3.18.0 +envlist = docs,py3,functional-py3,pep8,specs,pip-missing-reqs,pip-extra-reqs +ignore_basepython_conflict = True + +[testenv] +usedevelop = True +setenv = + PYTHONWARNINGS=always::DeprecationWarning +install_command = + pip install {opts} {packages} +allowlist_externals = find +deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + stestr run {posargs} + stestr slowest + +[testenv:api-ref] +# This environment is called from CI scripts to test and publish +# the API Ref to docs.openstack.org. 
+deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/requirements.txt + -r{toxinidir}/doc/requirements.txt +allowlist_externals = rm +commands = + rm -rf api-ref/build + sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html + +[testenv:cover] +allowlist_externals = sh +setenv = + {[testenv]setenv} + PYTHON=coverage run --source octavia --parallel-mode +commands = + coverage erase + sh -c 'OS_TEST_PATH={toxinidir}/octavia/tests/unit stestr run {posargs}' + sh -c 'OS_TEST_PATH={toxinidir}/octavia/tests/functional stestr run {posargs}' + coverage combine + # Generate a new HTML report with the combined results + # otherwise the HTML report will only show partial results + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report --fail-under=92 --skip-covered + +[testenv:py3] +setenv = OS_TEST_PATH={toxinidir}/octavia/tests/unit + PYTHONWARNINGS=always::DeprecationWarning + +[testenv:functional] +# This will use whatever 'basepython' is set to, so the name is ambiguous. +setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional + PYTHONWARNINGS=always::DeprecationWarning + +[testenv:functional-py3] +setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional + +[testenv:functional-py39] +setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional + +[testenv:functional-py310] +setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional + +[testenv:functional-py311] +setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional + +[testenv:functional-py312] +setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional + +[testenv:debug] +commands = oslo_debug_helper {posargs} + +[testenv:pep8] +commands = flake8 + # RST linter + doc8 --ignore-path doc/source/contributor/modules specs \ + doc/source octavia CONSTITUTION.rst HACKING.rst README.rst \ + TESTING.rst + # Run security linter + {[testenv:bandit]commands} + # Make sure specs follow our template + find . -type f -name "*.pyc" -delete + python -m unittest specs-tests.test_titles + sh ./tools/misc-sanity-checks.sh + {toxinidir}/tools/coding-checks.sh --pylint {posargs} + {toxinidir}/tools/check_unit_test_structure.sh + {[testenv:bashate]commands} +allowlist_externals = + sh + find + bash + {toxinidir}/tools/coding-checks.sh + {toxinidir}/tools/check_unit_test_structure.sh + +[testenv:docs] +deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/requirements.txt + -r{toxinidir}/doc/requirements.txt +allowlist_externals = rm +commands = + rm -rf doc/build api-guide/build api-ref/build doc/source/contributor/modules + sphinx-build -W -b html doc/source doc/build/html + sphinx-build -W -b html api-ref/source api-ref/build/html + +[testenv:pdf-docs] +deps = {[testenv:docs]deps} +allowlist_externals = + make + rm +commands = + rm -rf doc/build/pdf + sphinx-build -W -b latex doc/source doc/build/pdf + make -C doc/build/pdf + +[testenv:venv] +commands = {posargs} + +[testenv:genconfig] +allowlist_externals = mkdir +commands = + mkdir -p etc/octavia + oslo-config-generator \ + --config-file etc/config/octavia-config-generator.conf + +[testenv:genpolicy] +allowlist_externals = mkdir +commands = + mkdir -p etc/octavia + oslopolicy-sample-generator \ + --config-file etc/policy/octavia-policy-generator.conf + +[testenv:specs] +allowlist_externals = + rm + find +commands = + find . 
-type f -name "*.pyc" -delete + python -m unittest specs-tests.test_titles + + +[testenv:bandit] +commands = bandit -r octavia -ll -ii -x tests + +[flake8] +# [H104]: Empty file with only comments +# [W504]: Line break after binary operator +# [I202]: Additional newline in a group of imports. +ignore = H104,W504,I202 +show-source = true +builtins = _ +exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build +import-order-style = pep8 +# [H106]: Don't put vim configuration in source files +# [H203]: Use assertIs(Not)None to check for None +# [H204]: Use assert(Not)Equal to check for equality +# [H205]: Use assert(Greater|Less)(Equal) for comparison +# [H904]: Delay string interpolations at logging calls +enable-extensions=H106,H203,H204,H205,H904 + +[testenv:bashate] +commands = bash -c "find {toxinidir} \ + -not \( -type d -name .tox\* -prune \) \ + -not \( -type d -name .venv\* -prune \) \ + -type f \ + -name \*.sh \ +# [E005]: File does not begin with #! or have a .sh prefix +# [E006]: Check for lines longer than 79 columns +# [E042]: Local declaration hides errors +# [E043]: Arithmetic compound has inconsistent return semantics + -print0 | xargs -0 bashate -v -iE006 -eE005,E042,E043" + +[hacking] +import_exceptions = octavia.i18n + +[flake8:local-plugins] +extension = + O323 = checks:assert_equal_true_or_false + O324 = checks:no_mutable_default_args + O339 = checks:no_log_warn + O341 = checks:no_translate_logs + O342 = checks:check_raised_localized_exceptions + O345 = checks:check_no_eventlet_imports + O346 = checks:check_line_continuation_no_backslash + O347 = checks:revert_must_have_kwargs + O348 = checks:check_no_logging_imports +paths = + ./octavia/hacking + +[doc8] +max-line-length = 79 + +[testenv:releasenotes] +deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/doc/requirements.txt +allowlist_externals = rm +commands = + rm -rf releasenotes/build + sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html + +[testenv:pip-missing-reqs] +deps = pip-check-reqs>=2.3.2 + -r{toxinidir}/requirements.txt +commands = + pip-missing-reqs --ignore-file=octavia/tests/* octavia + +[testenv:pip-extra-reqs] +deps = pip-check-reqs>=2.3.2 + -r{toxinidir}/requirements.txt +commands = + pip-extra-reqs --ignore-file=octavia/tests/* octavia diff --git a/zuul.d/amphorav2-jobs.yaml b/zuul.d/amphorav2-jobs.yaml new file mode 100644 index 0000000000..421578bdb4 --- /dev/null +++ b/zuul.d/amphorav2-jobs.yaml @@ -0,0 +1,128 @@ +- job: + name: octavia-v2-dsvm-scenario-amphora-v2 + parent: octavia-v2-dsvm-scenario + vars: + devstack_localrc: + OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True + devstack_local_conf: + post-config: + $OCTAVIA_CONF: + api_settings: + default_provider_driver: amphorav2 + enabled_provider_drivers: amphorav2:The v2 amphora driver. + task_flow: + jobboard_expiration_time: 100 + test-config: + "$TEMPEST_CONFIG": + load_balancer: + enabled_provider_drivers: amphorav2:The v2 amphora driver. + provider: amphorav2 + +- job: + name: octavia-v2-dsvm-noop-api-amphora-v2 + parent: octavia-v2-dsvm-noop-api + vars: + devstack_localrc: + OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True + devstack_local_conf: + post-config: + $OCTAVIA_CONF: + api_settings: + default_provider_driver: amphorav2 + enabled_provider_drivers: amphorav2:The v2 amphora driver. + task_flow: + jobboard_expiration_time: 100 + test-config: + "$TEMPEST_CONFIG": + load_balancer: + enabled_provider_drivers: amphorav2:The v2 amphora driver. 
+              provider: amphorav2
+
+- job:
+    name: octavia-v2-dsvm-tls-barbican-amphora-v2
+    parent: octavia-v2-dsvm-tls-barbican
+    vars:
+      devstack_localrc:
+        OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True
+      devstack_local_conf:
+        post-config:
+          $OCTAVIA_CONF:
+            api_settings:
+              default_provider_driver: amphorav2
+              enabled_provider_drivers: amphorav2:The v2 amphora driver.
+            task_flow:
+              jobboard_expiration_time: 100
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              enabled_provider_drivers: amphorav2:The v2 amphora driver.
+              provider: amphorav2
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-scenario-amphora-v2
+    parent: octavia-v2-act-stdby-dsvm-scenario
+    vars:
+      devstack_localrc:
+        OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True
+      devstack_local_conf:
+        post-config:
+          $OCTAVIA_CONF:
+            api_settings:
+              default_provider_driver: amphorav2
+              enabled_provider_drivers: amphorav2:The v2 amphora driver.
+            task_flow:
+              jobboard_expiration_time: 100
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              enabled_provider_drivers: amphorav2:The v2 amphora driver.
+              provider: amphorav2
+
+- job:
+    name: octavia-grenade-amphora-v2
+    parent: octavia-grenade
+    vars:
+      grenade_devstack_localrc:
+        new:
+          OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True
+          OCTAVIA_JOBBOARD_EXPIRATION_TIME: 100
+
+- job:
+    name: octavia-v2-dsvm-cinder-amphora-v2
+    parent: octavia-v2-dsvm-cinder-amphora
+    vars:
+      devstack_localrc:
+        OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True
+      devstack_local_conf:
+        post-config:
+          $OCTAVIA_CONF:
+            api_settings:
+              default_provider_driver: amphorav2
+              enabled_provider_drivers: amphorav2:The v2 amphora driver.
+            task_flow:
+              jobboard_expiration_time: 100
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              enabled_provider_drivers: amphorav2:The v2 amphora driver.
+              provider: amphorav2
+
+- job:
+    name: octavia-v2-dsvm-scenario-two-node-amphora-v2
+    parent: octavia-v2-dsvm-scenario-two-node
+    vars:
+      devstack_localrc:
+        OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True
+      devstack_local_conf:
+        post-config:
+          $OCTAVIA_CONF:
+            api_settings:
+              default_provider_driver: amphorav2
+              enabled_provider_drivers: amphorav2:The v2 amphora driver.
+            task_flow:
+              jobboard_expiration_time: 100
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              enabled_provider_drivers: amphorav2:The v2 amphora driver.
+              provider: amphorav2
\ No newline at end of file
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
new file mode 100644
index 0000000000..72ddd2e988
--- /dev/null
+++ b/zuul.d/jobs.yaml
@@ -0,0 +1,245 @@
+- job:
+    name: publish-openstack-octavia-amphora-image
+    parent: publish-openstack-artifacts
+    run: playbooks/image-build/run.yaml
+    post-run: playbooks/image-build/post.yaml
+    required-projects:
+      - openstack/diskimage-builder
+      - openstack/octavia
+      - openstack/octavia-lib
+
+- job:
+    name: publish-openstack-octavia-amphora-image-noble
+    parent: publish-openstack-octavia-amphora-image
+    nodeset: ubuntu-noble
+    description: |
+      Publish Ubuntu Noble (24.04) based amphora image to tarballs.o.o.
+    vars:
+      amphora_os: ubuntu
+      amphora_os_release: noble
+
+- job:
+    name: publish-openstack-octavia-amphora-image-centos-9-stream
+    nodeset: centos-9-stream
+    parent: publish-openstack-octavia-amphora-image
+    description: |
+      Publish CentOS 9 Stream based amphora image to tarballs.o.o.
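+    # NOTE: amphora_os / amphora_os_release appear to be inputs to the
+    # shared image-build playbook (playbooks/image-build/run.yaml),
+    # selecting the base distribution for the diskimage-builder run.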
+    vars:
+      amphora_os: centos
+      amphora_os_release: 9-stream
+
+- job:
+    name: octavia-grenade
+    parent: grenade
+    nodeset: octavia-single-node-ubuntu-noble
+    required-projects: &o-grenade-required-projects
+      - opendev.org/openstack/grenade
+      - opendev.org/openstack/octavia
+      - opendev.org/openstack/octavia-lib
+      - opendev.org/openstack/octavia-tempest-plugin
+      - opendev.org/openstack/python-octaviaclient
+    vars: &o-grenade-vars
+      grenade_devstack_localrc:
+        shared:
+          DIB_LOCAL_ELEMENTS: openstack-ci-mirrors
+          LIBVIRT_TYPE: kvm
+          LIBVIRT_CPU_MODE: host-passthrough
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              check_interval: 1
+              check_timeout: 180
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron.git
+        octavia: https://opendev.org/openstack/octavia.git
+        octavia-tempest-plugin: https://opendev.org/openstack/octavia-tempest-plugin.git
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-api: false
+        c-bak: false
+        c-vol: false
+        cinder: false
+        octavia: true
+        o-api: true
+        o-cw: true
+        o-hm: true
+        o-hk: true
+      tempest_plugins:
+        - octavia-tempest-plugin
+      tempest_test_regex: ^octavia_tempest_plugin.*\[.*\bsmoke\b.*\]
+      tox_envlist: all
+      zuul_copy_output:
+        '/var/log/dib-build/': 'logs'
+        '/var/log/octavia/octavia-tenant-traffic.log': 'logs'
+        '/var/log/octavia/octavia-amphora.log': 'logs'
+
+- job:
+    name: octavia-grenade-skip-level
+    description: |
+      Grenade job that skips a release, validating that deployers can skip
+      specific releases as prescribed by our process.
+    parent: grenade-skip-level
+    nodeset: octavia-single-node-ubuntu-noble
+    required-projects: *o-grenade-required-projects
+    vars: *o-grenade-vars
+
+- job:
+    name: octavia-v2-dsvm-scenario-jobboard
+    parent: octavia-v2-dsvm-scenario
+    vars:
+      devstack_localrc:
+        OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True
+
+- job:
+    name: octavia-v2-dsvm-scenario-traffic-ops-jobboard
+    parent: octavia-v2-dsvm-scenario-traffic-ops
+    vars:
+      devstack_localrc:
+        OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True
+
+- job:
+    name: octavia-v2-dsvm-scenario-non-traffic-ops-jobboard
+    parent: octavia-v2-dsvm-scenario-non-traffic-ops
+    vars:
+      devstack_localrc:
+        OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True
+
+- job:
+    name: octavia-v2-dsvm-scenario-traffic-ops-jobboard-etcd
+    parent: octavia-v2-dsvm-scenario-traffic-ops
+    vars:
+      devstack_localrc:
+        OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True
+        OCTAVIA_JOBBOARD_BACKEND: etcd
+    required-projects:
+      - openstack/taskflow
+
+- job:
+    name: octavia-v2-dsvm-scenario-non-traffic-ops-jobboard-etcd
+    parent: octavia-v2-dsvm-scenario-non-traffic-ops
+    vars:
+      devstack_localrc:
+        OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD: True
+        OCTAVIA_JOBBOARD_BACKEND: etcd
+    required-projects:
+      - openstack/taskflow
+
+- project-template:
+    name: octavia-tox-tips
+    check:
+      jobs:
+        - octavia-tox-py311-tips
+        - octavia-tox-functional-py311-tips
+
+- job:
+    name: octavia-tox-py311-tips
+    parent: openstack-tox-py311
+    description: |
+      Run tox python 3.11 unit tests against master of related libraries.
+    vars:
+      tox_install_siblings: true
+      zuul_work_dir: src/opendev.org/openstack/octavia
+    required-projects:
+      - openstack/octavia-lib
+      - openstack/octavia
+
+- job:
+    name: octavia-tox-functional-py311-tips
+    parent: openstack-tox-functional-py311
+    description: |
+      Run tox python 3.11 functional tests against master of related
+      libraries.
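+    # NOTE: tox_install_siblings makes the tox job install the
+    # required-projects below from source, so these tests run against
+    # the master branch of octavia-lib rather than a released package.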
+    vars:
+      tox_install_siblings: true
+      zuul_work_dir: src/opendev.org/openstack/octavia
+    required-projects:
+      - openstack/octavia-lib
+      - openstack/octavia
+
+- job:
+    name: octavia-amphora-image-build
+    parent: base
+    description: |
+      Builds the amphora image using the released diskimage-builder version,
+      not Git master. This job does not publish the image.
+    run: playbooks/image-build/run.yaml
+    required-projects:
+      - openstack/diskimage-builder
+      - openstack/octavia
+      - openstack/octavia-lib
+    vars:
+      amphora_os: ubuntu
+      amphora_os_release: noble
+
+- job:
+    name: octavia-amphora-image-build-live-noble
+    parent: octavia-amphora-image-build
+    description: |
+      Builds an Ubuntu Noble amphora image using diskimage-builder from Git
+      master. This job does not publish the image.
+    vars:
+      amphora_os: ubuntu
+      amphora_os_release: noble
+
+- job:
+    name: octavia-amphora-image-build-live-centos-9-stream
+    parent: octavia-amphora-image-build
+    nodeset: centos-9-stream
+    description: |
+      Builds a CentOS 9 Stream amphora image using diskimage-builder from Git
+      master. This job does not publish the image.
+    vars:
+      amphora_os: centos
+      amphora_os_release: 9-stream
+
+- job:
+    name: octavia-amphora-image-build-live-rocky-9
+    parent: octavia-amphora-image-build
+    nodeset: centos-9-stream
+    description: |
+      Builds a Rocky Linux 9 amphora image using diskimage-builder from Git
+      master. This job does not publish the image.
+    vars:
+      amphora_os: rocky
+      amphora_os_release: 9
+
+- job:
+    name: octavia-v2-dsvm-scenario-nftables
+    parent: octavia-v2-dsvm-scenario
+    vars:
+      devstack_localrc:
+        OCTAVIA_AMP_USE_NFTABLES: True
+
+- job:
+    name: openstack-tox-pip-check-reqs
+    parent: openstack-tox
+    description: |
+      Run pip-missing-reqs and pip-extra-reqs tests to check for missing or
+      extra requirements.txt entries.
+
+      Uses tox with the ``pip-missing-reqs`` and ``pip-extra-reqs``
+      environments.
+    required-projects:
+      - openstack/octavia
+    vars:
+      # TODO (johnsom) Add pip-extra-reqs to this list when fixed
+      tox_envlist: pip-missing-reqs
+
+- job:
+    name: octavia-v2-dsvm-scenario-fips
+    parent: octavia-v2-dsvm-scenario
+    nodeset: octavia-single-node-centos-9-stream
+    description: |
+      Functional testing for a FIPS-enabled CentOS 9 system.
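+    # NOTE: the enable-fips pre-run playbook switches the node into FIPS
+    # mode (which requires a reboot) before devstack runs, which is likely
+    # why the timeout below is extended.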
+    pre-run: playbooks/enable-fips.yaml
+    timeout: 10800
+    vars:
+      nslookup_target: 'opendev.org'
+      devstack_localrc:
+        OCTAVIA_AMP_BASE_OS: centos
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: 9-stream
+        OCTAVIA_AMP_IMAGE_SIZE: 3
+        OCTAVIA_AMP_ENABLE_FIPS: True
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
new file mode 100644
index 0000000000..7c5269adaf
--- /dev/null
+++ b/zuul.d/projects.yaml
@@ -0,0 +1,129 @@
+# Note: Some official OpenStack-wide jobs are still defined in the
+# project-config repository.
+- project:
+    templates:
+      - check-requirements
+      - periodic-stable-jobs-neutron
+      - openstack-cover-jobs
+      - openstack-python3-jobs
+      - publish-openstack-docs-pti
+      - release-notes-jobs-python3
+      - octavia-tox-tips
+    check:
+      jobs:
+        - openstack-tox-pip-check-reqs:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^api-ref/.*$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^octavia/tests/.*$
+              - ^releasenotes/.*$
+              - ^\.pre-commit-config\.yaml$
+        - openstack-tox-functional-py310:
+            irrelevant-files: &tox-func-irrelevant-files
+              - ^.*\.rst$
+              - ^api-ref/.*$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^octavia/tests/unit/.*$
+              - ^releasenotes/.*$
+              - ^\.pre-commit-config\.yaml$
+        - openstack-tox-functional-py311:
+            irrelevant-files: *tox-func-irrelevant-files
+        # Not yet defined:
+#        - openstack-tox-functional-py312:
+#            irrelevant-files: *tox-func-irrelevant-files
+#            voting: false
+        - octavia-v2-dsvm-noop-api:
+            irrelevant-files: &irrelevant-files
+              - ^.*\.rst$
+              - ^api-ref/.*$
+              - ^doc/.*$
+              - ^octavia/tests/.*$
+              - ^releasenotes/.*$
+              - ^\.pre-commit-config\.yaml$
+        - octavia-v2-dsvm-scenario-traffic-ops:
+            irrelevant-files: *irrelevant-files
+        - octavia-v2-dsvm-scenario-non-traffic-ops:
+            irrelevant-files: *irrelevant-files
+        - octavia-v2-dsvm-scenario-traffic-ops-jobboard:
+            irrelevant-files: *irrelevant-files
+        - octavia-v2-dsvm-scenario-non-traffic-ops-jobboard:
+            irrelevant-files: *irrelevant-files
+        - octavia-v2-dsvm-tls-barbican:
+            irrelevant-files: *irrelevant-files
+        - octavia-grenade:
+            irrelevant-files: &grenade-irrelevant-files
+              - ^.*\.rst$
+              - ^api-ref/.*$
+              - ^doc/.*$
+              - ^octavia/tests/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tools/.*$
+              - ^(test-|)requirements.txt$
+              - ^tox.ini$
+              - ^\.pre-commit-config\.yaml$
+        - octavia-grenade-skip-level:
+            irrelevant-files: *grenade-irrelevant-files
+            voting: false
+        - octavia-v2-act-stdby-dsvm-scenario:
+            irrelevant-files: *irrelevant-files
+            voting: false
+        - octavia-v2-dsvm-cinder-amphora:
+            irrelevant-files: *irrelevant-files
+            voting: false
+        - octavia-v2-dsvm-scenario-two-node:
+            irrelevant-files: *irrelevant-files
+            voting: false
+        - octavia-v2-dsvm-scenario-ipv6-only:
+            irrelevant-files: *irrelevant-files
+            voting: false
+    queue: octavia
+    gate:
+      fail-fast: true
+      jobs:
+        - openstack-tox-pip-check-reqs:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^api-ref/.*$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^octavia/tests/.*$
+              - ^releasenotes/.*$
+              - ^\.pre-commit-config\.yaml$
+        - openstack-tox-functional-py311:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^api-ref/.*$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^octavia/tests/unit/.*$
+              - ^releasenotes/.*$
+              - ^\.pre-commit-config\.yaml$
+        - octavia-v2-dsvm-noop-api
+        - octavia-v2-dsvm-scenario-traffic-ops
+        - octavia-v2-dsvm-scenario-non-traffic-ops
+        - octavia-v2-dsvm-tls-barbican
+        - octavia-grenade
+        #- octavia-grenade-skip-level
+    periodic:
+      jobs:
+        - publish-openstack-octavia-amphora-image-noble:
+            branches:
+              regex: ^stable/.*$
+              negate: true
+        - publish-openstack-octavia-amphora-image-centos-9-stream:
+            branches:
+              regex: ^stable/.*$
+              negate: true
+        - octavia-amphora-image-build
+# Putting octavia-v2-dsvm-scenario-fips in periodic as the CentOS-based job
+# is too slow
+        - octavia-v2-dsvm-scenario-fips:
+            voting: false
+    experimental:
+      jobs:
+        - octavia-v2-dsvm-scenario-nftables
+        - octavia-v2-dsvm-scenario-traffic-ops-jobboard-etcd
+        - octavia-v2-dsvm-scenario-non-traffic-ops-jobboard-etcd