From fb259e1477997c8b16b8f8a09c410e7a506fd39c Mon Sep 17 00:00:00 2001 From: Fabian von Feilitzsch Date: Fri, 13 Mar 2020 15:04:04 -0400 Subject: [PATCH 01/90] Prevent 503s from killing the client during discovery --- dynamic/discovery.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/dynamic/discovery.py b/dynamic/discovery.py index 9468a274..24d48d81 100644 --- a/dynamic/discovery.py +++ b/dynamic/discovery.py @@ -23,7 +23,7 @@ from urllib3.exceptions import ProtocolError, MaxRetryError from kubernetes import __version__ -from .exceptions import NotFoundError, ResourceNotFoundError, ResourceNotUniqueError, ApiException +from .exceptions import NotFoundError, ResourceNotFoundError, ResourceNotUniqueError, ApiException, ServiceUnavailableError from .resource import Resource, ResourceList @@ -155,7 +155,10 @@ def get_resources_for_api_version(self, prefix, group, version, preferred): subresources = {} path = '/'.join(filter(None, [prefix, group, version])) - resources_response = self.client.request('GET', path).resources or [] + try: + resources_response = self.client.request('GET', path).resources or [] + except ServiceUnavailableError: + resources_response = [] resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response)) subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response)) @@ -251,13 +254,11 @@ def __search(self, parts, resources, reqParams): # Check if we've requested resources for this group if not resourcePart.resources: prefix, group, version = reqParams[0], reqParams[1], part - try: - resourcePart.resources = self.get_resources_for_api_version(prefix, - group, part, resourcePart.preferred) - except NotFoundError: - raise ResourceNotFoundError + resourcePart.resources = self.get_resources_for_api_version( + prefix, group, part, resourcePart.preferred) + self._cache['resources'][prefix][group][version] = resourcePart - self.__update_cache=True + 
self.__update_cache = True return self.__search(parts[1:], resourcePart.resources, reqParams) elif isinstance(resourcePart, dict): # In this case parts [0] will be a specified prefix, group, version From ab515103d8f33d80e1b0e6c2a995d686bee66445 Mon Sep 17 00:00:00 2001 From: vishnu667 Date: Thu, 14 May 2020 07:44:54 +0530 Subject: [PATCH 02/90] Adding ability to pass kube_config as a dict. --- config/kube_config.py | 57 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 3 deletions(-) diff --git a/config/kube_config.py b/config/kube_config.py index 43676728..f82265c0 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -706,13 +706,34 @@ def _get_kube_config_loader_for_yaml_file( config_base_path=None, **kwargs) +def _get_kube_config_loader( + filename=None,config_dict=None, persist_config=False, **kwargs): + + if (config_dict is None): + kcfg = KubeConfigMerger(filename) + if persist_config and 'config_persister' not in kwargs: + kwargs['config_persister'] = kcfg.save_changes + + if kcfg.config is None: + raise ConfigException( + 'Invalid kube-config file. 
' + 'No configuration found.') + return KubeConfigLoader( + config_dict=kcfg.config, + config_base_path=None, + **kwargs) + else: + return KubeConfigLoader( + config_dict=config_dict, + config_base_path=None, + **kwargs) def list_kube_config_contexts(config_file=None): if config_file is None: config_file = KUBE_CONFIG_DEFAULT_LOCATION - loader = _get_kube_config_loader_for_yaml_file(config_file) + loader = _get_kube_config_loader(filename=config_file) return loader.list_contexts(), loader.current_context @@ -734,8 +755,8 @@ def load_kube_config(config_file=None, context=None, if config_file is None: config_file = KUBE_CONFIG_DEFAULT_LOCATION - loader = _get_kube_config_loader_for_yaml_file( - config_file, active_context=context, + loader = _get_kube_config_loader( + filename=config_file, active_context=context, persist_config=persist_config) if client_configuration is None: @@ -745,6 +766,36 @@ def load_kube_config(config_file=None, context=None, else: loader.load_and_set(client_configuration) +def load_kube_config_from_dict(config_dict, context=None, + client_configuration=None, + persist_config=True): + """Loads authentication and cluster information from kube-config file + and stores them in kubernetes.client.configuration. + + :param config_dict: Takes the config file as a dict. + :param context: set the active context. If is set to None, current_context + from config file will be used. + :param client_configuration: The kubernetes.client.Configuration to + set configs to. + :param persist_config: If True, config file will be updated when changed + (e.g GCP token refresh). + """ + + if config_dict is None: + raise ConfigException( + 'Invalid kube-config dict. 
' + 'No configuration found.') + + loader = _get_kube_config_loader( + config_dict=config_dict, active_context=context, + persist_config=persist_config) + + if client_configuration is None: + config = type.__call__(Configuration) + loader.load_and_set(config) + Configuration.set_default(config) + else: + loader.load_and_set(client_configuration) def new_client_from_config( config_file=None, From 27a1b811d4e2e4e68c049d090c3298cb9e545751 Mon Sep 17 00:00:00 2001 From: vishnu667 Date: Thu, 14 May 2020 16:13:47 +0530 Subject: [PATCH 03/90] Re-using the _get_kube_config_loader in _get_kube_config_loader_for_yaml_file --- config/kube_config.py | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/config/kube_config.py b/config/kube_config.py index f82265c0..023ace74 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -688,28 +688,19 @@ def save_config(self, path): yaml.safe_dump(self.config_files[path], f, default_flow_style=False) - def _get_kube_config_loader_for_yaml_file( filename, persist_config=False, **kwargs): - - kcfg = KubeConfigMerger(filename) - if persist_config and 'config_persister' not in kwargs: - kwargs['config_persister'] = kcfg.save_changes - - if kcfg.config is None: - raise ConfigException( - 'Invalid kube-config file. 
' - 'No configuration found.') - - return KubeConfigLoader( - config_dict=kcfg.config, - config_base_path=None, + return _get_kube_config_loader( + filename=filename, + persist_config=persist_config, **kwargs) def _get_kube_config_loader( - filename=None,config_dict=None, persist_config=False, **kwargs): - - if (config_dict is None): + filename=None, + config_dict=None, + persist_config=False, + **kwargs): + if config_dict is None: kcfg = KubeConfigMerger(filename) if persist_config and 'config_persister' not in kwargs: kwargs['config_persister'] = kcfg.save_changes From e92495f0d1e2a108dcdd562900a4f7fa5ee1f5fe Mon Sep 17 00:00:00 2001 From: vishnu667 Date: Fri, 15 May 2020 20:16:04 +0530 Subject: [PATCH 04/90] Adding test Cases --- config/kube_config_test.py | 45 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index c8a4c93b..c8dce3b4 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -34,7 +34,9 @@ KubeConfigMerger, _cleanup_temp_files, _create_temp_file_with_content, _get_kube_config_loader_for_yaml_file, + _get_kube_config_loader, list_kube_config_contexts, load_kube_config, + load_kube_config_from_dict, new_client_from_config) BEARER_TOKEN_FORMAT = "Bearer %s" @@ -1229,6 +1231,16 @@ def test_load_kube_config(self): client_configuration=actual) self.assertEqual(expected, actual) + def test_load_kube_config_from_dict(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + + actual = FakeConfig() + load_kube_config_from_dict(config_dict=self.TEST_KUBE_CONFIG, + context="simple_token", + client_configuration=actual) + self.assertEqual(expected, actual) + def test_list_kube_config_contexts(self): config_file = self._create_temp_file( yaml.safe_dump(self.TEST_KUBE_CONFIG)) @@ -1344,6 +1356,39 @@ def test__get_kube_config_loader_for_yaml_file_persist(self): self.assertTrue(callable(actual._config_persister)) 
self.assertEquals(actual._config_persister.__name__, "save_changes") + def test__get_kube_config_loader_file_no_persist(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + config_file = self._create_temp_file( + yaml.safe_dump(self.TEST_KUBE_CONFIG)) + actual = _get_kube_config_loader(filename=config_file) + self.assertIsNone(actual._config_persister) + + def test__get_kube_config_loader_file_persist(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + config_file = self._create_temp_file( + yaml.safe_dump(self.TEST_KUBE_CONFIG)) + actual = _get_kube_config_loader(filename=config_file, + persist_config=True) + self.assertTrue(callable(actual._config_persister)) + self.assertEquals(actual._config_persister.__name__, "save_changes") + + def test__get_kube_config_loader_dict_no_persist(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + actual = _get_kube_config_loader_for_yaml_file(config_dict=self.TEST_KUBE_CONFIG) + self.assertIsNone(actual._config_persister) + + def test__get_kube_config_loader_dict_persist(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + actual = _get_kube_config_loader(config_dict=self.TEST_KUBE_CONFIG, + persist_config=True) + self.assertTrue(callable(actual._config_persister)) + self.assertEquals(actual._config_persister.__name__, "save_changes") + + class TestKubernetesClientConfiguration(BaseTestCase): # Verifies properties of kubernetes.client.Configuration. 
From c6e8194127009b19a95b1ba9b67820d748df9fbf Mon Sep 17 00:00:00 2001 From: vishnu667 Date: Fri, 15 May 2020 22:14:22 +0530 Subject: [PATCH 05/90] Adding config to init file and indentation fixes --- config/__init__.py | 2 +- config/kube_config_test.py | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/config/__init__.py b/config/__init__.py index 3476ff71..83bd581c 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -15,4 +15,4 @@ from .config_exception import ConfigException from .incluster_config import load_incluster_config from .kube_config import (list_kube_config_contexts, load_kube_config, - new_client_from_config) + new_client_from_config, load_kube_config_from_dict) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index c8dce3b4..bc855e1a 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -33,11 +33,10 @@ ConfigNode, FileOrData, KubeConfigLoader, KubeConfigMerger, _cleanup_temp_files, _create_temp_file_with_content, - _get_kube_config_loader_for_yaml_file, _get_kube_config_loader, + _get_kube_config_loader_for_yaml_file, list_kube_config_contexts, load_kube_config, - load_kube_config_from_dict, - new_client_from_config) + load_kube_config_from_dict, new_client_from_config) BEARER_TOKEN_FORMAT = "Bearer %s" @@ -1237,8 +1236,8 @@ def test_load_kube_config_from_dict(self): actual = FakeConfig() load_kube_config_from_dict(config_dict=self.TEST_KUBE_CONFIG, - context="simple_token", - client_configuration=actual) + context="simple_token", + client_configuration=actual) self.assertEqual(expected, actual) def test_list_kube_config_contexts(self): @@ -1370,26 +1369,26 @@ def test__get_kube_config_loader_file_persist(self): config_file = self._create_temp_file( yaml.safe_dump(self.TEST_KUBE_CONFIG)) actual = _get_kube_config_loader(filename=config_file, - persist_config=True) + persist_config=True) self.assertTrue(callable(actual._config_persister)) 
self.assertEquals(actual._config_persister.__name__, "save_changes") def test__get_kube_config_loader_dict_no_persist(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) - actual = _get_kube_config_loader_for_yaml_file(config_dict=self.TEST_KUBE_CONFIG) + actual = _get_kube_config_loader_for_yaml_file( + config_dict=self.TEST_KUBE_CONFIG) self.assertIsNone(actual._config_persister) def test__get_kube_config_loader_dict_persist(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) actual = _get_kube_config_loader(config_dict=self.TEST_KUBE_CONFIG, - persist_config=True) + persist_config=True) self.assertTrue(callable(actual._config_persister)) self.assertEquals(actual._config_persister.__name__, "save_changes") - class TestKubernetesClientConfiguration(BaseTestCase): # Verifies properties of kubernetes.client.Configuration. # These tests guard against changes to the upstream configuration class, From 6c327377e820dc70b02f379b6c5ec4dea22667e7 Mon Sep 17 00:00:00 2001 From: vishnu667 Date: Fri, 22 May 2020 00:05:15 +0530 Subject: [PATCH 06/90] test case fix __get_kube_config_loader_dict_no_persist --- config/kube_config_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index bc855e1a..d6501a58 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1376,7 +1376,7 @@ def test__get_kube_config_loader_file_persist(self): def test__get_kube_config_loader_dict_no_persist(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) - actual = _get_kube_config_loader_for_yaml_file( + actual = _get_kube_config_loader( config_dict=self.TEST_KUBE_CONFIG) self.assertIsNone(actual._config_persister) From 52a3bdc159b0a388a546dbbd85e8e0db5dcd05c5 Mon Sep 17 00:00:00 2001 From: vishnu667 Date: Fri, 22 May 2020 00:57:46 +0530 Subject: [PATCH 07/90] removing load from dict presist 
from the added test cases. --- config/kube_config_test.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index d6501a58..25508d8b 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1380,15 +1380,6 @@ def test__get_kube_config_loader_dict_no_persist(self): config_dict=self.TEST_KUBE_CONFIG) self.assertIsNone(actual._config_persister) - def test__get_kube_config_loader_dict_persist(self): - expected = FakeConfig(host=TEST_HOST, - token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) - actual = _get_kube_config_loader(config_dict=self.TEST_KUBE_CONFIG, - persist_config=True) - self.assertTrue(callable(actual._config_persister)) - self.assertEquals(actual._config_persister.__name__, "save_changes") - - class TestKubernetesClientConfiguration(BaseTestCase): # Verifies properties of kubernetes.client.Configuration. # These tests guard against changes to the upstream configuration class, From 3ff79da50cd4f02cb789eee12461ad70ba151303 Mon Sep 17 00:00:00 2001 From: ACXLM Date: Tue, 2 Jun 2020 13:55:43 +0800 Subject: [PATCH 08/90] fix cfg is none, load kube config error Signed-off-by: zhu hui --- config/kube_config.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/config/kube_config.py b/config/kube_config.py index 43676728..cb7a9bb3 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -663,9 +663,8 @@ def load_config(self, path): for item in ('clusters', 'contexts', 'users'): config_merged[item] = [] self.config_merged = ConfigNode(path, config_merged, path) - for item in ('clusters', 'contexts', 'users'): - self._merge(item, config.get(item, {}), path) + self._merge(item, config.get(item, []) or [], path) self.config_files[path] = config def _merge(self, item, add_cfg, path): From 91812350e4c2e8a965bd29c0e0c948d82a57936d Mon Sep 17 00:00:00 2001 From: vishnu667 Date: Sat, 20 Jun 2020 00:50:39 +0530 Subject: [PATCH 09/90] updated docstring for 
load_kube_config_from_dict --- config/kube_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/kube_config.py b/config/kube_config.py index 023ace74..423178b6 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -760,7 +760,7 @@ def load_kube_config(config_file=None, context=None, def load_kube_config_from_dict(config_dict, context=None, client_configuration=None, persist_config=True): - """Loads authentication and cluster information from kube-config file + """Loads authentication and cluster information from config_dict file and stores them in kubernetes.client.configuration. :param config_dict: Takes the config file as a dict. From 982de11392c481a248bb0090e223c7b176a29a1a Mon Sep 17 00:00:00 2001 From: Haowei Cai Date: Mon, 22 Jun 2020 15:43:21 -0700 Subject: [PATCH 10/90] generated by scripts/update-pycodestyle.sh in main repo --- config/__init__.py | 2 +- config/kube_config.py | 9 +++++++-- config/kube_config_test.py | 1 + 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/config/__init__.py b/config/__init__.py index 83bd581c..b57bf185 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -15,4 +15,4 @@ from .config_exception import ConfigException from .incluster_config import load_incluster_config from .kube_config import (list_kube_config_contexts, load_kube_config, - new_client_from_config, load_kube_config_from_dict) + load_kube_config_from_dict, new_client_from_config) diff --git a/config/kube_config.py b/config/kube_config.py index 423178b6..892e9043 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -688,6 +688,7 @@ def save_config(self, path): yaml.safe_dump(self.config_files[path], f, default_flow_style=False) + def _get_kube_config_loader_for_yaml_file( filename, persist_config=False, **kwargs): return _get_kube_config_loader( @@ -695,6 +696,7 @@ def _get_kube_config_loader_for_yaml_file( persist_config=persist_config, **kwargs) + def _get_kube_config_loader( filename=None, 
config_dict=None, @@ -719,6 +721,7 @@ def _get_kube_config_loader( config_base_path=None, **kwargs) + def list_kube_config_contexts(config_file=None): if config_file is None: @@ -757,9 +760,10 @@ def load_kube_config(config_file=None, context=None, else: loader.load_and_set(client_configuration) + def load_kube_config_from_dict(config_dict, context=None, - client_configuration=None, - persist_config=True): + client_configuration=None, + persist_config=True): """Loads authentication and cluster information from config_dict file and stores them in kubernetes.client.configuration. @@ -788,6 +792,7 @@ def load_kube_config_from_dict(config_dict, context=None, else: loader.load_and_set(client_configuration) + def new_client_from_config( config_file=None, context=None, diff --git a/config/kube_config_test.py b/config/kube_config_test.py index 25508d8b..3dca177c 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1380,6 +1380,7 @@ def test__get_kube_config_loader_dict_no_persist(self): config_dict=self.TEST_KUBE_CONFIG) self.assertIsNone(actual._config_persister) + class TestKubernetesClientConfiguration(BaseTestCase): # Verifies properties of kubernetes.client.Configuration. 
# These tests guard against changes to the upstream configuration class, From 30d9e2af1cb7d1416ba4e4f3ddc3c36653b7284f Mon Sep 17 00:00:00 2001 From: ACXLM Date: Tue, 9 Jun 2020 17:53:06 +0800 Subject: [PATCH 11/90] change test case --- config/kube_config_test.py | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index c8a4c93b..792d4bde 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1342,7 +1342,7 @@ def test__get_kube_config_loader_for_yaml_file_persist(self): actual = _get_kube_config_loader_for_yaml_file(config_file, persist_config=True) self.assertTrue(callable(actual._config_persister)) - self.assertEquals(actual._config_persister.__name__, "save_changes") + self.assertEqual(actual._config_persister.__name__, "save_changes") class TestKubernetesClientConfiguration(BaseTestCase): @@ -1517,6 +1517,26 @@ class TestKubeConfigMerger(BaseTestCase): } ] } + TEST_KUBE_CONFIG_PART6 = { + "current-context": "no_user", + "contexts": [ + { + "name": "no_user", + "context": { + "cluster": "default" + } + }, + ], + "clusters": [ + { + "name": "default", + "cluster": { + "server": TEST_HOST + } + }, + ], + "users": None + } def _create_multi_config(self): files = [] @@ -1525,7 +1545,8 @@ def _create_multi_config(self): self.TEST_KUBE_CONFIG_PART2, self.TEST_KUBE_CONFIG_PART3, self.TEST_KUBE_CONFIG_PART4, - self.TEST_KUBE_CONFIG_PART5): + self.TEST_KUBE_CONFIG_PART5, + self.TEST_KUBE_CONFIG_PART6): files.append(self._create_temp_file(yaml.safe_dump(part))) return ENV_KUBECONFIG_PATH_SEPARATOR.join(files) From a270ea294621687bad118a04508a8e820c0de8a8 Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Thu, 2 Jul 2020 23:04:48 +0530 Subject: [PATCH 12/90] FileOrData: Handle None object Return when object is None in FileOrData class Signed-off-by: Abhijeet Kasurde --- config/kube_config.py | 2 ++ config/kube_config_test.py | 10 ++++++++++ 2 files 
changed, 12 insertions(+) diff --git a/config/kube_config.py b/config/kube_config.py index 9786e0e5..ec185871 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -97,6 +97,8 @@ def __init__(self, obj, file_key_name, data_key_name=None, self._file = None self._data = None self._base64_file_content = base64_file_content + if not obj: + return if data_key_name in obj: self._data = obj[data_key_name] elif file_key_name in obj: diff --git a/config/kube_config_test.py b/config/kube_config_test.py index ef5616e4..0c3b42ae 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -255,6 +255,16 @@ def test_file_given_data_bytes_no_base64(self): data_key_name=TEST_DATA_KEY, base64_file_content=False) self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) + def test_file_given_no_object(self): + t = FileOrData(obj=None, file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY) + self.assertEqual(t.as_file(), None) + + def test_file_given_no_object_data(self): + t = FileOrData(obj=None, file_key_name=TEST_FILE_KEY, + data_key_name=TEST_DATA_KEY) + self.assertEqual(t.as_data(), None) + class TestConfigNode(BaseTestCase): From 06e48c585c003742ff42fb1995ec18e85226055e Mon Sep 17 00:00:00 2001 From: Mitar Date: Mon, 11 Feb 2019 00:23:39 -0800 Subject: [PATCH 13/90] Retry watch if request expires. --- watch/watch.py | 30 ++++++++++++++++++++++++++++-- watch/watch_test.py | 27 +++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/watch/watch.py b/watch/watch.py index fe7a9247..f67dbe45 100644 --- a/watch/watch.py +++ b/watch/watch.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import http import json import pydoc @@ -86,7 +87,7 @@ def get_watch_argument_name(self, func): def unmarshal_event(self, data, return_type): js = json.loads(data) js['raw_object'] = js['object'] - if return_type: + if return_type and js['type'] != 'ERROR': obj = SimpleNamespace(data=json.dumps(js['raw_object'])) js['object'] = self._api_client.deserialize(obj, return_type) if hasattr(js['object'], 'metadata'): @@ -102,6 +103,14 @@ def unmarshal_event(self, data, return_type): def stream(self, func, *args, **kwargs): """Watch an API resource and stream the result back via a generator. + Note that watching an API resource can expire. The method tries to + resume automatically once from the last result, but if that last result + is too old as well, an `ApiException` exception will be thrown with + ``code`` 410. In that case you have to recover yourself, probably + by listing the API resource to obtain the latest state and then + watching from that state on by setting ``resource_version`` to + one returned from listing. + :param func: The API function pointer. Any parameter to the function can be passed after this parameter. @@ -134,6 +143,7 @@ def stream(self, func, *args, **kwargs): self.resource_version = kwargs['resource_version'] timeouts = ('timeout_seconds' in kwargs) + retry_after_410 = False while True: resp = func(*args, **kwargs) try: @@ -141,7 +151,23 @@ def stream(self, func, *args, **kwargs): # unmarshal when we are receiving events from watch, # return raw string when we are streaming log if watch_arg == "watch": - yield self.unmarshal_event(line, return_type) + event = self.unmarshal_event(line, return_type) + if isinstance(event, dict) \ + and event['type'] == 'ERROR': + obj = event['raw_object'] + # Current request expired, let's retry, + # but only if we have not already retried. 
+ if not retry_after_410 and \ + obj['code'] == http.HTTPStatus.GONE: + retry_after_410 = True + break + else: + reason = "%s: %s" % (obj['reason'], obj['message']) + raise client.rest.ApiException(status=obj['code'], + reason=reason) + else: + retry_after_410 = False + yield event else: yield line if self._stop: diff --git a/watch/watch_test.py b/watch/watch_test.py index 6fec23ec..b8cefd20 100644 --- a/watch/watch_test.py +++ b/watch/watch_test.py @@ -16,6 +16,8 @@ from mock import Mock, call +from kubernetes import client + from .watch import Watch @@ -273,6 +275,31 @@ def test_watch_with_exception(self): fake_resp.close.assert_called_once() fake_resp.release_conn.assert_called_once() + def test_watch_with_error_event(self): + fake_resp = Mock() + fake_resp.close = Mock() + fake_resp.release_conn = Mock() + fake_resp.read_chunked = Mock( + return_value=[ + '{"type": "ERROR", "object": {"code": 410, ' + '"reason": "Gone", "message": "error message"}}\n']) + + fake_api = Mock() + fake_api.get_thing = Mock(return_value=fake_resp) + + w = Watch() + try: + for _ in w.stream(fake_api.get_thing): + self.fail(self, "Should fail with ApiException.") + except client.rest.ApiException: + pass + + fake_api.get_thing.assert_called_once_with( + _preload_content=False, watch=True) + fake_resp.read_chunked.assert_called_once_with(decode_content=False) + fake_resp.close.assert_called_once() + fake_resp.release_conn.assert_called_once() + if __name__ == '__main__': unittest.main() From a7c78291bf249a32c8ef32c00e952d9c1dee9dbb Mon Sep 17 00:00:00 2001 From: Haowei Cai Date: Thu, 16 Jul 2020 00:38:24 -0700 Subject: [PATCH 14/90] add old api_key to set auth attributes --- config/kube_config_test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index ef5616e4..1349cafe 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1399,11 +1399,13 @@ def test_get_api_key_with_prefix_returns_token(self): def 
test_auth_settings_calls_get_api_key_with_prefix(self): expected_token = 'expected_token' + old_token = 'old_token' def fake_get_api_key_with_prefix(identifier): self.assertEqual('authorization', identifier) return expected_token config = Configuration() + config.api_key['authorization'] = old_token config.get_api_key_with_prefix = fake_get_api_key_with_prefix self.assertEqual(expected_token, config.auth_settings()['BearerToken']['value']) From a54f404366c0800497f8b62122d7be77c143297f Mon Sep 17 00:00:00 2001 From: Nabarun Pal Date: Thu, 16 Jul 2020 14:02:12 +0530 Subject: [PATCH 15/90] Fix a Python 2 compatibility issue PR #133 introduces the usage of `http` module for checking the status code for `GONE` HTTP status. However, this doesn't work in Python 2.7. This commit checks if the interpreter is Python 2 and imports the status code from `httplib` module instead and unifies the approach to the checks. Signed-off-by: Nabarun Pal --- watch/watch.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/watch/watch.py b/watch/watch.py index f67dbe45..6410dfab 100644 --- a/watch/watch.py +++ b/watch/watch.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import http import json import pydoc +import sys from kubernetes import client @@ -29,6 +29,15 @@ TYPE_LIST_SUFFIX = "List" +PY2 = sys.version_info[0] == 2 +if PY2: + import httplib + HTTP_STATUS_GONE = httplib.GONE +else: + import http + HTTP_STATUS_GONE = http.HTTPStatus.GONE + + class SimpleNamespace: def __init__(self, **kwargs): @@ -158,7 +167,7 @@ def stream(self, func, *args, **kwargs): # Current request expired, let's retry, # but only if we have not already retried. 
if not retry_after_410 and \ - obj['code'] == http.HTTPStatus.GONE: + obj['code'] == HTTP_STATUS_GONE: retry_after_410 = True break else: From b68ca3055178e31a5d87a0a98780e4987a4d23ae Mon Sep 17 00:00:00 2001 From: Nabarun Pal Date: Thu, 16 Jul 2020 14:08:44 +0530 Subject: [PATCH 16/90] Fixes codestyle failures Signed-off-by: Nabarun Pal --- watch/watch.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/watch/watch.py b/watch/watch.py index 6410dfab..3058ed9a 100644 --- a/watch/watch.py +++ b/watch/watch.py @@ -171,9 +171,10 @@ def stream(self, func, *args, **kwargs): retry_after_410 = True break else: - reason = "%s: %s" % (obj['reason'], obj['message']) - raise client.rest.ApiException(status=obj['code'], - reason=reason) + reason = "%s: %s" % ( + obj['reason'], obj['message']) + raise client.rest.ApiException( + status=obj['code'], reason=reason) else: retry_after_410 = False yield event From b85aff2b3e6c950cb9128d281cd6f7394563e202 Mon Sep 17 00:00:00 2001 From: Graham Reed Date: Fri, 29 May 2020 17:09:38 +0100 Subject: [PATCH 17/90] Accept client certificates from an authn/authz plugin (Plugin interface reference: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#input-and-output-formats) When handling the response from the authn/authz plugin, `token` will be used if provided, which maintains current behaviour. Newly added is handling `clientCertificateData`: if it is present, that certificate (and its key) will be used as provided by the plugin. (And any certificate/key pair provided via the `users` section of the configuration file will be ignored.) 
--- config/kube_config.py | 46 +++++++++++++++++++++++++++++--------- config/kube_config_test.py | 35 +++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 11 deletions(-) diff --git a/config/kube_config.py b/config/kube_config.py index 9786e0e5..c3ba04ca 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -472,11 +472,31 @@ def _load_from_exec_plugin(self): return try: status = ExecProvider(self._user['exec']).run() - if 'token' not in status: - logging.error('exec: missing token field in plugin output') - return None - self.token = "Bearer %s" % status['token'] - return True + if 'token' in status: + self.token = "Bearer %s" % status['token'] + return True + if 'clientCertificateData' in status: + # https://kubernetes.io/docs/reference/access-authn-authz/authentication/#input-and-output-formats + # Plugin has provided certificates instead of a token. + if 'clientKeyData' not in status: + logging.error('exec: missing clientKeyData field in ' + 'plugin output') + return None + base_path = self._get_base_path(self._cluster.path) + self.cert_file = FileOrData( + status, None, + data_key_name='clientCertificateData', + file_base_path=base_path, + base64_file_content=False).as_file() + self.key_file = FileOrData( + status, None, + data_key_name='clientKeyData', + file_base_path=base_path, + base64_file_content=False).as_file() + return True + logging.error('exec: missing token or clientCertificateData field ' + 'in plugin output') + return None except Exception as e: logging.error(str(e)) @@ -512,12 +532,16 @@ def _load_cluster_info(self): self.ssl_ca_cert = FileOrData( self._cluster, 'certificate-authority', file_base_path=base_path).as_file() - self.cert_file = FileOrData( - self._user, 'client-certificate', - file_base_path=base_path).as_file() - self.key_file = FileOrData( - self._user, 'client-key', - file_base_path=base_path).as_file() + if 'cert_file' not in self.__dict__: + # cert_file could have been provided by + # 
_load_from_exec_plugin; only load from the _user + # section if we need it. + self.cert_file = FileOrData( + self._user, 'client-certificate', + file_base_path=base_path).as_file() + self.key_file = FileOrData( + self._user, 'client-key', + file_base_path=base_path).as_file() if 'insecure-skip-tls-verify' in self._cluster: self.verify_ssl = not self._cluster['insecure-skip-tls-verify'] diff --git a/config/kube_config_test.py b/config/kube_config_test.py index 1349cafe..63cf11aa 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -541,6 +541,13 @@ class TestKubeConfigLoader(BaseTestCase): "user": "exec_cred_user" } }, + { + "name": "exec_cred_user_certificate", + "context": { + "cluster": "ssl", + "user": "exec_cred_user_certificate" + } + }, { "name": "contexttestcmdpath", "context": { @@ -865,6 +872,16 @@ class TestKubeConfigLoader(BaseTestCase): } } }, + { + "name": "exec_cred_user_certificate", + "user": { + "exec": { + "apiVersion": "client.authentication.k8s.io/v1beta1", + "command": "custom-certificate-authenticator", + "args": [] + } + } + }, { "name": "usertestcmdpath", "user": { @@ -1295,6 +1312,24 @@ def test_user_exec_auth(self, mock): active_context="exec_cred_user").load_and_set(actual) self.assertEqual(expected, actual) + @mock.patch('kubernetes.config.kube_config.ExecProvider.run') + def test_user_exec_auth_certificates(self, mock): + mock.return_value = { + "clientCertificateData": TEST_CLIENT_CERT, + "clientKeyData": TEST_CLIENT_KEY, + } + expected = FakeConfig( + host=TEST_SSL_HOST, + cert_file=self._create_temp_file(TEST_CLIENT_CERT), + key_file=self._create_temp_file(TEST_CLIENT_KEY), + ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH), + verify_ssl=True) + actual = FakeConfig() + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="exec_cred_user_certificate").load_and_set(actual) + self.assertEqual(expected, actual) + def test_user_cmd_path(self): A = namedtuple('A', ['token', 'expiry']) 
token = "dummy" From f65f06b1ed4388a1ab030215deb4381ec438f318 Mon Sep 17 00:00:00 2001 From: Dylan Shepard Date: Wed, 19 Aug 2020 12:36:32 -0700 Subject: [PATCH 18/90] commiting changes to branch --- config/kube_config.py | 29 +++++++++++++++++++++-------- config/kube_config_test.py | 15 +++++++++++++++ 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/config/kube_config.py b/config/kube_config.py index 68910841..a1fc59c9 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -19,6 +19,7 @@ import json import logging import os +import io import platform import subprocess import tempfile @@ -667,19 +668,31 @@ def __init__(self, paths): self.paths = [] self.config_files = {} self.config_merged = None - - for path in paths.split(ENV_KUBECONFIG_PATH_SEPARATOR): - if path: - path = os.path.expanduser(path) - if os.path.exists(path): - self.paths.append(path) - self.load_config(path) - self.config_saved = copy.deepcopy(self.config_files) + if hasattr(paths, 'read'): + self.load_config_from_fileish(paths) + else: + for path in paths.split(ENV_KUBECONFIG_PATH_SEPARATOR): + if path: + path = os.path.expanduser(path) + if os.path.exists(path): + self.paths.append(path) + self.load_config(path) + self.config_saved = copy.deepcopy(self.config_files) @property def config(self): return self.config_merged + def load_config_from_fileish(self, string): + if hasattr(string, 'getvalue'): + config = yaml.safe_load(string.getvalue()) + else: + config = yaml.safe_load(string.read()) + + if self.config_merged is None: + self.config_merged = copy.deepcopy(config) + # doesn't need to do any further merging + def load_config(self, path): with open(path) as f: config = yaml.safe_load(f) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index 4b406b34..a666cff2 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -16,6 +16,7 @@ import datetime import json import os +import io import shutil import tempfile import unittest @@ 
-1257,6 +1258,14 @@ def test_load_kube_config(self): client_configuration=actual) self.assertEqual(expected, actual) + def test_load_kube_config_from_stringio(self): + expected = FakeConfig(host=TEST_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) + kubeconfig = self._create_stringio_config() + actual = FakeConfig() + load_kube_config(config_file=kubeconfig, context="simple_token", client_configuration=actual) + self.assertEqual(expected, actual) + def test_load_kube_config_from_dict(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) @@ -1633,6 +1642,11 @@ def _create_multi_config(self): files.append(self._create_temp_file(yaml.safe_dump(part))) return ENV_KUBECONFIG_PATH_SEPARATOR.join(files) + def _create_stringio_config(self): + obj = io.StringIO() + obj.write(self.TEST_KUBE_CONFIG_PART1) + return obj + def test_list_kube_config_contexts(self): kubeconfigs = self._create_multi_config() expected_contexts = [ @@ -1660,6 +1674,7 @@ def test_new_client_from_config(self): self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, client.configuration.api_key['authorization']) + def test_save_changes(self): kubeconfigs = self._create_multi_config() From aac4e35ca9f14aaaa741f200283f3cfe0a85f1d9 Mon Sep 17 00:00:00 2001 From: Dylan Shepard Date: Wed, 19 Aug 2020 12:49:33 -0700 Subject: [PATCH 19/90] correcting tests --- config/kube_config_test.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index a666cff2..e53c5767 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1258,12 +1258,13 @@ def test_load_kube_config(self): client_configuration=actual) self.assertEqual(expected, actual) - def test_load_kube_config_from_stringio(self): + def test_load_kube_config_from_fileish(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) - kubeconfig = self._create_stringio_config() + 
config_fileish = io.StringIO() + config_fileish.write(yaml.safe_dump(self.TEST_KUBE_CONFIG)) actual = FakeConfig() - load_kube_config(config_file=kubeconfig, context="simple_token", client_configuration=actual) + load_kube_config(config_file=config_fileish, context="simple_token", client_configuration=actual) self.assertEqual(expected, actual) def test_load_kube_config_from_dict(self): @@ -1642,11 +1643,6 @@ def _create_multi_config(self): files.append(self._create_temp_file(yaml.safe_dump(part))) return ENV_KUBECONFIG_PATH_SEPARATOR.join(files) - def _create_stringio_config(self): - obj = io.StringIO() - obj.write(self.TEST_KUBE_CONFIG_PART1) - return obj - def test_list_kube_config_contexts(self): kubeconfigs = self._create_multi_config() expected_contexts = [ From fd62214e288076c8fde7dfeed1c5576c62002044 Mon Sep 17 00:00:00 2001 From: "Patrick J. McNerthney" Date: Wed, 26 Aug 2020 18:18:00 -1000 Subject: [PATCH 20/90] Refactor stream package to enable common method helpers for other streaming api classes. --- stream/stream.py | 19 +++---- stream/ws_client.py | 117 ++++++++++++++++++++++++-------------------- 2 files changed, 70 insertions(+), 66 deletions(-) diff --git a/stream/stream.py b/stream/stream.py index 6d5f05f8..627fd1a3 100644 --- a/stream/stream.py +++ b/stream/stream.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import types + from . import ws_client @@ -19,19 +21,10 @@ def stream(func, *args, **kwargs): """Stream given API call using websocket. Extra kwarg: capture-all=True - captures all stdout+stderr for use with WSClient.read_all()""" - def _intercept_request_call(*args, **kwargs): - # old generated code's api client has config. 
new ones has - # configuration - try: - config = func.__self__.api_client.configuration - except AttributeError: - config = func.__self__.api_client.config - - return ws_client.websocket_call(config, *args, **kwargs) - - prev_request = func.__self__.api_client.request + api_client = func.__self__.api_client + prev_request = api_client.request try: - func.__self__.api_client.request = _intercept_request_call + api_client.request = types.MethodType(ws_client.websocket_call, api_client) return func(*args, **kwargs) finally: - func.__self__.api_client.request = prev_request + api_client.request = prev_request diff --git a/stream/ws_client.py b/stream/ws_client.py index 2b599381..31300363 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -23,7 +23,7 @@ import six import yaml -from six.moves.urllib.parse import urlencode, quote_plus, urlparse, urlunparse +from six.moves.urllib.parse import urlencode, urlparse, urlunparse from six import StringIO from websocket import WebSocket, ABNF, enableTrace @@ -51,47 +51,13 @@ def __init__(self, configuration, url, headers, capture_all): like port forwarding can forward different pods' streams to different channels. 
""" - enableTrace(False) - header = [] self._connected = False self._channels = {} if capture_all: self._all = StringIO() else: self._all = _IgnoredIO() - - # We just need to pass the Authorization, ignore all the other - # http headers we get from the generated code - if headers and 'authorization' in headers: - header.append("authorization: %s" % headers['authorization']) - - if headers and 'sec-websocket-protocol' in headers: - header.append("sec-websocket-protocol: %s" % - headers['sec-websocket-protocol']) - else: - header.append("sec-websocket-protocol: v4.channel.k8s.io") - - if url.startswith('wss://') and configuration.verify_ssl: - ssl_opts = { - 'cert_reqs': ssl.CERT_REQUIRED, - 'ca_certs': configuration.ssl_ca_cert or certifi.where(), - } - if configuration.assert_hostname is not None: - ssl_opts['check_hostname'] = configuration.assert_hostname - else: - ssl_opts = {'cert_reqs': ssl.CERT_NONE} - - if configuration.cert_file: - ssl_opts['certfile'] = configuration.cert_file - if configuration.key_file: - ssl_opts['keyfile'] = configuration.key_file - - self.sock = WebSocket(sslopt=ssl_opts, skip_utf8_validation=False) - if configuration.proxy: - proxy_url = urlparse(configuration.proxy) - self.sock.connect(url, header=header, http_proxy_host=proxy_url.hostname, http_proxy_port=proxy_url.port) - else: - self.sock.connect(url, header=header) + self.sock = create_websocket(configuration, url, headers) self._connected = True def peek_channel(self, channel, timeout=0): @@ -259,41 +225,86 @@ def close(self, **kwargs): WSResponse = collections.namedtuple('WSResponse', ['data']) -def get_websocket_http://github.com/url(url): +def get_websocket_url(http://github.com/url,%20query_params=None): parsed_url = urlparse(url) parts = list(parsed_url) if parsed_url.scheme == 'http': parts[0] = 'ws' elif parsed_url.scheme == 'https': parts[0] = 'wss' + if query_params: + query = [] + for key, value in query_params: + if key == 'command' and 
isinstance(value, list): + for command in value: + query.append((key, command)) + else: + query.append((key, value)) + if query: + parts[4] = urlencode(query) return urlunparse(parts) -def websocket_call(configuration, *args, **kwargs): +def create_websocket(configuration, url, headers=None): + enableTrace(False) + + # We just need to pass the Authorization, ignore all the other + # http headers we get from the generated code + header = [] + if headers and 'authorization' in headers: + header.append("authorization: %s" % headers['authorization']) + if headers and 'sec-websocket-protocol' in headers: + header.append("sec-websocket-protocol: %s" % + headers['sec-websocket-protocol']) + else: + header.append("sec-websocket-protocol: v4.channel.k8s.io") + + if url.startswith('wss://') and configuration.verify_ssl: + ssl_opts = { + 'cert_reqs': ssl.CERT_REQUIRED, + 'ca_certs': configuration.ssl_ca_cert or certifi.where(), + } + if configuration.assert_hostname is not None: + ssl_opts['check_hostname'] = configuration.assert_hostname + else: + ssl_opts = {'cert_reqs': ssl.CERT_NONE} + + if configuration.cert_file: + ssl_opts['certfile'] = configuration.cert_file + if configuration.key_file: + ssl_opts['keyfile'] = configuration.key_file + + websocket = WebSocket(sslopt=ssl_opts, skip_utf8_validation=False) + if configuration.proxy: + proxy_url = urlparse(configuration.proxy) + websocket.connect(url, header=header, http_proxy_host=proxy_url.hostname, http_proxy_port=proxy_url.port) + else: + websocket.connect(url, header=header) + return websocket + + +def _configuration(api_client): + # old generated code's api client has config. new ones has + # configuration + try: + return api_client.configuration + except AttributeError: + return api_client.config + + +def websocket_call(api_client, _method, url, **kwargs): """An internal function to be called in api-client when a websocket connection is required. 
args and kwargs are the parameters of apiClient.request method.""" - url = args[1] + url = get_websocket_url(http://github.com/url,%20kwargs.get(%22query_params")) + headers = kwargs.get("headers") _request_timeout = kwargs.get("_request_timeout", 60) _preload_content = kwargs.get("_preload_content", True) capture_all = kwargs.get("capture_all", True) - headers = kwargs.get("headers") - - # Expand command parameter list to indivitual command params - query_params = [] - for key, value in kwargs.get("query_params", {}): - if key == 'command' and isinstance(value, list): - for command in value: - query_params.append((key, command)) - else: - query_params.append((key, value)) - - if query_params: - url += '?' + urlencode(query_params) try: - client = WSClient(configuration, get_websocket_http://github.com/url(url), headers, capture_all) + client = WSClient(_configuration(api_client), url, headers, capture_all) if not _preload_content: return client client.run_forever(timeout=_request_timeout) From a00ed7f87a8aea045fba35a5a89aec799e6180b9 Mon Sep 17 00:00:00 2001 From: "Patrick J. McNerthney" Date: Thu, 27 Aug 2020 16:07:05 -1000 Subject: [PATCH 21/90] Put extracting the "configuration" back into the stream.py module, and use functools.partial to orchestrate calling the websocket request hanlder. --- stream/stream.py | 23 +++++++++++++++-------- stream/ws_client.py | 15 +++------------ 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/stream/stream.py b/stream/stream.py index 627fd1a3..9bb59017 100644 --- a/stream/stream.py +++ b/stream/stream.py @@ -12,19 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. -import types +import functools from . import ws_client -def stream(func, *args, **kwargs): - """Stream given API call using websocket. 
- Extra kwarg: capture-all=True - captures all stdout+stderr for use with WSClient.read_all()""" - - api_client = func.__self__.api_client +def _websocket_reqeust(websocket_request, api_method, *args, **kwargs): + """Override the ApiClient.request method with an alternative websocket based + method and call the supplied Kubernetes API method with that in place.""" + api_client = api_method.__self__.api_client + # old generated code's api client has config. new ones has configuration + try: + configuration = api_client.configuration + except AttributeError: + configuration = api_client.config prev_request = api_client.request try: - api_client.request = types.MethodType(ws_client.websocket_call, api_client) - return func(*args, **kwargs) + api_client.request = functools.partial(websocket_request, configuration) + return api_method(*args, **kwargs) finally: api_client.request = prev_request + + +stream = functools.partial(_websocket_reqeust, ws_client.websocket_call) diff --git a/stream/ws_client.py b/stream/ws_client.py index 31300363..fa7f393e 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -283,18 +283,9 @@ def create_websocket(configuration, url, headers=None): return websocket -def _configuration(api_client): - # old generated code's api client has config. new ones has - # configuration - try: - return api_client.configuration - except AttributeError: - return api_client.config - - -def websocket_call(api_client, _method, url, **kwargs): +def websocket_call(configuration, _method, url, **kwargs): """An internal function to be called in api-client when a websocket - connection is required. args and kwargs are the parameters of + connection is required. 
method, url, and kwargs are the parameters of apiClient.request method.""" url = get_websocket_url(http://github.com/url,%20kwargs.get(%22query_params")) @@ -304,7 +295,7 @@ def websocket_call(api_client, _method, url, **kwargs): capture_all = kwargs.get("capture_all", True) try: - client = WSClient(_configuration(api_client), url, headers, capture_all) + client = WSClient(configuration, url, headers, capture_all) if not _preload_content: return client client.run_forever(timeout=_request_timeout) From 74d0e292b8d637f168c51c6f655813af023df758 Mon Sep 17 00:00:00 2001 From: "Patrick J. McNerthney" Date: Sun, 23 Aug 2020 13:34:41 -1000 Subject: [PATCH 22/90] Implement port forwarding. --- stream/__init__.py | 2 +- stream/stream.py | 8 ++- stream/ws_client.py | 172 +++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 178 insertions(+), 4 deletions(-) diff --git a/stream/__init__.py b/stream/__init__.py index e72d0583..cd346528 100644 --- a/stream/__init__.py +++ b/stream/__init__.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .stream import stream +from .stream import stream, portforward diff --git a/stream/stream.py b/stream/stream.py index 9bb59017..57bac758 100644 --- a/stream/stream.py +++ b/stream/stream.py @@ -17,9 +17,12 @@ from . import ws_client -def _websocket_reqeust(websocket_request, api_method, *args, **kwargs): +def _websocket_reqeust(websocket_request, force_kwargs, api_method, *args, **kwargs): """Override the ApiClient.request method with an alternative websocket based method and call the supplied Kubernetes API method with that in place.""" + if force_kwargs: + for kwarg, value in force_kwargs.items(): + kwargs[kwarg] = value api_client = api_method.__self__.api_client # old generated code's api client has config. 
new ones has configuration try: @@ -34,4 +37,5 @@ def _websocket_reqeust(websocket_request, api_method, *args, **kwargs): api_client.request = prev_request -stream = functools.partial(_websocket_reqeust, ws_client.websocket_call) +stream = functools.partial(_websocket_reqeust, ws_client.websocket_call, None) +portforward = functools.partial(_websocket_reqeust, ws_client.portforward_call, {'_preload_content':False}) diff --git a/stream/ws_client.py b/stream/ws_client.py index fa7f393e..69274d55 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from kubernetes.client.rest import ApiException +from kubernetes.client.rest import ApiException, ApiValueError import certifi import collections import select +import socket import ssl +import threading import time import six @@ -225,6 +227,143 @@ def close(self, **kwargs): WSResponse = collections.namedtuple('WSResponse', ['data']) +class PortForward: + def __init__(self, websocket, ports): + """A websocket client with support for port forwarding. + + Port Forward command sends on 2 channels per port, a read/write + data channel and a read only error channel. Both channels are sent an + initial frame contaning the port number that channel is associated with. 
+ """ + + self.websocket = websocket + self.ports = {} + for ix, port_number in enumerate(ports): + self.ports[port_number] = self._Port(ix, port_number) + threading.Thread( + name="Kubernetes port forward proxy", target=self._proxy, daemon=True + ).start() + + def socket(self, port_number): + if port_number not in self.ports: + raise ValueError("Invalid port number") + return self.ports[port_number].socket + + def error(self, port_number): + if port_number not in self.ports: + raise ValueError("Invalid port number") + return self.ports[port_number].error + + def close(self): + for port in self.ports.values(): + port.socket.close() + + class _Port: + def __init__(self, ix, number): + self.number = number + self.channel = bytes([ix * 2]) + s, self.python = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM) + self.socket = self._Socket(s) + self.data = b'' + self.error = None + + class _Socket: + def __init__(self, socket): + self._socket = socket + + def __getattr__(self, name): + return getattr(self._socket, name) + + def setsockopt(self, level, optname, value): + # The following socket option is not valid with a socket created from socketpair, + # and is set when creating an SSLSocket from this socket. + if level == socket.IPPROTO_TCP and optname == socket.TCP_NODELAY: + return + self._socket.setsockopt(level, optname, value) + + # Proxy all socket data between the python code and the kubernetes websocket. 
+ def _proxy(self): + channel_ports = [] + channel_initialized = [] + python_ports = {} + rlist = [] + for port in self.ports.values(): + channel_ports.append(port) + channel_initialized.append(False) + channel_ports.append(port) + channel_initialized.append(False) + python_ports[port.python] = port + rlist.append(port.python) + rlist.append(self.websocket.sock) + kubernetes_data = b'' + while True: + wlist = [] + for port in self.ports.values(): + if port.data: + wlist.append(port.python) + if kubernetes_data: + wlist.append(self.websocket.sock) + r, w, _ = select.select(rlist, wlist, []) + for s in w: + if s == self.websocket.sock: + sent = self.websocket.sock.send(kubernetes_data) + kubernetes_data = kubernetes_data[sent:] + else: + port = python_ports[s] + sent = port.python.send(port.data) + port.data = port.data[sent:] + for s in r: + if s == self.websocket.sock: + opcode, frame = self.websocket.recv_data_frame(True) + if opcode == ABNF.OPCODE_CLOSE: + for port in self.ports.values(): + port.python.close() + return + if opcode == ABNF.OPCODE_BINARY: + if not frame.data: + raise RuntimeError("Unexpected frame data size") + channel = frame.data[0] + if channel >= len(channel_ports): + raise RuntimeError("Unexpected channel number: " + str(channel)) + port = channel_ports[channel] + if channel_initialized[channel]: + if channel % 2: + port.error = frame.data[1:].decode() + if port.python in rlist: + port.python.close() + rlist.remove(port.python) + port.data = b'' + else: + port.data += frame.data[1:] + else: + if len(frame.data) != 3: + raise RuntimeError( + "Unexpected initial channel frame data size" + ) + port_number = frame.data[1] + (frame.data[2] * 256) + if port_number != port.number: + raise RuntimeError( + "Unexpected port number in initial channel frame: " + str(port_number) + ) + channel_initialized[channel] = True + elif opcode not in (ABNF.OPCODE_PING, ABNF.OPCODE_PONG): + raise RuntimeError("Unexpected websocket opcode: " + str(opcode)) + else: + 
port = python_ports[s] + data = port.python.recv(1024 * 1024) + if data: + kubernetes_data += ABNF.create_frame( + port.channel + data, + ABNF.OPCODE_BINARY, + ).format() + else: + port.python.close() + rlist.remove(s) + if len(rlist) == 1: + self.websocket.close() + return + + def get_websocket_url(http://github.com/url,%20query_params=None): parsed_url = urlparse(url) parts = list(parsed_url) @@ -302,3 +441,34 @@ def websocket_call(configuration, _method, url, **kwargs): return WSResponse('%s' % ''.join(client.read_all())) except (Exception, KeyboardInterrupt, SystemExit) as e: raise ApiException(status=0, reason=str(e)) + + +def portforward_call(configuration, _method, url, **kwargs): + """An internal function to be called in api-client when a websocket + connection is required for port forwarding. args and kwargs are the + parameters of apiClient.request method.""" + + query_params = kwargs.get("query_params") + + ports = [] + for key, value in query_params: + if key == 'ports': + for port in value.split(','): + try: + port = int(port) + if not (0 < port < 65536): + raise ValueError + ports.append(port) + except ValueError: + raise ApiValueError("Invalid port number `" + str(port) + "`") + if not ports: + raise ApiValueError("Missing required parameter `ports`") + + url = get_websocket_url(http://github.com/url,%20query_params) + headers = kwargs.get("headers") + + try: + websocket = create_websocket(configuration, url, headers) + return PortForward(websocket, ports) + except (Exception, KeyboardInterrupt, SystemExit) as e: + raise ApiException(status=0, reason=str(e)) From f85a41fa31d47c7a5b153bdc2ca4fb0b1c60a710 Mon Sep 17 00:00:00 2001 From: Dylan Shepard Date: Mon, 31 Aug 2020 12:01:11 -0700 Subject: [PATCH 23/90] renaming functions and setting to internal --- config/kube_config.py | 21 ++++++++++++--------- config/kube_config_test.py | 12 ++++++------ 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/config/kube_config.py 
b/config/kube_config.py index a1fc59c9..14fd71a6 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -669,21 +669,15 @@ def __init__(self, paths): self.config_files = {} self.config_merged = None if hasattr(paths, 'read'): - self.load_config_from_fileish(paths) + self._load_config_from_file_like_object(paths) else: - for path in paths.split(ENV_KUBECONFIG_PATH_SEPARATOR): - if path: - path = os.path.expanduser(path) - if os.path.exists(path): - self.paths.append(path) - self.load_config(path) - self.config_saved = copy.deepcopy(self.config_files) + self._load_config_from_file_path(paths) @property def config(self): return self.config_merged - def load_config_from_fileish(self, string): + def _load_config_from_file_like_object(self, string): if hasattr(string, 'getvalue'): config = yaml.safe_load(string.getvalue()) else: @@ -693,6 +687,15 @@ def load_config_from_fileish(self, string): self.config_merged = copy.deepcopy(config) # doesn't need to do any further merging + def _load_config_from_file_path(self, string): + for path in string.split(ENV_KUBECONFIG_PATH_SEPARATOR): + if path: + path = os.path.expanduser(path) + if os.path.exists(path): + self.paths.append(path) + self.load_config(path) + self.config_saved = copy.deepcopy(self.config_files) + def load_config(self, path): with open(path) as f: config = yaml.safe_load(f) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index e53c5767..1f74d345 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1248,7 +1248,7 @@ def test_ssl_with_relative_ssl_files(self): finally: shutil.rmtree(temp_dir) - def test_load_kube_config(self): + def test_load_kube_config_from_file_path(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) config_file = self._create_temp_file( @@ -1258,19 +1258,19 @@ def test_load_kube_config(self): client_configuration=actual) self.assertEqual(expected, actual) - def 
test_load_kube_config_from_fileish(self): + def test_load_kube_config_from_file_like_object(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) - config_fileish = io.StringIO() - config_fileish.write(yaml.safe_dump(self.TEST_KUBE_CONFIG)) + config_file_like_object = io.StringIO() + config_file_like_object.write(yaml.safe_dump(self.TEST_KUBE_CONFIG)) actual = FakeConfig() - load_kube_config(config_file=config_fileish, context="simple_token", client_configuration=actual) + load_kube_config(config_file=config_file_like_object, context="simple_token", + client_configuration=actual) self.assertEqual(expected, actual) def test_load_kube_config_from_dict(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) - actual = FakeConfig() load_kube_config_from_dict(config_dict=self.TEST_KUBE_CONFIG, context="simple_token", From cc9ae10549db26dd1391de55f0da2f4946de4ad7 Mon Sep 17 00:00:00 2001 From: "Patrick J. McNerthney" Date: Mon, 31 Aug 2020 15:53:59 -1000 Subject: [PATCH 24/90] Address the following PR issues: * Rename `_Port.error` to be `_Port.error_channel`. * Correct comment about where setsockopt is being called. * Add comments clarifying why the double call to the same methods to setup channel information. * Allow for ports specified with both local and remote port numbers. 
--- stream/ws_client.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 69274d55..5decad80 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -249,7 +249,7 @@ def socket(self, port_number): raise ValueError("Invalid port number") return self.ports[port_number].socket - def error(self, port_number): + def error_channel(self, port_number): if port_number not in self.ports: raise ValueError("Invalid port number") return self.ports[port_number].error @@ -276,7 +276,7 @@ def __getattr__(self, name): def setsockopt(self, level, optname, value): # The following socket option is not valid with a socket created from socketpair, - # and is set when creating an SSLSocket from this socket. + # and is set by the http.client.HTTPConnection.connect method. if level == socket.IPPROTO_TCP and optname == socket.TCP_NODELAY: return self._socket.setsockopt(level, optname, value) @@ -288,8 +288,10 @@ def _proxy(self): python_ports = {} rlist = [] for port in self.ports.values(): + # Setup the data channel for this port number channel_ports.append(port) channel_initialized.append(False) + # Setup the error channel for this port number channel_ports.append(port) channel_initialized.append(False) python_ports[port.python] = port @@ -455,7 +457,8 @@ def portforward_call(configuration, _method, url, **kwargs): if key == 'ports': for port in value.split(','): try: - port = int(port) + # The last specified port is the remote port + port = int(port.split(':')[-1]) if not (0 < port < 65536): raise ValueError ports.append(port) From 72e372599d68c4e268512c4085ac9e2e13368ae2 Mon Sep 17 00:00:00 2001 From: "Patrick J. McNerthney" Date: Tue, 1 Sep 2020 18:33:33 -1000 Subject: [PATCH 25/90] Rework the parsing of the requested ports to support both a local port and a remote port. 
--- stream/ws_client.py | 77 ++++++++++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 29 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 5decad80..971ab6b4 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -237,30 +237,30 @@ def __init__(self, websocket, ports): """ self.websocket = websocket - self.ports = {} - for ix, port_number in enumerate(ports): - self.ports[port_number] = self._Port(ix, port_number) + self.local_ports = {} + for ix, local_remote in enumerate(ports): + self.local_ports[local_remote[0]] = self._Port(ix, local_remote[1]) threading.Thread( name="Kubernetes port forward proxy", target=self._proxy, daemon=True ).start() - def socket(self, port_number): - if port_number not in self.ports: + def socket(self, local_number): + if local_number not in self.local_ports: raise ValueError("Invalid port number") - return self.ports[port_number].socket + return self.local_ports[local_number].socket - def error_channel(self, port_number): - if port_number not in self.ports: + def error(self, local_number): + if local_number not in self.local_ports: raise ValueError("Invalid port number") - return self.ports[port_number].error + return self.local_ports[local_number].error def close(self): - for port in self.ports.values(): + for port in self.local_ports.values(): port.socket.close() class _Port: - def __init__(self, ix, number): - self.number = number + def __init__(self, ix, remote_number): + self.remote_number = remote_number self.channel = bytes([ix * 2]) s, self.python = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM) self.socket = self._Socket(s) @@ -287,7 +287,7 @@ def _proxy(self): channel_initialized = [] python_ports = {} rlist = [] - for port in self.ports.values(): + for port in self.local_ports.values(): # Setup the data channel for this port number channel_ports.append(port) channel_initialized.append(False) @@ -300,7 +300,7 @@ def _proxy(self): kubernetes_data = b'' while True: 
wlist = [] - for port in self.ports.values(): + for port in self.local_ports.values(): if port.data: wlist.append(port.python) if kubernetes_data: @@ -318,7 +318,7 @@ def _proxy(self): if s == self.websocket.sock: opcode, frame = self.websocket.recv_data_frame(True) if opcode == ABNF.OPCODE_CLOSE: - for port in self.ports.values(): + for port in self.local_ports.values(): port.python.close() return if opcode == ABNF.OPCODE_BINARY: @@ -330,11 +330,9 @@ def _proxy(self): port = channel_ports[channel] if channel_initialized[channel]: if channel % 2: - port.error = frame.data[1:].decode() - if port.python in rlist: - port.python.close() - rlist.remove(port.python) - port.data = b'' + if port.error is None: + port.error = '' + port.error += frame.data[1:].decode() else: port.data += frame.data[1:] else: @@ -343,7 +341,7 @@ def _proxy(self): "Unexpected initial channel frame data size" ) port_number = frame.data[1] + (frame.data[2] * 256) - if port_number != port.number: + if port_number != port.remote_number: raise RuntimeError( "Unexpected port number in initial channel frame: " + str(port_number) ) @@ -453,17 +451,38 @@ def portforward_call(configuration, _method, url, **kwargs): query_params = kwargs.get("query_params") ports = [] - for key, value in query_params: - if key == 'ports': - for port in value.split(','): + for ix in range(len(query_params)): + if query_params[ix][0] == 'ports': + remote_ports = [] + for port in query_params[ix][1].split(','): try: - # The last specified port is the remote port - port = int(port.split(':')[-1]) - if not (0 < port < 65536): + local_remote = port.split(':') + if len(local_remote) > 2: raise ValueError - ports.append(port) + if len(local_remote) == 1: + local_remote[0] = int(local_remote[0]) + if not (0 < local_remote[0] < 65536): + raise ValueError + local_remote.append(local_remote[0]) + elif len(local_remote) == 2: + if local_remote[0]: + local_remote[0] = int(local_remote[0]) + if not (0 <= local_remote[0] < 65536): + 
raise ValueError + else: + local_remote[0] = 0 + local_remote[1] = int(local_remote[1]) + if not (0 < local_remote[1] < 65536): + raise ValueError + if not local_remote[0]: + local_remote[0] = len(ports) + 1 + else: + raise ValueError + ports.append(local_remote) + remote_ports.append(str(local_remote[1])) except ValueError: - raise ApiValueError("Invalid port number `" + str(port) + "`") + raise ApiValueError("Invalid port number `" + port + "`") + query_params[ix] = ('ports', ','.join(remote_ports)) if not ports: raise ApiValueError("Missing required parameter `ports`") From 7bf04b384b8cfcdba6387cf61e1cd9d6052669ee Mon Sep 17 00:00:00 2001 From: "Patrick J. McNerthney" Date: Sun, 6 Sep 2020 09:25:58 -1000 Subject: [PATCH 26/90] Rework how the PortForward._proxy thread determines when and how to terminate. --- stream/ws_client.py | 151 +++++++++++++++++++++++--------------------- 1 file changed, 78 insertions(+), 73 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 971ab6b4..fafba79a 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -238,33 +238,51 @@ def __init__(self, websocket, ports): self.websocket = websocket self.local_ports = {} - for ix, local_remote in enumerate(ports): - self.local_ports[local_remote[0]] = self._Port(ix, local_remote[1]) + for ix, port_number in enumerate(ports): + self.local_ports[port_number] = self._Port(ix, port_number) + # There is a thread run per PortForward instance which performs the translation between the + # raw socket data sent by the python application and the websocket protocol. This thread + # terminates after either side has closed all ports, and after flushing all pending data. 
threading.Thread( - name="Kubernetes port forward proxy", target=self._proxy, daemon=True + name="Kubernetes port forward proxy: %s" % ', '.join([str(port) for port in ports]), + target=self._proxy, + daemon=True ).start() - def socket(self, local_number): - if local_number not in self.local_ports: + def socket(self, port_number): + if port_number not in self.local_ports: raise ValueError("Invalid port number") - return self.local_ports[local_number].socket + return self.local_ports[port_number].socket - def error(self, local_number): - if local_number not in self.local_ports: + def error(self, port_number): + if port_number not in self.local_ports: raise ValueError("Invalid port number") - return self.local_ports[local_number].error + return self.local_ports[port_number].error def close(self): for port in self.local_ports.values(): port.socket.close() class _Port: - def __init__(self, ix, remote_number): - self.remote_number = remote_number + def __init__(self, ix, port_number): + # The remote port number + self.port_number = port_number + # The websocket channel byte number for this port self.channel = bytes([ix * 2]) + # A socket pair is created to provide a means of translating the data flow + # between the python application and the kubernetes websocket. The self.python + # half of the socket pair is used by the _proxy method to receive and send data + # to the running python application. s, self.python = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM) + # The self.socket half of the pair is used by the python application to send + # and receive data to the eventual pod port. It is wrapped in the _Socket class + # because a socket pair is an AF_UNIX socket, not a AF_NET socket. This allows + # intercepting setting AF_INET socket options that would error against an AD_UNIX + # socket. self.socket = self._Socket(s) + # Data accumulated from the websocket to be sent to the python application. 
self.data = b'' + # All data sent from kubernetes on the port error channel. self.error = None class _Socket: @@ -285,8 +303,7 @@ def setsockopt(self, level, optname, value): def _proxy(self): channel_ports = [] channel_initialized = [] - python_ports = {} - rlist = [] + local_ports = {} for port in self.local_ports.values(): # Setup the data channel for this port number channel_ports.append(port) @@ -294,33 +311,36 @@ def _proxy(self): # Setup the error channel for this port number channel_ports.append(port) channel_initialized.append(False) - python_ports[port.python] = port - rlist.append(port.python) - rlist.append(self.websocket.sock) + port.python.setblocking(True) + local_ports[port.python] = port + # The data to send on the websocket socket kubernetes_data = b'' while True: - wlist = [] + rlist = [] # List of sockets to read from + wlist = [] # List of sockets to write to + if self.websocket.connected: + rlist.append(self.websocket) + if kubernetes_data: + wlist.append(self.websocket) + all_closed = True for port in self.local_ports.values(): - if port.data: - wlist.append(port.python) - if kubernetes_data: - wlist.append(self.websocket.sock) + if port.python.fileno() != -1: + if port.data: + wlist.append(port.python) + all_closed = False + else: + if self.websocket.connected: + rlist.append(port.python) + all_closed = False + else: + port.python.close() + if all_closed and (not self.websocket.connected or not kubernetes_data): + self.websocket.close() + return r, w, _ = select.select(rlist, wlist, []) - for s in w: - if s == self.websocket.sock: - sent = self.websocket.sock.send(kubernetes_data) - kubernetes_data = kubernetes_data[sent:] - else: - port = python_ports[s] - sent = port.python.send(port.data) - port.data = port.data[sent:] - for s in r: - if s == self.websocket.sock: + for sock in r: + if sock == self.websocket: opcode, frame = self.websocket.recv_data_frame(True) - if opcode == ABNF.OPCODE_CLOSE: - for port in self.local_ports.values(): - 
port.python.close() - return if opcode == ABNF.OPCODE_BINARY: if not frame.data: raise RuntimeError("Unexpected frame data size") @@ -341,15 +361,15 @@ def _proxy(self): "Unexpected initial channel frame data size" ) port_number = frame.data[1] + (frame.data[2] * 256) - if port_number != port.remote_number: + if port_number != port.port_number: raise RuntimeError( "Unexpected port number in initial channel frame: " + str(port_number) ) channel_initialized[channel] = True - elif opcode not in (ABNF.OPCODE_PING, ABNF.OPCODE_PONG): + elif opcode not in (ABNF.OPCODE_PING, ABNF.OPCODE_PONG, ABNF.OPCODE_CLOSE): raise RuntimeError("Unexpected websocket opcode: " + str(opcode)) else: - port = python_ports[s] + port = local_ports[sock] data = port.python.recv(1024 * 1024) if data: kubernetes_data += ABNF.create_frame( @@ -357,11 +377,16 @@ def _proxy(self): ABNF.OPCODE_BINARY, ).format() else: - port.python.close() - rlist.remove(s) - if len(rlist) == 1: - self.websocket.close() - return + if not port.data: + port.python.close() + for sock in w: + if sock == self.websocket: + sent = self.websocket.sock.send(kubernetes_data) + kubernetes_data = kubernetes_data[sent:] + else: + port = local_ports[sock] + sent = port.python.send(port.data) + port.data = port.data[sent:] def get_websocket_url(http://github.com/url,%20query_params=None): @@ -451,38 +476,18 @@ def portforward_call(configuration, _method, url, **kwargs): query_params = kwargs.get("query_params") ports = [] - for ix in range(len(query_params)): - if query_params[ix][0] == 'ports': - remote_ports = [] - for port in query_params[ix][1].split(','): + for param, value in query_params: + if param == 'ports': + for port in value.split(','): try: - local_remote = port.split(':') - if len(local_remote) > 2: - raise ValueError - if len(local_remote) == 1: - local_remote[0] = int(local_remote[0]) - if not (0 < local_remote[0] < 65536): - raise ValueError - local_remote.append(local_remote[0]) - elif 
len(local_remote) == 2: - if local_remote[0]: - local_remote[0] = int(local_remote[0]) - if not (0 <= local_remote[0] < 65536): - raise ValueError - else: - local_remote[0] = 0 - local_remote[1] = int(local_remote[1]) - if not (0 < local_remote[1] < 65536): - raise ValueError - if not local_remote[0]: - local_remote[0] = len(ports) + 1 - else: - raise ValueError - ports.append(local_remote) - remote_ports.append(str(local_remote[1])) + port_number = int(port) except ValueError: - raise ApiValueError("Invalid port number `" + port + "`") - query_params[ix] = ('ports', ','.join(remote_ports)) + raise ApiValueError("Invalid port number: %s" % port) + if not (0 < port_number < 65536): + raise ApiValueError("Port number must be between 0 and 65536: %s" % port) + if port_number in ports: + raise ApiValueError("Duplicate port numbers: %s" % port) + ports.append(port_number) if not ports: raise ApiValueError("Missing required parameter `ports`") From ce3a1a298a1c4d38dfd1e0d228b2eafff2e647a4 Mon Sep 17 00:00:00 2001 From: "Patrick J. McNerthney" Date: Mon, 7 Sep 2020 11:56:01 -1000 Subject: [PATCH 27/90] Rework loop which collects the local python sockets for read and writing. 
--- stream/ws_client.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index fafba79a..b8204599 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -322,19 +322,21 @@ def _proxy(self): rlist.append(self.websocket) if kubernetes_data: wlist.append(self.websocket) - all_closed = True + local_all_closed = True for port in self.local_ports.values(): if port.python.fileno() != -1: - if port.data: - wlist.append(port.python) - all_closed = False + if self.websocket.connected: + rlist.append(port.python) + if port.data: + wlist.append(port.python) + local_all_closed = False else: - if self.websocket.connected: - rlist.append(port.python) - all_closed = False + if port.data: + wlist.append(port.python) + local_all_closed = False else: port.python.close() - if all_closed and (not self.websocket.connected or not kubernetes_data): + if local_all_closed and not (self.websocket.connected and kubernetes_data): self.websocket.close() return r, w, _ = select.select(rlist, wlist, []) From 2e86b713341faaf3309d22f7494b3c68a6a6e04e Mon Sep 17 00:00:00 2001 From: "Patrick J. McNerthney" Date: Mon, 7 Sep 2020 13:06:44 -1000 Subject: [PATCH 28/90] Better handling of error channel response, and comment typo. 
--- stream/ws_client.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index b8204599..0f8dc327 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -249,6 +249,10 @@ def __init__(self, websocket, ports): daemon=True ).start() + @property + def connected(self): + return self.websocket.connected + def socket(self, port_number): if port_number not in self.local_ports: raise ValueError("Invalid port number") @@ -276,8 +280,8 @@ def __init__(self, ix, port_number): s, self.python = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM) # The self.socket half of the pair is used by the python application to send # and receive data to the eventual pod port. It is wrapped in the _Socket class - # because a socket pair is an AF_UNIX socket, not a AF_NET socket. This allows - # intercepting setting AF_INET socket options that would error against an AD_UNIX + # because a socket pair is an AF_UNIX socket, not a AF_INET socket. This allows + # intercepting setting AF_INET socket options that would error against an AF_UNIX # socket. self.socket = self._Socket(s) # Data accumulated from the websocket to be sent to the python application. @@ -325,17 +329,17 @@ def _proxy(self): local_all_closed = True for port in self.local_ports.values(): if port.python.fileno() != -1: - if self.websocket.connected: - rlist.append(port.python) - if port.data: - wlist.append(port.python) - local_all_closed = False - else: + if port.error or not self.websocket.connected: if port.data: wlist.append(port.python) local_all_closed = False else: port.python.close() + else: + rlist.append(port.python) + if port.data: + wlist.append(port.python) + local_all_closed = False if local_all_closed and not (self.websocket.connected and kubernetes_data): self.websocket.close() return From 5d39d0d5f0e077ea9d19a0f7d94383bed36f7a27 Mon Sep 17 00:00:00 2001 From: "Patrick J. 
McNerthney" Date: Mon, 7 Sep 2020 19:38:54 -1000 Subject: [PATCH 29/90] Support both python 2.7 and 3.x. --- stream/ws_client.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 0f8dc327..356440c8 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -243,11 +243,12 @@ def __init__(self, websocket, ports): # There is a thread run per PortForward instance which performs the translation between the # raw socket data sent by the python application and the websocket protocol. This thread # terminates after either side has closed all ports, and after flushing all pending data. - threading.Thread( + proxy = threading.Thread( name="Kubernetes port forward proxy: %s" % ', '.join([str(port) for port in ports]), - target=self._proxy, - daemon=True - ).start() + target=self._proxy + ) + proxy.daemon = True + proxy.start() @property def connected(self): @@ -272,7 +273,7 @@ def __init__(self, ix, port_number): # The remote port number self.port_number = port_number # The websocket channel byte number for this port - self.channel = bytes([ix * 2]) + self.channel = six.int2byte(ix * 2) # A socket pair is created to provide a means of translating the data flow # between the python application and the kubernetes websocket. 
The self.python # half of the socket pair is used by the _proxy method to receive and send data @@ -350,9 +351,9 @@ def _proxy(self): if opcode == ABNF.OPCODE_BINARY: if not frame.data: raise RuntimeError("Unexpected frame data size") - channel = frame.data[0] + channel = six.byte2int(frame.data) if channel >= len(channel_ports): - raise RuntimeError("Unexpected channel number: " + str(channel)) + raise RuntimeError("Unexpected channel number: %s" % channel) port = channel_ports[channel] if channel_initialized[channel]: if channel % 2: @@ -366,14 +367,14 @@ def _proxy(self): raise RuntimeError( "Unexpected initial channel frame data size" ) - port_number = frame.data[1] + (frame.data[2] * 256) + port_number = six.byte2int(frame.data[1:2]) + (six.byte2int(frame.data[2:3]) * 256) if port_number != port.port_number: raise RuntimeError( - "Unexpected port number in initial channel frame: " + str(port_number) + "Unexpected port number in initial channel frame: %s" % port_number ) channel_initialized[channel] = True elif opcode not in (ABNF.OPCODE_PING, ABNF.OPCODE_PONG, ABNF.OPCODE_CLOSE): - raise RuntimeError("Unexpected websocket opcode: " + str(opcode)) + raise RuntimeError("Unexpected websocket opcode: %s" % opcode) else: port = local_ports[sock] data = port.python.recv(1024 * 1024) @@ -383,8 +384,7 @@ def _proxy(self): ABNF.OPCODE_BINARY, ).format() else: - if not port.data: - port.python.close() + port.python.close() for sock in w: if sock == self.websocket: sent = self.websocket.sock.send(kubernetes_data) From bfa968140cb6e7554ecb87e034f519ed2724ba8d Mon Sep 17 00:00:00 2001 From: Dylan Shepard Date: Wed, 9 Sep 2020 07:03:45 -0700 Subject: [PATCH 30/90] supporting 2.7, reading works fine, writing reqs unicode --- config/kube_config_test.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index 1f74d345..b2b90ce9 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py 
@@ -1262,7 +1262,12 @@ def test_load_kube_config_from_file_like_object(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) config_file_like_object = io.StringIO() - config_file_like_object.write(yaml.safe_dump(self.TEST_KUBE_CONFIG)) + #py3 (won't have unicode) vs py2 (requires it) + try: + unicode('') + config_file_like_object.write(unicode(yaml.safe_dump(self.TEST_KUBE_CONFIG)), errors='replace') + except NameError: + config_file_like_object.write(yaml.safe_dump(self.TEST_KUBE_CONFIG)) actual = FakeConfig() load_kube_config(config_file=config_file_like_object, context="simple_token", client_configuration=actual) From 49cbb1de99ec4bd3213a1f66c8fcd00a55ff761f Mon Sep 17 00:00:00 2001 From: Dylan Shepard Date: Wed, 9 Sep 2020 07:07:13 -0700 Subject: [PATCH 31/90] replace inside the parens --- config/kube_config_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index b2b90ce9..8fcfcc5d 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1265,7 +1265,7 @@ def test_load_kube_config_from_file_like_object(self): #py3 (won't have unicode) vs py2 (requires it) try: unicode('') - config_file_like_object.write(unicode(yaml.safe_dump(self.TEST_KUBE_CONFIG)), errors='replace') + config_file_like_object.write(unicode(yaml.safe_dump(self.TEST_KUBE_CONFIG), errors='replace')) except NameError: config_file_like_object.write(yaml.safe_dump(self.TEST_KUBE_CONFIG)) actual = FakeConfig() From 9f4775f43f8d0d205941a3ae6e1f885d517410aa Mon Sep 17 00:00:00 2001 From: Dylan Shepard Date: Wed, 9 Sep 2020 07:22:04 -0700 Subject: [PATCH 32/90] trying to fix pycodestyle problems --- config/kube_config_test.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index 8fcfcc5d..f12a0b3e 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ 
-1262,15 +1262,23 @@ def test_load_kube_config_from_file_like_object(self): expected = FakeConfig(host=TEST_HOST, token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64) config_file_like_object = io.StringIO() - #py3 (won't have unicode) vs py2 (requires it) + # py3 (won't have unicode) vs py2 (requires it) try: unicode('') - config_file_like_object.write(unicode(yaml.safe_dump(self.TEST_KUBE_CONFIG), errors='replace')) + config_file_like_object.write( + unicode( + yaml.safe_dump( + self.TEST_KUBE_CONFIG), + errors='replace')) except NameError: - config_file_like_object.write(yaml.safe_dump(self.TEST_KUBE_CONFIG)) + config_file_like_object.write( + yaml.safe_dump( + self.TEST_KUBE_CONFIG)) actual = FakeConfig() - load_kube_config(config_file=config_file_like_object, context="simple_token", - client_configuration=actual) + load_kube_config( + config_file=config_file_like_object, + context="simple_token", + client_configuration=actual) self.assertEqual(expected, actual) def test_load_kube_config_from_dict(self): @@ -1675,7 +1683,6 @@ def test_new_client_from_config(self): self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, client.configuration.api_key['authorization']) - def test_save_changes(self): kubeconfigs = self._create_multi_config() From 0559445cb4a61548b34c68698e37219d837033c9 Mon Sep 17 00:00:00 2001 From: Dylan Shepard Date: Wed, 9 Sep 2020 07:28:51 -0700 Subject: [PATCH 33/90] unused io import, pre-setting pycodestyle --- config/kube_config.py | 1 - config/kube_config_test.py | 12 ++++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/config/kube_config.py b/config/kube_config.py index 14fd71a6..0ed5a71c 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -19,7 +19,6 @@ import json import logging import os -import io import platform import subprocess import tempfile diff --git a/config/kube_config_test.py b/config/kube_config_test.py index f12a0b3e..de1dcc1b 100644 --- a/config/kube_config_test.py +++ 
b/config/kube_config_test.py @@ -14,9 +14,9 @@ import base64 import datetime +import io import json import os -import io import shutil import tempfile import unittest @@ -1272,13 +1272,13 @@ def test_load_kube_config_from_file_like_object(self): errors='replace')) except NameError: config_file_like_object.write( - yaml.safe_dump( - self.TEST_KUBE_CONFIG)) + yaml.safe_dump( + self.TEST_KUBE_CONFIG)) actual = FakeConfig() load_kube_config( - config_file=config_file_like_object, - context="simple_token", - client_configuration=actual) + config_file=config_file_like_object, + context="simple_token", + client_configuration=actual) self.assertEqual(expected, actual) def test_load_kube_config_from_dict(self): From acdd0588f7fa482a61e41b58c1aa9978069f4d75 Mon Sep 17 00:00:00 2001 From: Haowei Cai Date: Wed, 14 Oct 2020 14:34:17 -0700 Subject: [PATCH 34/90] restore discovery client exception handling --- dynamic/discovery.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/dynamic/discovery.py b/dynamic/discovery.py index 24d48d81..d2f801f2 100644 --- a/dynamic/discovery.py +++ b/dynamic/discovery.py @@ -254,8 +254,11 @@ def __search(self, parts, resources, reqParams): # Check if we've requested resources for this group if not resourcePart.resources: prefix, group, version = reqParams[0], reqParams[1], part - resourcePart.resources = self.get_resources_for_api_version( - prefix, group, part, resourcePart.preferred) + try: + resourcePart.resources = self.get_resources_for_api_version( + prefix, group, part, resourcePart.preferred) + except NotFoundError: + raise ResourceNotFoundError self._cache['resources'][prefix][group][version] = resourcePart self.__update_cache = True From 3412151aa96738a1860e3144c2d2a0e87d8e9a63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=A1bor=20Lipt=C3=A1k?= Date: Mon, 23 Nov 2020 14:09:08 -0500 Subject: [PATCH 35/90] Add Python 3.9 to build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit Signed-off-by: Gábor Lipták --- .travis.yml | 4 ++++ tox.ini | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index ddff691a..b44ec90a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -44,3 +44,7 @@ jobs: env: TOXENV=py38 - python: 3.8 env: TOXENV=py38-functional + - python: 3.9 + env: TOXENV=py39 + - python: 3.9 + env: TOXENV=py39-functional diff --git a/tox.ini b/tox.ini index 2d92c46e..71c4d2d8 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,8 @@ [tox] skipsdist = True -envlist = py27, py35, py36, py37 +envlist = + py27, py3{5,6,7,8,9} + py27-functional, py3{5,6,7,8,9}-functional [testenv] passenv = TOXENV CI TRAVIS TRAVIS_* From 7199c14a8d12c9aa623a1df2de6bef6c9f6d800a Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Fri, 25 Dec 2020 12:11:42 -0500 Subject: [PATCH 36/90] Change KUBE_CONFIG_DEFAULT_LOCATION to use pathlib.Path.home instead of hard-coded "~". This is a more "Pythonic" way of setting that value. --- config/kube_config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config/kube_config.py b/config/kube_config.py index 0ed5a71c..5c862287 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -19,6 +19,7 @@ import json import logging import os +import pathlib import platform import subprocess import tempfile @@ -45,7 +46,7 @@ pass EXPIRY_SKEW_PREVENTION_DELAY = datetime.timedelta(minutes=5) -KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', '~/.kube/config') +KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', f'{pathlib.Path.home()}/.kube/config') ENV_KUBECONFIG_PATH_SEPARATOR = ';' if platform.system() == 'Windows' else ':' _temp_files = {} From 0c662bb33dfb49236ca4c68b81d426d8948da224 Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Fri, 25 Dec 2020 12:22:38 -0500 Subject: [PATCH 37/90] Adding load_config wrapper method to have a more generic way of initializing the client config --- config/__init__.py | 17 ++++++++++++++++- 1 file changed, 16 
insertions(+), 1 deletion(-) diff --git a/config/__init__.py b/config/__init__.py index b57bf185..d9d7f4bb 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -12,7 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os from .config_exception import ConfigException from .incluster_config import load_incluster_config from .kube_config import (list_kube_config_contexts, load_kube_config, - load_kube_config_from_dict, new_client_from_config) + load_kube_config_from_dict, new_client_from_config, KUBE_CONFIG_DEFAULT_LOCATION) + + +def load_config(**kwargs): + """ + Wrapper function to load the kube_config. + It will initially try to load_kube_config from provided path, then check if the KUBE_CONFIG_DEFAULT_LOCATION exists + If neither exists- it will fall back to load_incluster_config and inform the user accordingly. + """ + if "kube_config_path" in kwargs.keys() or os.path.exists(KUBE_CONFIG_DEFAULT_LOCATION): + load_kube_config(**kwargs) + else: + print(f"kube_config_path not provided and default location ({KUBE_CONFIG_DEFAULT_LOCATION}) does not exist. " + "Using inCluster Config. This might not work.") + load_incluster_config(**kwargs) From 10db259908b025cfdcbba28c455de9bac54e16aa Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Fri, 25 Dec 2020 12:59:27 -0500 Subject: [PATCH 38/90] Document kwargs param --- config/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/config/__init__.py b/config/__init__.py index d9d7f4bb..1ff2dec2 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -24,6 +24,9 @@ def load_config(**kwargs): Wrapper function to load the kube_config. It will initially try to load_kube_config from provided path, then check if the KUBE_CONFIG_DEFAULT_LOCATION exists If neither exists- it will fall back to load_incluster_config and inform the user accordingly. 
+ + :param kwargs: A combination of all possible kwargs that can be passed to either load_kube_config or + load_incluster_config functions. """ if "kube_config_path" in kwargs.keys() or os.path.exists(KUBE_CONFIG_DEFAULT_LOCATION): load_kube_config(**kwargs) From 3f05359afce73f3f7bc760d2d718180109bc705a Mon Sep 17 00:00:00 2001 From: Haowei Cai Date: Mon, 4 Jan 2021 17:47:34 -0800 Subject: [PATCH 39/90] configmap-e2e: use labels --- dynamic/test_client.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/dynamic/test_client.py b/dynamic/test_client.py index 11546798..b68e081f 100644 --- a/dynamic/test_client.py +++ b/dynamic/test_client.py @@ -331,6 +331,9 @@ def test_configmap_apis(self): "apiVersion": "v1", "metadata": { "name": name, + "labels": { + "e2e-test": "true", + }, }, "data": { "config.json": "{\"command\":\"/usr/bin/mysqld_safe\"}", @@ -344,7 +347,7 @@ def test_configmap_apis(self): self.assertEqual(name, resp.metadata.name) resp = api.get( - name=name, namespace='default') + name=name, namespace='default', label_selector="e2e-test=true") self.assertEqual(name, resp.metadata.name) test_configmap['data']['config.json'] = "{}" @@ -354,7 +357,7 @@ def test_configmap_apis(self): resp = api.delete( name=name, body={}, namespace='default') - resp = api.get(namespace='default', pretty=True) + resp = api.get(namespace='default', pretty=True, label_selector="e2e-test=true") self.assertEqual([], resp.items) def test_node_apis(self): From 2c9ddf94b6614c9f16a234de0ce69e01270466c6 Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Mon, 4 Jan 2021 23:58:25 -0500 Subject: [PATCH 40/90] Revert switch to pathlib, to maintain Python2 support --- config/kube_config.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/config/kube_config.py b/config/kube_config.py index 5c862287..0ed5a71c 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -19,7 +19,6 @@ import json import logging import os -import pathlib import 
platform import subprocess import tempfile @@ -46,7 +45,7 @@ pass EXPIRY_SKEW_PREVENTION_DELAY = datetime.timedelta(minutes=5) -KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', f'{pathlib.Path.home()}/.kube/config') +KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', '~/.kube/config') ENV_KUBECONFIG_PATH_SEPARATOR = ';' if platform.system() == 'Windows' else ':' _temp_files = {} From 4d29af161b3d0e8c531c5829da98ef3ee4f03eb1 Mon Sep 17 00:00:00 2001 From: Sumant Date: Fri, 31 Jul 2020 19:09:24 -0400 Subject: [PATCH 41/90] Leader Election issue #434 changed file naming style consistent with the existing go client code Update example.py Changed file and folder names Rename LeaderElection.py to leaderelection.py Rename threadingWithException.py to threadingwithexception.py Rename ConfigMapLock.py to configmaplock.py LeaderElection to leaderelection Added boiler plate headers, updated variable and function names consistent with the guidelines, removed the ctypes dependency by using traces to kill threads, changed logic for leader now it gives up and doesn't re-join as a follower if it fails to update lease added correct boiler plate year Rename threadingWithTrace.py to threadingwithtrace.py Update leaderelection.py Update example.py Changes based on review - logging, OnStoppedLeading is not killed abruptly, OnStartedLeading is not run in a separate thread, adding README Update example.py updated comments set threads as daemon Update README.md Code made consistent with other clients. 
Update example.py Update leaderelection.py Error & exception handling for the annotation, reduced indentation Adding serializing functions for serializing & de-serializing locks, leader_election_record as a class Adding a test Adding boilerplate header Rename leaderelectiontest.py to leaderelection_test.py Updated boiler plates handling imports for pytest handling 'HTTP not found' compatibility with python 2 & 3, & handling relative imports Update leaderelection.py to check tests for tox assertEquals -> assertEqual Update leaderelection_test.py making Threading compatible for Python 2 changing datetime.timestamp for backward compatibility with Python 2.7 Adding comments for test_Leader_election_with_renew_deadline & making candidates run in parallel for test_leader_election remove redundant daemon = True reassignment common thread lock for MockResourceLock --- leaderelection/README.md | 18 ++ leaderelection/__init__.py | 13 + leaderelection/electionconfig.py | 59 ++++ leaderelection/example.py | 54 ++++ leaderelection/leaderelection.py | 191 +++++++++++++ leaderelection/leaderelection_test.py | 270 +++++++++++++++++++ leaderelection/leaderelectionrecord.py | 22 ++ leaderelection/resourcelock/__init__.py | 13 + leaderelection/resourcelock/configmaplock.py | 129 +++++++++ 9 files changed, 769 insertions(+) create mode 100644 leaderelection/README.md create mode 100644 leaderelection/__init__.py create mode 100644 leaderelection/electionconfig.py create mode 100644 leaderelection/example.py create mode 100644 leaderelection/leaderelection.py create mode 100644 leaderelection/leaderelection_test.py create mode 100644 leaderelection/leaderelectionrecord.py create mode 100644 leaderelection/resourcelock/__init__.py create mode 100644 leaderelection/resourcelock/configmaplock.py diff --git a/leaderelection/README.md b/leaderelection/README.md new file mode 100644 index 00000000..41ed1c48 --- /dev/null +++ b/leaderelection/README.md @@ -0,0 +1,18 @@ +## Leader Election 
Example +This example demonstrates how to use the leader election library. + +## Running +Run the following command in multiple separate terminals preferably an odd number. +Each running process uses a unique identifier displayed when it starts to run. + +- When a program runs, if a lock object already exists with the specified name, +all candidates will start as followers. +- If a lock object does not exist with the specified name then whichever candidate +creates a lock object first will become the leader and the rest will be followers. +- The user will be prompted about the status of the candidates and transitions. + +### Command to run +```python example.py``` + +Now kill the existing leader. You will see from the terminal outputs that one of the + remaining running processes will be elected as the new leader. diff --git a/leaderelection/__init__.py b/leaderelection/__init__.py new file mode 100644 index 00000000..37da225c --- /dev/null +++ b/leaderelection/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/leaderelection/electionconfig.py b/leaderelection/electionconfig.py new file mode 100644 index 00000000..7b0db639 --- /dev/null +++ b/leaderelection/electionconfig.py @@ -0,0 +1,59 @@ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import logging +logging.basicConfig(level=logging.INFO) + + +class Config: + # Validate config, exit if an error is detected + def __init__(self, lock, lease_duration, renew_deadline, retry_period, onstarted_leading, onstopped_leading): + self.jitter_factor = 1.2 + + if lock is None: + sys.exit("lock cannot be None") + self.lock = lock + + if lease_duration <= renew_deadline: + sys.exit("lease_duration must be greater than renew_deadline") + + if renew_deadline <= self.jitter_factor * retry_period: + sys.exit("renewDeadline must be greater than retry_period*jitter_factor") + + if lease_duration < 1: + sys.exit("lease_duration must be greater than one") + + if renew_deadline < 1: + sys.exit("renew_deadline must be greater than one") + + if retry_period < 1: + sys.exit("retry_period must be greater than one") + + self.lease_duration = lease_duration + self.renew_deadline = renew_deadline + self.retry_period = retry_period + + if onstarted_leading is None: + sys.exit("callback onstarted_leading cannot be None") + self.onstarted_leading = onstarted_leading + + if onstopped_leading is None: + self.onstopped_leading = self.on_stoppedleading_callback + else: + self.onstopped_leading = onstopped_leading + + # Default callback for when the current candidate if a leader, stops leading + def on_stoppedleading_callback(self): + logging.info("stopped leading".format(self.lock.identity)) diff --git a/leaderelection/example.py b/leaderelection/example.py new file mode 100644 index 00000000..b8d8e616 --- /dev/null +++ b/leaderelection/example.py @@ -0,0 
+1,54 @@ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid +from kubernetes import client, config +from leaderelection import leaderelection +from leaderelection.resourcelock.configmaplock import ConfigMapLock +from leaderelection import electionconfig + + +# Authenticate using config file +config.load_kube_config(config_file=r"") + +# Parameters required from the user + +# A unique identifier for this candidate +candidate_id = uuid.uuid4() + +# Name of the lock object to be created +lock_name = "examplepython" + +# Kubernetes namespace +lock_namespace = "default" + + +# The function that a user wants to run once a candidate is elected as a leader +def example_func(): + print("I am leader") + + +# A user can choose not to provide any callbacks for what to do when a candidate fails to lead - onStoppedLeading() +# In that case, a default callback function will be used + +# Create config +config = electionconfig.Config(ConfigMapLock(lock_name, lock_namespace, candidate_id), lease_duration=17, + renew_deadline=15, retry_period=5, onstarted_leading=example_func, + onstopped_leading=None) + +# Enter leader election +leaderelection.LeaderElection(config).run() + +# User can choose to do another round of election or simply exit +print("Exited leader election") diff --git a/leaderelection/leaderelection.py b/leaderelection/leaderelection.py new file mode 100644 index 00000000..a707fbac --- /dev/null +++ 
b/leaderelection/leaderelection.py @@ -0,0 +1,191 @@ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import sys +import time +import json +import threading +from .leaderelectionrecord import LeaderElectionRecord +import logging +# if condition to be removed when support for python2 will be removed +if sys.version_info > (3, 0): + from http import HTTPStatus +else: + import httplib +logging.basicConfig(level=logging.INFO) + +""" +This package implements leader election using an annotation in a Kubernetes object. +The onstarted_leading function is run in a thread and when it returns, if it does +it might not be safe to run it again in a process. + +At first all candidates are considered followers. The one to create a lock or update +an existing lock first becomes the leader and remains so until it keeps renewing its +lease. 
+""" + + +class LeaderElection: + def __init__(self, election_config): + if election_config is None: + sys.exit("argument config not passed") + + # Latest record observed in the created lock object + self.observed_record = None + + # The configuration set for this candidate + self.election_config = election_config + + # Latest update time of the lock + self.observed_time_milliseconds = 0 + + # Point of entry to Leader election + def run(self): + # Try to create/ acquire a lock + if self.acquire(): + logging.info("{} successfully acquired lease".format(self.election_config.lock.identity)) + + # Start leading and call OnStartedLeading() + threading.daemon = True + threading.Thread(target=self.election_config.onstarted_leading).start() + + self.renew_loop() + + # Failed to update lease, run OnStoppedLeading callback + self.election_config.onstopped_leading() + + def acquire(self): + # Follower + logging.info("{} is a follower".format(self.election_config.lock.identity)) + retry_period = self.election_config.retry_period + + while True: + succeeded = self.try_acquire_or_renew() + + if succeeded: + return True + + time.sleep(retry_period) + + def renew_loop(self): + # Leader + logging.info("Leader has entered renew loop and will try to update lease continuously") + + retry_period = self.election_config.retry_period + renew_deadline = self.election_config.renew_deadline * 1000 + + while True: + timeout = int(time.time() * 1000) + renew_deadline + succeeded = False + + while int(time.time() * 1000) < timeout: + succeeded = self.try_acquire_or_renew() + + if succeeded: + break + time.sleep(retry_period) + + if succeeded: + time.sleep(retry_period) + continue + + # failed to renew, return + return + + def try_acquire_or_renew(self): + now_timestamp = time.time() + now = datetime.datetime.fromtimestamp(now_timestamp) + + # Check if lock is created + lock_status, old_election_record = self.election_config.lock.get(self.election_config.lock.name, + 
self.election_config.lock.namespace) + + # create a default Election record for this candidate + leader_election_record = LeaderElectionRecord(self.election_config.lock.identity, + str(self.election_config.lease_duration), str(now), str(now)) + + # A lock is not created with that name, try to create one + if not lock_status: + # To be removed when support for python2 will be removed + if sys.version_info > (3, 0): + if json.loads(old_election_record.body)['code'] != HTTPStatus.NOT_FOUND: + logging.info("Error retrieving resource lock {} as {}".format(self.election_config.lock.name, + old_election_record.reason)) + return False + else: + if json.loads(old_election_record.body)['code'] != httplib.NOT_FOUND: + logging.info("Error retrieving resource lock {} as {}".format(self.election_config.lock.name, + old_election_record.reason)) + return False + + logging.info("{} is trying to create a lock".format(leader_election_record.holder_identity)) + create_status = self.election_config.lock.create(name=self.election_config.lock.name, + namespace=self.election_config.lock.namespace, + election_record=leader_election_record) + + if create_status is False: + logging.info("{} Failed to create lock".format(leader_election_record.holder_identity)) + return False + + self.observed_record = leader_election_record + self.observed_time_milliseconds = int(time.time() * 1000) + return True + + # A lock exists with that name + # Validate old_election_record + if old_election_record is None: + # try to update lock with proper annotation and election record + return self.update_lock(leader_election_record) + + if (old_election_record.holder_identity is None or old_election_record.lease_duration is None + or old_election_record.acquire_time is None or old_election_record.renew_time is None): + # try to update lock with proper annotation and election record + return self.update_lock(leader_election_record) + + # Report transitions + if self.observed_record and 
self.observed_record.holder_identity != old_election_record.holder_identity: + logging.info("Leader has switched to {}".format(old_election_record.holder_identity)) + + if self.observed_record is None or old_election_record.__dict__ != self.observed_record.__dict__: + self.observed_record = old_election_record + self.observed_time_milliseconds = int(time.time() * 1000) + + # If This candidate is not the leader and lease duration is yet to finish + if (self.election_config.lock.identity != self.observed_record.holder_identity + and self.observed_time_milliseconds + self.election_config.lease_duration * 1000 > int(now_timestamp * 1000)): + logging.info("yet to finish lease_duration, lease held by {} and has not expired".format(old_election_record.holder_identity)) + return False + + # If this candidate is the Leader + if self.election_config.lock.identity == self.observed_record.holder_identity: + # Leader updates renewTime, but keeps acquire_time unchanged + leader_election_record.acquire_time = self.observed_record.acquire_time + + return self.update_lock(leader_election_record) + + def update_lock(self, leader_election_record): + # Update object with latest election record + update_status = self.election_config.lock.update(self.election_config.lock.name, + self.election_config.lock.namespace, + leader_election_record) + + if update_status is False: + logging.info("{} failed to acquire lease".format(leader_election_record.holder_identity)) + return False + + self.observed_record = leader_election_record + self.observed_time_milliseconds = int(time.time() * 1000) + logging.info("leader {} has successfully acquired lease".format(leader_election_record.holder_identity)) + return True diff --git a/leaderelection/leaderelection_test.py b/leaderelection/leaderelection_test.py new file mode 100644 index 00000000..9fb6d9bc --- /dev/null +++ b/leaderelection/leaderelection_test.py @@ -0,0 +1,270 @@ +# Copyright 2021 The Kubernetes Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from . import leaderelection +from .leaderelectionrecord import LeaderElectionRecord +from kubernetes.client.rest import ApiException +from . import electionconfig +import unittest +import threading +import json +import time +import pytest + +thread_lock = threading.RLock() + +class LeaderElectionTest(unittest.TestCase): + def test_simple_leader_election(self): + election_history = [] + leadership_history = [] + + def on_create(): + election_history.append("create record") + leadership_history.append("get leadership") + + def on_update(): + election_history.append("update record") + + def on_change(): + election_history.append("change record") + + mock_lock = MockResourceLock("mock", "mock_namespace", "mock", thread_lock, on_create, on_update, on_change, None) + + def on_started_leading(): + leadership_history.append("start leading") + + def on_stopped_leading(): + leadership_history.append("stop leading") + + # Create config 4.5 4 3 + config = electionconfig.Config(lock=mock_lock, lease_duration=2.5, + renew_deadline=2, retry_period=1.5, onstarted_leading=on_started_leading, + onstopped_leading=on_stopped_leading) + + # Enter leader election + leaderelection.LeaderElection(config).run() + + self.assert_history(election_history, ["create record", "update record", "update record", "update record"]) + self.assert_history(leadership_history, ["get leadership", "start leading", "stop leading"]) + + def 
test_leader_election(self): + election_history = [] + leadership_history = [] + + def on_create_A(): + election_history.append("A creates record") + leadership_history.append("A gets leadership") + + def on_update_A(): + election_history.append("A updates record") + + def on_change_A(): + election_history.append("A gets leadership") + + mock_lock_A = MockResourceLock("mock", "mock_namespace", "MockA", thread_lock, on_create_A, on_update_A, on_change_A, None) + mock_lock_A.renew_count_max = 3 + + def on_started_leading_A(): + leadership_history.append("A starts leading") + + def on_stopped_leading_A(): + leadership_history.append("A stops leading") + + config_A = electionconfig.Config(lock=mock_lock_A, lease_duration=2.5, + renew_deadline=2, retry_period=1.5, onstarted_leading=on_started_leading_A, + onstopped_leading=on_stopped_leading_A) + + def on_create_B(): + election_history.append("B creates record") + leadership_history.append("B gets leadership") + + def on_update_B(): + election_history.append("B updates record") + + def on_change_B(): + leadership_history.append("B gets leadership") + + mock_lock_B = MockResourceLock("mock", "mock_namespace", "MockB", thread_lock, on_create_B, on_update_B, on_change_B, None) + mock_lock_B.renew_count_max = 4 + + def on_started_leading_B(): + leadership_history.append("B starts leading") + + def on_stopped_leading_B(): + leadership_history.append("B stops leading") + + config_B = electionconfig.Config(lock=mock_lock_B, lease_duration=2.5, + renew_deadline=2, retry_period=1.5, onstarted_leading=on_started_leading_B, + onstopped_leading=on_stopped_leading_B) + + mock_lock_B.leader_record = mock_lock_A.leader_record + + threading.daemon = True + # Enter leader election for A + threading.Thread(target=leaderelection.LeaderElection(config_A).run()).start() + + # Enter leader election for B + threading.Thread(target=leaderelection.LeaderElection(config_B).run()).start() + + time.sleep(5) + + self.assert_history(election_history, 
+ ["A creates record", + "A updates record", + "A updates record", + "B updates record", + "B updates record", + "B updates record", + "B updates record"]) + self.assert_history(leadership_history, + ["A gets leadership", + "A starts leading", + "A stops leading", + "B gets leadership", + "B starts leading", + "B stops leading"]) + + + """Expected behavior: to check if the leader stops leading if it fails to update the lock within the renew_deadline + and stops leading after finally timing out. The difference between each try comes out to be approximately the sleep + time. + Example: + create record: 0s + on try update: 1.5s + on update: zzz s + on try update: 3s + on update: zzz s + on try update: 4.5s + on try update: 6s + Timeout - Leader Exits""" + def test_Leader_election_with_renew_deadline(self): + election_history = [] + leadership_history = [] + + def on_create(): + election_history.append("create record") + leadership_history.append("get leadership") + + def on_update(): + election_history.append("update record") + + def on_change(): + election_history.append("change record") + + def on_try_update(): + election_history.append("try update record") + + mock_lock = MockResourceLock("mock", "mock_namespace", "mock", thread_lock, on_create, on_update, on_change, on_try_update) + mock_lock.renew_count_max = 3 + + def on_started_leading(): + leadership_history.append("start leading") + + def on_stopped_leading(): + leadership_history.append("stop leading") + + # Create config + config = electionconfig.Config(lock=mock_lock, lease_duration=2.5, + renew_deadline=2, retry_period=1.5, onstarted_leading=on_started_leading, + onstopped_leading=on_stopped_leading) + + # Enter leader election + leaderelection.LeaderElection(config).run() + + self.assert_history(election_history, + ["create record", + "try update record", + "update record", + "try update record", + "update record", + "try update record", + "try update record"]) + + self.assert_history(leadership_history, 
["get leadership", "start leading", "stop leading"]) + + def assert_history(self, history, expected): + self.assertIsNotNone(expected) + self.assertIsNotNone(history) + self.assertEqual(len(expected), len(history)) + + for idx in range(len(history)): + self.assertEqual(history[idx], expected[idx], + msg="Not equal at index {}, expected {}, got {}".format(idx, expected[idx], + history[idx])) + + +class MockResourceLock: + def __init__(self, name, namespace, identity, shared_lock, on_create=None, on_update=None, on_change=None, on_try_update=None): + # self.leader_record is shared between two MockResourceLock objects + self.leader_record = [] + self.renew_count = 0 + self.renew_count_max = 4 + self.name = name + self.namespace = namespace + self.identity = str(identity) + self.lock = shared_lock + + self.on_create = on_create + self.on_update = on_update + self.on_change = on_change + self.on_try_update = on_try_update + + def get(self, name, namespace): + self.lock.acquire() + try: + if self.leader_record: + return True, self.leader_record[0] + + ApiException.body = json.dumps({'code': 404}) + return False, ApiException + finally: + self.lock.release() + + def create(self, name, namespace, election_record): + self.lock.acquire() + try: + if len(self.leader_record) == 1: + return False + self.leader_record.append(election_record) + self.on_create() + self.renew_count += 1 + return True + finally: + self.lock.release() + + def update(self, name, namespace, updated_record): + self.lock.acquire() + try: + if self.on_try_update: + self.on_try_update() + if self.renew_count >= self.renew_count_max: + return False + + old_record = self.leader_record[0] + self.leader_record[0] = updated_record + + self.on_update() + + if old_record.holder_identity != updated_record.holder_identity: + self.on_change() + + self.renew_count += 1 + return True + finally: + self.lock.release() + + +if __name__ == '__main__': + unittest.main() diff --git a/leaderelection/leaderelectionrecord.py 
b/leaderelection/leaderelectionrecord.py new file mode 100644 index 00000000..ebb550d4 --- /dev/null +++ b/leaderelection/leaderelectionrecord.py @@ -0,0 +1,22 @@ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class LeaderElectionRecord: + # Annotation used in the lock object + def __init__(self, holder_identity, lease_duration, acquire_time, renew_time): + self.holder_identity = holder_identity + self.lease_duration = lease_duration + self.acquire_time = acquire_time + self.renew_time = renew_time diff --git a/leaderelection/resourcelock/__init__.py b/leaderelection/resourcelock/__init__.py new file mode 100644 index 00000000..37da225c --- /dev/null +++ b/leaderelection/resourcelock/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/leaderelection/resourcelock/configmaplock.py b/leaderelection/resourcelock/configmaplock.py new file mode 100644 index 00000000..8d155e29 --- /dev/null +++ b/leaderelection/resourcelock/configmaplock.py @@ -0,0 +1,129 @@ +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from kubernetes.client.rest import ApiException +from kubernetes import client, config +from kubernetes.client.api_client import ApiClient +from leaderelection.leaderelectionrecord import LeaderElectionRecord +import json +import logging +logging.basicConfig(level=logging.INFO) + + +class ConfigMapLock: + def __init__(self, name, namespace, identity): + """ + :param name: name of the lock + :param namespace: namespace + :param identity: A unique identifier that the candidate is using + """ + self.api_instance = client.CoreV1Api() + self.leader_electionrecord_annotationkey = 'control-plane.alpha.kubernetes.io/leader' + self.name = name + self.namespace = namespace + self.identity = str(identity) + self.configmap_reference = None + self.lock_record = { + 'holderIdentity': None, + 'leaseDurationSeconds': None, + 'acquireTime': None, + 'renewTime': None + } + + # get returns the election record from a ConfigMap Annotation + def get(self, name, namespace): + """ + :param name: Name of the configmap object information to get + :param namespace: Namespace in which the configmap object is to be searched + :return: 'True, election record' if object found 
else 'False, exception response' + """ + try: + api_response = self.api_instance.read_namespaced_config_map(name, namespace) + + # If an annotation does not exist - add the leader_electionrecord_annotationkey + annotations = api_response.metadata.annotations + if annotations is None or annotations == '': + api_response.metadata.annotations = {self.leader_electionrecord_annotationkey: ''} + self.configmap_reference = api_response + return True, None + + # If an annotation exists but, the leader_electionrecord_annotationkey does not then add it as a key + if not annotations.get(self.leader_electionrecord_annotationkey): + api_response.metadata.annotations = {self.leader_electionrecord_annotationkey: ''} + self.configmap_reference = api_response + return True, None + + lock_record = self.get_lock_object(json.loads(annotations[self.leader_electionrecord_annotationkey])) + + self.configmap_reference = api_response + return True, lock_record + except ApiException as e: + return False, e + + def create(self, name, namespace, election_record): + """ + :param electionRecord: Annotation string + :param name: Name of the configmap object to be created + :param namespace: Namespace in which the configmap object is to be created + :return: 'True' if object is created else 'False' if failed + """ + body = client.V1ConfigMap( + metadata={"name": name, + "annotations": {self.leader_electionrecord_annotationkey: json.dumps(self.get_lock_dict(election_record))}}) + + try: + api_response = self.api_instance.create_namespaced_config_map(namespace, body, pretty=True) + return True + except ApiException as e: + logging.info("Failed to create lock as {}".format(e)) + return False + + def update(self, name, namespace, updated_record): + """ + :param name: name of the lock to be updated + :param namespace: namespace the lock is in + :param updated_record: the updated election record + :return: True if update is succesful False if it fails + """ + try: + # Set the updated record + 
self.configmap_reference.metadata.annotations[self.leader_electionrecord_annotationkey] = json.dumps(self.get_lock_dict(updated_record)) + api_response = self.api_instance.replace_namespaced_config_map(name=name, namespace=namespace, + body=self.configmap_reference) + return True + except ApiException as e: + logging.info("Failed to update lock as {}".format(e)) + return False + + def get_lock_object(self, lock_record): + leader_election_record = LeaderElectionRecord(None, None, None, None) + + if lock_record.get('holderIdentity'): + leader_election_record.holder_identity = lock_record['holderIdentity'] + if lock_record.get('leaseDurationSeconds'): + leader_election_record.lease_duration = lock_record['leaseDurationSeconds'] + if lock_record.get('acquireTime'): + leader_election_record.acquire_time = lock_record['acquireTime'] + if lock_record.get('renewTime'): + leader_election_record.renew_time = lock_record['renewTime'] + + return leader_election_record + + def get_lock_dict(self, leader_election_record): + self.lock_record['holderIdentity'] = leader_election_record.holder_identity + self.lock_record['leaseDurationSeconds'] = leader_election_record.lease_duration + self.lock_record['acquireTime'] = leader_election_record.acquire_time + self.lock_record['renewTime'] = leader_election_record.renew_time + + return self.lock_record \ No newline at end of file From 8a3bdb8ebe8dc59037671fec70227d09ab34bf45 Mon Sep 17 00:00:00 2001 From: Mridul Seth Date: Sat, 23 Jan 2021 19:23:45 +0100 Subject: [PATCH 42/90] take care of empty kube_config files --- config/kube_config.py | 8 ++++++++ config/kube_config_test.py | 9 +++++++++ 2 files changed, 17 insertions(+) diff --git a/config/kube_config.py b/config/kube_config.py index 0ed5a71c..b90dbd02 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -682,6 +682,9 @@ def _load_config_from_file_like_object(self, string): else: config = yaml.safe_load(string.read()) + if config is None: + raise ConfigException( + 
'Invalid kube-config.') if self.config_merged is None: self.config_merged = copy.deepcopy(config) # doesn't need to do any further merging @@ -699,6 +702,11 @@ def load_config(self, path): with open(path) as f: config = yaml.safe_load(f) + if config is None: + raise ConfigException( + 'Invalid kube-config. ' + '%s file is empty' % path) + if self.config_merged is None: config_merged = copy.deepcopy(config) for item in ('clusters', 'contexts', 'users'): diff --git a/config/kube_config_test.py b/config/kube_config_test.py index de1dcc1b..a4d47fc7 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1290,6 +1290,15 @@ def test_load_kube_config_from_dict(self): client_configuration=actual) self.assertEqual(expected, actual) + def test_load_kube_config_from_empty_file(self): + config_file_like_object = io.StringIO() + self.assertRaises(ConfigException, load_kube_config, config_file_like_object) + + def test_load_kube_config_from_empty_file_like_object(self): + config_file = self._create_temp_file( + yaml.safe_dump(None)) + self.assertRaises(ConfigException, load_kube_config, config_file) + def test_list_kube_config_contexts(self): config_file = self._create_temp_file( yaml.safe_dump(self.TEST_KUBE_CONFIG)) From 76d8fbda2832eb9fbdb87af46495179152c42e3f Mon Sep 17 00:00:00 2001 From: Mridul Seth Date: Mon, 25 Jan 2021 13:43:26 +0100 Subject: [PATCH 43/90] fix codestyle --- config/kube_config_test.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index a4d47fc7..f18e5e0f 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1292,12 +1292,18 @@ def test_load_kube_config_from_dict(self): def test_load_kube_config_from_empty_file(self): config_file_like_object = io.StringIO() - self.assertRaises(ConfigException, load_kube_config, config_file_like_object) + self.assertRaises( + ConfigException, + load_kube_config, + config_file_like_object) def 
test_load_kube_config_from_empty_file_like_object(self): config_file = self._create_temp_file( yaml.safe_dump(None)) - self.assertRaises(ConfigException, load_kube_config, config_file) + self.assertRaises( + ConfigException, + load_kube_config, + config_file) def test_list_kube_config_contexts(self): config_file = self._create_temp_file( From 18a5ccc3ef621e85a8d02249270bad0a46e3addc Mon Sep 17 00:00:00 2001 From: jamesgetx Date: Fri, 29 Jan 2021 17:27:01 +0800 Subject: [PATCH 44/90] fix: load cache error when CacheDecoder object is not callable --- dynamic/discovery.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/dynamic/discovery.py b/dynamic/discovery.py index d2f801f2..5c2f4ac4 100644 --- a/dynamic/discovery.py +++ b/dynamic/discovery.py @@ -15,8 +15,10 @@ import os import six import json +import logging import hashlib import tempfile +from functools import partial from collections import defaultdict from abc import abstractmethod, abstractproperty @@ -54,11 +56,12 @@ def __init_cache(self, refresh=False): else: try: with open(self.__cache_file, 'r') as f: - self._cache = json.load(f, cls=CacheDecoder(self.client)) + self._cache = json.load(f, cls=partial(CacheDecoder, self.client)) if self._cache.get('library_version') != __version__: # Version mismatch, need to refresh cache self.invalidate_cache() - except Exception: + except Exception as e: + logging.error("load cache error: %s", e) self.invalidate_cache() self._load_server_info() self.discover() From ec1e85ec105bd05404bcec728a57bed0e74a8d1f Mon Sep 17 00:00:00 2001 From: jamesgetx Date: Mon, 1 Feb 2021 21:18:40 +0800 Subject: [PATCH 45/90] test: self._cache = json.load(f, cls=partial(CacheDecoder, self.client)) --- dynamic/test_discovery.py | 40 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 dynamic/test_discovery.py diff --git a/dynamic/test_discovery.py b/dynamic/test_discovery.py new file mode 100644 index 00000000..ef3cd8e1 --- 
/dev/null +++ b/dynamic/test_discovery.py @@ -0,0 +1,40 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest + +from kubernetes.e2e_test import base +from kubernetes.client import api_client + +from . import DynamicClient + + +class TestDiscoverer(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.config = base.get_e2e_configuration() + + def test_init_cache_from_file(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + client.resources.get(api_version='v1', kind='Node') + mtime1 = os.path.getmtime(client.resources._Discoverer__cache_file) + + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + client.resources.get(api_version='v1', kind='Node') + mtime2 = os.path.getmtime(client.resources._Discoverer__cache_file) + + # test no Discoverer._write_cache called + self.assertTrue(mtime1 == mtime2) \ No newline at end of file From e09312a31e8bb12759421a49088f350ed2735b52 Mon Sep 17 00:00:00 2001 From: jamesgetx Date: Mon, 1 Feb 2021 21:20:19 +0800 Subject: [PATCH 46/90] test: self._cache = json.load(f, cls=partial(CacheDecoder, self.client)) --- dynamic/test_discovery.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dynamic/test_discovery.py b/dynamic/test_discovery.py index ef3cd8e1..4897f244 100644 --- a/dynamic/test_discovery.py +++ b/dynamic/test_discovery.py @@ -37,4 +37,4 @@ def test_init_cache_from_file(self): 
mtime2 = os.path.getmtime(client.resources._Discoverer__cache_file) # test no Discoverer._write_cache called - self.assertTrue(mtime1 == mtime2) \ No newline at end of file + self.assertTrue(mtime1 == mtime2) From 3c719874c6278ec4cc5ac3110951ce149fc72d66 Mon Sep 17 00:00:00 2001 From: Mridul Seth Date: Mon, 1 Feb 2021 19:13:29 +0100 Subject: [PATCH 47/90] fix typo in test file --- config/kube_config_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index f18e5e0f..9d299e9d 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1290,14 +1290,14 @@ def test_load_kube_config_from_dict(self): client_configuration=actual) self.assertEqual(expected, actual) - def test_load_kube_config_from_empty_file(self): + def test_load_kube_config_from_empty_file_like_object(self): config_file_like_object = io.StringIO() self.assertRaises( ConfigException, load_kube_config, config_file_like_object) - def test_load_kube_config_from_empty_file_like_object(self): + def test_load_kube_config_from_empty_file(self): config_file = self._create_temp_file( yaml.safe_dump(None)) self.assertRaises( From ebea7e343046d7afbbdc0e199294d5c79ae87362 Mon Sep 17 00:00:00 2001 From: Chris Ayoub Date: Thu, 25 Feb 2021 00:27:33 -0500 Subject: [PATCH 48/90] Fix Watch retries with 410 errors --- watch/watch.py | 10 +++++---- watch/watch_test.py | 54 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 58 insertions(+), 6 deletions(-) diff --git a/watch/watch.py b/watch/watch.py index 3058ed9a..b432778e 100644 --- a/watch/watch.py +++ b/watch/watch.py @@ -151,7 +151,9 @@ def stream(self, func, *args, **kwargs): if 'resource_version' in kwargs: self.resource_version = kwargs['resource_version'] - timeouts = ('timeout_seconds' in kwargs) + # Do not attempt retries if user specifies a timeout. + # We want to ensure we are returning within that timeout. 
+ disable_retries = ('timeout_seconds' in kwargs) retry_after_410 = False while True: resp = func(*args, **kwargs) @@ -164,9 +166,9 @@ def stream(self, func, *args, **kwargs): if isinstance(event, dict) \ and event['type'] == 'ERROR': obj = event['raw_object'] - # Current request expired, let's retry, + # Current request expired, let's retry, (if enabled) # but only if we have not already retried. - if not retry_after_410 and \ + if not disable_retries and not retry_after_410 and \ obj['code'] == HTTP_STATUS_GONE: retry_after_410 = True break @@ -190,5 +192,5 @@ def stream(self, func, *args, **kwargs): else: self._stop = True - if timeouts or self._stop: + if self._stop or disable_retries: break diff --git a/watch/watch_test.py b/watch/watch_test.py index b8cefd20..32cf6334 100644 --- a/watch/watch_test.py +++ b/watch/watch_test.py @@ -287,15 +287,65 @@ def test_watch_with_error_event(self): fake_api = Mock() fake_api.get_thing = Mock(return_value=fake_resp) + w = Watch() + # No events are generated when no initial resourceVersion is passed + # No retry is attempted either, preventing an ApiException + assert not list(w.stream(fake_api.get_thing)) + + fake_api.get_thing.assert_called_once_with( + _preload_content=False, watch=True) + fake_resp.read_chunked.assert_called_once_with(decode_content=False) + fake_resp.close.assert_called_once() + fake_resp.release_conn.assert_called_once() + + def test_watch_retries_on_error_event(self): + fake_resp = Mock() + fake_resp.close = Mock() + fake_resp.release_conn = Mock() + fake_resp.read_chunked = Mock( + return_value=[ + '{"type": "ERROR", "object": {"code": 410, ' + '"reason": "Gone", "message": "error message"}}\n']) + + fake_api = Mock() + fake_api.get_thing = Mock(return_value=fake_resp) + w = Watch() try: - for _ in w.stream(fake_api.get_thing): + for _ in w.stream(fake_api.get_thing, resource_version=0): + self.fail(self, "Should fail with ApiException.") + except client.rest.ApiException: + pass + + # Two calls 
should be expected during a retry + fake_api.get_thing.assert_has_calls( + [call(resource_version=0, _preload_content=False, watch=True)] * 2) + fake_resp.read_chunked.assert_has_calls( + [call(decode_content=False)] * 2) + assert fake_resp.close.call_count == 2 + assert fake_resp.release_conn.call_count == 2 + + def test_watch_with_error_event_and_timeout_param(self): + fake_resp = Mock() + fake_resp.close = Mock() + fake_resp.release_conn = Mock() + fake_resp.read_chunked = Mock( + return_value=[ + '{"type": "ERROR", "object": {"code": 410, ' + '"reason": "Gone", "message": "error message"}}\n']) + + fake_api = Mock() + fake_api.get_thing = Mock(return_value=fake_resp) + + w = Watch() + try: + for _ in w.stream(fake_api.get_thing, timeout_seconds=10): self.fail(self, "Should fail with ApiException.") except client.rest.ApiException: pass fake_api.get_thing.assert_called_once_with( - _preload_content=False, watch=True) + _preload_content=False, watch=True, timeout_seconds=10) fake_resp.read_chunked.assert_called_once_with(decode_content=False) fake_resp.close.assert_called_once() fake_resp.release_conn.assert_called_once() From 2ea3efbc628597ed3ed2bf3c16e684727addd75b Mon Sep 17 00:00:00 2001 From: Bob Killen Date: Sun, 28 Feb 2021 15:05:02 -0500 Subject: [PATCH 49/90] Remove inactive members from OWNERS As a part of cleaning up inactive members (those with no activity within the past 18 months) from OWNERS files, this commit moves mbohlool from an approver to emeritus_approver. 
--- OWNERS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/OWNERS b/OWNERS index cfec4b11..c331e688 100644 --- a/OWNERS +++ b/OWNERS @@ -1,6 +1,8 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: - - mbohlool - yliaog - roycaihw +emeritus_approvers: + - mbohlool + From ed98daeae96c6dc4f245421497c390c009dcec72 Mon Sep 17 00:00:00 2001 From: Tom Haddon Date: Fri, 19 Mar 2021 05:56:27 +0100 Subject: [PATCH 50/90] Fix trivial typo in error messages - 'does not exist' vs. 'does not exists' --- config/incluster_config.py | 4 ++-- config/incluster_config_test.py | 4 ++-- config/kube_config.py | 2 +- config/kube_config_test.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/config/incluster_config.py b/config/incluster_config.py index 288a2688..5dabd4b7 100644 --- a/config/incluster_config.py +++ b/config/incluster_config.py @@ -70,13 +70,13 @@ def _load_config(self): self._environ[SERVICE_PORT_ENV_NAME])) if not os.path.isfile(self._token_filename): - raise ConfigException("Service token file does not exists.") + raise ConfigException("Service token file does not exist.") self._read_token_file() if not os.path.isfile(self._cert_filename): raise ConfigException( - "Service certification file does not exists.") + "Service certification file does not exist.") with open(self._cert_filename) as f: if not f.read(): diff --git a/config/incluster_config_test.py b/config/incluster_config_test.py index ef7468d7..856752be 100644 --- a/config/incluster_config_test.py +++ b/config/incluster_config_test.py @@ -142,7 +142,7 @@ def test_empty_host(self): def test_no_cert_file(self): loader = self.get_test_loader(cert_filename="not_exists_file_1123") - self._should_fail_load(loader, "cert file does not exists") + self._should_fail_load(loader, "cert file does not exist") def test_empty_cert_file(self): loader = self.get_test_loader( @@ -151,7 +151,7 @@ def test_empty_cert_file(self): def test_no_token_file(self): loader = 
self.get_test_loader(token_filename="not_exists_file_1123") - self._should_fail_load(loader, "token file does not exists") + self._should_fail_load(loader, "token file does not exist") def test_empty_token_file(self): loader = self.get_test_loader( diff --git a/config/kube_config.py b/config/kube_config.py index b90dbd02..61a261f6 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -120,7 +120,7 @@ def as_file(self): else: self._file = _create_temp_file_with_content(self._data) if self._file and not os.path.isfile(self._file): - raise ConfigException("File does not exists: %s" % self._file) + raise ConfigException("File does not exist: %s" % self._file) return self._file def as_data(self): diff --git a/config/kube_config_test.py b/config/kube_config_test.py index 9d299e9d..a82ef40e 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -178,7 +178,7 @@ def test_file_given_non_existing_file(self): temp_filename = NON_EXISTING_FILE obj = {TEST_FILE_KEY: temp_filename} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY) - self.expect_exception(t.as_file, "does not exists") + self.expect_exception(t.as_file, "does not exist") def test_file_given_data(self): obj = {TEST_DATA_KEY: TEST_DATA_BASE64} @@ -1165,7 +1165,7 @@ def test_ssl_no_cert_files(self): active_context="ssl-no_file") self.expect_exception( loader.load_and_set, - "does not exists", + "does not exist", FakeConfig()) def test_ssl(self): From 9bce8696ffb10e30757e93e72d5c4970d5144c16 Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Sun, 21 Mar 2021 23:01:35 -0400 Subject: [PATCH 51/90] Switching print statement to use legacy .format() method, in order to maintain backwards-compatibility with pre-3.6 Python versions --- config/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/__init__.py b/config/__init__.py index 1ff2dec2..204819eb 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -31,6 +31,6 @@ def load_config(**kwargs): if 
"kube_config_path" in kwargs.keys() or os.path.exists(KUBE_CONFIG_DEFAULT_LOCATION): load_kube_config(**kwargs) else: - print(f"kube_config_path not provided and default location ({KUBE_CONFIG_DEFAULT_LOCATION}) does not exist. " - "Using inCluster Config. This might not work.") + print("kube_config_path not provided and default location ({0}) does not exist. " + "Using inCluster Config. This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION)) load_incluster_config(**kwargs) From 0395a107185cef66592dfd26dbb8118179d272c4 Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Sun, 21 Mar 2021 23:27:47 -0400 Subject: [PATCH 52/90] Run black linter to make update-pycodestyle happy --- config/__init__.py | 21 +++++++++++---- watch/watch.py | 64 ++++++++++++++++++++++++---------------------- 2 files changed, 50 insertions(+), 35 deletions(-) diff --git a/config/__init__.py b/config/__init__.py index 204819eb..2ab141cd 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -15,8 +15,13 @@ import os from .config_exception import ConfigException from .incluster_config import load_incluster_config -from .kube_config import (list_kube_config_contexts, load_kube_config, - load_kube_config_from_dict, new_client_from_config, KUBE_CONFIG_DEFAULT_LOCATION) +from .kube_config import ( + list_kube_config_contexts, + load_kube_config, + load_kube_config_from_dict, + new_client_from_config, + KUBE_CONFIG_DEFAULT_LOCATION, +) def load_config(**kwargs): @@ -28,9 +33,15 @@ def load_config(**kwargs): :param kwargs: A combination of all possible kwargs that can be passed to either load_kube_config or load_incluster_config functions. """ - if "kube_config_path" in kwargs.keys() or os.path.exists(KUBE_CONFIG_DEFAULT_LOCATION): + if "kube_config_path" in kwargs.keys() or os.path.exists( + KUBE_CONFIG_DEFAULT_LOCATION + ): load_kube_config(**kwargs) else: - print("kube_config_path not provided and default location ({0}) does not exist. " - "Using inCluster Config. 
This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION)) + print( + "kube_config_path not provided and default location ({0}) does not exist. " + "Using inCluster Config. This might not work.".format( + KUBE_CONFIG_DEFAULT_LOCATION + ) + ) load_incluster_config(**kwargs) diff --git a/watch/watch.py b/watch/watch.py index b432778e..4047be0f 100644 --- a/watch/watch.py +++ b/watch/watch.py @@ -32,14 +32,15 @@ PY2 = sys.version_info[0] == 2 if PY2: import httplib + HTTP_STATUS_GONE = httplib.GONE else: import http + HTTP_STATUS_GONE = http.HTTPStatus.GONE class SimpleNamespace: - def __init__(self, **kwargs): self.__dict__.update(kwargs) @@ -47,7 +48,7 @@ def __init__(self, **kwargs): def _find_return_type(func): for line in pydoc.getdoc(func).splitlines(): if line.startswith(PYDOC_RETURN_LABEL): - return line[len(PYDOC_RETURN_LABEL):].strip() + return line[len(PYDOC_RETURN_LABEL) :].strip() return "" @@ -55,7 +56,7 @@ def iter_resp_lines(resp): prev = "" for seg in resp.read_chunked(decode_content=False): if isinstance(seg, bytes): - seg = seg.decode('utf8') + seg = seg.decode("utf8") seg = prev + seg lines = seg.split("\n") if not seg.endswith("\n"): @@ -69,7 +70,6 @@ def iter_resp_lines(resp): class Watch(object): - def __init__(self, return_type=None): self._raw_return_type = return_type self._stop = False @@ -84,29 +84,31 @@ def get_return_type(self, func): return self._raw_return_type return_type = _find_return_type(func) if return_type.endswith(TYPE_LIST_SUFFIX): - return return_type[:-len(TYPE_LIST_SUFFIX)] + return return_type[: -len(TYPE_LIST_SUFFIX)] return return_type def get_watch_argument_name(self, func): if PYDOC_FOLLOW_PARAM in pydoc.getdoc(func): - return 'follow' + return "follow" else: - return 'watch' + return "watch" def unmarshal_event(self, data, return_type): js = json.loads(data) - js['raw_object'] = js['object'] - if return_type and js['type'] != 'ERROR': - obj = SimpleNamespace(data=json.dumps(js['raw_object'])) - js['object'] = 
self._api_client.deserialize(obj, return_type) - if hasattr(js['object'], 'metadata'): - self.resource_version = js['object'].metadata.resource_version + js["raw_object"] = js["object"] + if return_type and js["type"] != "ERROR": + obj = SimpleNamespace(data=json.dumps(js["raw_object"])) + js["object"] = self._api_client.deserialize(obj, return_type) + if hasattr(js["object"], "metadata"): + self.resource_version = js["object"].metadata.resource_version # For custom objects that we don't have model defined, json # deserialization results in dictionary - elif (isinstance(js['object'], dict) and 'metadata' in js['object'] - and 'resourceVersion' in js['object']['metadata']): - self.resource_version = js['object']['metadata'][ - 'resourceVersion'] + elif ( + isinstance(js["object"], dict) + and "metadata" in js["object"] + and "resourceVersion" in js["object"]["metadata"] + ): + self.resource_version = js["object"]["metadata"]["resourceVersion"] return js def stream(self, func, *args, **kwargs): @@ -147,13 +149,13 @@ def stream(self, func, *args, **kwargs): return_type = self.get_return_type(func) watch_arg = self.get_watch_argument_name(func) kwargs[watch_arg] = True - kwargs['_preload_content'] = False - if 'resource_version' in kwargs: - self.resource_version = kwargs['resource_version'] + kwargs["_preload_content"] = False + if "resource_version" in kwargs: + self.resource_version = kwargs["resource_version"] # Do not attempt retries if user specifies a timeout. # We want to ensure we are returning within that timeout. 
- disable_retries = ('timeout_seconds' in kwargs) + disable_retries = "timeout_seconds" in kwargs retry_after_410 = False while True: resp = func(*args, **kwargs) @@ -163,20 +165,22 @@ def stream(self, func, *args, **kwargs): # return raw string when we are streaming log if watch_arg == "watch": event = self.unmarshal_event(line, return_type) - if isinstance(event, dict) \ - and event['type'] == 'ERROR': - obj = event['raw_object'] + if isinstance(event, dict) and event["type"] == "ERROR": + obj = event["raw_object"] # Current request expired, let's retry, (if enabled) # but only if we have not already retried. - if not disable_retries and not retry_after_410 and \ - obj['code'] == HTTP_STATUS_GONE: + if ( + not disable_retries + and not retry_after_410 + and obj["code"] == HTTP_STATUS_GONE + ): retry_after_410 = True break else: - reason = "%s: %s" % ( - obj['reason'], obj['message']) + reason = "%s: %s" % (obj["reason"], obj["message"]) raise client.rest.ApiException( - status=obj['code'], reason=reason) + status=obj["code"], reason=reason + ) else: retry_after_410 = False yield event @@ -188,7 +192,7 @@ def stream(self, func, *args, **kwargs): resp.close() resp.release_conn() if self.resource_version is not None: - kwargs['resource_version'] = self.resource_version + kwargs["resource_version"] = self.resource_version else: self._stop = True From 34b8304d5fe0b95df8b9968f766cf9e8598e778a Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Sun, 21 Mar 2021 23:38:48 -0400 Subject: [PATCH 53/90] autopep8 --- watch/watch.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/watch/watch.py b/watch/watch.py index 4047be0f..7a143f7e 100644 --- a/watch/watch.py +++ b/watch/watch.py @@ -48,7 +48,7 @@ def __init__(self, **kwargs): def _find_return_type(func): for line in pydoc.getdoc(func).splitlines(): if line.startswith(PYDOC_RETURN_LABEL): - return line[len(PYDOC_RETURN_LABEL) :].strip() + return line[len(PYDOC_RETURN_LABEL):].strip() return "" @@ 
-177,7 +177,8 @@ def stream(self, func, *args, **kwargs): retry_after_410 = True break else: - reason = "%s: %s" % (obj["reason"], obj["message"]) + reason = "%s: %s" % ( + obj["reason"], obj["message"]) raise client.rest.ApiException( status=obj["code"], reason=reason ) From 0a5b04feead64f73ae042665251e3aef5e35f84e Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Sun, 21 Mar 2021 23:44:32 -0400 Subject: [PATCH 54/90] Revert black and only try autopep8 this time --- config/__init__.py | 21 ++++------------ watch/watch.py | 61 +++++++++++++++++++++------------------------- 2 files changed, 33 insertions(+), 49 deletions(-) diff --git a/config/__init__.py b/config/__init__.py index 2ab141cd..204819eb 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -15,13 +15,8 @@ import os from .config_exception import ConfigException from .incluster_config import load_incluster_config -from .kube_config import ( - list_kube_config_contexts, - load_kube_config, - load_kube_config_from_dict, - new_client_from_config, - KUBE_CONFIG_DEFAULT_LOCATION, -) +from .kube_config import (list_kube_config_contexts, load_kube_config, + load_kube_config_from_dict, new_client_from_config, KUBE_CONFIG_DEFAULT_LOCATION) def load_config(**kwargs): @@ -33,15 +28,9 @@ def load_config(**kwargs): :param kwargs: A combination of all possible kwargs that can be passed to either load_kube_config or load_incluster_config functions. """ - if "kube_config_path" in kwargs.keys() or os.path.exists( - KUBE_CONFIG_DEFAULT_LOCATION - ): + if "kube_config_path" in kwargs.keys() or os.path.exists(KUBE_CONFIG_DEFAULT_LOCATION): load_kube_config(**kwargs) else: - print( - "kube_config_path not provided and default location ({0}) does not exist. " - "Using inCluster Config. This might not work.".format( - KUBE_CONFIG_DEFAULT_LOCATION - ) - ) + print("kube_config_path not provided and default location ({0}) does not exist. " + "Using inCluster Config. 
This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION)) load_incluster_config(**kwargs) diff --git a/watch/watch.py b/watch/watch.py index 7a143f7e..b432778e 100644 --- a/watch/watch.py +++ b/watch/watch.py @@ -32,15 +32,14 @@ PY2 = sys.version_info[0] == 2 if PY2: import httplib - HTTP_STATUS_GONE = httplib.GONE else: import http - HTTP_STATUS_GONE = http.HTTPStatus.GONE class SimpleNamespace: + def __init__(self, **kwargs): self.__dict__.update(kwargs) @@ -56,7 +55,7 @@ def iter_resp_lines(resp): prev = "" for seg in resp.read_chunked(decode_content=False): if isinstance(seg, bytes): - seg = seg.decode("utf8") + seg = seg.decode('utf8') seg = prev + seg lines = seg.split("\n") if not seg.endswith("\n"): @@ -70,6 +69,7 @@ def iter_resp_lines(resp): class Watch(object): + def __init__(self, return_type=None): self._raw_return_type = return_type self._stop = False @@ -84,31 +84,29 @@ def get_return_type(self, func): return self._raw_return_type return_type = _find_return_type(func) if return_type.endswith(TYPE_LIST_SUFFIX): - return return_type[: -len(TYPE_LIST_SUFFIX)] + return return_type[:-len(TYPE_LIST_SUFFIX)] return return_type def get_watch_argument_name(self, func): if PYDOC_FOLLOW_PARAM in pydoc.getdoc(func): - return "follow" + return 'follow' else: - return "watch" + return 'watch' def unmarshal_event(self, data, return_type): js = json.loads(data) - js["raw_object"] = js["object"] - if return_type and js["type"] != "ERROR": - obj = SimpleNamespace(data=json.dumps(js["raw_object"])) - js["object"] = self._api_client.deserialize(obj, return_type) - if hasattr(js["object"], "metadata"): - self.resource_version = js["object"].metadata.resource_version + js['raw_object'] = js['object'] + if return_type and js['type'] != 'ERROR': + obj = SimpleNamespace(data=json.dumps(js['raw_object'])) + js['object'] = self._api_client.deserialize(obj, return_type) + if hasattr(js['object'], 'metadata'): + self.resource_version = js['object'].metadata.resource_version # 
For custom objects that we don't have model defined, json # deserialization results in dictionary - elif ( - isinstance(js["object"], dict) - and "metadata" in js["object"] - and "resourceVersion" in js["object"]["metadata"] - ): - self.resource_version = js["object"]["metadata"]["resourceVersion"] + elif (isinstance(js['object'], dict) and 'metadata' in js['object'] + and 'resourceVersion' in js['object']['metadata']): + self.resource_version = js['object']['metadata'][ + 'resourceVersion'] return js def stream(self, func, *args, **kwargs): @@ -149,13 +147,13 @@ def stream(self, func, *args, **kwargs): return_type = self.get_return_type(func) watch_arg = self.get_watch_argument_name(func) kwargs[watch_arg] = True - kwargs["_preload_content"] = False - if "resource_version" in kwargs: - self.resource_version = kwargs["resource_version"] + kwargs['_preload_content'] = False + if 'resource_version' in kwargs: + self.resource_version = kwargs['resource_version'] # Do not attempt retries if user specifies a timeout. # We want to ensure we are returning within that timeout. - disable_retries = "timeout_seconds" in kwargs + disable_retries = ('timeout_seconds' in kwargs) retry_after_410 = False while True: resp = func(*args, **kwargs) @@ -165,23 +163,20 @@ def stream(self, func, *args, **kwargs): # return raw string when we are streaming log if watch_arg == "watch": event = self.unmarshal_event(line, return_type) - if isinstance(event, dict) and event["type"] == "ERROR": - obj = event["raw_object"] + if isinstance(event, dict) \ + and event['type'] == 'ERROR': + obj = event['raw_object'] # Current request expired, let's retry, (if enabled) # but only if we have not already retried. 
- if ( - not disable_retries - and not retry_after_410 - and obj["code"] == HTTP_STATUS_GONE - ): + if not disable_retries and not retry_after_410 and \ + obj['code'] == HTTP_STATUS_GONE: retry_after_410 = True break else: reason = "%s: %s" % ( - obj["reason"], obj["message"]) + obj['reason'], obj['message']) raise client.rest.ApiException( - status=obj["code"], reason=reason - ) + status=obj['code'], reason=reason) else: retry_after_410 = False yield event @@ -193,7 +188,7 @@ def stream(self, func, *args, **kwargs): resp.close() resp.release_conn() if self.resource_version is not None: - kwargs["resource_version"] = self.resource_version + kwargs['resource_version'] = self.resource_version else: self._stop = True From cf2f312fd06debceee9a06afe2eefccbd2649f1e Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Sun, 21 Mar 2021 23:59:43 -0400 Subject: [PATCH 55/90] Applied autopep8 properly this time. This should work --- config/__init__.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/config/__init__.py b/config/__init__.py index 204819eb..c7c68777 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -15,8 +15,12 @@ import os from .config_exception import ConfigException from .incluster_config import load_incluster_config -from .kube_config import (list_kube_config_contexts, load_kube_config, - load_kube_config_from_dict, new_client_from_config, KUBE_CONFIG_DEFAULT_LOCATION) +from .kube_config import ( + list_kube_config_contexts, + load_kube_config, + load_kube_config_from_dict, + new_client_from_config, + KUBE_CONFIG_DEFAULT_LOCATION) def load_config(**kwargs): @@ -28,9 +32,11 @@ def load_config(**kwargs): :param kwargs: A combination of all possible kwargs that can be passed to either load_kube_config or load_incluster_config functions. 
""" - if "kube_config_path" in kwargs.keys() or os.path.exists(KUBE_CONFIG_DEFAULT_LOCATION): + if "kube_config_path" in kwargs.keys() or os.path.exists( + KUBE_CONFIG_DEFAULT_LOCATION): load_kube_config(**kwargs) else: - print("kube_config_path not provided and default location ({0}) does not exist. " - "Using inCluster Config. This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION)) + print( + "kube_config_path not provided and default location ({0}) does not exist. " + "Using inCluster Config. This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION)) load_incluster_config(**kwargs) From b5aa2dd3718949a066cf1f01927ef4432f2e4dcc Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Mon, 22 Mar 2021 00:16:52 -0400 Subject: [PATCH 56/90] Address remarks from pycodestyle --- config/__init__.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/config/__init__.py b/config/__init__.py index c7c68777..607adc72 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -26,10 +26,13 @@ def load_config(**kwargs): """ Wrapper function to load the kube_config. - It will initially try to load_kube_config from provided path, then check if the KUBE_CONFIG_DEFAULT_LOCATION exists - If neither exists- it will fall back to load_incluster_config and inform the user accordingly. + It will initially try to load_kube_config from provided path, + then check if the KUBE_CONFIG_DEFAULT_LOCATION exists + If neither exists- it will fall back to load_incluster_config + and inform the user accordingly. - :param kwargs: A combination of all possible kwargs that can be passed to either load_kube_config or + :param kwargs: A combination of all possible kwargs that + can be passed to either load_kube_config or load_incluster_config functions. 
""" if "kube_config_path" in kwargs.keys() or os.path.exists( @@ -37,6 +40,8 @@ def load_config(**kwargs): load_kube_config(**kwargs) else: print( - "kube_config_path not provided and default location ({0}) does not exist. " - "Using inCluster Config. This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION)) + "kube_config_path not provided and " + "default location ({0}) does not exist. " + "Using inCluster Config. " + "This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION)) load_incluster_config(**kwargs) From 698299af9d3229d02624c4e6bb87e076bdcea000 Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Mon, 22 Mar 2021 00:22:04 -0400 Subject: [PATCH 57/90] isort --- config/__init__.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/config/__init__.py b/config/__init__.py index 607adc72..41702b96 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -13,14 +13,12 @@ # limitations under the License. import os + from .config_exception import ConfigException from .incluster_config import load_incluster_config -from .kube_config import ( - list_kube_config_contexts, - load_kube_config, - load_kube_config_from_dict, - new_client_from_config, - KUBE_CONFIG_DEFAULT_LOCATION) +from .kube_config import (KUBE_CONFIG_DEFAULT_LOCATION, + list_kube_config_contexts, load_kube_config, + load_kube_config_from_dict, new_client_from_config) def load_config(**kwargs): From 90399663f378b33227f723d3f0c1677965b6d96b Mon Sep 17 00:00:00 2001 From: Darren Hague Date: Thu, 8 Apr 2021 13:49:46 +0100 Subject: [PATCH 58/90] Fixes kubernetes-client/python issue 1047 "ResponseNotChunked from watch" In recent versions of K8S (>1.16?), when a `Watch.stream()` call uses a resource_version which is too old the resulting 410 error is wrapped in JSON and returned in a non-chunked 200 response. Using `resp.stream()` instead of `resp.read_chunked()` automatically handles the response being either chunked or non-chunked. 
--- watch/watch.py | 2 +- watch/watch_test.py | 44 +++++++++++++++++++++++++------------------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/watch/watch.py b/watch/watch.py index b432778e..3bbb770d 100644 --- a/watch/watch.py +++ b/watch/watch.py @@ -53,7 +53,7 @@ def _find_return_type(func): def iter_resp_lines(resp): prev = "" - for seg in resp.read_chunked(decode_content=False): + for seg in resp.stream(amt=None, decode_content=False): if isinstance(seg, bytes): seg = seg.decode('utf8') seg = prev + seg diff --git a/watch/watch_test.py b/watch/watch_test.py index 32cf6334..cad72fd8 100644 --- a/watch/watch_test.py +++ b/watch/watch_test.py @@ -30,7 +30,7 @@ def test_watch_with_decode(self): fake_resp = Mock() fake_resp.close = Mock() fake_resp.release_conn = Mock() - fake_resp.read_chunked = Mock( + fake_resp.stream = Mock( return_value=[ '{"type": "ADDED", "object": {"metadata": {"name": "test1",' '"resourceVersion": "1"}, "spec": {}, "status": {}}}\n', @@ -63,7 +63,8 @@ def test_watch_with_decode(self): fake_api.get_namespaces.assert_called_once_with( _preload_content=False, watch=True) - fake_resp.read_chunked.assert_called_once_with(decode_content=False) + fake_resp.stream.assert_called_once_with( + amt=None, decode_content=False) fake_resp.close.assert_called_once() fake_resp.release_conn.assert_called_once() @@ -71,7 +72,7 @@ def test_watch_for_follow(self): fake_resp = Mock() fake_resp.close = Mock() fake_resp.release_conn = Mock() - fake_resp.read_chunked = Mock( + fake_resp.stream = Mock( return_value=[ 'log_line_1\n', 'log_line_2\n']) @@ -92,7 +93,8 @@ def test_watch_for_follow(self): fake_api.read_namespaced_pod_log.assert_called_once_with( _preload_content=False, follow=True) - fake_resp.read_chunked.assert_called_once_with(decode_content=False) + fake_resp.stream.assert_called_once_with( + amt=None, decode_content=False) fake_resp.close.assert_called_once() fake_resp.release_conn.assert_called_once() @@ -112,6 +114,7 @@ def 
test_watch_resource_version_set(self): '{"type": "ADDED", "object": {"metadata": {"name": "test3",' '"resourceVersion": "3"}, "spec": {}, "status": {}}}\n' ] + # return nothing on the first call and values on the second # this emulates a watch from a rv that returns nothing in the first k8s # watch reset and values later @@ -123,7 +126,7 @@ def get_values(*args, **kwargs): else: return values - fake_resp.read_chunked = Mock( + fake_resp.stream = Mock( side_effect=get_values) fake_api = Mock() @@ -170,7 +173,7 @@ def test_watch_stream_twice(self): fake_resp = Mock() fake_resp.close = Mock() fake_resp.release_conn = Mock() - fake_resp.read_chunked = Mock( + fake_resp.stream = Mock( return_value=['{"type": "ADDED", "object": 1}\n'] * 4) fake_api = Mock() @@ -186,8 +189,8 @@ def test_watch_stream_twice(self): self.assertEqual(count, 3) fake_api.get_namespaces.assert_called_once_with( _preload_content=False, watch=True) - fake_resp.read_chunked.assert_called_once_with( - decode_content=False) + fake_resp.stream.assert_called_once_with( + amt=None, decode_content=False) fake_resp.close.assert_called_once() fake_resp.release_conn.assert_called_once() @@ -197,7 +200,7 @@ def test_watch_stream_loop(self): fake_resp = Mock() fake_resp.close = Mock() fake_resp.release_conn = Mock() - fake_resp.read_chunked = Mock( + fake_resp.stream = Mock( return_value=['{"type": "ADDED", "object": 1}\n']) fake_api = Mock() @@ -219,7 +222,7 @@ def test_watch_stream_loop(self): self.assertEqual(count, 2) self.assertEqual(fake_api.get_namespaces.call_count, 2) - self.assertEqual(fake_resp.read_chunked.call_count, 2) + self.assertEqual(fake_resp.stream.call_count, 2) self.assertEqual(fake_resp.close.call_count, 2) self.assertEqual(fake_resp.release_conn.call_count, 2) @@ -256,7 +259,7 @@ def test_watch_with_exception(self): fake_resp = Mock() fake_resp.close = Mock() fake_resp.release_conn = Mock() - fake_resp.read_chunked = Mock(side_effect=KeyError('expected')) + fake_resp.stream = 
Mock(side_effect=KeyError('expected')) fake_api = Mock() fake_api.get_thing = Mock(return_value=fake_resp) @@ -271,7 +274,8 @@ def test_watch_with_exception(self): fake_api.get_thing.assert_called_once_with( _preload_content=False, watch=True) - fake_resp.read_chunked.assert_called_once_with(decode_content=False) + fake_resp.stream.assert_called_once_with( + amt=None, decode_content=False) fake_resp.close.assert_called_once() fake_resp.release_conn.assert_called_once() @@ -279,7 +283,7 @@ def test_watch_with_error_event(self): fake_resp = Mock() fake_resp.close = Mock() fake_resp.release_conn = Mock() - fake_resp.read_chunked = Mock( + fake_resp.stream = Mock( return_value=[ '{"type": "ERROR", "object": {"code": 410, ' '"reason": "Gone", "message": "error message"}}\n']) @@ -294,7 +298,8 @@ def test_watch_with_error_event(self): fake_api.get_thing.assert_called_once_with( _preload_content=False, watch=True) - fake_resp.read_chunked.assert_called_once_with(decode_content=False) + fake_resp.stream.assert_called_once_with( + amt=None, decode_content=False) fake_resp.close.assert_called_once() fake_resp.release_conn.assert_called_once() @@ -302,7 +307,7 @@ def test_watch_retries_on_error_event(self): fake_resp = Mock() fake_resp.close = Mock() fake_resp.release_conn = Mock() - fake_resp.read_chunked = Mock( + fake_resp.stream = Mock( return_value=[ '{"type": "ERROR", "object": {"code": 410, ' '"reason": "Gone", "message": "error message"}}\n']) @@ -320,8 +325,8 @@ def test_watch_retries_on_error_event(self): # Two calls should be expected during a retry fake_api.get_thing.assert_has_calls( [call(resource_version=0, _preload_content=False, watch=True)] * 2) - fake_resp.read_chunked.assert_has_calls( - [call(decode_content=False)] * 2) + fake_resp.stream.assert_has_calls( + [call(amt=None, decode_content=False)] * 2) assert fake_resp.close.call_count == 2 assert fake_resp.release_conn.call_count == 2 @@ -329,7 +334,7 @@ def 
test_watch_with_error_event_and_timeout_param(self): fake_resp = Mock() fake_resp.close = Mock() fake_resp.release_conn = Mock() - fake_resp.read_chunked = Mock( + fake_resp.stream = Mock( return_value=[ '{"type": "ERROR", "object": {"code": 410, ' '"reason": "Gone", "message": "error message"}}\n']) @@ -346,7 +351,8 @@ def test_watch_with_error_event_and_timeout_param(self): fake_api.get_thing.assert_called_once_with( _preload_content=False, watch=True, timeout_seconds=10) - fake_resp.read_chunked.assert_called_once_with(decode_content=False) + fake_resp.stream.assert_called_once_with( + amt=None, decode_content=False) fake_resp.close.assert_called_once() fake_resp.release_conn.assert_called_once() From 10ae4760b53a917116ae7525a7bbc94f35632cfb Mon Sep 17 00:00:00 2001 From: Yu Liao Date: Mon, 12 Apr 2021 17:17:42 -0700 Subject: [PATCH 59/90] quick fix of decoding error for BOOKMARK event --- watch/watch.py | 6 +++++- watch/watch_test.py | 13 +++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/watch/watch.py b/watch/watch.py index 3bbb770d..71fd4591 100644 --- a/watch/watch.py +++ b/watch/watch.py @@ -96,7 +96,11 @@ def get_watch_argument_name(self, func): def unmarshal_event(self, data, return_type): js = json.loads(data) js['raw_object'] = js['object'] - if return_type and js['type'] != 'ERROR': + # BOOKMARK event is treated the same as ERROR for a quick fix of + # decoding exception + # TODO: make use of the resource_version in BOOKMARK event for more + # efficient WATCH + if return_type and js['type'] != 'ERROR' and js['type'] != 'BOOKMARK': obj = SimpleNamespace(data=json.dumps(js['raw_object'])) js['object'] = self._api_client.deserialize(obj, return_type) if hasattr(js['object'], 'metadata'): diff --git a/watch/watch_test.py b/watch/watch_test.py index cad72fd8..f87a4ea8 100644 --- a/watch/watch_test.py +++ b/watch/watch_test.py @@ -255,6 +255,19 @@ def test_unmarshal_with_custom_object(self): self.assertEqual("1", 
event['object']['metadata']['resourceVersion']) self.assertEqual("1", w.resource_version) + def test_unmarshal_with_bookmark(self): + w = Watch() + event = w.unmarshal_event( + '{"type":"BOOKMARK","object":{"kind":"Job","apiVersion":"batch/v1"' + ',"metadata":{"resourceVersion":"1"},"spec":{"template":{' + '"metadata":{},"spec":{"containers":null}}},"status":{}}}', + 'V1Job') + self.assertEqual("BOOKMARK", event['type']) + # Watch.resource_version is *not* updated, as BOOKMARK is treated the + # same as ERROR for a quick fix of decoding exception, + # resource_version in BOOKMARK is *not* used at all. + self.assertEqual(None, w.resource_version) + def test_watch_with_exception(self): fake_resp = Mock() fake_resp.close = Mock() From fc5b7302b161697ed6fbdf0c5aa85a119768255a Mon Sep 17 00:00:00 2001 From: JackYoon Date: Mon, 12 Apr 2021 18:57:34 +0800 Subject: [PATCH 60/90] load_kube_config_from_dict() support define custom temp files path --- config/kube_config.py | 45 +++++++++++++++++++++++++------------- config/kube_config_test.py | 23 +++++++++++++++++++ 2 files changed, 53 insertions(+), 15 deletions(-) diff --git a/config/kube_config.py b/config/kube_config.py index 61a261f6..584b8a41 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -60,7 +60,7 @@ def _cleanup_temp_files(): _temp_files = {} -def _create_temp_file_with_content(content): +def _create_temp_file_with_content(content, temp_file_path=None): if len(_temp_files) == 0: atexit.register(_cleanup_temp_files) # Because we may change context several times, try to remember files we @@ -68,7 +68,9 @@ def _create_temp_file_with_content(content): content_key = str(content) if content_key in _temp_files: return _temp_files[content_key] - _, name = tempfile.mkstemp() + if temp_file_path and not os.path.isdir(temp_file_path): + os.makedirs(name=temp_file_path) + _, name = tempfile.mkstemp(dir=temp_file_path) _temp_files[content_key] = name with open(name, 'wb') as fd: fd.write(content.encode() if 
isinstance(content, str) else content) @@ -91,12 +93,14 @@ class FileOrData(object): result in base64 encode of the file content after read.""" def __init__(self, obj, file_key_name, data_key_name=None, - file_base_path="", base64_file_content=True): + file_base_path="", base64_file_content=True, + temp_file_path=None): if not data_key_name: data_key_name = file_key_name + "-data" self._file = None self._data = None self._base64_file_content = base64_file_content + self._temp_file_path = temp_file_path if not obj: return if data_key_name in obj: @@ -116,9 +120,10 @@ def as_file(self): else: content = self._data self._file = _create_temp_file_with_content( - base64.standard_b64decode(content)) + base64.standard_b64decode(content), self._temp_file_path) else: - self._file = _create_temp_file_with_content(self._data) + self._file = _create_temp_file_with_content( + self._data, self._temp_file_path) if self._file and not os.path.isfile(self._file): raise ConfigException("File does not exist: %s" % self._file) return self._file @@ -182,7 +187,8 @@ class KubeConfigLoader(object): def __init__(self, config_dict, active_context=None, get_google_credentials=None, config_base_path="", - config_persister=None): + config_persister=None, + temp_file_path=None): if config_dict is None: raise ConfigException( @@ -199,6 +205,7 @@ def __init__(self, config_dict, active_context=None, self.set_active_context(active_context) self._config_base_path = config_base_path self._config_persister = config_persister + self._temp_file_path = temp_file_path def _refresh_credentials_with_cmd_path(): config = self._user['auth-provider']['config'] @@ -489,12 +496,14 @@ def _load_from_exec_plugin(self): status, None, data_key_name='clientCertificateData', file_base_path=base_path, - base64_file_content=False).as_file() + base64_file_content=False, + temp_file_path=self._temp_file_path).as_file() self.key_file = FileOrData( status, None, data_key_name='clientKeyData', file_base_path=base_path, - 
base64_file_content=False).as_file() + base64_file_content=False, + temp_file_path=self._temp_file_path).as_file() return True logging.error('exec: missing token or clientCertificateData field ' 'in plugin output') @@ -507,7 +516,8 @@ def _load_user_token(self): token = FileOrData( self._user, 'tokenFile', 'token', file_base_path=base_path, - base64_file_content=False).as_data() + base64_file_content=False, + temp_file_path=self._temp_file_path).as_data() if token: self.token = "Bearer %s" % token return True @@ -533,17 +543,20 @@ def _load_cluster_info(self): base_path = self._get_base_path(self._cluster.path) self.ssl_ca_cert = FileOrData( self._cluster, 'certificate-authority', - file_base_path=base_path).as_file() + file_base_path=base_path, + temp_file_path=self._temp_file_path).as_file() if 'cert_file' not in self.__dict__: # cert_file could have been provided by # _load_from_exec_plugin; only load from the _user # section if we need it. self.cert_file = FileOrData( self._user, 'client-certificate', - file_base_path=base_path).as_file() + file_base_path=base_path, + temp_file_path=self._temp_file_path).as_file() self.key_file = FileOrData( self._user, 'client-key', - file_base_path=base_path).as_file() + file_base_path=base_path, + temp_file_path=self._temp_file_path).as_file() if 'insecure-skip-tls-verify' in self._cluster: self.verify_ssl = not self._cluster['insecure-skip-tls-verify'] @@ -811,7 +824,8 @@ def load_kube_config(config_file=None, context=None, def load_kube_config_from_dict(config_dict, context=None, client_configuration=None, - persist_config=True): + persist_config=True, + temp_file_path=None): """Loads authentication and cluster information from config_dict file and stores them in kubernetes.client.configuration. @@ -822,8 +836,8 @@ def load_kube_config_from_dict(config_dict, context=None, set configs to. :param persist_config: If True, config file will be updated when changed (e.g GCP token refresh). 
+ :param temp_file_path: store temp files path. """ - if config_dict is None: raise ConfigException( 'Invalid kube-config dict. ' @@ -831,7 +845,8 @@ def load_kube_config_from_dict(config_dict, context=None, loader = _get_kube_config_loader( config_dict=config_dict, active_context=context, - persist_config=persist_config) + persist_config=persist_config, + temp_file_path=temp_file_path) if client_configuration is None: config = type.__call__(Configuration) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index a82ef40e..c33ffed7 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1290,6 +1290,29 @@ def test_load_kube_config_from_dict(self): client_configuration=actual) self.assertEqual(expected, actual) + def test_load_kube_config_from_dict_with_temp_file_path(self): + expected = FakeConfig( + host=TEST_SSL_HOST, + token=BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, + cert_file=self._create_temp_file(TEST_CLIENT_CERT), + key_file=self._create_temp_file(TEST_CLIENT_KEY), + ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH), + verify_ssl=True + ) + actual = FakeConfig() + tmp_path = os.path.join( + os.path.dirname( + os.path.dirname( + os.path.abspath(__file__))), + 'tmp_file_path_test') + load_kube_config_from_dict(config_dict=self.TEST_KUBE_CONFIG, + context="ssl", + client_configuration=actual, + temp_file_path=tmp_path) + self.assertFalse(True if not os.listdir(tmp_path) else False) + self.assertEqual(expected, actual) + _cleanup_temp_files + def test_load_kube_config_from_empty_file_like_object(self): config_file_like_object = io.StringIO() self.assertRaises( From 96bb22fac5f65b2ea7696a0d48f3f1aa42f9457a Mon Sep 17 00:00:00 2001 From: Haowei Cai Date: Tue, 20 Apr 2021 17:06:52 -0700 Subject: [PATCH 61/90] add PR template --- .github/PULL_REQUEST_TEMPLATE.md | 72 ++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git 
a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..f6af35b4 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,72 @@ + + +#### What type of PR is this? + + + +#### What this PR does / why we need it: + +#### Which issue(s) this PR fixes: + +Fixes # + +#### Special notes for your reviewer: + +#### Does this PR introduce a user-facing change? + +```release-note + +``` + +#### Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.: + + +```docs + +``` From 90e16c698e189c7a674868803ea1e1ff70452d10 Mon Sep 17 00:00:00 2001 From: jonasdlindner Date: Fri, 30 Apr 2021 23:53:25 +0200 Subject: [PATCH 62/90] Rename Method _websocket_reqeust to _websocket_request --- stream/stream.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stream/stream.py b/stream/stream.py index 57bac758..115a899b 100644 --- a/stream/stream.py +++ b/stream/stream.py @@ -17,7 +17,7 @@ from . import ws_client -def _websocket_reqeust(websocket_request, force_kwargs, api_method, *args, **kwargs): +def _websocket_request(websocket_request, force_kwargs, api_method, *args, **kwargs): """Override the ApiClient.request method with an alternative websocket based method and call the supplied Kubernetes API method with that in place.""" if force_kwargs: @@ -37,5 +37,5 @@ def _websocket_reqeust(websocket_request, force_kwargs, api_method, *args, **kwa api_client.request = prev_request -stream = functools.partial(_websocket_reqeust, ws_client.websocket_call, None) -portforward = functools.partial(_websocket_reqeust, ws_client.portforward_call, {'_preload_content':False}) +stream = functools.partial(_websocket_request, ws_client.websocket_call, None) +portforward = functools.partial(_websocket_request, ws_client.portforward_call, {'_preload_content':False}) From bde3935f2698b0145b8c1cf50fae0f67215e7c1f Mon Sep 17 00:00:00 2001 From: Yash Kumar Singh Date: Tue, 27 Apr 2021 12:05:04 +0530 
Subject: [PATCH 63/90] =?UTF-8?q?Support=20customizing=20=E2=80=9CAccept?= =?UTF-8?q?=E2=80=9D=20header=20and=20added=20a=20testcase=20to=20test=20c?= =?UTF-8?q?ustom=20header?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dynamic/client.py | 13 ++++++++----- dynamic/test_client.py | 18 +++++++++++++++++- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/dynamic/client.py b/dynamic/client.py index 7b82b3d6..f6926508 100644 --- a/dynamic/client.py +++ b/dynamic/client.py @@ -219,11 +219,14 @@ def request(self, method, path, body=None, **params): header_params = params.get('header_params', {}) form_params = [] local_var_files = {} - # HTTP header `Accept` - header_params['Accept'] = self.client.select_header_accept([ - 'application/json', - 'application/yaml', - ]) + + # Checking Accept header. + new_header_params = dict((key.lower(), value) for key, value in header_params.items()) + if not 'accept' in new_header_params: + header_params['Accept'] = self.client.select_header_accept([ + 'application/json', + 'application/yaml', + ]) # HTTP header `Content-Type` if params.get('content_type'): diff --git a/dynamic/test_client.py b/dynamic/test_client.py index b68e081f..54e41bb4 100644 --- a/dynamic/test_client.py +++ b/dynamic/test_client.py @@ -359,7 +359,7 @@ def test_configmap_apis(self): resp = api.get(namespace='default', pretty=True, label_selector="e2e-test=true") self.assertEqual([], resp.items) - + def test_node_apis(self): client = DynamicClient(api_client.ApiClient(configuration=self.config)) api = client.resources.get(api_version='v1', kind='Node') @@ -367,3 +367,19 @@ def test_node_apis(self): for item in api.get().items: node = api.get(name=item.metadata.name) self.assertTrue(len(dict(node.metadata.labels)) > 0) + + # test_node_apis_partial_object_metadata lists all nodes in the cluster, but only retrieves object metadata + def test_node_apis_partial_object_metadata(self): + client = 
DynamicClient(api_client.ApiClient(configuration=self.config)) + api = client.resources.get(api_version='v1', kind='Node') + + params = {'header_params': {'Accept': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}} + resp = api.get(**params) + self.assertEqual('PartialObjectMetadataList', resp.kind) + self.assertEqual('meta.k8s.io/v1', resp.apiVersion) + + params = {'header_params': {'aCcePt': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}} + resp = api.get(**params) + self.assertEqual('PartialObjectMetadataList', resp.kind) + self.assertEqual('meta.k8s.io/v1', resp.apiVersion) + From 711d4ab880d66b7c34f5dae5a5379d05d51d26fe Mon Sep 17 00:00:00 2001 From: Priyanka Saggu Date: Fri, 14 May 2021 22:51:52 +0530 Subject: [PATCH 64/90] drop python2 support - remove python2 from the .travis.yaml file - remove python2 from the tox.ini file - remove `-y` flag from `isort` command in `update-pycodestle.sh` script - add update-pycodestyle, coverage & codecov tests for python3 Signed-off-by: Priyanka Saggu --- .travis.yml | 10 +++------- tox.ini | 4 ++-- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index b44ec90a..86a1bfa2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,16 +18,12 @@ jobs: script: ./hack/verify-boilerplate.sh python: 3.7 - stage: test - python: 2.7 - env: TOXENV=py27 - - python: 2.7 - env: TOXENV=py27-functional - - python: 2.7 + python: 3.9 env: TOXENV=update-pycodestyle + - python: 3.9 + env: TOXENV=coverage,codecov - python: 3.7 env: TOXENV=docs - - python: 2.7 - env: TOXENV=coverage,codecov - python: 3.5 env: TOXENV=py35 - python: 3.5 diff --git a/tox.ini b/tox.ini index 71c4d2d8..37a188f1 100644 --- a/tox.ini +++ b/tox.ini @@ -1,8 +1,8 @@ [tox] skipsdist = True envlist = - py27, py3{5,6,7,8,9} - py27-functional, py3{5,6,7,8,9}-functional + py3{5,6,7,8,9} + py3{5,6,7,8,9}-functional [testenv] passenv = TOXENV CI TRAVIS TRAVIS_* From 6d1c8d3713057e87d973d853b36373c06901d092 
Mon Sep 17 00:00:00 2001 From: Moshe Shitrit Date: Sat, 19 Jun 2021 17:42:37 +0300 Subject: [PATCH 65/90] Apply suggestion --- config/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/__init__.py b/config/__init__.py index 41702b96..76297f81 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -26,7 +26,7 @@ def load_config(**kwargs): Wrapper function to load the kube_config. It will initially try to load_kube_config from provided path, then check if the KUBE_CONFIG_DEFAULT_LOCATION exists - If neither exists- it will fall back to load_incluster_config + If neither exists, it will fall back to load_incluster_config and inform the user accordingly. :param kwargs: A combination of all possible kwargs that From dca0ca6df23098b63a347e12ceb0dca028a20572 Mon Sep 17 00:00:00 2001 From: David Otto Date: Wed, 7 Jul 2021 12:54:38 +0200 Subject: [PATCH 66/90] Fix load_config: expand ~ --- config/kube_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/kube_config.py b/config/kube_config.py index 584b8a41..040234d9 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -45,7 +45,7 @@ pass EXPIRY_SKEW_PREVENTION_DELAY = datetime.timedelta(minutes=5) -KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', '~/.kube/config') +KUBE_CONFIG_DEFAULT_LOCATION = os.path.expanduser(os.environ.get('KUBECONFIG', '~/.kube/config')) ENV_KUBECONFIG_PATH_SEPARATOR = ';' if platform.system() == 'Windows' else ':' _temp_files = {} From e2ba3fb9fc2cd9dfdb873ab5d48255a1e7c2d26a Mon Sep 17 00:00:00 2001 From: David Otto Date: Mon, 12 Jul 2021 11:21:13 +0200 Subject: [PATCH 67/90] do expanduser in load_config --- config/__init__.py | 5 ++--- config/kube_config.py | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/config/__init__.py b/config/__init__.py index 76297f81..e1bf7f57 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language 
governing permissions and # limitations under the License. -import os +from os.path import exists, expanduser from .config_exception import ConfigException from .incluster_config import load_incluster_config @@ -33,8 +33,7 @@ def load_config(**kwargs): can be passed to either load_kube_config or load_incluster_config functions. """ - if "kube_config_path" in kwargs.keys() or os.path.exists( - KUBE_CONFIG_DEFAULT_LOCATION): + if "kube_config_path" in kwargs.keys() or exists(expanduser(KUBE_CONFIG_DEFAULT_LOCATION)): load_kube_config(**kwargs) else: print( diff --git a/config/kube_config.py b/config/kube_config.py index 040234d9..584b8a41 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -45,7 +45,7 @@ pass EXPIRY_SKEW_PREVENTION_DELAY = datetime.timedelta(minutes=5) -KUBE_CONFIG_DEFAULT_LOCATION = os.path.expanduser(os.environ.get('KUBECONFIG', '~/.kube/config')) +KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', '~/.kube/config') ENV_KUBECONFIG_PATH_SEPARATOR = ';' if platform.system() == 'Windows' else ':' _temp_files = {} From 6f9e3327a8fbdb791a654afffe94840081390189 Mon Sep 17 00:00:00 2001 From: Andrei Marin Date: Sun, 27 Jun 2021 18:26:50 +0300 Subject: [PATCH 68/90] Fix replication controller pods delete in tests --- dynamic/test_client.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/dynamic/test_client.py b/dynamic/test_client.py index 54e41bb4..78be0cc4 100644 --- a/dynamic/test_client.py +++ b/dynamic/test_client.py @@ -318,8 +318,10 @@ def test_replication_controller_apis(self): self.assertEqual(name, resp.metadata.name) self.assertEqual(2, resp.spec.replicas) - resp = api.delete( - name=name, body={}, namespace='default') + api.delete( + name=name, + namespace='default', + propagation_policy='Background') def test_configmap_apis(self): client = DynamicClient(api_client.ApiClient(configuration=self.config)) @@ -357,9 +359,12 @@ def test_configmap_apis(self): resp = api.delete( 
name=name, body={}, namespace='default') - resp = api.get(namespace='default', pretty=True, label_selector="e2e-test=true") + resp = api.get( + namespace='default', + pretty=True, + label_selector="e2e-test=true") self.assertEqual([], resp.items) - + def test_node_apis(self): client = DynamicClient(api_client.ApiClient(configuration=self.config)) api = client.resources.get(api_version='v1', kind='Node') @@ -367,19 +372,23 @@ def test_node_apis(self): for item in api.get().items: node = api.get(name=item.metadata.name) self.assertTrue(len(dict(node.metadata.labels)) > 0) - - # test_node_apis_partial_object_metadata lists all nodes in the cluster, but only retrieves object metadata + + # test_node_apis_partial_object_metadata lists all nodes in the cluster, + # but only retrieves object metadata def test_node_apis_partial_object_metadata(self): client = DynamicClient(api_client.ApiClient(configuration=self.config)) api = client.resources.get(api_version='v1', kind='Node') - - params = {'header_params': {'Accept': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}} + + params = { + 'header_params': { + 'Accept': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}} resp = api.get(**params) self.assertEqual('PartialObjectMetadataList', resp.kind) self.assertEqual('meta.k8s.io/v1', resp.apiVersion) - params = {'header_params': {'aCcePt': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}} + params = { + 'header_params': { + 'aCcePt': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}} resp = api.get(**params) self.assertEqual('PartialObjectMetadataList', resp.kind) self.assertEqual('meta.k8s.io/v1', resp.apiVersion) - From cbb71698d775c910d6b18432f48a06dd35449e76 Mon Sep 17 00:00:00 2001 From: Mike Graves Date: Mon, 26 Jul 2021 13:23:57 -0400 Subject: [PATCH 69/90] Add support for dryRun parameter --- dynamic/client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dynamic/client.py 
b/dynamic/client.py index f6926508..72b38113 100644 --- a/dynamic/client.py +++ b/dynamic/client.py @@ -215,6 +215,8 @@ def request(self, method, path, body=None, **params): query_params.append(('propagationPolicy', params['propagation_policy'])) if params.get('orphan_dependents') is not None: query_params.append(('orphanDependents', params['orphan_dependents'])) + if params.get('dry_run') is not None: + query_params.append(('dryRun', params['dry_run'])) header_params = params.get('header_params', {}) form_params = [] From 59ba58b49469bd63b69650d7a0ad0429bc08a0a3 Mon Sep 17 00:00:00 2001 From: Hedi Nasr Date: Wed, 23 Jun 2021 11:27:40 +0200 Subject: [PATCH 70/90] Add the ability to stop the watcher gracefully. --- dynamic/client.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/dynamic/client.py b/dynamic/client.py index f6926508..43f6bf40 100644 --- a/dynamic/client.py +++ b/dynamic/client.py @@ -144,7 +144,7 @@ def patch(self, resource, body=None, name=None, namespace=None, **kwargs): return self.request('patch', path, body=body, content_type=content_type, **kwargs) - def watch(self, resource, namespace=None, name=None, label_selector=None, field_selector=None, resource_version=None, timeout=None): + def watch(self, resource, namespace=None, name=None, label_selector=None, field_selector=None, resource_version=None, timeout=None, watcher=None): """ Stream events for a resource from the Kubernetes API @@ -156,6 +156,7 @@ def watch(self, resource, namespace=None, name=None, label_selector=None, field_ :param resource_version: The version with which to filter results. Only events with a resource_version greater than this value will be returned :param timeout: The amount of time in seconds to wait before terminating the stream + :param watcher: The Watcher object that will be used to stream the resource :return: Event object with these keys: 'type': The type of event such as "ADDED", "DELETED", etc. 
@@ -164,13 +165,17 @@ def watch(self, resource, namespace=None, name=None, label_selector=None, field_ Example: client = DynamicClient(k8s_client) + watcher = watch.Watch() v1_pods = client.resources.get(api_version='v1', kind='Pod') - for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5): + for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5, watcher=watcher): print(e['type']) print(e['object'].metadata) + # If you want to gracefully stop the stream watcher + watcher.stop() """ - watcher = watch.Watch() + if not watcher: watcher = watch.Watch() + for event in watcher.stream( resource.get, namespace=namespace, From b0b0ddeedc0338df0aa36c0e16d277ab8165ad1c Mon Sep 17 00:00:00 2001 From: Fabian von Feilitzsch Date: Thu, 29 Jul 2021 16:56:44 -0400 Subject: [PATCH 71/90] Add fabianvf to reviewers --- OWNERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/OWNERS b/OWNERS index c331e688..47444bf9 100644 --- a/OWNERS +++ b/OWNERS @@ -5,4 +5,5 @@ approvers: - roycaihw emeritus_approvers: - mbohlool - +reviewers: + - fabianvf From 66a45cd081b17041afd62712c5c213d310fa30b3 Mon Sep 17 00:00:00 2001 From: piglei Date: Sun, 22 Aug 2021 11:20:59 +0800 Subject: [PATCH 72/90] Make duck-typing in serialize_body method more restrictive --- dynamic/client.py | 7 ++++++- dynamic/test_client.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/dynamic/client.py b/dynamic/client.py index 9d32770b..353a481b 100644 --- a/dynamic/client.py +++ b/dynamic/client.py @@ -98,7 +98,12 @@ def ensure_namespace(self, resource, namespace, body): return namespace def serialize_body(self, body): - if hasattr(body, 'to_dict'): + """Serialize body to raw dict so apiserver can handle it + + :param body: kubernetes resource body, current support: Union[Dict, ResourceInstance] + """ + # This should match any `ResourceInstance` instances + if callable(getattr(body, 'to_dict', None)): return 
body.to_dict() return body or {} diff --git a/dynamic/test_client.py b/dynamic/test_client.py index 78be0cc4..ab1df93f 100644 --- a/dynamic/test_client.py +++ b/dynamic/test_client.py @@ -20,6 +20,7 @@ from kubernetes.client import api_client from . import DynamicClient +from .resource import ResourceInstance, ResourceField from .exceptions import ResourceNotFoundError @@ -392,3 +393,32 @@ def test_node_apis_partial_object_metadata(self): resp = api.get(**params) self.assertEqual('PartialObjectMetadataList', resp.kind) self.assertEqual('meta.k8s.io/v1', resp.apiVersion) + + +class TestDynamicClientSerialization(unittest.TestCase): + + @classmethod + def setUpClass(cls): + config = base.get_e2e_configuration() + cls.client = DynamicClient(api_client.ApiClient(configuration=config)) + cls.pod_manifest = { + 'apiVersion': 'v1', + 'kind': 'Pod', + 'metadata': {'name': 'foo-pod'}, + 'spec': {'containers': [{'name': "main", 'image': "busybox"}]}, + } + + def test_dict_type(self): + self.assertEqual(self.client.serialize_body(self.pod_manifest), self.pod_manifest) + + def test_resource_instance_type(self): + inst = ResourceInstance(self.client, self.pod_manifest) + self.assertEqual(self.client.serialize_body(inst), self.pod_manifest) + + def test_resource_field(self): + """`ResourceField` is a special type which overwrites `__getattr__` method to return `None` + when a non-existent attribute was accessed. which means it can pass any `hasattr(...)` tests. + """ + res = ResourceField(foo='bar') + # method will return original object when it doesn't know how to proceed + self.assertEqual(self.client.serialize_body(res), res) From 70b78cd8488068c014b6d762a0c8d358273865b4 Mon Sep 17 00:00:00 2001 From: Eric Menendez Date: Fri, 27 Aug 2021 15:25:07 -0600 Subject: [PATCH 73/90] Refresh exec-based API credentials when they expire This is a fix for kubernetes-client/python#741. 
As described in kubernetes-client/python#741, some of the authentication schemes supported by Kubernetes require updating the client's credentials from time to time. The Kubernetes Python client currently does not support this, except for when using the `gcp` auth scheme. This is because the OpenAPI-generated client code does not generally expect credentials to change after the client is configured. However, in OpenAPITools/openapi-generator#3594, the OpenAPI generator added a (undocumented) hook on the `Configuration` object which provides a method for the client credentials to be refreshed as needed. Now that this hook exists, the `load_kube_config()` function, used by the Kubernetes API to set up the `Configuration` object from the client's local k8s config, just needs to be updated to take advantage of this hook. This patch does this for `exec`-based authentication, which should resolve kubernetes-client/python#741. Also, as noted above, `load_kube_config()` already has a special-case monkeypatch to refresh GCP tokens. I presume this functionality was added before the OpenAPI generator added support for the refresh hook. This patch also refactors the GCP token refreshing code to use the new hook instead of the monkeypatch. Tests are also updated. 
--- config/kube_config.py | 38 +++++++--------- config/kube_config_test.py | 91 +++++++++++++++++++++++++------------- 2 files changed, 76 insertions(+), 53 deletions(-) diff --git a/config/kube_config.py b/config/kube_config.py index 584b8a41..f295dbcd 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -359,6 +359,8 @@ def _load_gcp_token(self, provider): self._refresh_gcp_token() self.token = "Bearer %s" % provider['config']['access-token'] + if 'expiry' in provider['config']: + self.expiry = parse_rfc3339(provider['config']['expiry']) return self.token def _refresh_gcp_token(self): @@ -483,8 +485,7 @@ def _load_from_exec_plugin(self): status = ExecProvider(self._user['exec']).run() if 'token' in status: self.token = "Bearer %s" % status['token'] - return True - if 'clientCertificateData' in status: + elif 'clientCertificateData' in status: # https://kubernetes.io/docs/reference/access-authn-authz/authentication/#input-and-output-formats # Plugin has provided certificates instead of a token. 
if 'clientKeyData' not in status: @@ -504,10 +505,13 @@ def _load_from_exec_plugin(self): file_base_path=base_path, base64_file_content=False, temp_file_path=self._temp_file_path).as_file() - return True - logging.error('exec: missing token or clientCertificateData field ' - 'in plugin output') - return None + else: + logging.error('exec: missing token or clientCertificateData ' + 'field in plugin output') + return None + if 'expirationTimestamp' in status: + self.expiry = parse_rfc3339(status['expirationTimestamp']) + return True except Exception as e: logging.error(str(e)) @@ -560,25 +564,15 @@ def _load_cluster_info(self): if 'insecure-skip-tls-verify' in self._cluster: self.verify_ssl = not self._cluster['insecure-skip-tls-verify'] - def _using_gcp_auth_provider(self): - return self._user and \ - 'auth-provider' in self._user and \ - 'name' in self._user['auth-provider'] and \ - self._user['auth-provider']['name'] == 'gcp' - def _set_config(self, client_configuration): - if self._using_gcp_auth_provider(): - # GCP auth tokens must be refreshed regularly, but swagger expects - # a constant token. Replace the swagger-generated client config's - # get_api_key_with_prefix method with our own to allow automatic - # token refresh. - def _gcp_get_api_key(*args): - return self._load_gcp_token(self._user['auth-provider']) - client_configuration.get_api_key_with_prefix = _gcp_get_api_key if 'token' in self.__dict__: - # Note: this line runs for GCP auth tokens as well, but this entry - # will not be updated upon GCP token refresh. 
client_configuration.api_key['authorization'] = self.token + + def _refresh_api_key(client_configuration): + if ('expiry' in self.__dict__ and _is_expired(self.expiry)): + self._load_authentication() + self._set_config(client_configuration) + client_configuration.refresh_api_key_hook = _refresh_api_key # copy these keys directly from self to configuration object keys = ['host', 'ssl_ca_cert', 'cert_file', 'key_file', 'verify_ssl'] for key in keys: diff --git a/config/kube_config_test.py b/config/kube_config_test.py index c33ffed7..8151f948 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -29,7 +29,7 @@ from kubernetes.client import Configuration from .config_exception import ConfigException -from .dateutil import parse_rfc3339 +from .dateutil import format_rfc3339, parse_rfc3339 from .kube_config import (ENV_KUBECONFIG_PATH_SEPARATOR, CommandTokenSource, ConfigNode, FileOrData, KubeConfigLoader, KubeConfigMerger, _cleanup_temp_files, @@ -346,9 +346,12 @@ def test_get_with_name_on_duplicate_name(self): class FakeConfig: FILE_KEYS = ["ssl_ca_cert", "key_file", "cert_file"] + IGNORE_KEYS = ["refresh_api_key_hook"] def __init__(self, token=None, **kwargs): self.api_key = {} + # Provided by the OpenAPI-generated Configuration class + self.refresh_api_key_hook = None if token: self.api_key['authorization'] = token @@ -358,6 +361,8 @@ def __eq__(self, other): if len(self.__dict__) != len(other.__dict__): return for k, v in self.__dict__.items(): + if k in self.IGNORE_KEYS: + continue if k not in other.__dict__: return if k in self.FILE_KEYS: @@ -956,17 +961,15 @@ def test_load_user_token(self): def test_gcp_no_refresh(self): fake_config = FakeConfig() - # swagger-generated config has this, but FakeConfig does not. 
- self.assertFalse(hasattr(fake_config, 'get_api_key_with_prefix')) + self.assertIsNone(fake_config.refresh_api_key_hook) KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="gcp", get_google_credentials=lambda: _raise_exception( "SHOULD NOT BE CALLED")).load_and_set(fake_config) # Should now be populated with a gcp token fetcher. - self.assertIsNotNone(fake_config.get_api_key_with_prefix) + self.assertIsNotNone(fake_config.refresh_api_key_hook) self.assertEqual(TEST_HOST, fake_config.host) - # For backwards compatibility, authorization field should still be set. self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, fake_config.api_key['authorization']) @@ -997,7 +1000,7 @@ def cred(): return None self.assertEqual(BEARER_TOKEN_FORMAT % TEST_ANOTHER_DATA_BASE64, loader.token) - def test_gcp_get_api_key_with_prefix(self): + def test_gcp_refresh_api_key_hook(self): class cred_old: token = TEST_DATA_BASE64 expiry = DATETIME_EXPIRY_PAST @@ -1015,15 +1018,13 @@ class cred_new: get_google_credentials=_get_google_credentials) loader.load_and_set(fake_config) original_expiry = _get_expiry(loader, "expired_gcp_refresh") - # Call GCP token fetcher. - token = fake_config.get_api_key_with_prefix() + # Refresh the GCP token. 
+ fake_config.refresh_api_key_hook(fake_config) new_expiry = _get_expiry(loader, "expired_gcp_refresh") self.assertTrue(new_expiry > original_expiry) self.assertEqual(BEARER_TOKEN_FORMAT % TEST_ANOTHER_DATA_BASE64, loader.token) - self.assertEqual(BEARER_TOKEN_FORMAT % TEST_ANOTHER_DATA_BASE64, - token) def test_oidc_no_refresh(self): loader = KubeConfigLoader( @@ -1383,6 +1384,38 @@ def test_user_exec_auth(self, mock): active_context="exec_cred_user").load_and_set(actual) self.assertEqual(expected, actual) + @mock.patch('kubernetes.config.kube_config.ExecProvider.run') + def test_user_exec_auth_with_expiry(self, mock): + expired_token = "expired" + current_token = "current" + mock.side_effect = [ + { + "token": expired_token, + "expirationTimestamp": format_rfc3339(DATETIME_EXPIRY_PAST) + }, + { + "token": current_token, + "expirationTimestamp": format_rfc3339(DATETIME_EXPIRY_FUTURE) + } + ] + + fake_config = FakeConfig() + self.assertIsNone(fake_config.refresh_api_key_hook) + + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="exec_cred_user").load_and_set(fake_config) + # The kube config should use the first token returned from the + # exec provider. + self.assertEqual(fake_config.api_key["authorization"], + BEARER_TOKEN_FORMAT % expired_token) + # Should now be populated with a method to refresh expired tokens. + self.assertIsNotNone(fake_config.refresh_api_key_hook) + # Refresh the token; the kube config should be updated. 
+ fake_config.refresh_api_key_hook(fake_config) + self.assertEqual(fake_config.api_key["authorization"], + BEARER_TOKEN_FORMAT % current_token) + @mock.patch('kubernetes.config.kube_config.ExecProvider.run') def test_user_exec_auth_certificates(self, mock): mock.return_value = { @@ -1412,7 +1445,6 @@ def test_user_cmd_path(self): KubeConfigLoader( config_dict=self.TEST_KUBE_CONFIG, active_context="contexttestcmdpath").load_and_set(actual) - del actual.get_api_key_with_prefix self.assertEqual(expected, actual) def test_user_cmd_path_empty(self): @@ -1490,31 +1522,28 @@ def test__get_kube_config_loader_dict_no_persist(self): class TestKubernetesClientConfiguration(BaseTestCase): # Verifies properties of kubernetes.client.Configuration. # These tests guard against changes to the upstream configuration class, - # since GCP authorization overrides get_api_key_with_prefix to refresh its - # token regularly. + # since GCP and Exec authorization use refresh_api_key_hook to refresh + # their tokens regularly. 
- def test_get_api_key_with_prefix_exists(self): - self.assertTrue(hasattr(Configuration, 'get_api_key_with_prefix')) + def test_refresh_api_key_hook_exists(self): + self.assertTrue(hasattr(Configuration(), 'refresh_api_key_hook')) - def test_get_api_key_with_prefix_returns_token(self): - expected_token = 'expected_token' - config = Configuration() - config.api_key['authorization'] = expected_token - self.assertEqual(expected_token, - config.get_api_key_with_prefix('authorization')) - - def test_auth_settings_calls_get_api_key_with_prefix(self): + def test_get_api_key_calls_refresh_api_key_hook(self): + identifier = 'authorization' expected_token = 'expected_token' old_token = 'old_token' + config = Configuration( + api_key={identifier: old_token}, + api_key_prefix={identifier: 'Bearer'} + ) + + def refresh_api_key_hook(client_config): + self.assertEqual(client_config, config) + client_config.api_key[identifier] = expected_token + config.refresh_api_key_hook = refresh_api_key_hook - def fake_get_api_key_with_prefix(identifier): - self.assertEqual('authorization', identifier) - return expected_token - config = Configuration() - config.api_key['authorization'] = old_token - config.get_api_key_with_prefix = fake_get_api_key_with_prefix - self.assertEqual(expected_token, - config.auth_settings()['BearerToken']['value']) + self.assertEqual('Bearer ' + expected_token, + config.get_api_key_with_prefix(identifier)) class TestKubeConfigMerger(BaseTestCase): From bd944a58a31f878c5bf4964f458d53512df2ece3 Mon Sep 17 00:00:00 2001 From: jamesgetx Date: Fri, 3 Sep 2021 17:30:56 +0800 Subject: [PATCH 74/90] fix: field extra_args recursive growth caused by Resource and Subresource to_dict method when cache with CacheDecoder --- dynamic/resource.py | 18 ++++++++++-------- dynamic/test_discovery.py | 21 +++++++++++++++++++++ 2 files changed, 31 insertions(+), 8 deletions(-) diff --git a/dynamic/resource.py b/dynamic/resource.py index c83ae9fd..6dac1d87 100644 --- 
a/dynamic/resource.py +++ b/dynamic/resource.py @@ -48,7 +48,7 @@ def __init__(self, prefix=None, group=None, api_version=None, kind=None, self.extra_args = kwargs def to_dict(self): - return { + d = { '_type': 'Resource', 'prefix': self.prefix, 'group': self.group, @@ -58,12 +58,13 @@ def to_dict(self): 'verbs': self.verbs, 'name': self.name, 'preferred': self.preferred, - 'singular_name': self.singular_name, - 'short_names': self.short_names, + 'singularName': self.singular_name, + 'shortNames': self.short_names, 'categories': self.categories, 'subresources': {k: sr.to_dict() for k, sr in self.subresources.items()}, - 'extra_args': self.extra_args, } + d.update(self.extra_args) + return d @property def group_version(self): @@ -236,7 +237,7 @@ def __init__(self, parent, **kwargs): self.api_version = parent.api_version self.kind = kwargs.pop('kind') self.name = kwargs.pop('name') - self.subresource = self.name.split('/')[1] + self.subresource = kwargs.pop('subresource', None) or self.name.split('/')[1] self.namespaced = kwargs.pop('namespaced', False) self.verbs = kwargs.pop('verbs', None) self.extra_args = kwargs @@ -262,14 +263,15 @@ def __getattr__(self, name): return partial(getattr(self.parent.client, name), self) def to_dict(self): - return { + d = { 'kind': self.kind, 'name': self.name, 'subresource': self.subresource, 'namespaced': self.namespaced, - 'verbs': self.verbs, - 'extra_args': self.extra_args, + 'verbs': self.verbs } + d.update(self.extra_args) + return d class ResourceInstance(object): diff --git a/dynamic/test_discovery.py b/dynamic/test_discovery.py index 4897f244..639ccdd3 100644 --- a/dynamic/test_discovery.py +++ b/dynamic/test_discovery.py @@ -38,3 +38,24 @@ def test_init_cache_from_file(self): # test no Discoverer._write_cache called self.assertTrue(mtime1 == mtime2) + + def test_cache_decoder_resource_and_subresource(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + # first invalidate cache + 
client.resources.invalidate_cache() + + # do Discoverer.__init__ + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + # the resources of client will use _cache['resources'] in memory + deploy1 = client.resources.get(kind='Deployment') + + # do Discoverer.__init__ + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + # the resources of client will use _cache['resources'] decode from cache file + deploy2 = client.resources.get(kind='Deployment') + + # test Resource is the same + self.assertTrue(deploy1 == deploy2) + + # test Subresource is the same + self.assertTrue(deploy1.status == deploy2.status) From c040d87bd847d5afe480dcc2d39ad46cb6234cc3 Mon Sep 17 00:00:00 2001 From: schneesu Date: Tue, 28 Sep 2021 10:05:17 +0800 Subject: [PATCH 75/90] fix: ignore ResourceNotFoundError in the first call of LazyDiscoverer.__search --- dynamic/discovery.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/dynamic/discovery.py b/dynamic/discovery.py index 5c2f4ac4..dbf94101 100644 --- a/dynamic/discovery.py +++ b/dynamic/discovery.py @@ -237,7 +237,11 @@ def api_groups(self): return self.parse_api_groups(request_resources=False, update=True)['apis'].keys() def search(self, **kwargs): - results = self.__search(self.__build_search(**kwargs), self.__resources, []) + # In first call, ignore ResourceNotFoundError and set default value for results + try: + results = self.__search(self.__build_search(**kwargs), self.__resources, []) + except ResourceNotFoundError: + results = [] if not results: self.invalidate_cache() results = self.__search(self.__build_search(**kwargs), self.__resources, []) From 281f17ab237384bc1f5b022555635710b6e6aff3 Mon Sep 17 00:00:00 2001 From: abikouo Date: Thu, 16 Sep 2021 10:52:44 +0200 Subject: [PATCH 76/90] add support for server side apply --- dynamic/client.py | 18 ++++++++++++++++++ dynamic/test_client.py | 24 ++++++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git 
a/dynamic/client.py b/dynamic/client.py index 353a481b..a81039b8 100644 --- a/dynamic/client.py +++ b/dynamic/client.py @@ -149,6 +149,20 @@ def patch(self, resource, body=None, name=None, namespace=None, **kwargs): return self.request('patch', path, body=body, content_type=content_type, **kwargs) + def server_side_apply(self, resource, body=None, name=None, namespace=None, force_conflicts=None, **kwargs): + body = self.serialize_body(body) + name = name or body.get('metadata', {}).get('name') + if not name: + raise ValueError("name is required to patch {}.{}".format(resource.group_version, resource.kind)) + if resource.namespaced: + namespace = self.ensure_namespace(resource, namespace, body) + + # force content type to 'application/apply-patch+yaml' + kwargs.update({'content_type': 'application/apply-patch+yaml'}) + path = resource.path(name=name, namespace=namespace) + + return self.request('patch', path, body=body, force_conflicts=force_conflicts, **kwargs) + def watch(self, resource, namespace=None, name=None, label_selector=None, field_selector=None, resource_version=None, timeout=None, watcher=None): """ Stream events for a resource from the Kubernetes API @@ -227,6 +241,10 @@ def request(self, method, path, body=None, **params): query_params.append(('orphanDependents', params['orphan_dependents'])) if params.get('dry_run') is not None: query_params.append(('dryRun', params['dry_run'])) + if params.get('field_manager') is not None: + query_params.append(('fieldManager', params['field_manager'])) + if params.get('force_conflicts') is not None: + query_params.append(('force', params['force_conflicts'])) header_params = params.get('header_params', {}) form_params = [] diff --git a/dynamic/test_client.py b/dynamic/test_client.py index ab1df93f..c31270bc 100644 --- a/dynamic/test_client.py +++ b/dynamic/test_client.py @@ -15,6 +15,7 @@ import time import unittest import uuid +import json from kubernetes.e2e_test import base from kubernetes.client import 
api_client @@ -394,6 +395,29 @@ def test_node_apis_partial_object_metadata(self): self.assertEqual('PartialObjectMetadataList', resp.kind) self.assertEqual('meta.k8s.io/v1', resp.apiVersion) + def test_server_side_apply_api(self): + client = DynamicClient(api_client.ApiClient(configuration=self.config)) + api = client.resources.get( + api_version='v1', kind='Pod') + + name = 'pod-' + short_uuid() + pod_manifest = { + 'apiVersion': 'v1', + 'kind': 'Pod', + 'metadata': {'labels': {'name': name}, + 'name': name}, + 'spec': {'containers': [{ + 'image': 'nginx', + 'name': 'nginx', + 'ports': [{'containerPort': 80, + 'protocol': 'TCP'}]}]}} + + body = json.dumps(pod_manifest).encode() + resp = api.server_side_apply( + name=name, namespace='default', body=body, + field_manager='kubernetes-unittests', dry_run="All") + self.assertEqual('kubernetes-unittests', resp.metadata.managedFields[0].manager) + class TestDynamicClientSerialization(unittest.TestCase): From 769bc57ec7b0271a7cb018becee8ad156cf82704 Mon Sep 17 00:00:00 2001 From: itaru2622 Date: Wed, 29 Sep 2021 09:18:55 +0900 Subject: [PATCH 77/90] add proxy authentication supporting for websocket (stream/ws_client.py) --- stream/ws_client.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 356440c8..2a60a8be 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -29,6 +29,7 @@ from six import StringIO from websocket import WebSocket, ABNF, enableTrace +from base64 import b64decode STDIN_CHANNEL = 0 STDOUT_CHANNEL = 1 @@ -445,11 +446,20 @@ def create_websocket(configuration, url, headers=None): ssl_opts['keyfile'] = configuration.key_file websocket = WebSocket(sslopt=ssl_opts, skip_utf8_validation=False) + connect_opt = { + 'header': header + } if configuration.proxy: proxy_url = urlparse(configuration.proxy) - websocket.connect(url, header=header, http_proxy_host=proxy_url.hostname, http_proxy_port=proxy_url.port) - else: - 
websocket.connect(url, header=header) + connect_opt.update({'http_proxy_host': proxy_url.hostname, 'http_proxy_port': proxy_url.port}) + if configuration.proxy_headers: + for key,value in configuration.proxy_headers.items(): + if key == 'proxy-authorization' and value.startswith('Basic'): + b64value = value.split()[1] + auth = b64decode(b64value).decode().split(':') + connect_opt.update({'http_proxy_auth': (auth[0], auth[1]) }) + + websocket.connect(url, **connect_opt) return websocket From 877727110956253be05e45dfb0e18bd094c54e90 Mon Sep 17 00:00:00 2001 From: itaru2622 Date: Tue, 5 Oct 2021 20:50:01 +0900 Subject: [PATCH 78/90] proxy authentication supporting for websocket (stream/ws_client.py), with unittest --- stream/ws_client.py | 22 ++++++++++++++-------- stream/ws_client_test.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 2a60a8be..419d28b2 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -449,18 +449,24 @@ def create_websocket(configuration, url, headers=None): connect_opt = { 'header': header } + + if configuration.proxy or coniguration.proxy_headers: + connect_opt = websocket_proxycare(connect_opt, configuration, url, headers) + + websocket.connect(url, **connect_opt) + return websocket + +def websocket_proxycare(connect_opt, configuration, url, headers): if configuration.proxy: proxy_url = urlparse(configuration.proxy) connect_opt.update({'http_proxy_host': proxy_url.hostname, 'http_proxy_port': proxy_url.port}) if configuration.proxy_headers: - for key,value in configuration.proxy_headers.items(): - if key == 'proxy-authorization' and value.startswith('Basic'): - b64value = value.split()[1] - auth = b64decode(b64value).decode().split(':') - connect_opt.update({'http_proxy_auth': (auth[0], auth[1]) }) - - websocket.connect(url, **connect_opt) - return websocket + for key,value in configuration.proxy_headers.items(): + if key == 
'proxy-authorization' and value.startswith('Basic'): + b64value = value.split()[1] + auth = b64decode(b64value).decode().split(':') + connect_opt.update({'http_proxy_auth': (auth[0], auth[1]) }) + return(connect_opt) def websocket_call(configuration, _method, url, **kwargs): diff --git a/stream/ws_client_test.py b/stream/ws_client_test.py index a8f4049d..bfcd64d5 100644 --- a/stream/ws_client_test.py +++ b/stream/ws_client_test.py @@ -15,7 +15,21 @@ import unittest from .ws_client import get_websocket_url +from .ws_client import websocket_proxycare +from kubernetes.client.configuration import Configuration +try: + import urllib3 + urllib3.disable_warnings() +except ImportError: + pass + +def dictval(dict, key, default=None): + try: + val = dict[key] + except KeyError: + val = default + return val class WSClientTest(unittest.TestCase): @@ -32,6 +46,21 @@ def test_websocket_client(self): ]: self.assertEqual(get_websocket_http://github.com/url(url), ws_url) + def test_websocket_proxycare(self): + for proxy, idpass, expect_host, expect_port, expect_auth in [ + ( None, None, None, None, None ), + ( 'http://proxy.example.com:8080/', None, 'proxy.example.com', 8080, None ), + ( 'http://proxy.example.com:8080/', 'user:pass', 'proxy.example.com', 8080, ('user','pass')) + ]: + config = Configuration() + if proxy is not None: + setattr(config, 'proxy', proxy) + if idpass is not None: + setattr(config, 'proxy_headers', urllib3.util.make_headers(proxy_basic_auth=idpass)) + connect_opt = websocket_proxycare( {}, config, None, None) + self.assertEqual( dictval(connect_opt,'http_proxy_host'), expect_host) + self.assertEqual( dictval(connect_opt,'http_proxy_port'), expect_port) + self.assertEqual( dictval(connect_opt,'http_proxy_auth'), expect_auth) if __name__ == '__main__': unittest.main() From 59e7d115b22bcc2f640949ab880da39da5a0c046 Mon Sep 17 00:00:00 2001 From: itaru2622 Date: Sat, 9 Oct 2021 08:48:00 +0900 Subject: [PATCH 79/90] change base64decode 
to urlsafe_b64decode --- stream/ws_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 419d28b2..4b26ddd5 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -29,7 +29,7 @@ from six import StringIO from websocket import WebSocket, ABNF, enableTrace -from base64 import b64decode +from base64 import urlsafe_b64decode STDIN_CHANNEL = 0 STDOUT_CHANNEL = 1 @@ -464,7 +464,7 @@ def websocket_proxycare(connect_opt, configuration, url, headers): for key,value in configuration.proxy_headers.items(): if key == 'proxy-authorization' and value.startswith('Basic'): b64value = value.split()[1] - auth = b64decode(b64value).decode().split(':') + auth = urlsafe_b64decode(b64value).decode().split(':') connect_opt.update({'http_proxy_auth': (auth[0], auth[1]) }) return(connect_opt) From f23b2840f88ee51d96089555fae6596d77242112 Mon Sep 17 00:00:00 2001 From: itaru2622 Date: Sun, 10 Oct 2021 11:46:38 +0900 Subject: [PATCH 80/90] fix typo in proxy auth (stream/ws_client.py) --- stream/ws_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 4b26ddd5..732ac470 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -450,7 +450,7 @@ def create_websocket(configuration, url, headers=None): 'header': header } - if configuration.proxy or coniguration.proxy_headers: + if configuration.proxy or configuration.proxy_headers: connect_opt = websocket_proxycare(connect_opt, configuration, url, headers) websocket.connect(url, **connect_opt) From 95e2e85af5928546b92b9fe06554b48db7f3baaf Mon Sep 17 00:00:00 2001 From: DiptoChakrabarty Date: Fri, 15 Oct 2021 19:14:37 +0530 Subject: [PATCH 81/90] closes open file descriptors to prevent leaks --- config/kube_config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config/kube_config.py b/config/kube_config.py index 584b8a41..e5368f47 100644 --- a/config/kube_config.py +++ 
b/config/kube_config.py @@ -70,7 +70,8 @@ def _create_temp_file_with_content(content, temp_file_path=None): return _temp_files[content_key] if temp_file_path and not os.path.isdir(temp_file_path): os.makedirs(name=temp_file_path) - _, name = tempfile.mkstemp(dir=temp_file_path) + fd, name = tempfile.mkstemp(dir=temp_file_path) + os.close(fd) _temp_files[content_key] = name with open(name, 'wb') as fd: fd.write(content.encode() if isinstance(content, str) else content) From 4ef4139e77eb435faf74944be90ce7f8bbe2e58f Mon Sep 17 00:00:00 2001 From: itaru2622 Date: Mon, 18 Oct 2021 09:30:09 +0900 Subject: [PATCH 82/90] add no_proxy support to stream/ws_client.py --- stream/ws_client.py | 7 +++++++ stream/ws_client_test.py | 18 ++++++++++++++---- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 732ac470..68840593 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -30,6 +30,7 @@ from websocket import WebSocket, ABNF, enableTrace from base64 import urlsafe_b64decode +from requests.utils import should_bypass_proxies STDIN_CHANNEL = 0 STDOUT_CHANNEL = 1 @@ -457,6 +458,12 @@ def create_websocket(configuration, url, headers=None): return websocket def websocket_proxycare(connect_opt, configuration, url, headers): + """ An internal function to be called in api-client when a websocket + create is requested. 
+ """ + if configuration.no_proxy: + connect_opt.update({ 'http_no_proxy': configuration.no_proxy.split(',') }) + if configuration.proxy: proxy_url = urlparse(configuration.proxy) connect_opt.update({'http_proxy_host': proxy_url.hostname, 'http_proxy_port': proxy_url.port}) diff --git a/stream/ws_client_test.py b/stream/ws_client_test.py index bfcd64d5..a7a11f5c 100644 --- a/stream/ws_client_test.py +++ b/stream/ws_client_test.py @@ -47,20 +47,30 @@ def test_websocket_client(self): self.assertEqual(get_websocket_http://github.com/url(url), ws_url) def test_websocket_proxycare(self): - for proxy, idpass, expect_host, expect_port, expect_auth in [ - ( None, None, None, None, None ), - ( 'http://proxy.example.com:8080/', None, 'proxy.example.com', 8080, None ), - ( 'http://proxy.example.com:8080/', 'user:pass', 'proxy.example.com', 8080, ('user','pass')) + for proxy, idpass, no_proxy, expect_host, expect_port, expect_auth, expect_noproxy in [ + ( None, None, None, None, None, None, None ), + ( 'http://proxy.example.com:8080/', None, None, 'proxy.example.com', 8080, None, None ), + ( 'http://proxy.example.com:8080/', 'user:pass', None, 'proxy.example.com', 8080, ('user','pass'), None), + ( 'http://proxy.example.com:8080/', 'user:pass', '', 'proxy.example.com', 8080, ('user','pass'), None), + ( 'http://proxy.example.com:8080/', 'user:pass', '*', 'proxy.example.com', 8080, ('user','pass'), ['*']), + ( 'http://proxy.example.com:8080/', 'user:pass', '.example.com', 'proxy.example.com', 8080, ('user','pass'), ['.example.com']), + ( 'http://proxy.example.com:8080/', 'user:pass', 'localhost,.local,.example.com', 'proxy.example.com', 8080, ('user','pass'), ['localhost','.local','.example.com']), ]: + # setup input config = Configuration() if proxy is not None: setattr(config, 'proxy', proxy) if idpass is not None: setattr(config, 'proxy_headers', urllib3.util.make_headers(proxy_basic_auth=idpass)) 
+ if no_proxy is not None: + setattr(config, 'no_proxy', no_proxy) + # setup done + # test starts connect_opt = websocket_proxycare( {}, config, None, None) self.assertEqual( dictval(connect_opt,'http_proxy_host'), expect_host) self.assertEqual( dictval(connect_opt,'http_proxy_port'), expect_port) self.assertEqual( dictval(connect_opt,'http_proxy_auth'), expect_auth) + self.assertEqual( dictval(connect_opt,'http_no_proxy'), expect_noproxy) if __name__ == '__main__': unittest.main() From d47030ac835e00b1bc315349dffa66c252967b1e Mon Sep 17 00:00:00 2001 From: aagten Date: Tue, 9 Nov 2021 21:41:53 +0100 Subject: [PATCH 83/90] Make socket Windows-proof --- stream/ws_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 68840593..4e164e86 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -280,7 +280,7 @@ def __init__(self, ix, port_number): # between the python application and the kubernetes websocket. The self.python # half of the socket pair is used by the _proxy method to receive and send data # to the running python application. - s, self.python = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM) + s, self.python = socket.socketpair() # The self.socket half of the pair is used by the python application to send # and receive data to the eventual pod port. It is wrapped in the _Socket class # because a socket pair is an AF_UNIX socket, not a AF_INET socket. 
This allows From 8b306c0f570152d8bbf65736a74b7895d20cf246 Mon Sep 17 00:00:00 2001 From: WalkerWang731 Date: Wed, 17 Nov 2021 16:53:22 +0800 Subject: [PATCH 84/90] add a new method of config.kube_config.new_client_from_config_dict Signed-off-by: WalkerWang731 --- config/__init__.py | 2 +- config/kube_config.py | 18 ++++++++++++++++++ config/kube_config_test.py | 9 ++++++++- 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/config/__init__.py b/config/__init__.py index e1bf7f57..69ed7f1f 100644 --- a/config/__init__.py +++ b/config/__init__.py @@ -18,7 +18,7 @@ from .incluster_config import load_incluster_config from .kube_config import (KUBE_CONFIG_DEFAULT_LOCATION, list_kube_config_contexts, load_kube_config, - load_kube_config_from_dict, new_client_from_config) + load_kube_config_from_dict, new_client_from_config, new_client_from_config_dict) def load_config(**kwargs): diff --git a/config/kube_config.py b/config/kube_config.py index e5368f47..0b6fe56e 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -871,3 +871,21 @@ def new_client_from_config( client_configuration=client_config, persist_config=persist_config) return ApiClient(configuration=client_config) + + +def new_client_from_config_dict( + config_dict=None, + context=None, + persist_config=True, + temp_file_path=None): + """ + Loads configuration the same as load_kube_config_from_dict but returns an ApiClient + to be used with any API object. This will allow the caller to concurrently + talk with multiple clusters. 
+ """ + client_config = type.__call__(Configuration) + load_kube_config_from_dict(config_dict=config_dict, context=context, + client_configuration=client_config, + persist_config=persist_config, + temp_file_path=temp_file_path) + return ApiClient(configuration=client_config) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index c33ffed7..b9030759 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -37,7 +37,7 @@ _get_kube_config_loader, _get_kube_config_loader_for_yaml_file, list_kube_config_contexts, load_kube_config, - load_kube_config_from_dict, new_client_from_config) + load_kube_config_from_dict, new_client_from_config, new_client_from_config_dict) BEARER_TOKEN_FORMAT = "Bearer %s" @@ -1351,6 +1351,13 @@ def test_new_client_from_config(self): self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, client.configuration.api_key['authorization']) + def test_new_client_from_config_dict(self): + client = new_client_from_config_dict( + config_dict=self.TEST_KUBE_CONFIG, context="simple_token") + self.assertEqual(TEST_HOST, client.configuration.host) + self.assertEqual(BEARER_TOKEN_FORMAT % TEST_DATA_BASE64, + client.configuration.api_key['authorization']) + def test_no_users_section(self): expected = FakeConfig(host=TEST_HOST) actual = FakeConfig() From bc697ae8f089b048a8feed0b73b0afc0be3435cf Mon Sep 17 00:00:00 2001 From: Ping He Date: Wed, 24 Nov 2021 15:14:10 +0800 Subject: [PATCH 85/90] Fix leaderelection/example.py, now works in package. 
Signed-off-by: Ping He --- leaderelection/example.py | 6 +++--- leaderelection/resourcelock/configmaplock.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/leaderelection/example.py b/leaderelection/example.py index b8d8e616..3b3336c8 100644 --- a/leaderelection/example.py +++ b/leaderelection/example.py @@ -14,9 +14,9 @@ import uuid from kubernetes import client, config -from leaderelection import leaderelection -from leaderelection.resourcelock.configmaplock import ConfigMapLock -from leaderelection import electionconfig +from kubernetes.leaderelection import leaderelection +from kubernetes.leaderelection.resourcelock.configmaplock import ConfigMapLock +from kubernetes.leaderelection import electionconfig # Authenticate using config file diff --git a/leaderelection/resourcelock/configmaplock.py b/leaderelection/resourcelock/configmaplock.py index 8d155e29..54a7bb43 100644 --- a/leaderelection/resourcelock/configmaplock.py +++ b/leaderelection/resourcelock/configmaplock.py @@ -15,7 +15,7 @@ from kubernetes.client.rest import ApiException from kubernetes import client, config from kubernetes.client.api_client import ApiClient -from leaderelection.leaderelectionrecord import LeaderElectionRecord +from ..leaderelectionrecord import LeaderElectionRecord import json import logging logging.basicConfig(level=logging.INFO) From 18828d92cca7e9736d310aab5b2c1f22f0d7f9e7 Mon Sep 17 00:00:00 2001 From: John Sun Date: Mon, 29 Nov 2021 17:33:52 +1100 Subject: [PATCH 86/90] Use select.poll() for exec on linux/darwin --- stream/ws_client.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 356440c8..9a9442e5 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import sys from kubernetes.client.rest import ApiException, ApiValueError @@ -165,8 +166,25 @@ def update(self, timeout=0): if not self.sock.connected: self._connected = False return - r, _, _ = select.select( - (self.sock.sock, ), (), (), timeout) + + # The options here are: + # select.select() - this will work on most OS, however, it has a + # limitation of only able to read fd numbers up to 1024. + # i.e. does not scale well. This was the original + # implementation. + # select.poll() - this will work on most unix based OS, but not as + # efficient as epoll. Will work for fd numbers above 1024. + # select.epoll() - newest and most efficient way of polling. + # However, only works on linux. + if sys.platform.startswith('linux') or sys.platform in ['darwin']: + poll = select.poll() + poll.register(self.sock.sock, select.POLLIN) + r = poll.poll(timeout) + poll.unregister(self.sock.sock) + else: + r, _, _ = select.select( + (self.sock.sock, ), (), (), timeout) + if r: op_code, frame = self.sock.recv_data_frame(True) if op_code == ABNF.OPCODE_CLOSE: From 79e066a0d46a8e7b84366fdd1903965d60ca92a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Gasser?= Date: Mon, 13 Dec 2021 19:32:32 -0500 Subject: [PATCH 87/90] fix: WSClient.returncode not idempotent --- stream/ws_client.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/stream/ws_client.py b/stream/ws_client.py index 4e164e86..89ad5c2d 100644 --- a/stream/ws_client.py +++ b/stream/ws_client.py @@ -63,6 +63,7 @@ def __init__(self, configuration, url, headers, capture_all): self._all = _IgnoredIO() self.sock = create_websocket(configuration, url, headers) self._connected = True + self._returncode = None def peek_channel(self, channel, timeout=0): """Peek a channel and return part of the input, @@ -210,12 +211,14 @@ def returncode(self): if self.is_open(): return None else: - err = self.read_channel(ERROR_CHANNEL) - err = yaml.safe_load(err) - if err['status'] == "Success": - 
return 0 - return int(err['details']['causes'][0]['message']) - + if self._returncode is None: + err = self.read_channel(ERROR_CHANNEL) + err = yaml.safe_load(err) + if err['status'] == "Success": + self._returncode = 0 + else: + self._returncode = int(err['details']['causes'][0]['message']) + return self._returncode def close(self, **kwargs): """ From 1c5bf586f0882c81c03181588830887345703ca5 Mon Sep 17 00:00:00 2001 From: April Schleck Date: Thu, 23 Dec 2021 14:46:23 -0800 Subject: [PATCH 88/90] Run kubeconfig exec commands in the correct directory. This fixes configs that rely on relative paths. --- config/exec_provider.py | 4 +++- config/exec_provider_test.py | 21 +++++++++++++++------ config/kube_config.py | 4 ++-- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/config/exec_provider.py b/config/exec_provider.py index 4008f2e8..ef3fac66 100644 --- a/config/exec_provider.py +++ b/config/exec_provider.py @@ -31,7 +31,7 @@ class ExecProvider(object): * caching """ - def __init__(self, exec_config): + def __init__(self, exec_config, cwd): """ exec_config must be of type ConfigNode because we depend on safe_get(self, key) to correctly handle optional exec provider @@ -53,6 +53,7 @@ def __init__(self, exec_config): value = item['value'] additional_vars[name] = value self.env.update(additional_vars) + self.cwd = cwd def run(self, previous_response=None): kubernetes_exec_info = { @@ -69,6 +70,7 @@ def run(self, previous_response=None): self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + cwd=self.cwd, env=self.env, universal_newlines=True) (stdout, stderr) = process.communicate() diff --git a/config/exec_provider_test.py b/config/exec_provider_test.py index 44579beb..a545b556 100644 --- a/config/exec_provider_test.py +++ b/config/exec_provider_test.py @@ -47,7 +47,7 @@ def test_missing_input_keys(self): ConfigNode('test3', {'apiVersion': ''})] for exec_config in exec_configs: with self.assertRaises(ConfigException) as context: - 
ExecProvider(exec_config) + ExecProvider(exec_config, None) self.assertIn('exec: malformed request. missing key', context.exception.args[0]) @@ -57,7 +57,7 @@ def test_error_code_returned(self, mock): instance.wait.return_value = 1 instance.communicate.return_value = ('', '') with self.assertRaises(ConfigException) as context: - ep = ExecProvider(self.input_ok) + ep = ExecProvider(self.input_ok, None) ep.run() self.assertIn('exec: process returned %d' % instance.wait.return_value, context.exception.args[0]) @@ -68,7 +68,7 @@ def test_nonjson_output_returned(self, mock): instance.wait.return_value = 0 instance.communicate.return_value = ('', '') with self.assertRaises(ConfigException) as context: - ep = ExecProvider(self.input_ok) + ep = ExecProvider(self.input_ok, None) ep.run() self.assertIn('exec: failed to decode process output', context.exception.args[0]) @@ -102,7 +102,7 @@ def test_missing_output_keys(self, mock): for output in outputs: instance.communicate.return_value = (output, '') with self.assertRaises(ConfigException) as context: - ep = ExecProvider(self.input_ok) + ep = ExecProvider(self.input_ok, None) ep.run() self.assertIn('exec: malformed response. 
missing key', context.exception.args[0]) @@ -123,7 +123,7 @@ def test_mismatched_api_version(self, mock): """ % wrong_api_version instance.communicate.return_value = (output, '') with self.assertRaises(ConfigException) as context: - ep = ExecProvider(self.input_ok) + ep = ExecProvider(self.input_ok, None) ep.run() self.assertIn( 'exec: plugin api version %s does not match' % @@ -135,11 +135,20 @@ def test_ok_01(self, mock): instance = mock.return_value instance.wait.return_value = 0 instance.communicate.return_value = (self.output_ok, '') - ep = ExecProvider(self.input_ok) + ep = ExecProvider(self.input_ok, None) result = ep.run() self.assertTrue(isinstance(result, dict)) self.assertTrue('token' in result) + @mock.patch('subprocess.Popen') + def test_run_in_dir(self, mock): + instance = mock.return_value + instance.wait.return_value = 0 + instance.communicate.return_value = (self.output_ok, '') + ep = ExecProvider(self.input_ok, '/some/directory') + ep.run() + self.assertEqual(mock.call_args.kwargs['cwd'], '/some/directory') + if __name__ == '__main__': unittest.main() diff --git a/config/kube_config.py b/config/kube_config.py index a04a6e3e..f37ed43e 100644 --- a/config/kube_config.py +++ b/config/kube_config.py @@ -483,7 +483,8 @@ def _load_from_exec_plugin(self): if 'exec' not in self._user: return try: - status = ExecProvider(self._user['exec']).run() + base_path = self._get_base_path(self._cluster.path) + status = ExecProvider(self._user['exec'], base_path).run() if 'token' in status: self.token = "Bearer %s" % status['token'] elif 'clientCertificateData' in status: @@ -493,7 +494,6 @@ def _load_from_exec_plugin(self): logging.error('exec: missing clientKeyData field in ' 'plugin output') return None - base_path = self._get_base_path(self._cluster.path) self.cert_file = FileOrData( status, None, data_key_name='clientCertificateData', From 6efd33d5c16243929d32139d3b0d0bc34820ea7b Mon Sep 17 00:00:00 2001 From: April Schleck Date: Wed, 5 Jan 2022 17:56:07 -0800 
Subject: [PATCH 89/90] Add a test to kube_config_test to check the cwd of the ExecProvider --- config/kube_config_test.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/config/kube_config_test.py b/config/kube_config_test.py index 6ac3db2d..02127d15 100644 --- a/config/kube_config_test.py +++ b/config/kube_config_test.py @@ -1441,6 +1441,20 @@ def test_user_exec_auth_certificates(self, mock): active_context="exec_cred_user_certificate").load_and_set(actual) self.assertEqual(expected, actual) + @mock.patch('kubernetes.config.kube_config.ExecProvider.run', autospec=True) + def test_user_exec_cwd(self, mock): + capture = {} + def capture_cwd(exec_provider): + capture['cwd'] = exec_provider.cwd + mock.side_effect = capture_cwd + + expected = "/some/random/path" + KubeConfigLoader( + config_dict=self.TEST_KUBE_CONFIG, + active_context="exec_cred_user", + config_base_path=expected).load_and_set(FakeConfig()) + self.assertEqual(expected, capture['cwd']) + def test_user_cmd_path(self): A = namedtuple('A', ['token', 'expiry']) token = "dummy" From 4539902540c19bd824944e6aebad7c0998b648b2 Mon Sep 17 00:00:00 2001 From: Yu Liao Date: Tue, 1 Feb 2022 09:44:14 -0800 Subject: [PATCH 90/90] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index f916e343..9804e0d5 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,8 @@ [![Build Status](https://travis-ci.org/kubernetes-client/python-base.svg?branch=master)](https://travis-ci.org/kubernetes-client/python-base) +**This repo has been merged into the [python client](https://github.com/kubernetes-client/python/tree/master/kubernetes/base). Please file issues, contribute PRs there. This repo is kept open to provide the history of issues and PRs.** + This is the utility part of the [python client](https://github.com/kubernetes-client/python). It has been added to the main repo using git submodules. 
This structure allow other developers to create their own kubernetes client and still use standard kubernetes python utilities.