diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index cb89b2e3..eecb84c2 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -1,3 +1,3 @@
docker:
image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
- digest: sha256:ec49167c606648a063d1222220b48119c912562849a0528f35bfb592a9f72737
+ digest: sha256:ae600f36b6bc972b368367b6f83a1d91ec2c82a4a116b383d67d547c56fe6de3
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index efc461e0..62aced93 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -3,9 +3,10 @@
#
# For syntax help see:
# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax
+# Note: This file is autogenerated. To make changes to the codeowner team, please update .repo-metadata.json.
-# The @googleapis/yoshi-python is the default owner for changes in this repo
-* @googleapis/yoshi-python
+# @googleapis/yoshi-python @googleapis/cdpe-cloudai are the default owners for changes in this repo
+* @googleapis/yoshi-python @googleapis/cdpe-cloudai
-
-/samples/**/*.py @telpirion @sirtorry @googleapis/python-samples-owners
+# @googleapis/python-samples-reviewers @googleapis/cdpe-cloudai are the default owners for samples changes
+/samples/ @googleapis/python-samples-reviewers @googleapis/cdpe-cloudai
diff --git a/.github/release-please.yml b/.github/release-please.yml
index 4507ad05..466597e5 100644
--- a/.github/release-please.yml
+++ b/.github/release-please.yml
@@ -1 +1,2 @@
releaseType: python
+handleGHRelease: true
diff --git a/.github/release-trigger.yml b/.github/release-trigger.yml
new file mode 100644
index 00000000..d4ca9418
--- /dev/null
+++ b/.github/release-trigger.yml
@@ -0,0 +1 @@
+enabled: true
diff --git a/.kokoro/release.sh b/.kokoro/release.sh
index 9fb1ab29..9c3fb7c1 100755
--- a/.kokoro/release.sh
+++ b/.kokoro/release.sh
@@ -26,7 +26,7 @@ python3 -m pip install --upgrade twine wheel setuptools
export PYTHONUNBUFFERED=1
# Move into the package, build the distribution and upload.
-TWINE_PASSWORD=$(cat "${KOKORO_GFILE_DIR}/secret_manager/google-cloud-pypi-token")
+TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-1")
cd github/python-automl
python3 setup.py sdist bdist_wheel
twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/*
diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg
index 5926a5cf..f105268c 100644
--- a/.kokoro/release/common.cfg
+++ b/.kokoro/release/common.cfg
@@ -23,8 +23,18 @@ env_vars: {
value: "github/python-automl/.kokoro/release.sh"
}
+# Fetch PyPI password
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "google-cloud-pypi-token-keystore-1"
+ }
+ }
+}
+
# Tokens needed to report release status back to GitHub
env_vars: {
key: "SECRET_MANAGER_KEYS"
- value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem,google-cloud-pypi-token"
+ value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem"
}
diff --git a/.repo-metadata.json b/.repo-metadata.json
index ee97ad54..c0257b34 100644
--- a/.repo-metadata.json
+++ b/.repo-metadata.json
@@ -1,14 +1,17 @@
{
- "name": "automl",
- "name_pretty": "Cloud AutoML",
- "product_documentation": "https://cloud.google.com/automl/docs/",
- "client_documentation": "https://googleapis.dev/python/automl/latest",
- "issue_tracker": "https://issuetracker.google.com/savedsearches/559744",
- "release_level": "ga",
- "language": "python",
- "library_type": "GAPIC_AUTO",
- "repo": "googleapis/python-automl",
- "distribution_name": "google-cloud-automl",
- "api_id": "automl.googleapis.com",
- "requires_billing": true
-}
\ No newline at end of file
+ "name": "automl",
+ "name_pretty": "Cloud AutoML",
+ "product_documentation": "https://cloud.google.com/automl/docs/",
+ "client_documentation": "https://cloud.google.com/python/docs/reference/automl/latest",
+ "issue_tracker": "https://issuetracker.google.com/savedsearches/559744",
+ "release_level": "stable",
+ "language": "python",
+ "library_type": "GAPIC_AUTO",
+ "repo": "googleapis/python-automl",
+ "distribution_name": "google-cloud-automl",
+ "api_id": "automl.googleapis.com",
+ "requires_billing": true,
+ "default_version": "v1",
+ "codeowner_team": "@googleapis/cdpe-cloudai",
+ "api_shortname": "automl"
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4c7f705f..8f31f975 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,18 @@
[1]: https://pypi.org/project/google-cloud-automl/#history
+## [2.6.0](https://github.com/googleapis/python-automl/compare/v2.5.2...v2.6.0) (2022-01-15)
+
+
+### Features
+
+* publish updated protos for cloud/automl/v1 service ([#318](https://github.com/googleapis/python-automl/issues/318)) ([3bf0271](https://github.com/googleapis/python-automl/commit/3bf0271dce60fe9843711068e85978b627f77db6))
+
+
+### Bug Fixes
+
+* **deps:** allow google-cloud-storage < 3.0.0dev ([#316](https://github.com/googleapis/python-automl/issues/316)) ([ba271a8](https://github.com/googleapis/python-automl/commit/ba271a8cfea916f7fb3df536258cda2dca32a423))
+
### [2.5.2](https://www.github.com/googleapis/python-automl/compare/v2.5.1...v2.5.2) (2021-11-01)
diff --git a/README.rst b/README.rst
index 0cdb8b3e..abb1b3e1 100644
--- a/README.rst
+++ b/README.rst
@@ -18,7 +18,7 @@ transfer learning, and Neural Architecture Search technology.
.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-automl.svg
:target: https://pypi.org/project/google-cloud-automl/
.. _Cloud AutoML API: https://cloud.google.com/automl
-.. _Client Library Documentation: https://googleapis.dev/python/automl/latest
+.. _Client Library Documentation: https://cloud.google.com/python/docs/reference/automl/latest
.. _Product Documentation: https://cloud.google.com/automl
Quick Start
diff --git a/google/cloud/automl_v1/services/auto_ml/async_client.py b/google/cloud/automl_v1/services/auto_ml/async_client.py
index cea29911..adc27c99 100644
--- a/google/cloud/automl_v1/services/auto_ml/async_client.py
+++ b/google/cloud/automl_v1/services/auto_ml/async_client.py
@@ -19,14 +19,17 @@
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core.client_options import ClientOptions # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
-OptionalRetry = Union[retries.Retry, object]
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
@@ -70,7 +73,7 @@ class AutoMlAsyncClient:
Currently the only supported ``location_id`` is "us-central1".
On any input that is documented to expect a string parameter in
- snake_case or kebab-case, either of those cases is accepted.
+ snake_case or dash-case, either of those cases is accepted.
"""
_client: AutoMlClient
@@ -1408,7 +1411,6 @@ async def deploy_model(
r"""Deploys a model. If a model is already deployed, deploying it
with the same parameters has no effect. Deploying with different
parametrs (as e.g. changing
-
[node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
will reset the deployment state without pausing the model's
availability.
diff --git a/google/cloud/automl_v1/services/auto_ml/client.py b/google/cloud/automl_v1/services/auto_ml/client.py
index 6ca00fc8..8fdeecdd 100644
--- a/google/cloud/automl_v1/services/auto_ml/client.py
+++ b/google/cloud/automl_v1/services/auto_ml/client.py
@@ -14,23 +14,25 @@
# limitations under the License.
#
from collections import OrderedDict
-from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
-OptionalRetry = Union[retries.Retry, object]
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
@@ -105,7 +107,7 @@ class AutoMlClient(metaclass=AutoMlClientMeta):
Currently the only supported ``location_id`` is "us-central1".
On any input that is documented to expect a string parameter in
- snake_case or kebab-case, either of those cases is accepted.
+ snake_case or dash-case, either of those cases is accepted.
"""
@staticmethod
@@ -374,8 +376,15 @@ def __init__(
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
- use_client_cert = bool(
- util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
+ "true",
+ "false",
+ ):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ use_client_cert = (
+ os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
@@ -1584,7 +1593,6 @@ def deploy_model(
r"""Deploys a model. If a model is already deployed, deploying it
with the same parameters has no effect. Deploying with different
parametrs (as e.g. changing
-
[node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
will reset the deployment state without pausing the model's
availability.
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/base.py b/google/cloud/automl_v1/services/auto_ml/transports/base.py
index 57f0f0fc..d96b3823 100644
--- a/google/cloud/automl_v1/services/auto_ml/transports/base.py
+++ b/google/cloud/automl_v1/services/auto_ml/transports/base.py
@@ -18,11 +18,11 @@
import pkg_resources
import google.auth # type: ignore
-import google.api_core # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.api_core import operations_v1 # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
@@ -107,7 +107,6 @@ def __init__(
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
-
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/grpc.py b/google/cloud/automl_v1/services/auto_ml/transports/grpc.py
index f1995874..6765f1db 100644
--- a/google/cloud/automl_v1/services/auto_ml/transports/grpc.py
+++ b/google/cloud/automl_v1/services/auto_ml/transports/grpc.py
@@ -16,9 +16,9 @@
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers
+from google.api_core import operations_v1
+from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
@@ -53,7 +53,7 @@ class AutoMlGrpcTransport(AutoMlTransport):
Currently the only supported ``location_id`` is "us-central1".
On any input that is documented to expect a string parameter in
- snake_case or kebab-case, either of those cases is accepted.
+ snake_case or dash-case, either of those cases is accepted.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
@@ -626,7 +626,6 @@ def deploy_model(
Deploys a model. If a model is already deployed, deploying it
with the same parameters has no effect. Deploying with different
parametrs (as e.g. changing
-
[node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
will reset the deployment state without pausing the model's
availability.
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py b/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py
index 38c244e4..f5644c10 100644
--- a/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py
@@ -16,9 +16,9 @@
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
-from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
@@ -54,7 +54,7 @@ class AutoMlGrpcAsyncIOTransport(AutoMlTransport):
Currently the only supported ``location_id`` is "us-central1".
On any input that is documented to expect a string parameter in
- snake_case or kebab-case, either of those cases is accepted.
+ snake_case or dash-case, either of those cases is accepted.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
@@ -638,7 +638,6 @@ def deploy_model(
Deploys a model. If a model is already deployed, deploying it
with the same parameters has no effect. Deploying with different
parametrs (as e.g. changing
-
[node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
will reset the deployment state without pausing the model's
availability.
diff --git a/google/cloud/automl_v1/services/prediction_service/async_client.py b/google/cloud/automl_v1/services/prediction_service/async_client.py
index c376485c..e2eeb0f0 100644
--- a/google/cloud/automl_v1/services/prediction_service/async_client.py
+++ b/google/cloud/automl_v1/services/prediction_service/async_client.py
@@ -19,14 +19,17 @@
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core.client_options import ClientOptions # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
-OptionalRetry = Union[retries.Retry, object]
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
@@ -44,7 +47,7 @@ class PredictionServiceAsyncClient:
"""AutoML Prediction API.
On any input that is documented to expect a string parameter in
- snake_case or kebab-case, either of those cases is accepted.
+ snake_case or dash-case, either of those cases is accepted.
"""
_client: PredictionServiceClient
@@ -267,7 +270,6 @@ async def predict(
AutoML Tables
``feature_importance`` : (boolean) Whether
-
[feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance]
is populated in the returned list of
[TablesAnnotation][google.cloud.automl.v1.TablesAnnotation]
diff --git a/google/cloud/automl_v1/services/prediction_service/client.py b/google/cloud/automl_v1/services/prediction_service/client.py
index be8b4b4d..22de25cd 100644
--- a/google/cloud/automl_v1/services/prediction_service/client.py
+++ b/google/cloud/automl_v1/services/prediction_service/client.py
@@ -14,23 +14,25 @@
# limitations under the License.
#
from collections import OrderedDict
-from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
-OptionalRetry = Union[retries.Retry, object]
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
@@ -83,7 +85,7 @@ class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
"""AutoML Prediction API.
On any input that is documented to expect a string parameter in
- snake_case or kebab-case, either of those cases is accepted.
+ snake_case or dash-case, either of those cases is accepted.
"""
@staticmethod
@@ -294,8 +296,15 @@ def __init__(
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
- use_client_cert = bool(
- util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
+ "true",
+ "false",
+ ):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ use_client_cert = (
+ os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
@@ -455,7 +464,6 @@ def predict(
AutoML Tables
``feature_importance`` : (boolean) Whether
-
[feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance]
is populated in the returned list of
[TablesAnnotation][google.cloud.automl.v1.TablesAnnotation]
diff --git a/google/cloud/automl_v1/services/prediction_service/transports/base.py b/google/cloud/automl_v1/services/prediction_service/transports/base.py
index 486a0cdb..7470d7e9 100644
--- a/google/cloud/automl_v1/services/prediction_service/transports/base.py
+++ b/google/cloud/automl_v1/services/prediction_service/transports/base.py
@@ -18,11 +18,11 @@
import pkg_resources
import google.auth # type: ignore
-import google.api_core # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.api_core import operations_v1 # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
@@ -101,7 +101,6 @@ def __init__(
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
-
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
diff --git a/google/cloud/automl_v1/services/prediction_service/transports/grpc.py b/google/cloud/automl_v1/services/prediction_service/transports/grpc.py
index a1ba6ec4..ec174da7 100644
--- a/google/cloud/automl_v1/services/prediction_service/transports/grpc.py
+++ b/google/cloud/automl_v1/services/prediction_service/transports/grpc.py
@@ -16,9 +16,9 @@
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers
+from google.api_core import operations_v1
+from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
@@ -36,7 +36,7 @@ class PredictionServiceGrpcTransport(PredictionServiceTransport):
AutoML Prediction API.
On any input that is documented to expect a string parameter in
- snake_case or kebab-case, either of those cases is accepted.
+ snake_case or dash-case, either of those cases is accepted.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
diff --git a/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py
index 8549d7a9..cca25f11 100644
--- a/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py
@@ -16,9 +16,9 @@
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
-from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
@@ -37,7 +37,7 @@ class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport):
AutoML Prediction API.
On any input that is documented to expect a string parameter in
- snake_case or kebab-case, either of those cases is accepted.
+ snake_case or dash-case, either of those cases is accepted.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
diff --git a/google/cloud/automl_v1/types/annotation_payload.py b/google/cloud/automl_v1/types/annotation_payload.py
index 74b63c72..56f7401a 100644
--- a/google/cloud/automl_v1/types/annotation_payload.py
+++ b/google/cloud/automl_v1/types/annotation_payload.py
@@ -40,20 +40,25 @@ class AnnotationPayload(proto.Message):
Attributes:
translation (google.cloud.automl_v1.types.TranslationAnnotation):
Annotation details for translation.
+
This field is a member of `oneof`_ ``detail``.
classification (google.cloud.automl_v1.types.ClassificationAnnotation):
Annotation details for content or image
classification.
+
This field is a member of `oneof`_ ``detail``.
image_object_detection (google.cloud.automl_v1.types.ImageObjectDetectionAnnotation):
Annotation details for image object
detection.
+
This field is a member of `oneof`_ ``detail``.
text_extraction (google.cloud.automl_v1.types.TextExtractionAnnotation):
Annotation details for text extraction.
+
This field is a member of `oneof`_ ``detail``.
text_sentiment (google.cloud.automl_v1.types.TextSentimentAnnotation):
Annotation details for text sentiment.
+
This field is a member of `oneof`_ ``detail``.
annotation_spec_id (str):
Output only . The resource ID of the
diff --git a/google/cloud/automl_v1/types/annotation_spec.py b/google/cloud/automl_v1/types/annotation_spec.py
index 5bcbcff3..05953b96 100644
--- a/google/cloud/automl_v1/types/annotation_spec.py
+++ b/google/cloud/automl_v1/types/annotation_spec.py
@@ -27,7 +27,6 @@ class AnnotationSpec(proto.Message):
Attributes:
name (str):
Output only. Resource name of the annotation spec. Form:
-
'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}'
display_name (str):
Required. The name of the annotation spec to show in the
diff --git a/google/cloud/automl_v1/types/classification.py b/google/cloud/automl_v1/types/classification.py
index 40202f16..2103966d 100644
--- a/google/cloud/automl_v1/types/classification.py
+++ b/google/cloud/automl_v1/types/classification.py
@@ -169,7 +169,6 @@ class ConfusionMatrix(proto.Message):
annotation_spec_id (Sequence[str]):
Output only. IDs of the annotation specs used in the
confusion matrix. For Tables CLASSIFICATION
-
[prediction_type][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]
only list of [annotation_spec_display_name-s][] is
populated.
@@ -177,7 +176,6 @@ class ConfusionMatrix(proto.Message):
Output only. Display name of the annotation specs used in
the confusion matrix, as they were at the moment of the
evaluation. For Tables CLASSIFICATION
-
[prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type],
distinct values of the target column at the moment of the
model evaluation are populated here.
diff --git a/google/cloud/automl_v1/types/data_items.py b/google/cloud/automl_v1/types/data_items.py
index 8aaabc7a..041552e4 100644
--- a/google/cloud/automl_v1/types/data_items.py
+++ b/google/cloud/automl_v1/types/data_items.py
@@ -44,6 +44,7 @@ class Image(proto.Message):
Image content represented as a stream of bytes. Note: As
with all ``bytes`` fields, protobuffers use a pure binary
representation, whereas JSON representations use base64.
+
This field is a member of `oneof`_ ``data``.
thumbnail_uri (str):
Output only. HTTP URI to the thumbnail image.
@@ -138,7 +139,6 @@ class Layout(proto.Message):
The position of the
[text_segment][google.cloud.automl.v1.Document.Layout.text_segment]
in the page. Contains exactly 4
-
[normalized_vertices][google.cloud.automl.v1p1beta.BoundingPoly.normalized_vertices]
and they are connected by edges in the order provided, which
will represent a rectangle parallel to the frame. The
@@ -199,12 +199,15 @@ class ExamplePayload(proto.Message):
Attributes:
image (google.cloud.automl_v1.types.Image):
Example image.
+
This field is a member of `oneof`_ ``payload``.
text_snippet (google.cloud.automl_v1.types.TextSnippet):
Example text.
+
This field is a member of `oneof`_ ``payload``.
document (google.cloud.automl_v1.types.Document):
Example document.
+
This field is a member of `oneof`_ ``payload``.
"""
diff --git a/google/cloud/automl_v1/types/dataset.py b/google/cloud/automl_v1/types/dataset.py
index fcca1cd2..2dbb31ab 100644
--- a/google/cloud/automl_v1/types/dataset.py
+++ b/google/cloud/automl_v1/types/dataset.py
@@ -39,26 +39,32 @@ class Dataset(proto.Message):
Attributes:
translation_dataset_metadata (google.cloud.automl_v1.types.TranslationDatasetMetadata):
Metadata for a dataset used for translation.
+
This field is a member of `oneof`_ ``dataset_metadata``.
image_classification_dataset_metadata (google.cloud.automl_v1.types.ImageClassificationDatasetMetadata):
Metadata for a dataset used for image
classification.
+
This field is a member of `oneof`_ ``dataset_metadata``.
text_classification_dataset_metadata (google.cloud.automl_v1.types.TextClassificationDatasetMetadata):
Metadata for a dataset used for text
classification.
+
This field is a member of `oneof`_ ``dataset_metadata``.
image_object_detection_dataset_metadata (google.cloud.automl_v1.types.ImageObjectDetectionDatasetMetadata):
Metadata for a dataset used for image object
detection.
+
This field is a member of `oneof`_ ``dataset_metadata``.
text_extraction_dataset_metadata (google.cloud.automl_v1.types.TextExtractionDatasetMetadata):
Metadata for a dataset used for text
extraction.
+
This field is a member of `oneof`_ ``dataset_metadata``.
text_sentiment_dataset_metadata (google.cloud.automl_v1.types.TextSentimentDatasetMetadata):
Metadata for a dataset used for text
sentiment.
+
This field is a member of `oneof`_ ``dataset_metadata``.
name (str):
Output only. The resource name of the dataset. Form:
diff --git a/google/cloud/automl_v1/types/image.py b/google/cloud/automl_v1/types/image.py
index 129cb5d7..e5d1e151 100644
--- a/google/cloud/automl_v1/types/image.py
+++ b/google/cloud/automl_v1/types/image.py
@@ -61,13 +61,13 @@ class ImageClassificationModelMetadata(proto.Message):
``location`` as the new model to create, and have the same
``model_type``.
train_budget_milli_node_hours (int):
- The train budget of creating this model, expressed in milli
- node hours i.e. 1,000 value in this field means 1 node hour.
- The actual ``train_cost`` will be equal or less than this
- value. If further model training ceases to provide any
- improvements, it will stop without using full budget and the
- stop_reason will be ``MODEL_CONVERGED``. Note, node_hour =
- actual_hour \* number_of_nodes_invovled. For model type
+ Optional. The train budget of creating this model, expressed
+ in milli node hours i.e. 1,000 value in this field means 1
+ node hour. The actual ``train_cost`` will be equal or less
+ than this value. If further model training ceases to provide
+ any improvements, it will stop without using full budget and
+ the stop_reason will be ``MODEL_CONVERGED``. Note, node_hour
+ = actual_hour \* number_of_nodes_involved. For model type
``cloud``\ (default), the train budget must be between 8,000
and 800,000 milli node hours, inclusive. The default value
is 192, 000 which represents one day in wall time. For model
@@ -199,13 +199,13 @@ class ImageObjectDetectionModelMetadata(proto.Message):
Output only. The reason that this create model operation
stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``.
train_budget_milli_node_hours (int):
- The train budget of creating this model, expressed in milli
- node hours i.e. 1,000 value in this field means 1 node hour.
- The actual ``train_cost`` will be equal or less than this
- value. If further model training ceases to provide any
- improvements, it will stop without using full budget and the
- stop_reason will be ``MODEL_CONVERGED``. Note, node_hour =
- actual_hour \* number_of_nodes_invovled. For model type
+ Optional. The train budget of creating this model, expressed
+ in milli node hours i.e. 1,000 value in this field means 1
+ node hour. The actual ``train_cost`` will be equal or less
+ than this value. If further model training ceases to provide
+ any improvements, it will stop without using full budget and
+ the stop_reason will be ``MODEL_CONVERGED``. Note, node_hour
+             = actual_hour \* number_of_nodes_involved. For model type
``cloud-high-accuracy-1``\ (default) and
``cloud-low-latency-1``, the train budget must be between
20,000 and 900,000 milli node hours, inclusive. The default
@@ -242,7 +242,6 @@ class ImageClassificationModelDeploymentMetadata(proto.Message):
Input only. The number of nodes to deploy the model on. A
node is an abstraction of a machine resource, which can
handle online prediction QPS as given in the model's
-
[node_qps][google.cloud.automl.v1.ImageClassificationModelMetadata.node_qps].
Must be between 1 and 100, inclusive on both ends.
"""
@@ -258,7 +257,6 @@ class ImageObjectDetectionModelDeploymentMetadata(proto.Message):
Input only. The number of nodes to deploy the model on. A
node is an abstraction of a machine resource, which can
handle online prediction QPS as given in the model's
-
[qps_per_node][google.cloud.automl.v1.ImageObjectDetectionModelMetadata.qps_per_node].
Must be between 1 and 100, inclusive on both ends.
"""
diff --git a/google/cloud/automl_v1/types/io.py b/google/cloud/automl_v1/types/io.py
index 4c01feb4..171d5484 100644
--- a/google/cloud/automl_v1/types/io.py
+++ b/google/cloud/automl_v1/types/io.py
@@ -613,7 +613,6 @@ class InputConfig(proto.Message):
[gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or
[bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source].
All input is concatenated into a single
-
[primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id]
**For gcs_source:**
@@ -632,9 +631,7 @@ class InputConfig(proto.Message):
"Id","First Name","Last Name","Dob","Addresses"
-
"1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
-
"2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
@@ -721,6 +718,7 @@ class InputConfig(proto.Message):
``gcs_source`` points to a CSV file with a structure
described in
[InputConfig][google.cloud.automl.v1.InputConfig].
+
This field is a member of `oneof`_ ``source``.
params (Sequence[google.cloud.automl_v1.types.InputConfig.ParamsEntry]):
Additional domain-specific parameters describing the
@@ -1011,7 +1009,6 @@ class BatchPredictInputConfig(proto.Message):
contain values for the corresponding columns.
The column names must contain the model's
-
[input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
[display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
(order doesn't matter). The columns corresponding to the model's
@@ -1025,9 +1022,7 @@ class BatchPredictInputConfig(proto.Message):
"First Name","Last Name","Dob","Addresses"
-
"John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
-
"Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
@@ -1037,7 +1032,6 @@ class BatchPredictInputConfig(proto.Message):
table must be 100GB or smaller.
The column names must contain the model's
-
[input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
[display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
(order doesn't matter). The columns corresponding to the model's
@@ -1082,6 +1076,7 @@ class BatchPredictInputConfig(proto.Message):
gcs_source (google.cloud.automl_v1.types.GcsSource):
Required. The Google Cloud Storage location
for the input content.
+
This field is a member of `oneof`_ ``source``.
"""
@@ -1117,23 +1112,21 @@ class OutputConfig(proto.Message):
- For Tables: Output depends on whether the dataset was imported
from Google Cloud Storage or BigQuery. Google Cloud Storage
case:
-
- [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination]
- must be set. Exported are CSV file(s) ``tables_1.csv``,
- ``tables_2.csv``,...,\ ``tables_N.csv`` with each having as header
- line the table's column names, and all other lines contain values
- for the header columns. BigQuery case:
-
- [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
- pointing to a BigQuery project must be set. In the given project a
- new dataset will be created with name
-
- ``export_data__``
- where will be made BigQuery-dataset-name compatible (e.g. most
- special characters will become underscores), and timestamp will be
- in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that
- dataset a new table called ``primary_table`` will be created, and
- filled with precisely the same data as this obtained on import.
+ [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination]
+ must be set. Exported are CSV file(s) ``tables_1.csv``,
+ ``tables_2.csv``,...,\ ``tables_N.csv`` with each having as
+ header line the table's column names, and all other lines
+ contain values for the header columns. BigQuery case:
+ [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
+ pointing to a BigQuery project must be set. In the given
+ project a new dataset will be created with name
+ ``export_data__``
+ where will be made BigQuery-dataset-name compatible (e.g. most
+ special characters will become underscores), and timestamp
+ will be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601"
+ format. In that dataset a new table called ``primary_table``
+ will be created, and filled with precisely the same data as
+ this obtained on import.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
@@ -1147,6 +1140,7 @@ class OutputConfig(proto.Message):
export_data-- where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ
ISO-8601 format. All export output will be written into that
directory.
+
This field is a member of `oneof`_ ``destination``.
"""
@@ -1159,7 +1153,6 @@ class BatchPredictOutputConfig(proto.Message):
r"""Output configuration for BatchPredict Action.
As destination the
-
[gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination]
must be set unless specified otherwise for a domain. If
gcs_destination is set then in the given directory a new directory
@@ -1184,10 +1177,8 @@ class BatchPredictOutputConfig(proto.Message):
predictions). These files will have a JSON representation of a
proto that wraps the same "ID" : "" but here followed
by exactly one
-
- [``google.rpc.Status``](https:
- //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
- containing only ``code`` and ``message``\ fields.
+ ```google.rpc.Status`` `__
+ containing only ``code`` and ``message``\ fields.
- For Image Object Detection: In the created directory files
``image_object_detection_1.jsonl``,
@@ -1206,10 +1197,8 @@ class BatchPredictOutputConfig(proto.Message):
predictions). These files will have a JSON representation of a
proto that wraps the same "ID" : "" but here followed
by exactly one
-
- [``google.rpc.Status``](https:
- //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
- containing only ``code`` and ``message``\ fields.
+ ```google.rpc.Status`` `__
+ containing only ``code`` and ``message``\ fields.
- For Video Classification: In the created directory a
video_classification.csv file, and a .JSON file per each video
@@ -1219,27 +1208,25 @@ class BatchPredictOutputConfig(proto.Message):
::
The format of video_classification.csv is:
-
- GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
- where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1
- to 1 the prediction input lines (i.e. video_classification.csv has
- precisely the same number of lines as the prediction input had.)
- JSON_FILE_NAME = Name of .JSON file in the output directory, which
- contains prediction responses for the video time segment. STATUS =
- "OK" if prediction completed successfully, or an error code with
- message otherwise. If STATUS is not "OK" then the .JSON file for
- that line may not exist or be empty.
-
- ::
-
- Each .JSON file, assuming STATUS is "OK", will contain a list of
- AnnotationPayload protos in JSON format, which are the predictions
- for the video time segment the file is assigned to in the
- video_classification.csv. All AnnotationPayload protos will have
- video_classification field set, and will be sorted by
- video_classification.type field (note that the returned types are
- governed by `classifaction_types` parameter in
- [PredictService.BatchPredictRequest.params][]).
+ GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
+ where:
+ GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
+ the prediction input lines (i.e. video_classification.csv has
+ precisely the same number of lines as the prediction input had.)
+ JSON_FILE_NAME = Name of .JSON file in the output directory, which
+ contains prediction responses for the video time segment.
+ STATUS = "OK" if prediction completed successfully, or an error code
+ with message otherwise. If STATUS is not "OK" then the .JSON file
+ for that line may not exist or be empty.
+
+ Each .JSON file, assuming STATUS is "OK", will contain a list of
+ AnnotationPayload protos in JSON format, which are the predictions
+ for the video time segment the file is assigned to in the
+ video_classification.csv. All AnnotationPayload protos will have
+ video_classification field set, and will be sorted by
+ video_classification.type field (note that the returned types are
+ governed by `classifaction_types` parameter in
+ [PredictService.BatchPredictRequest.params][]).
- For Video Object Tracking: In the created directory a
video_object_tracking.csv file will be created, and multiple
@@ -1251,24 +1238,22 @@ class BatchPredictOutputConfig(proto.Message):
::
The format of video_object_tracking.csv is:
-
- GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
- where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1
- to 1 the prediction input lines (i.e. video_object_tracking.csv has
- precisely the same number of lines as the prediction input had.)
- JSON_FILE_NAME = Name of .JSON file in the output directory, which
- contains prediction responses for the video time segment. STATUS =
- "OK" if prediction completed successfully, or an error code with
- message otherwise. If STATUS is not "OK" then the .JSON file for
- that line may not exist or be empty.
-
- ::
-
- Each .JSON file, assuming STATUS is "OK", will contain a list of
- AnnotationPayload protos in JSON format, which are the predictions
- for each frame of the video time segment the file is assigned to in
- video_object_tracking.csv. All AnnotationPayload protos will have
- video_object_tracking field set.
+ GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
+ where:
+ GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
+ the prediction input lines (i.e. video_object_tracking.csv has
+ precisely the same number of lines as the prediction input had.)
+ JSON_FILE_NAME = Name of .JSON file in the output directory, which
+ contains prediction responses for the video time segment.
+ STATUS = "OK" if prediction completed successfully, or an error
+ code with message otherwise. If STATUS is not "OK" then the .JSON
+ file for that line may not exist or be empty.
+
+ Each .JSON file, assuming STATUS is "OK", will contain a list of
+ AnnotationPayload protos in JSON format, which are the predictions
+ for each frame of the video time segment the file is assigned to in
+ video_object_tracking.csv. All AnnotationPayload protos will have
+ video_object_tracking field set.
- For Text Classification: In the created directory files
``text_classification_1.jsonl``,
@@ -1291,10 +1276,8 @@ class BatchPredictOutputConfig(proto.Message):
`errors_N.jsonl` files will be created (N depends on total number of
failed predictions). These files will have a JSON representation of a
proto that wraps input file followed by exactly one
-
- [``google.rpc.Status``](https:
- //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
- containing only ``code`` and ``message``.
+ [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ containing only `code` and `message`.
- For Text Sentiment: In the created directory files
``text_sentiment_1.jsonl``,
@@ -1317,10 +1300,8 @@ class BatchPredictOutputConfig(proto.Message):
`errors_N.jsonl` files will be created (N depends on total number of
failed predictions). These files will have a JSON representation of a
proto that wraps input file followed by exactly one
-
- [``google.rpc.Status``](https:
- //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
- containing only ``code`` and ``message``.
+ [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
+ containing only `code` and `message`.
- For Text Extraction: In the created directory files
``text_extraction_1.jsonl``,
@@ -1351,98 +1332,78 @@ class BatchPredictOutputConfig(proto.Message):
will have a JSON representation of a proto that wraps either the
"id" : "" (in case of inline) or the document proto (in
case of document) but here followed by exactly one
-
- [``google.rpc.Status``](https:
- //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
- containing only ``code`` and ``message``.
+ ```google.rpc.Status`` `__
+ containing only ``code`` and ``message``.
- For Tables: Output depends on whether
-
- [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination]
- or
-
- [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination]
- is set (either is allowed). Google Cloud Storage case: In the
- created directory files ``tables_1.csv``, ``tables_2.csv``,...,
- ``tables_N.csv`` will be created, where N may be 1, and depends on
- the total number of the successfully predicted rows. For all
- CLASSIFICATION
-
- [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
- Each .csv file will contain a header, listing all columns'
-
- [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
- given on input followed by M target column names in the format of
-
- "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
-
- [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>\_\_score"
- where M is the number of distinct target values, i.e. number of
- distinct values in the target column of the table used to train the
- model. Subsequent lines will contain the respective values of
- successfully predicted rows, with the last, i.e. the target, columns
- having the corresponding prediction
- [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. For
- REGRESSION and FORECASTING
-
- [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
- Each .csv file will contain a header, listing all columns'
- [display_name-s][google.cloud.automl.v1p1beta.display_name] given on
- input followed by the predicted target column with name in the
- format of
-
- "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
-
- [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
- Subsequent lines will contain the respective values of successfully
- predicted rows, with the last, i.e. the target, column having the
- predicted target value. If prediction for any rows failed, then an
- additional ``errors_1.csv``, ``errors_2.csv``,..., ``errors_N.csv``
- will be created (N depends on total number of failed rows). These
- files will have analogous format as ``tables_*.csv``, but always
- with a single target column having
-
- [``google.rpc.Status``](https:
- //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
- represented as a JSON string, and containing only ``code`` and
- ``message``. BigQuery case:
-
- [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
- pointing to a BigQuery project must be set. In the given project a
- new dataset will be created with name
- ``prediction__``
- where will be made BigQuery-dataset-name compatible (e.g. most
- special characters will become underscores), and timestamp will be
- in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the
- dataset two tables will be created, ``predictions``, and ``errors``.
- The ``predictions`` table's column names will be the input columns'
-
- [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
- followed by the target column with name in the format of
-
- "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
-
- [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
- The input feature columns will contain the respective values of
- successfully predicted rows, with the target column having an ARRAY
- of
-
- [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload],
- represented as STRUCT-s, containing
- [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation].
- The ``errors`` table contains rows for which the prediction has
- failed, it has analogous input columns while the target column name
- is in the format of
-
- "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
-
- [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>",
- and as a value has
-
- [``google.rpc.Status``](https:
- //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
- represented as a STRUCT, and containing only ``code`` and
- ``message``.
+ [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination]
+ or
+ [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination]
+ is set (either is allowed). Google Cloud Storage case: In the
+ created directory files ``tables_1.csv``, ``tables_2.csv``,...,
+ ``tables_N.csv`` will be created, where N may be 1, and depends
+ on the total number of the successfully predicted rows. For all
+ CLASSIFICATION
+ [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
+ Each .csv file will contain a header, listing all columns'
+ [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
+ given on input followed by M target column names in the format of
+ "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
+ [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>*\ score"
+ where M is the number of distinct target values, i.e. number of
+ distinct values in the target column of the table used to train
+ the model. Subsequent lines will contain the respective values of
+ successfully predicted rows, with the last, i.e. the target,
+ columns having the corresponding prediction
+ [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score].
+ For REGRESSION and FORECASTING
+ [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
+ Each .csv file will contain a header, listing all columns'
+ [display_name-s][google.cloud.automl.v1p1beta.display_name] given
+ on input followed by the predicted target column with name in the
+ format of
+ "predicted\ <[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
+ [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
+ Subsequent lines will contain the respective values of
+ successfully predicted rows, with the last, i.e. the target,
+ column having the predicted target value. If prediction for any
+ rows failed, then an additional ``errors_1.csv``,
+ ``errors_2.csv``,..., ``errors_N.csv`` will be created (N depends
+ on total number of failed rows). These files will have analogous
+ format as ``tables_*.csv``, but always with a single target
+ column
+ having*\ ```google.rpc.Status`` `__\ *represented
+ as a JSON string, and containing only ``code`` and ``message``.
+ BigQuery case:
+ [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
+ pointing to a BigQuery project must be set. In the given project
+ a new dataset will be created with name
+ ``prediction__``
+ where will be made BigQuery-dataset-name compatible (e.g. most
+ special characters will become underscores), and timestamp will
+ be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the
+ dataset two tables will be created, ``predictions``, and
+ ``errors``. The ``predictions`` table's column names will be the
+ input columns'
+ [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
+ followed by the target column with name in the format of
+ "predicted*\ <[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
+ [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
+ The input feature columns will contain the respective values of
+ successfully predicted rows, with the target column having an
+ ARRAY of
+ [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload],
+ represented as STRUCT-s, containing
+ [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation].
+ The ``errors`` table contains rows for which the prediction has
+ failed, it has analogous input columns while the target column
+ name is in the format of
+ "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
+ [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>",
+ and as a value has
+ ```google.rpc.Status`` `__
+ represented as a STRUCT, and containing only ``code`` and
+ ``message``.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
@@ -1452,6 +1413,7 @@ class BatchPredictOutputConfig(proto.Message):
Required. The Google Cloud Storage location
of the directory where the output is to be
written to.
+
This field is a member of `oneof`_ ``destination``.
"""
@@ -1477,6 +1439,7 @@ class ModelExportOutputConfig(proto.Message):
YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created.
Inside the model and any of its supporting files will be
written.
+
This field is a member of `oneof`_ ``destination``.
model_format (str):
The format in which the model must be exported. The
@@ -1513,10 +1476,8 @@ class ModelExportOutputConfig(proto.Message):
- docker - Used for Docker containers. Use the params field
to customize the container. The container is verified to
work correctly on ubuntu 16.04 operating system. See more
- at [containers
-
- quickstart](https:
- //cloud.google.com/vision/automl/docs/containers-gcs-quickstart)
+ at `containers
+ quickstart `__
- core_ml - Used for iOS mobile devices.
params (Sequence[google.cloud.automl_v1.types.ModelExportOutputConfig.ParamsEntry]):
diff --git a/google/cloud/automl_v1/types/model.py b/google/cloud/automl_v1/types/model.py
index 0bee0213..d451188f 100644
--- a/google/cloud/automl_v1/types/model.py
+++ b/google/cloud/automl_v1/types/model.py
@@ -37,21 +37,27 @@ class Model(proto.Message):
Attributes:
translation_model_metadata (google.cloud.automl_v1.types.TranslationModelMetadata):
Metadata for translation models.
+
This field is a member of `oneof`_ ``model_metadata``.
image_classification_model_metadata (google.cloud.automl_v1.types.ImageClassificationModelMetadata):
Metadata for image classification models.
+
This field is a member of `oneof`_ ``model_metadata``.
text_classification_model_metadata (google.cloud.automl_v1.types.TextClassificationModelMetadata):
Metadata for text classification models.
+
This field is a member of `oneof`_ ``model_metadata``.
image_object_detection_model_metadata (google.cloud.automl_v1.types.ImageObjectDetectionModelMetadata):
Metadata for image object detection models.
+
This field is a member of `oneof`_ ``model_metadata``.
text_extraction_model_metadata (google.cloud.automl_v1.types.TextExtractionModelMetadata):
Metadata for text extraction models.
+
This field is a member of `oneof`_ ``model_metadata``.
text_sentiment_model_metadata (google.cloud.automl_v1.types.TextSentimentModelMetadata):
Metadata for text sentiment models.
+
This field is a member of `oneof`_ ``model_metadata``.
name (str):
Output only. Resource name of the model. Format:
diff --git a/google/cloud/automl_v1/types/model_evaluation.py b/google/cloud/automl_v1/types/model_evaluation.py
index db187bae..2b483a46 100644
--- a/google/cloud/automl_v1/types/model_evaluation.py
+++ b/google/cloud/automl_v1/types/model_evaluation.py
@@ -44,24 +44,28 @@ class ModelEvaluation(proto.Message):
video and tables classification.
Tables problem is considered a classification
when the target column is CATEGORY DataType.
+
This field is a member of `oneof`_ ``metrics``.
translation_evaluation_metrics (google.cloud.automl_v1.types.TranslationEvaluationMetrics):
Model evaluation metrics for translation.
+
This field is a member of `oneof`_ ``metrics``.
image_object_detection_evaluation_metrics (google.cloud.automl_v1.types.ImageObjectDetectionEvaluationMetrics):
Model evaluation metrics for image object
detection.
+
This field is a member of `oneof`_ ``metrics``.
text_sentiment_evaluation_metrics (google.cloud.automl_v1.types.TextSentimentEvaluationMetrics):
Evaluation metrics for text sentiment models.
+
This field is a member of `oneof`_ ``metrics``.
text_extraction_evaluation_metrics (google.cloud.automl_v1.types.TextExtractionEvaluationMetrics):
Evaluation metrics for text extraction
models.
+
This field is a member of `oneof`_ ``metrics``.
name (str):
Output only. Resource name of the model evaluation. Format:
-
``projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}``
annotation_spec_id (str):
Output only. The ID of the annotation spec that the model
@@ -69,7 +73,6 @@ class ModelEvaluation(proto.Message):
model evaluation. For Tables annotation specs in the dataset
do not exist and this ID is always not set, but for
CLASSIFICATION
-
[prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
the
[display_name][google.cloud.automl.v1.ModelEvaluation.display_name]
@@ -82,7 +85,6 @@ class ModelEvaluation(proto.Message):
trained from the same dataset, the values may differ, since
display names could had been changed between the two model's
trainings. For Tables CLASSIFICATION
-
[prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
distinct values of the target column at the moment of the
model evaluation are populated here. The display_name is
@@ -99,7 +101,6 @@ class ModelEvaluation(proto.Message):
examples used for evaluation. Otherwise, this is the count
of examples that according to the ground truth were
annotated by the
-
[annotation_spec_id][google.cloud.automl.v1.ModelEvaluation.annotation_spec_id].
"""
diff --git a/google/cloud/automl_v1/types/operations.py b/google/cloud/automl_v1/types/operations.py
index a18624d5..14dde266 100644
--- a/google/cloud/automl_v1/types/operations.py
+++ b/google/cloud/automl_v1/types/operations.py
@@ -51,30 +51,39 @@ class OperationMetadata(proto.Message):
Attributes:
delete_details (google.cloud.automl_v1.types.DeleteOperationMetadata):
Details of a Delete operation.
+
This field is a member of `oneof`_ ``details``.
deploy_model_details (google.cloud.automl_v1.types.DeployModelOperationMetadata):
Details of a DeployModel operation.
+
This field is a member of `oneof`_ ``details``.
undeploy_model_details (google.cloud.automl_v1.types.UndeployModelOperationMetadata):
Details of an UndeployModel operation.
+
This field is a member of `oneof`_ ``details``.
create_model_details (google.cloud.automl_v1.types.CreateModelOperationMetadata):
Details of CreateModel operation.
+
This field is a member of `oneof`_ ``details``.
create_dataset_details (google.cloud.automl_v1.types.CreateDatasetOperationMetadata):
Details of CreateDataset operation.
+
This field is a member of `oneof`_ ``details``.
import_data_details (google.cloud.automl_v1.types.ImportDataOperationMetadata):
Details of ImportData operation.
+
This field is a member of `oneof`_ ``details``.
batch_predict_details (google.cloud.automl_v1.types.BatchPredictOperationMetadata):
Details of BatchPredict operation.
+
This field is a member of `oneof`_ ``details``.
export_data_details (google.cloud.automl_v1.types.ExportDataOperationMetadata):
Details of ExportData operation.
+
This field is a member of `oneof`_ ``details``.
export_model_details (google.cloud.automl_v1.types.ExportModelOperationMetadata):
Details of ExportModel operation.
+
This field is a member of `oneof`_ ``details``.
progress_percent (int):
Output only. Progress of operation. Range: [0, 100]. Not
@@ -203,6 +212,7 @@ class ExportDataOutputInfo(proto.Message):
The full path of the Google Cloud Storage
directory created, into which the exported data
is written.
+
This field is a member of `oneof`_ ``output_location``.
"""
@@ -227,7 +237,6 @@ class BatchPredictOperationMetadata(proto.Message):
class BatchPredictOutputInfo(proto.Message):
r"""Further describes this batch predict's output. Supplements
-
[BatchPredictOutputConfig][google.cloud.automl.v1.BatchPredictOutputConfig].
@@ -238,6 +247,7 @@ class BatchPredictOutputInfo(proto.Message):
The full path of the Google Cloud Storage
directory created, into which the prediction
output is written.
+
This field is a member of `oneof`_ ``output_location``.
"""
diff --git a/google/cloud/automl_v1/types/prediction_service.py b/google/cloud/automl_v1/types/prediction_service.py
index 30660c69..1a8de0e2 100644
--- a/google/cloud/automl_v1/types/prediction_service.py
+++ b/google/cloud/automl_v1/types/prediction_service.py
@@ -68,7 +68,6 @@ class PredictRequest(proto.Message):
AutoML Tables
``feature_importance`` : (boolean) Whether
-
[feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance]
is populated in the returned list of
[TablesAnnotation][google.cloud.automl.v1.TablesAnnotation]
diff --git a/google/cloud/automl_v1/types/service.py b/google/cloud/automl_v1/types/service.py
index 0c72c544..fa115967 100644
--- a/google/cloud/automl_v1/types/service.py
+++ b/google/cloud/automl_v1/types/service.py
@@ -92,11 +92,11 @@ class ListDatasetsRequest(proto.Message):
An expression for filtering the results of the request.
- ``dataset_metadata`` - for existence of the case (e.g.
- image_classification_dataset_metadata:*). Some examples
- of using the filter are:
+ ``image_classification_dataset_metadata:*``). Some
+ examples of using the filter are:
- ``translation_dataset_metadata:*`` --> The dataset has
- translation_dataset_metadata.
+ ``translation_dataset_metadata``.
page_size (int):
Requested page size. Server may return fewer
results than requested. If unspecified, server
@@ -257,13 +257,13 @@ class ListModelsRequest(proto.Message):
An expression for filtering the results of the request.
- ``model_metadata`` - for existence of the case (e.g.
- video_classification_model_metadata:*).
+ ``video_classification_model_metadata:*``).
- ``dataset_id`` - for = or !=. Some examples of using the
filter are:
- ``image_classification_model_metadata:*`` --> The model
- has image_classification_model_metadata.
+ has ``image_classification_model_metadata``.
- ``dataset_id=5`` --> The model was created from a dataset
with ID 5.
@@ -352,10 +352,12 @@ class DeployModelRequest(proto.Message):
image_object_detection_model_deployment_metadata (google.cloud.automl_v1.types.ImageObjectDetectionModelDeploymentMetadata):
Model deployment metadata specific to Image
Object Detection.
+
This field is a member of `oneof`_ ``model_deployment_metadata``.
image_classification_model_deployment_metadata (google.cloud.automl_v1.types.ImageClassificationModelDeploymentMetadata):
Model deployment metadata specific to Image
Classification.
+
This field is a member of `oneof`_ ``model_deployment_metadata``.
name (str):
Required. Resource name of the model to
diff --git a/google/cloud/automl_v1/types/text_extraction.py b/google/cloud/automl_v1/types/text_extraction.py
index 11243879..606f8050 100644
--- a/google/cloud/automl_v1/types/text_extraction.py
+++ b/google/cloud/automl_v1/types/text_extraction.py
@@ -34,6 +34,7 @@ class TextExtractionAnnotation(proto.Message):
An entity annotation will set this, which is
the part of the original text to which the
annotation pertains.
+
This field is a member of `oneof`_ ``annotation``.
score (float):
Output only. A confidence estimate between
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/async_client.py b/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
index c19a86bd..08465444 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
@@ -19,14 +19,17 @@
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core.client_options import ClientOptions # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
-OptionalRetry = Union[retries.Retry, object]
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/client.py b/google/cloud/automl_v1beta1/services/auto_ml/client.py
index 631650c0..217d1a83 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/client.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/client.py
@@ -14,23 +14,25 @@
# limitations under the License.
#
from collections import OrderedDict
-from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
-OptionalRetry = Union[retries.Retry, object]
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
@@ -422,8 +424,15 @@ def __init__(
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
- use_client_cert = bool(
- util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
+ "true",
+ "false",
+ ):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ use_client_cert = (
+ os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
index 8b05d97e..f3b1f45c 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
@@ -18,11 +18,11 @@
import pkg_resources
import google.auth # type: ignore
-import google.api_core # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.api_core import operations_v1 # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
@@ -110,7 +110,6 @@ def __init__(
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
-
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py
index 98731c7a..5d11126f 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py
@@ -16,9 +16,9 @@
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers
+from google.api_core import operations_v1
+from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py
index 7584fd26..70fff362 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py
@@ -16,9 +16,9 @@
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
-from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/async_client.py b/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
index 62131f62..32fe3c70 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
@@ -19,14 +19,17 @@
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core.client_options import ClientOptions # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
-OptionalRetry = Union[retries.Retry, object]
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/client.py b/google/cloud/automl_v1beta1/services/prediction_service/client.py
index 4cb9580d..71629b8c 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/client.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/client.py
@@ -14,23 +14,25 @@
# limitations under the License.
#
from collections import OrderedDict
-from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
-OptionalRetry = Union[retries.Retry, object]
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
@@ -294,8 +296,15 @@ def __init__(
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
- use_client_cert = bool(
- util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
+ "true",
+ "false",
+ ):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ use_client_cert = (
+ os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py
index 385fd157..5d3ace19 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py
@@ -18,11 +18,11 @@
import pkg_resources
import google.auth # type: ignore
-import google.api_core # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.api_core import operations_v1 # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
@@ -101,7 +101,6 @@ def __init__(
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
-
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py
index a6d6f940..ae97e4bf 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py
@@ -16,9 +16,9 @@
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers
+from google.api_core import operations_v1
+from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
index b1373afa..0464e415 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
@@ -16,9 +16,9 @@
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
-from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
diff --git a/google/cloud/automl_v1beta1/services/tables/tables_client.py b/google/cloud/automl_v1beta1/services/tables/tables_client.py
index 86a22c09..dcf2be40 100644
--- a/google/cloud/automl_v1beta1/services/tables/tables_client.py
+++ b/google/cloud/automl_v1beta1/services/tables/tables_client.py
@@ -38,12 +38,12 @@
def to_proto_value(value):
"""translates a Python value to a google.protobuf.Value.
- Args:
- value: The Python value to be translated.
+ Args:
+ value: The Python value to be translated.
- Returns:
- Tuple of the translated google.protobuf.Value and error if any.
- """
+ Returns:
+ Tuple of the translated google.protobuf.Value and error if any.
+ """
# possible Python types (this is a Python3 module):
# https://simplejson.readthedocs.io/en/latest/#encoders-and-decoders
# JSON Python 2 Python 3
@@ -158,8 +158,8 @@ def __init__(
to use for requests.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Custom options for the client.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
API requests.
"""
version = _GAPIC_LIBRARY_VERSION
@@ -1247,7 +1247,8 @@ def update_column_spec(
... project='my-project', region='us-central1')
...
>>> client.update_column_spec(dataset_display_name='my_dataset',
- ... column_spec_display_name='Outcome', type_code='CATEGORY')
+ ... column_spec_display_name='Outcome',
+ ... type_code=automl_v1beta1.TypeCode.CATEGORY)
...
Args:
diff --git a/google/cloud/automl_v1beta1/types/annotation_payload.py b/google/cloud/automl_v1beta1/types/annotation_payload.py
index bf9c1146..fb53ae83 100644
--- a/google/cloud/automl_v1beta1/types/annotation_payload.py
+++ b/google/cloud/automl_v1beta1/types/annotation_payload.py
@@ -41,30 +41,38 @@ class AnnotationPayload(proto.Message):
Attributes:
translation (google.cloud.automl_v1beta1.types.TranslationAnnotation):
Annotation details for translation.
+
This field is a member of `oneof`_ ``detail``.
classification (google.cloud.automl_v1beta1.types.ClassificationAnnotation):
Annotation details for content or image
classification.
+
This field is a member of `oneof`_ ``detail``.
image_object_detection (google.cloud.automl_v1beta1.types.ImageObjectDetectionAnnotation):
Annotation details for image object
detection.
+
This field is a member of `oneof`_ ``detail``.
video_classification (google.cloud.automl_v1beta1.types.VideoClassificationAnnotation):
Annotation details for video classification.
Returned for Video Classification predictions.
+
This field is a member of `oneof`_ ``detail``.
video_object_tracking (google.cloud.automl_v1beta1.types.VideoObjectTrackingAnnotation):
Annotation details for video object tracking.
+
This field is a member of `oneof`_ ``detail``.
text_extraction (google.cloud.automl_v1beta1.types.TextExtractionAnnotation):
Annotation details for text extraction.
+
This field is a member of `oneof`_ ``detail``.
text_sentiment (google.cloud.automl_v1beta1.types.TextSentimentAnnotation):
Annotation details for text sentiment.
+
This field is a member of `oneof`_ ``detail``.
tables (google.cloud.automl_v1beta1.types.TablesAnnotation):
Annotation details for Tables.
+
This field is a member of `oneof`_ ``detail``.
annotation_spec_id (str):
Output only . The resource ID of the
diff --git a/google/cloud/automl_v1beta1/types/data_items.py b/google/cloud/automl_v1beta1/types/data_items.py
index f97cd13b..bb22d9fb 100644
--- a/google/cloud/automl_v1beta1/types/data_items.py
+++ b/google/cloud/automl_v1beta1/types/data_items.py
@@ -50,10 +50,12 @@ class Image(proto.Message):
Image content represented as a stream of bytes. Note: As
with all ``bytes`` fields, protobuffers use a pure binary
representation, whereas JSON representations use base64.
+
This field is a member of `oneof`_ ``data``.
input_config (google.cloud.automl_v1beta1.types.InputConfig):
An input config specifying the content of the
image.
+
This field is a member of `oneof`_ ``data``.
thumbnail_uri (str):
Output only. HTTP URI to the thumbnail image.
@@ -238,15 +240,19 @@ class ExamplePayload(proto.Message):
Attributes:
image (google.cloud.automl_v1beta1.types.Image):
Example image.
+
This field is a member of `oneof`_ ``payload``.
text_snippet (google.cloud.automl_v1beta1.types.TextSnippet):
Example text.
+
This field is a member of `oneof`_ ``payload``.
document (google.cloud.automl_v1beta1.types.Document):
Example document.
+
This field is a member of `oneof`_ ``payload``.
row (google.cloud.automl_v1beta1.types.Row):
Example relational table row.
+
This field is a member of `oneof`_ ``payload``.
"""
diff --git a/google/cloud/automl_v1beta1/types/data_stats.py b/google/cloud/automl_v1beta1/types/data_stats.py
index fecc4584..094f36b8 100644
--- a/google/cloud/automl_v1beta1/types/data_stats.py
+++ b/google/cloud/automl_v1beta1/types/data_stats.py
@@ -45,21 +45,27 @@ class DataStats(proto.Message):
Attributes:
float64_stats (google.cloud.automl_v1beta1.types.Float64Stats):
The statistics for FLOAT64 DataType.
+
This field is a member of `oneof`_ ``stats``.
string_stats (google.cloud.automl_v1beta1.types.StringStats):
The statistics for STRING DataType.
+
This field is a member of `oneof`_ ``stats``.
timestamp_stats (google.cloud.automl_v1beta1.types.TimestampStats):
The statistics for TIMESTAMP DataType.
+
This field is a member of `oneof`_ ``stats``.
array_stats (google.cloud.automl_v1beta1.types.ArrayStats):
The statistics for ARRAY DataType.
+
This field is a member of `oneof`_ ``stats``.
struct_stats (google.cloud.automl_v1beta1.types.StructStats):
The statistics for STRUCT DataType.
+
This field is a member of `oneof`_ ``stats``.
category_stats (google.cloud.automl_v1beta1.types.CategoryStats):
The statistics for CATEGORY DataType.
+
This field is a member of `oneof`_ ``stats``.
distinct_value_count (int):
The number of distinct values.
diff --git a/google/cloud/automl_v1beta1/types/data_types.py b/google/cloud/automl_v1beta1/types/data_types.py
index 3e98e018..0c6f25eb 100644
--- a/google/cloud/automl_v1beta1/types/data_types.py
+++ b/google/cloud/automl_v1beta1/types/data_types.py
@@ -52,6 +52,7 @@ class DataType(proto.Message):
[type_code][google.cloud.automl.v1beta1.DataType.type_code]
== [ARRAY][google.cloud.automl.v1beta1.TypeCode.ARRAY], then
``list_element_type`` is the type of the elements.
+
This field is a member of `oneof`_ ``details``.
struct_type (google.cloud.automl_v1beta1.types.StructType):
If
@@ -59,6 +60,7 @@ class DataType(proto.Message):
== [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT],
then ``struct_type`` provides type information for the
struct's fields.
+
This field is a member of `oneof`_ ``details``.
time_format (str):
If
@@ -76,6 +78,7 @@ class DataType(proto.Message):
the Unix epoch); or be written in ``strftime`` syntax. If
time_format is not set, then the default format as
described on the type_code is used.
+
This field is a member of `oneof`_ ``details``.
type_code (google.cloud.automl_v1beta1.types.TypeCode):
Required. The
diff --git a/google/cloud/automl_v1beta1/types/dataset.py b/google/cloud/automl_v1beta1/types/dataset.py
index f9e086b1..d8ab7506 100644
--- a/google/cloud/automl_v1beta1/types/dataset.py
+++ b/google/cloud/automl_v1beta1/types/dataset.py
@@ -43,37 +43,46 @@ class Dataset(proto.Message):
Attributes:
translation_dataset_metadata (google.cloud.automl_v1beta1.types.TranslationDatasetMetadata):
Metadata for a dataset used for translation.
+
This field is a member of `oneof`_ ``dataset_metadata``.
image_classification_dataset_metadata (google.cloud.automl_v1beta1.types.ImageClassificationDatasetMetadata):
Metadata for a dataset used for image
classification.
+
This field is a member of `oneof`_ ``dataset_metadata``.
text_classification_dataset_metadata (google.cloud.automl_v1beta1.types.TextClassificationDatasetMetadata):
Metadata for a dataset used for text
classification.
+
This field is a member of `oneof`_ ``dataset_metadata``.
image_object_detection_dataset_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionDatasetMetadata):
Metadata for a dataset used for image object
detection.
+
This field is a member of `oneof`_ ``dataset_metadata``.
video_classification_dataset_metadata (google.cloud.automl_v1beta1.types.VideoClassificationDatasetMetadata):
Metadata for a dataset used for video
classification.
+
This field is a member of `oneof`_ ``dataset_metadata``.
video_object_tracking_dataset_metadata (google.cloud.automl_v1beta1.types.VideoObjectTrackingDatasetMetadata):
Metadata for a dataset used for video object
tracking.
+
This field is a member of `oneof`_ ``dataset_metadata``.
text_extraction_dataset_metadata (google.cloud.automl_v1beta1.types.TextExtractionDatasetMetadata):
Metadata for a dataset used for text
extraction.
+
This field is a member of `oneof`_ ``dataset_metadata``.
text_sentiment_dataset_metadata (google.cloud.automl_v1beta1.types.TextSentimentDatasetMetadata):
Metadata for a dataset used for text
sentiment.
+
This field is a member of `oneof`_ ``dataset_metadata``.
tables_dataset_metadata (google.cloud.automl_v1beta1.types.TablesDatasetMetadata):
Metadata for a dataset used for Tables.
+
This field is a member of `oneof`_ ``dataset_metadata``.
name (str):
Output only. The resource name of the dataset. Form:
diff --git a/google/cloud/automl_v1beta1/types/io.py b/google/cloud/automl_v1beta1/types/io.py
index 44f29a97..57629283 100644
--- a/google/cloud/automl_v1beta1/types/io.py
+++ b/google/cloud/automl_v1beta1/types/io.py
@@ -347,9 +347,11 @@ class InputConfig(proto.Message):
The Google Cloud Storage location for the input content. In
ImportData, the gcs_source points to a csv with structure
described in the comment.
+
This field is a member of `oneof`_ ``source``.
bigquery_source (google.cloud.automl_v1beta1.types.BigQuerySource):
The BigQuery location for the input content.
+
This field is a member of `oneof`_ ``source``.
params (Sequence[google.cloud.automl_v1beta1.types.InputConfig.ParamsEntry]):
Additional domain-specific parameters describing the
@@ -536,9 +538,11 @@ class BatchPredictInputConfig(proto.Message):
gcs_source (google.cloud.automl_v1beta1.types.GcsSource):
The Google Cloud Storage location for the
input content.
+
This field is a member of `oneof`_ ``source``.
bigquery_source (google.cloud.automl_v1beta1.types.BigQuerySource):
The BigQuery location for the input content.
+
This field is a member of `oneof`_ ``source``.
"""
@@ -607,10 +611,12 @@ class OutputConfig(proto.Message):
new directory will be created with name: export_data-- where
timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
All export output will be written into that directory.
+
This field is a member of `oneof`_ ``destination``.
bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination):
The BigQuery location where the output is to
be written to.
+
This field is a member of `oneof`_ ``destination``.
"""
@@ -921,10 +927,12 @@ class BatchPredictOutputConfig(proto.Message):
gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination):
The Google Cloud Storage location of the
directory where the output is to be written to.
+
This field is a member of `oneof`_ ``destination``.
bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination):
The BigQuery location where the output is to
be written to.
+
This field is a member of `oneof`_ ``destination``.
"""
@@ -958,6 +966,7 @@ class ModelExportOutputConfig(proto.Message):
YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created.
Inside the model and any of its supporting files will be
written.
+
This field is a member of `oneof`_ ``destination``.
gcr_destination (google.cloud.automl_v1beta1.types.GcrDestination):
The GCR location where model image is to be
@@ -966,6 +975,7 @@ class ModelExportOutputConfig(proto.Message):
The model image will be created under the given
URI.
+
This field is a member of `oneof`_ ``destination``.
model_format (str):
The format in which the model must be exported. The
@@ -1081,6 +1091,7 @@ class ExportEvaluatedExamplesOutputConfig(proto.Message):
bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination):
The BigQuery location where the output is to
be written to.
+
This field is a member of `oneof`_ ``destination``.
"""
diff --git a/google/cloud/automl_v1beta1/types/model.py b/google/cloud/automl_v1beta1/types/model.py
index 34851363..1b70043d 100644
--- a/google/cloud/automl_v1beta1/types/model.py
+++ b/google/cloud/automl_v1beta1/types/model.py
@@ -39,30 +39,39 @@ class Model(proto.Message):
Attributes:
translation_model_metadata (google.cloud.automl_v1beta1.types.TranslationModelMetadata):
Metadata for translation models.
+
This field is a member of `oneof`_ ``model_metadata``.
image_classification_model_metadata (google.cloud.automl_v1beta1.types.ImageClassificationModelMetadata):
Metadata for image classification models.
+
This field is a member of `oneof`_ ``model_metadata``.
text_classification_model_metadata (google.cloud.automl_v1beta1.types.TextClassificationModelMetadata):
Metadata for text classification models.
+
This field is a member of `oneof`_ ``model_metadata``.
image_object_detection_model_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionModelMetadata):
Metadata for image object detection models.
+
This field is a member of `oneof`_ ``model_metadata``.
video_classification_model_metadata (google.cloud.automl_v1beta1.types.VideoClassificationModelMetadata):
Metadata for video classification models.
+
This field is a member of `oneof`_ ``model_metadata``.
video_object_tracking_model_metadata (google.cloud.automl_v1beta1.types.VideoObjectTrackingModelMetadata):
Metadata for video object tracking models.
+
This field is a member of `oneof`_ ``model_metadata``.
text_extraction_model_metadata (google.cloud.automl_v1beta1.types.TextExtractionModelMetadata):
Metadata for text extraction models.
+
This field is a member of `oneof`_ ``model_metadata``.
tables_model_metadata (google.cloud.automl_v1beta1.types.TablesModelMetadata):
Metadata for Tables models.
+
This field is a member of `oneof`_ ``model_metadata``.
text_sentiment_model_metadata (google.cloud.automl_v1beta1.types.TextSentimentModelMetadata):
Metadata for text sentiment models.
+
This field is a member of `oneof`_ ``model_metadata``.
name (str):
Output only. Resource name of the model. Format:
diff --git a/google/cloud/automl_v1beta1/types/model_evaluation.py b/google/cloud/automl_v1beta1/types/model_evaluation.py
index 0d9b31df..c44519a0 100644
--- a/google/cloud/automl_v1beta1/types/model_evaluation.py
+++ b/google/cloud/automl_v1beta1/types/model_evaluation.py
@@ -45,30 +45,37 @@ class ModelEvaluation(proto.Message):
video and tables classification.
Tables problem is considered a classification
when the target column is CATEGORY DataType.
+
This field is a member of `oneof`_ ``metrics``.
regression_evaluation_metrics (google.cloud.automl_v1beta1.types.RegressionEvaluationMetrics):
Model evaluation metrics for Tables
regression. Tables problem is considered a
regression when the target column has FLOAT64
DataType.
+
This field is a member of `oneof`_ ``metrics``.
translation_evaluation_metrics (google.cloud.automl_v1beta1.types.TranslationEvaluationMetrics):
Model evaluation metrics for translation.
+
This field is a member of `oneof`_ ``metrics``.
image_object_detection_evaluation_metrics (google.cloud.automl_v1beta1.types.ImageObjectDetectionEvaluationMetrics):
Model evaluation metrics for image object
detection.
+
This field is a member of `oneof`_ ``metrics``.
video_object_tracking_evaluation_metrics (google.cloud.automl_v1beta1.types.VideoObjectTrackingEvaluationMetrics):
Model evaluation metrics for video object
tracking.
+
This field is a member of `oneof`_ ``metrics``.
text_sentiment_evaluation_metrics (google.cloud.automl_v1beta1.types.TextSentimentEvaluationMetrics):
Evaluation metrics for text sentiment models.
+
This field is a member of `oneof`_ ``metrics``.
text_extraction_evaluation_metrics (google.cloud.automl_v1beta1.types.TextExtractionEvaluationMetrics):
Evaluation metrics for text extraction
models.
+
This field is a member of `oneof`_ ``metrics``.
name (str):
Output only. Resource name of the model evaluation. Format:
diff --git a/google/cloud/automl_v1beta1/types/operations.py b/google/cloud/automl_v1beta1/types/operations.py
index 2384ceeb..5611902d 100644
--- a/google/cloud/automl_v1beta1/types/operations.py
+++ b/google/cloud/automl_v1beta1/types/operations.py
@@ -51,30 +51,39 @@ class OperationMetadata(proto.Message):
Attributes:
delete_details (google.cloud.automl_v1beta1.types.DeleteOperationMetadata):
Details of a Delete operation.
+
This field is a member of `oneof`_ ``details``.
deploy_model_details (google.cloud.automl_v1beta1.types.DeployModelOperationMetadata):
Details of a DeployModel operation.
+
This field is a member of `oneof`_ ``details``.
undeploy_model_details (google.cloud.automl_v1beta1.types.UndeployModelOperationMetadata):
Details of an UndeployModel operation.
+
This field is a member of `oneof`_ ``details``.
create_model_details (google.cloud.automl_v1beta1.types.CreateModelOperationMetadata):
Details of CreateModel operation.
+
This field is a member of `oneof`_ ``details``.
import_data_details (google.cloud.automl_v1beta1.types.ImportDataOperationMetadata):
Details of ImportData operation.
+
This field is a member of `oneof`_ ``details``.
batch_predict_details (google.cloud.automl_v1beta1.types.BatchPredictOperationMetadata):
Details of BatchPredict operation.
+
This field is a member of `oneof`_ ``details``.
export_data_details (google.cloud.automl_v1beta1.types.ExportDataOperationMetadata):
Details of ExportData operation.
+
This field is a member of `oneof`_ ``details``.
export_model_details (google.cloud.automl_v1beta1.types.ExportModelOperationMetadata):
Details of ExportModel operation.
+
This field is a member of `oneof`_ ``details``.
export_evaluated_examples_details (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOperationMetadata):
Details of ExportEvaluatedExamples operation.
+
This field is a member of `oneof`_ ``details``.
progress_percent (int):
Output only. Progress of operation. Range: [0, 100]. Not
@@ -202,11 +211,13 @@ class ExportDataOutputInfo(proto.Message):
The full path of the Google Cloud Storage
directory created, into which the exported data
is written.
+
This field is a member of `oneof`_ ``output_location``.
bigquery_output_dataset (str):
The path of the BigQuery dataset created, in
bq://projectId.bqDatasetId format, into which
the exported data is written.
+
This field is a member of `oneof`_ ``output_location``.
"""
@@ -249,11 +260,13 @@ class BatchPredictOutputInfo(proto.Message):
The full path of the Google Cloud Storage
directory created, into which the prediction
output is written.
+
This field is a member of `oneof`_ ``output_location``.
bigquery_output_dataset (str):
The path of the BigQuery dataset created, in
bq://projectId.bqDatasetId format, into which
the prediction output is written.
+
This field is a member of `oneof`_ ``output_location``.
"""
diff --git a/google/cloud/automl_v1beta1/types/service.py b/google/cloud/automl_v1beta1/types/service.py
index 2487e9eb..0bc90ef8 100644
--- a/google/cloud/automl_v1beta1/types/service.py
+++ b/google/cloud/automl_v1beta1/types/service.py
@@ -102,11 +102,11 @@ class ListDatasetsRequest(proto.Message):
An expression for filtering the results of the request.
- ``dataset_metadata`` - for existence of the case (e.g.
- image_classification_dataset_metadata:*). Some examples
- of using the filter are:
+ ``image_classification_dataset_metadata:*``). Some
+ examples of using the filter are:
- ``translation_dataset_metadata:*`` --> The dataset has
- translation_dataset_metadata.
+ ``translation_dataset_metadata``.
page_size (int):
Requested page size. Server may return fewer
results than requested. If unspecified, server
@@ -446,13 +446,13 @@ class ListModelsRequest(proto.Message):
An expression for filtering the results of the request.
- ``model_metadata`` - for existence of the case (e.g.
- video_classification_model_metadata:*).
+ ``video_classification_model_metadata:*``).
- ``dataset_id`` - for = or !=. Some examples of using the
filter are:
- ``image_classification_model_metadata:*`` --> The model
- has image_classification_model_metadata.
+ has ``image_classification_model_metadata``.
- ``dataset_id=5`` --> The model was created from a dataset
with ID 5.
@@ -522,10 +522,12 @@ class DeployModelRequest(proto.Message):
image_object_detection_model_deployment_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata):
Model deployment metadata specific to Image
Object Detection.
+
This field is a member of `oneof`_ ``model_deployment_metadata``.
image_classification_model_deployment_metadata (google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata):
Model deployment metadata specific to Image
Classification.
+
This field is a member of `oneof`_ ``model_deployment_metadata``.
name (str):
Required. Resource name of the model to
diff --git a/google/cloud/automl_v1beta1/types/tables.py b/google/cloud/automl_v1beta1/types/tables.py
index f24af474..8dd51d62 100644
--- a/google/cloud/automl_v1beta1/types/tables.py
+++ b/google/cloud/automl_v1beta1/types/tables.py
@@ -129,11 +129,13 @@ class TablesModelMetadata(proto.Message):
Required when optimization_objective is
"MAXIMIZE_PRECISION_AT_RECALL". Must be between 0 and 1,
inclusive.
+
This field is a member of `oneof`_ ``additional_optimization_objective_config``.
optimization_objective_precision_value (float):
Required when optimization_objective is
"MAXIMIZE_RECALL_AT_PRECISION". Must be between 0 and 1,
inclusive.
+
This field is a member of `oneof`_ ``additional_optimization_objective_config``.
target_column_spec (google.cloud.automl_v1beta1.types.ColumnSpec):
Column spec of the dataset's primary table's column the
diff --git a/google/cloud/automl_v1beta1/types/text_extraction.py b/google/cloud/automl_v1beta1/types/text_extraction.py
index cf8f811f..3de19a94 100644
--- a/google/cloud/automl_v1beta1/types/text_extraction.py
+++ b/google/cloud/automl_v1beta1/types/text_extraction.py
@@ -34,6 +34,7 @@ class TextExtractionAnnotation(proto.Message):
An entity annotation will set this, which is
the part of the original text to which the
annotation pertains.
+
This field is a member of `oneof`_ ``annotation``.
score (float):
Output only. A confidence estimate between
diff --git a/noxfile.py b/noxfile.py
index c9b1d885..8e1a3af2 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -190,6 +190,7 @@ def docs(session):
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
+ "-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
diff --git a/owlbot.py b/owlbot.py
index fa21da1a..204c8ba0 100644
--- a/owlbot.py
+++ b/owlbot.py
@@ -71,12 +71,4 @@
python.py_samples(skip_readmes=True)
-# This is being added to AutoML because the proto comments are long and
-# regex replaces are a brittle temporary solution.
-s.replace(
-"noxfile.py",
-""""-W", # warnings as errors
-\s+"-T", \# show full traceback on exception""",
-""""-T", # show full traceback on exception""")
-
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
diff --git a/samples/AUTHORING_GUIDE.md b/samples/AUTHORING_GUIDE.md
index 55c97b32..8249522f 100644
--- a/samples/AUTHORING_GUIDE.md
+++ b/samples/AUTHORING_GUIDE.md
@@ -1 +1 @@
-See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/AUTHORING_GUIDE.md
\ No newline at end of file
+See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/AUTHORING_GUIDE.md
\ No newline at end of file
diff --git a/samples/CONTRIBUTING.md b/samples/CONTRIBUTING.md
index 34c882b6..f5fe2e6b 100644
--- a/samples/CONTRIBUTING.md
+++ b/samples/CONTRIBUTING.md
@@ -1 +1 @@
-See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/CONTRIBUTING.md
\ No newline at end of file
+See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/CONTRIBUTING.md
\ No newline at end of file
diff --git a/samples/beta/noxfile.py b/samples/beta/noxfile.py
index 93a9122c..3bbef5d5 100644
--- a/samples/beta/noxfile.py
+++ b/samples/beta/noxfile.py
@@ -14,6 +14,7 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
@@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None:
def _session_tests(
session: nox.sessions.Session, post_install: Callable = None
) -> None:
- if TEST_CONFIG["pip_version_override"]:
- pip_version = TEST_CONFIG["pip_version_override"]
- session.install(f"pip=={pip_version}")
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- if os.path.exists("constraints.txt"):
- session.install("-r", "requirements.txt", "-c", "constraints.txt")
- else:
- session.install("-r", "requirements.txt")
-
- if os.path.exists("requirements-test.txt"):
- if os.path.exists("constraints-test.txt"):
- session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
- else:
- session.install("-r", "requirements-test.txt")
-
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
-
- if post_install:
- post_install(session)
-
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars(),
- )
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
diff --git a/samples/beta/requirements.txt b/samples/beta/requirements.txt
index 9bea146f..22ec816c 100644
--- a/samples/beta/requirements.txt
+++ b/samples/beta/requirements.txt
@@ -1 +1 @@
-google-cloud-automl==2.5.1
+google-cloud-automl==2.5.2
diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py
index 93a9122c..3bbef5d5 100644
--- a/samples/snippets/noxfile.py
+++ b/samples/snippets/noxfile.py
@@ -14,6 +14,7 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
@@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None:
def _session_tests(
session: nox.sessions.Session, post_install: Callable = None
) -> None:
- if TEST_CONFIG["pip_version_override"]:
- pip_version = TEST_CONFIG["pip_version_override"]
- session.install(f"pip=={pip_version}")
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- if os.path.exists("constraints.txt"):
- session.install("-r", "requirements.txt", "-c", "constraints.txt")
- else:
- session.install("-r", "requirements.txt")
-
- if os.path.exists("requirements-test.txt"):
- if os.path.exists("constraints-test.txt"):
- session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
- else:
- session.install("-r", "requirements-test.txt")
-
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
-
- if post_install:
- post_install(session)
-
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars(),
- )
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
diff --git a/samples/snippets/requirements.txt b/samples/snippets/requirements.txt
index ffc9d62b..390316bb 100644
--- a/samples/snippets/requirements.txt
+++ b/samples/snippets/requirements.txt
@@ -1,3 +1,3 @@
-google-cloud-translate==3.6.0
-google-cloud-storage==1.42.3
-google-cloud-automl==2.5.1
+google-cloud-translate==3.6.1
+google-cloud-storage==2.0.0
+google-cloud-automl==2.5.2
diff --git a/samples/snippets/translate_create_model.py b/samples/snippets/translate_create_model.py
index 47304d4d..fecdb022 100644
--- a/samples/snippets/translate_create_model.py
+++ b/samples/snippets/translate_create_model.py
@@ -27,7 +27,6 @@ def create_model(project_id, dataset_id, display_name):
# A resource that represents Google Cloud Platform location.
project_location = f"projects/{project_id}/locations/us-central1"
- # Leave model unset to use the default base model provided by Google
translation_model_metadata = automl.TranslationModelMetadata()
model = automl.Model(
display_name=display_name,
diff --git a/samples/snippets/translate_create_model_test.py b/samples/snippets/translate_create_model_test.py
index f03de69e..933a45d2 100644
--- a/samples/snippets/translate_create_model_test.py
+++ b/samples/snippets/translate_create_model_test.py
@@ -26,6 +26,10 @@ def test_translate_create_model(capsys):
PROJECT_ID, DATASET_ID, "translate_test_create_model"
)
out, _ = capsys.readouterr()
+ # After setting DATASET_ID, change line below to
+ # assert "Training started..." in out
assert "Dataset does not exist." in out
except Exception as e:
+ # After setting DATASET_ID, change line below to
+ # assert "Training started..." in e.message
assert "Dataset does not exist." in e.message
diff --git a/samples/tables/noxfile.py b/samples/tables/noxfile.py
index 93a9122c..3bbef5d5 100644
--- a/samples/tables/noxfile.py
+++ b/samples/tables/noxfile.py
@@ -14,6 +14,7 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
@@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None:
def _session_tests(
session: nox.sessions.Session, post_install: Callable = None
) -> None:
- if TEST_CONFIG["pip_version_override"]:
- pip_version = TEST_CONFIG["pip_version_override"]
- session.install(f"pip=={pip_version}")
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- if os.path.exists("constraints.txt"):
- session.install("-r", "requirements.txt", "-c", "constraints.txt")
- else:
- session.install("-r", "requirements.txt")
-
- if os.path.exists("requirements-test.txt"):
- if os.path.exists("constraints-test.txt"):
- session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
- else:
- session.install("-r", "requirements-test.txt")
-
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
-
- if post_install:
- post_install(session)
-
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars(),
- )
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
diff --git a/samples/tables/predict_test.py b/samples/tables/predict_test.py
index 1da6bfc2..5530e6eb 100644
--- a/samples/tables/predict_test.py
+++ b/samples/tables/predict_test.py
@@ -16,18 +16,23 @@
import os
+import backoff
+
from google.cloud.automl_v1beta1 import Model
import automl_tables_model
import automl_tables_predict
import model_test
-
PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
REGION = "us-central1"
STATIC_MODEL = model_test.STATIC_MODEL
+MAX_TIMEOUT = 200
+@backoff.on_exception(
+ wait_gen=lambda: iter([50, 150, MAX_TIMEOUT]), exception=Exception, max_tries=3
+)
def test_predict(capsys):
inputs = {
"Age": 31,
diff --git a/samples/tables/requirements-test.txt b/samples/tables/requirements-test.txt
index 92709451..0f6247f7 100644
--- a/samples/tables/requirements-test.txt
+++ b/samples/tables/requirements-test.txt
@@ -1 +1,2 @@
pytest==6.2.5
+backoff==1.11.1
\ No newline at end of file
diff --git a/samples/tables/requirements.txt b/samples/tables/requirements.txt
index 9bea146f..22ec816c 100644
--- a/samples/tables/requirements.txt
+++ b/samples/tables/requirements.txt
@@ -1 +1 @@
-google-cloud-automl==2.5.1
+google-cloud-automl==2.5.2
diff --git a/setup.py b/setup.py
index b1a189af..2a501217 100644
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@
name = "google-cloud-automl"
description = "Cloud AutoML API client library"
-version = "2.5.2"
+version = "2.6.0"
release_status = "Development Status :: 5 - Production/Stable"
dependencies = [
# NOTE: Maintainers, please do not require google-api-core>=2.x.x
@@ -31,7 +31,7 @@
extras = {
"libcst": "libcst >= 0.2.5",
"pandas": ["pandas>=0.23.0"],
- "storage": ["google-cloud-storage >= 1.18.0, < 2.0.0dev"],
+ "storage": ["google-cloud-storage >=1.18.0, <3.0.0dev"],
}
package_root = os.path.abspath(os.path.dirname(__file__))
@@ -68,6 +68,8 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Topic :: Internet",
],
diff --git a/tests/unit/gapic/automl_v1/test_auto_ml.py b/tests/unit/gapic/automl_v1/test_auto_ml.py
index 94431974..2267ee42 100644
--- a/tests/unit/gapic/automl_v1/test_auto_ml.py
+++ b/tests/unit/gapic/automl_v1/test_auto_ml.py
@@ -245,20 +245,20 @@ def test_auto_ml_client_client_options(client_class, transport_class, transport_
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -313,7 +313,7 @@ def test_auto_ml_client_mtls_env_auto(
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
@@ -404,7 +404,7 @@ def test_auto_ml_client_client_options_scopes(
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -431,7 +431,7 @@ def test_auto_ml_client_client_options_credentials_file(
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
@@ -462,9 +462,8 @@ def test_auto_ml_client_client_options_from_dict():
)
-def test_create_dataset(
- transport: str = "grpc", request_type=service.CreateDatasetRequest
-):
+@pytest.mark.parametrize("request_type", [service.CreateDatasetRequest, dict,])
+def test_create_dataset(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -488,10 +487,6 @@ def test_create_dataset(
assert isinstance(response, future.Future)
-def test_create_dataset_from_dict():
- test_create_dataset(request_type=dict)
-
-
def test_create_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -614,12 +609,16 @@ def test_create_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].dataset == gca_dataset.Dataset(
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].dataset
+ mock_val = gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
)
)
+ assert arg == mock_val
def test_create_dataset_flattened_error():
@@ -666,12 +665,16 @@ async def test_create_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].dataset == gca_dataset.Dataset(
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].dataset
+ mock_val = gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
)
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -692,7 +695,8 @@ async def test_create_dataset_flattened_error_async():
)
-def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetRequest):
+@pytest.mark.parametrize("request_type", [service.GetDatasetRequest, dict,])
+def test_get_dataset(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -730,10 +734,6 @@ def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetReq
assert response.etag == "etag_value"
-def test_get_dataset_from_dict():
- test_get_dataset(request_type=dict)
-
-
def test_get_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -858,7 +858,9 @@ def test_get_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_dataset_flattened_error():
@@ -890,7 +892,9 @@ async def test_get_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -905,9 +909,8 @@ async def test_get_dataset_flattened_error_async():
)
-def test_list_datasets(
- transport: str = "grpc", request_type=service.ListDatasetsRequest
-):
+@pytest.mark.parametrize("request_type", [service.ListDatasetsRequest, dict,])
+def test_list_datasets(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -934,10 +937,6 @@ def test_list_datasets(
assert response.next_page_token == "next_page_token_value"
-def test_list_datasets_from_dict():
- test_list_datasets(request_type=dict)
-
-
def test_list_datasets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1054,7 +1053,9 @@ def test_list_datasets_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_datasets_flattened_error():
@@ -1088,7 +1089,9 @@ async def test_list_datasets_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1103,8 +1106,10 @@ async def test_list_datasets_flattened_error_async():
)
-def test_list_datasets_pager():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_datasets_pager(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
@@ -1137,8 +1142,10 @@ def test_list_datasets_pager():
assert all(isinstance(i, dataset.Dataset) for i in results)
-def test_list_datasets_pages():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_datasets_pages(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
@@ -1225,9 +1232,8 @@ async def test_list_datasets_async_pages():
assert page_.raw_page.next_page_token == token
-def test_update_dataset(
- transport: str = "grpc", request_type=service.UpdateDatasetRequest
-):
+@pytest.mark.parametrize("request_type", [service.UpdateDatasetRequest, dict,])
+def test_update_dataset(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1265,10 +1271,6 @@ def test_update_dataset(
assert response.etag == "etag_value"
-def test_update_dataset_from_dict():
- test_update_dataset(request_type=dict)
-
-
def test_update_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1404,12 +1406,16 @@ def test_update_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].dataset == gca_dataset.Dataset(
+ arg = args[0].dataset
+ mock_val = gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
)
)
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
def test_update_dataset_flattened_error():
@@ -1454,12 +1460,16 @@ async def test_update_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].dataset == gca_dataset.Dataset(
+ arg = args[0].dataset
+ mock_val = gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
)
)
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1480,9 +1490,8 @@ async def test_update_dataset_flattened_error_async():
)
-def test_delete_dataset(
- transport: str = "grpc", request_type=service.DeleteDatasetRequest
-):
+@pytest.mark.parametrize("request_type", [service.DeleteDatasetRequest, dict,])
+def test_delete_dataset(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1506,10 +1515,6 @@ def test_delete_dataset(
assert isinstance(response, future.Future)
-def test_delete_dataset_from_dict():
- test_delete_dataset(request_type=dict)
-
-
def test_delete_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1625,7 +1630,9 @@ def test_delete_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_delete_dataset_flattened_error():
@@ -1659,7 +1666,9 @@ async def test_delete_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1674,7 +1683,8 @@ async def test_delete_dataset_flattened_error_async():
)
-def test_import_data(transport: str = "grpc", request_type=service.ImportDataRequest):
+@pytest.mark.parametrize("request_type", [service.ImportDataRequest, dict,])
+def test_import_data(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1698,10 +1708,6 @@ def test_import_data(transport: str = "grpc", request_type=service.ImportDataReq
assert isinstance(response, future.Future)
-def test_import_data_from_dict():
- test_import_data(request_type=dict)
-
-
def test_import_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1822,10 +1828,14 @@ def test_import_data_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].input_config == io.InputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].input_config
+ mock_val = io.InputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
+ assert arg == mock_val
def test_import_data_flattened_error():
@@ -1868,10 +1878,14 @@ async def test_import_data_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].input_config == io.InputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].input_config
+ mock_val = io.InputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1890,7 +1904,8 @@ async def test_import_data_flattened_error_async():
)
-def test_export_data(transport: str = "grpc", request_type=service.ExportDataRequest):
+@pytest.mark.parametrize("request_type", [service.ExportDataRequest, dict,])
+def test_export_data(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1914,10 +1929,6 @@ def test_export_data(transport: str = "grpc", request_type=service.ExportDataReq
assert isinstance(response, future.Future)
-def test_export_data_from_dict():
- test_export_data(request_type=dict)
-
-
def test_export_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2040,12 +2051,16 @@ def test_export_data_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].output_config == io.OutputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.OutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
+ assert arg == mock_val
def test_export_data_flattened_error():
@@ -2092,12 +2107,16 @@ async def test_export_data_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].output_config == io.OutputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.OutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2118,9 +2137,8 @@ async def test_export_data_flattened_error_async():
)
-def test_get_annotation_spec(
- transport: str = "grpc", request_type=service.GetAnnotationSpecRequest
-):
+@pytest.mark.parametrize("request_type", [service.GetAnnotationSpecRequest, dict,])
+def test_get_annotation_spec(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2151,10 +2169,6 @@ def test_get_annotation_spec(
assert response.example_count == 1396
-def test_get_annotation_spec_from_dict():
- test_get_annotation_spec(request_type=dict)
-
-
def test_get_annotation_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2287,7 +2301,9 @@ def test_get_annotation_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_annotation_spec_flattened_error():
@@ -2323,7 +2339,9 @@ async def test_get_annotation_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2338,7 +2356,8 @@ async def test_get_annotation_spec_flattened_error_async():
)
-def test_create_model(transport: str = "grpc", request_type=service.CreateModelRequest):
+@pytest.mark.parametrize("request_type", [service.CreateModelRequest, dict,])
+def test_create_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2362,10 +2381,6 @@ def test_create_model(transport: str = "grpc", request_type=service.CreateModelR
assert isinstance(response, future.Future)
-def test_create_model_from_dict():
- test_create_model(request_type=dict)
-
-
def test_create_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2488,12 +2503,16 @@ def test_create_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].model == gca_model.Model(
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].model
+ mock_val = gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
)
)
+ assert arg == mock_val
def test_create_model_flattened_error():
@@ -2540,12 +2559,16 @@ async def test_create_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].model == gca_model.Model(
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].model
+ mock_val = gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
)
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2566,7 +2589,8 @@ async def test_create_model_flattened_error_async():
)
-def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest):
+@pytest.mark.parametrize("request_type", [service.GetModelRequest, dict,])
+def test_get_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2604,10 +2628,6 @@ def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest
assert response.etag == "etag_value"
-def test_get_model_from_dict():
- test_get_model(request_type=dict)
-
-
def test_get_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2732,7 +2752,9 @@ def test_get_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_model_flattened_error():
@@ -2764,7 +2786,9 @@ async def test_get_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2779,7 +2803,8 @@ async def test_get_model_flattened_error_async():
)
-def test_list_models(transport: str = "grpc", request_type=service.ListModelsRequest):
+@pytest.mark.parametrize("request_type", [service.ListModelsRequest, dict,])
+def test_list_models(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2806,10 +2831,6 @@ def test_list_models(transport: str = "grpc", request_type=service.ListModelsReq
assert response.next_page_token == "next_page_token_value"
-def test_list_models_from_dict():
- test_list_models(request_type=dict)
-
-
def test_list_models_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2926,7 +2947,9 @@ def test_list_models_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_models_flattened_error():
@@ -2960,7 +2983,9 @@ async def test_list_models_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2975,8 +3000,10 @@ async def test_list_models_flattened_error_async():
)
-def test_list_models_pager():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_models_pager(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
@@ -3005,8 +3032,10 @@ def test_list_models_pager():
assert all(isinstance(i, model.Model) for i in results)
-def test_list_models_pages():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_models_pages(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
@@ -3081,7 +3110,8 @@ async def test_list_models_async_pages():
assert page_.raw_page.next_page_token == token
-def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelRequest):
+@pytest.mark.parametrize("request_type", [service.DeleteModelRequest, dict,])
+def test_delete_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3105,10 +3135,6 @@ def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelR
assert isinstance(response, future.Future)
-def test_delete_model_from_dict():
- test_delete_model(request_type=dict)
-
-
def test_delete_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3224,7 +3250,9 @@ def test_delete_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_delete_model_flattened_error():
@@ -3258,7 +3286,9 @@ async def test_delete_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3273,7 +3303,8 @@ async def test_delete_model_flattened_error_async():
)
-def test_update_model(transport: str = "grpc", request_type=service.UpdateModelRequest):
+@pytest.mark.parametrize("request_type", [service.UpdateModelRequest, dict,])
+def test_update_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3311,10 +3342,6 @@ def test_update_model(transport: str = "grpc", request_type=service.UpdateModelR
assert response.etag == "etag_value"
-def test_update_model_from_dict():
- test_update_model(request_type=dict)
-
-
def test_update_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3446,12 +3473,16 @@ def test_update_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].model == gca_model.Model(
+ arg = args[0].model
+ mock_val = gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
)
)
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
def test_update_model_flattened_error():
@@ -3496,12 +3527,16 @@ async def test_update_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].model == gca_model.Model(
+ arg = args[0].model
+ mock_val = gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
)
)
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3522,7 +3557,8 @@ async def test_update_model_flattened_error_async():
)
-def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelRequest):
+@pytest.mark.parametrize("request_type", [service.DeployModelRequest, dict,])
+def test_deploy_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3546,10 +3582,6 @@ def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelR
assert isinstance(response, future.Future)
-def test_deploy_model_from_dict():
- test_deploy_model(request_type=dict)
-
-
def test_deploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3665,7 +3697,9 @@ def test_deploy_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_deploy_model_flattened_error():
@@ -3699,7 +3733,9 @@ async def test_deploy_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3714,9 +3750,8 @@ async def test_deploy_model_flattened_error_async():
)
-def test_undeploy_model(
- transport: str = "grpc", request_type=service.UndeployModelRequest
-):
+@pytest.mark.parametrize("request_type", [service.UndeployModelRequest, dict,])
+def test_undeploy_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3740,10 +3775,6 @@ def test_undeploy_model(
assert isinstance(response, future.Future)
-def test_undeploy_model_from_dict():
- test_undeploy_model(request_type=dict)
-
-
def test_undeploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3859,7 +3890,9 @@ def test_undeploy_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_undeploy_model_flattened_error():
@@ -3893,7 +3926,9 @@ async def test_undeploy_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3908,7 +3943,8 @@ async def test_undeploy_model_flattened_error_async():
)
-def test_export_model(transport: str = "grpc", request_type=service.ExportModelRequest):
+@pytest.mark.parametrize("request_type", [service.ExportModelRequest, dict,])
+def test_export_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3932,10 +3968,6 @@ def test_export_model(transport: str = "grpc", request_type=service.ExportModelR
assert isinstance(response, future.Future)
-def test_export_model_from_dict():
- test_export_model(request_type=dict)
-
-
def test_export_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4058,12 +4090,16 @@ def test_export_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].output_config == io.ModelExportOutputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.ModelExportOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
+ assert arg == mock_val
def test_export_model_flattened_error():
@@ -4110,12 +4146,16 @@ async def test_export_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].output_config == io.ModelExportOutputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.ModelExportOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4136,9 +4176,8 @@ async def test_export_model_flattened_error_async():
)
-def test_get_model_evaluation(
- transport: str = "grpc", request_type=service.GetModelEvaluationRequest
-):
+@pytest.mark.parametrize("request_type", [service.GetModelEvaluationRequest, dict,])
+def test_get_model_evaluation(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4176,10 +4215,6 @@ def test_get_model_evaluation(
assert response.evaluated_example_count == 2446
-def test_get_model_evaluation_from_dict():
- test_get_model_evaluation(request_type=dict)
-
-
def test_get_model_evaluation_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4314,7 +4349,9 @@ def test_get_model_evaluation_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_model_evaluation_flattened_error():
@@ -4350,7 +4387,9 @@ async def test_get_model_evaluation_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4365,9 +4404,8 @@ async def test_get_model_evaluation_flattened_error_async():
)
-def test_list_model_evaluations(
- transport: str = "grpc", request_type=service.ListModelEvaluationsRequest
-):
+@pytest.mark.parametrize("request_type", [service.ListModelEvaluationsRequest, dict,])
+def test_list_model_evaluations(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4396,10 +4434,6 @@ def test_list_model_evaluations(
assert response.next_page_token == "next_page_token_value"
-def test_list_model_evaluations_from_dict():
- test_list_model_evaluations(request_type=dict)
-
-
def test_list_model_evaluations_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4530,8 +4564,12 @@ def test_list_model_evaluations_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].filter == "filter_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].filter
+ mock_val = "filter_value"
+ assert arg == mock_val
def test_list_model_evaluations_flattened_error():
@@ -4571,8 +4609,12 @@ async def test_list_model_evaluations_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].filter == "filter_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].filter
+ mock_val = "filter_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4589,8 +4631,10 @@ async def test_list_model_evaluations_flattened_error_async():
)
-def test_list_model_evaluations_pager():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_model_evaluations_pager(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -4635,8 +4679,10 @@ def test_list_model_evaluations_pager():
assert all(isinstance(i, model_evaluation.ModelEvaluation) for i in results)
-def test_list_model_evaluations_pages():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_model_evaluations_pages(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -5390,7 +5436,7 @@ def test_parse_common_location_path():
assert expected == actual
-def test_client_withDEFAULT_CLIENT_INFO():
+def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
diff --git a/tests/unit/gapic/automl_v1/test_prediction_service.py b/tests/unit/gapic/automl_v1/test_prediction_service.py
index bbcfb3e4..08f6bd1f 100644
--- a/tests/unit/gapic/automl_v1/test_prediction_service.py
+++ b/tests/unit/gapic/automl_v1/test_prediction_service.py
@@ -258,20 +258,20 @@ def test_prediction_service_client_client_options(
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -340,7 +340,7 @@ def test_prediction_service_client_mtls_env_auto(
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
@@ -435,7 +435,7 @@ def test_prediction_service_client_client_options_scopes(
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -466,7 +466,7 @@ def test_prediction_service_client_client_options_credentials_file(
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
@@ -499,9 +499,8 @@ def test_prediction_service_client_client_options_from_dict():
)
-def test_predict(
- transport: str = "grpc", request_type=prediction_service.PredictRequest
-):
+@pytest.mark.parametrize("request_type", [prediction_service.PredictRequest, dict,])
+def test_predict(request_type, transport: str = "grpc"):
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -525,10 +524,6 @@ def test_predict(
assert isinstance(response, prediction_service.PredictResponse)
-def test_predict_from_dict():
- test_predict(request_type=dict)
-
-
def test_predict_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -652,11 +647,17 @@ def test_predict_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].payload == data_items.ExamplePayload(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].payload
+ mock_val = data_items.ExamplePayload(
image=data_items.Image(image_bytes=b"image_bytes_blob")
)
- assert args[0].params == {"key_value": "value_value"}
+ assert arg == mock_val
+ arg = args[0].params
+ mock_val = {"key_value": "value_value"}
+ assert arg == mock_val
def test_predict_flattened_error():
@@ -703,11 +704,17 @@ async def test_predict_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].payload == data_items.ExamplePayload(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].payload
+ mock_val = data_items.ExamplePayload(
image=data_items.Image(image_bytes=b"image_bytes_blob")
)
- assert args[0].params == {"key_value": "value_value"}
+ assert arg == mock_val
+ arg = args[0].params
+ mock_val = {"key_value": "value_value"}
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -729,9 +736,10 @@ async def test_predict_flattened_error_async():
)
-def test_batch_predict(
- transport: str = "grpc", request_type=prediction_service.BatchPredictRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [prediction_service.BatchPredictRequest, dict,]
+)
+def test_batch_predict(request_type, transport: str = "grpc"):
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -755,10 +763,6 @@ def test_batch_predict(
assert isinstance(response, future.Future)
-def test_batch_predict_from_dict():
- test_batch_predict(request_type=dict)
-
-
def test_batch_predict_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -887,16 +891,24 @@ def test_batch_predict_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].input_config == io.BatchPredictInputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].input_config
+ mock_val = io.BatchPredictInputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
- assert args[0].output_config == io.BatchPredictOutputConfig(
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.BatchPredictOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
- assert args[0].params == {"key_value": "value_value"}
+ assert arg == mock_val
+ arg = args[0].params
+ mock_val = {"key_value": "value_value"}
+ assert arg == mock_val
def test_batch_predict_flattened_error():
@@ -953,16 +965,24 @@ async def test_batch_predict_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].input_config == io.BatchPredictInputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].input_config
+ mock_val = io.BatchPredictInputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
- assert args[0].output_config == io.BatchPredictOutputConfig(
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.BatchPredictOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
- assert args[0].params == {"key_value": "value_value"}
+ assert arg == mock_val
+ arg = args[0].params
+ mock_val = {"key_value": "value_value"}
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1535,7 +1555,7 @@ def test_parse_common_location_path():
assert expected == actual
-def test_client_withDEFAULT_CLIENT_INFO():
+def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
diff --git a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
index 5be6ec51..4a36cc1f 100644
--- a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
+++ b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
@@ -254,20 +254,20 @@ def test_auto_ml_client_client_options(client_class, transport_class, transport_
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -322,7 +322,7 @@ def test_auto_ml_client_mtls_env_auto(
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
@@ -413,7 +413,7 @@ def test_auto_ml_client_client_options_scopes(
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -440,7 +440,7 @@ def test_auto_ml_client_client_options_credentials_file(
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
@@ -471,9 +471,8 @@ def test_auto_ml_client_client_options_from_dict():
)
-def test_create_dataset(
- transport: str = "grpc", request_type=service.CreateDatasetRequest
-):
+@pytest.mark.parametrize("request_type", [service.CreateDatasetRequest, dict,])
+def test_create_dataset(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -511,10 +510,6 @@ def test_create_dataset(
assert response.etag == "etag_value"
-def test_create_dataset_from_dict():
- test_create_dataset(request_type=dict)
-
-
def test_create_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -646,12 +641,16 @@ def test_create_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].dataset == gca_dataset.Dataset(
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].dataset
+ mock_val = gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
)
)
+ assert arg == mock_val
def test_create_dataset_flattened_error():
@@ -696,12 +695,16 @@ async def test_create_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].dataset == gca_dataset.Dataset(
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].dataset
+ mock_val = gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
)
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -722,7 +725,8 @@ async def test_create_dataset_flattened_error_async():
)
-def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetRequest):
+@pytest.mark.parametrize("request_type", [service.GetDatasetRequest, dict,])
+def test_get_dataset(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -760,10 +764,6 @@ def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetReq
assert response.etag == "etag_value"
-def test_get_dataset_from_dict():
- test_get_dataset(request_type=dict)
-
-
def test_get_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -888,7 +888,9 @@ def test_get_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_dataset_flattened_error():
@@ -920,7 +922,9 @@ async def test_get_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -935,9 +939,8 @@ async def test_get_dataset_flattened_error_async():
)
-def test_list_datasets(
- transport: str = "grpc", request_type=service.ListDatasetsRequest
-):
+@pytest.mark.parametrize("request_type", [service.ListDatasetsRequest, dict,])
+def test_list_datasets(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -964,10 +967,6 @@ def test_list_datasets(
assert response.next_page_token == "next_page_token_value"
-def test_list_datasets_from_dict():
- test_list_datasets(request_type=dict)
-
-
def test_list_datasets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1084,7 +1083,9 @@ def test_list_datasets_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_datasets_flattened_error():
@@ -1118,7 +1119,9 @@ async def test_list_datasets_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1133,8 +1136,10 @@ async def test_list_datasets_flattened_error_async():
)
-def test_list_datasets_pager():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_datasets_pager(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
@@ -1167,8 +1172,10 @@ def test_list_datasets_pager():
assert all(isinstance(i, dataset.Dataset) for i in results)
-def test_list_datasets_pages():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_datasets_pages(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
@@ -1255,9 +1262,8 @@ async def test_list_datasets_async_pages():
assert page_.raw_page.next_page_token == token
-def test_update_dataset(
- transport: str = "grpc", request_type=service.UpdateDatasetRequest
-):
+@pytest.mark.parametrize("request_type", [service.UpdateDatasetRequest, dict,])
+def test_update_dataset(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1295,10 +1301,6 @@ def test_update_dataset(
assert response.etag == "etag_value"
-def test_update_dataset_from_dict():
- test_update_dataset(request_type=dict)
-
-
def test_update_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1433,11 +1435,13 @@ def test_update_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].dataset == gca_dataset.Dataset(
+ arg = args[0].dataset
+ mock_val = gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
)
)
+ assert arg == mock_val
def test_update_dataset_flattened_error():
@@ -1480,11 +1484,13 @@ async def test_update_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].dataset == gca_dataset.Dataset(
+ arg = args[0].dataset
+ mock_val = gca_dataset.Dataset(
translation_dataset_metadata=translation.TranslationDatasetMetadata(
source_language_code="source_language_code_value"
)
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1504,9 +1510,8 @@ async def test_update_dataset_flattened_error_async():
)
-def test_delete_dataset(
- transport: str = "grpc", request_type=service.DeleteDatasetRequest
-):
+@pytest.mark.parametrize("request_type", [service.DeleteDatasetRequest, dict,])
+def test_delete_dataset(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1530,10 +1535,6 @@ def test_delete_dataset(
assert isinstance(response, future.Future)
-def test_delete_dataset_from_dict():
- test_delete_dataset(request_type=dict)
-
-
def test_delete_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1649,7 +1650,9 @@ def test_delete_dataset_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_delete_dataset_flattened_error():
@@ -1683,7 +1686,9 @@ async def test_delete_dataset_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1698,7 +1703,8 @@ async def test_delete_dataset_flattened_error_async():
)
-def test_import_data(transport: str = "grpc", request_type=service.ImportDataRequest):
+@pytest.mark.parametrize("request_type", [service.ImportDataRequest, dict,])
+def test_import_data(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1722,10 +1728,6 @@ def test_import_data(transport: str = "grpc", request_type=service.ImportDataReq
assert isinstance(response, future.Future)
-def test_import_data_from_dict():
- test_import_data(request_type=dict)
-
-
def test_import_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1846,10 +1848,14 @@ def test_import_data_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].input_config == io.InputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].input_config
+ mock_val = io.InputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
+ assert arg == mock_val
def test_import_data_flattened_error():
@@ -1892,10 +1898,14 @@ async def test_import_data_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].input_config == io.InputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].input_config
+ mock_val = io.InputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1914,7 +1924,8 @@ async def test_import_data_flattened_error_async():
)
-def test_export_data(transport: str = "grpc", request_type=service.ExportDataRequest):
+@pytest.mark.parametrize("request_type", [service.ExportDataRequest, dict,])
+def test_export_data(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1938,10 +1949,6 @@ def test_export_data(transport: str = "grpc", request_type=service.ExportDataReq
assert isinstance(response, future.Future)
-def test_export_data_from_dict():
- test_export_data(request_type=dict)
-
-
def test_export_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2064,12 +2071,16 @@ def test_export_data_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].output_config == io.OutputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.OutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
+ assert arg == mock_val
def test_export_data_flattened_error():
@@ -2116,12 +2127,16 @@ async def test_export_data_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].output_config == io.OutputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.OutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2142,9 +2157,8 @@ async def test_export_data_flattened_error_async():
)
-def test_get_annotation_spec(
- transport: str = "grpc", request_type=service.GetAnnotationSpecRequest
-):
+@pytest.mark.parametrize("request_type", [service.GetAnnotationSpecRequest, dict,])
+def test_get_annotation_spec(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2175,10 +2189,6 @@ def test_get_annotation_spec(
assert response.example_count == 1396
-def test_get_annotation_spec_from_dict():
- test_get_annotation_spec(request_type=dict)
-
-
def test_get_annotation_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2311,7 +2321,9 @@ def test_get_annotation_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_annotation_spec_flattened_error():
@@ -2347,7 +2359,9 @@ async def test_get_annotation_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2362,9 +2376,8 @@ async def test_get_annotation_spec_flattened_error_async():
)
-def test_get_table_spec(
- transport: str = "grpc", request_type=service.GetTableSpecRequest
-):
+@pytest.mark.parametrize("request_type", [service.GetTableSpecRequest, dict,])
+def test_get_table_spec(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2401,10 +2414,6 @@ def test_get_table_spec(
assert response.etag == "etag_value"
-def test_get_table_spec_from_dict():
- test_get_table_spec(request_type=dict)
-
-
def test_get_table_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2533,7 +2542,9 @@ def test_get_table_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_table_spec_flattened_error():
@@ -2567,7 +2578,9 @@ async def test_get_table_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2582,9 +2595,8 @@ async def test_get_table_spec_flattened_error_async():
)
-def test_list_table_specs(
- transport: str = "grpc", request_type=service.ListTableSpecsRequest
-):
+@pytest.mark.parametrize("request_type", [service.ListTableSpecsRequest, dict,])
+def test_list_table_specs(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2611,10 +2623,6 @@ def test_list_table_specs(
assert response.next_page_token == "next_page_token_value"
-def test_list_table_specs_from_dict():
- test_list_table_specs(request_type=dict)
-
-
def test_list_table_specs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2731,7 +2739,9 @@ def test_list_table_specs_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_table_specs_flattened_error():
@@ -2765,7 +2775,9 @@ async def test_list_table_specs_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2780,8 +2792,10 @@ async def test_list_table_specs_flattened_error_async():
)
-def test_list_table_specs_pager():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_table_specs_pager(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
@@ -2818,8 +2832,10 @@ def test_list_table_specs_pager():
assert all(isinstance(i, table_spec.TableSpec) for i in results)
-def test_list_table_specs_pages():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_table_specs_pages(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
@@ -2918,9 +2934,8 @@ async def test_list_table_specs_async_pages():
assert page_.raw_page.next_page_token == token
-def test_update_table_spec(
- transport: str = "grpc", request_type=service.UpdateTableSpecRequest
-):
+@pytest.mark.parametrize("request_type", [service.UpdateTableSpecRequest, dict,])
+def test_update_table_spec(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2959,10 +2974,6 @@ def test_update_table_spec(
assert response.etag == "etag_value"
-def test_update_table_spec_from_dict():
- test_update_table_spec(request_type=dict)
-
-
def test_update_table_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3107,7 +3118,9 @@ def test_update_table_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].table_spec == gca_table_spec.TableSpec(name="name_value")
+ arg = args[0].table_spec
+ mock_val = gca_table_spec.TableSpec(name="name_value")
+ assert arg == mock_val
def test_update_table_spec_flattened_error():
@@ -3146,7 +3159,9 @@ async def test_update_table_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].table_spec == gca_table_spec.TableSpec(name="name_value")
+ arg = args[0].table_spec
+ mock_val = gca_table_spec.TableSpec(name="name_value")
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3162,9 +3177,8 @@ async def test_update_table_spec_flattened_error_async():
)
-def test_get_column_spec(
- transport: str = "grpc", request_type=service.GetColumnSpecRequest
-):
+@pytest.mark.parametrize("request_type", [service.GetColumnSpecRequest, dict,])
+def test_get_column_spec(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3193,10 +3207,6 @@ def test_get_column_spec(
assert response.etag == "etag_value"
-def test_get_column_spec_from_dict():
- test_get_column_spec(request_type=dict)
-
-
def test_get_column_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3317,7 +3327,9 @@ def test_get_column_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_column_spec_flattened_error():
@@ -3351,7 +3363,9 @@ async def test_get_column_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3366,9 +3380,8 @@ async def test_get_column_spec_flattened_error_async():
)
-def test_list_column_specs(
- transport: str = "grpc", request_type=service.ListColumnSpecsRequest
-):
+@pytest.mark.parametrize("request_type", [service.ListColumnSpecsRequest, dict,])
+def test_list_column_specs(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3397,10 +3410,6 @@ def test_list_column_specs(
assert response.next_page_token == "next_page_token_value"
-def test_list_column_specs_from_dict():
- test_list_column_specs(request_type=dict)
-
-
def test_list_column_specs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3527,7 +3536,9 @@ def test_list_column_specs_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_column_specs_flattened_error():
@@ -3563,7 +3574,9 @@ async def test_list_column_specs_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3578,8 +3591,10 @@ async def test_list_column_specs_flattened_error_async():
)
-def test_list_column_specs_pager():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_column_specs_pager(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3618,8 +3633,10 @@ def test_list_column_specs_pager():
assert all(isinstance(i, column_spec.ColumnSpec) for i in results)
-def test_list_column_specs_pages():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_column_specs_pages(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -3724,9 +3741,8 @@ async def test_list_column_specs_async_pages():
assert page_.raw_page.next_page_token == token
-def test_update_column_spec(
- transport: str = "grpc", request_type=service.UpdateColumnSpecRequest
-):
+@pytest.mark.parametrize("request_type", [service.UpdateColumnSpecRequest, dict,])
+def test_update_column_spec(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3757,10 +3773,6 @@ def test_update_column_spec(
assert response.etag == "etag_value"
-def test_update_column_spec_from_dict():
- test_update_column_spec(request_type=dict)
-
-
def test_update_column_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3897,7 +3909,9 @@ def test_update_column_spec_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].column_spec == gca_column_spec.ColumnSpec(name="name_value")
+ arg = args[0].column_spec
+ mock_val = gca_column_spec.ColumnSpec(name="name_value")
+ assert arg == mock_val
def test_update_column_spec_flattened_error():
@@ -3936,7 +3950,9 @@ async def test_update_column_spec_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].column_spec == gca_column_spec.ColumnSpec(name="name_value")
+ arg = args[0].column_spec
+ mock_val = gca_column_spec.ColumnSpec(name="name_value")
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3952,7 +3968,8 @@ async def test_update_column_spec_flattened_error_async():
)
-def test_create_model(transport: str = "grpc", request_type=service.CreateModelRequest):
+@pytest.mark.parametrize("request_type", [service.CreateModelRequest, dict,])
+def test_create_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3976,10 +3993,6 @@ def test_create_model(transport: str = "grpc", request_type=service.CreateModelR
assert isinstance(response, future.Future)
-def test_create_model_from_dict():
- test_create_model(request_type=dict)
-
-
def test_create_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4102,12 +4115,16 @@ def test_create_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].model == gca_model.Model(
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].model
+ mock_val = gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
)
)
+ assert arg == mock_val
def test_create_model_flattened_error():
@@ -4154,12 +4171,16 @@ async def test_create_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].model == gca_model.Model(
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].model
+ mock_val = gca_model.Model(
translation_model_metadata=translation.TranslationModelMetadata(
base_model="base_model_value"
)
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4180,7 +4201,8 @@ async def test_create_model_flattened_error_async():
)
-def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest):
+@pytest.mark.parametrize("request_type", [service.GetModelRequest, dict,])
+def test_get_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4216,10 +4238,6 @@ def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest
assert response.deployment_state == model.Model.DeploymentState.DEPLOYED
-def test_get_model_from_dict():
- test_get_model(request_type=dict)
-
-
def test_get_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4342,7 +4360,9 @@ def test_get_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_model_flattened_error():
@@ -4374,7 +4394,9 @@ async def test_get_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4389,7 +4411,8 @@ async def test_get_model_flattened_error_async():
)
-def test_list_models(transport: str = "grpc", request_type=service.ListModelsRequest):
+@pytest.mark.parametrize("request_type", [service.ListModelsRequest, dict,])
+def test_list_models(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4416,10 +4439,6 @@ def test_list_models(transport: str = "grpc", request_type=service.ListModelsReq
assert response.next_page_token == "next_page_token_value"
-def test_list_models_from_dict():
- test_list_models(request_type=dict)
-
-
def test_list_models_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4536,7 +4555,9 @@ def test_list_models_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_models_flattened_error():
@@ -4570,7 +4591,9 @@ async def test_list_models_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4585,8 +4608,10 @@ async def test_list_models_flattened_error_async():
)
-def test_list_models_pager():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_models_pager(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
@@ -4615,8 +4640,10 @@ def test_list_models_pager():
assert all(isinstance(i, model.Model) for i in results)
-def test_list_models_pages():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_models_pages(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_models), "__call__") as call:
@@ -4691,7 +4718,8 @@ async def test_list_models_async_pages():
assert page_.raw_page.next_page_token == token
-def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelRequest):
+@pytest.mark.parametrize("request_type", [service.DeleteModelRequest, dict,])
+def test_delete_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4715,10 +4743,6 @@ def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelR
assert isinstance(response, future.Future)
-def test_delete_model_from_dict():
- test_delete_model(request_type=dict)
-
-
def test_delete_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4834,7 +4858,9 @@ def test_delete_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_delete_model_flattened_error():
@@ -4868,7 +4894,9 @@ async def test_delete_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4883,7 +4911,8 @@ async def test_delete_model_flattened_error_async():
)
-def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelRequest):
+@pytest.mark.parametrize("request_type", [service.DeployModelRequest, dict,])
+def test_deploy_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4907,10 +4936,6 @@ def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelR
assert isinstance(response, future.Future)
-def test_deploy_model_from_dict():
- test_deploy_model(request_type=dict)
-
-
def test_deploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -5026,7 +5051,9 @@ def test_deploy_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_deploy_model_flattened_error():
@@ -5060,7 +5087,9 @@ async def test_deploy_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -5075,9 +5104,8 @@ async def test_deploy_model_flattened_error_async():
)
-def test_undeploy_model(
- transport: str = "grpc", request_type=service.UndeployModelRequest
-):
+@pytest.mark.parametrize("request_type", [service.UndeployModelRequest, dict,])
+def test_undeploy_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -5101,10 +5129,6 @@ def test_undeploy_model(
assert isinstance(response, future.Future)
-def test_undeploy_model_from_dict():
- test_undeploy_model(request_type=dict)
-
-
def test_undeploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -5220,7 +5244,9 @@ def test_undeploy_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_undeploy_model_flattened_error():
@@ -5254,7 +5280,9 @@ async def test_undeploy_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -5269,7 +5297,8 @@ async def test_undeploy_model_flattened_error_async():
)
-def test_export_model(transport: str = "grpc", request_type=service.ExportModelRequest):
+@pytest.mark.parametrize("request_type", [service.ExportModelRequest, dict,])
+def test_export_model(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -5293,10 +5322,6 @@ def test_export_model(transport: str = "grpc", request_type=service.ExportModelR
assert isinstance(response, future.Future)
-def test_export_model_from_dict():
- test_export_model(request_type=dict)
-
-
def test_export_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -5419,12 +5444,16 @@ def test_export_model_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].output_config == io.ModelExportOutputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.ModelExportOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
+ assert arg == mock_val
def test_export_model_flattened_error():
@@ -5471,12 +5500,16 @@ async def test_export_model_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].output_config == io.ModelExportOutputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.ModelExportOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -5497,9 +5530,10 @@ async def test_export_model_flattened_error_async():
)
-def test_export_evaluated_examples(
- transport: str = "grpc", request_type=service.ExportEvaluatedExamplesRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [service.ExportEvaluatedExamplesRequest, dict,]
+)
+def test_export_evaluated_examples(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -5525,10 +5559,6 @@ def test_export_evaluated_examples(
assert isinstance(response, future.Future)
-def test_export_evaluated_examples_from_dict():
- test_export_evaluated_examples(request_type=dict)
-
-
def test_export_evaluated_examples_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -5661,10 +5691,14 @@ def test_export_evaluated_examples_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].output_config == io.ExportEvaluatedExamplesOutputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.ExportEvaluatedExamplesOutputConfig(
bigquery_destination=io.BigQueryDestination(output_uri="output_uri_value")
)
+ assert arg == mock_val
def test_export_evaluated_examples_flattened_error():
@@ -5713,10 +5747,14 @@ async def test_export_evaluated_examples_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].output_config == io.ExportEvaluatedExamplesOutputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.ExportEvaluatedExamplesOutputConfig(
bigquery_destination=io.BigQueryDestination(output_uri="output_uri_value")
)
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -5737,9 +5775,8 @@ async def test_export_evaluated_examples_flattened_error_async():
)
-def test_get_model_evaluation(
- transport: str = "grpc", request_type=service.GetModelEvaluationRequest
-):
+@pytest.mark.parametrize("request_type", [service.GetModelEvaluationRequest, dict,])
+def test_get_model_evaluation(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -5777,10 +5814,6 @@ def test_get_model_evaluation(
assert response.evaluated_example_count == 2446
-def test_get_model_evaluation_from_dict():
- test_get_model_evaluation(request_type=dict)
-
-
def test_get_model_evaluation_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -5915,7 +5948,9 @@ def test_get_model_evaluation_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_model_evaluation_flattened_error():
@@ -5951,7 +5986,9 @@ async def test_get_model_evaluation_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -5966,9 +6003,8 @@ async def test_get_model_evaluation_flattened_error_async():
)
-def test_list_model_evaluations(
- transport: str = "grpc", request_type=service.ListModelEvaluationsRequest
-):
+@pytest.mark.parametrize("request_type", [service.ListModelEvaluationsRequest, dict,])
+def test_list_model_evaluations(request_type, transport: str = "grpc"):
client = AutoMlClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -5997,10 +6033,6 @@ def test_list_model_evaluations(
assert response.next_page_token == "next_page_token_value"
-def test_list_model_evaluations_from_dict():
- test_list_model_evaluations(request_type=dict)
-
-
def test_list_model_evaluations_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -6129,7 +6161,9 @@ def test_list_model_evaluations_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_model_evaluations_flattened_error():
@@ -6165,7 +6199,9 @@ async def test_list_model_evaluations_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -6180,8 +6216,10 @@ async def test_list_model_evaluations_flattened_error_async():
)
-def test_list_model_evaluations_pager():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_model_evaluations_pager(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -6226,8 +6264,10 @@ def test_list_model_evaluations_pager():
assert all(isinstance(i, model_evaluation.ModelEvaluation) for i in results)
-def test_list_model_evaluations_pages():
- client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_model_evaluations_pages(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
@@ -7047,7 +7087,7 @@ def test_parse_common_location_path():
assert expected == actual
-def test_client_withDEFAULT_CLIENT_INFO():
+def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
diff --git a/tests/unit/gapic/automl_v1beta1/test_prediction_service.py b/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
index 8dfca224..7032ef5e 100644
--- a/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
+++ b/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
@@ -261,20 +261,20 @@ def test_prediction_service_client_client_options(
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -343,7 +343,7 @@ def test_prediction_service_client_mtls_env_auto(
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
@@ -438,7 +438,7 @@ def test_prediction_service_client_client_options_scopes(
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -469,7 +469,7 @@ def test_prediction_service_client_client_options_credentials_file(
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(transport=transport_name, client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
@@ -502,9 +502,8 @@ def test_prediction_service_client_client_options_from_dict():
)
-def test_predict(
- transport: str = "grpc", request_type=prediction_service.PredictRequest
-):
+@pytest.mark.parametrize("request_type", [prediction_service.PredictRequest, dict,])
+def test_predict(request_type, transport: str = "grpc"):
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -528,10 +527,6 @@ def test_predict(
assert isinstance(response, prediction_service.PredictResponse)
-def test_predict_from_dict():
- test_predict(request_type=dict)
-
-
def test_predict_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -655,11 +650,17 @@ def test_predict_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].payload == data_items.ExamplePayload(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].payload
+ mock_val = data_items.ExamplePayload(
image=data_items.Image(image_bytes=b"image_bytes_blob")
)
- assert args[0].params == {"key_value": "value_value"}
+ assert arg == mock_val
+ arg = args[0].params
+ mock_val = {"key_value": "value_value"}
+ assert arg == mock_val
def test_predict_flattened_error():
@@ -706,11 +707,17 @@ async def test_predict_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].payload == data_items.ExamplePayload(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].payload
+ mock_val = data_items.ExamplePayload(
image=data_items.Image(image_bytes=b"image_bytes_blob")
)
- assert args[0].params == {"key_value": "value_value"}
+ assert arg == mock_val
+ arg = args[0].params
+ mock_val = {"key_value": "value_value"}
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -732,9 +739,10 @@ async def test_predict_flattened_error_async():
)
-def test_batch_predict(
- transport: str = "grpc", request_type=prediction_service.BatchPredictRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [prediction_service.BatchPredictRequest, dict,]
+)
+def test_batch_predict(request_type, transport: str = "grpc"):
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -758,10 +766,6 @@ def test_batch_predict(
assert isinstance(response, future.Future)
-def test_batch_predict_from_dict():
- test_batch_predict(request_type=dict)
-
-
def test_batch_predict_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -890,16 +894,24 @@ def test_batch_predict_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].input_config == io.BatchPredictInputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].input_config
+ mock_val = io.BatchPredictInputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
- assert args[0].output_config == io.BatchPredictOutputConfig(
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.BatchPredictOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
- assert args[0].params == {"key_value": "value_value"}
+ assert arg == mock_val
+ arg = args[0].params
+ mock_val = {"key_value": "value_value"}
+ assert arg == mock_val
def test_batch_predict_flattened_error():
@@ -956,16 +968,24 @@ async def test_batch_predict_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].input_config == io.BatchPredictInputConfig(
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].input_config
+ mock_val = io.BatchPredictInputConfig(
gcs_source=io.GcsSource(input_uris=["input_uris_value"])
)
- assert args[0].output_config == io.BatchPredictOutputConfig(
+ assert arg == mock_val
+ arg = args[0].output_config
+ mock_val = io.BatchPredictOutputConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
- assert args[0].params == {"key_value": "value_value"}
+ assert arg == mock_val
+ arg = args[0].params
+ mock_val = {"key_value": "value_value"}
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1538,7 +1558,7 @@ def test_parse_common_location_path():
assert expected == actual
-def test_client_withDEFAULT_CLIENT_INFO():
+def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(