internalGetLabels() {
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java
index 3a1a0f35..1d78a9f8 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java
@@ -275,22 +275,6 @@ private ClusterConfig(
metastoreConfig_ = subBuilder.buildPartial();
}
- break;
- }
- case 170:
- {
- com.google.cloud.dataproc.v1.GkeClusterConfig.Builder subBuilder = null;
- if (gkeClusterConfig_ != null) {
- subBuilder = gkeClusterConfig_.toBuilder();
- }
- gkeClusterConfig_ =
- input.readMessage(
- com.google.cloud.dataproc.v1.GkeClusterConfig.parser(), extensionRegistry);
- if (subBuilder != null) {
- subBuilder.mergeFrom(gkeClusterConfig_);
- gkeClusterConfig_ = subBuilder.buildPartial();
- }
-
break;
}
default:
@@ -1219,69 +1203,6 @@ public com.google.cloud.dataproc.v1.MetastoreConfigOrBuilder getMetastoreConfigO
return getMetastoreConfig();
}
- public static final int GKE_CLUSTER_CONFIG_FIELD_NUMBER = 21;
- private com.google.cloud.dataproc.v1.GkeClusterConfig gkeClusterConfig_;
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- *
- * @return Whether the gkeClusterConfig field is set.
- */
- @java.lang.Override
- public boolean hasGkeClusterConfig() {
- return gkeClusterConfig_ != null;
- }
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- *
- * @return The gkeClusterConfig.
- */
- @java.lang.Override
- public com.google.cloud.dataproc.v1.GkeClusterConfig getGkeClusterConfig() {
- return gkeClusterConfig_ == null
- ? com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance()
- : gkeClusterConfig_;
- }
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- */
- @java.lang.Override
- public com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder getGkeClusterConfigOrBuilder() {
- return getGkeClusterConfig();
- }
-
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -1338,9 +1259,6 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (metastoreConfig_ != null) {
output.writeMessage(20, getMetastoreConfig());
}
- if (gkeClusterConfig_ != null) {
- output.writeMessage(21, getGkeClusterConfig());
- }
unknownFields.writeTo(output);
}
@@ -1395,9 +1313,6 @@ public int getSerializedSize() {
if (metastoreConfig_ != null) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(20, getMetastoreConfig());
}
- if (gkeClusterConfig_ != null) {
- size += com.google.protobuf.CodedOutputStream.computeMessageSize(21, getGkeClusterConfig());
- }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -1461,10 +1376,6 @@ public boolean equals(final java.lang.Object obj) {
if (hasMetastoreConfig()) {
if (!getMetastoreConfig().equals(other.getMetastoreConfig())) return false;
}
- if (hasGkeClusterConfig() != other.hasGkeClusterConfig()) return false;
- if (hasGkeClusterConfig()) {
- if (!getGkeClusterConfig().equals(other.getGkeClusterConfig())) return false;
- }
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@@ -1528,10 +1439,6 @@ public int hashCode() {
hash = (37 * hash) + METASTORE_CONFIG_FIELD_NUMBER;
hash = (53 * hash) + getMetastoreConfig().hashCode();
}
- if (hasGkeClusterConfig()) {
- hash = (37 * hash) + GKE_CLUSTER_CONFIG_FIELD_NUMBER;
- hash = (53 * hash) + getGkeClusterConfig().hashCode();
- }
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
@@ -1755,12 +1662,6 @@ public Builder clear() {
metastoreConfig_ = null;
metastoreConfigBuilder_ = null;
}
- if (gkeClusterConfigBuilder_ == null) {
- gkeClusterConfig_ = null;
- } else {
- gkeClusterConfig_ = null;
- gkeClusterConfigBuilder_ = null;
- }
return this;
}
@@ -1855,11 +1756,6 @@ public com.google.cloud.dataproc.v1.ClusterConfig buildPartial() {
} else {
result.metastoreConfig_ = metastoreConfigBuilder_.build();
}
- if (gkeClusterConfigBuilder_ == null) {
- result.gkeClusterConfig_ = gkeClusterConfig_;
- } else {
- result.gkeClusterConfig_ = gkeClusterConfigBuilder_.build();
- }
onBuilt();
return result;
}
@@ -1977,9 +1873,6 @@ public Builder mergeFrom(com.google.cloud.dataproc.v1.ClusterConfig other) {
if (other.hasMetastoreConfig()) {
mergeMetastoreConfig(other.getMetastoreConfig());
}
- if (other.hasGkeClusterConfig()) {
- mergeGkeClusterConfig(other.getGkeClusterConfig());
- }
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
@@ -5226,237 +5119,6 @@ public com.google.cloud.dataproc.v1.MetastoreConfigOrBuilder getMetastoreConfigO
return metastoreConfigBuilder_;
}
- private com.google.cloud.dataproc.v1.GkeClusterConfig gkeClusterConfig_;
- private com.google.protobuf.SingleFieldBuilderV3<
- com.google.cloud.dataproc.v1.GkeClusterConfig,
- com.google.cloud.dataproc.v1.GkeClusterConfig.Builder,
- com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder>
- gkeClusterConfigBuilder_;
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- *
- * @return Whether the gkeClusterConfig field is set.
- */
- public boolean hasGkeClusterConfig() {
- return gkeClusterConfigBuilder_ != null || gkeClusterConfig_ != null;
- }
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- *
- * @return The gkeClusterConfig.
- */
- public com.google.cloud.dataproc.v1.GkeClusterConfig getGkeClusterConfig() {
- if (gkeClusterConfigBuilder_ == null) {
- return gkeClusterConfig_ == null
- ? com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance()
- : gkeClusterConfig_;
- } else {
- return gkeClusterConfigBuilder_.getMessage();
- }
- }
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- */
- public Builder setGkeClusterConfig(com.google.cloud.dataproc.v1.GkeClusterConfig value) {
- if (gkeClusterConfigBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- gkeClusterConfig_ = value;
- onChanged();
- } else {
- gkeClusterConfigBuilder_.setMessage(value);
- }
-
- return this;
- }
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- */
- public Builder setGkeClusterConfig(
- com.google.cloud.dataproc.v1.GkeClusterConfig.Builder builderForValue) {
- if (gkeClusterConfigBuilder_ == null) {
- gkeClusterConfig_ = builderForValue.build();
- onChanged();
- } else {
- gkeClusterConfigBuilder_.setMessage(builderForValue.build());
- }
-
- return this;
- }
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- */
- public Builder mergeGkeClusterConfig(com.google.cloud.dataproc.v1.GkeClusterConfig value) {
- if (gkeClusterConfigBuilder_ == null) {
- if (gkeClusterConfig_ != null) {
- gkeClusterConfig_ =
- com.google.cloud.dataproc.v1.GkeClusterConfig.newBuilder(gkeClusterConfig_)
- .mergeFrom(value)
- .buildPartial();
- } else {
- gkeClusterConfig_ = value;
- }
- onChanged();
- } else {
- gkeClusterConfigBuilder_.mergeFrom(value);
- }
-
- return this;
- }
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- */
- public Builder clearGkeClusterConfig() {
- if (gkeClusterConfigBuilder_ == null) {
- gkeClusterConfig_ = null;
- onChanged();
- } else {
- gkeClusterConfig_ = null;
- gkeClusterConfigBuilder_ = null;
- }
-
- return this;
- }
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- */
- public com.google.cloud.dataproc.v1.GkeClusterConfig.Builder getGkeClusterConfigBuilder() {
-
- onChanged();
- return getGkeClusterConfigFieldBuilder().getBuilder();
- }
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- */
- public com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder getGkeClusterConfigOrBuilder() {
- if (gkeClusterConfigBuilder_ != null) {
- return gkeClusterConfigBuilder_.getMessageOrBuilder();
- } else {
- return gkeClusterConfig_ == null
- ? com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance()
- : gkeClusterConfig_;
- }
- }
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- */
- private com.google.protobuf.SingleFieldBuilderV3<
- com.google.cloud.dataproc.v1.GkeClusterConfig,
- com.google.cloud.dataproc.v1.GkeClusterConfig.Builder,
- com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder>
- getGkeClusterConfigFieldBuilder() {
- if (gkeClusterConfigBuilder_ == null) {
- gkeClusterConfigBuilder_ =
- new com.google.protobuf.SingleFieldBuilderV3<
- com.google.cloud.dataproc.v1.GkeClusterConfig,
- com.google.cloud.dataproc.v1.GkeClusterConfig.Builder,
- com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder>(
- getGkeClusterConfig(), getParentForChildren(), isClean());
- gkeClusterConfig_ = null;
- }
- return gkeClusterConfigBuilder_;
- }
-
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java
index 9a4a0c35..848764d7 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java
@@ -701,54 +701,4 @@ com.google.cloud.dataproc.v1.NodeInitializationActionOrBuilder getInitialization
*
*/
com.google.cloud.dataproc.v1.MetastoreConfigOrBuilder getMetastoreConfigOrBuilder();
-
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- *
- * @return Whether the gkeClusterConfig field is set.
- */
- boolean hasGkeClusterConfig();
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- *
- * @return The gkeClusterConfig.
- */
- com.google.cloud.dataproc.v1.GkeClusterConfig getGkeClusterConfig();
- /**
- *
- *
- *
- * Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- * Kubernetes. Setting this is considered mutually exclusive with Compute
- * Engine-based options such as `gce_cluster_config`, `master_config`,
- * `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- *
- *
- *
- * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 21 [(.google.api.field_behavior) = OPTIONAL];
- *
- */
- com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder getGkeClusterConfigOrBuilder();
}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOrBuilder.java
index adffcd38..88fb3d88 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOrBuilder.java
@@ -122,6 +122,65 @@ public interface ClusterOrBuilder
*/
com.google.cloud.dataproc.v1.ClusterConfigOrBuilder getConfigOrBuilder();
+ /**
+ *
+ *
+ *
+ * Optional. The virtual cluster config, used when creating a Dataproc cluster that
+ * does not directly control the underlying compute resources, for example,
+ * when creating a [Dataproc-on-GKE
+ * cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+ * Note that Dataproc may set default values, and values may change when
+ * clusters are updated. Exactly one of config or virtualClusterConfig must be
+ * specified.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.VirtualClusterConfig virtual_cluster_config = 10 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the virtualClusterConfig field is set.
+ */
+ boolean hasVirtualClusterConfig();
+ /**
+ *
+ *
+ *
+ * Optional. The virtual cluster config, used when creating a Dataproc cluster that
+ * does not directly control the underlying compute resources, for example,
+ * when creating a [Dataproc-on-GKE
+ * cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+ * Note that Dataproc may set default values, and values may change when
+ * clusters are updated. Exactly one of config or virtualClusterConfig must be
+ * specified.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.VirtualClusterConfig virtual_cluster_config = 10 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The virtualClusterConfig.
+ */
+ com.google.cloud.dataproc.v1.VirtualClusterConfig getVirtualClusterConfig();
+ /**
+ *
+ *
+ *
+ * Optional. The virtual cluster config, used when creating a Dataproc cluster that
+ * does not directly control the underlying compute resources, for example,
+ * when creating a [Dataproc-on-GKE
+ * cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+ * Note that Dataproc may set default values, and values may change when
+ * clusters are updated. Exactly one of config or virtualClusterConfig must be
+ * specified.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.VirtualClusterConfig virtual_cluster_config = 10 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ com.google.cloud.dataproc.v1.VirtualClusterConfigOrBuilder getVirtualClusterConfigOrBuilder();
+
/**
*
*
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java
index 19087502..5ba77c15 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterStatus.java
@@ -177,6 +177,9 @@ public enum State implements com.google.protobuf.ProtocolMessageEnum {
*
*
* The cluster is currently running and healthy. It is ready for use.
+ * **Note:** The cluster state changes from "creating" to "running" status
+ * after the master node(s), first two primary worker nodes (and the last
+ * primary worker node if primary workers > 2) are running.
*
*
* RUNNING = 2;
@@ -281,6 +284,9 @@ public enum State implements com.google.protobuf.ProtocolMessageEnum {
*
*
* The cluster is currently running and healthy. It is ready for use.
+ * **Note:** The cluster state changes from "creating" to "running" status
+ * after the master node(s), first two primary worker nodes (and the last
+ * primary worker node if primary workers > 2) are running.
*
*
* RUNNING = 2;
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java
index 309910fd..17f1cf5a 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClustersProto.java
@@ -40,13 +40,13 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_google_cloud_dataproc_v1_ClusterConfig_fieldAccessorTable;
static final com.google.protobuf.Descriptors.Descriptor
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor;
+ internal_static_google_cloud_dataproc_v1_VirtualClusterConfig_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_fieldAccessorTable;
+ internal_static_google_cloud_dataproc_v1_VirtualClusterConfig_fieldAccessorTable;
static final com.google.protobuf.Descriptors.Descriptor
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor;
+ internal_static_google_cloud_dataproc_v1_AuxiliaryServicesConfig_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_fieldAccessorTable;
+ internal_static_google_cloud_dataproc_v1_AuxiliaryServicesConfig_fieldAccessorTable;
static final com.google.protobuf.Descriptors.Descriptor
internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -212,276 +212,282 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "aproc/v1/shared.proto\032#google/longrunnin"
+ "g/operations.proto\032\036google/protobuf/dura"
+ "tion.proto\032 google/protobuf/field_mask.p"
- + "roto\032\037google/protobuf/timestamp.proto\"\315\003"
+ + "roto\032\037google/protobuf/timestamp.proto\"\242\004"
+ "\n\007Cluster\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\031\n\014cl"
+ "uster_name\030\002 \001(\tB\003\340A\002\022<\n\006config\030\003 \001(\0132\'."
+ "google.cloud.dataproc.v1.ClusterConfigB\003"
- + "\340A\001\022B\n\006labels\030\010 \003(\0132-.google.cloud.datap"
- + "roc.v1.Cluster.LabelsEntryB\003\340A\001\022<\n\006statu"
- + "s\030\004 \001(\0132\'.google.cloud.dataproc.v1.Clust"
- + "erStatusB\003\340A\003\022D\n\016status_history\030\007 \003(\0132\'."
- + "google.cloud.dataproc.v1.ClusterStatusB\003"
- + "\340A\003\022\031\n\014cluster_uuid\030\006 \001(\tB\003\340A\003\022>\n\007metric"
- + "s\030\t \001(\0132(.google.cloud.dataproc.v1.Clust"
- + "erMetricsB\003\340A\003\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001"
- + "(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\251\010\n\rClusterConfig\022"
- + "\032\n\rconfig_bucket\030\001 \001(\tB\003\340A\001\022\030\n\013temp_buck"
- + "et\030\002 \001(\tB\003\340A\001\022K\n\022gce_cluster_config\030\010 \001("
- + "\0132*.google.cloud.dataproc.v1.GceClusterC"
- + "onfigB\003\340A\001\022I\n\rmaster_config\030\t \001(\0132-.goog"
+ + "\340A\001\022S\n\026virtual_cluster_config\030\n \001(\0132..go"
+ + "ogle.cloud.dataproc.v1.VirtualClusterCon"
+ + "figB\003\340A\001\022B\n\006labels\030\010 \003(\0132-.google.cloud."
+ + "dataproc.v1.Cluster.LabelsEntryB\003\340A\001\022<\n\006"
+ + "status\030\004 \001(\0132\'.google.cloud.dataproc.v1."
+ + "ClusterStatusB\003\340A\003\022D\n\016status_history\030\007 \003"
+ + "(\0132\'.google.cloud.dataproc.v1.ClusterSta"
+ + "tusB\003\340A\003\022\031\n\014cluster_uuid\030\006 \001(\tB\003\340A\003\022>\n\007m"
+ + "etrics\030\t \001(\0132(.google.cloud.dataproc.v1."
+ + "ClusterMetricsB\003\340A\003\032-\n\013LabelsEntry\022\013\n\003ke"
+ + "y\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\334\007\n\rClusterCo"
+ + "nfig\022\032\n\rconfig_bucket\030\001 \001(\tB\003\340A\001\022\030\n\013temp"
+ + "_bucket\030\002 \001(\tB\003\340A\001\022K\n\022gce_cluster_config"
+ + "\030\010 \001(\0132*.google.cloud.dataproc.v1.GceClu"
+ + "sterConfigB\003\340A\001\022I\n\rmaster_config\030\t \001(\0132-"
+ + ".google.cloud.dataproc.v1.InstanceGroupC"
+ + "onfigB\003\340A\001\022I\n\rworker_config\030\n \001(\0132-.goog"
+ "le.cloud.dataproc.v1.InstanceGroupConfig"
- + "B\003\340A\001\022I\n\rworker_config\030\n \001(\0132-.google.cl"
- + "oud.dataproc.v1.InstanceGroupConfigB\003\340A\001"
- + "\022S\n\027secondary_worker_config\030\014 \001(\0132-.goog"
- + "le.cloud.dataproc.v1.InstanceGroupConfig"
- + "B\003\340A\001\022F\n\017software_config\030\r \001(\0132(.google."
- + "cloud.dataproc.v1.SoftwareConfigB\003\340A\001\022W\n"
- + "\026initialization_actions\030\013 \003(\01322.google.c"
- + "loud.dataproc.v1.NodeInitializationActio"
- + "nB\003\340A\001\022J\n\021encryption_config\030\017 \001(\0132*.goog"
- + "le.cloud.dataproc.v1.EncryptionConfigB\003\340"
- + "A\001\022L\n\022autoscaling_config\030\022 \001(\0132+.google."
- + "cloud.dataproc.v1.AutoscalingConfigB\003\340A\001"
- + "\022F\n\017security_config\030\020 \001(\0132(.google.cloud"
- + ".dataproc.v1.SecurityConfigB\003\340A\001\022H\n\020life"
- + "cycle_config\030\021 \001(\0132).google.cloud.datapr"
- + "oc.v1.LifecycleConfigB\003\340A\001\022F\n\017endpoint_c"
- + "onfig\030\023 \001(\0132(.google.cloud.dataproc.v1.E"
- + "ndpointConfigB\003\340A\001\022H\n\020metastore_config\030\024"
- + " \001(\0132).google.cloud.dataproc.v1.Metastor"
- + "eConfigB\003\340A\001\022K\n\022gke_cluster_config\030\025 \001(\013"
- + "2*.google.cloud.dataproc.v1.GkeClusterCo"
- + "nfigB\003\340A\001\"\223\002\n\020GkeClusterConfig\022w\n namesp"
- + "aced_gke_deployment_target\030\001 \001(\0132H.googl"
- + "e.cloud.dataproc.v1.GkeClusterConfig.Nam"
- + "espacedGkeDeploymentTargetB\003\340A\001\032\205\001\n\035Name"
- + "spacedGkeDeploymentTarget\022D\n\022target_gke_"
- + "cluster\030\001 \001(\tB(\340A\001\372A\"\n container.googlea"
- + "pis.com/Cluster\022\036\n\021cluster_namespace\030\002 \001"
- + "(\tB\003\340A\001\"\272\001\n\016EndpointConfig\022P\n\nhttp_ports"
- + "\030\001 \003(\01327.google.cloud.dataproc.v1.Endpoi"
- + "ntConfig.HttpPortsEntryB\003\340A\003\022$\n\027enable_h"
- + "ttp_port_access\030\002 \001(\010B\003\340A\001\0320\n\016HttpPortsE"
- + "ntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\",\n\021"
- + "AutoscalingConfig\022\027\n\npolicy_uri\030\001 \001(\tB\003\340"
- + "A\001\"4\n\020EncryptionConfig\022 \n\023gce_pd_kms_key"
- + "_name\030\001 \001(\tB\003\340A\001\"\233\007\n\020GceClusterConfig\022\025\n"
- + "\010zone_uri\030\001 \001(\tB\003\340A\001\022\030\n\013network_uri\030\002 \001("
- + "\tB\003\340A\001\022\033\n\016subnetwork_uri\030\006 \001(\tB\003\340A\001\022\035\n\020i"
- + "nternal_ip_only\030\007 \001(\010B\003\340A\001\022k\n\032private_ip"
- + "v6_google_access\030\014 \001(\0162B.google.cloud.da"
- + "taproc.v1.GceClusterConfig.PrivateIpv6Go"
- + "ogleAccessB\003\340A\001\022\034\n\017service_account\030\010 \001(\t"
- + "B\003\340A\001\022#\n\026service_account_scopes\030\003 \003(\tB\003\340"
- + "A\001\022\014\n\004tags\030\004 \003(\t\022J\n\010metadata\030\005 \003(\01328.goo"
- + "gle.cloud.dataproc.v1.GceClusterConfig.M"
- + "etadataEntry\022P\n\024reservation_affinity\030\013 \001"
- + "(\0132-.google.cloud.dataproc.v1.Reservatio"
- + "nAffinityB\003\340A\001\022M\n\023node_group_affinity\030\r "
- + "\001(\0132+.google.cloud.dataproc.v1.NodeGroup"
- + "AffinityB\003\340A\001\022W\n\030shielded_instance_confi"
- + "g\030\016 \001(\01320.google.cloud.dataproc.v1.Shiel"
- + "dedInstanceConfigB\003\340A\001\022_\n\034confidential_i"
- + "nstance_config\030\017 \001(\01324.google.cloud.data"
- + "proc.v1.ConfidentialInstanceConfigB\003\340A\001\032"
- + "/\n\rMetadataEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002"
- + " \001(\t:\0028\001\"\203\001\n\027PrivateIpv6GoogleAccess\022*\n&"
- + "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED\020\000"
- + "\022\033\n\027INHERIT_FROM_SUBNETWORK\020\001\022\014\n\010OUTBOUN"
- + "D\020\002\022\021\n\rBIDIRECTIONAL\020\003\"0\n\021NodeGroupAffin"
- + "ity\022\033\n\016node_group_uri\030\001 \001(\tB\003\340A\002\"}\n\026Shie"
- + "ldedInstanceConfig\022\037\n\022enable_secure_boot"
- + "\030\001 \001(\010B\003\340A\001\022\030\n\013enable_vtpm\030\002 \001(\010B\003\340A\001\022(\n"
- + "\033enable_integrity_monitoring\030\003 \001(\010B\003\340A\001\""
- + "F\n\032ConfidentialInstanceConfig\022(\n\033enable_"
- + "confidential_compute\030\001 \001(\010B\003\340A\001\"\315\004\n\023Inst"
- + "anceGroupConfig\022\032\n\rnum_instances\030\001 \001(\005B\003"
- + "\340A\001\022\033\n\016instance_names\030\002 \003(\tB\003\340A\003\022\026\n\timag"
- + "e_uri\030\003 \001(\tB\003\340A\001\022\035\n\020machine_type_uri\030\004 \001"
- + "(\tB\003\340A\001\022>\n\013disk_config\030\005 \001(\0132$.google.cl"
- + "oud.dataproc.v1.DiskConfigB\003\340A\001\022\033\n\016is_pr"
- + "eemptible\030\006 \001(\010B\003\340A\003\022Y\n\016preemptibility\030\n"
- + " \001(\0162<.google.cloud.dataproc.v1.Instance"
- + "GroupConfig.PreemptibilityB\003\340A\001\022O\n\024manag"
- + "ed_group_config\030\007 \001(\0132,.google.cloud.dat"
- + "aproc.v1.ManagedGroupConfigB\003\340A\003\022F\n\014acce"
- + "lerators\030\010 \003(\0132+.google.cloud.dataproc.v"
- + "1.AcceleratorConfigB\003\340A\001\022\035\n\020min_cpu_plat"
- + "form\030\t \001(\tB\003\340A\001\"V\n\016Preemptibility\022\036\n\032PRE"
- + "EMPTIBILITY_UNSPECIFIED\020\000\022\023\n\017NON_PREEMPT"
- + "IBLE\020\001\022\017\n\013PREEMPTIBLE\020\002\"c\n\022ManagedGroupC"
- + "onfig\022#\n\026instance_template_name\030\001 \001(\tB\003\340"
- + "A\003\022(\n\033instance_group_manager_name\030\002 \001(\tB"
- + "\003\340A\003\"L\n\021AcceleratorConfig\022\034\n\024accelerator"
- + "_type_uri\030\001 \001(\t\022\031\n\021accelerator_count\030\002 \001"
- + "(\005\"\210\001\n\nDiskConfig\022\033\n\016boot_disk_type\030\003 \001("
- + "\tB\003\340A\001\022\036\n\021boot_disk_size_gb\030\001 \001(\005B\003\340A\001\022\033"
- + "\n\016num_local_ssds\030\002 \001(\005B\003\340A\001\022 \n\023local_ssd"
- + "_interface\030\004 \001(\tB\003\340A\001\"s\n\030NodeInitializat"
- + "ionAction\022\034\n\017executable_file\030\001 \001(\tB\003\340A\002\022"
- + "9\n\021execution_timeout\030\002 \001(\0132\031.google.prot"
- + "obuf.DurationB\003\340A\001\"\307\003\n\rClusterStatus\022A\n\005"
- + "state\030\001 \001(\0162-.google.cloud.dataproc.v1.C"
- + "lusterStatus.StateB\003\340A\003\022\026\n\006detail\030\002 \001(\tB"
- + "\006\340A\003\340A\001\0229\n\020state_start_time\030\003 \001(\0132\032.goog"
- + "le.protobuf.TimestampB\003\340A\003\022G\n\010substate\030\004"
- + " \001(\01620.google.cloud.dataproc.v1.ClusterS"
- + "tatus.SubstateB\003\340A\003\"\230\001\n\005State\022\013\n\007UNKNOWN"
- + "\020\000\022\014\n\010CREATING\020\001\022\013\n\007RUNNING\020\002\022\t\n\005ERROR\020\003"
- + "\022\027\n\023ERROR_DUE_TO_UPDATE\020\t\022\014\n\010DELETING\020\004\022"
- + "\014\n\010UPDATING\020\005\022\014\n\010STOPPING\020\006\022\013\n\007STOPPED\020\007"
- + "\022\014\n\010STARTING\020\010\"<\n\010Substate\022\017\n\013UNSPECIFIE"
- + "D\020\000\022\r\n\tUNHEALTHY\020\001\022\020\n\014STALE_STATUS\020\002\"\240\001\n"
- + "\016SecurityConfig\022F\n\017kerberos_config\030\001 \001(\013"
- + "2(.google.cloud.dataproc.v1.KerberosConf"
- + "igB\003\340A\001\022F\n\017identity_config\030\002 \001(\0132(.googl"
- + "e.cloud.dataproc.v1.IdentityConfigB\003\340A\001\""
- + "\220\004\n\016KerberosConfig\022\034\n\017enable_kerberos\030\001 "
- + "\001(\010B\003\340A\001\022(\n\033root_principal_password_uri\030"
- + "\002 \001(\tB\003\340A\001\022\030\n\013kms_key_uri\030\003 \001(\tB\003\340A\001\022\031\n\014"
- + "keystore_uri\030\004 \001(\tB\003\340A\001\022\033\n\016truststore_ur"
- + "i\030\005 \001(\tB\003\340A\001\022\"\n\025keystore_password_uri\030\006 "
- + "\001(\tB\003\340A\001\022\035\n\020key_password_uri\030\007 \001(\tB\003\340A\001\022"
- + "$\n\027truststore_password_uri\030\010 \001(\tB\003\340A\001\022$\n"
- + "\027cross_realm_trust_realm\030\t \001(\tB\003\340A\001\022\"\n\025c"
- + "ross_realm_trust_kdc\030\n \001(\tB\003\340A\001\022+\n\036cross"
- + "_realm_trust_admin_server\030\013 \001(\tB\003\340A\001\0222\n%"
- + "cross_realm_trust_shared_password_uri\030\014 "
- + "\001(\tB\003\340A\001\022\033\n\016kdc_db_key_uri\030\r \001(\tB\003\340A\001\022\037\n"
- + "\022tgt_lifetime_hours\030\016 \001(\005B\003\340A\001\022\022\n\005realm\030"
- + "\017 \001(\tB\003\340A\001\"\306\001\n\016IdentityConfig\022r\n\034user_se"
- + "rvice_account_mapping\030\001 \003(\0132G.google.clo"
- + "ud.dataproc.v1.IdentityConfig.UserServic"
- + "eAccountMappingEntryB\003\340A\002\032@\n\036UserService"
- + "AccountMappingEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005valu"
- + "e\030\002 \001(\t:\0028\001\"\371\001\n\016SoftwareConfig\022\032\n\rimage_"
- + "version\030\001 \001(\tB\003\340A\001\022Q\n\nproperties\030\002 \003(\01328"
- + ".google.cloud.dataproc.v1.SoftwareConfig"
- + ".PropertiesEntryB\003\340A\001\022E\n\023optional_compon"
- + "ents\030\003 \003(\0162#.google.cloud.dataproc.v1.Co"
- + "mponentB\003\340A\001\0321\n\017PropertiesEntry\022\013\n\003key\030\001"
- + " \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\203\002\n\017LifecycleCon"
- + "fig\0227\n\017idle_delete_ttl\030\001 \001(\0132\031.google.pr"
- + "otobuf.DurationB\003\340A\001\022;\n\020auto_delete_time"
- + "\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\001H"
- + "\000\0229\n\017auto_delete_ttl\030\003 \001(\0132\031.google.prot"
- + "obuf.DurationB\003\340A\001H\000\0228\n\017idle_start_time\030"
- + "\004 \001(\0132\032.google.protobuf.TimestampB\003\340A\003B\005"
- + "\n\003ttl\"_\n\017MetastoreConfig\022L\n\032dataproc_met"
- + "astore_service\030\001 \001(\tB(\340A\002\372A\"\n metastore."
- + "googleapis.com/Service\"\232\002\n\016ClusterMetric"
- + "s\022O\n\014hdfs_metrics\030\001 \003(\01329.google.cloud.d"
- + "ataproc.v1.ClusterMetrics.HdfsMetricsEnt"
- + "ry\022O\n\014yarn_metrics\030\002 \003(\01329.google.cloud."
- + "dataproc.v1.ClusterMetrics.YarnMetricsEn"
- + "try\0322\n\020HdfsMetricsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005"
- + "value\030\002 \001(\003:\0028\001\0322\n\020YarnMetricsEntry\022\013\n\003k"
- + "ey\030\001 \001(\t\022\r\n\005value\030\002 \001(\003:\0028\001\"\356\001\n\024CreateCl"
- + "usterRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n"
- + "\006region\030\003 \001(\tB\003\340A\002\0227\n\007cluster\030\002 \001(\0132!.go"
- + "ogle.cloud.dataproc.v1.ClusterB\003\340A\002\022\027\n\nr"
- + "equest_id\030\004 \001(\tB\003\340A\001\022V\n action_on_failed"
- + "_primary_workers\030\005 \001(\0162\'.google.cloud.da"
- + "taproc.v1.FailureActionB\003\340A\001\"\256\002\n\024UpdateC"
- + "lusterRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023"
- + "\n\006region\030\005 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001("
- + "\tB\003\340A\002\0227\n\007cluster\030\003 \001(\0132!.google.cloud.d"
- + "ataproc.v1.ClusterB\003\340A\002\022E\n\035graceful_deco"
- + "mmission_timeout\030\006 \001(\0132\031.google.protobuf"
- + ".DurationB\003\340A\001\0224\n\013update_mask\030\004 \001(\0132\032.go"
- + "ogle.protobuf.FieldMaskB\003\340A\002\022\027\n\nrequest_"
- + "id\030\007 \001(\tB\003\340A\001\"\221\001\n\022StopClusterRequest\022\027\n\n"
- + "project_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\002 \001(\tB\003\340"
- + "A\002\022\031\n\014cluster_name\030\003 \001(\tB\003\340A\002\022\031\n\014cluster"
- + "_uuid\030\004 \001(\tB\003\340A\001\022\027\n\nrequest_id\030\005 \001(\tB\003\340A"
- + "\001\"\222\001\n\023StartClusterRequest\022\027\n\nproject_id\030"
- + "\001 \001(\tB\003\340A\002\022\023\n\006region\030\002 \001(\tB\003\340A\002\022\031\n\014clust"
- + "er_name\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_uuid\030\004 \001(\t"
- + "B\003\340A\001\022\027\n\nrequest_id\030\005 \001(\tB\003\340A\001\"\223\001\n\024Delet"
- + "eClusterRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002"
- + "\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 "
- + "\001(\tB\003\340A\002\022\031\n\014cluster_uuid\030\004 \001(\tB\003\340A\001\022\027\n\nr"
- + "equest_id\030\005 \001(\tB\003\340A\001\"\\\n\021GetClusterReques"
- + "t\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001"
- + "(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002\"\211\001\n\023L"
- + "istClustersRequest\022\027\n\nproject_id\030\001 \001(\tB\003"
- + "\340A\002\022\023\n\006region\030\004 \001(\tB\003\340A\002\022\023\n\006filter\030\005 \001(\t"
- + "B\003\340A\001\022\026\n\tpage_size\030\002 \001(\005B\003\340A\001\022\027\n\npage_to"
- + "ken\030\003 \001(\tB\003\340A\001\"n\n\024ListClustersResponse\0228"
- + "\n\010clusters\030\001 \003(\0132!.google.cloud.dataproc"
- + ".v1.ClusterB\003\340A\003\022\034\n\017next_page_token\030\002 \001("
- + "\tB\003\340A\003\"a\n\026DiagnoseClusterRequest\022\027\n\nproj"
- + "ect_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\031"
- + "\n\014cluster_name\030\002 \001(\tB\003\340A\002\"1\n\026DiagnoseClu"
- + "sterResults\022\027\n\noutput_uri\030\001 \001(\tB\003\340A\003\"\370\001\n"
- + "\023ReservationAffinity\022Y\n\030consume_reservat"
- + "ion_type\030\001 \001(\01622.google.cloud.dataproc.v"
- + "1.ReservationAffinity.TypeB\003\340A\001\022\020\n\003key\030\002"
- + " \001(\tB\003\340A\001\022\023\n\006values\030\003 \003(\tB\003\340A\001\"_\n\004Type\022\024"
- + "\n\020TYPE_UNSPECIFIED\020\000\022\022\n\016NO_RESERVATION\020\001"
- + "\022\023\n\017ANY_RESERVATION\020\002\022\030\n\024SPECIFIC_RESERV"
- + "ATION\020\0032\344\020\n\021ClusterController\022\200\002\n\rCreate"
- + "Cluster\022..google.cloud.dataproc.v1.Creat"
- + "eClusterRequest\032\035.google.longrunning.Ope"
- + "ration\"\237\001\202\323\344\223\002>\"3/v1/projects/{project_i"
- + "d}/regions/{region}/clusters:\007cluster\332A\031"
- + "project_id,region,cluster\312A<\n\007Cluster\0221g"
- + "oogle.cloud.dataproc.v1.ClusterOperation"
- + "Metadata\022\250\002\n\rUpdateCluster\022..google.clou"
- + "d.dataproc.v1.UpdateClusterRequest\032\035.goo"
- + "gle.longrunning.Operation\"\307\001\202\323\344\223\002M2B/v1/"
- + "projects/{project_id}/regions/{region}/c"
- + "lusters/{cluster_name}:\007cluster\332A2projec"
- + "t_id,region,cluster_name,cluster,update_"
- + "mask\312A<\n\007Cluster\0221google.cloud.dataproc."
- + "v1.ClusterOperationMetadata\022\356\001\n\013StopClus"
- + "ter\022,.google.cloud.dataproc.v1.StopClust"
+ + "B\003\340A\001\022S\n\027secondary_worker_config\030\014 \001(\0132-"
+ + ".google.cloud.dataproc.v1.InstanceGroupC"
+ + "onfigB\003\340A\001\022F\n\017software_config\030\r \001(\0132(.go"
+ + "ogle.cloud.dataproc.v1.SoftwareConfigB\003\340"
+ + "A\001\022W\n\026initialization_actions\030\013 \003(\01322.goo"
+ + "gle.cloud.dataproc.v1.NodeInitialization"
+ + "ActionB\003\340A\001\022J\n\021encryption_config\030\017 \001(\0132*"
+ + ".google.cloud.dataproc.v1.EncryptionConf"
+ + "igB\003\340A\001\022L\n\022autoscaling_config\030\022 \001(\0132+.go"
+ + "ogle.cloud.dataproc.v1.AutoscalingConfig"
+ + "B\003\340A\001\022F\n\017security_config\030\020 \001(\0132(.google."
+ + "cloud.dataproc.v1.SecurityConfigB\003\340A\001\022H\n"
+ + "\020lifecycle_config\030\021 \001(\0132).google.cloud.d"
+ + "ataproc.v1.LifecycleConfigB\003\340A\001\022F\n\017endpo"
+ + "int_config\030\023 \001(\0132(.google.cloud.dataproc"
+ + ".v1.EndpointConfigB\003\340A\001\022H\n\020metastore_con"
+ + "fig\030\024 \001(\0132).google.cloud.dataproc.v1.Met"
+ + "astoreConfigB\003\340A\001\"\236\002\n\024VirtualClusterConf"
+ + "ig\022\033\n\016staging_bucket\030\001 \001(\tB\003\340A\001\022\030\n\013temp_"
+ + "bucket\030\002 \001(\tB\003\340A\001\022[\n\031kubernetes_cluster_"
+ + "config\030\006 \001(\01321.google.cloud.dataproc.v1."
+ + "KubernetesClusterConfigB\003\340A\002H\000\022Y\n\031auxili"
+ + "ary_services_config\030\007 \001(\01321.google.cloud"
+ + ".dataproc.v1.AuxiliaryServicesConfigB\003\340A"
+ + "\001B\027\n\025infrastructure_config\"\301\001\n\027Auxiliary"
+ + "ServicesConfig\022H\n\020metastore_config\030\001 \001(\013"
+ + "2).google.cloud.dataproc.v1.MetastoreCon"
+ + "figB\003\340A\001\022\\\n\033spark_history_server_config\030"
+ + "\002 \001(\01322.google.cloud.dataproc.v1.SparkHi"
+ + "storyServerConfigB\003\340A\001\"\272\001\n\016EndpointConfi"
+ + "g\022P\n\nhttp_ports\030\001 \003(\01327.google.cloud.dat"
+ + "aproc.v1.EndpointConfig.HttpPortsEntryB\003"
+ + "\340A\003\022$\n\027enable_http_port_access\030\002 \001(\010B\003\340A"
+ + "\001\0320\n\016HttpPortsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005valu"
+ + "e\030\002 \001(\t:\0028\001\",\n\021AutoscalingConfig\022\027\n\npoli"
+ + "cy_uri\030\001 \001(\tB\003\340A\001\"4\n\020EncryptionConfig\022 \n"
+ + "\023gce_pd_kms_key_name\030\001 \001(\tB\003\340A\001\"\233\007\n\020GceC"
+ + "lusterConfig\022\025\n\010zone_uri\030\001 \001(\tB\003\340A\001\022\030\n\013n"
+ + "etwork_uri\030\002 \001(\tB\003\340A\001\022\033\n\016subnetwork_uri\030"
+ + "\006 \001(\tB\003\340A\001\022\035\n\020internal_ip_only\030\007 \001(\010B\003\340A"
+ + "\001\022k\n\032private_ipv6_google_access\030\014 \001(\0162B."
+ + "google.cloud.dataproc.v1.GceClusterConfi"
+ + "g.PrivateIpv6GoogleAccessB\003\340A\001\022\034\n\017servic"
+ + "e_account\030\010 \001(\tB\003\340A\001\022#\n\026service_account_"
+ + "scopes\030\003 \003(\tB\003\340A\001\022\014\n\004tags\030\004 \003(\t\022J\n\010metad"
+ + "ata\030\005 \003(\01328.google.cloud.dataproc.v1.Gce"
+ + "ClusterConfig.MetadataEntry\022P\n\024reservati"
+ + "on_affinity\030\013 \001(\0132-.google.cloud.datapro"
+ + "c.v1.ReservationAffinityB\003\340A\001\022M\n\023node_gr"
+ + "oup_affinity\030\r \001(\0132+.google.cloud.datapr"
+ + "oc.v1.NodeGroupAffinityB\003\340A\001\022W\n\030shielded"
+ + "_instance_config\030\016 \001(\01320.google.cloud.da"
+ + "taproc.v1.ShieldedInstanceConfigB\003\340A\001\022_\n"
+ + "\034confidential_instance_config\030\017 \001(\01324.go"
+ + "ogle.cloud.dataproc.v1.ConfidentialInsta"
+ + "nceConfigB\003\340A\001\032/\n\rMetadataEntry\022\013\n\003key\030\001"
+ + " \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\203\001\n\027PrivateIpv6G"
+ + "oogleAccess\022*\n&PRIVATE_IPV6_GOOGLE_ACCES"
+ + "S_UNSPECIFIED\020\000\022\033\n\027INHERIT_FROM_SUBNETWO"
+ + "RK\020\001\022\014\n\010OUTBOUND\020\002\022\021\n\rBIDIRECTIONAL\020\003\"0\n"
+ + "\021NodeGroupAffinity\022\033\n\016node_group_uri\030\001 \001"
+ + "(\tB\003\340A\002\"}\n\026ShieldedInstanceConfig\022\037\n\022ena"
+ + "ble_secure_boot\030\001 \001(\010B\003\340A\001\022\030\n\013enable_vtp"
+ + "m\030\002 \001(\010B\003\340A\001\022(\n\033enable_integrity_monitor"
+ + "ing\030\003 \001(\010B\003\340A\001\"F\n\032ConfidentialInstanceCo"
+ + "nfig\022(\n\033enable_confidential_compute\030\001 \001("
+ + "\010B\003\340A\001\"\315\004\n\023InstanceGroupConfig\022\032\n\rnum_in"
+ + "stances\030\001 \001(\005B\003\340A\001\022\033\n\016instance_names\030\002 \003"
+ + "(\tB\003\340A\003\022\026\n\timage_uri\030\003 \001(\tB\003\340A\001\022\035\n\020machi"
+ + "ne_type_uri\030\004 \001(\tB\003\340A\001\022>\n\013disk_config\030\005 "
+ + "\001(\0132$.google.cloud.dataproc.v1.DiskConfi"
+ + "gB\003\340A\001\022\033\n\016is_preemptible\030\006 \001(\010B\003\340A\003\022Y\n\016p"
+ + "reemptibility\030\n \001(\0162<.google.cloud.datap"
+ + "roc.v1.InstanceGroupConfig.Preemptibilit"
+ + "yB\003\340A\001\022O\n\024managed_group_config\030\007 \001(\0132,.g"
+ + "oogle.cloud.dataproc.v1.ManagedGroupConf"
+ + "igB\003\340A\003\022F\n\014accelerators\030\010 \003(\0132+.google.c"
+ + "loud.dataproc.v1.AcceleratorConfigB\003\340A\001\022"
+ + "\035\n\020min_cpu_platform\030\t \001(\tB\003\340A\001\"V\n\016Preemp"
+ + "tibility\022\036\n\032PREEMPTIBILITY_UNSPECIFIED\020\000"
+ + "\022\023\n\017NON_PREEMPTIBLE\020\001\022\017\n\013PREEMPTIBLE\020\002\"c"
+ + "\n\022ManagedGroupConfig\022#\n\026instance_templat"
+ + "e_name\030\001 \001(\tB\003\340A\003\022(\n\033instance_group_mana"
+ + "ger_name\030\002 \001(\tB\003\340A\003\"L\n\021AcceleratorConfig"
+ + "\022\034\n\024accelerator_type_uri\030\001 \001(\t\022\031\n\021accele"
+ + "rator_count\030\002 \001(\005\"\210\001\n\nDiskConfig\022\033\n\016boot"
+ + "_disk_type\030\003 \001(\tB\003\340A\001\022\036\n\021boot_disk_size_"
+ + "gb\030\001 \001(\005B\003\340A\001\022\033\n\016num_local_ssds\030\002 \001(\005B\003\340"
+ + "A\001\022 \n\023local_ssd_interface\030\004 \001(\tB\003\340A\001\"s\n\030"
+ + "NodeInitializationAction\022\034\n\017executable_f"
+ + "ile\030\001 \001(\tB\003\340A\002\0229\n\021execution_timeout\030\002 \001("
+ + "\0132\031.google.protobuf.DurationB\003\340A\001\"\307\003\n\rCl"
+ + "usterStatus\022A\n\005state\030\001 \001(\0162-.google.clou"
+ + "d.dataproc.v1.ClusterStatus.StateB\003\340A\003\022\026"
+ + "\n\006detail\030\002 \001(\tB\006\340A\003\340A\001\0229\n\020state_start_ti"
+ + "me\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A"
+ + "\003\022G\n\010substate\030\004 \001(\01620.google.cloud.datap"
+ + "roc.v1.ClusterStatus.SubstateB\003\340A\003\"\230\001\n\005S"
+ + "tate\022\013\n\007UNKNOWN\020\000\022\014\n\010CREATING\020\001\022\013\n\007RUNNI"
+ + "NG\020\002\022\t\n\005ERROR\020\003\022\027\n\023ERROR_DUE_TO_UPDATE\020\t"
+ + "\022\014\n\010DELETING\020\004\022\014\n\010UPDATING\020\005\022\014\n\010STOPPING"
+ + "\020\006\022\013\n\007STOPPED\020\007\022\014\n\010STARTING\020\010\"<\n\010Substat"
+ + "e\022\017\n\013UNSPECIFIED\020\000\022\r\n\tUNHEALTHY\020\001\022\020\n\014STA"
+ + "LE_STATUS\020\002\"\240\001\n\016SecurityConfig\022F\n\017kerber"
+ + "os_config\030\001 \001(\0132(.google.cloud.dataproc."
+ + "v1.KerberosConfigB\003\340A\001\022F\n\017identity_confi"
+ + "g\030\002 \001(\0132(.google.cloud.dataproc.v1.Ident"
+ + "ityConfigB\003\340A\001\"\220\004\n\016KerberosConfig\022\034\n\017ena"
+ + "ble_kerberos\030\001 \001(\010B\003\340A\001\022(\n\033root_principa"
+ + "l_password_uri\030\002 \001(\tB\003\340A\001\022\030\n\013kms_key_uri"
+ + "\030\003 \001(\tB\003\340A\001\022\031\n\014keystore_uri\030\004 \001(\tB\003\340A\001\022\033"
+ + "\n\016truststore_uri\030\005 \001(\tB\003\340A\001\022\"\n\025keystore_"
+ + "password_uri\030\006 \001(\tB\003\340A\001\022\035\n\020key_password_"
+ + "uri\030\007 \001(\tB\003\340A\001\022$\n\027truststore_password_ur"
+ + "i\030\010 \001(\tB\003\340A\001\022$\n\027cross_realm_trust_realm\030"
+ + "\t \001(\tB\003\340A\001\022\"\n\025cross_realm_trust_kdc\030\n \001("
+ + "\tB\003\340A\001\022+\n\036cross_realm_trust_admin_server"
+ + "\030\013 \001(\tB\003\340A\001\0222\n%cross_realm_trust_shared_"
+ + "password_uri\030\014 \001(\tB\003\340A\001\022\033\n\016kdc_db_key_ur"
+ + "i\030\r \001(\tB\003\340A\001\022\037\n\022tgt_lifetime_hours\030\016 \001(\005"
+ + "B\003\340A\001\022\022\n\005realm\030\017 \001(\tB\003\340A\001\"\306\001\n\016IdentityCo"
+ + "nfig\022r\n\034user_service_account_mapping\030\001 \003"
+ + "(\0132G.google.cloud.dataproc.v1.IdentityCo"
+ + "nfig.UserServiceAccountMappingEntryB\003\340A\002"
+ + "\032@\n\036UserServiceAccountMappingEntry\022\013\n\003ke"
+ + "y\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\371\001\n\016SoftwareC"
+ + "onfig\022\032\n\rimage_version\030\001 \001(\tB\003\340A\001\022Q\n\npro"
+ + "perties\030\002 \003(\01328.google.cloud.dataproc.v1"
+ + ".SoftwareConfig.PropertiesEntryB\003\340A\001\022E\n\023"
+ + "optional_components\030\003 \003(\0162#.google.cloud"
+ + ".dataproc.v1.ComponentB\003\340A\001\0321\n\017Propertie"
+ + "sEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\203"
+ + "\002\n\017LifecycleConfig\0227\n\017idle_delete_ttl\030\001 "
+ + "\001(\0132\031.google.protobuf.DurationB\003\340A\001\022;\n\020a"
+ + "uto_delete_time\030\002 \001(\0132\032.google.protobuf."
+ + "TimestampB\003\340A\001H\000\0229\n\017auto_delete_ttl\030\003 \001("
+ + "\0132\031.google.protobuf.DurationB\003\340A\001H\000\0228\n\017i"
+ + "dle_start_time\030\004 \001(\0132\032.google.protobuf.T"
+ + "imestampB\003\340A\003B\005\n\003ttl\"_\n\017MetastoreConfig\022"
+ + "L\n\032dataproc_metastore_service\030\001 \001(\tB(\340A\002"
+ + "\372A\"\n metastore.googleapis.com/Service\"\232\002"
+ + "\n\016ClusterMetrics\022O\n\014hdfs_metrics\030\001 \003(\01329"
+ + ".google.cloud.dataproc.v1.ClusterMetrics"
+ + ".HdfsMetricsEntry\022O\n\014yarn_metrics\030\002 \003(\0132"
+ + "9.google.cloud.dataproc.v1.ClusterMetric"
+ + "s.YarnMetricsEntry\0322\n\020HdfsMetricsEntry\022\013"
+ + "\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\003:\0028\001\0322\n\020YarnMe"
+ + "tricsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\003:\002"
+ + "8\001\"\356\001\n\024CreateClusterRequest\022\027\n\nproject_i"
+ + "d\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\0227\n\007clu"
+ + "ster\030\002 \001(\0132!.google.cloud.dataproc.v1.Cl"
+ + "usterB\003\340A\002\022\027\n\nrequest_id\030\004 \001(\tB\003\340A\001\022V\n a"
+ + "ction_on_failed_primary_workers\030\005 \001(\0162\'."
+ + "google.cloud.dataproc.v1.FailureActionB\003"
+ + "\340A\001\"\256\002\n\024UpdateClusterRequest\022\027\n\nproject_"
+ + "id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\005 \001(\tB\003\340A\002\022\031\n\014cl"
+ + "uster_name\030\002 \001(\tB\003\340A\002\0227\n\007cluster\030\003 \001(\0132!"
+ + ".google.cloud.dataproc.v1.ClusterB\003\340A\002\022E"
+ + "\n\035graceful_decommission_timeout\030\006 \001(\0132\031."
+ + "google.protobuf.DurationB\003\340A\001\0224\n\013update_"
+ + "mask\030\004 \001(\0132\032.google.protobuf.FieldMaskB\003"
+ + "\340A\002\022\027\n\nrequest_id\030\007 \001(\tB\003\340A\001\"\221\001\n\022StopClu"
+ + "sterRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006"
+ + "region\030\002 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\003 \001(\tB"
+ + "\003\340A\002\022\031\n\014cluster_uuid\030\004 \001(\tB\003\340A\001\022\027\n\nreque"
+ + "st_id\030\005 \001(\tB\003\340A\001\"\222\001\n\023StartClusterRequest"
+ + "\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\002 \001("
+ + "\tB\003\340A\002\022\031\n\014cluster_name\030\003 \001(\tB\003\340A\002\022\031\n\014clu"
+ + "ster_uuid\030\004 \001(\tB\003\340A\001\022\027\n\nrequest_id\030\005 \001(\t"
+ + "B\003\340A\001\"\223\001\n\024DeleteClusterRequest\022\027\n\nprojec"
+ + "t_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\031\n\014"
+ + "cluster_name\030\002 \001(\tB\003\340A\002\022\031\n\014cluster_uuid\030"
+ + "\004 \001(\tB\003\340A\001\022\027\n\nrequest_id\030\005 \001(\tB\003\340A\001\"\\\n\021G"
+ + "etClusterRequest\022\027\n\nproject_id\030\001 \001(\tB\003\340A"
+ + "\002\022\023\n\006region\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002"
+ + " \001(\tB\003\340A\002\"\211\001\n\023ListClustersRequest\022\027\n\npro"
+ + "ject_id\030\001 \001(\tB\003\340A\002\022\023\n\006region\030\004 \001(\tB\003\340A\002\022"
+ + "\023\n\006filter\030\005 \001(\tB\003\340A\001\022\026\n\tpage_size\030\002 \001(\005B"
+ + "\003\340A\001\022\027\n\npage_token\030\003 \001(\tB\003\340A\001\"n\n\024ListClu"
+ + "stersResponse\0228\n\010clusters\030\001 \003(\0132!.google"
+ + ".cloud.dataproc.v1.ClusterB\003\340A\003\022\034\n\017next_"
+ + "page_token\030\002 \001(\tB\003\340A\003\"a\n\026DiagnoseCluster"
+ + "Request\022\027\n\nproject_id\030\001 \001(\tB\003\340A\002\022\023\n\006regi"
+ + "on\030\003 \001(\tB\003\340A\002\022\031\n\014cluster_name\030\002 \001(\tB\003\340A\002"
+ + "\"1\n\026DiagnoseClusterResults\022\027\n\noutput_uri"
+ + "\030\001 \001(\tB\003\340A\003\"\370\001\n\023ReservationAffinity\022Y\n\030c"
+ + "onsume_reservation_type\030\001 \001(\01622.google.c"
+ + "loud.dataproc.v1.ReservationAffinity.Typ"
+ + "eB\003\340A\001\022\020\n\003key\030\002 \001(\tB\003\340A\001\022\023\n\006values\030\003 \003(\t"
+ + "B\003\340A\001\"_\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\022\n\016N"
+ + "O_RESERVATION\020\001\022\023\n\017ANY_RESERVATION\020\002\022\030\n\024"
+ + "SPECIFIC_RESERVATION\020\0032\344\020\n\021ClusterContro"
+ + "ller\022\200\002\n\rCreateCluster\022..google.cloud.da"
+ + "taproc.v1.CreateClusterRequest\032\035.google."
+ + "longrunning.Operation\"\237\001\202\323\344\223\002>\"3/v1/proj"
+ + "ects/{project_id}/regions/{region}/clust"
+ + "ers:\007cluster\332A\031project_id,region,cluster"
+ + "\312A<\n\007Cluster\0221google.cloud.dataproc.v1.C"
+ + "lusterOperationMetadata\022\250\002\n\rUpdateCluste"
+ + "r\022..google.cloud.dataproc.v1.UpdateClust"
+ "erRequest\032\035.google.longrunning.Operation"
- + "\"\221\001\202\323\344\223\002L\"G/v1/projects/{project_id}/reg"
- + "ions/{region}/clusters/{cluster_name}:st"
- + "op:\001*\312A<\n\007Cluster\0221google.cloud.dataproc"
- + ".v1.ClusterOperationMetadata\022\361\001\n\014StartCl"
- + "uster\022-.google.cloud.dataproc.v1.StartCl"
- + "usterRequest\032\035.google.longrunning.Operat"
- + "ion\"\222\001\202\323\344\223\002M\"H/v1/projects/{project_id}/"
- + "regions/{region}/clusters/{cluster_name}"
- + ":start:\001*\312A<\n\007Cluster\0221google.cloud.data"
- + "proc.v1.ClusterOperationMetadata\022\231\002\n\rDel"
- + "eteCluster\022..google.cloud.dataproc.v1.De"
- + "leteClusterRequest\032\035.google.longrunning."
- + "Operation\"\270\001\202\323\344\223\002D*B/v1/projects/{projec"
- + "t_id}/regions/{region}/clusters/{cluster"
- + "_name}\332A\036project_id,region,cluster_name\312"
- + "AJ\n\025google.protobuf.Empty\0221google.cloud."
- + "dataproc.v1.ClusterOperationMetadata\022\311\001\n"
- + "\nGetCluster\022+.google.cloud.dataproc.v1.G"
- + "etClusterRequest\032!.google.cloud.dataproc"
- + ".v1.Cluster\"k\202\323\344\223\002D\022B/v1/projects/{proje"
- + "ct_id}/regions/{region}/clusters/{cluste"
- + "r_name}\332A\036project_id,region,cluster_name"
- + "\022\331\001\n\014ListClusters\022-.google.cloud.datapro"
- + "c.v1.ListClustersRequest\032..google.cloud."
- + "dataproc.v1.ListClustersResponse\"j\202\323\344\223\0025"
- + "\0223/v1/projects/{project_id}/regions/{reg"
- + "ion}/clusters\332A\021project_id,region\332A\030proj"
- + "ect_id,region,filter\022\252\002\n\017DiagnoseCluster"
- + "\0220.google.cloud.dataproc.v1.DiagnoseClus"
- + "terRequest\032\035.google.longrunning.Operatio"
- + "n\"\305\001\202\323\344\223\002P\"K/v1/projects/{project_id}/re"
- + "gions/{region}/clusters/{cluster_name}:d"
- + "iagnose:\001*\332A\036project_id,region,cluster_n"
- + "ame\312AK\n\026DiagnoseClusterResults\0221google.c"
- + "loud.dataproc.v1.ClusterOperationMetadat"
- + "a\032K\312A\027dataproc.googleapis.com\322A.https://"
- + "www.googleapis.com/auth/cloud-platformB\263"
- + "\002\n\034com.google.cloud.dataproc.v1B\rCluster"
- + "sProtoP\001Z@google.golang.org/genproto/goo"
- + "gleapis/cloud/dataproc/v1;dataproc\352A^\n c"
- + "ontainer.googleapis.com/Cluster\022:project"
- + "s/{project}/locations/{location}/cluster"
- + "s/{cluster}\352A^\n metastore.googleapis.com"
- + "/Service\022:projects/{project}/locations/{"
- + "location}/services/{service}b\006proto3"
+ + "\"\307\001\202\323\344\223\002M2B/v1/projects/{project_id}/reg"
+ + "ions/{region}/clusters/{cluster_name}:\007c"
+ + "luster\332A2project_id,region,cluster_name,"
+ + "cluster,update_mask\312A<\n\007Cluster\0221google."
+ + "cloud.dataproc.v1.ClusterOperationMetada"
+ + "ta\022\356\001\n\013StopCluster\022,.google.cloud.datapr"
+ + "oc.v1.StopClusterRequest\032\035.google.longru"
+ + "nning.Operation\"\221\001\202\323\344\223\002L\"G/v1/projects/{"
+ + "project_id}/regions/{region}/clusters/{c"
+ + "luster_name}:stop:\001*\312A<\n\007Cluster\0221google"
+ + ".cloud.dataproc.v1.ClusterOperationMetad"
+ + "ata\022\361\001\n\014StartCluster\022-.google.cloud.data"
+ + "proc.v1.StartClusterRequest\032\035.google.lon"
+ + "grunning.Operation\"\222\001\202\323\344\223\002M\"H/v1/project"
+ + "s/{project_id}/regions/{region}/clusters"
+ + "/{cluster_name}:start:\001*\312A<\n\007Cluster\0221go"
+ + "ogle.cloud.dataproc.v1.ClusterOperationM"
+ + "etadata\022\231\002\n\rDeleteCluster\022..google.cloud"
+ + ".dataproc.v1.DeleteClusterRequest\032\035.goog"
+ + "le.longrunning.Operation\"\270\001\202\323\344\223\002D*B/v1/p"
+ + "rojects/{project_id}/regions/{region}/cl"
+ + "usters/{cluster_name}\332A\036project_id,regio"
+ + "n,cluster_name\312AJ\n\025google.protobuf.Empty"
+ + "\0221google.cloud.dataproc.v1.ClusterOperat"
+ + "ionMetadata\022\311\001\n\nGetCluster\022+.google.clou"
+ + "d.dataproc.v1.GetClusterRequest\032!.google"
+ + ".cloud.dataproc.v1.Cluster\"k\202\323\344\223\002D\022B/v1/"
+ + "projects/{project_id}/regions/{region}/c"
+ + "lusters/{cluster_name}\332A\036project_id,regi"
+ + "on,cluster_name\022\331\001\n\014ListClusters\022-.googl"
+ + "e.cloud.dataproc.v1.ListClustersRequest\032"
+ + "..google.cloud.dataproc.v1.ListClustersR"
+ + "esponse\"j\202\323\344\223\0025\0223/v1/projects/{project_i"
+ + "d}/regions/{region}/clusters\332A\021project_i"
+ + "d,region\332A\030project_id,region,filter\022\252\002\n\017"
+ + "DiagnoseCluster\0220.google.cloud.dataproc."
+ + "v1.DiagnoseClusterRequest\032\035.google.longr"
+ + "unning.Operation\"\305\001\202\323\344\223\002P\"K/v1/projects/"
+ + "{project_id}/regions/{region}/clusters/{"
+ + "cluster_name}:diagnose:\001*\332A\036project_id,r"
+ + "egion,cluster_name\312AK\n\026DiagnoseClusterRe"
+ + "sults\0221google.cloud.dataproc.v1.ClusterO"
+ + "perationMetadata\032K\312A\027dataproc.googleapis"
+ + ".com\322A.https://www.googleapis.com/auth/c"
+ + "loud-platformB\263\002\n\034com.google.cloud.datap"
+ + "roc.v1B\rClustersProtoP\001Z@google.golang.o"
+ + "rg/genproto/googleapis/cloud/dataproc/v1"
+ + ";dataproc\352A^\n container.googleapis.com/C"
+ + "luster\022:projects/{project}/locations/{lo"
+ + "cation}/clusters/{cluster}\352A^\n metastore"
+ + ".googleapis.com/Service\022:projects/{proje"
+ + "ct}/locations/{location}/services/{servi"
+ + "ce}b\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -506,6 +512,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId",
"ClusterName",
"Config",
+ "VirtualClusterConfig",
"Labels",
"Status",
"StatusHistory",
@@ -540,28 +547,29 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"LifecycleConfig",
"EndpointConfig",
"MetastoreConfig",
- "GkeClusterConfig",
});
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor =
+ internal_static_google_cloud_dataproc_v1_VirtualClusterConfig_descriptor =
getDescriptor().getMessageTypes().get(2);
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_fieldAccessorTable =
+ internal_static_google_cloud_dataproc_v1_VirtualClusterConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor,
+ internal_static_google_cloud_dataproc_v1_VirtualClusterConfig_descriptor,
new java.lang.String[] {
- "NamespacedGkeDeploymentTarget",
+ "StagingBucket",
+ "TempBucket",
+ "KubernetesClusterConfig",
+ "AuxiliaryServicesConfig",
+ "InfrastructureConfig",
});
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor =
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor
- .getNestedTypes()
- .get(0);
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_fieldAccessorTable =
+ internal_static_google_cloud_dataproc_v1_AuxiliaryServicesConfig_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_google_cloud_dataproc_v1_AuxiliaryServicesConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
- internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor,
+ internal_static_google_cloud_dataproc_v1_AuxiliaryServicesConfig_descriptor,
new java.lang.String[] {
- "TargetGkeCluster", "ClusterNamespace",
+ "MetastoreConfig", "SparkHistoryServerConfig",
});
internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor =
- getDescriptor().getMessageTypes().get(3);
+ getDescriptor().getMessageTypes().get(4);
internal_static_google_cloud_dataproc_v1_EndpointConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_EndpointConfig_descriptor,
@@ -577,7 +585,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Key", "Value",
});
internal_static_google_cloud_dataproc_v1_AutoscalingConfig_descriptor =
- getDescriptor().getMessageTypes().get(4);
+ getDescriptor().getMessageTypes().get(5);
internal_static_google_cloud_dataproc_v1_AutoscalingConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_AutoscalingConfig_descriptor,
@@ -585,7 +593,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"PolicyUri",
});
internal_static_google_cloud_dataproc_v1_EncryptionConfig_descriptor =
- getDescriptor().getMessageTypes().get(5);
+ getDescriptor().getMessageTypes().get(6);
internal_static_google_cloud_dataproc_v1_EncryptionConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_EncryptionConfig_descriptor,
@@ -593,7 +601,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"GcePdKmsKeyName",
});
internal_static_google_cloud_dataproc_v1_GceClusterConfig_descriptor =
- getDescriptor().getMessageTypes().get(6);
+ getDescriptor().getMessageTypes().get(7);
internal_static_google_cloud_dataproc_v1_GceClusterConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_GceClusterConfig_descriptor,
@@ -623,7 +631,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Key", "Value",
});
internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_NodeGroupAffinity_descriptor,
@@ -631,7 +639,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"NodeGroupUri",
});
internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ShieldedInstanceConfig_descriptor,
@@ -639,7 +647,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"EnableSecureBoot", "EnableVtpm", "EnableIntegrityMonitoring",
});
internal_static_google_cloud_dataproc_v1_ConfidentialInstanceConfig_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(10);
internal_static_google_cloud_dataproc_v1_ConfidentialInstanceConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ConfidentialInstanceConfig_descriptor,
@@ -647,7 +655,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"EnableConfidentialCompute",
});
internal_static_google_cloud_dataproc_v1_InstanceGroupConfig_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(11);
internal_static_google_cloud_dataproc_v1_InstanceGroupConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_InstanceGroupConfig_descriptor,
@@ -664,7 +672,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"MinCpuPlatform",
});
internal_static_google_cloud_dataproc_v1_ManagedGroupConfig_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(12);
internal_static_google_cloud_dataproc_v1_ManagedGroupConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ManagedGroupConfig_descriptor,
@@ -672,7 +680,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"InstanceTemplateName", "InstanceGroupManagerName",
});
internal_static_google_cloud_dataproc_v1_AcceleratorConfig_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(13);
internal_static_google_cloud_dataproc_v1_AcceleratorConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_AcceleratorConfig_descriptor,
@@ -680,7 +688,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"AcceleratorTypeUri", "AcceleratorCount",
});
internal_static_google_cloud_dataproc_v1_DiskConfig_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(14);
internal_static_google_cloud_dataproc_v1_DiskConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_DiskConfig_descriptor,
@@ -688,7 +696,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"BootDiskType", "BootDiskSizeGb", "NumLocalSsds", "LocalSsdInterface",
});
internal_static_google_cloud_dataproc_v1_NodeInitializationAction_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(15);
internal_static_google_cloud_dataproc_v1_NodeInitializationAction_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_NodeInitializationAction_descriptor,
@@ -696,7 +704,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ExecutableFile", "ExecutionTimeout",
});
internal_static_google_cloud_dataproc_v1_ClusterStatus_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(16);
internal_static_google_cloud_dataproc_v1_ClusterStatus_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ClusterStatus_descriptor,
@@ -704,7 +712,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"State", "Detail", "StateStartTime", "Substate",
});
internal_static_google_cloud_dataproc_v1_SecurityConfig_descriptor =
- getDescriptor().getMessageTypes().get(16);
+ getDescriptor().getMessageTypes().get(17);
internal_static_google_cloud_dataproc_v1_SecurityConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_SecurityConfig_descriptor,
@@ -712,7 +720,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"KerberosConfig", "IdentityConfig",
});
internal_static_google_cloud_dataproc_v1_KerberosConfig_descriptor =
- getDescriptor().getMessageTypes().get(17);
+ getDescriptor().getMessageTypes().get(18);
internal_static_google_cloud_dataproc_v1_KerberosConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_KerberosConfig_descriptor,
@@ -734,7 +742,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Realm",
});
internal_static_google_cloud_dataproc_v1_IdentityConfig_descriptor =
- getDescriptor().getMessageTypes().get(18);
+ getDescriptor().getMessageTypes().get(19);
internal_static_google_cloud_dataproc_v1_IdentityConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_IdentityConfig_descriptor,
@@ -750,7 +758,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Key", "Value",
});
internal_static_google_cloud_dataproc_v1_SoftwareConfig_descriptor =
- getDescriptor().getMessageTypes().get(19);
+ getDescriptor().getMessageTypes().get(20);
internal_static_google_cloud_dataproc_v1_SoftwareConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_SoftwareConfig_descriptor,
@@ -766,7 +774,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Key", "Value",
});
internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor =
- getDescriptor().getMessageTypes().get(20);
+ getDescriptor().getMessageTypes().get(21);
internal_static_google_cloud_dataproc_v1_LifecycleConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_LifecycleConfig_descriptor,
@@ -774,7 +782,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"IdleDeleteTtl", "AutoDeleteTime", "AutoDeleteTtl", "IdleStartTime", "Ttl",
});
internal_static_google_cloud_dataproc_v1_MetastoreConfig_descriptor =
- getDescriptor().getMessageTypes().get(21);
+ getDescriptor().getMessageTypes().get(22);
internal_static_google_cloud_dataproc_v1_MetastoreConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_MetastoreConfig_descriptor,
@@ -782,7 +790,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"DataprocMetastoreService",
});
internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor =
- getDescriptor().getMessageTypes().get(22);
+ getDescriptor().getMessageTypes().get(23);
internal_static_google_cloud_dataproc_v1_ClusterMetrics_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ClusterMetrics_descriptor,
@@ -806,7 +814,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Key", "Value",
});
internal_static_google_cloud_dataproc_v1_CreateClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(23);
+ getDescriptor().getMessageTypes().get(24);
internal_static_google_cloud_dataproc_v1_CreateClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_CreateClusterRequest_descriptor,
@@ -814,7 +822,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "Cluster", "RequestId", "ActionOnFailedPrimaryWorkers",
});
internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(24);
+ getDescriptor().getMessageTypes().get(25);
internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_UpdateClusterRequest_descriptor,
@@ -828,7 +836,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"RequestId",
});
internal_static_google_cloud_dataproc_v1_StopClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(25);
+ getDescriptor().getMessageTypes().get(26);
internal_static_google_cloud_dataproc_v1_StopClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_StopClusterRequest_descriptor,
@@ -836,7 +844,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "ClusterName", "ClusterUuid", "RequestId",
});
internal_static_google_cloud_dataproc_v1_StartClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(26);
+ getDescriptor().getMessageTypes().get(27);
internal_static_google_cloud_dataproc_v1_StartClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_StartClusterRequest_descriptor,
@@ -844,7 +852,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "ClusterName", "ClusterUuid", "RequestId",
});
internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(27);
+ getDescriptor().getMessageTypes().get(28);
internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_DeleteClusterRequest_descriptor,
@@ -852,7 +860,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "ClusterName", "ClusterUuid", "RequestId",
});
internal_static_google_cloud_dataproc_v1_GetClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(28);
+ getDescriptor().getMessageTypes().get(29);
internal_static_google_cloud_dataproc_v1_GetClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_GetClusterRequest_descriptor,
@@ -860,7 +868,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "ClusterName",
});
internal_static_google_cloud_dataproc_v1_ListClustersRequest_descriptor =
- getDescriptor().getMessageTypes().get(29);
+ getDescriptor().getMessageTypes().get(30);
internal_static_google_cloud_dataproc_v1_ListClustersRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ListClustersRequest_descriptor,
@@ -868,7 +876,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "Filter", "PageSize", "PageToken",
});
internal_static_google_cloud_dataproc_v1_ListClustersResponse_descriptor =
- getDescriptor().getMessageTypes().get(30);
+ getDescriptor().getMessageTypes().get(31);
internal_static_google_cloud_dataproc_v1_ListClustersResponse_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ListClustersResponse_descriptor,
@@ -876,7 +884,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Clusters", "NextPageToken",
});
internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_descriptor =
- getDescriptor().getMessageTypes().get(31);
+ getDescriptor().getMessageTypes().get(32);
internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_DiagnoseClusterRequest_descriptor,
@@ -884,7 +892,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"ProjectId", "Region", "ClusterName",
});
internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_descriptor =
- getDescriptor().getMessageTypes().get(32);
+ getDescriptor().getMessageTypes().get(33);
internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_DiagnoseClusterResults_descriptor,
@@ -892,7 +900,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"OutputUri",
});
internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor =
- getDescriptor().getMessageTypes().get(33);
+ getDescriptor().getMessageTypes().get(34);
internal_static_google_cloud_dataproc_v1_ReservationAffinity_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dataproc_v1_ReservationAffinity_descriptor,
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfig.java
index c3307375..756752ac 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfig.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfig.java
@@ -236,8 +236,8 @@ public int getNumLocalSsds() {
* Optional. Interface type of local SSDs (default is "scsi").
* Valid values: "scsi" (Small Computer System Interface),
* "nvme" (Non-Volatile Memory Express).
- * See [SSD Interface
- * types](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
+ * See [local SSD
+ * performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
*
*
* string local_ssd_interface = 4 [(.google.api.field_behavior) = OPTIONAL];
@@ -263,8 +263,8 @@ public java.lang.String getLocalSsdInterface() {
* Optional. Interface type of local SSDs (default is "scsi").
* Valid values: "scsi" (Small Computer System Interface),
* "nvme" (Non-Volatile Memory Express).
- * See [SSD Interface
- * types](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
+ * See [local SSD
+ * performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
*
*
* string local_ssd_interface = 4 [(.google.api.field_behavior) = OPTIONAL];
@@ -898,8 +898,8 @@ public Builder clearNumLocalSsds() {
* Optional. Interface type of local SSDs (default is "scsi").
* Valid values: "scsi" (Small Computer System Interface),
* "nvme" (Non-Volatile Memory Express).
- * See [SSD Interface
- * types](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
+ * See [local SSD
+ * performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
*
*
* string local_ssd_interface = 4 [(.google.api.field_behavior) = OPTIONAL];
@@ -924,8 +924,8 @@ public java.lang.String getLocalSsdInterface() {
* Optional. Interface type of local SSDs (default is "scsi").
* Valid values: "scsi" (Small Computer System Interface),
* "nvme" (Non-Volatile Memory Express).
- * See [SSD Interface
- * types](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
+ * See [local SSD
+ * performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
*
*
* string local_ssd_interface = 4 [(.google.api.field_behavior) = OPTIONAL];
@@ -950,8 +950,8 @@ public com.google.protobuf.ByteString getLocalSsdInterfaceBytes() {
* Optional. Interface type of local SSDs (default is "scsi").
* Valid values: "scsi" (Small Computer System Interface),
* "nvme" (Non-Volatile Memory Express).
- * See [SSD Interface
- * types](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
+ * See [local SSD
+ * performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
*
*
* string local_ssd_interface = 4 [(.google.api.field_behavior) = OPTIONAL];
@@ -975,8 +975,8 @@ public Builder setLocalSsdInterface(java.lang.String value) {
* Optional. Interface type of local SSDs (default is "scsi").
* Valid values: "scsi" (Small Computer System Interface),
* "nvme" (Non-Volatile Memory Express).
- * See [SSD Interface
- * types](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
+ * See [local SSD
+ * performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
*
*
* string local_ssd_interface = 4 [(.google.api.field_behavior) = OPTIONAL];
@@ -996,8 +996,8 @@ public Builder clearLocalSsdInterface() {
* Optional. Interface type of local SSDs (default is "scsi").
* Valid values: "scsi" (Small Computer System Interface),
* "nvme" (Non-Volatile Memory Express).
- * See [SSD Interface
- * types](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
+ * See [local SSD
+ * performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
*
*
* string local_ssd_interface = 4 [(.google.api.field_behavior) = OPTIONAL];
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfigOrBuilder.java
index 4dc5e534..050b28c8 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfigOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DiskConfigOrBuilder.java
@@ -94,8 +94,8 @@ public interface DiskConfigOrBuilder
* Optional. Interface type of local SSDs (default is "scsi").
* Valid values: "scsi" (Small Computer System Interface),
* "nvme" (Non-Volatile Memory Express).
- * See [SSD Interface
- * types](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
+ * See [local SSD
+ * performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
*
*
* string local_ssd_interface = 4 [(.google.api.field_behavior) = OPTIONAL];
@@ -110,8 +110,8 @@ public interface DiskConfigOrBuilder
* Optional. Interface type of local SSDs (default is "scsi").
* Valid values: "scsi" (Small Computer System Interface),
* "nvme" (Non-Volatile Memory Express).
- * See [SSD Interface
- * types](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
+ * See [local SSD
+ * performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
*
*
* string local_ssd_interface = 4 [(.google.api.field_behavior) = OPTIONAL];
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfig.java
index 75a2abf6..23b7bdf6 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfig.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfig.java
@@ -14,7 +14,7 @@
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: google/cloud/dataproc/v1/clusters.proto
+// source: google/cloud/dataproc/v1/shared.proto
package com.google.cloud.dataproc.v1;
@@ -22,7 +22,7 @@
*
*
*
- * The GKE config for this cluster.
+ * The cluster's GKE config.
*
*
* Protobuf type {@code google.cloud.dataproc.v1.GkeClusterConfig}
@@ -37,7 +37,10 @@ private GkeClusterConfig(com.google.protobuf.GeneratedMessageV3.Builder> build
super(builder);
}
- private GkeClusterConfig() {}
+ private GkeClusterConfig() {
+ gkeClusterTarget_ = "";
+ nodePoolTarget_ = java.util.Collections.emptyList();
+ }
@java.lang.Override
@SuppressWarnings({"unused"})
@@ -58,6 +61,7 @@ private GkeClusterConfig(
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
+ int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
@@ -68,23 +72,23 @@ private GkeClusterConfig(
case 0:
done = true;
break;
- case 10:
+ case 18:
+ {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ gkeClusterTarget_ = s;
+ break;
+ }
+ case 26:
{
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder
- subBuilder = null;
- if (namespacedGkeDeploymentTarget_ != null) {
- subBuilder = namespacedGkeDeploymentTarget_.toBuilder();
+ if (!((mutable_bitField0_ & 0x00000001) != 0)) {
+ nodePoolTarget_ =
+ new java.util.ArrayList();
+ mutable_bitField0_ |= 0x00000001;
}
- namespacedGkeDeploymentTarget_ =
+ nodePoolTarget_.add(
input.readMessage(
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- .parser(),
- extensionRegistry);
- if (subBuilder != null) {
- subBuilder.mergeFrom(namespacedGkeDeploymentTarget_);
- namespacedGkeDeploymentTarget_ = subBuilder.buildPartial();
- }
-
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.parser(), extensionRegistry));
break;
}
default:
@@ -101,993 +105,180 @@ private GkeClusterConfig(
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
} finally {
+ if (((mutable_bitField0_ & 0x00000001) != 0)) {
+ nodePoolTarget_ = java.util.Collections.unmodifiableList(nodePoolTarget_);
+ }
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
- return com.google.cloud.dataproc.v1.ClustersProto
+ return com.google.cloud.dataproc.v1.SharedProto
.internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.google.cloud.dataproc.v1.ClustersProto
+ return com.google.cloud.dataproc.v1.SharedProto
.internal_static_google_cloud_dataproc_v1_GkeClusterConfig_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.dataproc.v1.GkeClusterConfig.class,
com.google.cloud.dataproc.v1.GkeClusterConfig.Builder.class);
}
- public interface NamespacedGkeDeploymentTargetOrBuilder
- extends
- // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- *
- *
- *
- * Optional. The target GKE cluster to deploy to.
- * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
- *
- *
- *
- * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... }
- *
- *
- * @return The targetGkeCluster.
- */
- java.lang.String getTargetGkeCluster();
- /**
- *
- *
- *
- * Optional. The target GKE cluster to deploy to.
- * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
- *
- *
- *
- * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... }
- *
- *
- * @return The bytes for targetGkeCluster.
- */
- com.google.protobuf.ByteString getTargetGkeClusterBytes();
-
- /**
- *
- *
- *
- * Optional. A namespace within the GKE cluster to deploy into.
- *
- *
- * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL];
- *
- * @return The clusterNamespace.
- */
- java.lang.String getClusterNamespace();
- /**
- *
- *
- *
- * Optional. A namespace within the GKE cluster to deploy into.
- *
- *
- * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL];
- *
- * @return The bytes for clusterNamespace.
- */
- com.google.protobuf.ByteString getClusterNamespaceBytes();
- }
+ public static final int GKE_CLUSTER_TARGET_FIELD_NUMBER = 2;
+ private volatile java.lang.Object gkeClusterTarget_;
/**
*
*
*
- * A full, namespace-isolated deployment target for an existing GKE cluster.
+ * Optional. A target GKE cluster to deploy to. It must be in the same project and
+ * region as the Dataproc cluster (the GKE cluster can be zonal or regional).
+ * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
*
*
- * Protobuf type {@code google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget}
- */
- public static final class NamespacedGkeDeploymentTarget
- extends com.google.protobuf.GeneratedMessageV3
- implements
- // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget)
- NamespacedGkeDeploymentTargetOrBuilder {
- private static final long serialVersionUID = 0L;
- // Use NamespacedGkeDeploymentTarget.newBuilder() to construct.
- private NamespacedGkeDeploymentTarget(
- com.google.protobuf.GeneratedMessageV3.Builder> builder) {
- super(builder);
- }
-
- private NamespacedGkeDeploymentTarget() {
- targetGkeCluster_ = "";
- clusterNamespace_ = "";
- }
-
- @java.lang.Override
- @SuppressWarnings({"unused"})
- protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
- return new NamespacedGkeDeploymentTarget();
- }
-
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
- return this.unknownFields;
- }
-
- private NamespacedGkeDeploymentTarget(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- this();
- if (extensionRegistry == null) {
- throw new java.lang.NullPointerException();
- }
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- case 10:
- {
- java.lang.String s = input.readStringRequireUtf8();
-
- targetGkeCluster_ = s;
- break;
- }
- case 18:
- {
- java.lang.String s = input.readStringRequireUtf8();
-
- clusterNamespace_ = s;
- break;
- }
- default:
- {
- if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
-
- public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
- return com.google.cloud.dataproc.v1.ClustersProto
- .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor;
- }
-
- @java.lang.Override
- protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return com.google.cloud.dataproc.v1.ClustersProto
- .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.class,
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder
- .class);
- }
-
- public static final int TARGET_GKE_CLUSTER_FIELD_NUMBER = 1;
- private volatile java.lang.Object targetGkeCluster_;
- /**
- *
- *
- *
- * Optional. The target GKE cluster to deploy to.
- * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
- *
- *
- *
- * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... }
- *
- *
- * @return The targetGkeCluster.
- */
- @java.lang.Override
- public java.lang.String getTargetGkeCluster() {
- java.lang.Object ref = targetGkeCluster_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- targetGkeCluster_ = s;
- return s;
- }
- }
- /**
- *
- *
- *
- * Optional. The target GKE cluster to deploy to.
- * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
- *
- *
- *
- * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... }
- *
- *
- * @return The bytes for targetGkeCluster.
- */
- @java.lang.Override
- public com.google.protobuf.ByteString getTargetGkeClusterBytes() {
- java.lang.Object ref = targetGkeCluster_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
- targetGkeCluster_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- public static final int CLUSTER_NAMESPACE_FIELD_NUMBER = 2;
- private volatile java.lang.Object clusterNamespace_;
- /**
- *
- *
- *
- * Optional. A namespace within the GKE cluster to deploy into.
- *
- *
- * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL];
- *
- * @return The clusterNamespace.
- */
- @java.lang.Override
- public java.lang.String getClusterNamespace() {
- java.lang.Object ref = clusterNamespace_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- clusterNamespace_ = s;
- return s;
- }
- }
- /**
- *
- *
- *
- * Optional. A namespace within the GKE cluster to deploy into.
- *
- *
- * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL];
- *
- * @return The bytes for clusterNamespace.
- */
- @java.lang.Override
- public com.google.protobuf.ByteString getClusterNamespaceBytes() {
- java.lang.Object ref = clusterNamespace_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
- clusterNamespace_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- private byte memoizedIsInitialized = -1;
-
- @java.lang.Override
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- @java.lang.Override
- public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
- if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(targetGkeCluster_)) {
- com.google.protobuf.GeneratedMessageV3.writeString(output, 1, targetGkeCluster_);
- }
- if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(clusterNamespace_)) {
- com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterNamespace_);
- }
- unknownFields.writeTo(output);
- }
-
- @java.lang.Override
- public int getSerializedSize() {
- int size = memoizedSize;
- if (size != -1) return size;
-
- size = 0;
- if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(targetGkeCluster_)) {
- size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, targetGkeCluster_);
- }
- if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(clusterNamespace_)) {
- size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterNamespace_);
- }
- size += unknownFields.getSerializedSize();
- memoizedSize = size;
- return size;
- }
-
- @java.lang.Override
- public boolean equals(final java.lang.Object obj) {
- if (obj == this) {
- return true;
- }
- if (!(obj
- instanceof com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget)) {
- return super.equals(obj);
- }
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget other =
- (com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) obj;
-
- if (!getTargetGkeCluster().equals(other.getTargetGkeCluster())) return false;
- if (!getClusterNamespace().equals(other.getClusterNamespace())) return false;
- if (!unknownFields.equals(other.unknownFields)) return false;
- return true;
- }
-
- @java.lang.Override
- public int hashCode() {
- if (memoizedHashCode != 0) {
- return memoizedHashCode;
- }
- int hash = 41;
- hash = (19 * hash) + getDescriptor().hashCode();
- hash = (37 * hash) + TARGET_GKE_CLUSTER_FIELD_NUMBER;
- hash = (53 * hash) + getTargetGkeCluster().hashCode();
- hash = (37 * hash) + CLUSTER_NAMESPACE_FIELD_NUMBER;
- hash = (53 * hash) + getClusterNamespace().hashCode();
- hash = (29 * hash) + unknownFields.hashCode();
- memoizedHashCode = hash;
- return hash;
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseFrom(java.nio.ByteBuffer data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseFrom(
- java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseFrom(com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseFrom(java.io.InputStream input) throws java.io.IOException {
- return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseFrom(
- java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
- PARSER, input, extensionRegistry);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException {
- return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseDelimitedFrom(
- java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
- PARSER, input, extensionRegistry);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException {
- return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
- PARSER, input, extensionRegistry);
- }
-
- @java.lang.Override
- public Builder newBuilderForType() {
- return newBuilder();
- }
-
- public static Builder newBuilder() {
- return DEFAULT_INSTANCE.toBuilder();
- }
-
- public static Builder newBuilder(
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget prototype) {
- return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
- }
-
- @java.lang.Override
- public Builder toBuilder() {
- return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
- }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- *
- *
- *
- * A full, namespace-isolated deployment target for an existing GKE cluster.
- *
- *
- * Protobuf type {@code google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget}
- */
- public static final class Builder
- extends com.google.protobuf.GeneratedMessageV3.Builder
- implements
- // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget)
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
- return com.google.cloud.dataproc.v1.ClustersProto
- .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor;
- }
-
- @java.lang.Override
- protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return com.google.cloud.dataproc.v1.ClustersProto
- .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.class,
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder
- .class);
- }
-
- // Construct using
- // com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
-
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
- }
-
- @java.lang.Override
- public Builder clear() {
- super.clear();
- targetGkeCluster_ = "";
-
- clusterNamespace_ = "";
-
- return this;
- }
-
- @java.lang.Override
- public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
- return com.google.cloud.dataproc.v1.ClustersProto
- .internal_static_google_cloud_dataproc_v1_GkeClusterConfig_NamespacedGkeDeploymentTarget_descriptor;
- }
-
- @java.lang.Override
- public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- getDefaultInstanceForType() {
- return com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- .getDefaultInstance();
- }
-
- @java.lang.Override
- public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget build() {
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget result =
- buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- @java.lang.Override
- public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- buildPartial() {
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget result =
- new com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget(this);
- result.targetGkeCluster_ = targetGkeCluster_;
- result.clusterNamespace_ = clusterNamespace_;
- onBuilt();
- return result;
- }
-
- @java.lang.Override
- public Builder clone() {
- return super.clone();
- }
-
- @java.lang.Override
- public Builder setField(
- com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
- return super.setField(field, value);
- }
-
- @java.lang.Override
- public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
- return super.clearField(field);
- }
-
- @java.lang.Override
- public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
- return super.clearOneof(oneof);
- }
-
- @java.lang.Override
- public Builder setRepeatedField(
- com.google.protobuf.Descriptors.FieldDescriptor field,
- int index,
- java.lang.Object value) {
- return super.setRepeatedField(field, index, value);
- }
-
- @java.lang.Override
- public Builder addRepeatedField(
- com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
- return super.addRepeatedField(field, value);
- }
-
- @java.lang.Override
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other
- instanceof
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) {
- return mergeFrom(
- (com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget) other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget other) {
- if (other
- == com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- .getDefaultInstance()) return this;
- if (!other.getTargetGkeCluster().isEmpty()) {
- targetGkeCluster_ = other.targetGkeCluster_;
- onChanged();
- }
- if (!other.getClusterNamespace().isEmpty()) {
- clusterNamespace_ = other.clusterNamespace_;
- onChanged();
- }
- this.mergeUnknownFields(other.unknownFields);
- onChanged();
- return this;
- }
-
- @java.lang.Override
- public final boolean isInitialized() {
- return true;
- }
-
- @java.lang.Override
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget parsedMessage =
- null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage =
- (com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget)
- e.getUnfinishedMessage();
- throw e.unwrapIOException();
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
-
- private java.lang.Object targetGkeCluster_ = "";
- /**
- *
- *
- *
- * Optional. The target GKE cluster to deploy to.
- * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
- *
- *
- *
- * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... }
- *
- *
- * @return The targetGkeCluster.
- */
- public java.lang.String getTargetGkeCluster() {
- java.lang.Object ref = targetGkeCluster_;
- if (!(ref instanceof java.lang.String)) {
- com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- targetGkeCluster_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- *
- *
- *
- * Optional. The target GKE cluster to deploy to.
- * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
- *
- *
- *
- * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... }
- *
- *
- * @return The bytes for targetGkeCluster.
- */
- public com.google.protobuf.ByteString getTargetGkeClusterBytes() {
- java.lang.Object ref = targetGkeCluster_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
- targetGkeCluster_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- *
- *
- *
- * Optional. The target GKE cluster to deploy to.
- * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
- *
- *
- *
- * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... }
- *
- *
- * @param value The targetGkeCluster to set.
- * @return This builder for chaining.
- */
- public Builder setTargetGkeCluster(java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
-
- targetGkeCluster_ = value;
- onChanged();
- return this;
- }
- /**
- *
- *
- *
- * Optional. The target GKE cluster to deploy to.
- * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
- *
- *
- *
- * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... }
- *
- *
- * @return This builder for chaining.
- */
- public Builder clearTargetGkeCluster() {
-
- targetGkeCluster_ = getDefaultInstance().getTargetGkeCluster();
- onChanged();
- return this;
- }
- /**
- *
- *
- *
- * Optional. The target GKE cluster to deploy to.
- * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
- *
- *
- *
- * string target_gke_cluster = 1 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... }
- *
- *
- * @param value The bytes for targetGkeCluster to set.
- * @return This builder for chaining.
- */
- public Builder setTargetGkeClusterBytes(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- checkByteStringIsUtf8(value);
-
- targetGkeCluster_ = value;
- onChanged();
- return this;
- }
-
- private java.lang.Object clusterNamespace_ = "";
- /**
- *
- *
- *
- * Optional. A namespace within the GKE cluster to deploy into.
- *
- *
- * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL];
- *
- * @return The clusterNamespace.
- */
- public java.lang.String getClusterNamespace() {
- java.lang.Object ref = clusterNamespace_;
- if (!(ref instanceof java.lang.String)) {
- com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- clusterNamespace_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- *
- *
- *
- * Optional. A namespace within the GKE cluster to deploy into.
- *
- *
- * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL];
- *
- * @return The bytes for clusterNamespace.
- */
- public com.google.protobuf.ByteString getClusterNamespaceBytes() {
- java.lang.Object ref = clusterNamespace_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
- clusterNamespace_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- *
- *
- *
- * Optional. A namespace within the GKE cluster to deploy into.
- *
- *
- * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL];
- *
- * @param value The clusterNamespace to set.
- * @return This builder for chaining.
- */
- public Builder setClusterNamespace(java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
-
- clusterNamespace_ = value;
- onChanged();
- return this;
- }
- /**
- *
- *
- *
- * Optional. A namespace within the GKE cluster to deploy into.
- *
- *
- * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL];
- *
- * @return This builder for chaining.
- */
- public Builder clearClusterNamespace() {
-
- clusterNamespace_ = getDefaultInstance().getClusterNamespace();
- onChanged();
- return this;
- }
- /**
- *
- *
- *
- * Optional. A namespace within the GKE cluster to deploy into.
- *
- *
- * string cluster_namespace = 2 [(.google.api.field_behavior) = OPTIONAL];
- *
- * @param value The bytes for clusterNamespace to set.
- * @return This builder for chaining.
- */
- public Builder setClusterNamespaceBytes(com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- checkByteStringIsUtf8(value);
-
- clusterNamespace_ = value;
- onChanged();
- return this;
- }
-
- @java.lang.Override
- public final Builder setUnknownFields(
- final com.google.protobuf.UnknownFieldSet unknownFields) {
- return super.setUnknownFields(unknownFields);
- }
-
- @java.lang.Override
- public final Builder mergeUnknownFields(
- final com.google.protobuf.UnknownFieldSet unknownFields) {
- return super.mergeUnknownFields(unknownFields);
- }
-
- // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget)
- }
-
- // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget)
- private static final com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- DEFAULT_INSTANCE;
-
- static {
- DEFAULT_INSTANCE =
- new com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget();
- }
-
- public static com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- getDefaultInstance() {
- return DEFAULT_INSTANCE;
- }
-
- private static final com.google.protobuf.Parser PARSER =
- new com.google.protobuf.AbstractParser() {
- @java.lang.Override
- public NamespacedGkeDeploymentTarget parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new NamespacedGkeDeploymentTarget(input, extensionRegistry);
- }
- };
-
- public static com.google.protobuf.Parser parser() {
- return PARSER;
- }
-
- @java.lang.Override
- public com.google.protobuf.Parser getParserForType() {
- return PARSER;
+ * string gke_cluster_target = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The gkeClusterTarget.
+ */
+ @java.lang.Override
+ public java.lang.String getGkeClusterTarget() {
+ java.lang.Object ref = gkeClusterTarget_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ gkeClusterTarget_ = s;
+ return s;
}
-
- @java.lang.Override
- public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- getDefaultInstanceForType() {
- return DEFAULT_INSTANCE;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A target GKE cluster to deploy to. It must be in the same project and
+ * region as the Dataproc cluster (the GKE cluster can be zonal or regional).
+ * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+ *
+ *
+ * string gke_cluster_target = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for gkeClusterTarget.
+ */
+ @java.lang.Override
+ public com.google.protobuf.ByteString getGkeClusterTargetBytes() {
+ java.lang.Object ref = gkeClusterTarget_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ gkeClusterTarget_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
}
}
- public static final int NAMESPACED_GKE_DEPLOYMENT_TARGET_FIELD_NUMBER = 1;
- private com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- namespacedGkeDeploymentTarget_;
+ public static final int NODE_POOL_TARGET_FIELD_NUMBER = 3;
+ private java.util.List nodePoolTarget_;
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
+ */
+ @java.lang.Override
+ public java.util.List getNodePoolTargetList() {
+ return nodePoolTarget_;
+ }
+ /**
+ *
*
- * @return Whether the namespacedGkeDeploymentTarget field is set.
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
*/
@java.lang.Override
- public boolean hasNamespacedGkeDeploymentTarget() {
- return namespacedGkeDeploymentTarget_ != null;
+ public java.util.List extends com.google.cloud.dataproc.v1.GkeNodePoolTargetOrBuilder>
+ getNodePoolTargetOrBuilderList() {
+ return nodePoolTarget_;
}
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
+ */
+ @java.lang.Override
+ public int getNodePoolTargetCount() {
+ return nodePoolTarget_.size();
+ }
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
*
- * @return The namespacedGkeDeploymentTarget.
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
*/
@java.lang.Override
- public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- getNamespacedGkeDeploymentTarget() {
- return namespacedGkeDeploymentTarget_ == null
- ? com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- .getDefaultInstance()
- : namespacedGkeDeploymentTarget_;
+ public com.google.cloud.dataproc.v1.GkeNodePoolTarget getNodePoolTarget(int index) {
+ return nodePoolTarget_.get(index);
}
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
*/
@java.lang.Override
- public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder
- getNamespacedGkeDeploymentTargetOrBuilder() {
- return getNamespacedGkeDeploymentTarget();
+ public com.google.cloud.dataproc.v1.GkeNodePoolTargetOrBuilder getNodePoolTargetOrBuilder(
+ int index) {
+ return nodePoolTarget_.get(index);
}
private byte memoizedIsInitialized = -1;
@@ -1104,8 +295,11 @@ public final boolean isInitialized() {
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
- if (namespacedGkeDeploymentTarget_ != null) {
- output.writeMessage(1, getNamespacedGkeDeploymentTarget());
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(gkeClusterTarget_)) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 2, gkeClusterTarget_);
+ }
+ for (int i = 0; i < nodePoolTarget_.size(); i++) {
+ output.writeMessage(3, nodePoolTarget_.get(i));
}
unknownFields.writeTo(output);
}
@@ -1116,10 +310,11 @@ public int getSerializedSize() {
if (size != -1) return size;
size = 0;
- if (namespacedGkeDeploymentTarget_ != null) {
- size +=
- com.google.protobuf.CodedOutputStream.computeMessageSize(
- 1, getNamespacedGkeDeploymentTarget());
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(gkeClusterTarget_)) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, gkeClusterTarget_);
+ }
+ for (int i = 0; i < nodePoolTarget_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, nodePoolTarget_.get(i));
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
@@ -1137,12 +332,8 @@ public boolean equals(final java.lang.Object obj) {
com.google.cloud.dataproc.v1.GkeClusterConfig other =
(com.google.cloud.dataproc.v1.GkeClusterConfig) obj;
- if (hasNamespacedGkeDeploymentTarget() != other.hasNamespacedGkeDeploymentTarget())
- return false;
- if (hasNamespacedGkeDeploymentTarget()) {
- if (!getNamespacedGkeDeploymentTarget().equals(other.getNamespacedGkeDeploymentTarget()))
- return false;
- }
+ if (!getGkeClusterTarget().equals(other.getGkeClusterTarget())) return false;
+ if (!getNodePoolTargetList().equals(other.getNodePoolTargetList())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@@ -1154,9 +345,11 @@ public int hashCode() {
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
- if (hasNamespacedGkeDeploymentTarget()) {
- hash = (37 * hash) + NAMESPACED_GKE_DEPLOYMENT_TARGET_FIELD_NUMBER;
- hash = (53 * hash) + getNamespacedGkeDeploymentTarget().hashCode();
+ hash = (37 * hash) + GKE_CLUSTER_TARGET_FIELD_NUMBER;
+ hash = (53 * hash) + getGkeClusterTarget().hashCode();
+ if (getNodePoolTargetCount() > 0) {
+ hash = (37 * hash) + NODE_POOL_TARGET_FIELD_NUMBER;
+ hash = (53 * hash) + getNodePoolTargetList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
@@ -1262,7 +455,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * The GKE config for this cluster.
+ * The cluster's GKE config.
*
*
* Protobuf type {@code google.cloud.dataproc.v1.GkeClusterConfig}
@@ -1272,14 +465,14 @@ public static final class Builder extends com.google.protobuf.GeneratedMessageV3
// @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.GkeClusterConfig)
com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
- return com.google.cloud.dataproc.v1.ClustersProto
+ return com.google.cloud.dataproc.v1.SharedProto
.internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.google.cloud.dataproc.v1.ClustersProto
+ return com.google.cloud.dataproc.v1.SharedProto
.internal_static_google_cloud_dataproc_v1_GkeClusterConfig_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.cloud.dataproc.v1.GkeClusterConfig.class,
@@ -1297,24 +490,28 @@ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
}
private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
+ getNodePoolTargetFieldBuilder();
+ }
}
@java.lang.Override
public Builder clear() {
super.clear();
- if (namespacedGkeDeploymentTargetBuilder_ == null) {
- namespacedGkeDeploymentTarget_ = null;
+ gkeClusterTarget_ = "";
+
+ if (nodePoolTargetBuilder_ == null) {
+ nodePoolTarget_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
} else {
- namespacedGkeDeploymentTarget_ = null;
- namespacedGkeDeploymentTargetBuilder_ = null;
+ nodePoolTargetBuilder_.clear();
}
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
- return com.google.cloud.dataproc.v1.ClustersProto
+ return com.google.cloud.dataproc.v1.SharedProto
.internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor;
}
@@ -1336,10 +533,16 @@ public com.google.cloud.dataproc.v1.GkeClusterConfig build() {
public com.google.cloud.dataproc.v1.GkeClusterConfig buildPartial() {
com.google.cloud.dataproc.v1.GkeClusterConfig result =
new com.google.cloud.dataproc.v1.GkeClusterConfig(this);
- if (namespacedGkeDeploymentTargetBuilder_ == null) {
- result.namespacedGkeDeploymentTarget_ = namespacedGkeDeploymentTarget_;
+ int from_bitField0_ = bitField0_;
+ result.gkeClusterTarget_ = gkeClusterTarget_;
+ if (nodePoolTargetBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) != 0)) {
+ nodePoolTarget_ = java.util.Collections.unmodifiableList(nodePoolTarget_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.nodePoolTarget_ = nodePoolTarget_;
} else {
- result.namespacedGkeDeploymentTarget_ = namespacedGkeDeploymentTargetBuilder_.build();
+ result.nodePoolTarget_ = nodePoolTargetBuilder_.build();
}
onBuilt();
return result;
@@ -1390,8 +593,36 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
public Builder mergeFrom(com.google.cloud.dataproc.v1.GkeClusterConfig other) {
if (other == com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance()) return this;
- if (other.hasNamespacedGkeDeploymentTarget()) {
- mergeNamespacedGkeDeploymentTarget(other.getNamespacedGkeDeploymentTarget());
+ if (!other.getGkeClusterTarget().isEmpty()) {
+ gkeClusterTarget_ = other.gkeClusterTarget_;
+ onChanged();
+ }
+ if (nodePoolTargetBuilder_ == null) {
+ if (!other.nodePoolTarget_.isEmpty()) {
+ if (nodePoolTarget_.isEmpty()) {
+ nodePoolTarget_ = other.nodePoolTarget_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureNodePoolTargetIsMutable();
+ nodePoolTarget_.addAll(other.nodePoolTarget_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.nodePoolTarget_.isEmpty()) {
+ if (nodePoolTargetBuilder_.isEmpty()) {
+ nodePoolTargetBuilder_.dispose();
+ nodePoolTargetBuilder_ = null;
+ nodePoolTarget_ = other.nodePoolTarget_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ nodePoolTargetBuilder_ =
+ com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
+ ? getNodePoolTargetFieldBuilder()
+ : null;
+ } else {
+ nodePoolTargetBuilder_.addAllMessages(other.nodePoolTarget_);
+ }
+ }
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
@@ -1422,220 +653,590 @@ public Builder mergeFrom(
return this;
}
- private com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- namespacedGkeDeploymentTarget_;
- private com.google.protobuf.SingleFieldBuilderV3<
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget,
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder,
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder>
- namespacedGkeDeploymentTargetBuilder_;
+ private int bitField0_;
+
+ private java.lang.Object gkeClusterTarget_ = "";
+ /**
+ *
+ *
+ *
+ * Optional. A target GKE cluster to deploy to. It must be in the same project and
+ * region as the Dataproc cluster (the GKE cluster can be zonal or regional).
+ * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+ *
+ *
+ * string gke_cluster_target = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The gkeClusterTarget.
+ */
+ public java.lang.String getGkeClusterTarget() {
+ java.lang.Object ref = gkeClusterTarget_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ gkeClusterTarget_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A target GKE cluster to deploy to. It must be in the same project and
+ * region as the Dataproc cluster (the GKE cluster can be zonal or regional).
+ * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+ *
+ *
+ * string gke_cluster_target = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for gkeClusterTarget.
+ */
+ public com.google.protobuf.ByteString getGkeClusterTargetBytes() {
+ java.lang.Object ref = gkeClusterTarget_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ gkeClusterTarget_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A target GKE cluster to deploy to. It must be in the same project and
+ * region as the Dataproc cluster (the GKE cluster can be zonal or regional).
+ * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+ *
+ *
+ * string gke_cluster_target = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The gkeClusterTarget to set.
+ * @return This builder for chaining.
+ */
+ public Builder setGkeClusterTarget(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ gkeClusterTarget_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A target GKE cluster to deploy to. It must be in the same project and
+ * region as the Dataproc cluster (the GKE cluster can be zonal or regional).
+ * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+ *
+ *
+ * string gke_cluster_target = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearGkeClusterTarget() {
+
+ gkeClusterTarget_ = getDefaultInstance().getGkeClusterTarget();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A target GKE cluster to deploy to. It must be in the same project and
+ * region as the Dataproc cluster (the GKE cluster can be zonal or regional).
+ * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+ *
+ *
+ * string gke_cluster_target = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The bytes for gkeClusterTarget to set.
+ * @return This builder for chaining.
+ */
+ public Builder setGkeClusterTargetBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ gkeClusterTarget_ = value;
+ onChanged();
+ return this;
+ }
+
+ private java.util.List nodePoolTarget_ =
+ java.util.Collections.emptyList();
+
+ private void ensureNodePoolTargetIsMutable() {
+ if (!((bitField0_ & 0x00000001) != 0)) {
+ nodePoolTarget_ =
+ new java.util.ArrayList(
+ nodePoolTarget_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget,
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolTargetOrBuilder>
+ nodePoolTargetBuilder_;
+
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
- *
- * @return Whether the namespacedGkeDeploymentTarget field is set.
*/
- public boolean hasNamespacedGkeDeploymentTarget() {
- return namespacedGkeDeploymentTargetBuilder_ != null
- || namespacedGkeDeploymentTarget_ != null;
+ public java.util.List getNodePoolTargetList() {
+ if (nodePoolTargetBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(nodePoolTarget_);
+ } else {
+ return nodePoolTargetBuilder_.getMessageList();
+ }
}
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
+ */
+ public int getNodePoolTargetCount() {
+ if (nodePoolTargetBuilder_ == null) {
+ return nodePoolTarget_.size();
+ } else {
+ return nodePoolTargetBuilder_.getCount();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
*
- * @return The namespacedGkeDeploymentTarget.
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
*/
- public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- getNamespacedGkeDeploymentTarget() {
- if (namespacedGkeDeploymentTargetBuilder_ == null) {
- return namespacedGkeDeploymentTarget_ == null
- ? com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- .getDefaultInstance()
- : namespacedGkeDeploymentTarget_;
+ public com.google.cloud.dataproc.v1.GkeNodePoolTarget getNodePoolTarget(int index) {
+ if (nodePoolTargetBuilder_ == null) {
+ return nodePoolTarget_.get(index);
} else {
- return namespacedGkeDeploymentTargetBuilder_.getMessage();
+ return nodePoolTargetBuilder_.getMessage(index);
}
}
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
*/
- public Builder setNamespacedGkeDeploymentTarget(
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget value) {
- if (namespacedGkeDeploymentTargetBuilder_ == null) {
+ public Builder setNodePoolTarget(
+ int index, com.google.cloud.dataproc.v1.GkeNodePoolTarget value) {
+ if (nodePoolTargetBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
- namespacedGkeDeploymentTarget_ = value;
+ ensureNodePoolTargetIsMutable();
+ nodePoolTarget_.set(index, value);
onChanged();
} else {
- namespacedGkeDeploymentTargetBuilder_.setMessage(value);
+ nodePoolTargetBuilder_.setMessage(index, value);
}
-
return this;
}
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
*/
- public Builder setNamespacedGkeDeploymentTarget(
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder
- builderForValue) {
- if (namespacedGkeDeploymentTargetBuilder_ == null) {
- namespacedGkeDeploymentTarget_ = builderForValue.build();
+ public Builder setNodePoolTarget(
+ int index, com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder builderForValue) {
+ if (nodePoolTargetBuilder_ == null) {
+ ensureNodePoolTargetIsMutable();
+ nodePoolTarget_.set(index, builderForValue.build());
onChanged();
} else {
- namespacedGkeDeploymentTargetBuilder_.setMessage(builderForValue.build());
+ nodePoolTargetBuilder_.setMessage(index, builderForValue.build());
}
-
return this;
}
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
*/
- public Builder mergeNamespacedGkeDeploymentTarget(
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget value) {
- if (namespacedGkeDeploymentTargetBuilder_ == null) {
- if (namespacedGkeDeploymentTarget_ != null) {
- namespacedGkeDeploymentTarget_ =
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- .newBuilder(namespacedGkeDeploymentTarget_)
- .mergeFrom(value)
- .buildPartial();
- } else {
- namespacedGkeDeploymentTarget_ = value;
+ public Builder addNodePoolTarget(com.google.cloud.dataproc.v1.GkeNodePoolTarget value) {
+ if (nodePoolTargetBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
}
+ ensureNodePoolTargetIsMutable();
+ nodePoolTarget_.add(value);
onChanged();
} else {
- namespacedGkeDeploymentTargetBuilder_.mergeFrom(value);
+ nodePoolTargetBuilder_.addMessage(value);
}
-
return this;
}
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
*/
- public Builder clearNamespacedGkeDeploymentTarget() {
- if (namespacedGkeDeploymentTargetBuilder_ == null) {
- namespacedGkeDeploymentTarget_ = null;
+ public Builder addNodePoolTarget(
+ int index, com.google.cloud.dataproc.v1.GkeNodePoolTarget value) {
+ if (nodePoolTargetBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureNodePoolTargetIsMutable();
+ nodePoolTarget_.add(index, value);
onChanged();
} else {
- namespacedGkeDeploymentTarget_ = null;
- namespacedGkeDeploymentTargetBuilder_ = null;
+ nodePoolTargetBuilder_.addMessage(index, value);
}
-
return this;
}
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
*/
- public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder
- getNamespacedGkeDeploymentTargetBuilder() {
-
- onChanged();
- return getNamespacedGkeDeploymentTargetFieldBuilder().getBuilder();
+ public Builder addNodePoolTarget(
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder builderForValue) {
+ if (nodePoolTargetBuilder_ == null) {
+ ensureNodePoolTargetIsMutable();
+ nodePoolTarget_.add(builderForValue.build());
+ onChanged();
+ } else {
+ nodePoolTargetBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder addNodePoolTarget(
+ int index, com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder builderForValue) {
+ if (nodePoolTargetBuilder_ == null) {
+ ensureNodePoolTargetIsMutable();
+ nodePoolTarget_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ nodePoolTargetBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder addAllNodePoolTarget(
+ java.lang.Iterable extends com.google.cloud.dataproc.v1.GkeNodePoolTarget> values) {
+ if (nodePoolTargetBuilder_ == null) {
+ ensureNodePoolTargetIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, nodePoolTarget_);
+ onChanged();
+ } else {
+ nodePoolTargetBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder clearNodePoolTarget() {
+ if (nodePoolTargetBuilder_ == null) {
+ nodePoolTarget_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ nodePoolTargetBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder removeNodePoolTarget(int index) {
+ if (nodePoolTargetBuilder_ == null) {
+ ensureNodePoolTargetIsMutable();
+ nodePoolTarget_.remove(index);
+ onChanged();
+ } else {
+ nodePoolTargetBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder getNodePoolTargetBuilder(
+ int index) {
+ return getNodePoolTargetFieldBuilder().getBuilder(index);
}
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
*/
- public com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder
- getNamespacedGkeDeploymentTargetOrBuilder() {
- if (namespacedGkeDeploymentTargetBuilder_ != null) {
- return namespacedGkeDeploymentTargetBuilder_.getMessageOrBuilder();
+ public com.google.cloud.dataproc.v1.GkeNodePoolTargetOrBuilder getNodePoolTargetOrBuilder(
+ int index) {
+ if (nodePoolTargetBuilder_ == null) {
+ return nodePoolTarget_.get(index);
} else {
- return namespacedGkeDeploymentTarget_ == null
- ? com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- .getDefaultInstance()
- : namespacedGkeDeploymentTarget_;
+ return nodePoolTargetBuilder_.getMessageOrBuilder(index);
}
}
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public java.util.List extends com.google.cloud.dataproc.v1.GkeNodePoolTargetOrBuilder>
+ getNodePoolTargetOrBuilderList() {
+ if (nodePoolTargetBuilder_ != null) {
+ return nodePoolTargetBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(nodePoolTarget_);
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder addNodePoolTargetBuilder() {
+ return getNodePoolTargetFieldBuilder()
+ .addBuilder(com.google.cloud.dataproc.v1.GkeNodePoolTarget.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder addNodePoolTargetBuilder(
+ int index) {
+ return getNodePoolTargetFieldBuilder()
+ .addBuilder(index, com.google.cloud.dataproc.v1.GkeNodePoolTarget.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
*/
- private com.google.protobuf.SingleFieldBuilderV3<
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget,
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder,
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder>
- getNamespacedGkeDeploymentTargetFieldBuilder() {
- if (namespacedGkeDeploymentTargetBuilder_ == null) {
- namespacedGkeDeploymentTargetBuilder_ =
- new com.google.protobuf.SingleFieldBuilderV3<
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget,
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget.Builder,
- com.google.cloud.dataproc.v1.GkeClusterConfig
- .NamespacedGkeDeploymentTargetOrBuilder>(
- getNamespacedGkeDeploymentTarget(), getParentForChildren(), isClean());
- namespacedGkeDeploymentTarget_ = null;
+ public java.util.List
+ getNodePoolTargetBuilderList() {
+ return getNodePoolTargetFieldBuilder().getBuilderList();
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget,
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolTargetOrBuilder>
+ getNodePoolTargetFieldBuilder() {
+ if (nodePoolTargetBuilder_ == null) {
+ nodePoolTargetBuilder_ =
+ new com.google.protobuf.RepeatedFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget,
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolTargetOrBuilder>(
+ nodePoolTarget_,
+ ((bitField0_ & 0x00000001) != 0),
+ getParentForChildren(),
+ isClean());
+ nodePoolTarget_ = null;
}
- return namespacedGkeDeploymentTargetBuilder_;
+ return nodePoolTargetBuilder_;
}
@java.lang.Override
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfigOrBuilder.java
index 994f7dbc..bc7bd12a 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfigOrBuilder.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeClusterConfigOrBuilder.java
@@ -14,7 +14,7 @@
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: google/cloud/dataproc/v1/clusters.proto
+// source: google/cloud/dataproc/v1/shared.proto
package com.google.cloud.dataproc.v1;
@@ -27,42 +27,110 @@ public interface GkeClusterConfigOrBuilder
*
*
*
- * Optional. A target for the deployment.
+ * Optional. A target GKE cluster to deploy to. It must be in the same project and
+ * region as the Dataproc cluster (the GKE cluster can be zonal or regional).
+ * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+ *
+ *
+ * string gke_cluster_target = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The gkeClusterTarget.
+ */
+ java.lang.String getGkeClusterTarget();
+ /**
+ *
+ *
+ *
+ * Optional. A target GKE cluster to deploy to. It must be in the same project and
+ * region as the Dataproc cluster (the GKE cluster can be zonal or regional).
+ * Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+ *
+ *
+ * string gke_cluster_target = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for gkeClusterTarget.
+ */
+ com.google.protobuf.ByteString getGkeClusterTargetBytes();
+
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
+ */
+  java.util.List<com.google.cloud.dataproc.v1.GkeNodePoolTarget> getNodePoolTargetList();
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
*
- * @return Whether the namespacedGkeDeploymentTarget field is set.
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
*/
- boolean hasNamespacedGkeDeploymentTarget();
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget getNodePoolTarget(int index);
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
+ */
+ int getNodePoolTargetCount();
+ /**
+ *
+ *
+ *
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
+ *
*
- * @return The namespacedGkeDeploymentTarget.
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
*/
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget
- getNamespacedGkeDeploymentTarget();
+  java.util.List<? extends com.google.cloud.dataproc.v1.GkeNodePoolTargetOrBuilder>
+ getNodePoolTargetOrBuilderList();
/**
*
*
*
- * Optional. A target for the deployment.
+ * Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ * must be assigned the 'default' role. Each role can be given to only a
+ * single NodePoolTarget. All NodePools must have the same location settings.
+ * If a nodePoolTarget is not specified, Dataproc constructs a default
+ * nodePoolTarget.
*
*
*
- * .google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(.google.api.field_behavior) = OPTIONAL];
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget node_pool_target = 3 [(.google.api.field_behavior) = OPTIONAL];
*
*/
- com.google.cloud.dataproc.v1.GkeClusterConfig.NamespacedGkeDeploymentTargetOrBuilder
- getNamespacedGkeDeploymentTargetOrBuilder();
+ com.google.cloud.dataproc.v1.GkeNodePoolTargetOrBuilder getNodePoolTargetOrBuilder(int index);
}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolConfig.java
new file mode 100644
index 00000000..9bb277bd
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolConfig.java
@@ -0,0 +1,4761 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/shared.proto
+
+package com.google.cloud.dataproc.v1;
+
+/**
+ *
+ *
+ *
+ * The configuration of a GKE NodePool used by a [Dataproc-on-GKE
+ * cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.GkeNodePoolConfig}
+ */
+public final class GkeNodePoolConfig extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.GkeNodePoolConfig)
+ GkeNodePoolConfigOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use GkeNodePoolConfig.newBuilder() to construct.
+  private GkeNodePoolConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+
+ private GkeNodePoolConfig() {
+ locations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ }
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new GkeNodePoolConfig();
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ private GkeNodePoolConfig(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 18:
+ {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.Builder subBuilder =
+ null;
+ if (config_ != null) {
+ subBuilder = config_.toBuilder();
+ }
+ config_ =
+ input.readMessage(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.parser(),
+ extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(config_);
+ config_ = subBuilder.buildPartial();
+ }
+
+ break;
+ }
+ case 34:
+ {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.Builder
+ subBuilder = null;
+ if (autoscaling_ != null) {
+ subBuilder = autoscaling_.toBuilder();
+ }
+ autoscaling_ =
+ input.readMessage(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ .parser(),
+ extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(autoscaling_);
+ autoscaling_ = subBuilder.buildPartial();
+ }
+
+ break;
+ }
+ case 106:
+ {
+ java.lang.String s = input.readStringRequireUtf8();
+ if (!((mutable_bitField0_ & 0x00000001) != 0)) {
+ locations_ = new com.google.protobuf.LazyStringArrayList();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ locations_.add(s);
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) != 0)) {
+ locations_ = locations_.getUnmodifiableView();
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.class,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.Builder.class);
+ }
+
+ public interface GkeNodeConfigOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ *
+ *
+ *
+ * Optional. The name of a Compute Engine [machine
+ * type](https://cloud.google.com/compute/docs/machine-types).
+ *
+ *
+ * string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The machineType.
+ */
+ java.lang.String getMachineType();
+ /**
+ *
+ *
+ *
+ * Optional. The name of a Compute Engine [machine
+ * type](https://cloud.google.com/compute/docs/machine-types).
+ *
+ *
+ * string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for machineType.
+ */
+ com.google.protobuf.ByteString getMachineTypeBytes();
+
+ /**
+ *
+ *
+ *
+ * Optional. Whether the nodes are created as [preemptible VM
+ * instances](https://cloud.google.com/compute/docs/instances/preemptible).
+ *
+ *
+ * bool preemptible = 10 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The preemptible.
+ */
+ boolean getPreemptible();
+
+ /**
+ *
+ *
+ *
+ * Optional. The number of local SSD disks to attach to the node, which is limited by
+ * the maximum number of disks allowable per zone (see [Adding Local
+ * SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
+ *
+ *
+ * int32 local_ssd_count = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The localSsdCount.
+ */
+ int getLocalSsdCount();
+
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+    java.util.List<com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig>
+        getAcceleratorsList();
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig getAccelerators(
+ int index);
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ int getAcceleratorsCount();
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ java.util.List<
+ ? extends
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig
+ .GkeNodePoolAcceleratorConfigOrBuilder>
+ getAcceleratorsOrBuilderList();
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfigOrBuilder
+ getAcceleratorsOrBuilder(int index);
+
+ /**
+ *
+ *
+ *
+ * Optional. [Minimum CPU
+ * platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ * to be used by this instance. The instance may be scheduled on the
+ * specified or a newer CPU platform. Specify the friendly names of CPU
+ * platforms, such as "Intel Haswell"` or Intel Sandy Bridge".
+ *
+ *
+ * string min_cpu_platform = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The minCpuPlatform.
+ */
+ java.lang.String getMinCpuPlatform();
+ /**
+ *
+ *
+ *
+ * Optional. [Minimum CPU
+ * platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ * to be used by this instance. The instance may be scheduled on the
+ * specified or a newer CPU platform. Specify the friendly names of CPU
+ * platforms, such as "Intel Haswell"` or Intel Sandy Bridge".
+ *
+ *
+ * string min_cpu_platform = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for minCpuPlatform.
+ */
+ com.google.protobuf.ByteString getMinCpuPlatformBytes();
+ }
+ /**
+ *
+ *
+ *
+ * Parameters that describe cluster nodes.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig}
+ */
+ public static final class GkeNodeConfig extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig)
+ GkeNodeConfigOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use GkeNodeConfig.newBuilder() to construct.
+    private GkeNodeConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+
+ private GkeNodeConfig() {
+ machineType_ = "";
+ accelerators_ = java.util.Collections.emptyList();
+ minCpuPlatform_ = "";
+ }
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new GkeNodeConfig();
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ private GkeNodeConfig(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 10:
+ {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ machineType_ = s;
+ break;
+ }
+ case 56:
+ {
+ localSsdCount_ = input.readInt32();
+ break;
+ }
+ case 80:
+ {
+ preemptible_ = input.readBool();
+ break;
+ }
+ case 90:
+ {
+ if (!((mutable_bitField0_ & 0x00000001) != 0)) {
+ accelerators_ =
+ new java.util.ArrayList<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig
+ .GkeNodePoolAcceleratorConfig>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ accelerators_.add(
+ input.readMessage(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ .parser(),
+ extensionRegistry));
+ break;
+ }
+ case 106:
+ {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ minCpuPlatform_ = s;
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) != 0)) {
+ accelerators_ = java.util.Collections.unmodifiableList(accelerators_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodeConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodeConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.class,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.Builder.class);
+ }
+
+ public static final int MACHINE_TYPE_FIELD_NUMBER = 1;
+ private volatile java.lang.Object machineType_;
+ /**
+ *
+ *
+ *
+ * Optional. The name of a Compute Engine [machine
+ * type](https://cloud.google.com/compute/docs/machine-types).
+ *
+ *
+ * string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The machineType.
+ */
+ @java.lang.Override
+ public java.lang.String getMachineType() {
+ java.lang.Object ref = machineType_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ machineType_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The name of a Compute Engine [machine
+ * type](https://cloud.google.com/compute/docs/machine-types).
+ *
+ *
+ * string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for machineType.
+ */
+ @java.lang.Override
+ public com.google.protobuf.ByteString getMachineTypeBytes() {
+ java.lang.Object ref = machineType_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ machineType_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int PREEMPTIBLE_FIELD_NUMBER = 10;
+ private boolean preemptible_;
+ /**
+ *
+ *
+ *
+ * Optional. Whether the nodes are created as [preemptible VM
+ * instances](https://cloud.google.com/compute/docs/instances/preemptible).
+ *
+ *
+ * bool preemptible = 10 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The preemptible.
+ */
+ @java.lang.Override
+ public boolean getPreemptible() {
+ return preemptible_;
+ }
+
+ public static final int LOCAL_SSD_COUNT_FIELD_NUMBER = 7;
+ private int localSsdCount_;
+ /**
+ *
+ *
+ *
+ * Optional. The number of local SSD disks to attach to the node, which is limited by
+ * the maximum number of disks allowable per zone (see [Adding Local
+ * SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
+ *
+ *
+ * int32 local_ssd_count = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The localSsdCount.
+ */
+ @java.lang.Override
+ public int getLocalSsdCount() {
+ return localSsdCount_;
+ }
+
+ public static final int ACCELERATORS_FIELD_NUMBER = 11;
+ private java.util.List<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig>
+ accelerators_;
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public java.util.List<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig>
+ getAcceleratorsList() {
+ return accelerators_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public java.util.List<
+ ? extends
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig
+ .GkeNodePoolAcceleratorConfigOrBuilder>
+ getAcceleratorsOrBuilderList() {
+ return accelerators_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public int getAcceleratorsCount() {
+ return accelerators_.size();
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ getAccelerators(int index) {
+ return accelerators_.get(index);
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfigOrBuilder
+ getAcceleratorsOrBuilder(int index) {
+ return accelerators_.get(index);
+ }
+
+ public static final int MIN_CPU_PLATFORM_FIELD_NUMBER = 13;
+ private volatile java.lang.Object minCpuPlatform_;
+ /**
+ *
+ *
+ *
+ * Optional. [Minimum CPU
+ * platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ * to be used by this instance. The instance may be scheduled on the
+ * specified or a newer CPU platform. Specify the friendly names of CPU
+ * platforms, such as "Intel Haswell"` or Intel Sandy Bridge".
+ *
+ *
+ * string min_cpu_platform = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The minCpuPlatform.
+ */
+ @java.lang.Override
+ public java.lang.String getMinCpuPlatform() {
+ java.lang.Object ref = minCpuPlatform_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ minCpuPlatform_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. [Minimum CPU
+ * platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ * to be used by this instance. The instance may be scheduled on the
+ * specified or a newer CPU platform. Specify the friendly names of CPU
+ * platforms, such as "Intel Haswell"` or Intel Sandy Bridge".
+ *
+ *
+ * string min_cpu_platform = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for minCpuPlatform.
+ */
+ @java.lang.Override
+ public com.google.protobuf.ByteString getMinCpuPlatformBytes() {
+ java.lang.Object ref = minCpuPlatform_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ minCpuPlatform_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(machineType_)) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, machineType_);
+ }
+ if (localSsdCount_ != 0) {
+ output.writeInt32(7, localSsdCount_);
+ }
+ if (preemptible_ != false) {
+ output.writeBool(10, preemptible_);
+ }
+ for (int i = 0; i < accelerators_.size(); i++) {
+ output.writeMessage(11, accelerators_.get(i));
+ }
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(minCpuPlatform_)) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 13, minCpuPlatform_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(machineType_)) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, machineType_);
+ }
+ if (localSsdCount_ != 0) {
+ size += com.google.protobuf.CodedOutputStream.computeInt32Size(7, localSsdCount_);
+ }
+ if (preemptible_ != false) {
+ size += com.google.protobuf.CodedOutputStream.computeBoolSize(10, preemptible_);
+ }
+ for (int i = 0; i < accelerators_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, accelerators_.get(i));
+ }
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(minCpuPlatform_)) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, minCpuPlatform_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig other =
+ (com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig) obj;
+
+ if (!getMachineType().equals(other.getMachineType())) return false;
+ if (getPreemptible() != other.getPreemptible()) return false;
+ if (getLocalSsdCount() != other.getLocalSsdCount()) return false;
+ if (!getAcceleratorsList().equals(other.getAcceleratorsList())) return false;
+ if (!getMinCpuPlatform().equals(other.getMinCpuPlatform())) return false;
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + MACHINE_TYPE_FIELD_NUMBER;
+ hash = (53 * hash) + getMachineType().hashCode();
+ hash = (37 * hash) + PREEMPTIBLE_FIELD_NUMBER;
+ hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getPreemptible());
+ hash = (37 * hash) + LOCAL_SSD_COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + getLocalSsdCount();
+ if (getAcceleratorsCount() > 0) {
+ hash = (37 * hash) + ACCELERATORS_FIELD_NUMBER;
+ hash = (53 * hash) + getAcceleratorsList().hashCode();
+ }
+ hash = (37 * hash) + MIN_CPU_PLATFORM_FIELD_NUMBER;
+ hash = (53 * hash) + getMinCpuPlatform().hashCode();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseFrom(
+ byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * Parameters that describe cluster nodes.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig}
+ */
+ public static final class Builder
+ extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig)
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfigOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodeConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodeConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.class,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.Builder.class);
+ }
+
+ // Construct using com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
+ getAcceleratorsFieldBuilder();
+ }
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ machineType_ = "";
+
+ preemptible_ = false;
+
+ localSsdCount_ = 0;
+
+ if (acceleratorsBuilder_ == null) {
+ accelerators_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ acceleratorsBuilder_.clear();
+ }
+ minCpuPlatform_ = "";
+
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodeConfig_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig
+ getDefaultInstanceForType() {
+ return com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig build() {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig buildPartial() {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig result =
+ new com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig(this);
+ int from_bitField0_ = bitField0_;
+ result.machineType_ = machineType_;
+ result.preemptible_ = preemptible_;
+ result.localSsdCount_ = localSsdCount_;
+ if (acceleratorsBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) != 0)) {
+ accelerators_ = java.util.Collections.unmodifiableList(accelerators_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.accelerators_ = accelerators_;
+ } else {
+ result.accelerators_ = acceleratorsBuilder_.build();
+ }
+ result.minCpuPlatform_ = minCpuPlatform_;
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index,
+ java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig) {
+ return mergeFrom((com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig other) {
+ if (other
+ == com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.getDefaultInstance())
+ return this;
+ if (!other.getMachineType().isEmpty()) {
+ machineType_ = other.machineType_;
+ onChanged();
+ }
+ if (other.getPreemptible() != false) {
+ setPreemptible(other.getPreemptible());
+ }
+ if (other.getLocalSsdCount() != 0) {
+ setLocalSsdCount(other.getLocalSsdCount());
+ }
+ if (acceleratorsBuilder_ == null) {
+ if (!other.accelerators_.isEmpty()) {
+ if (accelerators_.isEmpty()) {
+ accelerators_ = other.accelerators_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureAcceleratorsIsMutable();
+ accelerators_.addAll(other.accelerators_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.accelerators_.isEmpty()) {
+ if (acceleratorsBuilder_.isEmpty()) {
+ acceleratorsBuilder_.dispose();
+ acceleratorsBuilder_ = null;
+ accelerators_ = other.accelerators_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ acceleratorsBuilder_ =
+ com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
+ ? getAcceleratorsFieldBuilder()
+ : null;
+ } else {
+ acceleratorsBuilder_.addAllMessages(other.accelerators_);
+ }
+ }
+ }
+ if (!other.getMinCpuPlatform().isEmpty()) {
+ minCpuPlatform_ = other.minCpuPlatform_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage =
+ (com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig)
+ e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ private int bitField0_;
+
+ private java.lang.Object machineType_ = "";
+ /**
+ *
+ *
+ *
+ * Optional. The name of a Compute Engine [machine
+ * type](https://cloud.google.com/compute/docs/machine-types).
+ *
+ *
+ * string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The machineType.
+ */
+ public java.lang.String getMachineType() {
+ java.lang.Object ref = machineType_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ machineType_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The name of a Compute Engine [machine
+ * type](https://cloud.google.com/compute/docs/machine-types).
+ *
+ *
+ * string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for machineType.
+ */
+ public com.google.protobuf.ByteString getMachineTypeBytes() {
+ java.lang.Object ref = machineType_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ machineType_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The name of a Compute Engine [machine
+ * type](https://cloud.google.com/compute/docs/machine-types).
+ *
+ *
+ * string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The machineType to set.
+ * @return This builder for chaining.
+ */
+ public Builder setMachineType(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ machineType_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The name of a Compute Engine [machine
+ * type](https://cloud.google.com/compute/docs/machine-types).
+ *
+ *
+ * string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearMachineType() {
+
+ machineType_ = getDefaultInstance().getMachineType();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The name of a Compute Engine [machine
+ * type](https://cloud.google.com/compute/docs/machine-types).
+ *
+ *
+ * string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The bytes for machineType to set.
+ * @return This builder for chaining.
+ */
+ public Builder setMachineTypeBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ machineType_ = value;
+ onChanged();
+ return this;
+ }
+
+ private boolean preemptible_;
+ /**
+ *
+ *
+ *
+ * Optional. Whether the nodes are created as [preemptible VM
+ * instances](https://cloud.google.com/compute/docs/instances/preemptible).
+ *
+ *
+ * bool preemptible = 10 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The preemptible.
+ */
+ @java.lang.Override
+ public boolean getPreemptible() {
+ return preemptible_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Whether the nodes are created as [preemptible VM
+ * instances](https://cloud.google.com/compute/docs/instances/preemptible).
+ *
+ *
+ * bool preemptible = 10 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The preemptible to set.
+ * @return This builder for chaining.
+ */
+ public Builder setPreemptible(boolean value) {
+
+ preemptible_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Whether the nodes are created as [preemptible VM
+ * instances](https://cloud.google.com/compute/docs/instances/preemptible).
+ *
+ *
+ * bool preemptible = 10 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearPreemptible() {
+
+ preemptible_ = false;
+ onChanged();
+ return this;
+ }
+
+ private int localSsdCount_;
+ /**
+ *
+ *
+ *
+ * Optional. The number of local SSD disks to attach to the node, which is limited by
+ * the maximum number of disks allowable per zone (see [Adding Local
+ * SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
+ *
+ *
+ * int32 local_ssd_count = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The localSsdCount.
+ */
+ @java.lang.Override
+ public int getLocalSsdCount() {
+ return localSsdCount_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The number of local SSD disks to attach to the node, which is limited by
+ * the maximum number of disks allowable per zone (see [Adding Local
+ * SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
+ *
+ *
+ * int32 local_ssd_count = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The localSsdCount to set.
+ * @return This builder for chaining.
+ */
+ public Builder setLocalSsdCount(int value) {
+
+ localSsdCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The number of local SSD disks to attach to the node, which is limited by
+ * the maximum number of disks allowable per zone (see [Adding Local
+ * SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
+ *
+ *
+ * int32 local_ssd_count = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearLocalSsdCount() {
+
+ localSsdCount_ = 0;
+ onChanged();
+ return this;
+ }
+
+ private java.util.List<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig>
+ accelerators_ = java.util.Collections.emptyList();
+
+ private void ensureAcceleratorsIsMutable() {
+ if (!((bitField0_ & 0x00000001) != 0)) {
+ accelerators_ =
+ new java.util.ArrayList<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig>(
+ accelerators_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfigOrBuilder>
+ acceleratorsBuilder_;
+
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public java.util.List<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig>
+ getAcceleratorsList() {
+ if (acceleratorsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(accelerators_);
+ } else {
+ return acceleratorsBuilder_.getMessageList();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public int getAcceleratorsCount() {
+ if (acceleratorsBuilder_ == null) {
+ return accelerators_.size();
+ } else {
+ return acceleratorsBuilder_.getCount();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ getAccelerators(int index) {
+ if (acceleratorsBuilder_ == null) {
+ return accelerators_.get(index);
+ } else {
+ return acceleratorsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setAccelerators(
+ int index,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig value) {
+ if (acceleratorsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAcceleratorsIsMutable();
+ accelerators_.set(index, value);
+ onChanged();
+ } else {
+ acceleratorsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setAccelerators(
+ int index,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder
+ builderForValue) {
+ if (acceleratorsBuilder_ == null) {
+ ensureAcceleratorsIsMutable();
+ accelerators_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ acceleratorsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder addAccelerators(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig value) {
+ if (acceleratorsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAcceleratorsIsMutable();
+ accelerators_.add(value);
+ onChanged();
+ } else {
+ acceleratorsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder addAccelerators(
+ int index,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig value) {
+ if (acceleratorsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAcceleratorsIsMutable();
+ accelerators_.add(index, value);
+ onChanged();
+ } else {
+ acceleratorsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder addAccelerators(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder
+ builderForValue) {
+ if (acceleratorsBuilder_ == null) {
+ ensureAcceleratorsIsMutable();
+ accelerators_.add(builderForValue.build());
+ onChanged();
+ } else {
+ acceleratorsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder addAccelerators(
+ int index,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder
+ builderForValue) {
+ if (acceleratorsBuilder_ == null) {
+ ensureAcceleratorsIsMutable();
+ accelerators_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ acceleratorsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder addAllAccelerators(
+ java.lang.Iterable<
+ ? extends
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig>
+ values) {
+ if (acceleratorsBuilder_ == null) {
+ ensureAcceleratorsIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, accelerators_);
+ onChanged();
+ } else {
+ acceleratorsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder clearAccelerators() {
+ if (acceleratorsBuilder_ == null) {
+ accelerators_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ acceleratorsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder removeAccelerators(int index) {
+ if (acceleratorsBuilder_ == null) {
+ ensureAcceleratorsIsMutable();
+ accelerators_.remove(index);
+ onChanged();
+ } else {
+ acceleratorsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder
+ getAcceleratorsBuilder(int index) {
+ return getAcceleratorsFieldBuilder().getBuilder(index);
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfigOrBuilder
+ getAcceleratorsOrBuilder(int index) {
+ if (acceleratorsBuilder_ == null) {
+ return accelerators_.get(index);
+ } else {
+ return acceleratorsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public java.util.List<
+ ? extends
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig
+ .GkeNodePoolAcceleratorConfigOrBuilder>
+ getAcceleratorsOrBuilderList() {
+ if (acceleratorsBuilder_ != null) {
+ return acceleratorsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(accelerators_);
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder
+ addAcceleratorsBuilder() {
+ return getAcceleratorsFieldBuilder()
+ .addBuilder(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ .getDefaultInstance());
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder
+ addAcceleratorsBuilder(int index) {
+ return getAcceleratorsFieldBuilder()
+ .addBuilder(
+ index,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ .getDefaultInstance());
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A list of [hardware
+ * accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ * each node.
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig accelerators = 11 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public java.util.List<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder>
+ getAcceleratorsBuilderList() {
+ return getAcceleratorsFieldBuilder().getBuilderList();
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfigOrBuilder>
+ getAcceleratorsFieldBuilder() {
+ if (acceleratorsBuilder_ == null) {
+ acceleratorsBuilder_ =
+ new com.google.protobuf.RepeatedFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ .Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig
+ .GkeNodePoolAcceleratorConfigOrBuilder>(
+ accelerators_,
+ ((bitField0_ & 0x00000001) != 0),
+ getParentForChildren(),
+ isClean());
+ accelerators_ = null;
+ }
+ return acceleratorsBuilder_;
+ }
+
+ private java.lang.Object minCpuPlatform_ = "";
+ /**
+ *
+ *
+ *
+ * Optional. [Minimum CPU
+ * platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ * to be used by this instance. The instance may be scheduled on the
+ * specified or a newer CPU platform. Specify the friendly names of CPU
+ * platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
+ *
+ *
+ * string min_cpu_platform = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The minCpuPlatform.
+ */
+ public java.lang.String getMinCpuPlatform() {
+ java.lang.Object ref = minCpuPlatform_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ minCpuPlatform_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. [Minimum CPU
+ * platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ * to be used by this instance. The instance may be scheduled on the
+ * specified or a newer CPU platform. Specify the friendly names of CPU
+ * platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
+ *
+ *
+ * string min_cpu_platform = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for minCpuPlatform.
+ */
+ public com.google.protobuf.ByteString getMinCpuPlatformBytes() {
+ java.lang.Object ref = minCpuPlatform_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ minCpuPlatform_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. [Minimum CPU
+ * platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ * to be used by this instance. The instance may be scheduled on the
+ * specified or a newer CPU platform. Specify the friendly names of CPU
+ * platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
+ *
+ *
+ * string min_cpu_platform = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The minCpuPlatform to set.
+ * @return This builder for chaining.
+ */
+ public Builder setMinCpuPlatform(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ minCpuPlatform_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. [Minimum CPU
+ * platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ * to be used by this instance. The instance may be scheduled on the
+ * specified or a newer CPU platform. Specify the friendly names of CPU
+ * platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
+ *
+ *
+ * string min_cpu_platform = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearMinCpuPlatform() {
+
+ minCpuPlatform_ = getDefaultInstance().getMinCpuPlatform();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. [Minimum CPU
+ * platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ * to be used by this instance. The instance may be scheduled on the
+ * specified or a newer CPU platform. Specify the friendly names of CPU
+ * platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
+ *
+ *
+ * string min_cpu_platform = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The bytes for minCpuPlatform to set.
+ * @return This builder for chaining.
+ */
+ public Builder setMinCpuPlatformBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ minCpuPlatform_ = value;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig)
+ private static final com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig
+ DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig();
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig
+ getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public GkeNodeConfig parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GkeNodeConfig(input, extensionRegistry);
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig
+ getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+ }
+
+ // Read-only accessor view for GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.
+ // NOTE(review): protoc-generated — change the .proto comment, then regenerate,
+ // rather than editing this file by hand.
+ public interface GkeNodePoolAcceleratorConfigOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ *
+ *
+ *
+ * The number of accelerator cards exposed to an instance.
+ *
+ *
+ * int64 accelerator_count = 1;
+ *
+ * @return The acceleratorCount.
+ */
+ long getAcceleratorCount();
+
+ /**
+ *
+ *
+ *
+ * The accelerator type resource name (see GPUs on Compute Engine).
+ *
+ *
+ * string accelerator_type = 2;
+ *
+ * @return The acceleratorType.
+ */
+ java.lang.String getAcceleratorType();
+ /**
+ *
+ *
+ *
+ * The accelerator type resource name (see GPUs on Compute Engine).
+ *
+ *
+ * string accelerator_type = 2;
+ *
+ * @return The bytes for acceleratorType.
+ */
+ com.google.protobuf.ByteString getAcceleratorTypeBytes();
+ }
+ /**
+ *
+ *
+ *
+ * A GkeNodePoolAcceleratorConfig represents a Hardware Accelerator request
+ * for a NodePool.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig}
+ */
+ public static final class GkeNodePoolAcceleratorConfig
+ extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig)
+ GkeNodePoolAcceleratorConfigOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use GkeNodePoolAcceleratorConfig.newBuilder() to construct.
+ private GkeNodePoolAcceleratorConfig(
+ com.google.protobuf.GeneratedMessageV3.Builder> builder) {
+ super(builder);
+ }
+
+ private GkeNodePoolAcceleratorConfig() {
+ acceleratorType_ = "";
+ }
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new GkeNodePoolAcceleratorConfig();
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ private GkeNodePoolAcceleratorConfig(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 8:
+ {
+ acceleratorCount_ = input.readInt64();
+ break;
+ }
+ case 18:
+ {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ acceleratorType_ = s;
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAcceleratorConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAcceleratorConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.class,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder
+ .class);
+ }
+
+ public static final int ACCELERATOR_COUNT_FIELD_NUMBER = 1;
+ private long acceleratorCount_;
+ /**
+ *
+ *
+ *
+ * The number of accelerator cards exposed to an instance.
+ *
+ *
+ * int64 accelerator_count = 1;
+ *
+ * @return The acceleratorCount.
+ */
+ @java.lang.Override
+ public long getAcceleratorCount() {
+ return acceleratorCount_;
+ }
+
+ public static final int ACCELERATOR_TYPE_FIELD_NUMBER = 2;
+ private volatile java.lang.Object acceleratorType_;
+ /**
+ *
+ *
+ *
+ * The accelerator type resource name (see GPUs on Compute Engine).
+ *
+ *
+ * string accelerator_type = 2;
+ *
+ * @return The acceleratorType.
+ */
+ @java.lang.Override
+ public java.lang.String getAcceleratorType() {
+ java.lang.Object ref = acceleratorType_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ acceleratorType_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * The accelerator type resource name (see GPUs on Compute Engine).
+ *
+ *
+ * string accelerator_type = 2;
+ *
+ * @return The bytes for acceleratorType.
+ */
+ @java.lang.Override
+ public com.google.protobuf.ByteString getAcceleratorTypeBytes() {
+ java.lang.Object ref = acceleratorType_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ acceleratorType_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (acceleratorCount_ != 0L) {
+ output.writeInt64(1, acceleratorCount_);
+ }
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(acceleratorType_)) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 2, acceleratorType_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (acceleratorCount_ != 0L) {
+ size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, acceleratorCount_);
+ }
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(acceleratorType_)) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, acceleratorType_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj
+ instanceof com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig other =
+ (com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig) obj;
+
+ if (getAcceleratorCount() != other.getAcceleratorCount()) return false;
+ if (!getAcceleratorType().equals(other.getAcceleratorType())) return false;
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + ACCELERATOR_COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getAcceleratorCount());
+ hash = (37 * hash) + ACCELERATOR_TYPE_FIELD_NUMBER;
+ hash = (53 * hash) + getAcceleratorType().hashCode();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseFrom(java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseFrom(com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseFrom(java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * A GkeNodePoolAcceleratorConfig represents a Hardware Accelerator request
+ * for a NodePool.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig}
+ */
+ public static final class Builder
+ extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig)
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfigOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAcceleratorConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAcceleratorConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.class,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.Builder
+ .class);
+ }
+
+ // Construct using
+ // com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ acceleratorCount_ = 0L;
+
+ acceleratorType_ = "";
+
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAcceleratorConfig_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ getDefaultInstanceForType() {
+ return com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ .getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig build() {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig result =
+ buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ buildPartial() {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig result =
+ new com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig(this);
+ result.acceleratorCount_ = acceleratorCount_;
+ result.acceleratorType_ = acceleratorType_;
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index,
+ java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other
+ instanceof
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig) {
+ return mergeFrom(
+ (com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig other) {
+ if (other
+ == com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ .getDefaultInstance()) return this;
+ if (other.getAcceleratorCount() != 0L) {
+ setAcceleratorCount(other.getAcceleratorCount());
+ }
+ if (!other.getAcceleratorType().isEmpty()) {
+ acceleratorType_ = other.acceleratorType_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig parsedMessage =
+ null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage =
+ (com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig)
+ e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ private long acceleratorCount_;
+ /**
+ *
+ *
+ *
+ * The number of accelerator cards exposed to an instance.
+ *
+ *
+ * int64 accelerator_count = 1;
+ *
+ * @return The acceleratorCount.
+ */
+ @java.lang.Override
+ public long getAcceleratorCount() {
+ return acceleratorCount_;
+ }
+ /**
+ *
+ *
+ *
+ * The number of accelerator cards exposed to an instance.
+ *
+ *
+ * int64 accelerator_count = 1;
+ *
+ * @param value The acceleratorCount to set.
+ * @return This builder for chaining.
+ */
+ public Builder setAcceleratorCount(long value) {
+
+ acceleratorCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The number of accelerator cards exposed to an instance.
+ *
+ *
+ * int64 accelerator_count = 1;
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearAcceleratorCount() {
+
+ acceleratorCount_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object acceleratorType_ = "";
+ /**
+ *
+ *
+ *
+ * The accelerator type resource name (see GPUs on Compute Engine).
+ *
+ *
+ * string accelerator_type = 2;
+ *
+ * @return The acceleratorType.
+ */
+ public java.lang.String getAcceleratorType() {
+ java.lang.Object ref = acceleratorType_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ acceleratorType_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * The accelerator type resource name (see GPUs on Compute Engine).
+ *
+ *
+ * string accelerator_type = 2;
+ *
+ * @return The bytes for acceleratorType.
+ */
+ public com.google.protobuf.ByteString getAcceleratorTypeBytes() {
+ java.lang.Object ref = acceleratorType_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ acceleratorType_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * The accelerator type resource name (see GPUs on Compute Engine).
+ *
+ *
+ * string accelerator_type = 2;
+ *
+ * @param value The acceleratorType to set.
+ * @return This builder for chaining.
+ */
+ public Builder setAcceleratorType(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ acceleratorType_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The accelerator type resource name (see GPUs on Compute Engine).
+ *
+ *
+ * string accelerator_type = 2;
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearAcceleratorType() {
+
+ acceleratorType_ = getDefaultInstance().getAcceleratorType();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The accelerator type resource name (see GPUs on Compute Engine).
+ *
+ *
+ * string accelerator_type = 2;
+ *
+ * @param value The bytes for acceleratorType to set.
+ * @return This builder for chaining.
+ */
+ public Builder setAcceleratorTypeBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ acceleratorType_ = value;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig)
+ private static final com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE =
+ new com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig();
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public GkeNodePoolAcceleratorConfig parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GkeNodePoolAcceleratorConfig(input, extensionRegistry);
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig
+ getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+ }
+
+ // Read-only accessor view for GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.
+ // NOTE(review): protoc-generated — the min/max constraints below come from the
+ // .proto comment and are not enforced by these accessors; regenerate from the
+ // .proto rather than editing this file by hand.
+ public interface GkeNodePoolAutoscalingConfigOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ *
+ *
+ *
+ * The minimum number of nodes in the NodePool. Must be >= 0 and <=
+ * max_node_count.
+ *
+ *
+ * int32 min_node_count = 2;
+ *
+ * @return The minNodeCount.
+ */
+ int getMinNodeCount();
+
+ /**
+ *
+ *
+ *
+ * The maximum number of nodes in the NodePool. Must be >= min_node_count.
+ * **Note:** Quota must be sufficient to scale up the cluster.
+ *
+ *
+ * int32 max_node_count = 3;
+ *
+ * @return The maxNodeCount.
+ */
+ int getMaxNodeCount();
+ }
+ /**
+ *
+ *
+ *
+ * GkeNodePoolAutoscaling contains information the cluster autoscaler needs to
+ * adjust the size of the node pool to the current cluster usage.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig}
+ */
+ public static final class GkeNodePoolAutoscalingConfig
+ extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig)
+ GkeNodePoolAutoscalingConfigOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use GkeNodePoolAutoscalingConfig.newBuilder() to construct.
+ private GkeNodePoolAutoscalingConfig(
+ com.google.protobuf.GeneratedMessageV3.Builder> builder) {
+ super(builder);
+ }
+
+ private GkeNodePoolAutoscalingConfig() {}
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new GkeNodePoolAutoscalingConfig();
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ private GkeNodePoolAutoscalingConfig(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 16:
+ {
+ minNodeCount_ = input.readInt32();
+ break;
+ }
+ case 24:
+ {
+ maxNodeCount_ = input.readInt32();
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAutoscalingConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAutoscalingConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.class,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.Builder
+ .class);
+ }
+
+ public static final int MIN_NODE_COUNT_FIELD_NUMBER = 2;
+ private int minNodeCount_;
+ /**
+ *
+ *
+ *
+ * The minimum number of nodes in the NodePool. Must be >= 0 and <=
+ * max_node_count.
+ *
+ *
+ * int32 min_node_count = 2;
+ *
+ * @return The minNodeCount.
+ */
+ @java.lang.Override
+ public int getMinNodeCount() {
+ return minNodeCount_;
+ }
+
+ public static final int MAX_NODE_COUNT_FIELD_NUMBER = 3;
+ private int maxNodeCount_;
+ /**
+ *
+ *
+ *
+ * The maximum number of nodes in the NodePool. Must be >= min_node_count.
+ * **Note:** Quota must be sufficient to scale up the cluster.
+ *
+ *
+ * int32 max_node_count = 3;
+ *
+ * @return The maxNodeCount.
+ */
+ @java.lang.Override
+ public int getMaxNodeCount() {
+ return maxNodeCount_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (minNodeCount_ != 0) {
+ output.writeInt32(2, minNodeCount_);
+ }
+ if (maxNodeCount_ != 0) {
+ output.writeInt32(3, maxNodeCount_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (minNodeCount_ != 0) {
+ size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, minNodeCount_);
+ }
+ if (maxNodeCount_ != 0) {
+ size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, maxNodeCount_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj
+ instanceof com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig other =
+ (com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig) obj;
+
+ if (getMinNodeCount() != other.getMinNodeCount()) return false;
+ if (getMaxNodeCount() != other.getMaxNodeCount()) return false;
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + MIN_NODE_COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + getMinNodeCount();
+ hash = (37 * hash) + MAX_NODE_COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + getMaxNodeCount();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseFrom(java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseFrom(com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseFrom(java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * GkeNodePoolAutoscaling contains information the cluster autoscaler needs to
+ * adjust the size of the node pool to the current cluster usage.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig}
+ */
+ public static final class Builder
+ extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig)
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfigOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAutoscalingConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAutoscalingConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.class,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.Builder
+ .class);
+ }
+
+ // Construct using
+ // com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ minNodeCount_ = 0;
+
+ maxNodeCount_ = 0;
+
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAutoscalingConfig_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ getDefaultInstanceForType() {
+ return com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ .getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig build() {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig result =
+ buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ buildPartial() {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig result =
+ new com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig(this);
+ result.minNodeCount_ = minNodeCount_;
+ result.maxNodeCount_ = maxNodeCount_;
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index,
+ java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other
+ instanceof
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig) {
+ return mergeFrom(
+ (com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig other) {
+ if (other
+ == com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ .getDefaultInstance()) return this;
+ if (other.getMinNodeCount() != 0) {
+ setMinNodeCount(other.getMinNodeCount());
+ }
+ if (other.getMaxNodeCount() != 0) {
+ setMaxNodeCount(other.getMaxNodeCount());
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig parsedMessage =
+ null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage =
+ (com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig)
+ e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ private int minNodeCount_;
+ /**
+ *
+ *
+ *
+ * The minimum number of nodes in the NodePool. Must be >= 0 and <=
+ * max_node_count.
+ *
+ *
+ * int32 min_node_count = 2;
+ *
+ * @return The minNodeCount.
+ */
+ @java.lang.Override
+ public int getMinNodeCount() {
+ return minNodeCount_;
+ }
+ /**
+ *
+ *
+ *
+ * The minimum number of nodes in the NodePool. Must be >= 0 and <=
+ * max_node_count.
+ *
+ *
+ * int32 min_node_count = 2;
+ *
+ * @param value The minNodeCount to set.
+ * @return This builder for chaining.
+ */
+ public Builder setMinNodeCount(int value) {
+
+ minNodeCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The minimum number of nodes in the NodePool. Must be >= 0 and <=
+ * max_node_count.
+ *
+ *
+ * int32 min_node_count = 2;
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearMinNodeCount() {
+
+ minNodeCount_ = 0;
+ onChanged();
+ return this;
+ }
+
+ private int maxNodeCount_;
+ /**
+ *
+ *
+ *
+ * The maximum number of nodes in the NodePool. Must be >= min_node_count.
+ * **Note:** Quota must be sufficient to scale up the cluster.
+ *
+ *
+ * int32 max_node_count = 3;
+ *
+ * @return The maxNodeCount.
+ */
+ @java.lang.Override
+ public int getMaxNodeCount() {
+ return maxNodeCount_;
+ }
+ /**
+ *
+ *
+ *
+ * The maximum number of nodes in the NodePool. Must be >= min_node_count.
+ * **Note:** Quota must be sufficient to scale up the cluster.
+ *
+ *
+ * int32 max_node_count = 3;
+ *
+ * @param value The maxNodeCount to set.
+ * @return This builder for chaining.
+ */
+ public Builder setMaxNodeCount(int value) {
+
+ maxNodeCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The maximum number of nodes in the NodePool. Must be >= min_node_count.
+ * **Note:** Quota must be sufficient to scale up the cluster.
+ *
+ *
+ * int32 max_node_count = 3;
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearMaxNodeCount() {
+
+ maxNodeCount_ = 0;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig)
+ private static final com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE =
+ new com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig();
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public GkeNodePoolAutoscalingConfig parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GkeNodePoolAutoscalingConfig(input, extensionRegistry);
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+ }
+
+ public static final int CONFIG_FIELD_NUMBER = 2;
+ private com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config_;
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the config field is set.
+ */
+ @java.lang.Override
+ public boolean hasConfig() {
+ return config_ != null;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The config.
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig getConfig() {
+ return config_ == null
+ ? com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.getDefaultInstance()
+ : config_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfigOrBuilder
+ getConfigOrBuilder() {
+ return getConfig();
+ }
+
+ public static final int LOCATIONS_FIELD_NUMBER = 13;
+ private com.google.protobuf.LazyStringList locations_;
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return A list containing the locations.
+ */
+ public com.google.protobuf.ProtocolStringList getLocationsList() {
+ return locations_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The count of locations.
+ */
+ public int getLocationsCount() {
+ return locations_.size();
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param index The index of the element to return.
+ * @return The locations at the given index.
+ */
+ public java.lang.String getLocations(int index) {
+ return locations_.get(index);
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param index The index of the value to return.
+ * @return The bytes of the locations at the given index.
+ */
+ public com.google.protobuf.ByteString getLocationsBytes(int index) {
+ return locations_.getByteString(index);
+ }
+
+ public static final int AUTOSCALING_FIELD_NUMBER = 4;
+ private com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling_;
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the autoscaling field is set.
+ */
+ @java.lang.Override
+ public boolean hasAutoscaling() {
+ return autoscaling_ != null;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The autoscaling.
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ getAutoscaling() {
+ return autoscaling_ == null
+ ? com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ .getDefaultInstance()
+ : autoscaling_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfigOrBuilder
+ getAutoscalingOrBuilder() {
+ return getAutoscaling();
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (config_ != null) {
+ output.writeMessage(2, getConfig());
+ }
+ if (autoscaling_ != null) {
+ output.writeMessage(4, getAutoscaling());
+ }
+ for (int i = 0; i < locations_.size(); i++) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 13, locations_.getRaw(i));
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (config_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getConfig());
+ }
+ if (autoscaling_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getAutoscaling());
+ }
+ {
+ int dataSize = 0;
+ for (int i = 0; i < locations_.size(); i++) {
+ dataSize += computeStringSizeNoTag(locations_.getRaw(i));
+ }
+ size += dataSize;
+ size += 1 * getLocationsList().size();
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.dataproc.v1.GkeNodePoolConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig other =
+ (com.google.cloud.dataproc.v1.GkeNodePoolConfig) obj;
+
+ if (hasConfig() != other.hasConfig()) return false;
+ if (hasConfig()) {
+ if (!getConfig().equals(other.getConfig())) return false;
+ }
+ if (!getLocationsList().equals(other.getLocationsList())) return false;
+ if (hasAutoscaling() != other.hasAutoscaling()) return false;
+ if (hasAutoscaling()) {
+ if (!getAutoscaling().equals(other.getAutoscaling())) return false;
+ }
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (hasConfig()) {
+ hash = (37 * hash) + CONFIG_FIELD_NUMBER;
+ hash = (53 * hash) + getConfig().hashCode();
+ }
+ if (getLocationsCount() > 0) {
+ hash = (37 * hash) + LOCATIONS_FIELD_NUMBER;
+ hash = (53 * hash) + getLocationsList().hashCode();
+ }
+ if (hasAutoscaling()) {
+ hash = (37 * hash) + AUTOSCALING_FIELD_NUMBER;
+ hash = (53 * hash) + getAutoscaling().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseFrom(java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(com.google.cloud.dataproc.v1.GkeNodePoolConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * The configuration of a GKE NodePool used by a [Dataproc-on-GKE
+ * cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.GkeNodePoolConfig}
+ */
+ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.GkeNodePoolConfig)
+ com.google.cloud.dataproc.v1.GkeNodePoolConfigOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.class,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.Builder.class);
+ }
+
+ // Construct using com.google.cloud.dataproc.v1.GkeNodePoolConfig.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ if (configBuilder_ == null) {
+ config_ = null;
+ } else {
+ config_ = null;
+ configBuilder_ = null;
+ }
+ locations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (autoscalingBuilder_ == null) {
+ autoscaling_ = null;
+ } else {
+ autoscaling_ = null;
+ autoscalingBuilder_ = null;
+ }
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig getDefaultInstanceForType() {
+ return com.google.cloud.dataproc.v1.GkeNodePoolConfig.getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig build() {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig buildPartial() {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig result =
+ new com.google.cloud.dataproc.v1.GkeNodePoolConfig(this);
+ int from_bitField0_ = bitField0_;
+ if (configBuilder_ == null) {
+ result.config_ = config_;
+ } else {
+ result.config_ = configBuilder_.build();
+ }
+ if (((bitField0_ & 0x00000001) != 0)) {
+ locations_ = locations_.getUnmodifiableView();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.locations_ = locations_;
+ if (autoscalingBuilder_ == null) {
+ result.autoscaling_ = autoscaling_;
+ } else {
+ result.autoscaling_ = autoscalingBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.cloud.dataproc.v1.GkeNodePoolConfig) {
+ return mergeFrom((com.google.cloud.dataproc.v1.GkeNodePoolConfig) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(com.google.cloud.dataproc.v1.GkeNodePoolConfig other) {
+ if (other == com.google.cloud.dataproc.v1.GkeNodePoolConfig.getDefaultInstance()) return this;
+ if (other.hasConfig()) {
+ mergeConfig(other.getConfig());
+ }
+ if (!other.locations_.isEmpty()) {
+ if (locations_.isEmpty()) {
+ locations_ = other.locations_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureLocationsIsMutable();
+ locations_.addAll(other.locations_);
+ }
+ onChanged();
+ }
+ if (other.hasAutoscaling()) {
+ mergeAutoscaling(other.getAutoscaling());
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (com.google.cloud.dataproc.v1.GkeNodePoolConfig) e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ private int bitField0_;
+
+ private com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config_;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfigOrBuilder>
+ configBuilder_;
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the config field is set.
+ */
+ public boolean hasConfig() {
+ return configBuilder_ != null || config_ != null;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The config.
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig getConfig() {
+ if (configBuilder_ == null) {
+ return config_ == null
+ ? com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.getDefaultInstance()
+ : config_;
+ } else {
+ return configBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setConfig(com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig value) {
+ if (configBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ config_ = value;
+ onChanged();
+ } else {
+ configBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setConfig(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.Builder builderForValue) {
+ if (configBuilder_ == null) {
+ config_ = builderForValue.build();
+ onChanged();
+ } else {
+ configBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder mergeConfig(com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig value) {
+ if (configBuilder_ == null) {
+ if (config_ != null) {
+ config_ =
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.newBuilder(config_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ config_ = value;
+ }
+ onChanged();
+ } else {
+ configBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder clearConfig() {
+ if (configBuilder_ == null) {
+ config_ = null;
+ onChanged();
+ } else {
+ config_ = null;
+ configBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.Builder getConfigBuilder() {
+
+ onChanged();
+ return getConfigFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfigOrBuilder
+ getConfigOrBuilder() {
+ if (configBuilder_ != null) {
+ return configBuilder_.getMessageOrBuilder();
+ } else {
+ return config_ == null
+ ? com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.getDefaultInstance()
+ : config_;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfigOrBuilder>
+ getConfigFieldBuilder() {
+ if (configBuilder_ == null) {
+ configBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfigOrBuilder>(
+ getConfig(), getParentForChildren(), isClean());
+ config_ = null;
+ }
+ return configBuilder_;
+ }
+
+ private com.google.protobuf.LazyStringList locations_ =
+ com.google.protobuf.LazyStringArrayList.EMPTY;
+
+ private void ensureLocationsIsMutable() {
+ if (!((bitField0_ & 0x00000001) != 0)) {
+ locations_ = new com.google.protobuf.LazyStringArrayList(locations_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return A list containing the locations.
+ */
+ public com.google.protobuf.ProtocolStringList getLocationsList() {
+ return locations_.getUnmodifiableView();
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The count of locations.
+ */
+ public int getLocationsCount() {
+ return locations_.size();
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param index The index of the element to return.
+ * @return The locations at the given index.
+ */
+ public java.lang.String getLocations(int index) {
+ return locations_.get(index);
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param index The index of the value to return.
+ * @return The bytes of the locations at the given index.
+ */
+ public com.google.protobuf.ByteString getLocationsBytes(int index) {
+ return locations_.getByteString(index);
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param index The index to set the value at.
+ * @param value The locations to set.
+ * @return This builder for chaining.
+ */
+ public Builder setLocations(int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLocationsIsMutable();
+ locations_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The locations to add.
+ * @return This builder for chaining.
+ */
+ public Builder addLocations(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLocationsIsMutable();
+ locations_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param values The locations to add.
+ * @return This builder for chaining.
+ */
+ public Builder addAllLocations(java.lang.Iterable<java.lang.String> values) {
+ ensureLocationsIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, locations_);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearLocations() {
+ locations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The bytes of the locations to add.
+ * @return This builder for chaining.
+ */
+ public Builder addLocationsBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+ ensureLocationsIsMutable();
+ locations_.add(value);
+ onChanged();
+ return this;
+ }
+
+ private com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ autoscaling_;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfigOrBuilder>
+ autoscalingBuilder_;
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the autoscaling field is set.
+ */
+ public boolean hasAutoscaling() {
+ return autoscalingBuilder_ != null || autoscaling_ != null;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The autoscaling.
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ getAutoscaling() {
+ if (autoscalingBuilder_ == null) {
+ return autoscaling_ == null
+ ? com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ .getDefaultInstance()
+ : autoscaling_;
+ } else {
+ return autoscalingBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setAutoscaling(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig value) {
+ if (autoscalingBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ autoscaling_ = value;
+ onChanged();
+ } else {
+ autoscalingBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setAutoscaling(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.Builder
+ builderForValue) {
+ if (autoscalingBuilder_ == null) {
+ autoscaling_ = builderForValue.build();
+ onChanged();
+ } else {
+ autoscalingBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder mergeAutoscaling(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig value) {
+ if (autoscalingBuilder_ == null) {
+ if (autoscaling_ != null) {
+ autoscaling_ =
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ .newBuilder(autoscaling_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ autoscaling_ = value;
+ }
+ onChanged();
+ } else {
+ autoscalingBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder clearAutoscaling() {
+ if (autoscalingBuilder_ == null) {
+ autoscaling_ = null;
+ onChanged();
+ } else {
+ autoscaling_ = null;
+ autoscalingBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.Builder
+ getAutoscalingBuilder() {
+
+ onChanged();
+ return getAutoscalingFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfigOrBuilder
+ getAutoscalingOrBuilder() {
+ if (autoscalingBuilder_ != null) {
+ return autoscalingBuilder_.getMessageOrBuilder();
+ } else {
+ return autoscaling_ == null
+ ? com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig
+ .getDefaultInstance()
+ : autoscaling_;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfigOrBuilder>
+ getAutoscalingFieldBuilder() {
+ if (autoscalingBuilder_ == null) {
+ autoscalingBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig
+ .GkeNodePoolAutoscalingConfigOrBuilder>(
+ getAutoscaling(), getParentForChildren(), isClean());
+ autoscaling_ = null;
+ }
+ return autoscalingBuilder_;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.GkeNodePoolConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GkeNodePoolConfig)
+ private static final com.google.cloud.dataproc.v1.GkeNodePoolConfig DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.GkeNodePoolConfig();
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolConfig getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser<GkeNodePoolConfig> PARSER =
+ new com.google.protobuf.AbstractParser<GkeNodePoolConfig>() {
+ @java.lang.Override
+ public GkeNodePoolConfig parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GkeNodePoolConfig(input, extensionRegistry);
+ }
+ };
+
+ public static com.google.protobuf.Parser<GkeNodePoolConfig> parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<GkeNodePoolConfig> getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolConfigOrBuilder.java
new file mode 100644
index 00000000..81c1de16
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolConfigOrBuilder.java
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/shared.proto
+
+package com.google.cloud.dataproc.v1;
+
+public interface GkeNodePoolConfigOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.GkeNodePoolConfig)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the config field is set.
+ */
+ boolean hasConfig();
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The config.
+ */
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig getConfig();
+ /**
+ *
+ *
+ *
+ * Optional. The node pool configuration.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig config = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfigOrBuilder getConfigOrBuilder();
+
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return A list containing the locations.
+ */
+ java.util.List<java.lang.String> getLocationsList();
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The count of locations.
+ */
+ int getLocationsCount();
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param index The index of the element to return.
+ * @return The locations at the given index.
+ */
+ java.lang.String getLocations(int index);
+ /**
+ *
+ *
+ *
+ * Optional. The list of Compute Engine
+ * [zones](https://cloud.google.com/compute/docs/zones#available) where
+ * NodePool's nodes will be located.
+ * **Note:** Currently, only one zone may be specified.
+ * If a location is not specified during NodePool creation, Dataproc will
+ * choose a location.
+ *
+ *
+ * repeated string locations = 13 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param index The index of the value to return.
+ * @return The bytes of the locations at the given index.
+ */
+ com.google.protobuf.ByteString getLocationsBytes(int index);
+
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the autoscaling field is set.
+ */
+ boolean hasAutoscaling();
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The autoscaling.
+ */
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig getAutoscaling();
+ /**
+ *
+ *
+ *
+ * Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ * only when a valid configuration is present.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig autoscaling = 4 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfigOrBuilder
+ getAutoscalingOrBuilder();
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolTarget.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolTarget.java
new file mode 100644
index 00000000..5d501e88
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolTarget.java
@@ -0,0 +1,1669 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/shared.proto
+
+package com.google.cloud.dataproc.v1;
+
+/**
+ *
+ *
+ *
+ * GKE NodePools that Dataproc workloads run on.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.GkeNodePoolTarget}
+ */
+public final class GkeNodePoolTarget extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.GkeNodePoolTarget)
+ GkeNodePoolTargetOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use GkeNodePoolTarget.newBuilder() to construct.
+ private GkeNodePoolTarget(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+
+ private GkeNodePoolTarget() {
+ nodePool_ = "";
+ roles_ = java.util.Collections.emptyList();
+ }
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new GkeNodePoolTarget();
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ private GkeNodePoolTarget(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 10:
+ {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ nodePool_ = s;
+ break;
+ }
+ case 16:
+ {
+ int rawValue = input.readEnum();
+ if (!((mutable_bitField0_ & 0x00000001) != 0)) {
+ roles_ = new java.util.ArrayList<java.lang.Integer>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ roles_.add(rawValue);
+ break;
+ }
+ case 18:
+ {
+ int length = input.readRawVarint32();
+ int oldLimit = input.pushLimit(length);
+ while (input.getBytesUntilLimit() > 0) {
+ int rawValue = input.readEnum();
+ if (!((mutable_bitField0_ & 0x00000001) != 0)) {
+ roles_ = new java.util.ArrayList<java.lang.Integer>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ roles_.add(rawValue);
+ }
+ input.popLimit(oldLimit);
+ break;
+ }
+ case 26:
+ {
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.Builder subBuilder = null;
+ if (nodePoolConfig_ != null) {
+ subBuilder = nodePoolConfig_.toBuilder();
+ }
+ nodePoolConfig_ =
+ input.readMessage(
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.parser(), extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(nodePoolConfig_);
+ nodePoolConfig_ = subBuilder.buildPartial();
+ }
+
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) != 0)) {
+ roles_ = java.util.Collections.unmodifiableList(roles_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolTarget_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolTarget_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.class,
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder.class);
+ }
+
+ /**
+ *
+ *
+ *
+ * `Role` specifies whose tasks will run on the NodePool. The roles can be
+ * specific to workloads. Exactly one GkeNodePoolTarget within the
+ * VirtualCluster must have 'default' role, which is used to run all workloads
+ * that are not associated with a NodePool.
+ *
+ *
+ * Protobuf enum {@code google.cloud.dataproc.v1.GkeNodePoolTarget.Role}
+ */
+ public enum Role implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ *
+ *
+ *
+ * Role is unspecified.
+ *
+ *
+ * ROLE_UNSPECIFIED = 0;
+ */
+ ROLE_UNSPECIFIED(0),
+ /**
+ *
+ *
+ *
+ * Any roles that are not directly assigned to a NodePool run on the
+ * `default` role's NodePool.
+ *
+ *
+ * DEFAULT = 1;
+ */
+ DEFAULT(1),
+ /**
+ *
+ *
+ *
+ * Run controllers and webhooks.
+ *
+ *
+ * CONTROLLER = 2;
+ */
+ CONTROLLER(2),
+ /**
+ *
+ *
+ *
+ * Run spark driver.
+ *
+ *
+ * SPARK_DRIVER = 3;
+ */
+ SPARK_DRIVER(3),
+ /**
+ *
+ *
+ *
+ * Run spark executors.
+ *
+ *
+ * SPARK_EXECUTOR = 4;
+ */
+ SPARK_EXECUTOR(4),
+ UNRECOGNIZED(-1),
+ ;
+
+ /**
+ *
+ *
+ *
+ * Role is unspecified.
+ *
+ *
+ * ROLE_UNSPECIFIED = 0;
+ */
+ public static final int ROLE_UNSPECIFIED_VALUE = 0;
+ /**
+ *
+ *
+ *
+ * Any roles that are not directly assigned to a NodePool run on the
+ * `default` role's NodePool.
+ *
+ *
+ * DEFAULT = 1;
+ */
+ public static final int DEFAULT_VALUE = 1;
+ /**
+ *
+ *
+ *
+ * Run controllers and webhooks.
+ *
+ *
+ * CONTROLLER = 2;
+ */
+ public static final int CONTROLLER_VALUE = 2;
+ /**
+ *
+ *
+ *
+ * Run spark driver.
+ *
+ *
+ * SPARK_DRIVER = 3;
+ */
+ public static final int SPARK_DRIVER_VALUE = 3;
+ /**
+ *
+ *
+ *
+ * Run spark executors.
+ *
+ *
+ * SPARK_EXECUTOR = 4;
+ */
+ public static final int SPARK_EXECUTOR_VALUE = 4;
+
+ public final int getNumber() {
+ if (this == UNRECOGNIZED) {
+ throw new java.lang.IllegalArgumentException(
+ "Can't get the number of an unknown enum value.");
+ }
+ return value;
+ }
+
+ /**
+ * @param value The numeric wire value of the corresponding enum entry.
+ * @return The enum associated with the given numeric wire value.
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+ @java.lang.Deprecated
+ public static Role valueOf(int value) {
+ return forNumber(value);
+ }
+
+ /**
+ * @param value The numeric wire value of the corresponding enum entry.
+ * @return The enum associated with the given numeric wire value.
+ */
+ public static Role forNumber(int value) {
+ switch (value) {
+ case 0:
+ return ROLE_UNSPECIFIED;
+ case 1:
+ return DEFAULT;
+ case 2:
+ return CONTROLLER;
+ case 3:
+ return SPARK_DRIVER;
+ case 4:
+ return SPARK_EXECUTOR;
+ default:
+ return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<Role> internalGetValueMap() {
+ return internalValueMap;
+ }
+
+ private static final com.google.protobuf.Internal.EnumLiteMap<Role> internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<Role>() {
+ public Role findValueByNumber(int number) {
+ return Role.forNumber(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() {
+ if (this == UNRECOGNIZED) {
+ throw new java.lang.IllegalStateException(
+ "Can't get the descriptor of an unrecognized enum value.");
+ }
+ return getDescriptor().getValues().get(ordinal());
+ }
+
+ public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.GkeNodePoolTarget.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final Role[] VALUES = values();
+
+ public static Role valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type.");
+ }
+ if (desc.getIndex() == -1) {
+ return UNRECOGNIZED;
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int value;
+
+ private Role(int value) {
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:google.cloud.dataproc.v1.GkeNodePoolTarget.Role)
+ }
+
  public static final int NODE_POOL_FIELD_NUMBER = 1;
  // Holds either a String or a ByteString; lazily decoded to String on first
  // access (standard protoc string-field representation).
  private volatile java.lang.Object nodePool_;
  /**
   *
   *
   * <pre>
   * Required. The target GKE NodePool.
   * Format:
   * 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
   * </pre>
   *
   * <code>string node_pool = 1 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The nodePool.
   */
  @java.lang.Override
  public java.lang.String getNodePool() {
    java.lang.Object ref = nodePool_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      // Cache the decoded String so later calls skip the UTF-8 conversion.
      nodePool_ = s;
      return s;
    }
  }
  /**
   *
   *
   * <pre>
   * Required. The target GKE NodePool.
   * Format:
   * 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
   * </pre>
   *
   * <code>string node_pool = 1 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The bytes for nodePool.
   */
  @java.lang.Override
  public com.google.protobuf.ByteString getNodePoolBytes() {
    java.lang.Object ref = nodePool_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      // Cache the encoded ByteString for subsequent byte-level reads.
      nodePool_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }
+
+ public static final int ROLES_FIELD_NUMBER = 2;
+ private java.util.List roles_;
+ private static final com.google.protobuf.Internal.ListAdapter.Converter<
+ java.lang.Integer, com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role>
+ roles_converter_ =
+ new com.google.protobuf.Internal.ListAdapter.Converter<
+ java.lang.Integer, com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role>() {
+ public com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role convert(
+ java.lang.Integer from) {
+ @SuppressWarnings("deprecation")
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role result =
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role.valueOf(from);
+ return result == null
+ ? com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role.UNRECOGNIZED
+ : result;
+ }
+ };
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return A list containing the roles.
+ */
+ @java.lang.Override
+ public java.util.List getRolesList() {
+ return new com.google.protobuf.Internal.ListAdapter<
+ java.lang.Integer, com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role>(
+ roles_, roles_converter_);
+ }
  /**
   *
   *
   * <pre>
   * Required. The types of role for a GKE NodePool
   * </pre>
   *
   * <code>
   * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
   * </code>
   *
   * @return The count of roles.
   */
  @java.lang.Override
  public int getRolesCount() {
    return roles_.size();
  }
  /**
   *
   *
   * <pre>
   * Required. The types of role for a GKE NodePool
   * </pre>
   *
   * <code>
   * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
   * </code>
   *
   * @param index The index of the element to return.
   * @return The roles at the given index.
   */
  @java.lang.Override
  public com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role getRoles(int index) {
    // Converts the stored wire integer to the Role constant on demand.
    return roles_converter_.convert(roles_.get(index));
  }
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return A list containing the enum numeric values on the wire for roles.
+ */
+ @java.lang.Override
+ public java.util.List getRolesValueList() {
+ return roles_;
+ }
  /**
   *
   *
   * <pre>
   * Required. The types of role for a GKE NodePool
   * </pre>
   *
   * <code>
   * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
   * </code>
   *
   * @param index The index of the value to return.
   * @return The enum numeric value on the wire of roles at the given index.
   */
  @java.lang.Override
  public int getRolesValue(int index) {
    return roles_.get(index);
  }

  // Byte length of the packed roles payload, filled in by getSerializedSize()
  // and reused by writeTo() for the length-delimited header.
  private int rolesMemoizedSerializedSize;
+
  public static final int NODE_POOL_CONFIG_FIELD_NUMBER = 3;
  // null means the field is unset; getNodePoolConfig() substitutes the
  // default instance so callers never see null.
  private com.google.cloud.dataproc.v1.GkeNodePoolConfig nodePoolConfig_;
  /**
   *
   *
   * <pre>
   * Optional. The configuration for the GKE NodePool.
   * If specified, Dataproc attempts to create a NodePool with the
   * specified shape. If one with the same name already exists, it is
   * verified against all specified fields. If a field differs, the
   * virtual cluster creation will fail.
   * If omitted, any NodePool with the specified name is used. If a
   * NodePool with the specified name does not exist, Dataproc create a NodePool
   * with default values.
   * </pre>
   *
   * <code>
   * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
   * </code>
   *
   * @return Whether the nodePoolConfig field is set.
   */
  @java.lang.Override
  public boolean hasNodePoolConfig() {
    return nodePoolConfig_ != null;
  }
  /**
   *
   *
   * <pre>
   * Optional. The configuration for the GKE NodePool.
   * If specified, Dataproc attempts to create a NodePool with the
   * specified shape. If one with the same name already exists, it is
   * verified against all specified fields. If a field differs, the
   * virtual cluster creation will fail.
   * If omitted, any NodePool with the specified name is used. If a
   * NodePool with the specified name does not exist, Dataproc create a NodePool
   * with default values.
   * </pre>
   *
   * <code>
   * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
   * </code>
   *
   * @return The nodePoolConfig.
   */
  @java.lang.Override
  public com.google.cloud.dataproc.v1.GkeNodePoolConfig getNodePoolConfig() {
    return nodePoolConfig_ == null
        ? com.google.cloud.dataproc.v1.GkeNodePoolConfig.getDefaultInstance()
        : nodePoolConfig_;
  }
  /**
   *
   *
   * <pre>
   * Optional. The configuration for the GKE NodePool.
   * If specified, Dataproc attempts to create a NodePool with the
   * specified shape. If one with the same name already exists, it is
   * verified against all specified fields. If a field differs, the
   * virtual cluster creation will fail.
   * If omitted, any NodePool with the specified name is used. If a
   * NodePool with the specified name does not exist, Dataproc create a NodePool
   * with default values.
   * </pre>
   *
   * <code>
   * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
   * </code>
   */
  @java.lang.Override
  public com.google.cloud.dataproc.v1.GkeNodePoolConfigOrBuilder getNodePoolConfigOrBuilder() {
    return getNodePoolConfig();
  }
+
  // -1 = not computed yet, 0 = not initialized, 1 = initialized.
  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    // Proto3 message with no proto2 required fields: always initialized.
    memoizedIsInitialized = 1;
    return true;
  }
+
  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    getSerializedSize(); // side effect: populates rolesMemoizedSerializedSize
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(nodePool_)) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 1, nodePool_);
    }
    // roles is written packed: raw tag 18 = (field 2 << 3) | wire type 2,
    // followed by the byte length of the packed enum values.
    if (getRolesList().size() > 0) {
      output.writeUInt32NoTag(18);
      output.writeUInt32NoTag(rolesMemoizedSerializedSize);
    }
    for (int i = 0; i < roles_.size(); i++) {
      output.writeEnumNoTag(roles_.get(i));
    }
    if (nodePoolConfig_ != null) {
      output.writeMessage(3, getNodePoolConfig());
    }
    unknownFields.writeTo(output);
  }
+
  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(nodePool_)) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, nodePool_);
    }
    {
      // Packed repeated enum: payload bytes plus, when non-empty, one tag
      // byte and the varint length prefix.
      int dataSize = 0;
      for (int i = 0; i < roles_.size(); i++) {
        dataSize += com.google.protobuf.CodedOutputStream.computeEnumSizeNoTag(roles_.get(i));
      }
      size += dataSize;
      if (!getRolesList().isEmpty()) {
        size += 1;
        size += com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(dataSize);
      }
      // Remember the payload length for writeTo()'s length-delimited header.
      rolesMemoizedSerializedSize = dataSize;
    }
    if (nodePoolConfig_ != null) {
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getNodePoolConfig());
    }
    size += unknownFields.getSerializedSize();
    memoizedSize = size;
    return size;
  }
+
  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.dataproc.v1.GkeNodePoolTarget)) {
      // Defer to GeneratedMessageV3 for cross-type comparisons.
      return super.equals(obj);
    }
    com.google.cloud.dataproc.v1.GkeNodePoolTarget other =
        (com.google.cloud.dataproc.v1.GkeNodePoolTarget) obj;

    if (!getNodePool().equals(other.getNodePool())) return false;
    // Compare the raw integer lists so unrecognized enum values participate.
    if (!roles_.equals(other.roles_)) return false;
    if (hasNodePoolConfig() != other.hasNodePoolConfig()) return false;
    if (hasNodePoolConfig()) {
      if (!getNodePoolConfig().equals(other.getNodePoolConfig())) return false;
    }
    if (!unknownFields.equals(other.unknownFields)) return false;
    return true;
  }
+
  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    // Standard protoc hash: fold each set field's number and value hash.
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + NODE_POOL_FIELD_NUMBER;
    hash = (53 * hash) + getNodePool().hashCode();
    if (getRolesCount() > 0) {
      hash = (37 * hash) + ROLES_FIELD_NUMBER;
      hash = (53 * hash) + roles_.hashCode();
    }
    if (hasNodePoolConfig()) {
      hash = (37 * hash) + NODE_POOL_CONFIG_FIELD_NUMBER;
      hash = (53 * hash) + getNodePoolConfig().hashCode();
    }
    hash = (29 * hash) + unknownFields.hashCode();
    memoizedHashCode = hash;
    return hash;
  }
+
  // Standard protoc-generated parse entry points; all delegate to PARSER.
  // Byte-oriented overloads throw InvalidProtocolBufferException on malformed
  // input; stream overloads additionally surface I/O failures as IOException.
  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseFrom(java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  // Delimited variants read a varint length prefix before the message body.
  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.dataproc.v1.GkeNodePoolTarget parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }
+
  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  // Builder pre-populated from an existing message.
  public static Builder newBuilder(com.google.cloud.dataproc.v1.GkeNodePoolTarget prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    // The default instance yields an empty builder without a merge pass.
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
+ /**
+ *
+ *
+ *
+ * GKE NodePools that Dataproc workloads run on.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.GkeNodePoolTarget}
+ */
+ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.GkeNodePoolTarget)
+ com.google.cloud.dataproc.v1.GkeNodePoolTargetOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolTarget_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_GkeNodePoolTarget_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.class,
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.Builder.class);
+ }
+
    // Construct using com.google.cloud.dataproc.v1.GkeNodePoolTarget.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }

    private void maybeForceBuilderInitialization() {
      // No message-typed field builders need eager init for this message.
      if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
    }

    // Resets every field to its default/unset state.
    @java.lang.Override
    public Builder clear() {
      super.clear();
      nodePool_ = "";

      roles_ = java.util.Collections.emptyList();
      bitField0_ = (bitField0_ & ~0x00000001);
      if (nodePoolConfigBuilder_ == null) {
        nodePoolConfig_ = null;
      } else {
        nodePoolConfig_ = null;
        nodePoolConfigBuilder_ = null;
      }
      return this;
    }
+
    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.dataproc.v1.SharedProto
          .internal_static_google_cloud_dataproc_v1_GkeNodePoolTarget_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.dataproc.v1.GkeNodePoolTarget getDefaultInstanceForType() {
      return com.google.cloud.dataproc.v1.GkeNodePoolTarget.getDefaultInstance();
    }

    // build() = buildPartial() plus an initialization check.
    @java.lang.Override
    public com.google.cloud.dataproc.v1.GkeNodePoolTarget build() {
      com.google.cloud.dataproc.v1.GkeNodePoolTarget result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.cloud.dataproc.v1.GkeNodePoolTarget buildPartial() {
      com.google.cloud.dataproc.v1.GkeNodePoolTarget result =
          new com.google.cloud.dataproc.v1.GkeNodePoolTarget(this);
      int from_bitField0_ = bitField0_;
      result.nodePool_ = nodePool_;
      if (((bitField0_ & 0x00000001) != 0)) {
        // Freeze the roles list once it is handed to a message; the builder
        // will copy-on-write (ensureRolesIsMutable) before mutating again.
        roles_ = java.util.Collections.unmodifiableList(roles_);
        bitField0_ = (bitField0_ & ~0x00000001);
      }
      result.roles_ = roles_;
      if (nodePoolConfigBuilder_ == null) {
        result.nodePoolConfig_ = nodePoolConfig_;
      } else {
        result.nodePoolConfig_ = nodePoolConfigBuilder_.build();
      }
      onBuilt();
      return result;
    }
+
    // The overrides below only narrow the return type of the inherited
    // reflection API to Builder for fluent chaining.
    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    // Dispatches to the typed overload when possible, otherwise falls back
    // to reflective field-by-field merging.
    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.dataproc.v1.GkeNodePoolTarget) {
        return mergeFrom((com.google.cloud.dataproc.v1.GkeNodePoolTarget) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }
+
    // Field-wise merge: set fields in `other` overwrite scalars, repeated
    // fields are appended, and message fields are recursively merged.
    public Builder mergeFrom(com.google.cloud.dataproc.v1.GkeNodePoolTarget other) {
      if (other == com.google.cloud.dataproc.v1.GkeNodePoolTarget.getDefaultInstance()) return this;
      if (!other.getNodePool().isEmpty()) {
        nodePool_ = other.nodePool_;
        onChanged();
      }
      if (!other.roles_.isEmpty()) {
        if (roles_.isEmpty()) {
          // Adopt the other message's (immutable) list; clear the mutable bit
          // so a later mutation copies it first.
          roles_ = other.roles_;
          bitField0_ = (bitField0_ & ~0x00000001);
        } else {
          ensureRolesIsMutable();
          roles_.addAll(other.roles_);
        }
        onChanged();
      }
      if (other.hasNodePoolConfig()) {
        mergeNodePoolConfig(other.getNodePoolConfig());
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      // Proto3 message without required fields: always initialized.
      return true;
    }

    // Parses from the stream, merging whatever was read even when parsing
    // fails partway (the unfinished message is merged in the finally block).
    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      com.google.cloud.dataproc.v1.GkeNodePoolTarget parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        parsedMessage = (com.google.cloud.dataproc.v1.GkeNodePoolTarget) e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }
+
    // Bit 0 tracks whether roles_ is a private mutable copy.
    private int bitField0_;

    // String or ByteString, decoded lazily (same scheme as the message).
    private java.lang.Object nodePool_ = "";
    /**
     *
     *
     * <pre>
     * Required. The target GKE NodePool.
     * Format:
     * 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
     * </pre>
     *
     * <code>string node_pool = 1 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return The nodePool.
     */
    public java.lang.String getNodePool() {
      java.lang.Object ref = nodePool_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        // Cache the decoded String for subsequent reads.
        nodePool_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Required. The target GKE NodePool.
     * Format:
     * 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
     * </pre>
     *
     * <code>string node_pool = 1 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return The bytes for nodePool.
     */
    public com.google.protobuf.ByteString getNodePoolBytes() {
      java.lang.Object ref = nodePool_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        // Cache the encoded ByteString for subsequent byte-level reads.
        nodePool_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Required. The target GKE NodePool.
     * Format:
     * 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
     * </pre>
     *
     * <code>string node_pool = 1 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @param value The nodePool to set.
     * @return This builder for chaining.
     */
    public Builder setNodePool(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }

      nodePool_ = value;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. The target GKE NodePool.
     * Format:
     * 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
     * </pre>
     *
     * <code>string node_pool = 1 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearNodePool() {
      // Resets to the field default (empty string).
      nodePool_ = getDefaultInstance().getNodePool();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. The target GKE NodePool.
     * Format:
     * 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
     * </pre>
     *
     * <code>string node_pool = 1 [(.google.api.field_behavior) = REQUIRED];</code>
     *
     * @param value The bytes for nodePool to set.
     * @return This builder for chaining.
     */
    public Builder setNodePoolBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      // Proto3 strings must be valid UTF-8; reject invalid bytes eagerly.
      checkByteStringIsUtf8(value);

      nodePool_ = value;
      onChanged();
      return this;
    }
+
+ private java.util.List roles_ = java.util.Collections.emptyList();
+
+ private void ensureRolesIsMutable() {
+ if (!((bitField0_ & 0x00000001) != 0)) {
+ roles_ = new java.util.ArrayList(roles_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return A list containing the roles.
+ */
+ public java.util.List getRolesList() {
+ return new com.google.protobuf.Internal.ListAdapter<
+ java.lang.Integer, com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role>(
+ roles_, roles_converter_);
+ }
    /**
     *
     *
     * <pre>
     * Required. The types of role for a GKE NodePool
     * </pre>
     *
     * <code>
     * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @return The count of roles.
     */
    public int getRolesCount() {
      return roles_.size();
    }
    /**
     *
     *
     * <pre>
     * Required. The types of role for a GKE NodePool
     * </pre>
     *
     * <code>
     * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @param index The index of the element to return.
     * @return The roles at the given index.
     */
    public com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role getRoles(int index) {
      // Converts the stored wire integer to the Role constant on demand.
      return roles_converter_.convert(roles_.get(index));
    }
    /**
     *
     *
     * <pre>
     * Required. The types of role for a GKE NodePool
     * </pre>
     *
     * <code>
     * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @param index The index to set the value at.
     * @param value The roles to set.
     * @return This builder for chaining.
     */
    public Builder setRoles(int index, com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role value) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureRolesIsMutable();
      // Stored as the wire number, not the enum constant.
      roles_.set(index, value.getNumber());
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. The types of role for a GKE NodePool
     * </pre>
     *
     * <code>
     * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @param value The roles to add.
     * @return This builder for chaining.
     */
    public Builder addRoles(com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role value) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureRolesIsMutable();
      roles_.add(value.getNumber());
      onChanged();
      return this;
    }
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @param values The roles to add.
+ * @return This builder for chaining.
+ */
+ public Builder addAllRoles(
+ java.lang.Iterable extends com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role> values) {
+ ensureRolesIsMutable();
+ for (com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role value : values) {
+ roles_.add(value.getNumber());
+ }
+ onChanged();
+ return this;
+ }
    /**
     *
     *
     * <pre>
     * Required. The types of role for a GKE NodePool
     * </pre>
     *
     * <code>
     * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @return This builder for chaining.
     */
    public Builder clearRoles() {
      roles_ = java.util.Collections.emptyList();
      // Clear the "mutable copy" bit: the empty list is shared/immutable.
      bitField0_ = (bitField0_ & ~0x00000001);
      onChanged();
      return this;
    }
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return A list containing the enum numeric values on the wire for roles.
+ */
+ public java.util.List getRolesValueList() {
+ return java.util.Collections.unmodifiableList(roles_);
+ }
    /**
     *
     *
     * <pre>
     * Required. The types of role for a GKE NodePool
     * </pre>
     *
     * <code>
     * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @param index The index of the value to return.
     * @return The enum numeric value on the wire of roles at the given index.
     */
    public int getRolesValue(int index) {
      return roles_.get(index);
    }
    /**
     *
     *
     * <pre>
     * Required. The types of role for a GKE NodePool
     * </pre>
     *
     * <code>
     * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @param index The index to set the value at.
     * @param value The enum numeric value on the wire for roles to set.
     * @return This builder for chaining.
     */
    public Builder setRolesValue(int index, int value) {
      // Raw-value setter: accepts numbers with no matching Role constant.
      ensureRolesIsMutable();
      roles_.set(index, value);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Required. The types of role for a GKE NodePool
     * </pre>
     *
     * <code>
     * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
     * </code>
     *
     * @param value The enum numeric value on the wire for roles to add.
     * @return This builder for chaining.
     */
    public Builder addRolesValue(int value) {
      ensureRolesIsMutable();
      roles_.add(value);
      onChanged();
      return this;
    }
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @param values The enum numeric values on the wire for roles to add.
+ * @return This builder for chaining.
+ */
+ public Builder addAllRolesValue(java.lang.Iterable values) {
+ ensureRolesIsMutable();
+ for (int value : values) {
+ roles_.add(value);
+ }
+ onChanged();
+ return this;
+ }
+
    // Exactly one of nodePoolConfig_ / nodePoolConfigBuilder_ is active:
    // the plain field until getNodePoolConfigBuilder() is first called,
    // the nested builder afterwards.
    private com.google.cloud.dataproc.v1.GkeNodePoolConfig nodePoolConfig_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.dataproc.v1.GkeNodePoolConfig,
            com.google.cloud.dataproc.v1.GkeNodePoolConfig.Builder,
            com.google.cloud.dataproc.v1.GkeNodePoolConfigOrBuilder>
        nodePoolConfigBuilder_;
    /**
     *
     *
     * <pre>
     * Optional. The configuration for the GKE NodePool.
     * If specified, Dataproc attempts to create a NodePool with the
     * specified shape. If one with the same name already exists, it is
     * verified against all specified fields. If a field differs, the
     * virtual cluster creation will fail.
     * If omitted, any NodePool with the specified name is used. If a
     * NodePool with the specified name does not exist, Dataproc create a NodePool
     * with default values.
     * </pre>
     *
     * <code>
     * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return Whether the nodePoolConfig field is set.
     */
    public boolean hasNodePoolConfig() {
      return nodePoolConfigBuilder_ != null || nodePoolConfig_ != null;
    }
    /**
     *
     *
     * <pre>
     * Optional. The configuration for the GKE NodePool.
     * If specified, Dataproc attempts to create a NodePool with the
     * specified shape. If one with the same name already exists, it is
     * verified against all specified fields. If a field differs, the
     * virtual cluster creation will fail.
     * If omitted, any NodePool with the specified name is used. If a
     * NodePool with the specified name does not exist, Dataproc create a NodePool
     * with default values.
     * </pre>
     *
     * <code>
     * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
     * </code>
     *
     * @return The nodePoolConfig.
     */
    public com.google.cloud.dataproc.v1.GkeNodePoolConfig getNodePoolConfig() {
      if (nodePoolConfigBuilder_ == null) {
        return nodePoolConfig_ == null
            ? com.google.cloud.dataproc.v1.GkeNodePoolConfig.getDefaultInstance()
            : nodePoolConfig_;
      } else {
        return nodePoolConfigBuilder_.getMessage();
      }
    }
    /**
     *
     *
     * <pre>
     * Optional. The configuration for the GKE NodePool.
     * If specified, Dataproc attempts to create a NodePool with the
     * specified shape. If one with the same name already exists, it is
     * verified against all specified fields. If a field differs, the
     * virtual cluster creation will fail.
     * If omitted, any NodePool with the specified name is used. If a
     * NodePool with the specified name does not exist, Dataproc create a NodePool
     * with default values.
     * </pre>
     *
     * <code>
     * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
     * </code>
     */
    public Builder setNodePoolConfig(com.google.cloud.dataproc.v1.GkeNodePoolConfig value) {
      if (nodePoolConfigBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        nodePoolConfig_ = value;
        onChanged();
      } else {
        nodePoolConfigBuilder_.setMessage(value);
      }

      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. The configuration for the GKE NodePool.
     * If specified, Dataproc attempts to create a NodePool with the
     * specified shape. If one with the same name already exists, it is
     * verified against all specified fields. If a field differs, the
     * virtual cluster creation will fail.
     * If omitted, any NodePool with the specified name is used. If a
     * NodePool with the specified name does not exist, Dataproc create a NodePool
     * with default values.
     * </pre>
     *
     * <code>
     * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
     * </code>
     */
    public Builder setNodePoolConfig(
        com.google.cloud.dataproc.v1.GkeNodePoolConfig.Builder builderForValue) {
      if (nodePoolConfigBuilder_ == null) {
        nodePoolConfig_ = builderForValue.build();
        onChanged();
      } else {
        nodePoolConfigBuilder_.setMessage(builderForValue.build());
      }

      return this;
    }
+ /**
+ *
+ *
+ *
+ * Optional. The configuration for the GKE NodePool.
+ * If specified, Dataproc attempts to create a NodePool with the
+ * specified shape. If one with the same name already exists, it is
+ * verified against all specified fields. If a field differs, the
+ * virtual cluster creation will fail.
+ * If omitted, any NodePool with the specified name is used. If a
+ * NodePool with the specified name does not exist, Dataproc create a NodePool
+ * with default values.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder mergeNodePoolConfig(com.google.cloud.dataproc.v1.GkeNodePoolConfig value) {
+ if (nodePoolConfigBuilder_ == null) {
+ if (nodePoolConfig_ != null) {
+ nodePoolConfig_ =
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.newBuilder(nodePoolConfig_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ nodePoolConfig_ = value;
+ }
+ onChanged();
+ } else {
+ nodePoolConfigBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The configuration for the GKE NodePool.
+ * If specified, Dataproc attempts to create a NodePool with the
+ * specified shape. If one with the same name already exists, it is
+ * verified against all specified fields. If a field differs, the
+ * virtual cluster creation will fail.
+ * If omitted, any NodePool with the specified name is used. If a
+ * NodePool with the specified name does not exist, Dataproc creates a NodePool
+ * with default values.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder clearNodePoolConfig() {
+ if (nodePoolConfigBuilder_ == null) {
+ nodePoolConfig_ = null;
+ onChanged();
+ } else {
+ nodePoolConfig_ = null;
+ nodePoolConfigBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The configuration for the GKE NodePool.
+ * If specified, Dataproc attempts to create a NodePool with the
+ * specified shape. If one with the same name already exists, it is
+ * verified against all specified fields. If a field differs, the
+ * virtual cluster creation will fail.
+ * If omitted, any NodePool with the specified name is used. If a
+ * NodePool with the specified name does not exist, Dataproc creates a NodePool
+ * with default values.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfig.Builder getNodePoolConfigBuilder() {
+
+ onChanged();
+ return getNodePoolConfigFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The configuration for the GKE NodePool.
+ * If specified, Dataproc attempts to create a NodePool with the
+ * specified shape. If one with the same name already exists, it is
+ * verified against all specified fields. If a field differs, the
+ * virtual cluster creation will fail.
+ * If omitted, any NodePool with the specified name is used. If a
+ * NodePool with the specified name does not exist, Dataproc creates a NodePool
+ * with default values.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeNodePoolConfigOrBuilder getNodePoolConfigOrBuilder() {
+ if (nodePoolConfigBuilder_ != null) {
+ return nodePoolConfigBuilder_.getMessageOrBuilder();
+ } else {
+ return nodePoolConfig_ == null
+ ? com.google.cloud.dataproc.v1.GkeNodePoolConfig.getDefaultInstance()
+ : nodePoolConfig_;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The configuration for the GKE NodePool.
+ * If specified, Dataproc attempts to create a NodePool with the
+ * specified shape. If one with the same name already exists, it is
+ * verified against all specified fields. If a field differs, the
+ * virtual cluster creation will fail.
+ * If omitted, any NodePool with the specified name is used. If a
+ * NodePool with the specified name does not exist, Dataproc creates a NodePool
+ * with default values.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfigOrBuilder>
+ getNodePoolConfigFieldBuilder() {
+ if (nodePoolConfigBuilder_ == null) {
+ nodePoolConfigBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeNodePoolConfigOrBuilder>(
+ getNodePoolConfig(), getParentForChildren(), isClean());
+ nodePoolConfig_ = null;
+ }
+ return nodePoolConfigBuilder_;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.GkeNodePoolTarget)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GkeNodePoolTarget)
+ private static final com.google.cloud.dataproc.v1.GkeNodePoolTarget DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.GkeNodePoolTarget();
+ }
+
+ public static com.google.cloud.dataproc.v1.GkeNodePoolTarget getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser<GkeNodePoolTarget> PARSER =
+ new com.google.protobuf.AbstractParser<GkeNodePoolTarget>() {
+ @java.lang.Override
+ public GkeNodePoolTarget parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GkeNodePoolTarget(input, extensionRegistry);
+ }
+ };
+
+ public static com.google.protobuf.Parser<GkeNodePoolTarget> parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<GkeNodePoolTarget> getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeNodePoolTarget getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolTargetOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolTargetOrBuilder.java
new file mode 100644
index 00000000..64b301fb
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GkeNodePoolTargetOrBuilder.java
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/shared.proto
+
+package com.google.cloud.dataproc.v1;
+
+public interface GkeNodePoolTargetOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.GkeNodePoolTarget)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ *
+ *
+ *
+ * Required. The target GKE NodePool.
+ * Format:
+ * 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
+ *
+ *
+ * string node_pool = 1 [(.google.api.field_behavior) = REQUIRED];
+ *
+ * @return The nodePool.
+ */
+ java.lang.String getNodePool();
+ /**
+ *
+ *
+ *
+ * Required. The target GKE NodePool.
+ * Format:
+ * 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
+ *
+ *
+ * string node_pool = 1 [(.google.api.field_behavior) = REQUIRED];
+ *
+ * @return The bytes for nodePool.
+ */
+ com.google.protobuf.ByteString getNodePoolBytes();
+
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return A list containing the roles.
+ */
+ java.util.List<com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role> getRolesList();
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return The count of roles.
+ */
+ int getRolesCount();
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @param index The index of the element to return.
+ * @return The roles at the given index.
+ */
+ com.google.cloud.dataproc.v1.GkeNodePoolTarget.Role getRoles(int index);
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return A list containing the enum numeric values on the wire for roles.
+ */
+ java.util.List<java.lang.Integer> getRolesValueList();
+ /**
+ *
+ *
+ *
+ * Required. The types of role for a GKE NodePool
+ *
+ *
+ *
+ * repeated .google.cloud.dataproc.v1.GkeNodePoolTarget.Role roles = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @param index The index of the value to return.
+ * @return The enum numeric value on the wire of roles at the given index.
+ */
+ int getRolesValue(int index);
+
+ /**
+ *
+ *
+ *
+ * Optional. The configuration for the GKE NodePool.
+ * If specified, Dataproc attempts to create a NodePool with the
+ * specified shape. If one with the same name already exists, it is
+ * verified against all specified fields. If a field differs, the
+ * virtual cluster creation will fail.
+ * If omitted, any NodePool with the specified name is used. If a
+ * NodePool with the specified name does not exist, Dataproc creates a NodePool
+ * with default values.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the nodePoolConfig field is set.
+ */
+ boolean hasNodePoolConfig();
+ /**
+ *
+ *
+ *
+ * Optional. The configuration for the GKE NodePool.
+ * If specified, Dataproc attempts to create a NodePool with the
+ * specified shape. If one with the same name already exists, it is
+ * verified against all specified fields. If a field differs, the
+ * virtual cluster creation will fail.
+ * If omitted, any NodePool with the specified name is used. If a
+ * NodePool with the specified name does not exist, Dataproc creates a NodePool
+ * with default values.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The nodePoolConfig.
+ */
+ com.google.cloud.dataproc.v1.GkeNodePoolConfig getNodePoolConfig();
+ /**
+ *
+ *
+ *
+ * Optional. The configuration for the GKE NodePool.
+ * If specified, Dataproc attempts to create a NodePool with the
+ * specified shape. If one with the same name already exists, it is
+ * verified against all specified fields. If a field differs, the
+ * virtual cluster creation will fail.
+ * If omitted, any NodePool with the specified name is used. If a
+ * NodePool with the specified name does not exist, Dataproc create a NodePool
+ * with default values.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeNodePoolConfig node_pool_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ com.google.cloud.dataproc.v1.GkeNodePoolConfigOrBuilder getNodePoolConfigOrBuilder();
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesClusterConfig.java
new file mode 100644
index 00000000..73587a1b
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesClusterConfig.java
@@ -0,0 +1,1371 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/shared.proto
+
+package com.google.cloud.dataproc.v1;
+
+/**
+ *
+ *
+ *
+ * The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.KubernetesClusterConfig}
+ */
+public final class KubernetesClusterConfig extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.KubernetesClusterConfig)
+ KubernetesClusterConfigOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use KubernetesClusterConfig.newBuilder() to construct.
+ private KubernetesClusterConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+
+ private KubernetesClusterConfig() {
+ kubernetesNamespace_ = "";
+ }
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new KubernetesClusterConfig();
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ private KubernetesClusterConfig(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 10:
+ {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ kubernetesNamespace_ = s;
+ break;
+ }
+ case 18:
+ {
+ com.google.cloud.dataproc.v1.GkeClusterConfig.Builder subBuilder = null;
+ if (configCase_ == 2) {
+ subBuilder = ((com.google.cloud.dataproc.v1.GkeClusterConfig) config_).toBuilder();
+ }
+ config_ =
+ input.readMessage(
+ com.google.cloud.dataproc.v1.GkeClusterConfig.parser(), extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom((com.google.cloud.dataproc.v1.GkeClusterConfig) config_);
+ config_ = subBuilder.buildPartial();
+ }
+ configCase_ = 2;
+ break;
+ }
+ case 26:
+ {
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.Builder subBuilder = null;
+ if (kubernetesSoftwareConfig_ != null) {
+ subBuilder = kubernetesSoftwareConfig_.toBuilder();
+ }
+ kubernetesSoftwareConfig_ =
+ input.readMessage(
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.parser(),
+ extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(kubernetesSoftwareConfig_);
+ kubernetesSoftwareConfig_ = subBuilder.buildPartial();
+ }
+
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesClusterConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesClusterConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig.class,
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig.Builder.class);
+ }
+
+ private int configCase_ = 0;
+ private java.lang.Object config_;
+
+ public enum ConfigCase
+ implements
+ com.google.protobuf.Internal.EnumLite,
+ com.google.protobuf.AbstractMessage.InternalOneOfEnum {
+ GKE_CLUSTER_CONFIG(2),
+ CONFIG_NOT_SET(0);
+ private final int value;
+
+ private ConfigCase(int value) {
+ this.value = value;
+ }
+ /**
+ * @param value The number of the enum to look for.
+ * @return The enum associated with the given number.
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+ @java.lang.Deprecated
+ public static ConfigCase valueOf(int value) {
+ return forNumber(value);
+ }
+
+ public static ConfigCase forNumber(int value) {
+ switch (value) {
+ case 2:
+ return GKE_CLUSTER_CONFIG;
+ case 0:
+ return CONFIG_NOT_SET;
+ default:
+ return null;
+ }
+ }
+
+ public int getNumber() {
+ return this.value;
+ }
+ };
+
+ public ConfigCase getConfigCase() {
+ return ConfigCase.forNumber(configCase_);
+ }
+
+ public static final int KUBERNETES_NAMESPACE_FIELD_NUMBER = 1;
+ private volatile java.lang.Object kubernetesNamespace_;
+ /**
+ *
+ *
+ *
+ * Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
+ * does not exist, it is created. If it exists, Dataproc
+ * verifies that another Dataproc VirtualCluster is not installed
+ * into it. If not specified, the name of the Dataproc Cluster is used.
+ *
+ *
+ * string kubernetes_namespace = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The kubernetesNamespace.
+ */
+ @java.lang.Override
+ public java.lang.String getKubernetesNamespace() {
+ java.lang.Object ref = kubernetesNamespace_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ kubernetesNamespace_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
+ * does not exist, it is created. If it exists, Dataproc
+ * verifies that another Dataproc VirtualCluster is not installed
+ * into it. If not specified, the name of the Dataproc Cluster is used.
+ *
+ *
+ * string kubernetes_namespace = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for kubernetesNamespace.
+ */
+ @java.lang.Override
+ public com.google.protobuf.ByteString getKubernetesNamespaceBytes() {
+ java.lang.Object ref = kubernetesNamespace_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ kubernetesNamespace_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int GKE_CLUSTER_CONFIG_FIELD_NUMBER = 2;
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return Whether the gkeClusterConfig field is set.
+ */
+ @java.lang.Override
+ public boolean hasGkeClusterConfig() {
+ return configCase_ == 2;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return The gkeClusterConfig.
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeClusterConfig getGkeClusterConfig() {
+ if (configCase_ == 2) {
+ return (com.google.cloud.dataproc.v1.GkeClusterConfig) config_;
+ }
+ return com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance();
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder getGkeClusterConfigOrBuilder() {
+ if (configCase_ == 2) {
+ return (com.google.cloud.dataproc.v1.GkeClusterConfig) config_;
+ }
+ return com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance();
+ }
+
+ public static final int KUBERNETES_SOFTWARE_CONFIG_FIELD_NUMBER = 3;
+ private com.google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetesSoftwareConfig_;
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the kubernetesSoftwareConfig field is set.
+ */
+ @java.lang.Override
+ public boolean hasKubernetesSoftwareConfig() {
+ return kubernetesSoftwareConfig_ != null;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The kubernetesSoftwareConfig.
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesSoftwareConfig getKubernetesSoftwareConfig() {
+ return kubernetesSoftwareConfig_ == null
+ ? com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.getDefaultInstance()
+ : kubernetesSoftwareConfig_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesSoftwareConfigOrBuilder
+ getKubernetesSoftwareConfigOrBuilder() {
+ return getKubernetesSoftwareConfig();
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kubernetesNamespace_)) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, kubernetesNamespace_);
+ }
+ if (configCase_ == 2) {
+ output.writeMessage(2, (com.google.cloud.dataproc.v1.GkeClusterConfig) config_);
+ }
+ if (kubernetesSoftwareConfig_ != null) {
+ output.writeMessage(3, getKubernetesSoftwareConfig());
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kubernetesNamespace_)) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, kubernetesNamespace_);
+ }
+ if (configCase_ == 2) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeMessageSize(
+ 2, (com.google.cloud.dataproc.v1.GkeClusterConfig) config_);
+ }
+ if (kubernetesSoftwareConfig_ != null) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeMessageSize(
+ 3, getKubernetesSoftwareConfig());
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.dataproc.v1.KubernetesClusterConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig other =
+ (com.google.cloud.dataproc.v1.KubernetesClusterConfig) obj;
+
+ if (!getKubernetesNamespace().equals(other.getKubernetesNamespace())) return false;
+ if (hasKubernetesSoftwareConfig() != other.hasKubernetesSoftwareConfig()) return false;
+ if (hasKubernetesSoftwareConfig()) {
+ if (!getKubernetesSoftwareConfig().equals(other.getKubernetesSoftwareConfig())) return false;
+ }
+ if (!getConfigCase().equals(other.getConfigCase())) return false;
+ switch (configCase_) {
+ case 2:
+ if (!getGkeClusterConfig().equals(other.getGkeClusterConfig())) return false;
+ break;
+ case 0:
+ default:
+ }
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + KUBERNETES_NAMESPACE_FIELD_NUMBER;
+ hash = (53 * hash) + getKubernetesNamespace().hashCode();
+ if (hasKubernetesSoftwareConfig()) {
+ hash = (37 * hash) + KUBERNETES_SOFTWARE_CONFIG_FIELD_NUMBER;
+ hash = (53 * hash) + getKubernetesSoftwareConfig().hashCode();
+ }
+ switch (configCase_) {
+ case 2:
+ hash = (37 * hash) + GKE_CLUSTER_CONFIG_FIELD_NUMBER;
+ hash = (53 * hash) + getGkeClusterConfig().hashCode();
+ break;
+ case 0:
+ default:
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(com.google.cloud.dataproc.v1.KubernetesClusterConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.KubernetesClusterConfig}
+ */
+ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
+ implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.KubernetesClusterConfig)
+ com.google.cloud.dataproc.v1.KubernetesClusterConfigOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesClusterConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesClusterConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig.class,
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig.Builder.class);
+ }
+
+ // Construct using com.google.cloud.dataproc.v1.KubernetesClusterConfig.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ kubernetesNamespace_ = "";
+
+ if (kubernetesSoftwareConfigBuilder_ == null) {
+ kubernetesSoftwareConfig_ = null;
+ } else {
+ kubernetesSoftwareConfig_ = null;
+ kubernetesSoftwareConfigBuilder_ = null;
+ }
+ configCase_ = 0;
+ config_ = null;
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesClusterConfig_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesClusterConfig getDefaultInstanceForType() {
+ return com.google.cloud.dataproc.v1.KubernetesClusterConfig.getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesClusterConfig build() {
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesClusterConfig buildPartial() {
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig result =
+ new com.google.cloud.dataproc.v1.KubernetesClusterConfig(this);
+ result.kubernetesNamespace_ = kubernetesNamespace_;
+ // configCase_ == 2 means gke_cluster_config is the active member of the `config` oneof;
+ // copy it either from the raw object or from the lazily created field builder.
+ if (configCase_ == 2) {
+ if (gkeClusterConfigBuilder_ == null) {
+ result.config_ = config_;
+ } else {
+ result.config_ = gkeClusterConfigBuilder_.build();
+ }
+ }
+ if (kubernetesSoftwareConfigBuilder_ == null) {
+ result.kubernetesSoftwareConfig_ = kubernetesSoftwareConfig_;
+ } else {
+ result.kubernetesSoftwareConfig_ = kubernetesSoftwareConfigBuilder_.build();
+ }
+ result.configCase_ = configCase_;
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.cloud.dataproc.v1.KubernetesClusterConfig) {
+ return mergeFrom((com.google.cloud.dataproc.v1.KubernetesClusterConfig) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(com.google.cloud.dataproc.v1.KubernetesClusterConfig other) {
+ if (other == com.google.cloud.dataproc.v1.KubernetesClusterConfig.getDefaultInstance())
+ return this;
+ // Proto3 merge semantics: a non-default scalar in `other` overwrites ours.
+ if (!other.getKubernetesNamespace().isEmpty()) {
+ kubernetesNamespace_ = other.kubernetesNamespace_;
+ onChanged();
+ }
+ if (other.hasKubernetesSoftwareConfig()) {
+ mergeKubernetesSoftwareConfig(other.getKubernetesSoftwareConfig());
+ }
+ // Oneof merge: only the case set in `other` is merged in; CONFIG_NOT_SET is a no-op.
+ switch (other.getConfigCase()) {
+ case GKE_CLUSTER_CONFIG:
+ {
+ mergeGkeClusterConfig(other.getGkeClusterConfig());
+ break;
+ }
+ case CONFIG_NOT_SET:
+ {
+ break;
+ }
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ // No required fields in this proto3 message, so it is always initialized.
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ // Parse a whole message from the stream, then merge it into this builder.
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ // Preserve whatever was parsed before the failure so the finally-block can
+ // still merge the partial message, then rethrow as a plain IOException.
+ parsedMessage =
+ (com.google.cloud.dataproc.v1.KubernetesClusterConfig) e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ // Discriminator for the `config` oneof: 0 = CONFIG_NOT_SET, 2 = gke_cluster_config.
+ private int configCase_ = 0;
+ // Holds the value of whichever oneof member is set (untyped, cast on access).
+ private java.lang.Object config_;
+
+ public ConfigCase getConfigCase() {
+ return ConfigCase.forNumber(configCase_);
+ }
+
+ // Clears the entire `config` oneof back to CONFIG_NOT_SET.
+ public Builder clearConfig() {
+ configCase_ = 0;
+ config_ = null;
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object kubernetesNamespace_ = "";
+ /**
+ *
+ *
+ *
+ * Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
+ * does not exist, it is created. If it exists, Dataproc
+ * verifies that another Dataproc VirtualCluster is not installed
+ * into it. If not specified, the name of the Dataproc Cluster is used.
+ *
+ *
+ * string kubernetes_namespace = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The kubernetesNamespace.
+ */
+ public java.lang.String getKubernetesNamespace() {
+ java.lang.Object ref = kubernetesNamespace_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ kubernetesNamespace_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
+ * does not exist, it is created. If it exists, Dataproc
+ * verifies that another Dataproc VirtualCluster is not installed
+ * into it. If not specified, the name of the Dataproc Cluster is used.
+ *
+ *
+ * string kubernetes_namespace = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for kubernetesNamespace.
+ */
+ public com.google.protobuf.ByteString getKubernetesNamespaceBytes() {
+ java.lang.Object ref = kubernetesNamespace_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ kubernetesNamespace_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
+ * does not exist, it is created. If it exists, Dataproc
+ * verifies that another Dataproc VirtualCluster is not installed
+ * into it. If not specified, the name of the Dataproc Cluster is used.
+ *
+ *
+ * string kubernetes_namespace = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The kubernetesNamespace to set.
+ * @return This builder for chaining.
+ */
+ public Builder setKubernetesNamespace(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ kubernetesNamespace_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
+ * does not exist, it is created. If it exists, Dataproc
+ * verifies that another Dataproc VirtualCluster is not installed
+ * into it. If not specified, the name of the Dataproc Cluster is used.
+ *
+ *
+ * string kubernetes_namespace = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearKubernetesNamespace() {
+
+ kubernetesNamespace_ = getDefaultInstance().getKubernetesNamespace();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
+ * does not exist, it is created. If it exists, Dataproc
+ * verifies that another Dataproc VirtualCluster is not installed
+ * into it. If not specified, the name of the Dataproc Cluster is used.
+ *
+ *
+ * string kubernetes_namespace = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The bytes for kubernetesNamespace to set.
+ * @return This builder for chaining.
+ */
+ public Builder setKubernetesNamespaceBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ kubernetesNamespace_ = value;
+ onChanged();
+ return this;
+ }
+
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeClusterConfig,
+ com.google.cloud.dataproc.v1.GkeClusterConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder>
+ gkeClusterConfigBuilder_;
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return Whether the gkeClusterConfig field is set.
+ */
+ @java.lang.Override
+ public boolean hasGkeClusterConfig() {
+ return configCase_ == 2;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return The gkeClusterConfig.
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeClusterConfig getGkeClusterConfig() {
+ if (gkeClusterConfigBuilder_ == null) {
+ if (configCase_ == 2) {
+ return (com.google.cloud.dataproc.v1.GkeClusterConfig) config_;
+ }
+ return com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance();
+ } else {
+ if (configCase_ == 2) {
+ return gkeClusterConfigBuilder_.getMessage();
+ }
+ return com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ public Builder setGkeClusterConfig(com.google.cloud.dataproc.v1.GkeClusterConfig value) {
+ if (gkeClusterConfigBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ config_ = value;
+ onChanged();
+ } else {
+ gkeClusterConfigBuilder_.setMessage(value);
+ }
+ configCase_ = 2;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ public Builder setGkeClusterConfig(
+ com.google.cloud.dataproc.v1.GkeClusterConfig.Builder builderForValue) {
+ if (gkeClusterConfigBuilder_ == null) {
+ config_ = builderForValue.build();
+ onChanged();
+ } else {
+ gkeClusterConfigBuilder_.setMessage(builderForValue.build());
+ }
+ configCase_ = 2;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ public Builder mergeGkeClusterConfig(com.google.cloud.dataproc.v1.GkeClusterConfig value) {
+ if (gkeClusterConfigBuilder_ == null) {
+ // Reference (==) comparison against the singleton default instance is the
+ // generated-code idiom: only merge if a real value is already stored in the oneof.
+ if (configCase_ == 2
+ && config_ != com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance()) {
+ config_ =
+ com.google.cloud.dataproc.v1.GkeClusterConfig.newBuilder(
+ (com.google.cloud.dataproc.v1.GkeClusterConfig) config_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ config_ = value;
+ }
+ onChanged();
+ } else {
+ // NOTE(review): setMessage(value) runs unconditionally after mergeFrom(value),
+ // which matches the older protoc Java gencode pattern for oneof message fields —
+ // confirm this file tracks the generator version rather than hand-editing it.
+ if (configCase_ == 2) {
+ gkeClusterConfigBuilder_.mergeFrom(value);
+ }
+ gkeClusterConfigBuilder_.setMessage(value);
+ }
+ configCase_ = 2;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ public Builder clearGkeClusterConfig() {
+ if (gkeClusterConfigBuilder_ == null) {
+ if (configCase_ == 2) {
+ configCase_ = 0;
+ config_ = null;
+ onChanged();
+ }
+ } else {
+ if (configCase_ == 2) {
+ configCase_ = 0;
+ config_ = null;
+ }
+ gkeClusterConfigBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ public com.google.cloud.dataproc.v1.GkeClusterConfig.Builder getGkeClusterConfigBuilder() {
+ return getGkeClusterConfigFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder getGkeClusterConfigOrBuilder() {
+ if ((configCase_ == 2) && (gkeClusterConfigBuilder_ != null)) {
+ return gkeClusterConfigBuilder_.getMessageOrBuilder();
+ } else {
+ if (configCase_ == 2) {
+ return (com.google.cloud.dataproc.v1.GkeClusterConfig) config_;
+ }
+ return com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ // Lazily creates the SingleFieldBuilderV3 for gke_cluster_config, seeding it with the
+ // current oneof value (or the default instance if the oneof holds something else),
+ // then marks the oneof as set to this field (case 2).
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeClusterConfig,
+ com.google.cloud.dataproc.v1.GkeClusterConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder>
+ getGkeClusterConfigFieldBuilder() {
+ if (gkeClusterConfigBuilder_ == null) {
+ if (!(configCase_ == 2)) {
+ config_ = com.google.cloud.dataproc.v1.GkeClusterConfig.getDefaultInstance();
+ }
+ gkeClusterConfigBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.GkeClusterConfig,
+ com.google.cloud.dataproc.v1.GkeClusterConfig.Builder,
+ com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder>(
+ (com.google.cloud.dataproc.v1.GkeClusterConfig) config_,
+ getParentForChildren(),
+ isClean());
+ // Ownership of the value moves into the field builder.
+ config_ = null;
+ }
+ configCase_ = 2;
+ onChanged();
+ ;
+ return gkeClusterConfigBuilder_;
+ }
+
+ private com.google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetesSoftwareConfig_;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig,
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.Builder,
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfigOrBuilder>
+ kubernetesSoftwareConfigBuilder_;
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the kubernetesSoftwareConfig field is set.
+ */
+ public boolean hasKubernetesSoftwareConfig() {
+ return kubernetesSoftwareConfigBuilder_ != null || kubernetesSoftwareConfig_ != null;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The kubernetesSoftwareConfig.
+ */
+ public com.google.cloud.dataproc.v1.KubernetesSoftwareConfig getKubernetesSoftwareConfig() {
+ if (kubernetesSoftwareConfigBuilder_ == null) {
+ return kubernetesSoftwareConfig_ == null
+ ? com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.getDefaultInstance()
+ : kubernetesSoftwareConfig_;
+ } else {
+ return kubernetesSoftwareConfigBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setKubernetesSoftwareConfig(
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig value) {
+ if (kubernetesSoftwareConfigBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ kubernetesSoftwareConfig_ = value;
+ onChanged();
+ } else {
+ kubernetesSoftwareConfigBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setKubernetesSoftwareConfig(
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.Builder builderForValue) {
+ if (kubernetesSoftwareConfigBuilder_ == null) {
+ kubernetesSoftwareConfig_ = builderForValue.build();
+ onChanged();
+ } else {
+ kubernetesSoftwareConfigBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder mergeKubernetesSoftwareConfig(
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig value) {
+ if (kubernetesSoftwareConfigBuilder_ == null) {
+ // No field builder yet: merge message-to-message if a value exists,
+ // otherwise just adopt the incoming value.
+ if (kubernetesSoftwareConfig_ != null) {
+ kubernetesSoftwareConfig_ =
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.newBuilder(
+ kubernetesSoftwareConfig_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ kubernetesSoftwareConfig_ = value;
+ }
+ onChanged();
+ } else {
+ // A live field builder owns the state; delegate the merge to it.
+ kubernetesSoftwareConfigBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder clearKubernetesSoftwareConfig() {
+ if (kubernetesSoftwareConfigBuilder_ == null) {
+ kubernetesSoftwareConfig_ = null;
+ onChanged();
+ } else {
+ kubernetesSoftwareConfig_ = null;
+ kubernetesSoftwareConfigBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.Builder
+ getKubernetesSoftwareConfigBuilder() {
+
+ onChanged();
+ return getKubernetesSoftwareConfigFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.KubernetesSoftwareConfigOrBuilder
+ getKubernetesSoftwareConfigOrBuilder() {
+ if (kubernetesSoftwareConfigBuilder_ != null) {
+ return kubernetesSoftwareConfigBuilder_.getMessageOrBuilder();
+ } else {
+ return kubernetesSoftwareConfig_ == null
+ ? com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.getDefaultInstance()
+ : kubernetesSoftwareConfig_;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ // Lazily creates the SingleFieldBuilderV3 for kubernetes_software_config, seeded with
+ // the current message value; after creation the plain field reference is nulled out
+ // because the builder becomes the single owner of the state.
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig,
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.Builder,
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfigOrBuilder>
+ getKubernetesSoftwareConfigFieldBuilder() {
+ if (kubernetesSoftwareConfigBuilder_ == null) {
+ kubernetesSoftwareConfigBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig,
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.Builder,
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfigOrBuilder>(
+ getKubernetesSoftwareConfig(), getParentForChildren(), isClean());
+ kubernetesSoftwareConfig_ = null;
+ }
+ return kubernetesSoftwareConfigBuilder_;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.KubernetesClusterConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.KubernetesClusterConfig)
+ // Singleton default instance; == comparisons elsewhere in the gencode rely on its identity.
+ private static final com.google.cloud.dataproc.v1.KubernetesClusterConfig DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.KubernetesClusterConfig();
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesClusterConfig getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ // Wire parser delegating to the parsing constructor.
+ // NOTE(review): type parameters appear stripped here (raw Parser/AbstractParser) —
+ // presumably <KubernetesClusterConfig> was lost in extraction; verify against the
+ // original generated file before applying this patch.
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public KubernetesClusterConfig parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new KubernetesClusterConfig(input, extensionRegistry);
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesClusterConfig getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesClusterConfigOrBuilder.java
new file mode 100644
index 00000000..ab45cd88
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesClusterConfigOrBuilder.java
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/shared.proto
+
+package com.google.cloud.dataproc.v1;
+
+public interface KubernetesClusterConfigOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.KubernetesClusterConfig)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ *
+ *
+ *
+ * Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
+ * does not exist, it is created. If it exists, Dataproc
+ * verifies that another Dataproc VirtualCluster is not installed
+ * into it. If not specified, the name of the Dataproc Cluster is used.
+ *
+ *
+ * string kubernetes_namespace = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The kubernetesNamespace.
+ */
+ java.lang.String getKubernetesNamespace();
+ /**
+ *
+ *
+ *
+ * Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
+ * does not exist, it is created. If it exists, Dataproc
+ * verifies that another Dataproc VirtualCluster is not installed
+ * into it. If not specified, the name of the Dataproc Cluster is used.
+ *
+ *
+ * string kubernetes_namespace = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for kubernetesNamespace.
+ */
+ com.google.protobuf.ByteString getKubernetesNamespaceBytes();
+
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return Whether the gkeClusterConfig field is set.
+ */
+ boolean hasGkeClusterConfig();
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return The gkeClusterConfig.
+ */
+ com.google.cloud.dataproc.v1.GkeClusterConfig getGkeClusterConfig();
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on GKE.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.GkeClusterConfig gke_cluster_config = 2 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ com.google.cloud.dataproc.v1.GkeClusterConfigOrBuilder getGkeClusterConfigOrBuilder();
+
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the kubernetesSoftwareConfig field is set.
+ */
+ boolean hasKubernetesSoftwareConfig();
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The kubernetesSoftwareConfig.
+ */
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig getKubernetesSoftwareConfig();
+ /**
+ *
+ *
+ *
+ * Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesSoftwareConfig kubernetes_software_config = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfigOrBuilder
+ getKubernetesSoftwareConfigOrBuilder();
+
+ public com.google.cloud.dataproc.v1.KubernetesClusterConfig.ConfigCase getConfigCase();
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesSoftwareConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesSoftwareConfig.java
new file mode 100644
index 00000000..9780b0ba
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesSoftwareConfig.java
@@ -0,0 +1,1195 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/shared.proto
+
+package com.google.cloud.dataproc.v1;
+
+/**
+ *
+ *
+ *
+ * The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.KubernetesSoftwareConfig}
+ */
+public final class KubernetesSoftwareConfig extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.KubernetesSoftwareConfig)
+ KubernetesSoftwareConfigOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use KubernetesSoftwareConfig.newBuilder() to construct.
+ private KubernetesSoftwareConfig(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
+ super(builder);
+ }
+
+ private KubernetesSoftwareConfig() {}
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new KubernetesSoftwareConfig();
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ // Parsing constructor: consumes the wire stream tag-by-tag until EOF (tag 0),
+ // populating the two map fields and accumulating unrecognized fields.
+ private KubernetesSoftwareConfig(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ // Bit 0 / bit 1 track whether the map fields have been switched to mutable copies.
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ // Tag 10 = field 1 (component_version), length-delimited map entry.
+ case 10:
+ {
+ if (!((mutable_bitField0_ & 0x00000001) != 0)) {
+ componentVersion_ =
+ com.google.protobuf.MapField.newMapField(
+ ComponentVersionDefaultEntryHolder.defaultEntry);
+ mutable_bitField0_ |= 0x00000001;
+ }
+ com.google.protobuf.MapEntry componentVersion__ =
+ input.readMessage(
+ ComponentVersionDefaultEntryHolder.defaultEntry.getParserForType(),
+ extensionRegistry);
+ componentVersion_
+ .getMutableMap()
+ .put(componentVersion__.getKey(), componentVersion__.getValue());
+ break;
+ }
+ // Tag 18 = field 2 (properties), length-delimited map entry.
+ case 18:
+ {
+ if (!((mutable_bitField0_ & 0x00000002) != 0)) {
+ properties_ =
+ com.google.protobuf.MapField.newMapField(
+ PropertiesDefaultEntryHolder.defaultEntry);
+ mutable_bitField0_ |= 0x00000002;
+ }
+ com.google.protobuf.MapEntry properties__ =
+ input.readMessage(
+ PropertiesDefaultEntryHolder.defaultEntry.getParserForType(),
+ extensionRegistry);
+ properties_.getMutableMap().put(properties__.getKey(), properties__.getValue());
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ // Always attach whatever unknown fields were collected, even on failure paths.
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_descriptor;
+ }
+
+ @SuppressWarnings({"rawtypes"})
+ @java.lang.Override
+ // Routes reflection-based map access by field number:
+ // 1 -> component_version, 2 -> properties.
+ protected com.google.protobuf.MapField internalGetMapField(int number) {
+ switch (number) {
+ case 1:
+ return internalGetComponentVersion();
+ case 2:
+ return internalGetProperties();
+ default:
+ throw new RuntimeException("Invalid map field number: " + number);
+ }
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.class,
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.Builder.class);
+ }
+
+ public static final int COMPONENT_VERSION_FIELD_NUMBER = 1;
+
+ private static final class ComponentVersionDefaultEntryHolder {
+ static final com.google.protobuf.MapEntry defaultEntry =
+ com.google.protobuf.MapEntry.newDefaultInstance(
+ com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_ComponentVersionEntry_descriptor,
+ com.google.protobuf.WireFormat.FieldType.STRING,
+ "",
+ com.google.protobuf.WireFormat.FieldType.STRING,
+ "");
+ }
+
+ private com.google.protobuf.MapField componentVersion_;
+
+ private com.google.protobuf.MapField
+ internalGetComponentVersion() {
+ if (componentVersion_ == null) {
+ return com.google.protobuf.MapField.emptyMapField(
+ ComponentVersionDefaultEntryHolder.defaultEntry);
+ }
+ return componentVersion_;
+ }
+
+ public int getComponentVersionCount() {
+ return internalGetComponentVersion().getMap().size();
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ @java.lang.Override
+ public boolean containsComponentVersion(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ return internalGetComponentVersion().getMap().containsKey(key);
+ }
+ /** Use {@link #getComponentVersionMap()} instead. */
+ @java.lang.Override
+ @java.lang.Deprecated
+ public java.util.Map getComponentVersion() {
+ return getComponentVersionMap();
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ @java.lang.Override
+ public java.util.Map getComponentVersionMap() {
+ return internalGetComponentVersion().getMap();
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ @java.lang.Override
+ public java.lang.String getComponentVersionOrDefault(
+ java.lang.String key, java.lang.String defaultValue) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map map = internalGetComponentVersion().getMap();
+ return map.containsKey(key) ? map.get(key) : defaultValue;
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ @java.lang.Override
+ public java.lang.String getComponentVersionOrThrow(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map map = internalGetComponentVersion().getMap();
+ if (!map.containsKey(key)) {
+ throw new java.lang.IllegalArgumentException();
+ }
+ return map.get(key);
+ }
+
+ public static final int PROPERTIES_FIELD_NUMBER = 2;
+
+ private static final class PropertiesDefaultEntryHolder {
+ static final com.google.protobuf.MapEntry defaultEntry =
+ com.google.protobuf.MapEntry.newDefaultInstance(
+ com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_PropertiesEntry_descriptor,
+ com.google.protobuf.WireFormat.FieldType.STRING,
+ "",
+ com.google.protobuf.WireFormat.FieldType.STRING,
+ "");
+ }
+
+ private com.google.protobuf.MapField properties_;
+
+ private com.google.protobuf.MapField internalGetProperties() {
+ if (properties_ == null) {
+ return com.google.protobuf.MapField.emptyMapField(PropertiesDefaultEntryHolder.defaultEntry);
+ }
+ return properties_;
+ }
+
+ public int getPropertiesCount() {
+ return internalGetProperties().getMap().size();
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ @java.lang.Override
+ public boolean containsProperties(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ return internalGetProperties().getMap().containsKey(key);
+ }
+ /** Use {@link #getPropertiesMap()} instead. */
+ @java.lang.Override
+ @java.lang.Deprecated
+ public java.util.Map getProperties() {
+ return getPropertiesMap();
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ @java.lang.Override
+ public java.util.Map getPropertiesMap() {
+ return internalGetProperties().getMap();
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ @java.lang.Override
+ public java.lang.String getPropertiesOrDefault(
+ java.lang.String key, java.lang.String defaultValue) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map map = internalGetProperties().getMap();
+ return map.containsKey(key) ? map.get(key) : defaultValue;
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ @java.lang.Override
+ public java.lang.String getPropertiesOrThrow(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map map = internalGetProperties().getMap();
+ if (!map.containsKey(key)) {
+ throw new java.lang.IllegalArgumentException();
+ }
+ return map.get(key);
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ com.google.protobuf.GeneratedMessageV3.serializeStringMapTo(
+ output, internalGetComponentVersion(), ComponentVersionDefaultEntryHolder.defaultEntry, 1);
+ com.google.protobuf.GeneratedMessageV3.serializeStringMapTo(
+ output, internalGetProperties(), PropertiesDefaultEntryHolder.defaultEntry, 2);
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (java.util.Map.Entry entry :
+ internalGetComponentVersion().getMap().entrySet()) {
+ com.google.protobuf.MapEntry componentVersion__ =
+ ComponentVersionDefaultEntryHolder.defaultEntry
+ .newBuilderForType()
+ .setKey(entry.getKey())
+ .setValue(entry.getValue())
+ .build();
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, componentVersion__);
+ }
+ for (java.util.Map.Entry entry :
+ internalGetProperties().getMap().entrySet()) {
+ com.google.protobuf.MapEntry properties__ =
+ PropertiesDefaultEntryHolder.defaultEntry
+ .newBuilderForType()
+ .setKey(entry.getKey())
+ .setValue(entry.getValue())
+ .build();
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, properties__);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.dataproc.v1.KubernetesSoftwareConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig other =
+ (com.google.cloud.dataproc.v1.KubernetesSoftwareConfig) obj;
+
+ if (!internalGetComponentVersion().equals(other.internalGetComponentVersion())) return false;
+ if (!internalGetProperties().equals(other.internalGetProperties())) return false;
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (!internalGetComponentVersion().getMap().isEmpty()) {
+ hash = (37 * hash) + COMPONENT_VERSION_FIELD_NUMBER;
+ hash = (53 * hash) + internalGetComponentVersion().hashCode();
+ }
+ if (!internalGetProperties().getMap().isEmpty()) {
+ hash = (37 * hash) + PROPERTIES_FIELD_NUMBER;
+ hash = (53 * hash) + internalGetProperties().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * The software configuration for this Dataproc cluster running on Kubernetes.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.KubernetesSoftwareConfig}
+ */
+ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.KubernetesSoftwareConfig)
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfigOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_descriptor;
+ }
+
+ @SuppressWarnings({"rawtypes"})
+ protected com.google.protobuf.MapField internalGetMapField(int number) {
+ switch (number) {
+ case 1:
+ return internalGetComponentVersion();
+ case 2:
+ return internalGetProperties();
+ default:
+ throw new RuntimeException("Invalid map field number: " + number);
+ }
+ }
+
+ @SuppressWarnings({"rawtypes"})
+ protected com.google.protobuf.MapField internalGetMutableMapField(int number) {
+ switch (number) {
+ case 1:
+ return internalGetMutableComponentVersion();
+ case 2:
+ return internalGetMutableProperties();
+ default:
+ throw new RuntimeException("Invalid map field number: " + number);
+ }
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.class,
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.Builder.class);
+ }
+
+ // Construct using com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ internalGetMutableComponentVersion().clear();
+ internalGetMutableProperties().clear();
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.cloud.dataproc.v1.SharedProto
+ .internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesSoftwareConfig getDefaultInstanceForType() {
+ return com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesSoftwareConfig build() {
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesSoftwareConfig buildPartial() {
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig result =
+ new com.google.cloud.dataproc.v1.KubernetesSoftwareConfig(this);
+ int from_bitField0_ = bitField0_;
+ result.componentVersion_ = internalGetComponentVersion();
+ result.componentVersion_.makeImmutable();
+ result.properties_ = internalGetProperties();
+ result.properties_.makeImmutable();
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.cloud.dataproc.v1.KubernetesSoftwareConfig) {
+ return mergeFrom((com.google.cloud.dataproc.v1.KubernetesSoftwareConfig) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(com.google.cloud.dataproc.v1.KubernetesSoftwareConfig other) {
+ if (other == com.google.cloud.dataproc.v1.KubernetesSoftwareConfig.getDefaultInstance())
+ return this;
+ internalGetMutableComponentVersion().mergeFrom(other.internalGetComponentVersion());
+ internalGetMutableProperties().mergeFrom(other.internalGetProperties());
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.cloud.dataproc.v1.KubernetesSoftwareConfig parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage =
+ (com.google.cloud.dataproc.v1.KubernetesSoftwareConfig) e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ private int bitField0_;
+
+ private com.google.protobuf.MapField componentVersion_;
+
+ private com.google.protobuf.MapField
+ internalGetComponentVersion() {
+ if (componentVersion_ == null) {
+ return com.google.protobuf.MapField.emptyMapField(
+ ComponentVersionDefaultEntryHolder.defaultEntry);
+ }
+ return componentVersion_;
+ }
+
+ private com.google.protobuf.MapField
+ internalGetMutableComponentVersion() {
+ onChanged();
+ ;
+ if (componentVersion_ == null) {
+ componentVersion_ =
+ com.google.protobuf.MapField.newMapField(
+ ComponentVersionDefaultEntryHolder.defaultEntry);
+ }
+ if (!componentVersion_.isMutable()) {
+ componentVersion_ = componentVersion_.copy();
+ }
+ return componentVersion_;
+ }
+
+ public int getComponentVersionCount() {
+ return internalGetComponentVersion().getMap().size();
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ @java.lang.Override
+ public boolean containsComponentVersion(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ return internalGetComponentVersion().getMap().containsKey(key);
+ }
+ /** Use {@link #getComponentVersionMap()} instead. */
+ @java.lang.Override
+ @java.lang.Deprecated
+ public java.util.Map getComponentVersion() {
+ return getComponentVersionMap();
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ @java.lang.Override
+ public java.util.Map getComponentVersionMap() {
+ return internalGetComponentVersion().getMap();
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ @java.lang.Override
+ public java.lang.String getComponentVersionOrDefault(
+ java.lang.String key, java.lang.String defaultValue) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map map =
+ internalGetComponentVersion().getMap();
+ return map.containsKey(key) ? map.get(key) : defaultValue;
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ @java.lang.Override
+ public java.lang.String getComponentVersionOrThrow(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map map =
+ internalGetComponentVersion().getMap();
+ if (!map.containsKey(key)) {
+ throw new java.lang.IllegalArgumentException();
+ }
+ return map.get(key);
+ }
+
+ public Builder clearComponentVersion() {
+ internalGetMutableComponentVersion().getMutableMap().clear();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ public Builder removeComponentVersion(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ internalGetMutableComponentVersion().getMutableMap().remove(key);
+ return this;
+ }
+ /** Use alternate mutation accessors instead. */
+ @java.lang.Deprecated
+ public java.util.Map getMutableComponentVersion() {
+ return internalGetMutableComponentVersion().getMutableMap();
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ public Builder putComponentVersion(java.lang.String key, java.lang.String value) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ if (value == null) {
+ throw new java.lang.NullPointerException();
+ }
+ internalGetMutableComponentVersion().getMutableMap().put(key, value);
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The components that should be installed in this Dataproc cluster. The key
+ * must be a string from the KubernetesComponent enumeration. The value is
+ * the version of the software to be installed.
+ * At least one entry must be specified.
+ *
+ *
+ * map<string, string> component_version = 1;
+ */
+ public Builder putAllComponentVersion(
+ java.util.Map values) {
+ internalGetMutableComponentVersion().getMutableMap().putAll(values);
+ return this;
+ }
+
+ private com.google.protobuf.MapField properties_;
+
+ private com.google.protobuf.MapField
+ internalGetProperties() {
+ if (properties_ == null) {
+ return com.google.protobuf.MapField.emptyMapField(
+ PropertiesDefaultEntryHolder.defaultEntry);
+ }
+ return properties_;
+ }
+
+ private com.google.protobuf.MapField
+ internalGetMutableProperties() {
+ onChanged();
+ ;
+ if (properties_ == null) {
+ properties_ =
+ com.google.protobuf.MapField.newMapField(PropertiesDefaultEntryHolder.defaultEntry);
+ }
+ if (!properties_.isMutable()) {
+ properties_ = properties_.copy();
+ }
+ return properties_;
+ }
+
+ public int getPropertiesCount() {
+ return internalGetProperties().getMap().size();
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ @java.lang.Override
+ public boolean containsProperties(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ return internalGetProperties().getMap().containsKey(key);
+ }
+ /** Use {@link #getPropertiesMap()} instead. */
+ @java.lang.Override
+ @java.lang.Deprecated
+ public java.util.Map getProperties() {
+ return getPropertiesMap();
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ @java.lang.Override
+ public java.util.Map getPropertiesMap() {
+ return internalGetProperties().getMap();
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ @java.lang.Override
+ public java.lang.String getPropertiesOrDefault(
+ java.lang.String key, java.lang.String defaultValue) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map map = internalGetProperties().getMap();
+ return map.containsKey(key) ? map.get(key) : defaultValue;
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ @java.lang.Override
+ public java.lang.String getPropertiesOrThrow(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ java.util.Map map = internalGetProperties().getMap();
+ if (!map.containsKey(key)) {
+ throw new java.lang.IllegalArgumentException();
+ }
+ return map.get(key);
+ }
+
+ public Builder clearProperties() {
+ internalGetMutableProperties().getMutableMap().clear();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ public Builder removeProperties(java.lang.String key) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ internalGetMutableProperties().getMutableMap().remove(key);
+ return this;
+ }
+ /** Use alternate mutation accessors instead. */
+ @java.lang.Deprecated
+ public java.util.Map getMutableProperties() {
+ return internalGetMutableProperties().getMutableMap();
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ public Builder putProperties(java.lang.String key, java.lang.String value) {
+ if (key == null) {
+ throw new java.lang.NullPointerException();
+ }
+ if (value == null) {
+ throw new java.lang.NullPointerException();
+ }
+ internalGetMutableProperties().getMutableMap().put(key, value);
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The properties to set on daemon config files.
+ * Property keys are specified in `prefix:property` format, for example
+ * `spark:spark.kubernetes.container.image`. The following are supported
+ * prefixes and their mappings:
+ * * spark: `spark-defaults.conf`
+ * For more information, see [Cluster
+ * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+ *
+ *
+ * map<string, string> properties = 2;
+ */
+ public Builder putAllProperties(java.util.Map values) {
+ internalGetMutableProperties().getMutableMap().putAll(values);
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.KubernetesSoftwareConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.KubernetesSoftwareConfig)
+ private static final com.google.cloud.dataproc.v1.KubernetesSoftwareConfig DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.KubernetesSoftwareConfig();
+ }
+
+ public static com.google.cloud.dataproc.v1.KubernetesSoftwareConfig getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public KubernetesSoftwareConfig parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new KubernetesSoftwareConfig(input, extensionRegistry);
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesSoftwareConfig getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesSoftwareConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesSoftwareConfigOrBuilder.java
new file mode 100644
index 00000000..05673c71
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/KubernetesSoftwareConfigOrBuilder.java
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/shared.proto
+
+package com.google.cloud.dataproc.v1;
+
+public interface KubernetesSoftwareConfigOrBuilder
+    extends
+    // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.KubernetesSoftwareConfig)
+    com.google.protobuf.MessageOrBuilder {
+
+  /**
+   * The components that should be installed in this Dataproc cluster. The key
+   * must be a string from the KubernetesComponent enumeration. The value is
+   * the version of the software to be installed. At least one entry must be
+   * specified.
+   *
+   * <code>map&lt;string, string&gt; component_version = 1;</code>
+   *
+   * @return The number of component/version entries.
+   */
+  int getComponentVersionCount();
+
+  /**
+   * Returns whether {@code key} is present in the component_version map.
+   *
+   * <code>map&lt;string, string&gt; component_version = 1;</code>
+   */
+  boolean containsComponentVersion(java.lang.String key);
+
+  /** Use {@link #getComponentVersionMap()} instead. */
+  @java.lang.Deprecated
+  java.util.Map<java.lang.String, java.lang.String> getComponentVersion();
+
+  /**
+   * Returns an unmodifiable view of the component_version map.
+   *
+   * <code>map&lt;string, string&gt; component_version = 1;</code>
+   */
+  java.util.Map<java.lang.String, java.lang.String> getComponentVersionMap();
+
+  /**
+   * Returns the version mapped to {@code key}, or {@code defaultValue} when absent.
+   *
+   * <code>map&lt;string, string&gt; component_version = 1;</code>
+   */
+  java.lang.String getComponentVersionOrDefault(
+      java.lang.String key, java.lang.String defaultValue);
+
+  /**
+   * Returns the version mapped to {@code key}; throws if the key is absent.
+   *
+   * <code>map&lt;string, string&gt; component_version = 1;</code>
+   */
+  java.lang.String getComponentVersionOrThrow(java.lang.String key);
+
+  /**
+   * The properties to set on daemon config files. Property keys are specified
+   * in `prefix:property` format, for example
+   * `spark:spark.kubernetes.container.image`. The following are supported
+   * prefixes and their mappings:
+   * * spark: `spark-defaults.conf`
+   * For more information, see [Cluster
+   * properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+   *
+   * <code>map&lt;string, string&gt; properties = 2;</code>
+   *
+   * @return The number of property entries.
+   */
+  int getPropertiesCount();
+
+  /**
+   * Returns whether {@code key} is present in the properties map.
+   *
+   * <code>map&lt;string, string&gt; properties = 2;</code>
+   */
+  boolean containsProperties(java.lang.String key);
+
+  /** Use {@link #getPropertiesMap()} instead. */
+  @java.lang.Deprecated
+  java.util.Map<java.lang.String, java.lang.String> getProperties();
+
+  /**
+   * Returns an unmodifiable view of the properties map.
+   *
+   * <code>map&lt;string, string&gt; properties = 2;</code>
+   */
+  java.util.Map<java.lang.String, java.lang.String> getPropertiesMap();
+
+  /**
+   * Returns the property mapped to {@code key}, or {@code defaultValue} when absent.
+   *
+   * <code>map&lt;string, string&gt; properties = 2;</code>
+   */
+  java.lang.String getPropertiesOrDefault(java.lang.String key, java.lang.String defaultValue);
+
+  /**
+   * Returns the property mapped to {@code key}; throws if the key is absent.
+   *
+   * <code>map&lt;string, string&gt; properties = 2;</code>
+   */
+  java.lang.String getPropertiesOrThrow(java.lang.String key);
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SharedProto.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SharedProto.java
index dede2784..d1d12ca9 100644
--- a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SharedProto.java
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SharedProto.java
@@ -59,6 +59,46 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
internal_static_google_cloud_dataproc_v1_RuntimeInfo_EndpointsEntry_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_google_cloud_dataproc_v1_RuntimeInfo_EndpointsEntry_fieldAccessorTable;
+  // Descriptors and reflection accessor tables for the Dataproc-on-GKE message
+  // types added to shared.proto (GkeClusterConfig, KubernetesClusterConfig,
+  // KubernetesSoftwareConfig with its two map-entry types, GkeNodePoolTarget,
+  // and GkeNodePoolConfig with its nested node/accelerator/autoscaling
+  // configs). Populated by the static initializer once the file descriptor is
+  // built.
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_dataproc_v1_GkeClusterConfig_fieldAccessorTable;
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_dataproc_v1_KubernetesClusterConfig_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_dataproc_v1_KubernetesClusterConfig_fieldAccessorTable;
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_fieldAccessorTable;
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_ComponentVersionEntry_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_ComponentVersionEntry_fieldAccessorTable;
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_PropertiesEntry_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_PropertiesEntry_fieldAccessorTable;
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_dataproc_v1_GkeNodePoolTarget_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_dataproc_v1_GkeNodePoolTarget_fieldAccessorTable;
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_fieldAccessorTable;
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodeConfig_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodeConfig_fieldAccessorTable;
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAcceleratorConfig_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAcceleratorConfig_fieldAccessorTable;
+  static final com.google.protobuf.Descriptors.Descriptor
+      internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAutoscalingConfig_descriptor;
+  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAutoscalingConfig_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
return descriptor;
@@ -94,17 +134,57 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "eInfo.EndpointsEntryB\003\340A\003\022\027\n\noutput_uri\030"
+ "\002 \001(\tB\003\340A\003\022\"\n\025diagnostic_output_uri\030\003 \001("
+ "\tB\003\340A\003\0320\n\016EndpointsEntry\022\013\n\003key\030\001 \001(\t\022\r\n"
- + "\005value\030\002 \001(\t:\0028\001*\277\001\n\tComponent\022\031\n\025COMPON"
- + "ENT_UNSPECIFIED\020\000\022\014\n\010ANACONDA\020\005\022\n\n\006DOCKE"
- + "R\020\r\022\t\n\005DRUID\020\t\022\t\n\005FLINK\020\016\022\t\n\005HBASE\020\013\022\020\n\014"
- + "HIVE_WEBHCAT\020\003\022\013\n\007JUPYTER\020\001\022\n\n\006PRESTO\020\006\022"
- + "\n\n\006RANGER\020\014\022\010\n\004SOLR\020\n\022\014\n\010ZEPPELIN\020\004\022\r\n\tZ"
- + "OOKEEPER\020\010*J\n\rFailureAction\022\036\n\032FAILURE_A"
- + "CTION_UNSPECIFIED\020\000\022\r\n\tNO_ACTION\020\001\022\n\n\006DE"
- + "LETE\020\002Bo\n\034com.google.cloud.dataproc.v1B\013"
- + "SharedProtoP\001Z@google.golang.org/genprot"
- + "o/googleapis/cloud/dataproc/v1;dataprocb"
- + "\006proto3"
+ + "\005value\030\002 \001(\t:\0028\001\"\177\n\020GkeClusterConfig\022\037\n\022"
+ + "gke_cluster_target\030\002 \001(\tB\003\340A\001\022J\n\020node_po"
+ + "ol_target\030\003 \003(\0132+.google.cloud.dataproc."
+ + "v1.GkeNodePoolTargetB\003\340A\001\"\362\001\n\027Kubernetes"
+ + "ClusterConfig\022!\n\024kubernetes_namespace\030\001 "
+ + "\001(\tB\003\340A\001\022M\n\022gke_cluster_config\030\002 \001(\0132*.g"
+ + "oogle.cloud.dataproc.v1.GkeClusterConfig"
+ + "B\003\340A\002H\000\022[\n\032kubernetes_software_config\030\003 "
+ + "\001(\01322.google.cloud.dataproc.v1.Kubernete"
+ + "sSoftwareConfigB\003\340A\001B\010\n\006config\"\303\002\n\030Kuber"
+ + "netesSoftwareConfig\022c\n\021component_version"
+ + "\030\001 \003(\0132H.google.cloud.dataproc.v1.Kubern"
+ + "etesSoftwareConfig.ComponentVersionEntry"
+ + "\022V\n\nproperties\030\002 \003(\0132B.google.cloud.data"
+ + "proc.v1.KubernetesSoftwareConfig.Propert"
+ + "iesEntry\0327\n\025ComponentVersionEntry\022\013\n\003key"
+ + "\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0321\n\017PropertiesE"
+ + "ntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"\236\002\n"
+ + "\021GkeNodePoolTarget\022\026\n\tnode_pool\030\001 \001(\tB\003\340"
+ + "A\002\022D\n\005roles\030\002 \003(\01620.google.cloud.datapro"
+ + "c.v1.GkeNodePoolTarget.RoleB\003\340A\002\022J\n\020node"
+ + "_pool_config\030\003 \001(\0132+.google.cloud.datapr"
+ + "oc.v1.GkeNodePoolConfigB\003\340A\001\"_\n\004Role\022\024\n\020"
+ + "ROLE_UNSPECIFIED\020\000\022\013\n\007DEFAULT\020\001\022\016\n\nCONTR"
+ + "OLLER\020\002\022\020\n\014SPARK_DRIVER\020\003\022\022\n\016SPARK_EXECU"
+ + "TOR\020\004\"\355\004\n\021GkeNodePoolConfig\022N\n\006config\030\002 "
+ + "\001(\01329.google.cloud.dataproc.v1.GkeNodePo"
+ + "olConfig.GkeNodeConfigB\003\340A\001\022\026\n\tlocations"
+ + "\030\r \003(\tB\003\340A\001\022b\n\013autoscaling\030\004 \001(\0132H.googl"
+ + "e.cloud.dataproc.v1.GkeNodePoolConfig.Gk"
+ + "eNodePoolAutoscalingConfigB\003\340A\001\032\346\001\n\rGkeN"
+ + "odeConfig\022\031\n\014machine_type\030\001 \001(\tB\003\340A\001\022\030\n\013"
+ + "preemptible\030\n \001(\010B\003\340A\001\022\034\n\017local_ssd_coun"
+ + "t\030\007 \001(\005B\003\340A\001\022c\n\014accelerators\030\013 \003(\0132H.goo"
+ + "gle.cloud.dataproc.v1.GkeNodePoolConfig."
+ + "GkeNodePoolAcceleratorConfigB\003\340A\001\022\035\n\020min"
+ + "_cpu_platform\030\r \001(\tB\003\340A\001\032S\n\034GkeNodePoolA"
+ + "cceleratorConfig\022\031\n\021accelerator_count\030\001 "
+ + "\001(\003\022\030\n\020accelerator_type\030\002 \001(\t\032N\n\034GkeNode"
+ + "PoolAutoscalingConfig\022\026\n\016min_node_count\030"
+ + "\002 \001(\005\022\026\n\016max_node_count\030\003 \001(\005*\277\001\n\tCompon"
+ + "ent\022\031\n\025COMPONENT_UNSPECIFIED\020\000\022\014\n\010ANACON"
+ + "DA\020\005\022\n\n\006DOCKER\020\r\022\t\n\005DRUID\020\t\022\t\n\005FLINK\020\016\022\t"
+ + "\n\005HBASE\020\013\022\020\n\014HIVE_WEBHCAT\020\003\022\013\n\007JUPYTER\020\001"
+ + "\022\n\n\006PRESTO\020\006\022\n\n\006RANGER\020\014\022\010\n\004SOLR\020\n\022\014\n\010ZE"
+ + "PPELIN\020\004\022\r\n\tZOOKEEPER\020\010*J\n\rFailureAction"
+ + "\022\036\n\032FAILURE_ACTION_UNSPECIFIED\020\000\022\r\n\tNO_A"
+ + "CTION\020\001\022\n\n\006DELETE\020\002Bo\n\034com.google.cloud."
+ + "dataproc.v1B\013SharedProtoP\001Z@google.golan"
+ + "g.org/genproto/googleapis/cloud/dataproc"
+ + "/v1;dataprocb\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -176,6 +256,96 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
new java.lang.String[] {
"Key", "Value",
});
+ internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor =
+ getDescriptor().getMessageTypes().get(6);
+ internal_static_google_cloud_dataproc_v1_GkeClusterConfig_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_GkeClusterConfig_descriptor,
+ new java.lang.String[] {
+ "GkeClusterTarget", "NodePoolTarget",
+ });
+ internal_static_google_cloud_dataproc_v1_KubernetesClusterConfig_descriptor =
+ getDescriptor().getMessageTypes().get(7);
+ internal_static_google_cloud_dataproc_v1_KubernetesClusterConfig_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_KubernetesClusterConfig_descriptor,
+ new java.lang.String[] {
+ "KubernetesNamespace", "GkeClusterConfig", "KubernetesSoftwareConfig", "Config",
+ });
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_descriptor =
+ getDescriptor().getMessageTypes().get(8);
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_descriptor,
+ new java.lang.String[] {
+ "ComponentVersion", "Properties",
+ });
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_ComponentVersionEntry_descriptor =
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_descriptor
+ .getNestedTypes()
+ .get(0);
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_ComponentVersionEntry_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_ComponentVersionEntry_descriptor,
+ new java.lang.String[] {
+ "Key", "Value",
+ });
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_PropertiesEntry_descriptor =
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_descriptor
+ .getNestedTypes()
+ .get(1);
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_PropertiesEntry_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_KubernetesSoftwareConfig_PropertiesEntry_descriptor,
+ new java.lang.String[] {
+ "Key", "Value",
+ });
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolTarget_descriptor =
+ getDescriptor().getMessageTypes().get(9);
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolTarget_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolTarget_descriptor,
+ new java.lang.String[] {
+ "NodePool", "Roles", "NodePoolConfig",
+ });
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_descriptor =
+ getDescriptor().getMessageTypes().get(10);
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_descriptor,
+ new java.lang.String[] {
+ "Config", "Locations", "Autoscaling",
+ });
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodeConfig_descriptor =
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_descriptor
+ .getNestedTypes()
+ .get(0);
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodeConfig_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodeConfig_descriptor,
+ new java.lang.String[] {
+ "MachineType", "Preemptible", "LocalSsdCount", "Accelerators", "MinCpuPlatform",
+ });
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAcceleratorConfig_descriptor =
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_descriptor
+ .getNestedTypes()
+ .get(1);
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAcceleratorConfig_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAcceleratorConfig_descriptor,
+ new java.lang.String[] {
+ "AcceleratorCount", "AcceleratorType",
+ });
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAutoscalingConfig_descriptor =
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_descriptor
+ .getNestedTypes()
+ .get(2);
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAutoscalingConfig_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dataproc_v1_GkeNodePoolConfig_GkeNodePoolAutoscalingConfig_descriptor,
+ new java.lang.String[] {
+ "MinNodeCount", "MaxNodeCount",
+ });
com.google.protobuf.ExtensionRegistry registry =
com.google.protobuf.ExtensionRegistry.newInstance();
registry.add(com.google.api.FieldBehaviorProto.fieldBehavior);
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/VirtualClusterConfig.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/VirtualClusterConfig.java
new file mode 100644
index 00000000..808a0cc1
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/VirtualClusterConfig.java
@@ -0,0 +1,1698 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/clusters.proto
+
+package com.google.cloud.dataproc.v1;
+
+/**
+ *
+ *
+ *
+ * Dataproc cluster config for a cluster that does not directly control the
+ * underlying compute resources, such as a [Dataproc-on-GKE
+ * cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.VirtualClusterConfig}
+ */
+public final class VirtualClusterConfig extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1.VirtualClusterConfig)
+ VirtualClusterConfigOrBuilder {
+ private static final long serialVersionUID = 0L;
+  // Use VirtualClusterConfig.newBuilder() to construct.
+  // The bounded wildcard below was stripped in transit; this restores the
+  // standard protoc-generated signature.
+  private VirtualClusterConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+
+  // Default-instance constructor: string fields start as the empty string.
+  private VirtualClusterConfig() {
+    stagingBucket_ = "";
+    tempBucket_ = "";
+  }
+
+  // Reflective instance factory used by the protobuf runtime.
+  @java.lang.Override
+  @SuppressWarnings({"unused"})
+  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+    return new VirtualClusterConfig();
+  }
+
+  @java.lang.Override
+  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+    return this.unknownFields;
+  }
+
+  /**
+   * Parses a serialized VirtualClusterConfig from {@code input}.
+   *
+   * <p>Recognized tags (tag = field_number &lt;&lt; 3 | wire_type):
+   * staging_bucket (1), temp_bucket (2), kubernetes_cluster_config (6, a
+   * member of the infrastructure_config oneof), and auxiliary_services_config
+   * (7). Unrecognized fields are preserved in the unknown field set.
+   *
+   * @throws com.google.protobuf.InvalidProtocolBufferException if the stream
+   *     is malformed or an underlying I/O error occurs
+   */
+  private VirtualClusterConfig(
+      com.google.protobuf.CodedInputStream input,
+      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws com.google.protobuf.InvalidProtocolBufferException {
+    this();
+    if (extensionRegistry == null) {
+      throw new java.lang.NullPointerException();
+    }
+    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+        com.google.protobuf.UnknownFieldSet.newBuilder();
+    try {
+      boolean done = false;
+      while (!done) {
+        int tag = input.readTag();
+        switch (tag) {
+          case 0:
+            // Tag 0 marks the end of the stream / message.
+            done = true;
+            break;
+          case 10:
+            {
+              // Field 1 (staging_bucket): length-delimited UTF-8 string.
+              java.lang.String s = input.readStringRequireUtf8();
+
+              stagingBucket_ = s;
+              break;
+            }
+          case 18:
+            {
+              // Field 2 (temp_bucket): length-delimited UTF-8 string.
+              java.lang.String s = input.readStringRequireUtf8();
+
+              tempBucket_ = s;
+              break;
+            }
+          case 50:
+            {
+              // Field 6 (kubernetes_cluster_config), member of the
+              // infrastructure_config oneof. If the oneof already holds this
+              // case, merge the newly read message into the existing value.
+              com.google.cloud.dataproc.v1.KubernetesClusterConfig.Builder subBuilder = null;
+              if (infrastructureConfigCase_ == 6) {
+                subBuilder =
+                    ((com.google.cloud.dataproc.v1.KubernetesClusterConfig) infrastructureConfig_)
+                        .toBuilder();
+              }
+              infrastructureConfig_ =
+                  input.readMessage(
+                      com.google.cloud.dataproc.v1.KubernetesClusterConfig.parser(),
+                      extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(
+                    (com.google.cloud.dataproc.v1.KubernetesClusterConfig) infrastructureConfig_);
+                infrastructureConfig_ = subBuilder.buildPartial();
+              }
+              infrastructureConfigCase_ = 6;
+              break;
+            }
+          case 58:
+            {
+              // Field 7 (auxiliary_services_config): merged into any
+              // previously parsed value, per proto3 message-merge semantics.
+              com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.Builder subBuilder = null;
+              if (auxiliaryServicesConfig_ != null) {
+                subBuilder = auxiliaryServicesConfig_.toBuilder();
+              }
+              auxiliaryServicesConfig_ =
+                  input.readMessage(
+                      com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.parser(),
+                      extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(auxiliaryServicesConfig_);
+                auxiliaryServicesConfig_ = subBuilder.buildPartial();
+              }
+
+              break;
+            }
+          default:
+            {
+              // Preserve unknown fields; stop when an end-group tag is hit.
+              if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+        }
+      }
+    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+      throw e.setUnfinishedMessage(this);
+    } catch (java.io.IOException e) {
+      throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+    } finally {
+      // Always freeze unknown fields, even on error, so the partial message
+      // attached to the exception is consistent.
+      this.unknownFields = unknownFields.build();
+      makeExtensionsImmutable();
+    }
+  }
+
+  /** Returns the message descriptor for {@code google.cloud.dataproc.v1.VirtualClusterConfig}. */
+  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+    return com.google.cloud.dataproc.v1.ClustersProto
+        .internal_static_google_cloud_dataproc_v1_VirtualClusterConfig_descriptor;
+  }
+
+  // Wires this message class and its Builder into the reflection accessor table.
+  @java.lang.Override
+  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return com.google.cloud.dataproc.v1.ClustersProto
+        .internal_static_google_cloud_dataproc_v1_VirtualClusterConfig_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            com.google.cloud.dataproc.v1.VirtualClusterConfig.class,
+            com.google.cloud.dataproc.v1.VirtualClusterConfig.Builder.class);
+  }
+
+  // State for the infrastructure_config oneof: the field number currently set
+  // (0 = none) and the corresponding value object.
+  private int infrastructureConfigCase_ = 0;
+  private java.lang.Object infrastructureConfig_;
+
+  /** Enumerates which member of the {@code infrastructure_config} oneof is set. */
+  public enum InfrastructureConfigCase
+      implements
+          com.google.protobuf.Internal.EnumLite,
+          com.google.protobuf.AbstractMessage.InternalOneOfEnum {
+    KUBERNETES_CLUSTER_CONFIG(6),
+    INFRASTRUCTURECONFIG_NOT_SET(0);
+    private final int value;
+
+    private InfrastructureConfigCase(int value) {
+      this.value = value;
+    }
+    /**
+     * @param value The number of the enum to look for.
+     * @return The enum associated with the given number.
+     * @deprecated Use {@link #forNumber(int)} instead.
+     */
+    @java.lang.Deprecated
+    public static InfrastructureConfigCase valueOf(int value) {
+      return forNumber(value);
+    }
+
+    /** Maps a oneof field number to its case constant; null for unknown numbers. */
+    public static InfrastructureConfigCase forNumber(int value) {
+      switch (value) {
+        case 6:
+          return KUBERNETES_CLUSTER_CONFIG;
+        case 0:
+          return INFRASTRUCTURECONFIG_NOT_SET;
+        default:
+          return null;
+      }
+    }
+
+    public int getNumber() {
+      return this.value;
+    }
+  };
+
+  /** Returns which member of the {@code infrastructure_config} oneof is currently set. */
+  public InfrastructureConfigCase getInfrastructureConfigCase() {
+    return InfrastructureConfigCase.forNumber(infrastructureConfigCase_);
+  }
+
+  public static final int STAGING_BUCKET_FIELD_NUMBER = 1;
+  // Lazily interned: holds a String once decoded, a ByteString before that.
+  private volatile java.lang.Object stagingBucket_;
+  /**
+   * Optional. A Storage bucket used to stage job dependencies, config files,
+   * and job driver console output. If you do not specify a staging bucket,
+   * Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU)
+   * for your cluster's staging bucket according to the Compute Engine zone
+   * where your cluster is deployed, and then create and manage this
+   * project-level, per-location bucket (see [Dataproc staging and temp
+   * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+   * This field requires a Cloud Storage bucket name, not a {@code gs://...}
+   * URI to a Cloud Storage bucket.
+   *
+   * <code>string staging_bucket = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
+   *
+   * @return The stagingBucket.
+   */
+  @java.lang.Override
+  public java.lang.String getStagingBucket() {
+    java.lang.Object ref = stagingBucket_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      // First access: decode the ByteString and cache the resulting String.
+      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      stagingBucket_ = s;
+      return s;
+    }
+  }
+  /**
+   * Same field as {@link #getStagingBucket()}, returned as UTF-8 bytes.
+   *
+   * <code>string staging_bucket = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
+   *
+   * @return The bytes for stagingBucket.
+   */
+  @java.lang.Override
+  public com.google.protobuf.ByteString getStagingBucketBytes() {
+    java.lang.Object ref = stagingBucket_;
+    if (ref instanceof java.lang.String) {
+      // First access as bytes: encode the String and cache the ByteString.
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+      stagingBucket_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int TEMP_BUCKET_FIELD_NUMBER = 2;
+  // Lazily interned: holds a String once decoded, a ByteString before that.
+  private volatile java.lang.Object tempBucket_;
+  /**
+   * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs
+   * data, such as Spark and MapReduce history files. If you do not specify a
+   * temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA,
+   * or EU) for your cluster's temp bucket according to the Compute Engine zone
+   * where your cluster is deployed, and then create and manage this
+   * project-level, per-location bucket. The default bucket has a TTL of 90
+   * days, but you can use any TTL (or none) if you specify a bucket (see
+   * [Dataproc staging and temp
+   * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+   * This field requires a Cloud Storage bucket name, not a {@code gs://...}
+   * URI to a Cloud Storage bucket.
+   *
+   * <code>string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
+   *
+   * @return The tempBucket.
+   */
+  @java.lang.Override
+  public java.lang.String getTempBucket() {
+    java.lang.Object ref = tempBucket_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      // First access: decode the ByteString and cache the resulting String.
+      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      tempBucket_ = s;
+      return s;
+    }
+  }
+  /**
+   * Same field as {@link #getTempBucket()}, returned as UTF-8 bytes.
+   *
+   * <code>string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
+   *
+   * @return The bytes for tempBucket.
+   */
+  @java.lang.Override
+  public com.google.protobuf.ByteString getTempBucketBytes() {
+    java.lang.Object ref = tempBucket_;
+    if (ref instanceof java.lang.String) {
+      // First access as bytes: encode the String and cache the ByteString.
+      com.google.protobuf.ByteString b =
+          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+      tempBucket_ = b;
+      return b;
+    } else {
+      return (com.google.protobuf.ByteString) ref;
+    }
+  }
+
+  public static final int KUBERNETES_CLUSTER_CONFIG_FIELD_NUMBER = 6;
+  /**
+   * Required. The configuration for running the Dataproc cluster on
+   * Kubernetes. Member of the {@code infrastructure_config} oneof.
+   *
+   * <code>.google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6
+   * [(.google.api.field_behavior) = REQUIRED];</code>
+   *
+   * @return Whether the kubernetesClusterConfig field is set.
+   */
+  @java.lang.Override
+  public boolean hasKubernetesClusterConfig() {
+    return infrastructureConfigCase_ == 6;
+  }
+  /**
+   * Required. The configuration for running the Dataproc cluster on
+   * Kubernetes.
+   *
+   * <code>.google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6
+   * [(.google.api.field_behavior) = REQUIRED];</code>
+   *
+   * @return The kubernetesClusterConfig, or its default instance when the
+   *     oneof holds a different case.
+   */
+  @java.lang.Override
+  public com.google.cloud.dataproc.v1.KubernetesClusterConfig getKubernetesClusterConfig() {
+    if (infrastructureConfigCase_ == 6) {
+      return (com.google.cloud.dataproc.v1.KubernetesClusterConfig) infrastructureConfig_;
+    }
+    return com.google.cloud.dataproc.v1.KubernetesClusterConfig.getDefaultInstance();
+  }
+  /**
+   * Read-only view of the kubernetes_cluster_config field; falls back to the
+   * default instance when the oneof holds a different case.
+   *
+   * <code>.google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6
+   * [(.google.api.field_behavior) = REQUIRED];</code>
+   */
+  @java.lang.Override
+  public com.google.cloud.dataproc.v1.KubernetesClusterConfigOrBuilder
+      getKubernetesClusterConfigOrBuilder() {
+    if (infrastructureConfigCase_ == 6) {
+      return (com.google.cloud.dataproc.v1.KubernetesClusterConfig) infrastructureConfig_;
+    }
+    return com.google.cloud.dataproc.v1.KubernetesClusterConfig.getDefaultInstance();
+  }
+
+ public static final int AUXILIARY_SERVICES_CONFIG_FIELD_NUMBER = 7;
+ private com.google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliaryServicesConfig_;
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the auxiliaryServicesConfig field is set.
+ */
+ @java.lang.Override
+ public boolean hasAuxiliaryServicesConfig() {
+ return auxiliaryServicesConfig_ != null;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The auxiliaryServicesConfig.
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.AuxiliaryServicesConfig getAuxiliaryServicesConfig() {
+ return auxiliaryServicesConfig_ == null
+ ? com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.getDefaultInstance()
+ : auxiliaryServicesConfig_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.AuxiliaryServicesConfigOrBuilder
+ getAuxiliaryServicesConfigOrBuilder() {
+ return getAuxiliaryServicesConfig();
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(stagingBucket_)) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, stagingBucket_);
+ }
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(tempBucket_)) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 2, tempBucket_);
+ }
+ if (infrastructureConfigCase_ == 6) {
+ output.writeMessage(
+ 6, (com.google.cloud.dataproc.v1.KubernetesClusterConfig) infrastructureConfig_);
+ }
+ if (auxiliaryServicesConfig_ != null) {
+ output.writeMessage(7, getAuxiliaryServicesConfig());
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(stagingBucket_)) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, stagingBucket_);
+ }
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(tempBucket_)) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, tempBucket_);
+ }
+ if (infrastructureConfigCase_ == 6) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeMessageSize(
+ 6, (com.google.cloud.dataproc.v1.KubernetesClusterConfig) infrastructureConfig_);
+ }
+ if (auxiliaryServicesConfig_ != null) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeMessageSize(7, getAuxiliaryServicesConfig());
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.dataproc.v1.VirtualClusterConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.dataproc.v1.VirtualClusterConfig other =
+ (com.google.cloud.dataproc.v1.VirtualClusterConfig) obj;
+
+ if (!getStagingBucket().equals(other.getStagingBucket())) return false;
+ if (!getTempBucket().equals(other.getTempBucket())) return false;
+ if (hasAuxiliaryServicesConfig() != other.hasAuxiliaryServicesConfig()) return false;
+ if (hasAuxiliaryServicesConfig()) {
+ if (!getAuxiliaryServicesConfig().equals(other.getAuxiliaryServicesConfig())) return false;
+ }
+ if (!getInfrastructureConfigCase().equals(other.getInfrastructureConfigCase())) return false;
+ switch (infrastructureConfigCase_) {
+ case 6:
+ if (!getKubernetesClusterConfig().equals(other.getKubernetesClusterConfig())) return false;
+ break;
+ case 0:
+ default:
+ }
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + STAGING_BUCKET_FIELD_NUMBER;
+ hash = (53 * hash) + getStagingBucket().hashCode();
+ hash = (37 * hash) + TEMP_BUCKET_FIELD_NUMBER;
+ hash = (53 * hash) + getTempBucket().hashCode();
+ if (hasAuxiliaryServicesConfig()) {
+ hash = (37 * hash) + AUXILIARY_SERVICES_CONFIG_FIELD_NUMBER;
+ hash = (53 * hash) + getAuxiliaryServicesConfig().hashCode();
+ }
+ switch (infrastructureConfigCase_) {
+ case 6:
+ hash = (37 * hash) + KUBERNETES_CLUSTER_CONFIG_FIELD_NUMBER;
+ hash = (53 * hash) + getKubernetesClusterConfig().hashCode();
+ break;
+ case 0:
+ default:
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(com.google.cloud.dataproc.v1.VirtualClusterConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * Dataproc cluster config for a cluster that does not directly control the
+ * underlying compute resources, such as a [Dataproc-on-GKE
+ * cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1.VirtualClusterConfig}
+ */
+  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
+ implements
+ // @@protoc_insertion_point(builder_implements:google.cloud.dataproc.v1.VirtualClusterConfig)
+ com.google.cloud.dataproc.v1.VirtualClusterConfigOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.dataproc.v1.ClustersProto
+ .internal_static_google_cloud_dataproc_v1_VirtualClusterConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1.ClustersProto
+ .internal_static_google_cloud_dataproc_v1_VirtualClusterConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1.VirtualClusterConfig.class,
+ com.google.cloud.dataproc.v1.VirtualClusterConfig.Builder.class);
+ }
+
+ // Construct using com.google.cloud.dataproc.v1.VirtualClusterConfig.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ stagingBucket_ = "";
+
+ tempBucket_ = "";
+
+ if (auxiliaryServicesConfigBuilder_ == null) {
+ auxiliaryServicesConfig_ = null;
+ } else {
+ auxiliaryServicesConfig_ = null;
+ auxiliaryServicesConfigBuilder_ = null;
+ }
+ infrastructureConfigCase_ = 0;
+ infrastructureConfig_ = null;
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.cloud.dataproc.v1.ClustersProto
+ .internal_static_google_cloud_dataproc_v1_VirtualClusterConfig_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.VirtualClusterConfig getDefaultInstanceForType() {
+ return com.google.cloud.dataproc.v1.VirtualClusterConfig.getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.VirtualClusterConfig build() {
+ com.google.cloud.dataproc.v1.VirtualClusterConfig result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.VirtualClusterConfig buildPartial() {
+ com.google.cloud.dataproc.v1.VirtualClusterConfig result =
+ new com.google.cloud.dataproc.v1.VirtualClusterConfig(this);
+ result.stagingBucket_ = stagingBucket_;
+ result.tempBucket_ = tempBucket_;
+ if (infrastructureConfigCase_ == 6) {
+ if (kubernetesClusterConfigBuilder_ == null) {
+ result.infrastructureConfig_ = infrastructureConfig_;
+ } else {
+ result.infrastructureConfig_ = kubernetesClusterConfigBuilder_.build();
+ }
+ }
+ if (auxiliaryServicesConfigBuilder_ == null) {
+ result.auxiliaryServicesConfig_ = auxiliaryServicesConfig_;
+ } else {
+ result.auxiliaryServicesConfig_ = auxiliaryServicesConfigBuilder_.build();
+ }
+ result.infrastructureConfigCase_ = infrastructureConfigCase_;
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.cloud.dataproc.v1.VirtualClusterConfig) {
+ return mergeFrom((com.google.cloud.dataproc.v1.VirtualClusterConfig) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(com.google.cloud.dataproc.v1.VirtualClusterConfig other) {
+ if (other == com.google.cloud.dataproc.v1.VirtualClusterConfig.getDefaultInstance())
+ return this;
+ if (!other.getStagingBucket().isEmpty()) {
+ stagingBucket_ = other.stagingBucket_;
+ onChanged();
+ }
+ if (!other.getTempBucket().isEmpty()) {
+ tempBucket_ = other.tempBucket_;
+ onChanged();
+ }
+ if (other.hasAuxiliaryServicesConfig()) {
+ mergeAuxiliaryServicesConfig(other.getAuxiliaryServicesConfig());
+ }
+ switch (other.getInfrastructureConfigCase()) {
+ case KUBERNETES_CLUSTER_CONFIG:
+ {
+ mergeKubernetesClusterConfig(other.getKubernetesClusterConfig());
+ break;
+ }
+ case INFRASTRUCTURECONFIG_NOT_SET:
+ {
+ break;
+ }
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.cloud.dataproc.v1.VirtualClusterConfig parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage =
+ (com.google.cloud.dataproc.v1.VirtualClusterConfig) e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ private int infrastructureConfigCase_ = 0;
+ private java.lang.Object infrastructureConfig_;
+
+ public InfrastructureConfigCase getInfrastructureConfigCase() {
+ return InfrastructureConfigCase.forNumber(infrastructureConfigCase_);
+ }
+
+ public Builder clearInfrastructureConfig() {
+ infrastructureConfigCase_ = 0;
+ infrastructureConfig_ = null;
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object stagingBucket_ = "";
+ /**
+ *
+ *
+ *
+     * Optional. A Cloud Storage bucket used to stage job
+ * dependencies, config files, and job driver console output.
+ * If you do not specify a staging bucket, Cloud
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's staging bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string staging_bucket = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The stagingBucket.
+ */
+ public java.lang.String getStagingBucket() {
+ java.lang.Object ref = stagingBucket_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ stagingBucket_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+     * Optional. A Cloud Storage bucket used to stage job
+ * dependencies, config files, and job driver console output.
+ * If you do not specify a staging bucket, Cloud
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's staging bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string staging_bucket = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for stagingBucket.
+ */
+ public com.google.protobuf.ByteString getStagingBucketBytes() {
+ java.lang.Object ref = stagingBucket_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ stagingBucket_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+     * Optional. A Cloud Storage bucket used to stage job
+ * dependencies, config files, and job driver console output.
+ * If you do not specify a staging bucket, Cloud
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's staging bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string staging_bucket = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The stagingBucket to set.
+ * @return This builder for chaining.
+ */
+ public Builder setStagingBucket(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ stagingBucket_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+     * Optional. A Cloud Storage bucket used to stage job
+ * dependencies, config files, and job driver console output.
+ * If you do not specify a staging bucket, Cloud
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's staging bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string staging_bucket = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearStagingBucket() {
+
+ stagingBucket_ = getDefaultInstance().getStagingBucket();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+     * Optional. A Cloud Storage bucket used to stage job
+ * dependencies, config files, and job driver console output.
+ * If you do not specify a staging bucket, Cloud
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's staging bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string staging_bucket = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The bytes for stagingBucket to set.
+ * @return This builder for chaining.
+ */
+ public Builder setStagingBucketBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ stagingBucket_ = value;
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object tempBucket_ = "";
+ /**
+ *
+ *
+ *
+ * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
+ * such as Spark and MapReduce history files.
+ * If you do not specify a temp bucket,
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's temp bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket. The default bucket has
+ * a TTL of 90 days, but you can use any TTL (or none) if you specify a
+ * bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The tempBucket.
+ */
+ public java.lang.String getTempBucket() {
+ java.lang.Object ref = tempBucket_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ tempBucket_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
+ * such as Spark and MapReduce history files.
+ * If you do not specify a temp bucket,
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's temp bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket. The default bucket has
+ * a TTL of 90 days, but you can use any TTL (or none) if you specify a
+ * bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for tempBucket.
+ */
+ public com.google.protobuf.ByteString getTempBucketBytes() {
+ java.lang.Object ref = tempBucket_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ tempBucket_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
+ * such as Spark and MapReduce history files.
+ * If you do not specify a temp bucket,
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's temp bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket. The default bucket has
+ * a TTL of 90 days, but you can use any TTL (or none) if you specify a
+ * bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The tempBucket to set.
+ * @return This builder for chaining.
+ */
+ public Builder setTempBucket(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ tempBucket_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
+ * such as Spark and MapReduce history files.
+ * If you do not specify a temp bucket,
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's temp bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket. The default bucket has
+ * a TTL of 90 days, but you can use any TTL (or none) if you specify a
+ * bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearTempBucket() {
+
+ tempBucket_ = getDefaultInstance().getTempBucket();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
+ * such as Spark and MapReduce history files.
+ * If you do not specify a temp bucket,
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's temp bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket. The default bucket has
+ * a TTL of 90 days, but you can use any TTL (or none) if you specify a
+ * bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @param value The bytes for tempBucket to set.
+ * @return This builder for chaining.
+ */
+ public Builder setTempBucketBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ tempBucket_ = value;
+ onChanged();
+ return this;
+ }
+
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig,
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig.Builder,
+ com.google.cloud.dataproc.v1.KubernetesClusterConfigOrBuilder>
+ kubernetesClusterConfigBuilder_;
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return Whether the kubernetesClusterConfig field is set.
+ */
+ @java.lang.Override
+ public boolean hasKubernetesClusterConfig() {
+ return infrastructureConfigCase_ == 6;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return The kubernetesClusterConfig.
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesClusterConfig getKubernetesClusterConfig() {
+ if (kubernetesClusterConfigBuilder_ == null) {
+ if (infrastructureConfigCase_ == 6) {
+ return (com.google.cloud.dataproc.v1.KubernetesClusterConfig) infrastructureConfig_;
+ }
+ return com.google.cloud.dataproc.v1.KubernetesClusterConfig.getDefaultInstance();
+ } else {
+ if (infrastructureConfigCase_ == 6) {
+ return kubernetesClusterConfigBuilder_.getMessage();
+ }
+ return com.google.cloud.dataproc.v1.KubernetesClusterConfig.getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ public Builder setKubernetesClusterConfig(
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig value) {
+ if (kubernetesClusterConfigBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ infrastructureConfig_ = value;
+ onChanged();
+ } else {
+ kubernetesClusterConfigBuilder_.setMessage(value);
+ }
+ infrastructureConfigCase_ = 6;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ public Builder setKubernetesClusterConfig(
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig.Builder builderForValue) {
+ if (kubernetesClusterConfigBuilder_ == null) {
+ infrastructureConfig_ = builderForValue.build();
+ onChanged();
+ } else {
+ kubernetesClusterConfigBuilder_.setMessage(builderForValue.build());
+ }
+ infrastructureConfigCase_ = 6;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ public Builder mergeKubernetesClusterConfig(
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig value) {
+ if (kubernetesClusterConfigBuilder_ == null) {
+ if (infrastructureConfigCase_ == 6
+ && infrastructureConfig_
+ != com.google.cloud.dataproc.v1.KubernetesClusterConfig.getDefaultInstance()) {
+ infrastructureConfig_ =
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig.newBuilder(
+ (com.google.cloud.dataproc.v1.KubernetesClusterConfig) infrastructureConfig_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ infrastructureConfig_ = value;
+ }
+ onChanged();
+ } else {
+ if (infrastructureConfigCase_ == 6) {
+ kubernetesClusterConfigBuilder_.mergeFrom(value);
+ }
+ kubernetesClusterConfigBuilder_.setMessage(value);
+ }
+ infrastructureConfigCase_ = 6;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ public Builder clearKubernetesClusterConfig() {
+ if (kubernetesClusterConfigBuilder_ == null) {
+ if (infrastructureConfigCase_ == 6) {
+ infrastructureConfigCase_ = 0;
+ infrastructureConfig_ = null;
+ onChanged();
+ }
+ } else {
+ if (infrastructureConfigCase_ == 6) {
+ infrastructureConfigCase_ = 0;
+ infrastructureConfig_ = null;
+ }
+ kubernetesClusterConfigBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ public com.google.cloud.dataproc.v1.KubernetesClusterConfig.Builder
+ getKubernetesClusterConfigBuilder() {
+ return getKubernetesClusterConfigFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.KubernetesClusterConfigOrBuilder
+ getKubernetesClusterConfigOrBuilder() {
+ if ((infrastructureConfigCase_ == 6) && (kubernetesClusterConfigBuilder_ != null)) {
+ return kubernetesClusterConfigBuilder_.getMessageOrBuilder();
+ } else {
+ if (infrastructureConfigCase_ == 6) {
+ return (com.google.cloud.dataproc.v1.KubernetesClusterConfig) infrastructureConfig_;
+ }
+ return com.google.cloud.dataproc.v1.KubernetesClusterConfig.getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig,
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig.Builder,
+ com.google.cloud.dataproc.v1.KubernetesClusterConfigOrBuilder>
+ getKubernetesClusterConfigFieldBuilder() {
+ if (kubernetesClusterConfigBuilder_ == null) {
+ if (!(infrastructureConfigCase_ == 6)) {
+ infrastructureConfig_ =
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig.getDefaultInstance();
+ }
+ kubernetesClusterConfigBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig,
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig.Builder,
+ com.google.cloud.dataproc.v1.KubernetesClusterConfigOrBuilder>(
+ (com.google.cloud.dataproc.v1.KubernetesClusterConfig) infrastructureConfig_,
+ getParentForChildren(),
+ isClean());
+ infrastructureConfig_ = null;
+ }
+ infrastructureConfigCase_ = 6;
+ onChanged();
+ ;
+ return kubernetesClusterConfigBuilder_;
+ }
+
+ private com.google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliaryServicesConfig_;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig,
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.Builder,
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfigOrBuilder>
+ auxiliaryServicesConfigBuilder_;
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the auxiliaryServicesConfig field is set.
+ */
+ public boolean hasAuxiliaryServicesConfig() {
+ return auxiliaryServicesConfigBuilder_ != null || auxiliaryServicesConfig_ != null;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The auxiliaryServicesConfig.
+ */
+ public com.google.cloud.dataproc.v1.AuxiliaryServicesConfig getAuxiliaryServicesConfig() {
+ if (auxiliaryServicesConfigBuilder_ == null) {
+ return auxiliaryServicesConfig_ == null
+ ? com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.getDefaultInstance()
+ : auxiliaryServicesConfig_;
+ } else {
+ return auxiliaryServicesConfigBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setAuxiliaryServicesConfig(
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig value) {
+ if (auxiliaryServicesConfigBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ auxiliaryServicesConfig_ = value;
+ onChanged();
+ } else {
+ auxiliaryServicesConfigBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setAuxiliaryServicesConfig(
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.Builder builderForValue) {
+ if (auxiliaryServicesConfigBuilder_ == null) {
+ auxiliaryServicesConfig_ = builderForValue.build();
+ onChanged();
+ } else {
+ auxiliaryServicesConfigBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder mergeAuxiliaryServicesConfig(
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig value) {
+ if (auxiliaryServicesConfigBuilder_ == null) {
+ if (auxiliaryServicesConfig_ != null) {
+ auxiliaryServicesConfig_ =
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.newBuilder(
+ auxiliaryServicesConfig_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ auxiliaryServicesConfig_ = value;
+ }
+ onChanged();
+ } else {
+ auxiliaryServicesConfigBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder clearAuxiliaryServicesConfig() {
+ if (auxiliaryServicesConfigBuilder_ == null) {
+ auxiliaryServicesConfig_ = null;
+ onChanged();
+ } else {
+ auxiliaryServicesConfig_ = null;
+ auxiliaryServicesConfigBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.Builder
+ getAuxiliaryServicesConfigBuilder() {
+
+ onChanged();
+ return getAuxiliaryServicesConfigFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.cloud.dataproc.v1.AuxiliaryServicesConfigOrBuilder
+ getAuxiliaryServicesConfigOrBuilder() {
+ if (auxiliaryServicesConfigBuilder_ != null) {
+ return auxiliaryServicesConfigBuilder_.getMessageOrBuilder();
+ } else {
+ return auxiliaryServicesConfig_ == null
+ ? com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.getDefaultInstance()
+ : auxiliaryServicesConfig_;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig,
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.Builder,
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfigOrBuilder>
+ getAuxiliaryServicesConfigFieldBuilder() {
+ if (auxiliaryServicesConfigBuilder_ == null) {
+ auxiliaryServicesConfigBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig,
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig.Builder,
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfigOrBuilder>(
+ getAuxiliaryServicesConfig(), getParentForChildren(), isClean());
+ auxiliaryServicesConfig_ = null;
+ }
+ return auxiliaryServicesConfigBuilder_;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1.VirtualClusterConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.VirtualClusterConfig)
+ private static final com.google.cloud.dataproc.v1.VirtualClusterConfig DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1.VirtualClusterConfig();
+ }
+
+ public static com.google.cloud.dataproc.v1.VirtualClusterConfig getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+  private static final com.google.protobuf.Parser<VirtualClusterConfig> PARSER =
+      new com.google.protobuf.AbstractParser<VirtualClusterConfig>() {
+ @java.lang.Override
+ public VirtualClusterConfig parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new VirtualClusterConfig(input, extensionRegistry);
+ }
+ };
+
+  public static com.google.protobuf.Parser<VirtualClusterConfig> parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+  public com.google.protobuf.Parser<VirtualClusterConfig> getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.cloud.dataproc.v1.VirtualClusterConfig getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/VirtualClusterConfigOrBuilder.java b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/VirtualClusterConfigOrBuilder.java
new file mode 100644
index 00000000..cd4563e8
--- /dev/null
+++ b/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/VirtualClusterConfigOrBuilder.java
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1/clusters.proto
+
+package com.google.cloud.dataproc.v1;
+
+public interface VirtualClusterConfigOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.cloud.dataproc.v1.VirtualClusterConfig)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ *
+ *
+ *
+ * Optional. A Storage bucket used to stage job
+ * dependencies, config files, and job driver console output.
+ * If you do not specify a staging bucket, Cloud
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's staging bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string staging_bucket = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The stagingBucket.
+ */
+ java.lang.String getStagingBucket();
+ /**
+ *
+ *
+ *
+ * Optional. A Storage bucket used to stage job
+ * dependencies, config files, and job driver console output.
+ * If you do not specify a staging bucket, Cloud
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's staging bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string staging_bucket = 1 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for stagingBucket.
+ */
+ com.google.protobuf.ByteString getStagingBucketBytes();
+
+ /**
+ *
+ *
+ *
+ * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
+ * such as Spark and MapReduce history files.
+ * If you do not specify a temp bucket,
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's temp bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket. The default bucket has
+ * a TTL of 90 days, but you can use any TTL (or none) if you specify a
+ * bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The tempBucket.
+ */
+ java.lang.String getTempBucket();
+ /**
+ *
+ *
+ *
+ * Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
+ * such as Spark and MapReduce history files.
+ * If you do not specify a temp bucket,
+ * Dataproc will determine a Cloud Storage location (US,
+ * ASIA, or EU) for your cluster's temp bucket according to the
+ * Compute Engine zone where your cluster is deployed, and then create
+ * and manage this project-level, per-location bucket. The default bucket has
+ * a TTL of 90 days, but you can use any TTL (or none) if you specify a
+ * bucket (see
+ * [Dataproc staging and temp
+ * buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ * **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ * a Cloud Storage bucket.**
+ *
+ *
+ * string temp_bucket = 2 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ * @return The bytes for tempBucket.
+ */
+ com.google.protobuf.ByteString getTempBucketBytes();
+
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return Whether the kubernetesClusterConfig field is set.
+ */
+ boolean hasKubernetesClusterConfig();
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ *
+ * @return The kubernetesClusterConfig.
+ */
+ com.google.cloud.dataproc.v1.KubernetesClusterConfig getKubernetesClusterConfig();
+ /**
+ *
+ *
+ *
+ * Required. The configuration for running the Dataproc cluster on Kubernetes.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.KubernetesClusterConfig kubernetes_cluster_config = 6 [(.google.api.field_behavior) = REQUIRED];
+ *
+ */
+ com.google.cloud.dataproc.v1.KubernetesClusterConfigOrBuilder
+ getKubernetesClusterConfigOrBuilder();
+
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the auxiliaryServicesConfig field is set.
+ */
+ boolean hasAuxiliaryServicesConfig();
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The auxiliaryServicesConfig.
+ */
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfig getAuxiliaryServicesConfig();
+ /**
+ *
+ *
+ *
+ * Optional. Configuration of auxiliary services used by this cluster.
+ *
+ *
+ *
+ * .google.cloud.dataproc.v1.AuxiliaryServicesConfig auxiliary_services_config = 7 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ com.google.cloud.dataproc.v1.AuxiliaryServicesConfigOrBuilder
+ getAuxiliaryServicesConfigOrBuilder();
+
+ public com.google.cloud.dataproc.v1.VirtualClusterConfig.InfrastructureConfigCase
+ getInfrastructureConfigCase();
+}
diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto
index 0bb1aaef..0b9f0a02 100644
--- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto
+++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto
@@ -1,4 +1,4 @@
-// Copyright 2021 Google LLC
+// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -167,6 +167,15 @@ message Cluster {
// when clusters are updated.
ClusterConfig config = 3 [(google.api.field_behavior) = OPTIONAL];
+ // Optional. The virtual cluster config, used when creating a Dataproc cluster that
+ // does not directly control the underlying compute resources, for example,
+ // when creating a [Dataproc-on-GKE
+ // cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+ // Note that Dataproc may set default values, and values may change when
+ // clusters are updated. Exactly one of config or virtualClusterConfig must be
+ // specified.
+ VirtualClusterConfig virtual_cluster_config = 10 [(google.api.field_behavior) = OPTIONAL];
+
// Optional. The labels to associate with this cluster.
// Label **keys** must contain 1 to 63 characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
@@ -275,33 +284,56 @@ message ClusterConfig {
// Optional. Metastore configuration.
MetastoreConfig metastore_config = 20 [(google.api.field_behavior) = OPTIONAL];
-
- // Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
- // Kubernetes. Setting this is considered mutually exclusive with Compute
- // Engine-based options such as `gce_cluster_config`, `master_config`,
- // `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
- GkeClusterConfig gke_cluster_config = 21 [(google.api.field_behavior) = OPTIONAL];
}
-// The GKE config for this cluster.
-message GkeClusterConfig {
- // A full, namespace-isolated deployment target for an existing GKE cluster.
- message NamespacedGkeDeploymentTarget {
- // Optional. The target GKE cluster to deploy to.
- // Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
- string target_gke_cluster = 1 [
- (google.api.field_behavior) = OPTIONAL,
- (google.api.resource_reference) = {
- type: "container.googleapis.com/Cluster"
- }
- ];
-
- // Optional. A namespace within the GKE cluster to deploy into.
- string cluster_namespace = 2 [(google.api.field_behavior) = OPTIONAL];
+// Dataproc cluster config for a cluster that does not directly control the
+// underlying compute resources, such as a [Dataproc-on-GKE
+// cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+message VirtualClusterConfig {
+ // Optional. A Storage bucket used to stage job
+ // dependencies, config files, and job driver console output.
+ // If you do not specify a staging bucket, Cloud
+ // Dataproc will determine a Cloud Storage location (US,
+ // ASIA, or EU) for your cluster's staging bucket according to the
+ // Compute Engine zone where your cluster is deployed, and then create
+ // and manage this project-level, per-location bucket (see
+ // [Dataproc staging and temp
+ // buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ // **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ // a Cloud Storage bucket.**
+ string staging_bucket = 1 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
+ // such as Spark and MapReduce history files.
+ // If you do not specify a temp bucket,
+ // Dataproc will determine a Cloud Storage location (US,
+ // ASIA, or EU) for your cluster's temp bucket according to the
+ // Compute Engine zone where your cluster is deployed, and then create
+ // and manage this project-level, per-location bucket. The default bucket has
+ // a TTL of 90 days, but you can use any TTL (or none) if you specify a
+ // bucket (see
+ // [Dataproc staging and temp
+ // buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
+ // **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
+ // a Cloud Storage bucket.**
+ string temp_bucket = 2 [(google.api.field_behavior) = OPTIONAL];
+
+ oneof infrastructure_config {
+ // Required. The configuration for running the Dataproc cluster on Kubernetes.
+ KubernetesClusterConfig kubernetes_cluster_config = 6 [(google.api.field_behavior) = REQUIRED];
}
- // Optional. A target for the deployment.
- NamespacedGkeDeploymentTarget namespaced_gke_deployment_target = 1 [(google.api.field_behavior) = OPTIONAL];
+ // Optional. Configuration of auxiliary services used by this cluster.
+ AuxiliaryServicesConfig auxiliary_services_config = 7 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Auxiliary services configuration for a Cluster.
+message AuxiliaryServicesConfig {
+ // Optional. The Hive Metastore configuration for this workload.
+ MetastoreConfig metastore_config = 1 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The Spark History Server configuration for the workload.
+ SparkHistoryServerConfig spark_history_server_config = 2 [(google.api.field_behavior) = OPTIONAL];
}
// Endpoint config for this cluster
@@ -660,8 +692,8 @@ message DiskConfig {
// Optional. Interface type of local SSDs (default is "scsi").
// Valid values: "scsi" (Small Computer System Interface),
// "nvme" (Non-Volatile Memory Express).
- // See [SSD Interface
- // types](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
+ // See [local SSD
+ // performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
string local_ssd_interface = 4 [(google.api.field_behavior) = OPTIONAL];
}
@@ -692,6 +724,10 @@ message ClusterStatus {
CREATING = 1;
// The cluster is currently running and healthy. It is ready for use.
+ //
+ // **Note:** The cluster state changes from "creating" to "running" status
+ // after the master node(s), first two primary worker nodes (and the last
+ // primary worker node if primary workers > 2) are running.
RUNNING = 2;
// The cluster encountered an error. It is not ready for use.
diff --git a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto
index f574fe01..18796915 100644
--- a/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto
+++ b/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/shared.proto
@@ -1,4 +1,4 @@
-// Copyright 2021 Google LLC
+// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -108,6 +108,179 @@ message RuntimeInfo {
string diagnostic_output_uri = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
}
+// The cluster's GKE config.
+message GkeClusterConfig {
+ // Optional. A target GKE cluster to deploy to. It must be in the same project and
+ // region as the Dataproc cluster (the GKE cluster can be zonal or regional).
+ // Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
+ string gke_cluster_target = 2 [
+ (google.api.field_behavior) = OPTIONAL
+ ];
+
+ // Optional. GKE NodePools where workloads will be scheduled. At least one node pool
+ // must be assigned the 'default' role. Each role can be given to only a
+ // single NodePoolTarget. All NodePools must have the same location settings.
+ // If a nodePoolTarget is not specified, Dataproc constructs a default
+ // nodePoolTarget.
+ repeated GkeNodePoolTarget node_pool_target = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The configuration for running the Dataproc cluster on Kubernetes.
+message KubernetesClusterConfig {
+ // Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace
+ // does not exist, it is created. If it exists, Dataproc
+ // verifies that another Dataproc VirtualCluster is not installed
+ // into it. If not specified, the name of the Dataproc Cluster is used.
+ string kubernetes_namespace = 1 [(google.api.field_behavior) = OPTIONAL];
+
+ oneof config {
+ // Required. The configuration for running the Dataproc cluster on GKE.
+ GkeClusterConfig gke_cluster_config = 2 [(google.api.field_behavior) = REQUIRED];
+ }
+
+ // Optional. The software configuration for this Dataproc cluster running on Kubernetes.
+ KubernetesSoftwareConfig kubernetes_software_config = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The software configuration for this Dataproc cluster running on Kubernetes.
+message KubernetesSoftwareConfig {
+ // The components that should be installed in this Dataproc cluster. The key
+ // must be a string from the KubernetesComponent enumeration. The value is
+ // the version of the software to be installed.
+ // At least one entry must be specified.
+  map<string, string> component_version = 1;
+
+ // The properties to set on daemon config files.
+ //
+ // Property keys are specified in `prefix:property` format, for example
+ // `spark:spark.kubernetes.container.image`. The following are supported
+ // prefixes and their mappings:
+ //
+ // * spark: `spark-defaults.conf`
+ //
+ // For more information, see [Cluster
+ // properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
+  map<string, string> properties = 2;
+}
+
+// GKE NodePools that Dataproc workloads run on.
+message GkeNodePoolTarget {
+ // `Role` specifies whose tasks will run on the NodePool. The roles can be
+ // specific to workloads. Exactly one GkeNodePoolTarget within the
+ // VirtualCluster must have 'default' role, which is used to run all workloads
+ // that are not associated with a NodePool.
+ enum Role {
+ // Role is unspecified.
+ ROLE_UNSPECIFIED = 0;
+
+ // Any roles that are not directly assigned to a NodePool run on the
+ // `default` role's NodePool.
+ DEFAULT = 1;
+
+ // Run controllers and webhooks.
+ CONTROLLER = 2;
+
+ // Run spark driver.
+ SPARK_DRIVER = 3;
+
+ // Run spark executors.
+ SPARK_EXECUTOR = 4;
+ }
+
+ // Required. The target GKE NodePool.
+ // Format:
+ // 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
+ string node_pool = 1 [
+ (google.api.field_behavior) = REQUIRED
+ ];
+
+ // Required. The types of role for a GKE NodePool
+ repeated Role roles = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The configuration for the GKE NodePool.
+ //
+ // If specified, Dataproc attempts to create a NodePool with the
+ // specified shape. If one with the same name already exists, it is
+ // verified against all specified fields. If a field differs, the
+ // virtual cluster creation will fail.
+ //
+ // If omitted, any NodePool with the specified name is used. If a
+  // NodePool with the specified name does not exist, Dataproc creates a NodePool
+ // with default values.
+ GkeNodePoolConfig node_pool_config = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The configuration of a GKE NodePool used by a [Dataproc-on-GKE
+// cluster](https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).
+message GkeNodePoolConfig {
+ // Parameters that describe cluster nodes.
+ message GkeNodeConfig {
+ // Optional. The name of a Compute Engine [machine
+ // type](https://cloud.google.com/compute/docs/machine-types).
+ string machine_type = 1 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Whether the nodes are created as [preemptible VM
+ // instances](https://cloud.google.com/compute/docs/instances/preemptible).
+ bool preemptible = 10 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The number of local SSD disks to attach to the node, which is limited by
+ // the maximum number of disks allowable per zone (see [Adding Local
+ // SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
+ int32 local_ssd_count = 7 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. A list of [hardware
+ // accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
+ // each node.
+ repeated GkeNodePoolAcceleratorConfig accelerators = 11 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. [Minimum CPU
+ // platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ // to be used by this instance. The instance may be scheduled on the
+ // specified or a newer CPU platform. Specify the friendly names of CPU
+  // platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
+ string min_cpu_platform = 13 [(google.api.field_behavior) = OPTIONAL];
+ }
+
+ // A GkeNodeConfigAcceleratorConfig represents a Hardware Accelerator request
+ // for a NodePool.
+ message GkeNodePoolAcceleratorConfig {
+ // The number of accelerator cards exposed to an instance.
+ int64 accelerator_count = 1;
+
+  // The accelerator type resource name (see GPUs on Compute Engine).
+ string accelerator_type = 2;
+ }
+
+ // GkeNodePoolAutoscaling contains information the cluster autoscaler needs to
+ // adjust the size of the node pool to the current cluster usage.
+ message GkeNodePoolAutoscalingConfig {
+ // The minimum number of nodes in the NodePool. Must be >= 0 and <=
+ // max_node_count.
+ int32 min_node_count = 2;
+
+ // The maximum number of nodes in the NodePool. Must be >= min_node_count.
+ // **Note:** Quota must be sufficient to scale up the cluster.
+ int32 max_node_count = 3;
+ }
+
+ // Optional. The node pool configuration.
+ GkeNodeConfig config = 2 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The list of Compute Engine
+ // [zones](https://cloud.google.com/compute/docs/zones#available) where
+ // NodePool's nodes will be located.
+ //
+ // **Note:** Currently, only one zone may be specified.
+ //
+ // If a location is not specified during NodePool creation, Dataproc will
+ // choose a location.
+ repeated string locations = 13 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The autoscaler configuration for this NodePool. The autoscaler is enabled
+ // only when a valid configuration is present.
+ GkeNodePoolAutoscalingConfig autoscaling = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
// Cluster components that can be activated.
enum Component {
// Unspecified component. Specifying this will cause Cluster creation to fail.
From 425aecdfc384a2f9bef0ce1af702e949b177c657 Mon Sep 17 00:00:00 2001
From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com>
Date: Fri, 25 Feb 2022 22:00:44 +0000
Subject: [PATCH 12/22] ci: pull request template includes sample format
(#1357) (#799)
Source-Link: https://github.com/googleapis/synthtool/commit/e122cb03ea37652946651346736d99b9dcc4311f
Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-java:latest@sha256:387835a1375a0049ec44e02542c844302854c732d8291bdf8e472c0ff70a8f67
---
.github/.OwlBot.lock.yaml | 2 +-
.github/PULL_REQUEST_TEMPLATE.md | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index 9786771c..9351fdfb 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -13,4 +13,4 @@
# limitations under the License.
docker:
image: gcr.io/cloud-devrel-public-resources/owlbot-java:latest
- digest: sha256:3c950ed12391ebaffd1ee66d0374766a1c50144ebe6a7a0042300b2e6bb5856b
+ digest: sha256:387835a1375a0049ec44e02542c844302854c732d8291bdf8e472c0ff70a8f67
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 39883260..4c345b28 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -5,3 +5,6 @@ Thank you for opening a Pull Request! Before submitting your PR, there are a few
- [ ] Appropriate docs were updated (if necessary)
Fixes # ☕️
+
+If you write sample code, please follow the [samples format](
+https://github.com/GoogleCloudPlatform/java-docs-samples/blob/main/SAMPLE_FORMAT.md).
From 4754aa0321069b5b68bbc5cf6f237c44b317a84d Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Mon, 28 Feb 2022 23:12:21 +0100
Subject: [PATCH 13/22] deps: update dependency
com.google.cloud:google-cloud-storage to v2.4.0 (#782)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[](https://renovatebot.com)
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [com.google.cloud:google-cloud-storage](https://togithub.com/googleapis/java-storage) | `2.3.0` -> `2.4.0` | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) |
---
### Release Notes
googleapis/java-storage
### [`v2.4.0`](https://togithub.com/googleapis/java-storage/blob/HEAD/CHANGELOG.md#240-httpsgithubcomgoogleapisjava-storagecomparev230v240-2022-02-03)
[Compare Source](https://togithub.com/googleapis/java-storage/compare/v2.3.0...v2.4.0)
##### Features
- Change RewriteObjectRequest to specify bucket name, object name and KMS key outside of Object resource ([#1218](https://togithub.com/googleapis/java-storage/issues/1218)) ([8789e4f](https://togithub.com/googleapis/java-storage/commit/8789e4f73a3c5b36aa93246d172d07adb24027aa))
- re-generate gapic client to include full GCS gRPC API ([#1189](https://togithub.com/googleapis/java-storage/issues/1189)) ([3099a22](https://togithub.com/googleapis/java-storage/commit/3099a2264d8b135f602d8dd06f3e91ac5b0ecdba))
- Update definition of RewriteObjectRequest to bring to parity with JSON API support ([#1220](https://togithub.com/googleapis/java-storage/issues/1220)) ([7845c0e](https://togithub.com/googleapis/java-storage/commit/7845c0e8be5ba150f5e835172e9341ef2efc6054))
##### Bug Fixes
- Remove post policy v4 client side validation ([#1210](https://togithub.com/googleapis/java-storage/issues/1210)) ([631741d](https://togithub.com/googleapis/java-storage/commit/631741df96a6dddd31a38dce099f3d3ff09ca7cf))
##### Dependencies
- **java:** update actions/github-script action to v5 ([#1339](https://togithub.com/googleapis/java-storage/issues/1339)) ([#1215](https://togithub.com/googleapis/java-storage/issues/1215)) ([deb110b](https://togithub.com/googleapis/java-storage/commit/deb110b0b5ec4a7e6963d1c1ab0e63ca58240ae1))
- update dependency com.google.cloud:google-cloud-shared-dependencies to v2.7.0 ([#1219](https://togithub.com/googleapis/java-storage/issues/1219)) ([623e68b](https://togithub.com/googleapis/java-storage/commit/623e68b8b678df425730b6472cf34d7b78841757))
---
### Configuration
📅 **Schedule**: At any time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.
♻ **Rebasing**: Renovate will not automatically rebase this PR, because other commits have been found.
🔕 **Ignore**: Close this PR and you won't be reminded about this update again.
---
- [ ] If you want to rebase/retry this PR, click this checkbox.
---
This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/java-dataproc).
---
samples/install-without-bom/pom.xml | 2 +-
samples/snapshot/pom.xml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml
index 6c2e309e..975f3245 100644
--- a/samples/install-without-bom/pom.xml
+++ b/samples/install-without-bom/pom.xml
@@ -35,7 +35,7 @@
com.google.cloud
google-cloud-storage
- 2.3.0
+ 2.4.0
diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml
index 95858de9..57b33d9e 100644
--- a/samples/snapshot/pom.xml
+++ b/samples/snapshot/pom.xml
@@ -34,7 +34,7 @@
com.google.cloud
google-cloud-storage
- 2.3.0
+ 2.4.0
junit
From 53caaf9f0aac0a86f339d14efaccc42254bd4c76 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Mon, 28 Feb 2022 23:20:28 +0100
Subject: [PATCH 14/22] chore(deps): update dependency
com.google.cloud:google-cloud-dataproc to v2.3.2 (#783)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[](https://renovatebot.com)
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [com.google.cloud:google-cloud-dataproc](https://togithub.com/googleapis/java-dataproc) | `2.3.1` -> `2.3.2` | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) |
---
### Release Notes
googleapis/java-dataproc
### [`v2.3.2`](https://togithub.com/googleapis/java-dataproc/blob/HEAD/CHANGELOG.md#232-httpsgithubcomgoogleapisjava-dataproccomparev231v232-2022-02-03)
[Compare Source](https://togithub.com/googleapis/java-dataproc/compare/v2.3.1...v2.3.2)
---
### Configuration
📅 **Schedule**: At any time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.
♻ **Rebasing**: Renovate will not automatically rebase this PR, because other commits have been found.
🔕 **Ignore**: Close this PR and you won't be reminded about this update again.
---
- [ ] If you want to rebase/retry this PR, click this checkbox.
---
This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/java-dataproc).
---
README.md | 2 +-
samples/install-without-bom/pom.xml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index d476fb80..5543ad13 100644
--- a/README.md
+++ b/README.md
@@ -42,7 +42,7 @@ If you are using Maven without BOM, add this to your dependencies:
com.google.cloud
google-cloud-dataproc
- 2.3.1
+ 2.3.2
```
diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml
index 975f3245..0612a496 100644
--- a/samples/install-without-bom/pom.xml
+++ b/samples/install-without-bom/pom.xml
@@ -29,7 +29,7 @@
com.google.cloud
google-cloud-dataproc
- 2.3.1
+ 2.3.2
From f0f57f454a62b5be653ee75f4186e5575699d6dc Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Mon, 28 Feb 2022 23:28:40 +0100
Subject: [PATCH 15/22] build(deps): update dependency
org.apache.maven.plugins:maven-project-info-reports-plugin to v3.2.1 (#784)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[](https://renovatebot.com)
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [org.apache.maven.plugins:maven-project-info-reports-plugin](https://maven.apache.org/plugins/) | `3.1.2` -> `3.2.1` | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) |
---
### Configuration
📅 **Schedule**: At any time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.
♻ **Rebasing**: Renovate will not automatically rebase this PR, because other commits have been found.
🔕 **Ignore**: Close this PR and you won't be reminded about this update again.
---
- [ ] If you want to rebase/retry this PR, click this checkbox.
---
This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/java-dataproc).
---
pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pom.xml b/pom.xml
index 529868f7..51a9a5e8 100644
--- a/pom.xml
+++ b/pom.xml
@@ -120,7 +120,7 @@
org.apache.maven.plugins
maven-project-info-reports-plugin
- 3.1.2
+ 3.2.1
From a054ce8fb637f3452fd641a3f4689cbac3070456 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Tue, 1 Mar 2022 00:38:22 +0100
Subject: [PATCH 16/22] deps: update actions/setup-java action to v3 (#798)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[](https://renovatebot.com)
This PR contains the following updates:
| Package | Type | Update | Change |
|---|---|---|---|
| [actions/setup-java](https://togithub.com/actions/setup-java) | action | major | `v1` -> `v3` |
| [actions/setup-java](https://togithub.com/actions/setup-java) | action | major | `v2` -> `v3` |
---
### Release Notes
actions/setup-java
### [`v3`](https://togithub.com/actions/setup-java/compare/v2...v3)
[Compare Source](https://togithub.com/actions/setup-java/compare/v2...v3)
### [`v2`](https://togithub.com/actions/setup-java/compare/v1...v2)
[Compare Source](https://togithub.com/actions/setup-java/compare/v1...v2)
---
### Configuration
📅 **Schedule**: At any time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.
♻ **Rebasing**: Renovate will not automatically rebase this PR, because other commits have been found.
🔕 **Ignore**: Close this PR and you won't be reminded about these updates again.
---
- [ ] If you want to rebase/retry this PR, click this checkbox.
---
This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/java-dataproc).
From bfab0958ffc81615768d00bc8bc1e6af03a770e2 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Tue, 1 Mar 2022 03:42:26 +0100
Subject: [PATCH 17/22] deps: update dependency
com.google.cloud:google-cloud-storage to v2.4.4 (#800)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[](https://renovatebot.com)
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [com.google.cloud:google-cloud-storage](https://togithub.com/googleapis/java-storage) | `2.4.0` -> `2.4.4` | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) |
---
### Release Notes
googleapis/java-storage
### [`v2.4.4`](https://togithub.com/googleapis/java-storage/blob/HEAD/CHANGELOG.md#244-httpsgithubcomgoogleapisjava-storagecomparev243v244-2022-02-28)
[Compare Source](https://togithub.com/googleapis/java-storage/compare/v2.4.3...v2.4.4)
### [`v2.4.3`](https://togithub.com/googleapis/java-storage/blob/HEAD/CHANGELOG.md#243-httpsgithubcomgoogleapisjava-storagecomparev242v243-2022-02-25)
[Compare Source](https://togithub.com/googleapis/java-storage/compare/v2.4.2...v2.4.3)
### [`v2.4.2`](https://togithub.com/googleapis/java-storage/blob/HEAD/CHANGELOG.md#242-httpsgithubcomgoogleapisjava-storagecomparev241v242-2022-02-11)
[Compare Source](https://togithub.com/googleapis/java-storage/compare/v2.4.1...v2.4.2)
### [`v2.4.1`](https://togithub.com/googleapis/java-storage/blob/HEAD/CHANGELOG.md#241-httpsgithubcomgoogleapisjava-storagecomparev240v241-2022-02-08)
[Compare Source](https://togithub.com/googleapis/java-storage/compare/v2.4.0...v2.4.1)
---
### Configuration
📅 **Schedule**: At any time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.
♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox.
🔕 **Ignore**: Close this PR and you won't be reminded about this update again.
---
- [ ] If you want to rebase/retry this PR, click this checkbox.
---
This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/java-dataproc).
---
samples/install-without-bom/pom.xml | 2 +-
samples/snapshot/pom.xml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml
index 0612a496..c7e71c98 100644
--- a/samples/install-without-bom/pom.xml
+++ b/samples/install-without-bom/pom.xml
@@ -35,7 +35,7 @@
com.google.cloud
google-cloud-storage
- 2.4.0
+ 2.4.4
diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml
index 57b33d9e..1bad76f5 100644
--- a/samples/snapshot/pom.xml
+++ b/samples/snapshot/pom.xml
@@ -34,7 +34,7 @@
com.google.cloud
google-cloud-storage
- 2.4.0
+ 2.4.4
junit
From 2f7c56db340d2a30499ace5664ca1a9ec55b4248 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Tue, 1 Mar 2022 04:20:27 +0100
Subject: [PATCH 18/22] build(deps): update dependency
org.apache.maven.plugins:maven-project-info-reports-plugin to v3.2.2 (#801)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[](https://renovatebot.com)
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [org.apache.maven.plugins:maven-project-info-reports-plugin](https://maven.apache.org/plugins/) | `3.2.1` -> `3.2.2` | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) |
---
### Configuration
📅 **Schedule**: At any time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.
♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox.
🔕 **Ignore**: Close this PR and you won't be reminded about this update again.
---
- [ ] If you want to rebase/retry this PR, click this checkbox.
---
This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/java-dataproc).
---
pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pom.xml b/pom.xml
index 51a9a5e8..6f4e5205 100644
--- a/pom.xml
+++ b/pom.xml
@@ -120,7 +120,7 @@
org.apache.maven.plugins
maven-project-info-reports-plugin
- 3.2.1
+ 3.2.2
From 4b4092b14c592b09202e484028a696a408f976d0 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Wed, 2 Mar 2022 19:08:24 +0100
Subject: [PATCH 19/22] deps: update dependency
com.google.cloud:google-cloud-shared-dependencies to v2.8.0 (#804)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[](https://renovatebot.com)
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [com.google.cloud:google-cloud-shared-dependencies](https://togithub.com/googleapis/java-shared-dependencies) | `2.7.0` -> `2.8.0` | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) |
---
### Release Notes
googleapis/java-shared-dependencies
### [`v2.8.0`](https://togithub.com/googleapis/java-shared-dependencies/blob/HEAD/CHANGELOG.md#280-httpsgithubcomgoogleapisjava-shared-dependenciescomparev270v280-2022-03-02)
[Compare Source](https://togithub.com/googleapis/java-shared-dependencies/compare/v2.7.0...v2.8.0)
##### Dependencies
- update dependency com.google.api-client:google-api-client-bom to v1.33.2 ([#602](https://togithub.com/googleapis/java-shared-dependencies/issues/602)) ([85b132f](https://togithub.com/googleapis/java-shared-dependencies/commit/85b132f5830772646025b6a9fbbb970a941b86b5))
- update dependency com.google.api:api-common to v2.1.4 ([#605](https://togithub.com/googleapis/java-shared-dependencies/issues/605)) ([ca7d49a](https://togithub.com/googleapis/java-shared-dependencies/commit/ca7d49aee26b6d90abd2afd61c20861d2307fe9c))
- update dependency com.google.auth:google-auth-library-bom to v1.5.3 ([#614](https://togithub.com/googleapis/java-shared-dependencies/issues/614)) ([e6413f3](https://togithub.com/googleapis/java-shared-dependencies/commit/e6413f3b1be78473ace7085c344eda2d78cdf01a))
- update dependency com.google.cloud:google-cloud-core to v2.5.6 ([#621](https://togithub.com/googleapis/java-shared-dependencies/issues/621)) ([3d5669f](https://togithub.com/googleapis/java-shared-dependencies/commit/3d5669f215689dd2df71200ffe37d5a9d385cda8))
- update dependency com.google.code.gson:gson to v2.9.0 ([#611](https://togithub.com/googleapis/java-shared-dependencies/issues/611)) ([f59c28d](https://togithub.com/googleapis/java-shared-dependencies/commit/f59c28d8cd30ada1237f7722135ba148dce6315e))
- update dependency com.google.http-client:google-http-client-bom to v1.41.4 ([#608](https://togithub.com/googleapis/java-shared-dependencies/issues/608)) ([f9a4f23](https://togithub.com/googleapis/java-shared-dependencies/commit/f9a4f23d3a32148ac59e53eaae6558ccbceca12c))
- update dependency com.google.oauth-client:google-oauth-client-bom to v1.33.1 ([#606](https://togithub.com/googleapis/java-shared-dependencies/issues/606)) ([3882494](https://togithub.com/googleapis/java-shared-dependencies/commit/3882494770d48fcc02ed19088aa06612a8e440eb))
- update dependency com.google.protobuf:protobuf-bom to v3.19.4 ([#593](https://togithub.com/googleapis/java-shared-dependencies/issues/593)) ([1e155bf](https://togithub.com/googleapis/java-shared-dependencies/commit/1e155bfc957bbb7e25d2e0994cdecaa81843bdc5))
- update dependency io.grpc:grpc-bom to v1.44.1 ([#613](https://togithub.com/googleapis/java-shared-dependencies/issues/613)) ([3038a2c](https://togithub.com/googleapis/java-shared-dependencies/commit/3038a2c86cd20c91b65f2d7926eeb739147a68db))
- update dependency junit:junit to v4.13.2 ([#607](https://togithub.com/googleapis/java-shared-dependencies/issues/607)) ([987e617](https://togithub.com/googleapis/java-shared-dependencies/commit/987e61796c7a093e005fe8832cf39275c391b2c1))
- update dependency org.checkerframework:checker-qual to v3.21.3 ([#596](https://togithub.com/googleapis/java-shared-dependencies/issues/596)) ([ac5083c](https://togithub.com/googleapis/java-shared-dependencies/commit/ac5083cd010924dc128f041c2cdbab20166a6bf0))
- update gax.version to v2.12.2 ([#592](https://togithub.com/googleapis/java-shared-dependencies/issues/592)) ([713ff8d](https://togithub.com/googleapis/java-shared-dependencies/commit/713ff8dd94f939c417524616bc47c771a3fbe0cf))
- update google.common-protos.version to v2.7.4 ([#603](https://togithub.com/googleapis/java-shared-dependencies/issues/603)) ([bb9b4c7](https://togithub.com/googleapis/java-shared-dependencies/commit/bb9b4c7e39552cc73b3b9d4c826b26e8cb74459f))
- update google.core.version ([#599](https://togithub.com/googleapis/java-shared-dependencies/issues/599)) ([327d0df](https://togithub.com/googleapis/java-shared-dependencies/commit/327d0df9b57203c0d4f426de0380770d3d7910d6))
- update iam.version to v1.2.6 ([#598](https://togithub.com/googleapis/java-shared-dependencies/issues/598)) ([2801439](https://togithub.com/googleapis/java-shared-dependencies/commit/280143964c7c3b93a8d7f67215ba2cc74ffce761))
---
### Configuration
📅 **Schedule**: At any time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.
♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox.
🔕 **Ignore**: Close this PR and you won't be reminded about this update again.
---
- [ ] If you want to rebase/retry this PR, click this checkbox.
---
This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/java-dataproc).
---
pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pom.xml b/pom.xml
index 6f4e5205..9d200130 100644
--- a/pom.xml
+++ b/pom.xml
@@ -77,7 +77,7 @@
com.google.cloud
google-cloud-shared-dependencies
- 2.7.0
+ 2.8.0
pom
import
From 1105dbbf188afc9fcf87b87b8c3761bbdc1cd3a3 Mon Sep 17 00:00:00 2001
From: WhiteSource Renovate
Date: Thu, 3 Mar 2022 02:36:48 +0100
Subject: [PATCH 20/22] chore(deps): update dependency
com.google.cloud:libraries-bom to v24.4.0 (#805)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[](https://renovatebot.com)
This PR contains the following updates:
| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [com.google.cloud:libraries-bom](https://cloud.google.com/java/docs/bom) ([source](https://togithub.com/GoogleCloudPlatform/cloud-opensource-java)) | `24.3.0` -> `24.4.0` | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) | [](https://docs.renovatebot.com/merge-confidence/) |
---
### Configuration
📅 **Schedule**: At any time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.
♻ **Rebasing**: Renovate will not automatically rebase this PR, because other commits have been found.
🔕 **Ignore**: Close this PR and you won't be reminded about this update again.
---
- [ ] If you want to rebase/retry this PR, click this checkbox.
---
This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/java-dataproc).
---
README.md | 4 ++--
samples/snippets/pom.xml | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index 5543ad13..67b26d8a 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file
com.google.cloud
libraries-bom
- 24.3.0
+ 24.4.0
pom
import
@@ -50,7 +50,7 @@ If you are using Maven without BOM, add this to your dependencies:
If you are using Gradle 5.x or later, add this to your dependencies
```Groovy
-implementation platform('com.google.cloud:libraries-bom:24.3.0')
+implementation platform('com.google.cloud:libraries-bom:24.4.0')
implementation 'com.google.cloud:google-cloud-dataproc'
```
diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml
index eda69488..b59943b8 100644
--- a/samples/snippets/pom.xml
+++ b/samples/snippets/pom.xml
@@ -30,7 +30,7 @@
com.google.cloud
libraries-bom
- 24.3.0
+ 24.4.0
pom
import
From 74dd2ec823798aca6daf0c9d7e248d35294ced68 Mon Sep 17 00:00:00 2001
From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com>
Date: Thu, 3 Mar 2022 18:34:40 +0000
Subject: [PATCH 21/22] chore: fix license header in build.bat (#1363) (#806)
* chore: fix license header in build.bat
* chore: add distribution field for setup-java action
Source-Link: https://github.com/googleapis/synthtool/commit/4fea5f40a9075f3ba205ede0b453010cf080e194
Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-java:latest@sha256:df8d7b2cc0dbc65871e7edd86601901a0612b272fa3f7f0eb590c5c53aa5f92e
---
.github/.OwlBot.lock.yaml | 2 +-
.github/workflows/approve-readme.yaml | 2 +-
.github/workflows/auto-release.yaml | 2 +-
.github/workflows/ci.yaml | 20 +++++++++---------
.github/workflows/samples.yaml | 5 +++--
.kokoro/build.bat | 30 +++++++++++++--------------
6 files changed, 31 insertions(+), 30 deletions(-)
diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index 9351fdfb..3473042c 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -13,4 +13,4 @@
# limitations under the License.
docker:
image: gcr.io/cloud-devrel-public-resources/owlbot-java:latest
- digest: sha256:387835a1375a0049ec44e02542c844302854c732d8291bdf8e472c0ff70a8f67
+ digest: sha256:df8d7b2cc0dbc65871e7edd86601901a0612b272fa3f7f0eb590c5c53aa5f92e
diff --git a/.github/workflows/approve-readme.yaml b/.github/workflows/approve-readme.yaml
index 1bb18232..f5fc7d51 100644
--- a/.github/workflows/approve-readme.yaml
+++ b/.github/workflows/approve-readme.yaml
@@ -21,7 +21,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'googleapis' && github.head_ref == 'autosynth-readme'
steps:
- - uses: actions/github-script@v5
+ - uses: actions/github-script@v6
with:
github-token: ${{secrets.YOSHI_APPROVER_TOKEN}}
script: |
diff --git a/.github/workflows/auto-release.yaml b/.github/workflows/auto-release.yaml
index 18e23230..7a106d00 100644
--- a/.github/workflows/auto-release.yaml
+++ b/.github/workflows/auto-release.yaml
@@ -21,7 +21,7 @@ jobs:
runs-on: ubuntu-latest
if: contains(github.head_ref, 'release-please')
steps:
- - uses: actions/github-script@v5
+ - uses: actions/github-script@v6
with:
github-token: ${{secrets.YOSHI_APPROVER_TOKEN}}
debug: true
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 6b5e56aa..83ef7f9c 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -27,8 +27,8 @@ jobs:
matrix:
java: [8, 11, 17]
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-java@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-java@v3
with:
distribution: zulu
java-version: ${{matrix.java}}
@@ -39,8 +39,8 @@ jobs:
windows:
runs-on: windows-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-java@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-java@v3
with:
distribution: zulu
java-version: 8
@@ -54,8 +54,8 @@ jobs:
matrix:
java: [8, 11, 17]
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-java@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-java@v3
with:
distribution: zulu
java-version: ${{matrix.java}}
@@ -64,8 +64,8 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-java@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-java@v3
with:
distribution: zulu
java-version: 11
@@ -76,8 +76,8 @@ jobs:
clirr:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-java@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-java@v3
with:
distribution: zulu
java-version: 8
diff --git a/.github/workflows/samples.yaml b/.github/workflows/samples.yaml
index d5d964df..912ed8b2 100644
--- a/.github/workflows/samples.yaml
+++ b/.github/workflows/samples.yaml
@@ -20,9 +20,10 @@ jobs:
checkstyle:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-java@v1
+ - uses: actions/checkout@v3
+ - uses: actions/setup-java@v3
with:
+ distribution: zulu
java-version: 8
- name: Run checkstyle
run: mvn -P lint --quiet --batch-mode checkstyle:check
diff --git a/.kokoro/build.bat b/.kokoro/build.bat
index cc602c9e..067cf4a4 100644
--- a/.kokoro/build.bat
+++ b/.kokoro/build.bat
@@ -1,18 +1,18 @@
+:: Copyright 2022 Google LLC
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+:: Github action job to test core java library features on
+:: downstream client libraries before they are released.
:: See documentation in type-shell-output.bat
-# Copyright 2022 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# Github action job to test core java library features on
-# downstream client libraries before they are released.
"C:\Program Files\Git\bin\bash.exe" %~dp0build.sh
From 9697fbfaa2896e98aa46423d0c767bc8a5afd44e Mon Sep 17 00:00:00 2001
From: "release-please[bot]"
<55107282+release-please[bot]@users.noreply.github.com>
Date: Thu, 3 Mar 2022 18:36:34 +0000
Subject: [PATCH 22/22] chore(main): release 3.0.0 (#791)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
:robot: I have created a release *beep* *boop*
---
## [3.0.0](https://github.com/googleapis/java-dataproc/compare/v2.3.2...v3.0.0) (2022-03-03)
### ⚠ BREAKING CHANGES
* add support for Virtual Dataproc cluster running on GKE cluster (#795)
### Features
* add support for Virtual Dataproc cluster running on GKE cluster ([#795](https://github.com/googleapis/java-dataproc/issues/795)) ([71aa406](https://github.com/googleapis/java-dataproc/commit/71aa40686cf1d7d9a9b3a15b2037392d3c1e6fd3))
### Dependencies
* update actions/github-script action to v6 ([#790](https://github.com/googleapis/java-dataproc/issues/790)) ([de5ac7b](https://github.com/googleapis/java-dataproc/commit/de5ac7b2693d3434c6369d2888e790383347bd39))
* update actions/setup-java action to v3 ([#798](https://github.com/googleapis/java-dataproc/issues/798)) ([a054ce8](https://github.com/googleapis/java-dataproc/commit/a054ce8fb637f3452fd641a3f4689cbac3070456))
* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.8.0 ([#804](https://github.com/googleapis/java-dataproc/issues/804)) ([4b4092b](https://github.com/googleapis/java-dataproc/commit/4b4092b14c592b09202e484028a696a408f976d0))
* update dependency com.google.cloud:google-cloud-storage to v2.4.0 ([#782](https://github.com/googleapis/java-dataproc/issues/782)) ([4754aa0](https://github.com/googleapis/java-dataproc/commit/4754aa0321069b5b68bbc5cf6f237c44b317a84d))
* update dependency com.google.cloud:google-cloud-storage to v2.4.4 ([#800](https://github.com/googleapis/java-dataproc/issues/800)) ([bfab095](https://github.com/googleapis/java-dataproc/commit/bfab0958ffc81615768d00bc8bc1e6af03a770e2))
---
This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please).
---
CHANGELOG.md | 20 ++++++++++++++++++++
google-cloud-dataproc-bom/pom.xml | 8 ++++----
google-cloud-dataproc/pom.xml | 4 ++--
grpc-google-cloud-dataproc-v1/pom.xml | 4 ++--
pom.xml | 8 ++++----
proto-google-cloud-dataproc-v1/pom.xml | 4 ++--
samples/snapshot/pom.xml | 2 +-
versions.txt | 6 +++---
8 files changed, 38 insertions(+), 18 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 83911042..fa5a57df 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,25 @@
# Changelog
+## [3.0.0](https://github.com/googleapis/java-dataproc/compare/v2.3.2...v3.0.0) (2022-03-03)
+
+
+### ⚠ BREAKING CHANGES
+
+* add support for Virtual Dataproc cluster running on GKE cluster (#795)
+
+### Features
+
+* add support for Virtual Dataproc cluster running on GKE cluster ([#795](https://github.com/googleapis/java-dataproc/issues/795)) ([71aa406](https://github.com/googleapis/java-dataproc/commit/71aa40686cf1d7d9a9b3a15b2037392d3c1e6fd3))
+
+
+### Dependencies
+
+* update actions/github-script action to v6 ([#790](https://github.com/googleapis/java-dataproc/issues/790)) ([de5ac7b](https://github.com/googleapis/java-dataproc/commit/de5ac7b2693d3434c6369d2888e790383347bd39))
+* update actions/setup-java action to v3 ([#798](https://github.com/googleapis/java-dataproc/issues/798)) ([a054ce8](https://github.com/googleapis/java-dataproc/commit/a054ce8fb637f3452fd641a3f4689cbac3070456))
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.8.0 ([#804](https://github.com/googleapis/java-dataproc/issues/804)) ([4b4092b](https://github.com/googleapis/java-dataproc/commit/4b4092b14c592b09202e484028a696a408f976d0))
+* update dependency com.google.cloud:google-cloud-storage to v2.4.0 ([#782](https://github.com/googleapis/java-dataproc/issues/782)) ([4754aa0](https://github.com/googleapis/java-dataproc/commit/4754aa0321069b5b68bbc5cf6f237c44b317a84d))
+* update dependency com.google.cloud:google-cloud-storage to v2.4.4 ([#800](https://github.com/googleapis/java-dataproc/issues/800)) ([bfab095](https://github.com/googleapis/java-dataproc/commit/bfab0958ffc81615768d00bc8bc1e6af03a770e2))
+
### [2.3.2](https://github.com/googleapis/java-dataproc/compare/v2.3.1...v2.3.2) (2022-02-03)
diff --git a/google-cloud-dataproc-bom/pom.xml b/google-cloud-dataproc-bom/pom.xml
index 3dac3379..b8e70f35 100644
--- a/google-cloud-dataproc-bom/pom.xml
+++ b/google-cloud-dataproc-bom/pom.xml
@@ -3,7 +3,7 @@
4.0.0
com.google.cloud
google-cloud-dataproc-bom
- 2.3.3-SNAPSHOT
+ 3.0.0
pom
com.google.cloud
@@ -52,17 +52,17 @@
com.google.cloud
google-cloud-dataproc
- 2.3.3-SNAPSHOT
+ 3.0.0
com.google.api.grpc
grpc-google-cloud-dataproc-v1
- 2.3.3-SNAPSHOT
+ 3.0.0
com.google.api.grpc
proto-google-cloud-dataproc-v1
- 2.3.3-SNAPSHOT
+ 3.0.0