From ac9d0d2d4d46dce8183bae59aaedb5dc91137b34 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Tue, 4 Mar 2025 14:01:57 +0000 Subject: [PATCH 01/15] Update README etc to reference 1.71.0 --- README.md | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index c9f68d4ccb9..756519d0ad0 100644 --- a/README.md +++ b/README.md @@ -44,8 +44,8 @@ For a guided tour, take a look at the [quick start guide](https://grpc.io/docs/languages/java/quickstart) or the more explanatory [gRPC basics](https://grpc.io/docs/languages/java/basics). -The [examples](https://github.com/grpc/grpc-java/tree/v1.70.0/examples) and the -[Android example](https://github.com/grpc/grpc-java/tree/v1.70.0/examples/android) +The [examples](https://github.com/grpc/grpc-java/tree/v1.71.0/examples) and the +[Android example](https://github.com/grpc/grpc-java/tree/v1.71.0/examples/android) are standalone projects that showcase the usage of gRPC. Download @@ -56,18 +56,18 @@ Download [the JARs][]. Or for Maven with non-Android, add to your `pom.xml`: io.grpc grpc-netty-shaded - 1.70.0 + 1.71.0 runtime io.grpc grpc-protobuf - 1.70.0 + 1.71.0 io.grpc grpc-stub - 1.70.0 + 1.71.0 org.apache.tomcat @@ -79,18 +79,18 @@ Download [the JARs][]. Or for Maven with non-Android, add to your `pom.xml`: Or for Gradle with non-Android, add to your dependencies: ```gradle -runtimeOnly 'io.grpc:grpc-netty-shaded:1.70.0' -implementation 'io.grpc:grpc-protobuf:1.70.0' -implementation 'io.grpc:grpc-stub:1.70.0' +runtimeOnly 'io.grpc:grpc-netty-shaded:1.71.0' +implementation 'io.grpc:grpc-protobuf:1.71.0' +implementation 'io.grpc:grpc-stub:1.71.0' compileOnly 'org.apache.tomcat:annotations-api:6.0.53' // necessary for Java 9+ ``` For Android client, use `grpc-okhttp` instead of `grpc-netty-shaded` and `grpc-protobuf-lite` instead of `grpc-protobuf`: ```gradle -implementation 'io.grpc:grpc-okhttp:1.70.0' -implementation 'io.grpc:grpc-protobuf-lite:1.70.0' -implementation 'io.grpc:grpc-stub:1.70.0' +implementation 'io.grpc:grpc-okhttp:1.71.0' +implementation 'io.grpc:grpc-protobuf-lite:1.71.0' +implementation 'io.grpc:grpc-stub:1.71.0' compileOnly 'org.apache.tomcat:annotations-api:6.0.53' // necessary for Java 9+ ``` @@ -99,7 +99,7 @@ For [Bazel](https://bazel.build), you can either (with the GAVs from above), or use `@io_grpc_grpc_java//api` et al (see below). [the JARs]: -https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.70.0 +https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.71.0 Development snapshots are available in [Sonatypes's snapshot repository](https://oss.sonatype.org/content/repositories/snapshots/). 
@@ -131,7 +131,7 @@ For protobuf-based codegen integrated with the Maven build system, you can use com.google.protobuf:protoc:3.25.5:exe:${os.detected.classifier} grpc-java - io.grpc:protoc-gen-grpc-java:1.70.0:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:1.71.0:exe:${os.detected.classifier} @@ -161,7 +161,7 @@ protobuf { } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.70.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0' } } generateProtoTasks { @@ -194,7 +194,7 @@ protobuf { } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.70.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0' } } generateProtoTasks { From 865c4432569dfdf738b3cf07bcdb6d6e3285f761 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Tue, 4 Mar 2025 14:23:13 +0000 Subject: [PATCH 02/15] Bump version to 1.71.0 --- MODULE.bazel | 2 +- build.gradle | 2 +- .../src/test/golden/TestDeprecatedService.java.txt | 2 +- compiler/src/test/golden/TestService.java.txt | 2 +- core/src/main/java/io/grpc/internal/GrpcUtil.java | 2 +- examples/MODULE.bazel | 2 +- examples/android/clientcache/app/build.gradle | 10 +++++----- examples/android/helloworld/app/build.gradle | 8 ++++---- examples/android/routeguide/app/build.gradle | 8 ++++---- examples/android/strictmode/app/build.gradle | 8 ++++---- examples/build.gradle | 2 +- examples/example-alts/build.gradle | 2 +- examples/example-debug/build.gradle | 2 +- examples/example-debug/pom.xml | 4 ++-- examples/example-dualstack/build.gradle | 2 +- examples/example-dualstack/pom.xml | 4 ++-- examples/example-gauth/build.gradle | 2 +- examples/example-gauth/pom.xml | 4 ++-- examples/example-gcp-csm-observability/build.gradle | 2 +- examples/example-gcp-observability/build.gradle | 2 +- examples/example-hostname/build.gradle | 2 +- examples/example-hostname/pom.xml | 4 ++-- examples/example-jwt-auth/build.gradle | 2 +- examples/example-jwt-auth/pom.xml | 4 ++-- examples/example-oauth/build.gradle | 2 +- examples/example-oauth/pom.xml | 4 ++-- examples/example-opentelemetry/build.gradle | 2 +- examples/example-orca/build.gradle | 2 +- examples/example-reflection/build.gradle | 2 +- examples/example-servlet/build.gradle | 2 +- examples/example-tls/build.gradle | 2 +- examples/example-tls/pom.xml | 4 ++-- examples/example-xds/build.gradle | 2 +- examples/pom.xml | 4 ++-- 34 files changed, 55 insertions(+), 55 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 9c18d918a2c..337c9b5bbfd 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -2,7 +2,7 @@ module( name = "grpc-java", compatibility_level = 0, repo_name = "io_grpc_grpc_java", - version = "1.71.0-SNAPSHOT", # CURRENT_GRPC_VERSION + version = "1.71.0", # CURRENT_GRPC_VERSION ) # GRPC_DEPS_START diff --git a/build.gradle b/build.gradle index 09c78735776..d821e7e0a99 100644 --- a/build.gradle +++ b/build.gradle @@ -21,7 +21,7 @@ subprojects { apply plugin: "net.ltgt.errorprone" group = "io.grpc" - version = "1.71.0-SNAPSHOT" // CURRENT_GRPC_VERSION + version = "1.71.0" // CURRENT_GRPC_VERSION repositories { maven { // The google mirror is less flaky than mavenCentral() diff --git a/compiler/src/test/golden/TestDeprecatedService.java.txt b/compiler/src/test/golden/TestDeprecatedService.java.txt index e696def7b99..67aceb58899 100644 --- a/compiler/src/test/golden/TestDeprecatedService.java.txt +++ b/compiler/src/test/golden/TestDeprecatedService.java.txt @@ -8,7 +8,7 @@ import static io.grpc.MethodDescriptor.generateFullMethodName; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler 
(version 1.71.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.71.0)", comments = "Source: grpc/testing/compiler/test.proto") @io.grpc.stub.annotations.GrpcGenerated @java.lang.Deprecated diff --git a/compiler/src/test/golden/TestService.java.txt b/compiler/src/test/golden/TestService.java.txt index c052093cbbc..7b85eedabbe 100644 --- a/compiler/src/test/golden/TestService.java.txt +++ b/compiler/src/test/golden/TestService.java.txt @@ -8,7 +8,7 @@ import static io.grpc.MethodDescriptor.generateFullMethodName; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.71.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.71.0)", comments = "Source: grpc/testing/compiler/test.proto") @io.grpc.stub.annotations.GrpcGenerated public final class TestServiceGrpc { diff --git a/core/src/main/java/io/grpc/internal/GrpcUtil.java b/core/src/main/java/io/grpc/internal/GrpcUtil.java index fc420195667..46dd7b79690 100644 --- a/core/src/main/java/io/grpc/internal/GrpcUtil.java +++ b/core/src/main/java/io/grpc/internal/GrpcUtil.java @@ -219,7 +219,7 @@ public byte[] parseAsciiString(byte[] serialized) { public static final Splitter ACCEPT_ENCODING_SPLITTER = Splitter.on(',').trimResults(); - public static final String IMPLEMENTATION_VERSION = "1.71.0-SNAPSHOT"; // CURRENT_GRPC_VERSION + public static final String IMPLEMENTATION_VERSION = "1.71.0"; // CURRENT_GRPC_VERSION /** * The default timeout in nanos for a keepalive ping request. diff --git a/examples/MODULE.bazel b/examples/MODULE.bazel index 72541817979..87811ada80b 100644 --- a/examples/MODULE.bazel +++ b/examples/MODULE.bazel @@ -1,5 +1,5 @@ bazel_dep(name = "googleapis", repo_name = "com_google_googleapis", version = "0.0.0-20240326-1c8d509c5") -bazel_dep(name = "grpc-java", repo_name = "io_grpc_grpc_java", version = "1.70.0-SNAPSHOT") # CURRENT_GRPC_VERSION +bazel_dep(name = "grpc-java", repo_name = "io_grpc_grpc_java", version = "1.70.0") # CURRENT_GRPC_VERSION bazel_dep(name = "grpc-proto", repo_name = "io_grpc_grpc_proto", version = "0.0.0-20240627-ec30f58") bazel_dep(name = "protobuf", repo_name = "com_google_protobuf", version = "23.1") bazel_dep(name = "rules_jvm_external", version = "6.0") diff --git a/examples/android/clientcache/app/build.gradle b/examples/android/clientcache/app/build.gradle index 7700b2248eb..545bc2a60bd 100644 --- a/examples/android/clientcache/app/build.gradle +++ b/examples/android/clientcache/app/build.gradle @@ -34,7 +34,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -54,12 +54,12 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.71.0' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' testImplementation 'junit:junit:4.13.2' testImplementation 'com.google.truth:truth:1.1.5' - testImplementation 'io.grpc:grpc-testing:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION + testImplementation 'io.grpc:grpc-testing:1.71.0' // CURRENT_GRPC_VERSION } diff --git a/examples/android/helloworld/app/build.gradle b/examples/android/helloworld/app/build.gradle index 0fb396bbe39..7de85338552 100644 --- a/examples/android/helloworld/app/build.gradle +++ b/examples/android/helloworld/app/build.gradle @@ -32,7 +32,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -52,8 +52,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.71.0' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/android/routeguide/app/build.gradle b/examples/android/routeguide/app/build.gradle index 1a8209913a2..f79558abe00 100644 --- a/examples/android/routeguide/app/build.gradle +++ b/examples/android/routeguide/app/build.gradle @@ -32,7 +32,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -52,8 +52,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.71.0' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/android/strictmode/app/build.gradle b/examples/android/strictmode/app/build.gradle index 02b17c189f9..9875af88e16 100644 --- a/examples/android/strictmode/app/build.gradle +++ b/examples/android/strictmode/app/build.gradle @@ -33,7 +33,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -53,8 +53,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.71.0' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/build.gradle b/examples/build.gradle index d4991f02f43..80fea427b88 100644 --- a/examples/build.gradle +++ b/examples/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' def protocVersion = protobufVersion diff --git a/examples/example-alts/build.gradle b/examples/example-alts/build.gradle index 17b2568c7ea..1d4f5704b21 100644 --- a/examples/example-alts/build.gradle +++ b/examples/example-alts/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-debug/build.gradle b/examples/example-debug/build.gradle index 7701465dee2..56a1b5a5081 100644 --- a/examples/example-debug/build.gradle +++ b/examples/example-debug/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' dependencies { diff --git a/examples/example-debug/pom.xml b/examples/example-debug/pom.xml index 2976782a5d7..c8aa9650103 100644 --- a/examples/example-debug/pom.xml +++ b/examples/example-debug/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0-SNAPSHOT + 1.71.0 example-debug https://github.com/grpc/grpc-java UTF-8 - 1.71.0-SNAPSHOT + 1.71.0 3.25.5 1.8 diff --git a/examples/example-dualstack/build.gradle b/examples/example-dualstack/build.gradle index a0f29660afc..e462d416f8c 100644 --- a/examples/example-dualstack/build.gradle +++ b/examples/example-dualstack/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' dependencies { diff --git a/examples/example-dualstack/pom.xml b/examples/example-dualstack/pom.xml index 99f98cc5b48..0c317cdff8c 100644 --- a/examples/example-dualstack/pom.xml +++ b/examples/example-dualstack/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0-SNAPSHOT + 1.71.0 example-dualstack https://github.com/grpc/grpc-java UTF-8 - 1.71.0-SNAPSHOT + 1.71.0 3.25.5 1.8 diff --git a/examples/example-gauth/build.gradle b/examples/example-gauth/build.gradle index aea626a5193..d5972e230c7 100644 --- a/examples/example-gauth/build.gradle +++ b/examples/example-gauth/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' def protocVersion = protobufVersion diff --git a/examples/example-gauth/pom.xml b/examples/example-gauth/pom.xml index 00e7ee0e3ad..5adc50e30e1 100644 --- a/examples/example-gauth/pom.xml +++ b/examples/example-gauth/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0-SNAPSHOT + 1.71.0 example-gauth https://github.com/grpc/grpc-java UTF-8 - 1.71.0-SNAPSHOT + 1.71.0 3.25.5 1.8 diff --git a/examples/example-gcp-csm-observability/build.gradle b/examples/example-gcp-csm-observability/build.gradle index 5ccb5bb0d3a..4393843173d 100644 --- a/examples/example-gcp-csm-observability/build.gradle +++ b/examples/example-gcp-csm-observability/build.gradle @@ -22,7 +22,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' def openTelemetryVersion = '1.40.0' def openTelemetryPrometheusVersion = '1.40.0-alpha' diff --git a/examples/example-gcp-observability/build.gradle b/examples/example-gcp-observability/build.gradle index 3c7b0587e8f..55cd51e8988 100644 --- a/examples/example-gcp-observability/build.gradle +++ b/examples/example-gcp-observability/build.gradle @@ -22,7 +22,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-hostname/build.gradle b/examples/example-hostname/build.gradle index 9eb5f38c364..5f3e223dca3 100644 --- a/examples/example-hostname/build.gradle +++ b/examples/example-hostname/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' dependencies { diff --git a/examples/example-hostname/pom.xml b/examples/example-hostname/pom.xml index d7ac43e79ae..bd7f3e19b0e 100644 --- a/examples/example-hostname/pom.xml +++ b/examples/example-hostname/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0-SNAPSHOT + 1.71.0 example-hostname https://github.com/grpc/grpc-java UTF-8 - 1.71.0-SNAPSHOT + 1.71.0 3.25.5 1.8 diff --git a/examples/example-jwt-auth/build.gradle b/examples/example-jwt-auth/build.gradle index 64ea928456b..a3d7c332de4 100644 --- a/examples/example-jwt-auth/build.gradle +++ b/examples/example-jwt-auth/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' def protocVersion = protobufVersion diff --git a/examples/example-jwt-auth/pom.xml b/examples/example-jwt-auth/pom.xml index 6c1e172b2e0..201b34487a0 100644 --- a/examples/example-jwt-auth/pom.xml +++ b/examples/example-jwt-auth/pom.xml @@ -7,13 +7,13 @@ jar - 1.71.0-SNAPSHOT + 1.71.0 example-jwt-auth https://github.com/grpc/grpc-java UTF-8 - 1.71.0-SNAPSHOT + 1.71.0 3.25.5 3.25.5 diff --git a/examples/example-oauth/build.gradle b/examples/example-oauth/build.gradle index 4de1183e0d7..9bb90fa1339 100644 --- a/examples/example-oauth/build.gradle +++ b/examples/example-oauth/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' def protocVersion = protobufVersion diff --git a/examples/example-oauth/pom.xml b/examples/example-oauth/pom.xml index f01b362347a..567cc93eaaa 100644 --- a/examples/example-oauth/pom.xml +++ b/examples/example-oauth/pom.xml @@ -7,13 +7,13 @@ jar - 1.71.0-SNAPSHOT + 1.71.0 example-oauth https://github.com/grpc/grpc-java UTF-8 - 1.71.0-SNAPSHOT + 1.71.0 3.25.5 3.25.5 diff --git a/examples/example-opentelemetry/build.gradle b/examples/example-opentelemetry/build.gradle index 2e0cbfbe0b6..b62a8384fe9 100644 --- a/examples/example-opentelemetry/build.gradle +++ b/examples/example-opentelemetry/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' def openTelemetryVersion = '1.40.0' def openTelemetryPrometheusVersion = '1.40.0-alpha' diff --git a/examples/example-orca/build.gradle b/examples/example-orca/build.gradle index ffa8295f849..62aeb420f58 100644 --- a/examples/example-orca/build.gradle +++ b/examples/example-orca/build.gradle @@ -16,7 +16,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-reflection/build.gradle b/examples/example-reflection/build.gradle index 8c885d9cb99..852e74e39aa 100644 --- a/examples/example-reflection/build.gradle +++ b/examples/example-reflection/build.gradle @@ -16,7 +16,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-servlet/build.gradle b/examples/example-servlet/build.gradle index 163276aec10..ada1f6b0b4a 100644 --- a/examples/example-servlet/build.gradle +++ b/examples/example-servlet/build.gradle @@ -15,7 +15,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-tls/build.gradle b/examples/example-tls/build.gradle index 87e37c5a3b1..cd24d5b75e4 100644 --- a/examples/example-tls/build.gradle +++ b/examples/example-tls/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-tls/pom.xml b/examples/example-tls/pom.xml index ade33ee8769..1646aa6bf4d 100644 --- a/examples/example-tls/pom.xml +++ b/examples/example-tls/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0-SNAPSHOT + 1.71.0 example-tls https://github.com/grpc/grpc-java UTF-8 - 1.71.0-SNAPSHOT + 1.71.0 3.25.5 1.8 diff --git a/examples/example-xds/build.gradle b/examples/example-xds/build.gradle index ef1bc185816..f32c9eb3fe1 100644 --- a/examples/example-xds/build.gradle +++ b/examples/example-xds/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.71.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/pom.xml b/examples/pom.xml index 6370ce7d56a..ecbc8fd9e8d 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0-SNAPSHOT + 1.71.0 examples https://github.com/grpc/grpc-java UTF-8 - 1.71.0-SNAPSHOT + 1.71.0 3.25.5 3.25.5 From 8cc0d4cd48051ddd65d178fd8cbb36bbee56a9e4 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Tue, 4 Mar 2025 14:33:37 +0000 Subject: [PATCH 03/15] Bump version to 1.71.1-SNAPSHOT --- MODULE.bazel | 2 +- build.gradle | 2 +- .../src/test/golden/TestDeprecatedService.java.txt | 2 +- compiler/src/test/golden/TestService.java.txt | 2 +- core/src/main/java/io/grpc/internal/GrpcUtil.java | 2 +- examples/MODULE.bazel | 2 +- examples/android/clientcache/app/build.gradle | 10 +++++----- examples/android/helloworld/app/build.gradle | 8 ++++---- examples/android/routeguide/app/build.gradle | 8 ++++---- examples/android/strictmode/app/build.gradle | 8 ++++---- examples/build.gradle | 2 +- examples/example-alts/build.gradle | 2 +- examples/example-debug/build.gradle | 2 +- examples/example-debug/pom.xml | 4 ++-- examples/example-dualstack/build.gradle | 2 +- examples/example-dualstack/pom.xml | 4 ++-- examples/example-gauth/build.gradle | 2 +- examples/example-gauth/pom.xml | 4 ++-- examples/example-gcp-csm-observability/build.gradle | 2 +- examples/example-gcp-observability/build.gradle | 2 +- examples/example-hostname/build.gradle | 2 +- examples/example-hostname/pom.xml | 4 ++-- examples/example-jwt-auth/build.gradle | 2 +- examples/example-jwt-auth/pom.xml | 4 ++-- examples/example-oauth/build.gradle | 2 +- examples/example-oauth/pom.xml | 4 ++-- examples/example-opentelemetry/build.gradle | 2 +- examples/example-orca/build.gradle | 2 +- examples/example-reflection/build.gradle | 2 +- examples/example-servlet/build.gradle | 2 +- examples/example-tls/build.gradle | 2 +- examples/example-tls/pom.xml | 4 ++-- examples/example-xds/build.gradle | 2 +- examples/pom.xml | 4 ++-- 34 files changed, 55 insertions(+), 55 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index 337c9b5bbfd..10f264512b2 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -2,7 +2,7 @@ module( name = "grpc-java", compatibility_level = 0, repo_name = "io_grpc_grpc_java", - version = "1.71.0", # CURRENT_GRPC_VERSION + version = "1.71.1-SNAPSHOT", # CURRENT_GRPC_VERSION ) # GRPC_DEPS_START diff --git a/build.gradle b/build.gradle index d821e7e0a99..b530f466b1d 100644 --- a/build.gradle +++ b/build.gradle @@ -21,7 +21,7 @@ subprojects { apply plugin: "net.ltgt.errorprone" group = "io.grpc" - version = "1.71.0" // CURRENT_GRPC_VERSION + version = "1.71.1-SNAPSHOT" // CURRENT_GRPC_VERSION repositories { maven { // The google mirror is less flaky than mavenCentral() diff --git a/compiler/src/test/golden/TestDeprecatedService.java.txt b/compiler/src/test/golden/TestDeprecatedService.java.txt index 67aceb58899..225eb8f4806 100644 --- a/compiler/src/test/golden/TestDeprecatedService.java.txt +++ b/compiler/src/test/golden/TestDeprecatedService.java.txt @@ -8,7 +8,7 @@ import static io.grpc.MethodDescriptor.generateFullMethodName; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.71.0)", + value = "by gRPC proto compiler (version 1.71.1-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") @io.grpc.stub.annotations.GrpcGenerated @java.lang.Deprecated diff --git 
a/compiler/src/test/golden/TestService.java.txt b/compiler/src/test/golden/TestService.java.txt index 7b85eedabbe..a627aeccb9a 100644 --- a/compiler/src/test/golden/TestService.java.txt +++ b/compiler/src/test/golden/TestService.java.txt @@ -8,7 +8,7 @@ import static io.grpc.MethodDescriptor.generateFullMethodName; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.71.0)", + value = "by gRPC proto compiler (version 1.71.1-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") @io.grpc.stub.annotations.GrpcGenerated public final class TestServiceGrpc { diff --git a/core/src/main/java/io/grpc/internal/GrpcUtil.java b/core/src/main/java/io/grpc/internal/GrpcUtil.java index 46dd7b79690..5eece39e1a8 100644 --- a/core/src/main/java/io/grpc/internal/GrpcUtil.java +++ b/core/src/main/java/io/grpc/internal/GrpcUtil.java @@ -219,7 +219,7 @@ public byte[] parseAsciiString(byte[] serialized) { public static final Splitter ACCEPT_ENCODING_SPLITTER = Splitter.on(',').trimResults(); - public static final String IMPLEMENTATION_VERSION = "1.71.0"; // CURRENT_GRPC_VERSION + public static final String IMPLEMENTATION_VERSION = "1.71.1-SNAPSHOT"; // CURRENT_GRPC_VERSION /** * The default timeout in nanos for a keepalive ping request. diff --git a/examples/MODULE.bazel b/examples/MODULE.bazel index 87811ada80b..64f86a3761a 100644 --- a/examples/MODULE.bazel +++ b/examples/MODULE.bazel @@ -1,5 +1,5 @@ bazel_dep(name = "googleapis", repo_name = "com_google_googleapis", version = "0.0.0-20240326-1c8d509c5") -bazel_dep(name = "grpc-java", repo_name = "io_grpc_grpc_java", version = "1.70.0") # CURRENT_GRPC_VERSION +bazel_dep(name = "grpc-java", repo_name = "io_grpc_grpc_java", version = "1.71.1-SNAPSHOT") # CURRENT_GRPC_VERSION bazel_dep(name = "grpc-proto", repo_name = "io_grpc_grpc_proto", version = "0.0.0-20240627-ec30f58") bazel_dep(name = "protobuf", repo_name = "com_google_protobuf", version = "23.1") bazel_dep(name = "rules_jvm_external", version = "6.0") diff --git a/examples/android/clientcache/app/build.gradle b/examples/android/clientcache/app/build.gradle index 545bc2a60bd..294748b93a6 100644 --- a/examples/android/clientcache/app/build.gradle +++ b/examples/android/clientcache/app/build.gradle @@ -34,7 +34,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -54,12 +54,12 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.71.0' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.71.0' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' testImplementation 'junit:junit:4.13.2' testImplementation 'com.google.truth:truth:1.1.5' - testImplementation 'io.grpc:grpc-testing:1.71.0' // CURRENT_GRPC_VERSION + testImplementation 'io.grpc:grpc-testing:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION } diff --git a/examples/android/helloworld/app/build.gradle b/examples/android/helloworld/app/build.gradle index 7de85338552..500d0bf5e63 100644 --- a/examples/android/helloworld/app/build.gradle +++ b/examples/android/helloworld/app/build.gradle @@ -32,7 +32,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -52,8 +52,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.71.0' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.71.0' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/android/routeguide/app/build.gradle b/examples/android/routeguide/app/build.gradle index f79558abe00..a9038d2e5f4 100644 --- a/examples/android/routeguide/app/build.gradle +++ b/examples/android/routeguide/app/build.gradle @@ -32,7 +32,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -52,8 +52,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.71.0' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.71.0' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/android/strictmode/app/build.gradle b/examples/android/strictmode/app/build.gradle index 9875af88e16..8a938f007d5 100644 --- a/examples/android/strictmode/app/build.gradle +++ b/examples/android/strictmode/app/build.gradle @@ -33,7 +33,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.25.1' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.0' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -53,8 +53,8 @@ dependencies { implementation 'androidx.appcompat:appcompat:1.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.71.0' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.71.0' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.71.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'org.apache.tomcat:annotations-api:6.0.53' } diff --git a/examples/build.gradle b/examples/build.gradle index 80fea427b88..3c80105c4b4 100644 --- a/examples/build.gradle +++ b/examples/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' def protocVersion = protobufVersion diff --git a/examples/example-alts/build.gradle b/examples/example-alts/build.gradle index 1d4f5704b21..513f0519991 100644 --- a/examples/example-alts/build.gradle +++ b/examples/example-alts/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-debug/build.gradle b/examples/example-debug/build.gradle index 56a1b5a5081..7df41bf7432 100644 --- a/examples/example-debug/build.gradle +++ b/examples/example-debug/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' dependencies { diff --git a/examples/example-debug/pom.xml b/examples/example-debug/pom.xml index c8aa9650103..f58a7ced1f5 100644 --- a/examples/example-debug/pom.xml +++ b/examples/example-debug/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0 + 1.71.1-SNAPSHOT example-debug https://github.com/grpc/grpc-java UTF-8 - 1.71.0 + 1.71.1-SNAPSHOT 3.25.5 1.8 diff --git a/examples/example-dualstack/build.gradle b/examples/example-dualstack/build.gradle index e462d416f8c..f06d4fe7fb0 100644 --- a/examples/example-dualstack/build.gradle +++ b/examples/example-dualstack/build.gradle @@ -23,7 +23,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' dependencies { diff --git a/examples/example-dualstack/pom.xml b/examples/example-dualstack/pom.xml index 0c317cdff8c..d4c50322095 100644 --- a/examples/example-dualstack/pom.xml +++ b/examples/example-dualstack/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0 + 1.71.1-SNAPSHOT example-dualstack https://github.com/grpc/grpc-java UTF-8 - 1.71.0 + 1.71.1-SNAPSHOT 3.25.5 1.8 diff --git a/examples/example-gauth/build.gradle b/examples/example-gauth/build.gradle index d5972e230c7..066917fed11 100644 --- a/examples/example-gauth/build.gradle +++ b/examples/example-gauth/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' def protocVersion = protobufVersion diff --git a/examples/example-gauth/pom.xml b/examples/example-gauth/pom.xml index 5adc50e30e1..640542c3203 100644 --- a/examples/example-gauth/pom.xml +++ b/examples/example-gauth/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0 + 1.71.1-SNAPSHOT example-gauth https://github.com/grpc/grpc-java UTF-8 - 1.71.0 + 1.71.1-SNAPSHOT 3.25.5 1.8 diff --git a/examples/example-gcp-csm-observability/build.gradle b/examples/example-gcp-csm-observability/build.gradle index 4393843173d..7923160debf 100644 --- a/examples/example-gcp-csm-observability/build.gradle +++ b/examples/example-gcp-csm-observability/build.gradle @@ -22,7 +22,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' def openTelemetryVersion = '1.40.0' def openTelemetryPrometheusVersion = '1.40.0-alpha' diff --git a/examples/example-gcp-observability/build.gradle b/examples/example-gcp-observability/build.gradle index 55cd51e8988..84a0789878a 100644 --- a/examples/example-gcp-observability/build.gradle +++ b/examples/example-gcp-observability/build.gradle @@ -22,7 +22,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-hostname/build.gradle b/examples/example-hostname/build.gradle index 5f3e223dca3..54cbf882d56 100644 --- a/examples/example-hostname/build.gradle +++ b/examples/example-hostname/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' dependencies { diff --git a/examples/example-hostname/pom.xml b/examples/example-hostname/pom.xml index bd7f3e19b0e..f6e1e217f97 100644 --- a/examples/example-hostname/pom.xml +++ b/examples/example-hostname/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0 + 1.71.1-SNAPSHOT example-hostname https://github.com/grpc/grpc-java UTF-8 - 1.71.0 + 1.71.1-SNAPSHOT 3.25.5 1.8 diff --git a/examples/example-jwt-auth/build.gradle b/examples/example-jwt-auth/build.gradle index a3d7c332de4..7ea6432d8b8 100644 --- a/examples/example-jwt-auth/build.gradle +++ b/examples/example-jwt-auth/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' def protocVersion = protobufVersion diff --git a/examples/example-jwt-auth/pom.xml b/examples/example-jwt-auth/pom.xml index 201b34487a0..87c0dee3381 100644 --- a/examples/example-jwt-auth/pom.xml +++ b/examples/example-jwt-auth/pom.xml @@ -7,13 +7,13 @@ jar - 1.71.0 + 1.71.1-SNAPSHOT example-jwt-auth https://github.com/grpc/grpc-java UTF-8 - 1.71.0 + 1.71.1-SNAPSHOT 3.25.5 3.25.5 diff --git a/examples/example-oauth/build.gradle b/examples/example-oauth/build.gradle index 9bb90fa1339..b0a3d45158a 100644 --- a/examples/example-oauth/build.gradle +++ b/examples/example-oauth/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.25.5' def protocVersion = protobufVersion diff --git a/examples/example-oauth/pom.xml b/examples/example-oauth/pom.xml index 567cc93eaaa..6382281ee92 100644 --- a/examples/example-oauth/pom.xml +++ b/examples/example-oauth/pom.xml @@ -7,13 +7,13 @@ jar - 1.71.0 + 1.71.1-SNAPSHOT example-oauth https://github.com/grpc/grpc-java UTF-8 - 1.71.0 + 1.71.1-SNAPSHOT 3.25.5 3.25.5 diff --git a/examples/example-opentelemetry/build.gradle b/examples/example-opentelemetry/build.gradle index b62a8384fe9..4c8656a2cfc 100644 --- a/examples/example-opentelemetry/build.gradle +++ b/examples/example-opentelemetry/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' def openTelemetryVersion = '1.40.0' def openTelemetryPrometheusVersion = '1.40.0-alpha' diff --git a/examples/example-orca/build.gradle b/examples/example-orca/build.gradle index 62aeb420f58..b9ae432be32 100644 --- a/examples/example-orca/build.gradle +++ b/examples/example-orca/build.gradle @@ -16,7 +16,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-reflection/build.gradle b/examples/example-reflection/build.gradle index 852e74e39aa..dc6bdd95432 100644 --- a/examples/example-reflection/build.gradle +++ b/examples/example-reflection/build.gradle @@ -16,7 +16,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-servlet/build.gradle b/examples/example-servlet/build.gradle index ada1f6b0b4a..251eda65e64 100644 --- a/examples/example-servlet/build.gradle +++ b/examples/example-servlet/build.gradle @@ -15,7 +15,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-tls/build.gradle b/examples/example-tls/build.gradle index cd24d5b75e4..5d5656e4c62 100644 --- a/examples/example-tls/build.gradle +++ b/examples/example-tls/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/example-tls/pom.xml b/examples/example-tls/pom.xml index 1646aa6bf4d..77d164f591b 100644 --- a/examples/example-tls/pom.xml +++ b/examples/example-tls/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0 + 1.71.1-SNAPSHOT example-tls https://github.com/grpc/grpc-java UTF-8 - 1.71.0 + 1.71.1-SNAPSHOT 3.25.5 1.8 diff --git a/examples/example-xds/build.gradle b/examples/example-xds/build.gradle index f32c9eb3fe1..97425b5da56 100644 --- a/examples/example-xds/build.gradle +++ b/examples/example-xds/build.gradle @@ -21,7 +21,7 @@ java { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.71.0' // CURRENT_GRPC_VERSION +def grpcVersion = '1.71.1-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.25.5' dependencies { diff --git a/examples/pom.xml b/examples/pom.xml index ecbc8fd9e8d..cf26bf44730 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -6,13 +6,13 @@ jar - 1.71.0 + 1.71.1-SNAPSHOT examples https://github.com/grpc/grpc-java UTF-8 - 1.71.0 + 1.71.1-SNAPSHOT 3.25.5 3.25.5 From 143532304f45522c6e5fc8898bff6e5e35cbf699 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Mon, 7 Apr 2025 06:06:00 -0700 Subject: [PATCH 04/15] core: Avoid Set.removeAll() when passing a possibly-large List (#11994) (#12001) See #11958 --- .../main/java/io/grpc/internal/DelayedClientTransport.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/io/grpc/internal/DelayedClientTransport.java b/core/src/main/java/io/grpc/internal/DelayedClientTransport.java index 8ff755af3eb..eccd8fadc8c 100644 --- a/core/src/main/java/io/grpc/internal/DelayedClientTransport.java +++ b/core/src/main/java/io/grpc/internal/DelayedClientTransport.java @@ -325,7 +325,11 @@ final void reprocess(@Nullable SubchannelPicker picker) { if (!hasPendingStreams()) { return; } - pendingStreams.removeAll(toRemove); + // Avoid pendingStreams.removeAll() as it can degrade to calling toRemove.contains() for each + // element in pendingStreams. + for (PendingStream stream : toRemove) { + pendingStreams.remove(stream); + } // Because delayed transport is long-lived, we take this opportunity to down-size the // hashmap. if (pendingStreams.isEmpty()) { From e47748a2250c231a9574fed2a50e94253f87ae79 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Mon, 7 Apr 2025 15:07:25 +0000 Subject: [PATCH 05/15] xds: ClusterResolverLoadBalancer handle update for both resolved addresses and errors via ResolutionResult (v1.71.x backport) (#12005) Backport of #11997 to v1.71.x. ------------ Fixes #11995. --- .../grpc/xds/ClusterResolverLoadBalancer.java | 134 +++++++++--------- .../xds/ClusterResolverLoadBalancerTest.java | 83 +++++++++-- 2 files changed, 138 insertions(+), 79 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java b/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java index 4e08ddc5973..aff61cf7ada 100644 --- a/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java @@ -32,6 +32,7 @@ import io.grpc.NameResolver; import io.grpc.NameResolver.ResolutionResult; import io.grpc.Status; +import io.grpc.StatusOr; import io.grpc.SynchronizationContext; import io.grpc.SynchronizationContext.ScheduledHandle; import io.grpc.internal.BackoffPolicy; @@ -615,79 +616,84 @@ private class NameResolverListener extends NameResolver.Listener2 { @Override public void onResult(final ResolutionResult resolutionResult) { - class NameResolved implements Runnable { - @Override - public void run() { - if (shutdown) { - return; - } - backoffPolicy = null; // reset backoff sequence if succeeded - // Arbitrary priority notation for all DNS-resolved endpoints. - String priorityName = priorityName(name, 0); // value doesn't matter - List addresses = new ArrayList<>(); - for (EquivalentAddressGroup eag : resolutionResult.getAddresses()) { - // No weight attribute is attached, all endpoint-level LB policy should be able - // to handle such it. 
- String localityName = localityName(LOGICAL_DNS_CLUSTER_LOCALITY); - Attributes attr = eag.getAttributes().toBuilder() - .set(XdsAttributes.ATTR_LOCALITY, LOGICAL_DNS_CLUSTER_LOCALITY) - .set(XdsAttributes.ATTR_LOCALITY_NAME, localityName) - .set(XdsAttributes.ATTR_ADDRESS_NAME, dnsHostName) - .build(); - eag = new EquivalentAddressGroup(eag.getAddresses(), attr); - eag = AddressFilter.setPathFilter(eag, Arrays.asList(priorityName, localityName)); - addresses.add(eag); - } - PriorityChildConfig priorityChildConfig = generateDnsBasedPriorityChildConfig( - name, lrsServerInfo, maxConcurrentRequests, tlsContext, filterMetadata, - lbRegistry, Collections.emptyList()); - status = Status.OK; - resolved = true; - result = new ClusterResolutionResult(addresses, priorityName, priorityChildConfig); - handleEndpointResourceUpdate(); + syncContext.execute(() -> onResult2(resolutionResult)); + } + + @Override + public Status onResult2(final ResolutionResult resolutionResult) { + if (shutdown) { + return Status.OK; + } + // Arbitrary priority notation for all DNS-resolved endpoints. + String priorityName = priorityName(name, 0); // value doesn't matter + List addresses = new ArrayList<>(); + StatusOr> addressesOrError = + resolutionResult.getAddressesOrError(); + if (addressesOrError.hasValue()) { + backoffPolicy = null; // reset backoff sequence if succeeded + for (EquivalentAddressGroup eag : resolutionResult.getAddresses()) { + // No weight attribute is attached, all endpoint-level LB policy should be able + // to handle such it. + String localityName = localityName(LOGICAL_DNS_CLUSTER_LOCALITY); + Attributes attr = eag.getAttributes().toBuilder() + .set(XdsAttributes.ATTR_LOCALITY, LOGICAL_DNS_CLUSTER_LOCALITY) + .set(XdsAttributes.ATTR_LOCALITY_NAME, localityName) + .set(XdsAttributes.ATTR_ADDRESS_NAME, dnsHostName) + .build(); + eag = new EquivalentAddressGroup(eag.getAddresses(), attr); + eag = AddressFilter.setPathFilter(eag, Arrays.asList(priorityName, localityName)); + addresses.add(eag); } + PriorityChildConfig priorityChildConfig = generateDnsBasedPriorityChildConfig( + name, lrsServerInfo, maxConcurrentRequests, tlsContext, filterMetadata, + lbRegistry, Collections.emptyList()); + status = Status.OK; + resolved = true; + result = new ClusterResolutionResult(addresses, priorityName, priorityChildConfig); + handleEndpointResourceUpdate(); + return Status.OK; + } else { + handleErrorInSyncContext(addressesOrError.getStatus()); + return addressesOrError.getStatus(); } - - syncContext.execute(new NameResolved()); } @Override public void onError(final Status error) { - syncContext.execute(new Runnable() { - @Override - public void run() { - if (shutdown) { - return; - } - status = error; - // NameResolver.Listener API cannot distinguish between address-not-found and - // transient errors. If the error occurs in the first resolution, treat it as - // address not found. Otherwise, either there is previously resolved addresses - // previously encountered error, propagate the error to downstream/upstream and - // let downstream/upstream handle it. 
- if (!resolved) { - resolved = true; - handleEndpointResourceUpdate(); - } else { - handleEndpointResolutionError(); - } - if (scheduledRefresh != null && scheduledRefresh.isPending()) { - return; - } - if (backoffPolicy == null) { - backoffPolicy = backoffPolicyProvider.get(); - } - long delayNanos = backoffPolicy.nextBackoffNanos(); - logger.log(XdsLogLevel.DEBUG, + syncContext.execute(() -> handleErrorInSyncContext(error)); + } + + private void handleErrorInSyncContext(final Status error) { + if (shutdown) { + return; + } + status = error; + // NameResolver.Listener API cannot distinguish between address-not-found and + // transient errors. If the error occurs in the first resolution, treat it as + // address not found. Otherwise, either there is previously resolved addresses + // previously encountered error, propagate the error to downstream/upstream and + // let downstream/upstream handle it. + if (!resolved) { + resolved = true; + handleEndpointResourceUpdate(); + } else { + handleEndpointResolutionError(); + } + if (scheduledRefresh != null && scheduledRefresh.isPending()) { + return; + } + if (backoffPolicy == null) { + backoffPolicy = backoffPolicyProvider.get(); + } + long delayNanos = backoffPolicy.nextBackoffNanos(); + logger.log(XdsLogLevel.DEBUG, "Logical DNS resolver for cluster {0} encountered name resolution " - + "error: {1}, scheduling DNS resolution backoff for {2} ns", + + "error: {1}, scheduling DNS resolution backoff for {2} ns", name, error, delayNanos); - scheduledRefresh = + scheduledRefresh = syncContext.schedule( - new DelayedNameResolverRefresh(), delayNanos, TimeUnit.NANOSECONDS, - timeService); - } - }); + new DelayedNameResolverRefresh(), delayNanos, TimeUnit.NANOSECONDS, + timeService); } } } diff --git a/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java index 9243abba6d3..2a8617912ea 100644 --- a/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java @@ -198,6 +198,7 @@ public XdsClient returnObject(Object object) { private ArgumentCaptor pickerCaptor; private int xdsClientRefs; private ClusterResolverLoadBalancer loadBalancer; + private NameResolverProvider fakeNameResolverProvider; @Before public void setUp() throws URISyntaxException { @@ -214,7 +215,8 @@ public void setUp() throws URISyntaxException { .setServiceConfigParser(mock(ServiceConfigParser.class)) .setChannelLogger(mock(ChannelLogger.class)) .build(); - nsRegistry.register(new FakeNameResolverProvider()); + fakeNameResolverProvider = new FakeNameResolverProvider(false); + nsRegistry.register(fakeNameResolverProvider); when(helper.getNameResolverRegistry()).thenReturn(nsRegistry); when(helper.getNameResolverArgs()).thenReturn(args); when(helper.getSynchronizationContext()).thenReturn(syncContext); @@ -715,6 +717,17 @@ public void handleEdsResource_noHealthyEndpoint() { @Test public void onlyLogicalDnsCluster_endpointsResolved() { + do_onlyLogicalDnsCluster_endpointsResolved(); + } + + @Test + public void oldListenerCallback_onlyLogicalDnsCluster_endpointsResolved() { + nsRegistry.deregister(fakeNameResolverProvider); + nsRegistry.register(new FakeNameResolverProvider(true)); + do_onlyLogicalDnsCluster_endpointsResolved(); + } + + void do_onlyLogicalDnsCluster_endpointsResolved() { ClusterResolverConfig config = new ClusterResolverConfig( Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); 
deliverLbConfig(config); @@ -743,7 +756,6 @@ public void onlyLogicalDnsCluster_endpointsResolved() { .get(XdsAttributes.ATTR_ADDRESS_NAME)).isEqualTo(DNS_HOST_NAME); assertThat(childBalancer.addresses.get(1).getAttributes() .get(XdsAttributes.ATTR_ADDRESS_NAME)).isEqualTo(DNS_HOST_NAME); - } @Test @@ -763,37 +775,48 @@ public void onlyLogicalDnsCluster_handleRefreshNameResolution() { } @Test - public void onlyLogicalDnsCluster_resolutionError_backoffAndRefresh() { + public void resolutionError_backoffAndRefresh() { + do_onlyLogicalDnsCluster_resolutionError_backoffAndRefresh(); + } + + @Test + public void oldListenerCallback_resolutionError_backoffAndRefresh() { + nsRegistry.deregister(fakeNameResolverProvider); + nsRegistry.register(new FakeNameResolverProvider(true)); + do_onlyLogicalDnsCluster_resolutionError_backoffAndRefresh(); + } + + void do_onlyLogicalDnsCluster_resolutionError_backoffAndRefresh() { InOrder inOrder = Mockito.inOrder(helper, backoffPolicyProvider, - backoffPolicy1, backoffPolicy2); + backoffPolicy1, backoffPolicy2); ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); + Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); assertThat(childBalancers).isEmpty(); Status error = Status.UNAVAILABLE.withDescription("cannot reach DNS server"); resolver.deliverError(error); inOrder.verify(helper).updateBalancingState( - eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture()); + eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture()); assertPicker(pickerCaptor.getValue(), error, null); assertThat(resolver.refreshCount).isEqualTo(0); inOrder.verify(backoffPolicyProvider).get(); inOrder.verify(backoffPolicy1).nextBackoffNanos(); assertThat(fakeClock.getPendingTasks()).hasSize(1); assertThat(Iterables.getOnlyElement(fakeClock.getPendingTasks()).getDelay(TimeUnit.SECONDS)) - .isEqualTo(1L); + .isEqualTo(1L); fakeClock.forwardTime(1L, TimeUnit.SECONDS); assertThat(resolver.refreshCount).isEqualTo(1); error = Status.UNKNOWN.withDescription("I am lost"); resolver.deliverError(error); inOrder.verify(helper).updateBalancingState( - eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture()); + eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture()); inOrder.verify(backoffPolicy1).nextBackoffNanos(); assertPicker(pickerCaptor.getValue(), error, null); assertThat(fakeClock.getPendingTasks()).hasSize(1); assertThat(Iterables.getOnlyElement(fakeClock.getPendingTasks()).getDelay(TimeUnit.SECONDS)) - .isEqualTo(10L); + .isEqualTo(10L); fakeClock.forwardTime(10L, TimeUnit.SECONDS); assertThat(resolver.refreshCount).isEqualTo(2); @@ -803,7 +826,7 @@ public void onlyLogicalDnsCluster_resolutionError_backoffAndRefresh() { resolver.deliverEndpointAddresses(Arrays.asList(endpoint1, endpoint2)); assertThat(childBalancers).hasSize(1); assertAddressesEqual(Arrays.asList(endpoint1, endpoint2), - Iterables.getOnlyElement(childBalancers).addresses); + Iterables.getOnlyElement(childBalancers).addresses); assertThat(fakeClock.getPendingTasks()).isEmpty(); inOrder.verifyNoMoreInteractions(); @@ -1204,10 +1227,18 @@ void deliverError(Status error) { } private class FakeNameResolverProvider extends NameResolverProvider { + private final boolean useOldListenerCallback; + + private FakeNameResolverProvider(boolean useOldListenerCallback) { + this.useOldListenerCallback = 
useOldListenerCallback; + } + @Override public NameResolver newNameResolver(URI targetUri, NameResolver.Args args) { assertThat(targetUri.getScheme()).isEqualTo("dns"); - FakeNameResolver resolver = new FakeNameResolver(targetUri); + FakeNameResolver resolver = useOldListenerCallback + ? new FakeNameResolverUsingOldListenerCallback(targetUri) + : new FakeNameResolver(targetUri); resolvers.add(resolver); return resolver; } @@ -1228,9 +1259,10 @@ protected int priority() { } } + private class FakeNameResolver extends NameResolver { private final URI targetUri; - private Listener2 listener; + protected Listener2 listener; private int refreshCount; private FakeNameResolver(URI targetUri) { @@ -1257,12 +1289,33 @@ public void shutdown() { resolvers.remove(this); } - private void deliverEndpointAddresses(List addresses) { + protected void deliverEndpointAddresses(List addresses) { + syncContext.execute(() -> { + Status ret = listener.onResult2(ResolutionResult.newBuilder() + .setAddressesOrError(StatusOr.fromValue(addresses)).build()); + assertThat(ret.getCode()).isEqualTo(Status.Code.OK); + }); + } + + protected void deliverError(Status error) { + syncContext.execute(() -> listener.onResult2(ResolutionResult.newBuilder() + .setAddressesOrError(StatusOr.fromStatus(error)).build())); + } + } + + private class FakeNameResolverUsingOldListenerCallback extends FakeNameResolver { + private FakeNameResolverUsingOldListenerCallback(URI targetUri) { + super(targetUri); + } + + @Override + protected void deliverEndpointAddresses(List addresses) { listener.onResult(ResolutionResult.newBuilder() - .setAddressesOrError(StatusOr.fromValue(addresses)).build()); + .setAddressesOrError(StatusOr.fromValue(addresses)).build()); } - private void deliverError(Status error) { + @Override + protected void deliverError(Status error) { listener.onError(error); } } From c5fcfea0bb69940154d5ec0161f85ea7c68cfce6 Mon Sep 17 00:00:00 2001 From: Kannan J Date: Wed, 23 Apr 2025 10:37:20 +0000 Subject: [PATCH 06/15] Update psm-dualstack.cfg (#11950) (#12028) 120 minutes has not been sufficient, causing frequent VM timeout errors in the test runs. --- buildscripts/kokoro/psm-dualstack.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildscripts/kokoro/psm-dualstack.cfg b/buildscripts/kokoro/psm-dualstack.cfg index 55c906bc4ec..a55d91a95b0 100644 --- a/buildscripts/kokoro/psm-dualstack.cfg +++ b/buildscripts/kokoro/psm-dualstack.cfg @@ -2,7 +2,7 @@ # Location of the continuous shell script in repository. 
build_file: "grpc-java/buildscripts/kokoro/psm-interop-test-java.sh" -timeout_mins: 120 +timeout_mins: 240 action { define_artifacts { From 0290672bc82684eac69d3e2238be34088f7a4274 Mon Sep 17 00:00:00 2001 From: Michael Lumish Date: Thu, 22 May 2025 15:41:37 -0700 Subject: [PATCH 07/15] Rename PSM interop fallback test suite to light (#12094) --- buildscripts/kokoro/{psm-fallback.cfg => psm-light.cfg} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename buildscripts/kokoro/{psm-fallback.cfg => psm-light.cfg} (94%) diff --git a/buildscripts/kokoro/psm-fallback.cfg b/buildscripts/kokoro/psm-light.cfg similarity index 94% rename from buildscripts/kokoro/psm-fallback.cfg rename to buildscripts/kokoro/psm-light.cfg index 7335d1d9fd9..decd179efa3 100644 --- a/buildscripts/kokoro/psm-fallback.cfg +++ b/buildscripts/kokoro/psm-light.cfg @@ -13,5 +13,5 @@ action { } env_vars { key: "PSM_TEST_SUITE" - value: "fallback" + value: "light" } From 2fd84478a6e45bfb07215dac1f1a49733d24b2e8 Mon Sep 17 00:00:00 2001 From: MV Shiva Date: Fri, 23 May 2025 16:24:39 +0530 Subject: [PATCH 08/15] xds: Change how xDS filters are created by introducing Filter.Provider (#11883) (#12089) This is the first step towards supporting filter state retention in Java. The mechanism will be similar to the one described in [A83](https://github.com/grpc/proposal/blob/master/A83-xds-gcp-authn-filter.md#filter-call-credentials-cache) for C-core, and will serve the same purpose. However, the implementation details are very different due to the different nature of xDS HTTP filter support in C-core and Java. In Java, xDS HTTP filters are backed by classes implementing `io.grpc.xds.Filter`, from here on just called "Filters". To support Filter state retention (next PR), Java's xDS implementation must be able to create unique Filter instances: - Per HCM `envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager` - Per filter name as specified in `envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter.name` This PR **does not** implement Filter state retention, but lays the groundwork for it by changing how filters are registered and instantiated. To achieve this, all existing Filter classes had to be updated to the new instantiation mechanism described below. Prior to this PR, Filters had no lifecycle: FilterRegistry provided singleton instances for a given typeUrl. This PR introduces a new interface `Filter.Provider`, which instantiates Filter classes. All functionality that doesn't need an instance of a Filter is moved to the Filter.Provider. This includes parsing the filter config proto into FilterConfig and determining the filter kind (client-side, server-side, or both). This PR is limited to refactoring; there are no changes to the existing behavior. Note that all Filter Providers still return singleton Filter instances. However, with this PR, it is now possible to create Providers that return a new Filter instance each time `newInstance` is called. 
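
To make the new contract concrete, a rough sketch of what a filter and its provider look like after this change follows. `MyFilter`, its type URL, and `MyConfig` are invented for illustration only; the `Filter.Provider` methods mirror the interface added by this patch, and the sketch assumes it lives alongside `Filter` in the `io.grpc.xds` package, since `Filter` is package-private.

```java
import com.google.protobuf.Message;
import io.grpc.ClientInterceptor;
import java.util.concurrent.ScheduledExecutorService;
import javax.annotation.Nullable;

// Hypothetical filter, shown only to illustrate the Provider shape introduced here.
final class MyFilter implements Filter {
  static final String TYPE_URL = "type.googleapis.com/example.MyFilterConfig";  // invented

  static final class Provider implements Filter.Provider {
    @Override
    public String[] typeUrls() {
      return new String[] {TYPE_URL};
    }

    @Override
    public boolean isClientFilter() {
      return true;  // declares that buildClientInterceptor is implemented
    }

    @Override
    public MyFilter newInstance() {
      // Providers currently still hand out singletons or cheap new instances; state
      // retention would later make this per-HCM / per-filter-name.
      return new MyFilter();
    }

    @Override
    public ConfigOrError parseFilterConfig(Message rawProtoMessage) {
      // Unpack the Any/Struct into this filter's FilterConfig representation.
      return ConfigOrError.fromConfig(new MyConfig());
    }

    @Override
    public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) {
      return parseFilterConfig(rawProtoMessage);
    }
  }

  @Nullable
  @Override
  public ClientInterceptor buildClientInterceptor(
      FilterConfig config, @Nullable FilterConfig overrideConfig,
      ScheduledExecutorService scheduler) {
    return null;  // a real filter returns its interceptor here
  }

  static final class MyConfig implements FilterConfig {
    @Override
    public String typeUrl() {
      return TYPE_URL;
    }
  }
}
```

Registration and lookup then go through the registry by type URL, for example `FilterRegistry.newRegistry().register(new MyFilter.Provider())` followed by `registry.get(TYPE_URL)`, and callers such as `XdsNameResolver` and `XdsServerWrapper` only call `newInstance()` when they are about to build an interceptor.
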
Co-authored-by: Sergii Tkachenko --- .../main/java/io/grpc/xds/FaultFilter.java | 170 ++++++++++-------- xds/src/main/java/io/grpc/xds/Filter.java | 90 +++++++--- .../main/java/io/grpc/xds/FilterRegistry.java | 16 +- .../io/grpc/xds/GcpAuthenticationFilter.java | 78 ++++---- .../java/io/grpc/xds/InternalRbacFilter.java | 7 +- xds/src/main/java/io/grpc/xds/RbacFilter.java | 158 ++++++++-------- .../main/java/io/grpc/xds/RouterFilter.java | 65 ++++--- .../java/io/grpc/xds/XdsListenerResource.java | 29 ++- .../java/io/grpc/xds/XdsNameResolver.java | 31 ++-- .../grpc/xds/XdsRouteConfigureResource.java | 6 +- .../java/io/grpc/xds/XdsServerWrapper.java | 66 ++++--- .../java/io/grpc/xds/FaultFilterTest.java | 19 +- .../grpc/xds/GcpAuthenticationFilterTest.java | 25 +-- .../grpc/xds/GrpcXdsClientImplDataTest.java | 78 ++++---- .../test/java/io/grpc/xds/RbacFilterTest.java | 35 ++-- .../java/io/grpc/xds/RouterFilterTest.java | 36 ++++ .../java/io/grpc/xds/XdsNameResolverTest.java | 19 +- .../io/grpc/xds/XdsServerWrapperTest.java | 61 ++++--- 18 files changed, 579 insertions(+), 410 deletions(-) create mode 100644 xds/src/test/java/io/grpc/xds/RouterFilterTest.java diff --git a/xds/src/main/java/io/grpc/xds/FaultFilter.java b/xds/src/main/java/io/grpc/xds/FaultFilter.java index c66861a9f15..2012fd36b62 100644 --- a/xds/src/main/java/io/grpc/xds/FaultFilter.java +++ b/xds/src/main/java/io/grpc/xds/FaultFilter.java @@ -45,7 +45,6 @@ import io.grpc.internal.GrpcUtil; import io.grpc.xds.FaultConfig.FaultAbort; import io.grpc.xds.FaultConfig.FaultDelay; -import io.grpc.xds.Filter.ClientInterceptorBuilder; import io.grpc.xds.ThreadSafeRandom.ThreadSafeRandomImpl; import java.util.Locale; import java.util.concurrent.Executor; @@ -56,10 +55,11 @@ import javax.annotation.Nullable; /** HttpFault filter implementation. 
*/ -final class FaultFilter implements Filter, ClientInterceptorBuilder { +final class FaultFilter implements Filter { - static final FaultFilter INSTANCE = + private static final FaultFilter INSTANCE = new FaultFilter(ThreadSafeRandomImpl.instance, new AtomicLong()); + @VisibleForTesting static final Metadata.Key HEADER_DELAY_KEY = Metadata.Key.of("x-envoy-fault-delay-request", Metadata.ASCII_STRING_MARSHALLER); @@ -87,96 +87,108 @@ final class FaultFilter implements Filter, ClientInterceptorBuilder { this.activeFaultCounter = activeFaultCounter; } - @Override - public String[] typeUrls() { - return new String[] { TYPE_URL }; - } - - @Override - public ConfigOrError parseFilterConfig(Message rawProtoMessage) { - HTTPFault httpFaultProto; - if (!(rawProtoMessage instanceof Any)) { - return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); + static final class Provider implements Filter.Provider { + @Override + public String[] typeUrls() { + return new String[]{TYPE_URL}; } - Any anyMessage = (Any) rawProtoMessage; - try { - httpFaultProto = anyMessage.unpack(HTTPFault.class); - } catch (InvalidProtocolBufferException e) { - return ConfigOrError.fromError("Invalid proto: " + e); + + @Override + public boolean isClientFilter() { + return true; } - return parseHttpFault(httpFaultProto); - } - private static ConfigOrError parseHttpFault(HTTPFault httpFault) { - FaultDelay faultDelay = null; - FaultAbort faultAbort = null; - if (httpFault.hasDelay()) { - faultDelay = parseFaultDelay(httpFault.getDelay()); + @Override + public FaultFilter newInstance() { + return INSTANCE; } - if (httpFault.hasAbort()) { - ConfigOrError faultAbortOrError = parseFaultAbort(httpFault.getAbort()); - if (faultAbortOrError.errorDetail != null) { - return ConfigOrError.fromError( - "HttpFault contains invalid FaultAbort: " + faultAbortOrError.errorDetail); + + @Override + public ConfigOrError parseFilterConfig(Message rawProtoMessage) { + HTTPFault httpFaultProto; + if (!(rawProtoMessage instanceof Any)) { + return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); } - faultAbort = faultAbortOrError.config; - } - Integer maxActiveFaults = null; - if (httpFault.hasMaxActiveFaults()) { - maxActiveFaults = httpFault.getMaxActiveFaults().getValue(); - if (maxActiveFaults < 0) { - maxActiveFaults = Integer.MAX_VALUE; + Any anyMessage = (Any) rawProtoMessage; + try { + httpFaultProto = anyMessage.unpack(HTTPFault.class); + } catch (InvalidProtocolBufferException e) { + return ConfigOrError.fromError("Invalid proto: " + e); } + return parseHttpFault(httpFaultProto); } - return ConfigOrError.fromConfig(FaultConfig.create(faultDelay, faultAbort, maxActiveFaults)); - } - private static FaultDelay parseFaultDelay( - io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultDelay faultDelay) { - FaultConfig.FractionalPercent percent = parsePercent(faultDelay.getPercentage()); - if (faultDelay.hasHeaderDelay()) { - return FaultDelay.forHeader(percent); + @Override + public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { + return parseFilterConfig(rawProtoMessage); } - return FaultDelay.forFixedDelay(Durations.toNanos(faultDelay.getFixedDelay()), percent); - } - @VisibleForTesting - static ConfigOrError parseFaultAbort( - io.envoyproxy.envoy.extensions.filters.http.fault.v3.FaultAbort faultAbort) { - FaultConfig.FractionalPercent percent = parsePercent(faultAbort.getPercentage()); - switch (faultAbort.getErrorTypeCase()) { - case HEADER_ABORT: - return 
ConfigOrError.fromConfig(FaultAbort.forHeader(percent)); - case HTTP_STATUS: - return ConfigOrError.fromConfig(FaultAbort.forStatus( - GrpcUtil.httpStatusToGrpcStatus(faultAbort.getHttpStatus()), percent)); - case GRPC_STATUS: - return ConfigOrError.fromConfig(FaultAbort.forStatus( - Status.fromCodeValue(faultAbort.getGrpcStatus()), percent)); - case ERRORTYPE_NOT_SET: - default: - return ConfigOrError.fromError( - "Unknown error type case: " + faultAbort.getErrorTypeCase()); + private static ConfigOrError parseHttpFault(HTTPFault httpFault) { + FaultDelay faultDelay = null; + FaultAbort faultAbort = null; + if (httpFault.hasDelay()) { + faultDelay = parseFaultDelay(httpFault.getDelay()); + } + if (httpFault.hasAbort()) { + ConfigOrError faultAbortOrError = parseFaultAbort(httpFault.getAbort()); + if (faultAbortOrError.errorDetail != null) { + return ConfigOrError.fromError( + "HttpFault contains invalid FaultAbort: " + faultAbortOrError.errorDetail); + } + faultAbort = faultAbortOrError.config; + } + Integer maxActiveFaults = null; + if (httpFault.hasMaxActiveFaults()) { + maxActiveFaults = httpFault.getMaxActiveFaults().getValue(); + if (maxActiveFaults < 0) { + maxActiveFaults = Integer.MAX_VALUE; + } + } + return ConfigOrError.fromConfig(FaultConfig.create(faultDelay, faultAbort, maxActiveFaults)); } - } - private static FaultConfig.FractionalPercent parsePercent(FractionalPercent proto) { - switch (proto.getDenominator()) { - case HUNDRED: - return FaultConfig.FractionalPercent.perHundred(proto.getNumerator()); - case TEN_THOUSAND: - return FaultConfig.FractionalPercent.perTenThousand(proto.getNumerator()); - case MILLION: - return FaultConfig.FractionalPercent.perMillion(proto.getNumerator()); - case UNRECOGNIZED: - default: - throw new IllegalArgumentException("Unknown denominator type: " + proto.getDenominator()); + private static FaultDelay parseFaultDelay( + io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultDelay faultDelay) { + FaultConfig.FractionalPercent percent = parsePercent(faultDelay.getPercentage()); + if (faultDelay.hasHeaderDelay()) { + return FaultDelay.forHeader(percent); + } + return FaultDelay.forFixedDelay(Durations.toNanos(faultDelay.getFixedDelay()), percent); } - } - @Override - public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { - return parseFilterConfig(rawProtoMessage); + @VisibleForTesting + static ConfigOrError parseFaultAbort( + io.envoyproxy.envoy.extensions.filters.http.fault.v3.FaultAbort faultAbort) { + FaultConfig.FractionalPercent percent = parsePercent(faultAbort.getPercentage()); + switch (faultAbort.getErrorTypeCase()) { + case HEADER_ABORT: + return ConfigOrError.fromConfig(FaultAbort.forHeader(percent)); + case HTTP_STATUS: + return ConfigOrError.fromConfig(FaultAbort.forStatus( + GrpcUtil.httpStatusToGrpcStatus(faultAbort.getHttpStatus()), percent)); + case GRPC_STATUS: + return ConfigOrError.fromConfig(FaultAbort.forStatus( + Status.fromCodeValue(faultAbort.getGrpcStatus()), percent)); + case ERRORTYPE_NOT_SET: + default: + return ConfigOrError.fromError( + "Unknown error type case: " + faultAbort.getErrorTypeCase()); + } + } + + private static FaultConfig.FractionalPercent parsePercent(FractionalPercent proto) { + switch (proto.getDenominator()) { + case HUNDRED: + return FaultConfig.FractionalPercent.perHundred(proto.getNumerator()); + case TEN_THOUSAND: + return FaultConfig.FractionalPercent.perTenThousand(proto.getNumerator()); + case MILLION: + return 
FaultConfig.FractionalPercent.perMillion(proto.getNumerator()); + case UNRECOGNIZED: + default: + throw new IllegalArgumentException("Unknown denominator type: " + proto.getDenominator()); + } + } } @Nullable diff --git a/xds/src/main/java/io/grpc/xds/Filter.java b/xds/src/main/java/io/grpc/xds/Filter.java index 29f8cc4e337..ab61ba2b570 100644 --- a/xds/src/main/java/io/grpc/xds/Filter.java +++ b/xds/src/main/java/io/grpc/xds/Filter.java @@ -25,48 +25,82 @@ import javax.annotation.Nullable; /** - * Defines the parsing functionality of an HTTP filter. A Filter may optionally implement either - * {@link ClientInterceptorBuilder} or {@link ServerInterceptorBuilder} or both, indicating it is - * capable of working on the client side or server side or both, respectively. + * Defines the parsing functionality of an HTTP filter. + * + *

A Filter may optionally implement either {@link Filter#buildClientInterceptor} or + * {@link Filter#buildServerInterceptor} or both, and return true from corresponding + * {@link Provider#isClientFilter()}, {@link Provider#isServerFilter()} to indicate that the filter + * is capable of working on the client side or server side or both, respectively. */ interface Filter { - /** - * The proto message types supported by this filter. A filter will be registered by each of its - * supported message types. - */ - String[] typeUrls(); + /** Represents an opaque data structure holding configuration for a filter. */ + interface FilterConfig { + String typeUrl(); + } /** - * Parses the top-level filter config from raw proto message. The message may be either a {@link - * com.google.protobuf.Any} or a {@link com.google.protobuf.Struct}. + * Common interface for filter providers. */ - ConfigOrError parseFilterConfig(Message rawProtoMessage); + interface Provider { + /** + * The proto message types supported by this filter. A filter will be registered by each of its + * supported message types. + */ + String[] typeUrls(); - /** - * Parses the per-filter override filter config from raw proto message. The message may be either - * a {@link com.google.protobuf.Any} or a {@link com.google.protobuf.Struct}. - */ - ConfigOrError parseFilterConfigOverride(Message rawProtoMessage); + /** + * Whether the filter can be installed on the client side. + * + *

Returns true if the filter implements {@link Filter#buildClientInterceptor}. + */ + default boolean isClientFilter() { + return false; + } - /** Represents an opaque data structure holding configuration for a filter. */ - interface FilterConfig { - String typeUrl(); + /** + * Whether the filter can be installed into xDS-enabled servers. + * + *

Returns true if the filter implements {@link Filter#buildServerInterceptor}. + */ + default boolean isServerFilter() { + return false; + } + + /** + * Creates a new instance of the filter. + * + *

Returns a filter instance registered with the same typeUrls as the provider, + * capable of working with the same FilterConfig type returned by provider's parse functions. + */ + Filter newInstance(); + + /** + * Parses the top-level filter config from raw proto message. The message may be either a {@link + * com.google.protobuf.Any} or a {@link com.google.protobuf.Struct}. + */ + ConfigOrError parseFilterConfig(Message rawProtoMessage); + + /** + * Parses the per-filter override filter config from raw proto message. The message may be + * either a {@link com.google.protobuf.Any} or a {@link com.google.protobuf.Struct}. + */ + ConfigOrError parseFilterConfigOverride(Message rawProtoMessage); } /** Uses the FilterConfigs produced above to produce an HTTP filter interceptor for clients. */ - interface ClientInterceptorBuilder { - @Nullable - ClientInterceptor buildClientInterceptor( - FilterConfig config, @Nullable FilterConfig overrideConfig, - ScheduledExecutorService scheduler); + @Nullable + default ClientInterceptor buildClientInterceptor( + FilterConfig config, @Nullable FilterConfig overrideConfig, + ScheduledExecutorService scheduler) { + return null; } /** Uses the FilterConfigs produced above to produce an HTTP filter interceptor for the server. */ - interface ServerInterceptorBuilder { - @Nullable - ServerInterceptor buildServerInterceptor( - FilterConfig config, @Nullable FilterConfig overrideConfig); + @Nullable + default ServerInterceptor buildServerInterceptor( + FilterConfig config, @Nullable FilterConfig overrideConfig) { + return null; } /** Filter config with instance name. */ diff --git a/xds/src/main/java/io/grpc/xds/FilterRegistry.java b/xds/src/main/java/io/grpc/xds/FilterRegistry.java index 7f1fe82c6c3..426c6d1b3f6 100644 --- a/xds/src/main/java/io/grpc/xds/FilterRegistry.java +++ b/xds/src/main/java/io/grpc/xds/FilterRegistry.java @@ -23,21 +23,21 @@ /** * A registry for all supported {@link Filter}s. Filters can be queried from the registry - * by any of the {@link Filter#typeUrls() type URLs}. + * by any of the {@link Filter.Provider#typeUrls() type URLs}. */ final class FilterRegistry { private static FilterRegistry instance; - private final Map supportedFilters = new HashMap<>(); + private final Map supportedFilters = new HashMap<>(); private FilterRegistry() {} static synchronized FilterRegistry getDefaultRegistry() { if (instance == null) { instance = newRegistry().register( - FaultFilter.INSTANCE, - RouterFilter.INSTANCE, - RbacFilter.INSTANCE); + new FaultFilter.Provider(), + new RouterFilter.Provider(), + new RbacFilter.Provider()); } return instance; } @@ -48,8 +48,8 @@ static FilterRegistry newRegistry() { } @VisibleForTesting - FilterRegistry register(Filter... filters) { - for (Filter filter : filters) { + FilterRegistry register(Filter.Provider... filters) { + for (Filter.Provider filter : filters) { for (String typeUrl : filter.typeUrls()) { supportedFilters.put(typeUrl, filter); } @@ -58,7 +58,7 @@ FilterRegistry register(Filter... 
filters) { } @Nullable - Filter get(String typeUrl) { + Filter.Provider get(String typeUrl) { return supportedFilters.get(typeUrl); } } diff --git a/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java b/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java index f73494d74db..7ed617c9843 100644 --- a/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java +++ b/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java @@ -35,7 +35,6 @@ import io.grpc.MethodDescriptor; import io.grpc.Status; import io.grpc.auth.MoreCallCredentials; -import io.grpc.xds.Filter.ClientInterceptorBuilder; import io.grpc.xds.MetadataRegistry.MetadataValueParser; import java.util.LinkedHashMap; import java.util.Map; @@ -47,50 +46,63 @@ * A {@link Filter} that injects a {@link CallCredentials} to handle * authentication for xDS credentials. */ -final class GcpAuthenticationFilter implements Filter, ClientInterceptorBuilder { +final class GcpAuthenticationFilter implements Filter { static final String TYPE_URL = "type.googleapis.com/envoy.extensions.filters.http.gcp_authn.v3.GcpAuthnFilterConfig"; - @Override - public String[] typeUrls() { - return new String[] { TYPE_URL }; - } + static final class Provider implements Filter.Provider { + @Override + public String[] typeUrls() { + return new String[]{TYPE_URL}; + } - @Override - public ConfigOrError parseFilterConfig(Message rawProtoMessage) { - GcpAuthnFilterConfig gcpAuthnProto; - if (!(rawProtoMessage instanceof Any)) { - return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); + @Override + public boolean isClientFilter() { + return true; } - Any anyMessage = (Any) rawProtoMessage; - try { - gcpAuthnProto = anyMessage.unpack(GcpAuthnFilterConfig.class); - } catch (InvalidProtocolBufferException e) { - return ConfigOrError.fromError("Invalid proto: " + e); + @Override + public GcpAuthenticationFilter newInstance() { + return new GcpAuthenticationFilter(); } - long cacheSize = 10; - // Validate cache_config - if (gcpAuthnProto.hasCacheConfig()) { - TokenCacheConfig cacheConfig = gcpAuthnProto.getCacheConfig(); - cacheSize = cacheConfig.getCacheSize().getValue(); - if (cacheSize == 0) { - return ConfigOrError.fromError( - "cache_config.cache_size must be greater than zero"); + @Override + public ConfigOrError parseFilterConfig(Message rawProtoMessage) { + GcpAuthnFilterConfig gcpAuthnProto; + if (!(rawProtoMessage instanceof Any)) { + return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); } - // LruCache's size is an int and briefly exceeds its maximum size before evicting entries - cacheSize = UnsignedLongs.min(cacheSize, Integer.MAX_VALUE - 1); - } + Any anyMessage = (Any) rawProtoMessage; - GcpAuthenticationConfig config = new GcpAuthenticationConfig((int) cacheSize); - return ConfigOrError.fromConfig(config); - } + try { + gcpAuthnProto = anyMessage.unpack(GcpAuthnFilterConfig.class); + } catch (InvalidProtocolBufferException e) { + return ConfigOrError.fromError("Invalid proto: " + e); + } - @Override - public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { - return parseFilterConfig(rawProtoMessage); + long cacheSize = 10; + // Validate cache_config + if (gcpAuthnProto.hasCacheConfig()) { + TokenCacheConfig cacheConfig = gcpAuthnProto.getCacheConfig(); + cacheSize = cacheConfig.getCacheSize().getValue(); + if (cacheSize == 0) { + return ConfigOrError.fromError( + "cache_config.cache_size must be greater than zero"); + } + // LruCache's size is an int and briefly 
exceeds its maximum size before evicting entries + cacheSize = UnsignedLongs.min(cacheSize, Integer.MAX_VALUE - 1); + } + + GcpAuthenticationConfig config = new GcpAuthenticationConfig((int) cacheSize); + return ConfigOrError.fromConfig(config); + } + + @Override + public ConfigOrError parseFilterConfigOverride( + Message rawProtoMessage) { + return parseFilterConfig(rawProtoMessage); + } } @Nullable diff --git a/xds/src/main/java/io/grpc/xds/InternalRbacFilter.java b/xds/src/main/java/io/grpc/xds/InternalRbacFilter.java index 54e6c748cd5..cedb3f4c85b 100644 --- a/xds/src/main/java/io/grpc/xds/InternalRbacFilter.java +++ b/xds/src/main/java/io/grpc/xds/InternalRbacFilter.java @@ -19,8 +19,6 @@ import io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC; import io.grpc.Internal; import io.grpc.ServerInterceptor; -import io.grpc.xds.RbacConfig; -import io.grpc.xds.RbacFilter; /** This class exposes some functionality in RbacFilter to other packages. */ @Internal @@ -30,11 +28,12 @@ private InternalRbacFilter() {} /** Parses RBAC filter config and creates AuthorizationServerInterceptor. */ public static ServerInterceptor createInterceptor(RBAC rbac) { - ConfigOrError filterConfig = RbacFilter.parseRbacConfig(rbac); + ConfigOrError filterConfig = RbacFilter.Provider.parseRbacConfig(rbac); if (filterConfig.errorDetail != null) { throw new IllegalArgumentException( String.format("Failed to parse Rbac policy: %s", filterConfig.errorDetail)); } - return new RbacFilter().buildServerInterceptor(filterConfig.config, null); + return new RbacFilter.Provider().newInstance() + .buildServerInterceptor(filterConfig.config, null); } } diff --git a/xds/src/main/java/io/grpc/xds/RbacFilter.java b/xds/src/main/java/io/grpc/xds/RbacFilter.java index 6a55f7f193e..2bc4eeb846b 100644 --- a/xds/src/main/java/io/grpc/xds/RbacFilter.java +++ b/xds/src/main/java/io/grpc/xds/RbacFilter.java @@ -18,7 +18,6 @@ import static com.google.common.base.Preconditions.checkNotNull; -import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.Any; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Message; @@ -34,7 +33,6 @@ import io.grpc.ServerCallHandler; import io.grpc.ServerInterceptor; import io.grpc.Status; -import io.grpc.xds.Filter.ServerInterceptorBuilder; import io.grpc.xds.internal.MatcherParser; import io.grpc.xds.internal.Matchers; import io.grpc.xds.internal.rbac.engine.GrpcAuthorizationEngine; @@ -66,10 +64,10 @@ import javax.annotation.Nullable; /** RBAC Http filter implementation. 
*/ -final class RbacFilter implements Filter, ServerInterceptorBuilder { +final class RbacFilter implements Filter { private static final Logger logger = Logger.getLogger(RbacFilter.class.getName()); - static final RbacFilter INSTANCE = new RbacFilter(); + private static final RbacFilter INSTANCE = new RbacFilter(); static final String TYPE_URL = "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC"; @@ -77,87 +75,99 @@ final class RbacFilter implements Filter, ServerInterceptorBuilder { private static final String TYPE_URL_OVERRIDE_CONFIG = "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute"; - RbacFilter() {} + private RbacFilter() {} - @Override - public String[] typeUrls() { - return new String[] { TYPE_URL, TYPE_URL_OVERRIDE_CONFIG }; - } + static final class Provider implements Filter.Provider { + @Override + public String[] typeUrls() { + return new String[] {TYPE_URL, TYPE_URL_OVERRIDE_CONFIG}; + } - @Override - public ConfigOrError parseFilterConfig(Message rawProtoMessage) { - RBAC rbacProto; - if (!(rawProtoMessage instanceof Any)) { - return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); + @Override + public boolean isServerFilter() { + return true; } - Any anyMessage = (Any) rawProtoMessage; - try { - rbacProto = anyMessage.unpack(RBAC.class); - } catch (InvalidProtocolBufferException e) { - return ConfigOrError.fromError("Invalid proto: " + e); + + @Override + public RbacFilter newInstance() { + return INSTANCE; } - return parseRbacConfig(rbacProto); - } - @VisibleForTesting - static ConfigOrError parseRbacConfig(RBAC rbac) { - if (!rbac.hasRules()) { - return ConfigOrError.fromConfig(RbacConfig.create(null)); + @Override + public ConfigOrError parseFilterConfig(Message rawProtoMessage) { + RBAC rbacProto; + if (!(rawProtoMessage instanceof Any)) { + return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); + } + Any anyMessage = (Any) rawProtoMessage; + try { + rbacProto = anyMessage.unpack(RBAC.class); + } catch (InvalidProtocolBufferException e) { + return ConfigOrError.fromError("Invalid proto: " + e); + } + return parseRbacConfig(rbacProto); } - io.envoyproxy.envoy.config.rbac.v3.RBAC rbacConfig = rbac.getRules(); - GrpcAuthorizationEngine.Action authAction; - switch (rbacConfig.getAction()) { - case ALLOW: - authAction = GrpcAuthorizationEngine.Action.ALLOW; - break; - case DENY: - authAction = GrpcAuthorizationEngine.Action.DENY; - break; - case LOG: + + @Override + public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { + RBACPerRoute rbacPerRoute; + if (!(rawProtoMessage instanceof Any)) { + return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); + } + Any anyMessage = (Any) rawProtoMessage; + try { + rbacPerRoute = anyMessage.unpack(RBACPerRoute.class); + } catch (InvalidProtocolBufferException e) { + return ConfigOrError.fromError("Invalid proto: " + e); + } + if (rbacPerRoute.hasRbac()) { + return parseRbacConfig(rbacPerRoute.getRbac()); + } else { return ConfigOrError.fromConfig(RbacConfig.create(null)); - case UNRECOGNIZED: - default: - return ConfigOrError.fromError("Unknown rbacConfig action type: " + rbacConfig.getAction()); + } } - List policyMatchers = new ArrayList<>(); - List> sortedPolicyEntries = rbacConfig.getPoliciesMap().entrySet() - .stream() - .sorted((a,b) -> a.getKey().compareTo(b.getKey())) - .collect(Collectors.toList()); - for (Map.Entry entry: sortedPolicyEntries) { - try { - Policy policy = 
entry.getValue(); - if (policy.hasCondition() || policy.hasCheckedCondition()) { + + static ConfigOrError parseRbacConfig(RBAC rbac) { + if (!rbac.hasRules()) { + return ConfigOrError.fromConfig(RbacConfig.create(null)); + } + io.envoyproxy.envoy.config.rbac.v3.RBAC rbacConfig = rbac.getRules(); + GrpcAuthorizationEngine.Action authAction; + switch (rbacConfig.getAction()) { + case ALLOW: + authAction = GrpcAuthorizationEngine.Action.ALLOW; + break; + case DENY: + authAction = GrpcAuthorizationEngine.Action.DENY; + break; + case LOG: + return ConfigOrError.fromConfig(RbacConfig.create(null)); + case UNRECOGNIZED: + default: return ConfigOrError.fromError( - "Policy.condition and Policy.checked_condition must not set: " + entry.getKey()); + "Unknown rbacConfig action type: " + rbacConfig.getAction()); + } + List policyMatchers = new ArrayList<>(); + List> sortedPolicyEntries = rbacConfig.getPoliciesMap().entrySet() + .stream() + .sorted((a,b) -> a.getKey().compareTo(b.getKey())) + .collect(Collectors.toList()); + for (Map.Entry entry: sortedPolicyEntries) { + try { + Policy policy = entry.getValue(); + if (policy.hasCondition() || policy.hasCheckedCondition()) { + return ConfigOrError.fromError( + "Policy.condition and Policy.checked_condition must not set: " + entry.getKey()); + } + policyMatchers.add(PolicyMatcher.create(entry.getKey(), + parsePermissionList(policy.getPermissionsList()), + parsePrincipalList(policy.getPrincipalsList()))); + } catch (Exception e) { + return ConfigOrError.fromError("Encountered error parsing policy: " + e); } - policyMatchers.add(PolicyMatcher.create(entry.getKey(), - parsePermissionList(policy.getPermissionsList()), - parsePrincipalList(policy.getPrincipalsList()))); - } catch (Exception e) { - return ConfigOrError.fromError("Encountered error parsing policy: " + e); } - } - return ConfigOrError.fromConfig(RbacConfig.create( - AuthConfig.create(policyMatchers, authAction))); - } - - @Override - public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { - RBACPerRoute rbacPerRoute; - if (!(rawProtoMessage instanceof Any)) { - return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); - } - Any anyMessage = (Any) rawProtoMessage; - try { - rbacPerRoute = anyMessage.unpack(RBACPerRoute.class); - } catch (InvalidProtocolBufferException e) { - return ConfigOrError.fromError("Invalid proto: " + e); - } - if (rbacPerRoute.hasRbac()) { - return parseRbacConfig(rbacPerRoute.getRbac()); - } else { - return ConfigOrError.fromConfig(RbacConfig.create(null)); + return ConfigOrError.fromConfig(RbacConfig.create( + AuthConfig.create(policyMatchers, authAction))); } } diff --git a/xds/src/main/java/io/grpc/xds/RouterFilter.java b/xds/src/main/java/io/grpc/xds/RouterFilter.java index 8038c1b98ae..939bd0b12ab 100644 --- a/xds/src/main/java/io/grpc/xds/RouterFilter.java +++ b/xds/src/main/java/io/grpc/xds/RouterFilter.java @@ -17,18 +17,12 @@ package io.grpc.xds; import com.google.protobuf.Message; -import io.grpc.ClientInterceptor; -import io.grpc.ServerInterceptor; -import io.grpc.xds.Filter.ClientInterceptorBuilder; -import io.grpc.xds.Filter.ServerInterceptorBuilder; -import java.util.concurrent.ScheduledExecutorService; -import javax.annotation.Nullable; /** * Router filter implementation. Currently this filter does not parse any field in the config. 
*/ -enum RouterFilter implements Filter, ClientInterceptorBuilder, ServerInterceptorBuilder { - INSTANCE; +final class RouterFilter implements Filter { + private static final RouterFilter INSTANCE = new RouterFilter(); static final String TYPE_URL = "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"; @@ -36,7 +30,7 @@ enum RouterFilter implements Filter, ClientInterceptorBuilder, ServerInterceptor static final FilterConfig ROUTER_CONFIG = new FilterConfig() { @Override public String typeUrl() { - return RouterFilter.TYPE_URL; + return TYPE_URL; } @Override @@ -45,33 +39,38 @@ public String toString() { } }; - @Override - public String[] typeUrls() { - return new String[] { TYPE_URL }; - } + static final class Provider implements Filter.Provider { + @Override + public String[] typeUrls() { + return new String[]{TYPE_URL}; + } - @Override - public ConfigOrError parseFilterConfig(Message rawProtoMessage) { - return ConfigOrError.fromConfig(ROUTER_CONFIG); - } + @Override + public boolean isClientFilter() { + return true; + } - @Override - public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { - return ConfigOrError.fromError("Router Filter should not have override config"); - } + @Override + public boolean isServerFilter() { + return true; + } - @Nullable - @Override - public ClientInterceptor buildClientInterceptor( - FilterConfig config, @Nullable FilterConfig overrideConfig, - ScheduledExecutorService scheduler) { - return null; - } + @Override + public RouterFilter newInstance() { + return INSTANCE; + } - @Nullable - @Override - public ServerInterceptor buildServerInterceptor( - FilterConfig config, @Nullable Filter.FilterConfig overrideConfig) { - return null; + @Override + public ConfigOrError parseFilterConfig(Message rawProtoMessage) { + return ConfigOrError.fromConfig(ROUTER_CONFIG); + } + + @Override + public ConfigOrError parseFilterConfigOverride( + Message rawProtoMessage) { + return ConfigOrError.fromError("Router Filter should not have override config"); + } } + + private RouterFilter() {} } diff --git a/xds/src/main/java/io/grpc/xds/XdsListenerResource.java b/xds/src/main/java/io/grpc/xds/XdsListenerResource.java index a18b093e38f..4b554be1743 100644 --- a/xds/src/main/java/io/grpc/xds/XdsListenerResource.java +++ b/xds/src/main/java/io/grpc/xds/XdsListenerResource.java @@ -575,12 +575,8 @@ static StructOrError parseHttpFilter( String filterName = httpFilter.getName(); boolean isOptional = httpFilter.getIsOptional(); if (!httpFilter.hasTypedConfig()) { - if (isOptional) { - return null; - } else { - return StructOrError.fromError( - "HttpFilter [" + filterName + "] is not optional and has no typed config"); - } + return isOptional ? null : StructOrError.fromError( + "HttpFilter [" + filterName + "] is not optional and has no typed config"); } Message rawConfig = httpFilter.getTypedConfig(); String typeUrl = httpFilter.getTypedConfig().getTypeUrl(); @@ -600,18 +596,17 @@ static StructOrError parseHttpFilter( return StructOrError.fromError( "HttpFilter [" + filterName + "] contains invalid proto: " + e); } - Filter filter = filterRegistry.get(typeUrl); - if ((isForClient && !(filter instanceof Filter.ClientInterceptorBuilder)) - || (!isForClient && !(filter instanceof Filter.ServerInterceptorBuilder))) { - if (isOptional) { - return null; - } else { - return StructOrError.fromError( - "HttpFilter [" + filterName + "](" + typeUrl + ") is required but unsupported for " - + (isForClient ? 
"client" : "server")); - } + + Filter.Provider provider = filterRegistry.get(typeUrl); + if (provider == null + || (isForClient && !provider.isClientFilter()) + || (!isForClient && !provider.isServerFilter())) { + // Filter type not supported. + return isOptional ? null : StructOrError.fromError( + "HttpFilter [" + filterName + "](" + typeUrl + ") is required but unsupported for " + ( + isForClient ? "client" : "server")); } - ConfigOrError filterConfig = filter.parseFilterConfig(rawConfig); + ConfigOrError filterConfig = provider.parseFilterConfig(rawConfig); if (filterConfig.errorDetail != null) { return StructOrError.fromError( "Invalid filter config for HttpFilter [" + filterName + "]: " + filterConfig.errorDetail); diff --git a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java index 21f5d5efce6..b7b1ed0bdba 100644 --- a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java +++ b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java @@ -49,7 +49,6 @@ import io.grpc.internal.GrpcUtil; import io.grpc.internal.ObjectPool; import io.grpc.xds.ClusterSpecifierPlugin.PluginConfig; -import io.grpc.xds.Filter.ClientInterceptorBuilder; import io.grpc.xds.Filter.FilterConfig; import io.grpc.xds.Filter.NamedFilterConfig; import io.grpc.xds.RouteLookupServiceClusterSpecifierPlugin.RlsPluginConfig; @@ -827,26 +826,36 @@ private ClientInterceptor createFilters( if (filterConfigs == null) { return new PassthroughClientInterceptor(); } + Map selectedOverrideConfigs = new HashMap<>(virtualHost.filterConfigOverrides()); selectedOverrideConfigs.putAll(route.filterConfigOverrides()); if (weightedCluster != null) { selectedOverrideConfigs.putAll(weightedCluster.filterConfigOverrides()); } + ImmutableList.Builder filterInterceptors = ImmutableList.builder(); for (NamedFilterConfig namedFilter : filterConfigs) { - FilterConfig filterConfig = namedFilter.filterConfig; - Filter filter = filterRegistry.get(filterConfig.typeUrl()); - if (filter instanceof ClientInterceptorBuilder) { - ClientInterceptor interceptor = ((ClientInterceptorBuilder) filter) - .buildClientInterceptor( - filterConfig, selectedOverrideConfigs.get(namedFilter.name), - scheduler); - if (interceptor != null) { - filterInterceptors.add(interceptor); - } + FilterConfig config = namedFilter.filterConfig; + String name = namedFilter.name; + String typeUrl = config.typeUrl(); + + Filter.Provider provider = filterRegistry.get(typeUrl); + if (provider == null || !provider.isClientFilter()) { + continue; + } + + Filter filter = provider.newInstance(); + + ClientInterceptor interceptor = + filter.buildClientInterceptor(config, selectedOverrideConfigs.get(name), scheduler); + if (interceptor != null) { + filterInterceptors.add(interceptor); } } + + // Combine interceptors produced by different filters into a single one that executes + // them sequentially. The order is preserved. 
return combineInterceptors(filterInterceptors.build()); } diff --git a/xds/src/main/java/io/grpc/xds/XdsRouteConfigureResource.java b/xds/src/main/java/io/grpc/xds/XdsRouteConfigureResource.java index c5ca8d45cb3..80a77cbb1d4 100644 --- a/xds/src/main/java/io/grpc/xds/XdsRouteConfigureResource.java +++ b/xds/src/main/java/io/grpc/xds/XdsRouteConfigureResource.java @@ -245,8 +245,8 @@ static StructOrError> parseOverrideFilterConfigs( return StructOrError.fromError( "FilterConfig [" + name + "] contains invalid proto: " + e); } - Filter filter = filterRegistry.get(typeUrl); - if (filter == null) { + Filter.Provider provider = filterRegistry.get(typeUrl); + if (provider == null) { if (isOptional) { continue; } @@ -254,7 +254,7 @@ static StructOrError> parseOverrideFilterConfigs( "HttpFilter [" + name + "](" + typeUrl + ") is required but unsupported"); } ConfigOrError filterConfig = - filter.parseFilterConfigOverride(rawConfig); + provider.parseFilterConfigOverride(rawConfig); if (filterConfig.errorDetail != null) { return StructOrError.fromError( "Invalid filter config for HttpFilter [" + name + "]: " + filterConfig.errorDetail); diff --git a/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java b/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java index 3a9b98ee321..bbb17d9b616 100644 --- a/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java +++ b/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java @@ -47,7 +47,6 @@ import io.grpc.xds.EnvoyServerProtoData.FilterChain; import io.grpc.xds.Filter.FilterConfig; import io.grpc.xds.Filter.NamedFilterConfig; -import io.grpc.xds.Filter.ServerInterceptorBuilder; import io.grpc.xds.FilterChainMatchingProtocolNegotiators.FilterChainMatchingHandler.FilterChainSelector; import io.grpc.xds.ThreadSafeRandom.ThreadSafeRandomImpl; import io.grpc.xds.VirtualHost.Route; @@ -524,37 +523,56 @@ private AtomicReference generateRoutingConfig(FilterChain f } private ImmutableMap generatePerRouteInterceptors( - List namedFilterConfigs, List virtualHosts) { + @Nullable List filterConfigs, List virtualHosts) { + // This should always be called from the sync context. + // Ideally we'd want to throw otherwise, but this breaks the tests now. + // syncContext.throwIfNotInThisSynchronizationContext(); + ImmutableMap.Builder perRouteInterceptors = new ImmutableMap.Builder<>(); + for (VirtualHost virtualHost : virtualHosts) { for (Route route : virtualHost.routes()) { - List filterInterceptors = new ArrayList<>(); - Map selectedOverrideConfigs = - new HashMap<>(virtualHost.filterConfigOverrides()); - selectedOverrideConfigs.putAll(route.filterConfigOverrides()); - if (namedFilterConfigs != null) { - for (NamedFilterConfig namedFilterConfig : namedFilterConfigs) { - FilterConfig filterConfig = namedFilterConfig.filterConfig; - Filter filter = filterRegistry.get(filterConfig.typeUrl()); - if (filter instanceof ServerInterceptorBuilder) { - ServerInterceptor interceptor = - ((ServerInterceptorBuilder) filter).buildServerInterceptor( - filterConfig, selectedOverrideConfigs.get(namedFilterConfig.name)); - if (interceptor != null) { - filterInterceptors.add(interceptor); - } - } else { - logger.log(Level.WARNING, "HttpFilterConfig(type URL: " - + filterConfig.typeUrl() + ") is not supported on server-side. " - + "Probably a bug at ClientXdsClient verification."); - } + // Short circuit. + if (filterConfigs == null) { + perRouteInterceptors.put(route, noopInterceptor); + continue; + } + + // Override vhost filter configs with more specific per-route configs. 
+ Map perRouteOverrides = ImmutableMap.builder() + .putAll(virtualHost.filterConfigOverrides()) + .putAll(route.filterConfigOverrides()) + .buildKeepingLast(); + + // Interceptors for this vhost/route combo. + List interceptors = new ArrayList<>(filterConfigs.size()); + + for (NamedFilterConfig namedFilter : filterConfigs) { + FilterConfig config = namedFilter.filterConfig; + String name = namedFilter.name; + String typeUrl = config.typeUrl(); + + Filter.Provider provider = filterRegistry.get(typeUrl); + if (provider == null || !provider.isServerFilter()) { + logger.warning("HttpFilter[" + name + "]: not supported on server-side: " + typeUrl); + continue; + } + + Filter filter = provider.newInstance(); + ServerInterceptor interceptor = + filter.buildServerInterceptor(config, perRouteOverrides.get(name)); + if (interceptor != null) { + interceptors.add(interceptor); } } - ServerInterceptor interceptor = combineInterceptors(filterInterceptors); - perRouteInterceptors.put(route, interceptor); + + // Combine interceptors produced by different filters into a single one that executes + // them sequentially. The order is preserved. + perRouteInterceptors.put(route, combineInterceptors(interceptors)); } } + return perRouteInterceptors.buildOrThrow(); } diff --git a/xds/src/test/java/io/grpc/xds/FaultFilterTest.java b/xds/src/test/java/io/grpc/xds/FaultFilterTest.java index f85f29ec0a3..8f0a33951b0 100644 --- a/xds/src/test/java/io/grpc/xds/FaultFilterTest.java +++ b/xds/src/test/java/io/grpc/xds/FaultFilterTest.java @@ -33,16 +33,23 @@ /** Tests for {@link FaultFilter}. */ @RunWith(JUnit4.class) public class FaultFilterTest { + private static final FaultFilter.Provider FILTER_PROVIDER = new FaultFilter.Provider(); + + @Test + public void filterType_clientOnly() { + assertThat(FILTER_PROVIDER.isClientFilter()).isTrue(); + assertThat(FILTER_PROVIDER.isServerFilter()).isFalse(); + } @Test public void parseFaultAbort_convertHttpStatus() { Any rawConfig = Any.pack( HTTPFault.newBuilder().setAbort(FaultAbort.newBuilder().setHttpStatus(404)).build()); - FaultConfig faultConfig = FaultFilter.INSTANCE.parseFilterConfig(rawConfig).config; + FaultConfig faultConfig = FILTER_PROVIDER.parseFilterConfig(rawConfig).config; assertThat(faultConfig.faultAbort().status().getCode()) .isEqualTo(GrpcUtil.httpStatusToGrpcStatus(404).getCode()); - FaultConfig faultConfigOverride = - FaultFilter.INSTANCE.parseFilterConfigOverride(rawConfig).config; + + FaultConfig faultConfigOverride = FILTER_PROVIDER.parseFilterConfigOverride(rawConfig).config; assertThat(faultConfigOverride.faultAbort().status().getCode()) .isEqualTo(GrpcUtil.httpStatusToGrpcStatus(404).getCode()); } @@ -54,7 +61,7 @@ public void parseFaultAbort_withHeaderAbort() { .setPercentage(FractionalPercent.newBuilder() .setNumerator(20).setDenominator(DenominatorType.HUNDRED)) .setHeaderAbort(HeaderAbort.getDefaultInstance()).build(); - FaultConfig.FaultAbort faultAbort = FaultFilter.parseFaultAbort(proto).config; + FaultConfig.FaultAbort faultAbort = FaultFilter.Provider.parseFaultAbort(proto).config; assertThat(faultAbort.headerAbort()).isTrue(); assertThat(faultAbort.percent().numerator()).isEqualTo(20); assertThat(faultAbort.percent().denominatorType()) @@ -68,7 +75,7 @@ public void parseFaultAbort_withHttpStatus() { .setPercentage(FractionalPercent.newBuilder() .setNumerator(100).setDenominator(DenominatorType.TEN_THOUSAND)) .setHttpStatus(400).build(); - FaultConfig.FaultAbort res = FaultFilter.parseFaultAbort(proto).config; + FaultConfig.FaultAbort res = 
FaultFilter.Provider.parseFaultAbort(proto).config; assertThat(res.percent().numerator()).isEqualTo(100); assertThat(res.percent().denominatorType()) .isEqualTo(FaultConfig.FractionalPercent.DenominatorType.TEN_THOUSAND); @@ -82,7 +89,7 @@ public void parseFaultAbort_withGrpcStatus() { .setPercentage(FractionalPercent.newBuilder() .setNumerator(600).setDenominator(DenominatorType.MILLION)) .setGrpcStatus(Code.DEADLINE_EXCEEDED.value()).build(); - FaultConfig.FaultAbort faultAbort = FaultFilter.parseFaultAbort(proto).config; + FaultConfig.FaultAbort faultAbort = FaultFilter.Provider.parseFaultAbort(proto).config; assertThat(faultAbort.percent().numerator()).isEqualTo(600); assertThat(faultAbort.percent().denominatorType()) .isEqualTo(FaultConfig.FractionalPercent.DenominatorType.MILLION); diff --git a/xds/src/test/java/io/grpc/xds/GcpAuthenticationFilterTest.java b/xds/src/test/java/io/grpc/xds/GcpAuthenticationFilterTest.java index 3ca240ab7c7..52efaf9bd7b 100644 --- a/xds/src/test/java/io/grpc/xds/GcpAuthenticationFilterTest.java +++ b/xds/src/test/java/io/grpc/xds/GcpAuthenticationFilterTest.java @@ -35,6 +35,7 @@ import io.grpc.ClientInterceptor; import io.grpc.MethodDescriptor; import io.grpc.testing.TestMethodDescriptors; +import io.grpc.xds.GcpAuthenticationFilter.GcpAuthenticationConfig; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -43,6 +44,14 @@ @RunWith(JUnit4.class) public class GcpAuthenticationFilterTest { + private static final GcpAuthenticationFilter.Provider FILTER_PROVIDER = + new GcpAuthenticationFilter.Provider(); + + @Test + public void filterType_clientOnly() { + assertThat(FILTER_PROVIDER.isClientFilter()).isTrue(); + assertThat(FILTER_PROVIDER.isServerFilter()).isFalse(); + } @Test public void testParseFilterConfig_withValidConfig() { @@ -51,13 +60,11 @@ public void testParseFilterConfig_withValidConfig() { .build(); Any anyMessage = Any.pack(config); - GcpAuthenticationFilter filter = new GcpAuthenticationFilter(); - ConfigOrError result = filter.parseFilterConfig(anyMessage); + ConfigOrError result = FILTER_PROVIDER.parseFilterConfig(anyMessage); assertNotNull(result.config); assertNull(result.errorDetail); - assertEquals(20L, - ((GcpAuthenticationFilter.GcpAuthenticationConfig) result.config).getCacheSize()); + assertEquals(20L, result.config.getCacheSize()); } @Test @@ -67,8 +74,7 @@ public void testParseFilterConfig_withZeroCacheSize() { .build(); Any anyMessage = Any.pack(config); - GcpAuthenticationFilter filter = new GcpAuthenticationFilter(); - ConfigOrError result = filter.parseFilterConfig(anyMessage); + ConfigOrError result = FILTER_PROVIDER.parseFilterConfig(anyMessage); assertNull(result.config); assertNotNull(result.errorDetail); @@ -77,9 +83,9 @@ public void testParseFilterConfig_withZeroCacheSize() { @Test public void testParseFilterConfig_withInvalidMessageType() { - GcpAuthenticationFilter filter = new GcpAuthenticationFilter(); Message invalidMessage = Empty.getDefaultInstance(); - ConfigOrError result = filter.parseFilterConfig(invalidMessage); + ConfigOrError result = + FILTER_PROVIDER.parseFilterConfig(invalidMessage); assertNull(result.config); assertThat(result.errorDetail).contains("Invalid config type"); @@ -87,8 +93,7 @@ public void testParseFilterConfig_withInvalidMessageType() { @Test public void testClientInterceptor_createsAndReusesCachedCredentials() { - GcpAuthenticationFilter.GcpAuthenticationConfig config = - new GcpAuthenticationFilter.GcpAuthenticationConfig(10); + 
GcpAuthenticationConfig config = new GcpAuthenticationConfig(10); GcpAuthenticationFilter filter = new GcpAuthenticationFilter(); // Create interceptor diff --git a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java index 314b2094480..610d147ccf9 100644 --- a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java +++ b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java @@ -110,7 +110,6 @@ import io.envoyproxy.envoy.type.v3.FractionalPercent; import io.envoyproxy.envoy.type.v3.FractionalPercent.DenominatorType; import io.envoyproxy.envoy.type.v3.Int64Range; -import io.grpc.ClientInterceptor; import io.grpc.EquivalentAddressGroup; import io.grpc.InsecureChannelCredentials; import io.grpc.LoadBalancerRegistry; @@ -150,9 +149,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import javax.annotation.Nullable; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -165,6 +162,10 @@ @RunWith(JUnit4.class) public class GrpcXdsClientImplDataTest { + private static final FaultFilter.Provider FAULT_FILTER_PROVIDER = new FaultFilter.Provider(); + private static final RbacFilter.Provider RBAC_FILTER_PROVIDER = new RbacFilter.Provider(); + private static final RouterFilter.Provider ROUTER_FILTER_PROVIDER = new RouterFilter.Provider(); + private static final ServerInfo LRS_SERVER_INFO = ServerInfo.create("lrs.googleapis.com", InsecureChannelCredentials.create()); private static final String GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE = @@ -1243,36 +1244,39 @@ public String typeUrl() { } } - private static class TestFilter implements io.grpc.xds.Filter, - io.grpc.xds.Filter.ClientInterceptorBuilder { - @Override - public String[] typeUrls() { - return new String[]{"test-url"}; - } + private static class TestFilter implements io.grpc.xds.Filter { - @Override - public ConfigOrError parseFilterConfig(Message rawProtoMessage) { - return ConfigOrError.fromConfig(new SimpleFilterConfig(rawProtoMessage)); - } + static final class Provider implements io.grpc.xds.Filter.Provider { + @Override + public String[] typeUrls() { + return new String[]{"test-url"}; + } - @Override - public ConfigOrError parseFilterConfigOverride( - Message rawProtoMessage) { - return ConfigOrError.fromConfig(new SimpleFilterConfig(rawProtoMessage)); - } + @Override + public boolean isClientFilter() { + return true; + } - @Nullable - @Override - public ClientInterceptor buildClientInterceptor(FilterConfig config, - @Nullable FilterConfig overrideConfig, - ScheduledExecutorService scheduler) { - return null; + @Override + public TestFilter newInstance() { + return new TestFilter(); + } + + @Override + public ConfigOrError parseFilterConfig(Message rawProtoMessage) { + return ConfigOrError.fromConfig(new SimpleFilterConfig(rawProtoMessage)); + } + + @Override + public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { + return ConfigOrError.fromConfig(new SimpleFilterConfig(rawProtoMessage)); + } } } @Test public void parseHttpFilter_typedStructMigration() { - filterRegistry.register(new TestFilter()); + filterRegistry.register(new TestFilter.Provider()); Struct rawStruct = Struct.newBuilder() .putFields("name", Value.newBuilder().setStringValue("default").build()) .build(); @@ -1301,7 +1305,7 @@ public void parseHttpFilter_typedStructMigration() { @Test public void 
parseOverrideHttpFilter_typedStructMigration() { - filterRegistry.register(new TestFilter()); + filterRegistry.register(new TestFilter.Provider()); Struct rawStruct0 = Struct.newBuilder() .putFields("name", Value.newBuilder().setStringValue("default0").build()) .build(); @@ -1342,7 +1346,7 @@ public void parseHttpFilter_unsupportedAndRequired() { @Test public void parseHttpFilter_routerFilterForClient() { - filterRegistry.register(RouterFilter.INSTANCE); + filterRegistry.register(ROUTER_FILTER_PROVIDER); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1356,7 +1360,7 @@ public void parseHttpFilter_routerFilterForClient() { @Test public void parseHttpFilter_routerFilterForServer() { - filterRegistry.register(RouterFilter.INSTANCE); + filterRegistry.register(ROUTER_FILTER_PROVIDER); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1370,7 +1374,7 @@ public void parseHttpFilter_routerFilterForServer() { @Test public void parseHttpFilter_faultConfigForClient() { - filterRegistry.register(FaultFilter.INSTANCE); + filterRegistry.register(FAULT_FILTER_PROVIDER); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1397,7 +1401,7 @@ public void parseHttpFilter_faultConfigForClient() { @Test public void parseHttpFilter_faultConfigUnsupportedForServer() { - filterRegistry.register(FaultFilter.INSTANCE); + filterRegistry.register(FAULT_FILTER_PROVIDER); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1426,7 +1430,7 @@ public void parseHttpFilter_faultConfigUnsupportedForServer() { @Test public void parseHttpFilter_rbacConfigForServer() { - filterRegistry.register(RbacFilter.INSTANCE); + filterRegistry.register(RBAC_FILTER_PROVIDER); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1453,7 +1457,7 @@ public void parseHttpFilter_rbacConfigForServer() { @Test public void parseHttpFilter_rbacConfigUnsupportedForClient() { - filterRegistry.register(RbacFilter.INSTANCE); + filterRegistry.register(RBAC_FILTER_PROVIDER); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1482,7 +1486,7 @@ public void parseHttpFilter_rbacConfigUnsupportedForClient() { @Test public void parseOverrideRbacFilterConfig() { - filterRegistry.register(RbacFilter.INSTANCE); + filterRegistry.register(RBAC_FILTER_PROVIDER); RBACPerRoute rbacPerRoute = RBACPerRoute.newBuilder() .setRbac( @@ -1508,7 +1512,7 @@ public void parseOverrideRbacFilterConfig() { @Test public void parseOverrideFilterConfigs_unsupportedButOptional() { - filterRegistry.register(FaultFilter.INSTANCE); + filterRegistry.register(FAULT_FILTER_PROVIDER); HTTPFault httpFault = HTTPFault.newBuilder() .setDelay(FaultDelay.newBuilder().setFixedDelay(Durations.fromNanos(3000))) .build(); @@ -1528,7 +1532,7 @@ public void parseOverrideFilterConfigs_unsupportedButOptional() { @Test public void parseOverrideFilterConfigs_unsupportedAndRequired() { - filterRegistry.register(FaultFilter.INSTANCE); + filterRegistry.register(FAULT_FILTER_PROVIDER); HTTPFault httpFault = HTTPFault.newBuilder() .setDelay(FaultDelay.newBuilder().setFixedDelay(Durations.fromNanos(3000))) .build(); @@ -1620,7 +1624,7 @@ public void parseHttpConnectionManager_duplicateHttpFilters() throws ResourceInv @Test public void parseHttpConnectionManager_lastNotTerminal() throws ResourceInvalidException { - filterRegistry.register(FaultFilter.INSTANCE); + filterRegistry.register(FAULT_FILTER_PROVIDER); HttpConnectionManager hcm = HttpConnectionManager.newBuilder() 
.addHttpFilters( @@ -1638,7 +1642,7 @@ public void parseHttpConnectionManager_lastNotTerminal() throws ResourceInvalidE @Test public void parseHttpConnectionManager_terminalNotLast() throws ResourceInvalidException { - filterRegistry.register(RouterFilter.INSTANCE); + filterRegistry.register(ROUTER_FILTER_PROVIDER); HttpConnectionManager hcm = HttpConnectionManager.newBuilder() .addHttpFilters( diff --git a/xds/src/test/java/io/grpc/xds/RbacFilterTest.java b/xds/src/test/java/io/grpc/xds/RbacFilterTest.java index 013b21e3f45..7f195693d84 100644 --- a/xds/src/test/java/io/grpc/xds/RbacFilterTest.java +++ b/xds/src/test/java/io/grpc/xds/RbacFilterTest.java @@ -78,6 +78,13 @@ public class RbacFilterTest { private static final String PATH = "auth"; private static final StringMatcher STRING_MATCHER = StringMatcher.newBuilder().setExact("/" + PATH).setIgnoreCase(true).build(); + private static final RbacFilter.Provider FILTER_PROVIDER = new RbacFilter.Provider(); + + @Test + public void filterType_serverOnly() { + assertThat(FILTER_PROVIDER.isClientFilter()).isFalse(); + assertThat(FILTER_PROVIDER.isServerFilter()).isTrue(); + } @Test @SuppressWarnings({"unchecked", "deprecation"}) @@ -252,7 +259,7 @@ public void testAuthorizationInterceptor() { OrMatcher.create(AlwaysTrueMatcher.INSTANCE)); AuthConfig authconfig = AuthConfig.create(Collections.singletonList(policyMatcher), GrpcAuthorizationEngine.Action.ALLOW); - new RbacFilter().buildServerInterceptor(RbacConfig.create(authconfig), null) + FILTER_PROVIDER.newInstance().buildServerInterceptor(RbacConfig.create(authconfig), null) .interceptCall(mockServerCall, new Metadata(), mockHandler); verify(mockHandler, never()).startCall(eq(mockServerCall), any(Metadata.class)); ArgumentCaptor captor = ArgumentCaptor.forClass(Status.class); @@ -264,7 +271,7 @@ public void testAuthorizationInterceptor() { authconfig = AuthConfig.create(Collections.singletonList(policyMatcher), GrpcAuthorizationEngine.Action.DENY); - new RbacFilter().buildServerInterceptor(RbacConfig.create(authconfig), null) + FILTER_PROVIDER.newInstance().buildServerInterceptor(RbacConfig.create(authconfig), null) .interceptCall(mockServerCall, new Metadata(), mockHandler); verify(mockHandler).startCall(eq(mockServerCall), any(Metadata.class)); } @@ -290,7 +297,7 @@ public void handleException() { .putPolicies("policy-name", Policy.newBuilder().setCondition(Expr.newBuilder().build()).build()) .build()).build(); - result = new RbacFilter().parseFilterConfig(Any.pack(rawProto)); + result = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto)); assertThat(result.errorDetail).isNotNull(); } @@ -312,10 +319,10 @@ public void overrideConfig() { RbacConfig original = RbacConfig.create(authconfig); RBACPerRoute rbacPerRoute = RBACPerRoute.newBuilder().build(); - RbacConfig override = - new RbacFilter().parseFilterConfigOverride(Any.pack(rbacPerRoute)).config; + RbacConfig override = FILTER_PROVIDER.parseFilterConfigOverride(Any.pack(rbacPerRoute)).config; assertThat(override).isEqualTo(RbacConfig.create(null)); - ServerInterceptor interceptor = new RbacFilter().buildServerInterceptor(original, override); + ServerInterceptor interceptor = + FILTER_PROVIDER.newInstance().buildServerInterceptor(original, override); assertThat(interceptor).isNull(); policyMatcher = PolicyMatcher.create("policy-matcher-override", @@ -325,7 +332,7 @@ public void overrideConfig() { GrpcAuthorizationEngine.Action.ALLOW); override = RbacConfig.create(authconfig); - new RbacFilter().buildServerInterceptor(original, 
override) + FILTER_PROVIDER.newInstance().buildServerInterceptor(original, override) .interceptCall(mockServerCall, new Metadata(), mockHandler); verify(mockHandler).startCall(eq(mockServerCall), any(Metadata.class)); verify(mockServerCall).getAttributes(); @@ -337,22 +344,22 @@ public void ignoredConfig() { Message rawProto = io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC.newBuilder() .setRules(RBAC.newBuilder().setAction(Action.LOG) .putPolicies("policy-name", Policy.newBuilder().build()).build()).build(); - ConfigOrError result = new RbacFilter().parseFilterConfig(Any.pack(rawProto)); + ConfigOrError result = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto)); assertThat(result.config).isEqualTo(RbacConfig.create(null)); } @Test public void testOrderIndependenceOfPolicies() { Message rawProto = buildComplexRbac(ImmutableList.of(1, 2, 3, 4, 5, 6), true); - ConfigOrError ascFirst = new RbacFilter().parseFilterConfig(Any.pack(rawProto)); + ConfigOrError ascFirst = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto)); rawProto = buildComplexRbac(ImmutableList.of(1, 2, 3, 4, 5, 6), false); - ConfigOrError ascLast = new RbacFilter().parseFilterConfig(Any.pack(rawProto)); + ConfigOrError ascLast = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto)); assertThat(ascFirst.config).isEqualTo(ascLast.config); rawProto = buildComplexRbac(ImmutableList.of(6, 5, 4, 3, 2, 1), true); - ConfigOrError decFirst = new RbacFilter().parseFilterConfig(Any.pack(rawProto)); + ConfigOrError decFirst = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto)); assertThat(ascFirst.config).isEqualTo(decFirst.config); } @@ -374,14 +381,14 @@ private MethodDescriptor.Builder method() { private ConfigOrError parse(List permissionList, List principalList) { - return RbacFilter.parseRbacConfig(buildRbac(permissionList, principalList)); + return RbacFilter.Provider.parseRbacConfig(buildRbac(permissionList, principalList)); } private ConfigOrError parseRaw(List permissionList, List principalList) { Message rawProto = buildRbac(permissionList, principalList); Any proto = Any.pack(rawProto); - return new RbacFilter().parseFilterConfig(proto); + return FILTER_PROVIDER.parseFilterConfig(proto); } private io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC buildRbac( @@ -449,6 +456,6 @@ private ConfigOrError parseOverride(List permissionList, RBACPerRoute rbacPerRoute = RBACPerRoute.newBuilder().setRbac( buildRbac(permissionList, principalList)).build(); Any proto = Any.pack(rbacPerRoute); - return new RbacFilter().parseFilterConfigOverride(proto); + return FILTER_PROVIDER.parseFilterConfigOverride(proto); } } diff --git a/xds/src/test/java/io/grpc/xds/RouterFilterTest.java b/xds/src/test/java/io/grpc/xds/RouterFilterTest.java new file mode 100644 index 00000000000..30fd8a6dc38 --- /dev/null +++ b/xds/src/test/java/io/grpc/xds/RouterFilterTest.java @@ -0,0 +1,36 @@ +/* + * Copyright 2025 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.grpc.xds; + +import static com.google.common.truth.Truth.assertThat; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Tests for {@link RouterFilter}. */ +@RunWith(JUnit4.class) +public class RouterFilterTest { + private static final RouterFilter.Provider FILTER_PROVIDER = new RouterFilter.Provider(); + + @Test + public void filterType_clientAndServer() { + assertThat(FILTER_PROVIDER.isClientFilter()).isTrue(); + assertThat(FILTER_PROVIDER.isServerFilter()).isTrue(); + } + +} diff --git a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java index d895cecdb10..f7309051f92 100644 --- a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java @@ -22,10 +22,12 @@ import static io.grpc.xds.FaultFilter.HEADER_ABORT_PERCENTAGE_KEY; import static io.grpc.xds.FaultFilter.HEADER_DELAY_KEY; import static io.grpc.xds.FaultFilter.HEADER_DELAY_PERCENTAGE_KEY; +import static org.mockito.AdditionalAnswers.delegatesTo; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.reset; @@ -130,6 +132,9 @@ public class XdsNameResolverTest { private static final String RDS_RESOURCE_NAME = "route-configuration.googleapis.com"; private static final String FAULT_FILTER_INSTANCE_NAME = "envoy.fault"; private static final String ROUTER_FILTER_INSTANCE_NAME = "envoy.router"; + private static final FaultFilter.Provider FAULT_FILTER_PROVIDER = new FaultFilter.Provider(); + private static final RouterFilter.Provider ROUTER_FILTER_PROVIDER = new RouterFilter.Provider(); + @Rule public final MockitoRule mocks = MockitoJUnit.rule(); private final SynchronizationContext syncContext = new SynchronizationContext( @@ -184,9 +189,19 @@ public void setUp() { originalEnableTimeout = XdsNameResolver.enableTimeout; XdsNameResolver.enableTimeout = true; + + // Replace FaultFilter.Provider with the one returning FaultFilter injected with mockRandom. + Filter.Provider faultFilterProvider = + mock(Filter.Provider.class, delegatesTo(FAULT_FILTER_PROVIDER)); + // Lenient: suppress [MockitoHint] Unused warning, only used in resolved_fault* tests. 
+ lenient() + .doReturn(new FaultFilter(mockRandom, new AtomicLong())) + .when(faultFilterProvider).newInstance(); + FilterRegistry filterRegistry = FilterRegistry.newRegistry().register( - new FaultFilter(mockRandom, new AtomicLong()), - RouterFilter.INSTANCE); + ROUTER_FILTER_PROVIDER, + faultFilterProvider); + resolver = new XdsNameResolver(targetUri, null, AUTHORITY, null, serviceConfigParser, syncContext, scheduler, xdsClientPoolFactory, mockRandom, filterRegistry, null, metricRecorder); diff --git a/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java b/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java index 66ac1475d8e..41f005ba583 100644 --- a/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java @@ -31,7 +31,6 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.mockito.Mockito.withSettings; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -53,7 +52,6 @@ import io.grpc.xds.EnvoyServerProtoData.FilterChain; import io.grpc.xds.Filter.FilterConfig; import io.grpc.xds.Filter.NamedFilterConfig; -import io.grpc.xds.Filter.ServerInterceptorBuilder; import io.grpc.xds.FilterChainMatchingProtocolNegotiators.FilterChainMatchingHandler.FilterChainSelector; import io.grpc.xds.VirtualHost.Route; import io.grpc.xds.VirtualHost.Route.RouteMatch; @@ -957,9 +955,11 @@ public void run() { new AtomicReference<>(routingConfig)).build()); when(serverCall.getAuthority()).thenReturn("not-match.google.com"); - Filter filter = mock(Filter.class); - when(filter.typeUrls()).thenReturn(new String[]{"filter-type-url"}); - filterRegistry.register(filter); + Filter.Provider filterProvider = mock(Filter.Provider.class); + when(filterProvider.typeUrls()).thenReturn(new String[]{"filter-type-url"}); + when(filterProvider.isServerFilter()).thenReturn(true); + filterRegistry.register(filterProvider); + ServerCallHandler next = mock(ServerCallHandler.class); interceptor.interceptCall(serverCall, new Metadata(), next); verify(next, never()).startCall(any(ServerCall.class), any(Metadata.class)); @@ -998,9 +998,11 @@ public void run() { when(serverCall.getMethodDescriptor()).thenReturn(createMethod("NotMatchMethod")); when(serverCall.getAuthority()).thenReturn("foo.google.com"); - Filter filter = mock(Filter.class); - when(filter.typeUrls()).thenReturn(new String[]{"filter-type-url"}); - filterRegistry.register(filter); + Filter.Provider filterProvider = mock(Filter.Provider.class); + when(filterProvider.typeUrls()).thenReturn(new String[]{"filter-type-url"}); + when(filterProvider.isServerFilter()).thenReturn(true); + filterRegistry.register(filterProvider); + ServerCallHandler next = mock(ServerCallHandler.class); interceptor.interceptCall(serverCall, new Metadata(), next); verify(next, never()).startCall(any(ServerCall.class), any(Metadata.class)); @@ -1044,9 +1046,11 @@ public void run() { when(serverCall.getMethodDescriptor()).thenReturn(createMethod("FooService/barMethod")); when(serverCall.getAuthority()).thenReturn("foo.google.com"); - Filter filter = mock(Filter.class); - when(filter.typeUrls()).thenReturn(new String[]{"filter-type-url"}); - filterRegistry.register(filter); + Filter.Provider filterProvider = mock(Filter.Provider.class); + when(filterProvider.typeUrls()).thenReturn(new String[]{"filter-type-url"}); + when(filterProvider.isServerFilter()).thenReturn(true); + 
filterRegistry.register(filterProvider); + ServerCallHandler next = mock(ServerCallHandler.class); interceptor.interceptCall(serverCall, new Metadata(), next); verify(next, never()).startCall(any(ServerCall.class), any(Metadata.class)); @@ -1113,10 +1117,14 @@ public void run() { RouteMatch.create( PathMatcher.fromPath("/FooService/barMethod", true), Collections.emptyList(), null); - Filter filter = mock(Filter.class, withSettings() - .extraInterfaces(ServerInterceptorBuilder.class)); - when(filter.typeUrls()).thenReturn(new String[]{"filter-type-url"}); - filterRegistry.register(filter); + + Filter filter = mock(Filter.class); + Filter.Provider filterProvider = mock(Filter.Provider.class); + when(filterProvider.typeUrls()).thenReturn(new String[]{"filter-type-url"}); + when(filterProvider.isServerFilter()).thenReturn(true); + when(filterProvider.newInstance()).thenReturn(filter); + filterRegistry.register(filterProvider); + FilterConfig f0 = mock(FilterConfig.class); FilterConfig f0Override = mock(FilterConfig.class); when(f0.typeUrl()).thenReturn("filter-type-url"); @@ -1137,10 +1145,8 @@ public ServerCall.Listener interceptCall(ServerCallof()); VirtualHost virtualHost = VirtualHost.create( @@ -1185,10 +1191,13 @@ public void run() { }); xdsClient.ldsResource.get(5, TimeUnit.SECONDS); - Filter filter = mock(Filter.class, withSettings() - .extraInterfaces(ServerInterceptorBuilder.class)); - when(filter.typeUrls()).thenReturn(new String[]{"filter-type-url"}); - filterRegistry.register(filter); + Filter filter = mock(Filter.class); + Filter.Provider filterProvider = mock(Filter.Provider.class); + when(filterProvider.typeUrls()).thenReturn(new String[]{"filter-type-url"}); + when(filterProvider.isServerFilter()).thenReturn(true); + when(filterProvider.newInstance()).thenReturn(filter); + filterRegistry.register(filterProvider); + FilterConfig f0 = mock(FilterConfig.class); FilterConfig f0Override = mock(FilterConfig.class); when(f0.typeUrl()).thenReturn("filter-type-url"); @@ -1209,10 +1218,8 @@ public ServerCall.Listener interceptCall(ServerCall Date: Sat, 24 May 2025 12:33:49 +0530 Subject: [PATCH 09/15] xds: xDS-based HTTP CONNECT configuration (#12099) --- xds/BUILD.bazel | 1 + .../java/io/grpc/xds/CdsLoadBalancer2.java | 4 +- .../grpc/xds/ClusterResolverLoadBalancer.java | 46 ++- .../ClusterResolverLoadBalancerProvider.java | 9 +- xds/src/main/java/io/grpc/xds/Endpoints.java | 20 +- .../io/grpc/xds/GcpAuthenticationFilter.java | 12 +- .../java/io/grpc/xds/MetadataRegistry.java | 60 +++- .../java/io/grpc/xds/XdsClusterResource.java | 126 ++++---- .../java/io/grpc/xds/XdsEndpointResource.java | 74 ++++- .../io/grpc/xds/CdsLoadBalancer2Test.java | 45 +-- .../xds/ClusterResolverLoadBalancerTest.java | 305 ++++++++++++------ .../grpc/xds/GrpcXdsClientImplDataTest.java | 120 ++++++- .../grpc/xds/GrpcXdsClientImplTestBase.java | 16 +- .../test/java/io/grpc/xds/XdsTestUtils.java | 8 +- xds/third_party/envoy/import.sh | 1 + .../v3/upstream_http_11_connect.proto | 38 +++ 16 files changed, 665 insertions(+), 220 deletions(-) create mode 100644 xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto diff --git a/xds/BUILD.bazel b/xds/BUILD.bazel index b235a79c526..53fac28b2da 100644 --- a/xds/BUILD.bazel +++ b/xds/BUILD.bazel @@ -85,6 +85,7 @@ java_proto_library( "@envoy_api//envoy/extensions/load_balancing_policies/ring_hash/v3:pkg", "@envoy_api//envoy/extensions/load_balancing_policies/round_robin/v3:pkg", 
"@envoy_api//envoy/extensions/load_balancing_policies/wrr_locality/v3:pkg", + "@envoy_api//envoy/extensions/transport_sockets/http_11_proxy/v3:pkg", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg", "@envoy_api//envoy/service/discovery/v3:pkg", "@envoy_api//envoy/service/load_stats/v3:pkg", diff --git a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer2.java b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer2.java index 04b7663fd35..bb44071a484 100644 --- a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer2.java +++ b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer2.java @@ -243,7 +243,9 @@ private void handleClusterDiscovered() { } ClusterResolverConfig config = new ClusterResolverConfig( - Collections.unmodifiableList(instances), configOrError.getConfig()); + Collections.unmodifiableList(instances), + configOrError.getConfig(), + root.result.isHttp11ProxyAvailable()); if (childLb == null) { childLb = lbRegistry.getProvider(CLUSTER_RESOLVER_POLICY_NAME).newLoadBalancer(helper); } diff --git a/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java b/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java index aff61cf7ada..0fb7cf15909 100644 --- a/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java @@ -25,6 +25,7 @@ import com.google.protobuf.Struct; import io.grpc.Attributes; import io.grpc.EquivalentAddressGroup; +import io.grpc.HttpConnectProxiedSocketAddress; import io.grpc.InternalLogId; import io.grpc.LoadBalancer; import io.grpc.LoadBalancerProvider; @@ -60,6 +61,8 @@ import io.grpc.xds.client.XdsClient.ResourceWatcher; import io.grpc.xds.client.XdsLogger; import io.grpc.xds.client.XdsLogger.XdsLogLevel; +import java.net.InetSocketAddress; +import java.net.SocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; @@ -431,8 +434,18 @@ public void run() { .set(XdsAttributes.ATTR_SERVER_WEIGHT, weight) .set(XdsAttributes.ATTR_ADDRESS_NAME, endpoint.hostname()) .build(); - EquivalentAddressGroup eag = new EquivalentAddressGroup( - endpoint.eag().getAddresses(), attr); + + EquivalentAddressGroup eag; + if (config.isHttp11ProxyAvailable()) { + List rewrittenAddresses = new ArrayList<>(); + for (SocketAddress addr : endpoint.eag().getAddresses()) { + rewrittenAddresses.add(rewriteAddress( + addr, endpoint.endpointMetadata(), localityLbInfo.localityMetadata())); + } + eag = new EquivalentAddressGroup(rewrittenAddresses, attr); + } else { + eag = new EquivalentAddressGroup(endpoint.eag().getAddresses(), attr); + } eag = AddressFilter.setPathFilter(eag, Arrays.asList(priorityName, localityName)); addresses.add(eag); } @@ -470,6 +483,35 @@ public void run() { new EndpointsUpdated().run(); } + private SocketAddress rewriteAddress(SocketAddress addr, + ImmutableMap endpointMetadata, + ImmutableMap localityMetadata) { + if (!(addr instanceof InetSocketAddress)) { + return addr; + } + + SocketAddress proxyAddress; + try { + proxyAddress = (SocketAddress) endpointMetadata.get( + "envoy.http11_proxy_transport_socket.proxy_address"); + if (proxyAddress == null) { + proxyAddress = (SocketAddress) localityMetadata.get( + "envoy.http11_proxy_transport_socket.proxy_address"); + } + } catch (ClassCastException e) { + return addr; + } + + if (proxyAddress == null) { + return addr; + } + + return HttpConnectProxiedSocketAddress.newBuilder() + .setTargetAddress((InetSocketAddress) addr) + .setProxyAddress(proxyAddress) + .build(); + } + private List 
generatePriorityNames(String name, Map localityLbEndpoints) { TreeMap> todo = new TreeMap<>(); diff --git a/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancerProvider.java b/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancerProvider.java index 2301cb670e0..b5dcb271368 100644 --- a/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancerProvider.java +++ b/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancerProvider.java @@ -74,10 +74,17 @@ static final class ClusterResolverConfig { final List discoveryMechanisms; // GracefulSwitch configuration final Object lbConfig; + private final boolean isHttp11ProxyAvailable; - ClusterResolverConfig(List discoveryMechanisms, Object lbConfig) { + ClusterResolverConfig(List discoveryMechanisms, Object lbConfig, + boolean isHttp11ProxyAvailable) { this.discoveryMechanisms = checkNotNull(discoveryMechanisms, "discoveryMechanisms"); this.lbConfig = checkNotNull(lbConfig, "lbConfig"); + this.isHttp11ProxyAvailable = isHttp11ProxyAvailable; + } + + boolean isHttp11ProxyAvailable() { + return isHttp11ProxyAvailable; } @Override diff --git a/xds/src/main/java/io/grpc/xds/Endpoints.java b/xds/src/main/java/io/grpc/xds/Endpoints.java index 7d7aa3e386d..b0d97d42c11 100644 --- a/xds/src/main/java/io/grpc/xds/Endpoints.java +++ b/xds/src/main/java/io/grpc/xds/Endpoints.java @@ -21,6 +21,7 @@ import com.google.auto.value.AutoValue; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import io.grpc.EquivalentAddressGroup; import java.net.InetSocketAddress; import java.util.List; @@ -41,11 +42,13 @@ abstract static class LocalityLbEndpoints { // Locality's priority level. abstract int priority(); + abstract ImmutableMap localityMetadata(); + static LocalityLbEndpoints create(List endpoints, int localityWeight, - int priority) { + int priority, ImmutableMap localityMetadata) { checkArgument(localityWeight > 0, "localityWeight must be greater than 0"); return new AutoValue_Endpoints_LocalityLbEndpoints( - ImmutableList.copyOf(endpoints), localityWeight, priority); + ImmutableList.copyOf(endpoints), localityWeight, priority, localityMetadata); } } @@ -63,17 +66,20 @@ abstract static class LbEndpoint { abstract String hostname(); + abstract ImmutableMap endpointMetadata(); + static LbEndpoint create(EquivalentAddressGroup eag, int loadBalancingWeight, - boolean isHealthy, String hostname) { - return new AutoValue_Endpoints_LbEndpoint(eag, loadBalancingWeight, isHealthy, hostname); + boolean isHealthy, String hostname, ImmutableMap endpointMetadata) { + return new AutoValue_Endpoints_LbEndpoint( + eag, loadBalancingWeight, isHealthy, hostname, endpointMetadata); } // Only for testing. 
@VisibleForTesting - static LbEndpoint create( - String address, int port, int loadBalancingWeight, boolean isHealthy, String hostname) { + static LbEndpoint create(String address, int port, int loadBalancingWeight, boolean isHealthy, + String hostname, ImmutableMap endpointMetadata) { return LbEndpoint.create(new EquivalentAddressGroup(new InetSocketAddress(address, port)), - loadBalancingWeight, isHealthy, hostname); + loadBalancingWeight, isHealthy, hostname, endpointMetadata); } } diff --git a/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java b/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java index 7ed617c9843..41687817c47 100644 --- a/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java +++ b/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java @@ -36,6 +36,7 @@ import io.grpc.Status; import io.grpc.auth.MoreCallCredentials; import io.grpc.xds.MetadataRegistry.MetadataValueParser; +import io.grpc.xds.client.XdsResourceType.ResourceInvalidException; import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.ScheduledExecutorService; @@ -240,11 +241,16 @@ public String getTypeUrl() { } @Override - public String parse(Any any) throws InvalidProtocolBufferException { - Audience audience = any.unpack(Audience.class); + public String parse(Any any) throws ResourceInvalidException { + Audience audience; + try { + audience = any.unpack(Audience.class); + } catch (InvalidProtocolBufferException ex) { + throw new ResourceInvalidException("Invalid Resource in address proto", ex); + } String url = audience.getUrl(); if (url.isEmpty()) { - throw new InvalidProtocolBufferException( + throw new ResourceInvalidException( "Audience URL is empty. Metadata value must contain a valid URL."); } return url; diff --git a/xds/src/main/java/io/grpc/xds/MetadataRegistry.java b/xds/src/main/java/io/grpc/xds/MetadataRegistry.java index 8243b6a6f0f..b79a61a261a 100644 --- a/xds/src/main/java/io/grpc/xds/MetadataRegistry.java +++ b/xds/src/main/java/io/grpc/xds/MetadataRegistry.java @@ -17,9 +17,14 @@ package io.grpc.xds; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableMap; import com.google.protobuf.Any; -import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Struct; +import io.envoyproxy.envoy.config.core.v3.Metadata; import io.grpc.xds.GcpAuthenticationFilter.AudienceMetadataParser; +import io.grpc.xds.XdsEndpointResource.AddressMetadataParser; +import io.grpc.xds.client.XdsResourceType.ResourceInvalidException; +import io.grpc.xds.internal.ProtobufJsonConverter; import java.util.HashMap; import java.util.Map; @@ -36,6 +41,7 @@ final class MetadataRegistry { private MetadataRegistry() { registerParser(new AudienceMetadataParser()); + registerParser(new AddressMetadataParser()); } static MetadataRegistry getInstance() { @@ -55,6 +61,54 @@ void removeParser(MetadataValueParser parser) { supportedParsers.remove(parser.getTypeUrl()); } + /** + * Parses cluster metadata into a structured map. + * + *
<p>
Values in {@code typed_filter_metadata} take precedence over + * {@code filter_metadata} when keys overlap, following Envoy API behavior. See + * + * Envoy metadata documentation for details. + * + * @param metadata the {@link Metadata} containing the fields to parse. + * @return an immutable map of parsed metadata. + * @throws ResourceInvalidException if parsing {@code typed_filter_metadata} fails. + */ + public ImmutableMap parseMetadata(Metadata metadata) + throws ResourceInvalidException { + ImmutableMap.Builder parsedMetadata = ImmutableMap.builder(); + + // Process typed_filter_metadata + for (Map.Entry entry : metadata.getTypedFilterMetadataMap().entrySet()) { + String key = entry.getKey(); + Any value = entry.getValue(); + MetadataValueParser parser = findParser(value.getTypeUrl()); + if (parser != null) { + try { + Object parsedValue = parser.parse(value); + parsedMetadata.put(key, parsedValue); + } catch (ResourceInvalidException e) { + throw new ResourceInvalidException( + String.format("Failed to parse metadata key: %s, type: %s. Error: %s", + key, value.getTypeUrl(), e.getMessage()), e); + } + } + } + // building once to reuse in the next loop + ImmutableMap intermediateParsedMetadata = parsedMetadata.build(); + + // Process filter_metadata for remaining keys + for (Map.Entry entry : metadata.getFilterMetadataMap().entrySet()) { + String key = entry.getKey(); + if (!intermediateParsedMetadata.containsKey(key)) { + Struct structValue = entry.getValue(); + Object jsonValue = ProtobufJsonConverter.convertToJson(structValue); + parsedMetadata.put(key, jsonValue); + } + } + + return parsedMetadata.build(); + } + interface MetadataValueParser { String getTypeUrl(); @@ -64,8 +118,8 @@ interface MetadataValueParser { * * @param any the {@link Any} object to parse. * @return the parsed metadata value. - * @throws InvalidProtocolBufferException if the parsing fails. + * @throws ResourceInvalidException if the parsing fails. 
*/ - Object parse(Any any) throws InvalidProtocolBufferException; + Object parse(Any any) throws ResourceInvalidException; } } diff --git a/xds/src/main/java/io/grpc/xds/XdsClusterResource.java b/xds/src/main/java/io/grpc/xds/XdsClusterResource.java index 626d61c1f55..cfc74f3ca70 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClusterResource.java +++ b/xds/src/main/java/io/grpc/xds/XdsClusterResource.java @@ -25,7 +25,6 @@ import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import com.google.protobuf.Any; import com.google.protobuf.Duration; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Message; @@ -33,10 +32,11 @@ import com.google.protobuf.util.Durations; import io.envoyproxy.envoy.config.cluster.v3.CircuitBreakers.Thresholds; import io.envoyproxy.envoy.config.cluster.v3.Cluster; -import io.envoyproxy.envoy.config.core.v3.Metadata; import io.envoyproxy.envoy.config.core.v3.RoutingPriority; import io.envoyproxy.envoy.config.core.v3.SocketAddress; +import io.envoyproxy.envoy.config.core.v3.TransportSocket; import io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment; +import io.envoyproxy.envoy.extensions.transport_sockets.http_11_proxy.v3.Http11ProxyUpstreamTransport; import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext; import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext; import io.grpc.LoadBalancerRegistry; @@ -46,15 +46,12 @@ import io.grpc.internal.ServiceConfigUtil.LbConfig; import io.grpc.xds.EnvoyServerProtoData.OutlierDetection; import io.grpc.xds.EnvoyServerProtoData.UpstreamTlsContext; -import io.grpc.xds.MetadataRegistry.MetadataValueParser; import io.grpc.xds.XdsClusterResource.CdsUpdate; import io.grpc.xds.client.XdsClient.ResourceUpdate; import io.grpc.xds.client.XdsResourceType; -import io.grpc.xds.internal.ProtobufJsonConverter; import io.grpc.xds.internal.security.CommonTlsContextUtil; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Set; import javax.annotation.Nullable; @@ -67,6 +64,8 @@ class XdsClusterResource extends XdsResourceType { @VisibleForTesting public static boolean enableSystemRootCerts = GrpcUtil.getFlag("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false); + static boolean isEnabledXdsHttpConnect = + GrpcUtil.getFlag("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false); @VisibleForTesting static final String AGGREGATE_CLUSTER_TYPE_NAME = "envoy.clusters.aggregate"; @@ -78,6 +77,9 @@ class XdsClusterResource extends XdsResourceType { "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; private static final String TYPE_URL_UPSTREAM_TLS_CONTEXT_V2 = "type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext"; + static final String TRANSPORT_SOCKET_NAME_HTTP11_PROXY = + "type.googleapis.com/envoy.extensions.transport_sockets.http_11_proxy.v3" + + ".Http11ProxyUpstreamTransport"; private final LoadBalancerRegistry loadBalancerRegistry = LoadBalancerRegistry.getDefaultRegistry(); @@ -177,10 +179,11 @@ static CdsUpdate processCluster(Cluster cluster, ImmutableMap.copyOf(cluster.getMetadata().getFilterMetadataMap())); try { + MetadataRegistry registry = MetadataRegistry.getInstance(); ImmutableMap parsedFilterMetadata = - parseClusterMetadata(cluster.getMetadata()); + registry.parseMetadata(cluster.getMetadata()); updateBuilder.parsedMetadata(parsedFilterMetadata); - } catch (InvalidProtocolBufferException e) { + 
} catch (ResourceInvalidException e) { throw new ResourceInvalidException( "Failed to parse xDS filter metadata for cluster '" + cluster.getName() + "': " + e.getMessage(), e); @@ -189,49 +192,6 @@ static CdsUpdate processCluster(Cluster cluster, return updateBuilder.build(); } - /** - * Parses cluster metadata into a structured map. - * - *
<p>
Values in {@code typed_filter_metadata} take precedence over - * {@code filter_metadata} when keys overlap, following Envoy API behavior. See - * - * Envoy metadata documentation for details. - * - * @param metadata the {@link Metadata} containing the fields to parse. - * @return an immutable map of parsed metadata. - * @throws InvalidProtocolBufferException if parsing {@code typed_filter_metadata} fails. - */ - private static ImmutableMap parseClusterMetadata(Metadata metadata) - throws InvalidProtocolBufferException { - ImmutableMap.Builder parsedMetadata = ImmutableMap.builder(); - - MetadataRegistry registry = MetadataRegistry.getInstance(); - // Process typed_filter_metadata - for (Map.Entry entry : metadata.getTypedFilterMetadataMap().entrySet()) { - String key = entry.getKey(); - Any value = entry.getValue(); - MetadataValueParser parser = registry.findParser(value.getTypeUrl()); - if (parser != null) { - Object parsedValue = parser.parse(value); - parsedMetadata.put(key, parsedValue); - } - } - // building once to reuse in the next loop - ImmutableMap intermediateParsedMetadata = parsedMetadata.build(); - - // Process filter_metadata for remaining keys - for (Map.Entry entry : metadata.getFilterMetadataMap().entrySet()) { - String key = entry.getKey(); - if (!intermediateParsedMetadata.containsKey(key)) { - Struct structValue = entry.getValue(); - Object jsonValue = ProtobufJsonConverter.convertToJson(structValue); - parsedMetadata.put(key, jsonValue); - } - } - - return parsedMetadata.build(); - } - private static StructOrError parseAggregateCluster(Cluster cluster) { String clusterName = cluster.getName(); Cluster.CustomClusterType customType = cluster.getClusterType(); @@ -259,6 +219,7 @@ private static StructOrError parseNonAggregateCluster( Long maxConcurrentRequests = null; UpstreamTlsContext upstreamTlsContext = null; OutlierDetection outlierDetection = null; + boolean isHttp11ProxyAvailable = false; if (cluster.hasLrsServer()) { if (!cluster.getLrsServer().hasSelf()) { return StructOrError.fromError( @@ -281,17 +242,43 @@ private static StructOrError parseNonAggregateCluster( return StructOrError.fromError("Cluster " + clusterName + ": transport-socket-matches not supported."); } - if (cluster.hasTransportSocket()) { - if (!TRANSPORT_SOCKET_NAME_TLS.equals(cluster.getTransportSocket().getName())) { - return StructOrError.fromError("transport-socket with name " - + cluster.getTransportSocket().getName() + " not supported."); + boolean hasTransportSocket = cluster.hasTransportSocket(); + TransportSocket transportSocket = cluster.getTransportSocket(); + + if (hasTransportSocket && !TRANSPORT_SOCKET_NAME_TLS.equals(transportSocket.getName()) + && !(isEnabledXdsHttpConnect + && TRANSPORT_SOCKET_NAME_HTTP11_PROXY.equals(transportSocket.getName()))) { + return StructOrError.fromError( + "transport-socket with name " + transportSocket.getName() + " not supported."); + } + + if (hasTransportSocket && isEnabledXdsHttpConnect + && TRANSPORT_SOCKET_NAME_HTTP11_PROXY.equals(transportSocket.getName())) { + isHttp11ProxyAvailable = true; + try { + Http11ProxyUpstreamTransport wrappedTransportSocket = transportSocket + .getTypedConfig().unpack(io.envoyproxy.envoy.extensions.transport_sockets + .http_11_proxy.v3.Http11ProxyUpstreamTransport.class); + hasTransportSocket = wrappedTransportSocket.hasTransportSocket(); + transportSocket = wrappedTransportSocket.getTransportSocket(); + } catch (InvalidProtocolBufferException e) { + return StructOrError.fromError( + "Cluster " + clusterName + ": 
malformed Http11ProxyUpstreamTransport: " + e); + } catch (ClassCastException e) { + return StructOrError.fromError( + "Cluster " + clusterName + + ": invalid transport_socket type in Http11ProxyUpstreamTransport"); } + } + + if (hasTransportSocket && TRANSPORT_SOCKET_NAME_TLS.equals(transportSocket.getName())) { try { upstreamTlsContext = UpstreamTlsContext.fromEnvoyProtoUpstreamTlsContext( validateUpstreamTlsContext( - unpackCompatibleType(cluster.getTransportSocket().getTypedConfig(), - io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.class, - TYPE_URL_UPSTREAM_TLS_CONTEXT, TYPE_URL_UPSTREAM_TLS_CONTEXT_V2), + unpackCompatibleType(transportSocket.getTypedConfig(), + io.envoyproxy.envoy.extensions + .transport_sockets.tls.v3.UpstreamTlsContext.class, + TYPE_URL_UPSTREAM_TLS_CONTEXT, TYPE_URL_UPSTREAM_TLS_CONTEXT_V2), certProviderInstances)); } catch (InvalidProtocolBufferException | ResourceInvalidException e) { return StructOrError.fromError( @@ -329,9 +316,10 @@ private static StructOrError parseNonAggregateCluster( return StructOrError.fromError( "EDS service_name must be set when Cluster resource has an xdstp name"); } + return StructOrError.fromStruct(CdsUpdate.forEds( clusterName, edsServiceName, lrsServerInfo, maxConcurrentRequests, upstreamTlsContext, - outlierDetection)); + outlierDetection, isHttp11ProxyAvailable)); } else if (type.equals(Cluster.DiscoveryType.LOGICAL_DNS)) { if (!cluster.hasLoadAssignment()) { return StructOrError.fromError( @@ -366,7 +354,8 @@ private static StructOrError parseNonAggregateCluster( String dnsHostName = String.format( Locale.US, "%s:%d", socketAddress.getAddress(), socketAddress.getPortValue()); return StructOrError.fromStruct(CdsUpdate.forLogicalDns( - clusterName, dnsHostName, lrsServerInfo, maxConcurrentRequests, upstreamTlsContext)); + clusterName, dnsHostName, lrsServerInfo, maxConcurrentRequests, + upstreamTlsContext, isHttp11ProxyAvailable)); } return StructOrError.fromError( "Cluster " + clusterName + ": unsupported built-in discovery type: " + type); @@ -620,6 +609,8 @@ abstract static class CdsUpdate implements ResourceUpdate { @Nullable abstract UpstreamTlsContext upstreamTlsContext(); + abstract boolean isHttp11ProxyAvailable(); + // List of underlying clusters making of this aggregate cluster. // Only valid for AGGREGATE cluster. 
@Nullable @@ -640,7 +631,8 @@ private static Builder newBuilder(String clusterName) { .maxRingSize(0) .choiceCount(0) .filterMetadata(ImmutableMap.of()) - .parsedMetadata(ImmutableMap.of()); + .parsedMetadata(ImmutableMap.of()) + .isHttp11ProxyAvailable(false); } static Builder forAggregate(String clusterName, List prioritizedClusterNames) { @@ -653,26 +645,30 @@ static Builder forAggregate(String clusterName, List prioritizedClusterN static Builder forEds(String clusterName, @Nullable String edsServiceName, @Nullable ServerInfo lrsServerInfo, @Nullable Long maxConcurrentRequests, @Nullable UpstreamTlsContext upstreamTlsContext, - @Nullable OutlierDetection outlierDetection) { + @Nullable OutlierDetection outlierDetection, + boolean isHttp11ProxyAvailable) { return newBuilder(clusterName) .clusterType(ClusterType.EDS) .edsServiceName(edsServiceName) .lrsServerInfo(lrsServerInfo) .maxConcurrentRequests(maxConcurrentRequests) .upstreamTlsContext(upstreamTlsContext) - .outlierDetection(outlierDetection); + .outlierDetection(outlierDetection) + .isHttp11ProxyAvailable(isHttp11ProxyAvailable); } static Builder forLogicalDns(String clusterName, String dnsHostName, @Nullable ServerInfo lrsServerInfo, @Nullable Long maxConcurrentRequests, - @Nullable UpstreamTlsContext upstreamTlsContext) { + @Nullable UpstreamTlsContext upstreamTlsContext, + boolean isHttp11ProxyAvailable) { return newBuilder(clusterName) .clusterType(ClusterType.LOGICAL_DNS) .dnsHostName(dnsHostName) .lrsServerInfo(lrsServerInfo) .maxConcurrentRequests(maxConcurrentRequests) - .upstreamTlsContext(upstreamTlsContext); + .upstreamTlsContext(upstreamTlsContext) + .isHttp11ProxyAvailable(isHttp11ProxyAvailable); } enum ClusterType { @@ -749,6 +745,8 @@ Builder leastRequestLbPolicy(Integer choiceCount) { // Private, use one of the static factory methods instead. protected abstract Builder maxConcurrentRequests(Long maxConcurrentRequests); + protected abstract Builder isHttp11ProxyAvailable(boolean isHttp11ProxyAvailable); + // Private, use one of the static factory methods instead. 
protected abstract Builder upstreamTlsContext(UpstreamTlsContext upstreamTlsContext); diff --git a/xds/src/main/java/io/grpc/xds/XdsEndpointResource.java b/xds/src/main/java/io/grpc/xds/XdsEndpointResource.java index 6a3cd35bd59..11111fa51ca 100644 --- a/xds/src/main/java/io/grpc/xds/XdsEndpointResource.java +++ b/xds/src/main/java/io/grpc/xds/XdsEndpointResource.java @@ -20,9 +20,14 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableMap; +import com.google.common.net.InetAddresses; +import com.google.protobuf.Any; +import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Message; import io.envoyproxy.envoy.config.core.v3.Address; import io.envoyproxy.envoy.config.core.v3.HealthStatus; +import io.envoyproxy.envoy.config.core.v3.SocketAddress; import io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment; import io.envoyproxy.envoy.config.endpoint.v3.Endpoint; import io.envoyproxy.envoy.type.v3.FractionalPercent; @@ -30,6 +35,7 @@ import io.grpc.internal.GrpcUtil; import io.grpc.xds.Endpoints.DropOverload; import io.grpc.xds.Endpoints.LocalityLbEndpoints; +import io.grpc.xds.MetadataRegistry.MetadataValueParser; import io.grpc.xds.XdsEndpointResource.EdsUpdate; import io.grpc.xds.client.Locality; import io.grpc.xds.client.XdsClient.ResourceUpdate; @@ -185,7 +191,8 @@ private static int getRatePerMillion(FractionalPercent percent) { @VisibleForTesting @Nullable static StructOrError parseLocalityLbEndpoints( - io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto) { + io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto) + throws ResourceInvalidException { // Filter out localities without or with 0 weight. if (!proto.hasLoadBalancingWeight() || proto.getLoadBalancingWeight().getValue() < 1) { return null; @@ -193,6 +200,15 @@ static StructOrError parseLocalityLbEndpoints( if (proto.getPriority() < 0) { return StructOrError.fromError("negative priority"); } + + ImmutableMap localityMetadata; + MetadataRegistry registry = MetadataRegistry.getInstance(); + try { + localityMetadata = registry.parseMetadata(proto.getMetadata()); + } catch (ResourceInvalidException e) { + throw new ResourceInvalidException("Failed to parse Locality Endpoint metadata: " + + e.getMessage(), e); + } List endpoints = new ArrayList<>(proto.getLbEndpointsCount()); for (io.envoyproxy.envoy.config.endpoint.v3.LbEndpoint endpoint : proto.getLbEndpointsList()) { // The endpoint field of each lb_endpoints must be set. 
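For context on the metadata plumbing in the surrounding hunks: parseLocalityLbEndpoints now runs both locality-level and endpoint-level metadata through MetadataRegistry.parseMetadata, and the new AddressMetadataParser turns a typed envoy.config.core.v3.Address value into a java.net.InetSocketAddress. The following sketch is not part of the patch; the class name is hypothetical, it assumes the io.grpc.xds package because MetadataRegistry is package-private, and the metadata key shown is the one ClusterResolverLoadBalancer looks up when rewriting addresses for HTTP CONNECT.

package io.grpc.xds;  // MetadataRegistry is package-private, so this sketch must live in its package

import com.google.common.collect.ImmutableMap;
import com.google.protobuf.Any;
import io.envoyproxy.envoy.config.core.v3.Address;
import io.envoyproxy.envoy.config.core.v3.Metadata;
import io.envoyproxy.envoy.config.core.v3.SocketAddress;
import io.grpc.xds.client.XdsResourceType.ResourceInvalidException;

final class EndpointProxyMetadataSketch {  // hypothetical helper, for illustration only
  static ImmutableMap<String, Object> parseProxyMetadata() throws ResourceInvalidException {
    // Endpoint (or locality) metadata carrying the HTTP CONNECT proxy address as a typed
    // value; the type URL of the packed Address is what selects AddressMetadataParser.
    Metadata endpointMetadata = Metadata.newBuilder()
        .putTypedFilterMetadata(
            "envoy.http11_proxy_transport_socket.proxy_address",
            Any.pack(Address.newBuilder()
                .setSocketAddress(SocketAddress.newBuilder()
                    .setAddress("10.0.0.1")
                    .setPortValue(3128))
                .build()))
        .build();
    // The parsed map then holds an InetSocketAddress under that key, which the cluster
    // resolver can wrap into an HttpConnectProxiedSocketAddress.
    return MetadataRegistry.getInstance().parseMetadata(endpointMetadata);
  }
}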
@@ -200,6 +216,13 @@ static StructOrError parseLocalityLbEndpoints( if (!endpoint.hasEndpoint() || !endpoint.getEndpoint().hasAddress()) { return StructOrError.fromError("LbEndpoint with no endpoint/address"); } + ImmutableMap endpointMetadata; + try { + endpointMetadata = registry.parseMetadata(endpoint.getMetadata()); + } catch (ResourceInvalidException e) { + throw new ResourceInvalidException("Failed to parse Endpoint metadata: " + + e.getMessage(), e); + } List addresses = new ArrayList<>(); addresses.add(getInetSocketAddress(endpoint.getEndpoint().getAddress())); @@ -214,10 +237,12 @@ static StructOrError parseLocalityLbEndpoints( endpoints.add(Endpoints.LbEndpoint.create( new EquivalentAddressGroup(addresses), endpoint.getLoadBalancingWeight().getValue(), isHealthy, - endpoint.getEndpoint().getHostname())); + endpoint.getEndpoint().getHostname(), + endpointMetadata)); } return StructOrError.fromStruct(Endpoints.LocalityLbEndpoints.create( - endpoints, proto.getLoadBalancingWeight().getValue(), proto.getPriority())); + endpoints, proto.getLoadBalancingWeight().getValue(), + proto.getPriority(), localityMetadata)); } private static InetSocketAddress getInetSocketAddress(Address address) { @@ -270,4 +295,47 @@ public String toString() { .toString(); } } + + public static class AddressMetadataParser implements MetadataValueParser { + + @Override + public String getTypeUrl() { + return "type.googleapis.com/envoy.config.core.v3.Address"; + } + + @Override + public java.net.SocketAddress parse(Any any) throws ResourceInvalidException { + SocketAddress socketAddress; + try { + socketAddress = any.unpack(Address.class).getSocketAddress(); + } catch (InvalidProtocolBufferException ex) { + throw new ResourceInvalidException("Invalid Resource in address proto", ex); + } + validateAddress(socketAddress); + + String ip = socketAddress.getAddress(); + int port = socketAddress.getPortValue(); + + try { + return new InetSocketAddress(InetAddresses.forString(ip), port); + } catch (IllegalArgumentException e) { + throw createException("Invalid IP address or port: " + ip + ":" + port); + } + } + + private void validateAddress(SocketAddress socketAddress) throws ResourceInvalidException { + if (socketAddress.getAddress().isEmpty()) { + throw createException("Address field is empty or invalid."); + } + long port = Integer.toUnsignedLong(socketAddress.getPortValue()); + if (port > 65535) { + throw createException(String.format("Port value %d out of range 1-65535.", port)); + } + } + + private ResourceInvalidException createException(String message) { + return new ResourceInvalidException( + "Failed to parse envoy.config.core.v3.Address: " + message); + } + } } diff --git a/xds/src/test/java/io/grpc/xds/CdsLoadBalancer2Test.java b/xds/src/test/java/io/grpc/xds/CdsLoadBalancer2Test.java index 82a61e79abf..479bde76ce5 100644 --- a/xds/src/test/java/io/grpc/xds/CdsLoadBalancer2Test.java +++ b/xds/src/test/java/io/grpc/xds/CdsLoadBalancer2Test.java @@ -179,7 +179,7 @@ public void tearDown() { public void discoverTopLevelEdsCluster() { CdsUpdate update = CdsUpdate.forEds(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, upstreamTlsContext, - outlierDetection) + outlierDetection, false) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(childBalancers).hasSize(1); @@ -198,7 +198,8 @@ public void discoverTopLevelEdsCluster() { @Test public void discoverTopLevelLogicalDnsCluster() { CdsUpdate update = - CdsUpdate.forLogicalDns(CLUSTER, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, 
upstreamTlsContext) + CdsUpdate.forLogicalDns(CLUSTER, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, upstreamTlsContext, + false) .leastRequestLbPolicy(3).build(); xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(childBalancers).hasSize(1); @@ -232,7 +233,7 @@ public void nonAggregateCluster_resourceNotExist_returnErrorPicker() { @Test public void nonAggregateCluster_resourceUpdate() { CdsUpdate update = - CdsUpdate.forEds(CLUSTER, null, null, 100L, upstreamTlsContext, outlierDetection) + CdsUpdate.forEds(CLUSTER, null, null, 100L, upstreamTlsContext, outlierDetection, false) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(childBalancers).hasSize(1); @@ -243,7 +244,7 @@ public void nonAggregateCluster_resourceUpdate() { 100L, upstreamTlsContext, outlierDetection); update = CdsUpdate.forEds(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, 200L, null, - outlierDetection).roundRobinLbPolicy().build(); + outlierDetection, false).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); childLbConfig = (ClusterResolverConfig) childBalancer.config; instance = Iterables.getOnlyElement(childLbConfig.discoveryMechanisms); @@ -254,7 +255,8 @@ public void nonAggregateCluster_resourceUpdate() { @Test public void nonAggregateCluster_resourceRevoked() { CdsUpdate update = - CdsUpdate.forLogicalDns(CLUSTER, DNS_HOST_NAME, null, 100L, upstreamTlsContext) + CdsUpdate.forLogicalDns(CLUSTER, DNS_HOST_NAME, null, 100L, upstreamTlsContext, + false) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(childBalancers).hasSize(1); @@ -298,16 +300,16 @@ public void discoverAggregateCluster() { CLUSTER, cluster1, cluster2, cluster3, cluster4); assertThat(childBalancers).isEmpty(); CdsUpdate update3 = CdsUpdate.forEds(cluster3, EDS_SERVICE_NAME, LRS_SERVER_INFO, 200L, - upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster3, update3); assertThat(childBalancers).isEmpty(); CdsUpdate update2 = - CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, null, 100L, null) + CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, null, 100L, null, false) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster2, update2); assertThat(childBalancers).isEmpty(); CdsUpdate update4 = - CdsUpdate.forEds(cluster4, null, LRS_SERVER_INFO, 300L, null, outlierDetection) + CdsUpdate.forEds(cluster4, null, LRS_SERVER_INFO, 300L, null, outlierDetection, false) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster4, update4); assertThat(childBalancers).hasSize(1); // all non-aggregate clusters discovered @@ -362,10 +364,11 @@ public void aggregateCluster_descendantClustersRevoked() { xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(xdsClient.watchers.keySet()).containsExactly(CLUSTER, cluster1, cluster2); CdsUpdate update1 = CdsUpdate.forEds(cluster1, EDS_SERVICE_NAME, LRS_SERVER_INFO, 200L, - upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster1, update1); CdsUpdate update2 = - CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, null) + CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, null, + false) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster2, update2); FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); 
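The false arguments threaded through the CdsUpdate factory calls in these test hunks correspond to the new isHttp11ProxyAvailable bit, which XdsClusterResource sets only when the GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT flag is enabled and the cluster's transport socket is the http_11_proxy wrapper. Below is a rough sketch, not taken from the patch, of the CDS Cluster shape that flips the bit on; the class name is hypothetical and the inner UpstreamTlsContext is left empty purely to show the nesting (a real config would carry a populated CommonTlsContext).

import com.google.protobuf.Any;
import io.envoyproxy.envoy.config.cluster.v3.Cluster;
import io.envoyproxy.envoy.config.core.v3.TransportSocket;
import io.envoyproxy.envoy.extensions.transport_sockets.http_11_proxy.v3.Http11ProxyUpstreamTransport;
import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext;

final class Http11ConnectClusterSketch {  // hypothetical, for illustration only
  // Same string as XdsClusterResource.TRANSPORT_SOCKET_NAME_HTTP11_PROXY; the patch
  // compares it against TransportSocket.getName().
  static final String HTTP11_PROXY_TRANSPORT_NAME =
      "type.googleapis.com/envoy.extensions.transport_sockets.http_11_proxy.v3"
          + ".Http11ProxyUpstreamTransport";

  static Cluster withHttpConnect(Cluster edsCluster) {
    // The proxy transport may wrap an inner TLS transport socket; the parser unwraps it,
    // records isHttp11ProxyAvailable=true, and then validates the inner socket as before.
    TransportSocket innerTls = TransportSocket.newBuilder()
        .setName("envoy.transport_sockets.tls")  // TLS transport socket name grpc-java accepts
        .setTypedConfig(Any.pack(UpstreamTlsContext.getDefaultInstance()))
        .build();
    Http11ProxyUpstreamTransport proxyTransport = Http11ProxyUpstreamTransport.newBuilder()
        .setTransportSocket(innerTls)
        .build();
    return edsCluster.toBuilder()
        .setTransportSocket(TransportSocket.newBuilder()
            .setName(HTTP11_PROXY_TRANSPORT_NAME)
            .setTypedConfig(Any.pack(proxyTransport)))
        .build();
  }
}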
@@ -412,10 +415,11 @@ public void aggregateCluster_rootClusterRevoked() { xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(xdsClient.watchers.keySet()).containsExactly(CLUSTER, cluster1, cluster2); CdsUpdate update1 = CdsUpdate.forEds(cluster1, EDS_SERVICE_NAME, LRS_SERVER_INFO, 200L, - upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster1, update1); CdsUpdate update2 = - CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, null) + CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, null, + false) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster2, update2); FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); @@ -467,7 +471,7 @@ public void aggregateCluster_intermediateClusterChanges() { xdsClient.deliverCdsUpdate(cluster2, update2); assertThat(xdsClient.watchers.keySet()).containsExactly(CLUSTER, cluster2, cluster3); CdsUpdate update3 = CdsUpdate.forEds(cluster3, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, - upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster3, update3); FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); ClusterResolverConfig childLbConfig = (ClusterResolverConfig) childBalancer.config; @@ -518,7 +522,7 @@ public void aggregateCluster_withLoops() { reset(helper); CdsUpdate update3 = CdsUpdate.forEds(cluster3, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, - upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster3, update3); verify(helper).updateBalancingState( eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture()); @@ -553,7 +557,7 @@ public void aggregateCluster_withLoops_afterEds() { .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster2, update2); CdsUpdate update3 = CdsUpdate.forEds(cluster3, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, - upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster3, update3); // cluster2 (aggr.) 
-> [cluster3 (EDS)] @@ -602,7 +606,7 @@ public void aggregateCluster_duplicateChildren() { // Define EDS cluster CdsUpdate update3 = CdsUpdate.forEds(cluster3, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, - upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster3, update3); // cluster4 (agg) -> [cluster3 (EDS)] with dups (3 copies) @@ -649,7 +653,8 @@ public void aggregateCluster_discoveryErrorAfterChildLbCreated_propagateToChildL .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); CdsUpdate update1 = - CdsUpdate.forLogicalDns(cluster1, DNS_HOST_NAME, LRS_SERVER_INFO, 200L, null) + CdsUpdate.forLogicalDns(cluster1, DNS_HOST_NAME, LRS_SERVER_INFO, 200L, null, + false) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster1, update1); FakeLoadBalancer childLb = Iterables.getOnlyElement(childBalancers); @@ -676,7 +681,7 @@ public void handleNameResolutionErrorFromUpstream_beforeChildLbCreated_returnErr @Test public void handleNameResolutionErrorFromUpstream_afterChildLbCreated_fallThrough() { CdsUpdate update = CdsUpdate.forEds(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, - upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); assertThat(childBalancer.shutdown).isFalse(); @@ -692,7 +697,7 @@ public void unknownLbProvider() { try { xdsClient.deliverCdsUpdate(CLUSTER, CdsUpdate.forEds(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, upstreamTlsContext, - outlierDetection) + outlierDetection, false) .lbPolicyConfig(ImmutableMap.of("unknownLb", ImmutableMap.of("foo", "bar"))).build()); } catch (Exception e) { assertThat(e).hasMessageThat().contains("unknownLb"); @@ -706,7 +711,7 @@ public void invalidLbConfig() { try { xdsClient.deliverCdsUpdate(CLUSTER, CdsUpdate.forEds(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, upstreamTlsContext, - outlierDetection).lbPolicyConfig( + outlierDetection, false).lbPolicyConfig( ImmutableMap.of("ring_hash_experimental", ImmutableMap.of("minRingSize", "-1"))) .build()); } catch (Exception e) { diff --git a/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java index 2a8617912ea..2ae05a7dbf3 100644 --- a/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java @@ -36,6 +36,7 @@ import io.grpc.ChannelLogger; import io.grpc.ConnectivityState; import io.grpc.EquivalentAddressGroup; +import io.grpc.HttpConnectProxiedSocketAddress; import io.grpc.InsecureChannelCredentials; import io.grpc.LoadBalancer; import io.grpc.LoadBalancer.Helper; @@ -83,6 +84,7 @@ import io.grpc.xds.client.XdsClient; import io.grpc.xds.client.XdsResourceType; import io.grpc.xds.internal.security.CommonTlsContextTestsUtil; +import java.net.InetSocketAddress; import java.net.SocketAddress; import java.net.URI; import java.net.URISyntaxException; @@ -242,7 +244,7 @@ public void tearDown() { @Test public void edsClustersWithRingHashEndpointLbPolicy() { ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanism1), ringHash); + Collections.singletonList(edsDiscoveryMechanism1), ringHash, false); deliverLbConfig(config); 
assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); assertThat(childBalancers).isEmpty(); @@ -254,14 +256,18 @@ public void edsClustersWithRingHashEndpointLbPolicy() { LocalityLbEndpoints localityLbEndpoints1 = LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(endpoint1, 0 /* loadBalancingWeight */, true, "hostname1"), - LbEndpoint.create(endpoint2, 0 /* loadBalancingWeight */, true, "hostname2")), - 10 /* localityWeight */, 1 /* priority */); + LbEndpoint.create(endpoint1, 0 /* loadBalancingWeight */, + true, "hostname1", ImmutableMap.of()), + LbEndpoint.create(endpoint2, 0 /* loadBalancingWeight */, + true, "hostname2", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); LocalityLbEndpoints localityLbEndpoints2 = LocalityLbEndpoints.create( Collections.singletonList( - LbEndpoint.create(endpoint3, 60 /* loadBalancingWeight */, true, "hostname3")), - 50 /* localityWeight */, 1 /* priority */); + LbEndpoint.create( + endpoint3, 60 /* loadBalancingWeight */, true, + "hostname3", ImmutableMap.of())), + 50 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, ImmutableMap.of(locality1, localityLbEndpoints1, locality2, localityLbEndpoints2)); @@ -304,7 +310,7 @@ public void edsClustersWithRingHashEndpointLbPolicy() { @Test public void edsClustersWithLeastRequestEndpointLbPolicy() { ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanism1), leastRequest); + Collections.singletonList(edsDiscoveryMechanism1), leastRequest, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); assertThat(childBalancers).isEmpty(); @@ -314,8 +320,9 @@ public void edsClustersWithLeastRequestEndpointLbPolicy() { LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, "hostname1")), - 100 /* localityWeight */, 1 /* priority */); + LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, + "hostname1", ImmutableMap.of())), + 100 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, ImmutableMap.of(locality1, localityLbEndpoints)); @@ -350,7 +357,7 @@ public void edsClustersWithLeastRequestEndpointLbPolicy() { @Test public void edsClustersEndpointHostname_addedToAddressAttribute() { ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanismWithOutlierDetection), leastRequest); + Collections.singletonList(edsDiscoveryMechanismWithOutlierDetection), leastRequest, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); assertThat(childBalancers).isEmpty(); @@ -360,8 +367,9 @@ public void edsClustersEndpointHostname_addedToAddressAttribute() { LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, "hostname1")), - 100 /* localityWeight */, 1 /* priority */); + LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, + "hostname1", ImmutableMap.of())), + 100 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, ImmutableMap.of(locality1, localityLbEndpoints)); @@ -373,11 +381,104 @@ public void edsClustersEndpointHostname_addedToAddressAttribute() { 
.get(XdsAttributes.ATTR_ADDRESS_NAME)).isEqualTo("hostname1"); } + @Test + public void endpointAddressRewritten_whenProxyMetadataIsInEndpointMetadata() { + ClusterResolverConfig config = new ClusterResolverConfig( + Collections.singletonList(edsDiscoveryMechanismWithOutlierDetection), leastRequest, true); + deliverLbConfig(config); + assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); + assertThat(childBalancers).isEmpty(); + + EquivalentAddressGroup endpoint = + new EquivalentAddressGroup(InetSocketAddress.createUnresolved("127.0.0.1", 8080)); + + // Proxy address in endpointMetadata (use FakeSocketAddress directly) + SocketAddress proxyAddress = new FakeSocketAddress("127.0.0.2"); + ImmutableMap endpointMetadata = + ImmutableMap.of("envoy.http11_proxy_transport_socket.proxy_address", proxyAddress); + + // No proxy in locality metadata + ImmutableMap localityMetadata = ImmutableMap.of(); + + LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( + Arrays.asList( + LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, + "hostname1", endpointMetadata)), + 100 /* localityWeight */, 1 /* priority */, localityMetadata); + + xdsClient.deliverClusterLoadAssignment( + EDS_SERVICE_NAME1, + ImmutableMap.of(locality1, localityLbEndpoints)); + + assertThat(childBalancers).hasSize(1); + FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); + + // Get the rewritten address + SocketAddress rewrittenAddress = + childBalancer.addresses.get(0).getAddresses().get(0); + assertThat(rewrittenAddress).isInstanceOf(HttpConnectProxiedSocketAddress.class); + HttpConnectProxiedSocketAddress proxiedSocket = + (HttpConnectProxiedSocketAddress) rewrittenAddress; + + // Assert that the target address is the original address + assertThat(proxiedSocket.getTargetAddress()) + .isEqualTo(endpoint.getAddresses().get(0)); + + // Assert that the proxy address is correctly set + assertThat(proxiedSocket.getProxyAddress()).isEqualTo(proxyAddress); + } + + @Test + public void endpointAddressRewritten_whenProxyMetadataIsInLocalityMetadata() { + ClusterResolverConfig config = new ClusterResolverConfig( + Collections.singletonList(edsDiscoveryMechanismWithOutlierDetection), leastRequest, true); + deliverLbConfig(config); + assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); + assertThat(childBalancers).isEmpty(); + + EquivalentAddressGroup endpoint = + new EquivalentAddressGroup(InetSocketAddress.createUnresolved("127.0.0.2", 8080)); + + // No proxy in endpointMetadata + ImmutableMap endpointMetadata = ImmutableMap.of(); + + // Proxy address is now in localityMetadata + SocketAddress proxyAddress = new FakeSocketAddress("proxy-addr"); + ImmutableMap localityMetadata = + ImmutableMap.of("envoy.http11_proxy_transport_socket.proxy_address", proxyAddress); + + LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( + Arrays.asList( + LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, + "hostname2", endpointMetadata)), + 100 /* localityWeight */, 1 /* priority */, localityMetadata); + + xdsClient.deliverClusterLoadAssignment( + EDS_SERVICE_NAME1, + ImmutableMap.of(locality1, localityLbEndpoints)); + + assertThat(childBalancers).hasSize(1); + FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); + + // Get the rewritten address + SocketAddress rewrittenAddress = childBalancer.addresses.get(0).getAddresses().get(0); + + // Assert that the address was rewritten + 
assertThat(rewrittenAddress).isInstanceOf(HttpConnectProxiedSocketAddress.class); + HttpConnectProxiedSocketAddress proxiedSocket = + (HttpConnectProxiedSocketAddress) rewrittenAddress; + + // Assert that the target address is the original address + assertThat(proxiedSocket.getTargetAddress()).isEqualTo(endpoint.getAddresses().get(0)); + + // Assert that the proxy address is correctly set from locality metadata + assertThat(proxiedSocket.getProxyAddress()).isEqualTo(proxyAddress); + } @Test public void onlyEdsClusters_receivedEndpoints() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin); + Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1, EDS_SERVICE_NAME2); assertThat(childBalancers).isEmpty(); @@ -391,17 +492,21 @@ public void onlyEdsClusters_receivedEndpoints() { LocalityLbEndpoints localityLbEndpoints1 = LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(endpoint1, 100, true, "hostname1"), - LbEndpoint.create(endpoint2, 100, true, "hostname1")), - 70 /* localityWeight */, 1 /* priority */); + LbEndpoint.create(endpoint1, 100, + true, "hostname1", ImmutableMap.of()), + LbEndpoint.create(endpoint2, 100, + true, "hostname1", ImmutableMap.of())), + 70 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); LocalityLbEndpoints localityLbEndpoints2 = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint3, 100, true, "hostname2")), - 10 /* localityWeight */, 1 /* priority */); + Collections.singletonList(LbEndpoint.create(endpoint3, 100, true, + "hostname2", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); LocalityLbEndpoints localityLbEndpoints3 = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint4, 100, true, "hostname3")), - 20 /* localityWeight */, 2 /* priority */); + Collections.singletonList(LbEndpoint.create(endpoint4, 100, true, + "hostname3", ImmutableMap.of())), + 20 /* localityWeight */, 2 /* priority */, ImmutableMap.of()); String priority1 = CLUSTER2 + "[child1]"; String priority2 = CLUSTER2 + "[child2]"; String priority3 = CLUSTER1 + "[child1]"; @@ -489,7 +594,7 @@ public void onlyEdsClusters_receivedEndpoints() { private void verifyEdsPriorityNames(List want, Map... 
updates) { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism2), roundRobin); + Arrays.asList(edsDiscoveryMechanism2), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME2); assertThat(childBalancers).isEmpty(); @@ -555,15 +660,17 @@ locality2, createEndpoints(1) private LocalityLbEndpoints createEndpoints(int priority) { return LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(makeAddress("endpoint-addr-1"), 100, true, "hostname1"), - LbEndpoint.create(makeAddress("endpoint-addr-2"), 100, true, "hostname2")), - 70 /* localityWeight */, priority /* priority */); + LbEndpoint.create(makeAddress("endpoint-addr-1"), 100, + true, "hostname1", ImmutableMap.of()), + LbEndpoint.create(makeAddress("endpoint-addr-2"), 100, + true, "hostname2", ImmutableMap.of())), + 70 /* localityWeight */, priority /* priority */, ImmutableMap.of()); } @Test public void onlyEdsClusters_resourceNeverExist_returnErrorPicker() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin); + Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1, EDS_SERVICE_NAME2); assertThat(childBalancers).isEmpty(); @@ -585,7 +692,7 @@ public void onlyEdsClusters_resourceNeverExist_returnErrorPicker() { @Test public void onlyEdsClusters_allResourcesRevoked_shutDownChildLbPolicy() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin); + Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1, EDS_SERVICE_NAME2); assertThat(childBalancers).isEmpty(); @@ -594,12 +701,14 @@ public void onlyEdsClusters_allResourcesRevoked_shutDownChildLbPolicy() { EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr-2"); LocalityLbEndpoints localityLbEndpoints1 = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint1, 100, true, "hostname1")), - 10 /* localityWeight */, 1 /* priority */); + Collections.singletonList(LbEndpoint.create(endpoint1, 100, true, + "hostname1", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); LocalityLbEndpoints localityLbEndpoints2 = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint2, 100, true, "hostname2")), - 20 /* localityWeight */, 2 /* priority */); + Collections.singletonList(LbEndpoint.create(endpoint2, 100, true, + "hostname2", ImmutableMap.of())), + 20 /* localityWeight */, 2 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints1)); xdsClient.deliverClusterLoadAssignment( @@ -620,17 +729,19 @@ public void onlyEdsClusters_allResourcesRevoked_shutDownChildLbPolicy() { @Test public void handleEdsResource_ignoreUnhealthyEndpoints() { - ClusterResolverConfig config = - new ClusterResolverConfig(Collections.singletonList(edsDiscoveryMechanism1), roundRobin); + ClusterResolverConfig config = new ClusterResolverConfig( + Collections.singletonList(edsDiscoveryMechanism1), roundRobin, false); deliverLbConfig(config); EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr-1"); 
EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr-2"); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(endpoint1, 100, false /* isHealthy */, "hostname1"), - LbEndpoint.create(endpoint2, 100, true /* isHealthy */, "hostname2")), - 10 /* localityWeight */, 1 /* priority */); + LbEndpoint.create(endpoint1, 100, false /* isHealthy */, + "hostname1", ImmutableMap.of()), + LbEndpoint.create(endpoint2, 100, true /* isHealthy */, + "hostname2", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); @@ -640,21 +751,21 @@ public void handleEdsResource_ignoreUnhealthyEndpoints() { @Test public void handleEdsResource_ignoreLocalitiesWithNoHealthyEndpoints() { - ClusterResolverConfig config = - new ClusterResolverConfig(Collections.singletonList(edsDiscoveryMechanism1), roundRobin); + ClusterResolverConfig config = new ClusterResolverConfig( + Collections.singletonList(edsDiscoveryMechanism1), roundRobin, false); deliverLbConfig(config); EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr-1"); EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr-2"); LocalityLbEndpoints localityLbEndpoints1 = LocalityLbEndpoints.create( Collections.singletonList(LbEndpoint.create(endpoint1, 100, false /* isHealthy */, - "hostname1")), - 10 /* localityWeight */, 1 /* priority */); + "hostname1", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); LocalityLbEndpoints localityLbEndpoints2 = LocalityLbEndpoints.create( Collections.singletonList(LbEndpoint.create(endpoint2, 100, true /* isHealthy */, - "hostname2")), - 10 /* localityWeight */, 1 /* priority */); + "hostname2", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, ImmutableMap.of(locality1, localityLbEndpoints1, locality2, localityLbEndpoints2)); @@ -667,21 +778,21 @@ public void handleEdsResource_ignoreLocalitiesWithNoHealthyEndpoints() { @Test public void handleEdsResource_ignorePrioritiesWithNoHealthyEndpoints() { - ClusterResolverConfig config = - new ClusterResolverConfig(Collections.singletonList(edsDiscoveryMechanism1), roundRobin); + ClusterResolverConfig config = new ClusterResolverConfig( + Collections.singletonList(edsDiscoveryMechanism1), roundRobin, false); deliverLbConfig(config); EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr-1"); EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr-2"); LocalityLbEndpoints localityLbEndpoints1 = LocalityLbEndpoints.create( Collections.singletonList(LbEndpoint.create(endpoint1, 100, false /* isHealthy */, - "hostname1")), - 10 /* localityWeight */, 1 /* priority */); + "hostname1", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); LocalityLbEndpoints localityLbEndpoints2 = LocalityLbEndpoints.create( Collections.singletonList(LbEndpoint.create(endpoint2, 200, true /* isHealthy */, - "hostname2")), - 10 /* localityWeight */, 2 /* priority */); + "hostname2", ImmutableMap.of())), + 10 /* localityWeight */, 2 /* priority */, ImmutableMap.of()); String priority2 = CLUSTER1 + "[child2]"; xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, @@ -693,15 +804,15 @@ public void 
handleEdsResource_ignorePrioritiesWithNoHealthyEndpoints() { @Test public void handleEdsResource_noHealthyEndpoint() { - ClusterResolverConfig config = - new ClusterResolverConfig(Collections.singletonList(edsDiscoveryMechanism1), roundRobin); + ClusterResolverConfig config = new ClusterResolverConfig( + Collections.singletonList(edsDiscoveryMechanism1), roundRobin, false); deliverLbConfig(config); EquivalentAddressGroup endpoint = makeAddress("endpoint-addr-1"); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( Collections.singletonList(LbEndpoint.create(endpoint, 100, false /* isHealthy */, - "hostname1")), - 10 /* localityWeight */, 1 /* priority */); + "hostname1", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment(EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); // single endpoint, unhealthy @@ -729,7 +840,7 @@ public void oldListenerCallback_onlyLogicalDnsCluster_endpointsResolved() { void do_onlyLogicalDnsCluster_endpointsResolved() { ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); + Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); assertThat(childBalancers).isEmpty(); @@ -761,7 +872,7 @@ void do_onlyLogicalDnsCluster_endpointsResolved() { @Test public void onlyLogicalDnsCluster_handleRefreshNameResolution() { ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); + Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); assertThat(childBalancers).isEmpty(); @@ -790,7 +901,7 @@ void do_onlyLogicalDnsCluster_resolutionError_backoffAndRefresh() { InOrder inOrder = Mockito.inOrder(helper, backoffPolicyProvider, backoffPolicy1, backoffPolicy2); ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); + Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); assertThat(childBalancers).isEmpty(); @@ -836,7 +947,7 @@ void do_onlyLogicalDnsCluster_resolutionError_backoffAndRefresh() { public void onlyLogicalDnsCluster_refreshNameResolutionRaceWithResolutionError() { InOrder inOrder = Mockito.inOrder(backoffPolicyProvider, backoffPolicy1, backoffPolicy2); ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); + Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); assertThat(childBalancers).isEmpty(); @@ -874,7 +985,7 @@ public void onlyLogicalDnsCluster_refreshNameResolutionRaceWithResolutionError() @Test public void edsClustersAndLogicalDnsCluster_receivedEndpoints() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); 
FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -885,8 +996,9 @@ public void edsClustersAndLogicalDnsCluster_receivedEndpoints() { resolver.deliverEndpointAddresses(Arrays.asList(endpoint1, endpoint2)); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint3, 100, true, "hostname3")), - 10 /* localityWeight */, 1 /* priority */); + Collections.singletonList(LbEndpoint.create(endpoint3, 100, true, + "hostname3", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); @@ -909,7 +1021,7 @@ public void edsClustersAndLogicalDnsCluster_receivedEndpoints() { @Test public void noEdsResourceExists_useDnsResolutionResults() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -933,7 +1045,7 @@ public void noEdsResourceExists_useDnsResolutionResults() { @Test public void edsResourceRevoked_dnsResolutionError_shutDownChildLbPolicyAndReturnErrorPicker() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -942,8 +1054,9 @@ public void edsResourceRevoked_dnsResolutionError_shutDownChildLbPolicyAndReturn EquivalentAddressGroup endpoint = makeAddress("endpoint-addr-1"); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint, 100, true, "hostname1")), - 10 /* localityWeight */, 1 /* priority */); + Collections.singletonList(LbEndpoint.create(endpoint, 100, true, + "hostname1", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); resolver.deliverError(Status.UNKNOWN.withDescription("I am lost")); @@ -964,7 +1077,7 @@ public void edsResourceRevoked_dnsResolutionError_shutDownChildLbPolicyAndReturn @Test public void resolutionErrorAfterChildLbCreated_propagateErrorIfAllClustersEncounterError() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -973,8 +1086,9 @@ public void resolutionErrorAfterChildLbCreated_propagateErrorIfAllClustersEncoun EquivalentAddressGroup endpoint = makeAddress("endpoint-addr-1"); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint, 100, true, "hostname1")), - 10 /* localityWeight */, 1 /* priority 
*/); + Collections.singletonList(LbEndpoint.create(endpoint, 100, true, + "hostname1", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); assertThat(childBalancers).isEmpty(); // not created until all clusters resolved. @@ -999,7 +1113,7 @@ public void resolutionErrorAfterChildLbCreated_propagateErrorIfAllClustersEncoun @Test public void resolutionErrorBeforeChildLbCreated_returnErrorPickerIfAllClustersEncounterError() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -1022,7 +1136,7 @@ public void resolutionErrorBeforeChildLbCreated_returnErrorPickerIfAllClustersEn @Test public void resolutionErrorBeforeChildLbCreated_edsOnly_returnErrorPicker() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1), roundRobin); + Arrays.asList(edsDiscoveryMechanism1), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); assertThat(childBalancers).isEmpty(); @@ -1040,7 +1154,7 @@ public void resolutionErrorBeforeChildLbCreated_edsOnly_returnErrorPicker() { @Test public void handleNameResolutionErrorFromUpstream_beforeChildLbCreated_returnErrorPicker() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); assertResolverCreated("/" + DNS_HOST_NAME); @@ -1056,7 +1170,7 @@ public void handleNameResolutionErrorFromUpstream_beforeChildLbCreated_returnErr @Test public void handleNameResolutionErrorFromUpstream_afterChildLbCreated_fallThrough() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -1066,8 +1180,9 @@ public void handleNameResolutionErrorFromUpstream_afterChildLbCreated_fallThroug EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr-2"); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint1, 100, true, "hostname1")), - 10 /* localityWeight */, 1 /* priority */); + Collections.singletonList(LbEndpoint.create(endpoint1, 100, true, + "hostname1", ImmutableMap.of())), + 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); resolver.deliverEndpointAddresses(Collections.singletonList(endpoint2)); @@ -1141,37 +1256,37 @@ private static void assertAddressesEqual( } private static EquivalentAddressGroup makeAddress(final String name) { - class FakeSocketAddress 
extends SocketAddress { - private final String name; + return new EquivalentAddressGroup(new FakeSocketAddress(name)); + } - private FakeSocketAddress(String name) { - this.name = name; - } + static class FakeSocketAddress extends SocketAddress { + private final String name; - @Override - public int hashCode() { - return Objects.hash(name); - } + private FakeSocketAddress(String name) { + this.name = name; + } - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof FakeSocketAddress)) { - return false; - } - FakeSocketAddress that = (FakeSocketAddress) o; - return Objects.equals(name, that.name); - } + @Override + public int hashCode() { + return Objects.hash(name); + } - @Override - public String toString() { - return name; + @Override + public boolean equals(Object o) { + if (this == o) { + return true; } + if (!(o instanceof FakeSocketAddress)) { + return false; + } + FakeSocketAddress that = (FakeSocketAddress) o; + return Objects.equals(name, that.name); } - return new EquivalentAddressGroup(new FakeSocketAddress(name)); + @Override + public String toString() { + return name; + } } private static final class FakeXdsClient extends XdsClient { diff --git a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java index 610d147ccf9..7fac666f983 100644 --- a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java +++ b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java @@ -18,6 +18,7 @@ import static com.google.common.truth.Truth.assertThat; import static io.envoyproxy.envoy.config.route.v3.RouteAction.ClusterSpecifierCase.CLUSTER_SPECIFIER_PLUGIN; +import static io.grpc.xds.XdsClusterResource.TRANSPORT_SOCKET_NAME_HTTP11_PROXY; import static io.grpc.xds.XdsEndpointResource.GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS; import static org.junit.Assert.fail; @@ -93,6 +94,7 @@ import io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.Rds; import io.envoyproxy.envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin; import io.envoyproxy.envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality; +import io.envoyproxy.envoy.extensions.transport_sockets.http_11_proxy.v3.Http11ProxyUpstreamTransport; import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance; import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext; import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext; @@ -1055,7 +1057,7 @@ public void parseClusterWeight() { } @Test - public void parseLocalityLbEndpoints_withHealthyEndpoints() { + public void parseLocalityLbEndpoints_withHealthyEndpoints() throws ResourceInvalidException { io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto = io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints.newBuilder() .setLocality(Locality.newBuilder() @@ -1075,12 +1077,14 @@ public void parseLocalityLbEndpoints_withHealthyEndpoints() { assertThat(struct.getErrorDetail()).isNull(); assertThat(struct.getStruct()).isEqualTo( LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, 20, true, "")), - 100, 1)); + Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, + 20, true, "", ImmutableMap.of())), + 100, 1, ImmutableMap.of())); } @Test - public void parseLocalityLbEndpoints_treatUnknownHealthAsHealthy() { + public void 
parseLocalityLbEndpoints_treatUnknownHealthAsHealthy() + throws ResourceInvalidException { io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto = io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints.newBuilder() .setLocality(Locality.newBuilder() @@ -1100,12 +1104,13 @@ public void parseLocalityLbEndpoints_treatUnknownHealthAsHealthy() { assertThat(struct.getErrorDetail()).isNull(); assertThat(struct.getStruct()).isEqualTo( LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, 20, true, "")), 100, - 1)); + Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, + 20, true, "", ImmutableMap.of())), + 100, 1, ImmutableMap.of())); } @Test - public void parseLocalityLbEndpoints_withUnHealthyEndpoints() { + public void parseLocalityLbEndpoints_withUnHealthyEndpoints() throws ResourceInvalidException { io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto = io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints.newBuilder() .setLocality(Locality.newBuilder() @@ -1125,12 +1130,13 @@ public void parseLocalityLbEndpoints_withUnHealthyEndpoints() { assertThat(struct.getErrorDetail()).isNull(); assertThat(struct.getStruct()).isEqualTo( LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, 20, false, "")), 100, - 1)); + Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, 20, + false, "", ImmutableMap.of())), + 100, 1, ImmutableMap.of())); } @Test - public void parseLocalityLbEndpoints_ignorZeroWeightLocality() { + public void parseLocalityLbEndpoints_ignorZeroWeightLocality() throws ResourceInvalidException { io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto = io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints.newBuilder() .setLocality(Locality.newBuilder() @@ -1187,7 +1193,10 @@ public void parseLocalityLbEndpoints_withDualStackEndpoints() { EquivalentAddressGroup expectedEag = new EquivalentAddressGroup(socketAddressList); assertThat(struct.getStruct()).isEqualTo( LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(expectedEag, 20, true, "")), 100, 1)); + Collections.singletonList(LbEndpoint.create( + expectedEag, 20, true, "", ImmutableMap.of())), 100, 1, ImmutableMap.of())); + } catch (ResourceInvalidException e) { + throw new RuntimeException(e); } finally { if (originalDualStackProp != null) { System.setProperty(GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS, originalDualStackProp); @@ -1198,7 +1207,7 @@ public void parseLocalityLbEndpoints_withDualStackEndpoints() { } @Test - public void parseLocalityLbEndpoints_invalidPriority() { + public void parseLocalityLbEndpoints_invalidPriority() throws ResourceInvalidException { io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto = io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints.newBuilder() .setLocality(Locality.newBuilder() @@ -2456,6 +2465,59 @@ public void processCluster_parsesAudienceMetadata() assertThat(update.parsedMetadata()).isEqualTo(expectedParsedMetadata); } + @Test + public void processCluster_parsesAddressMetadata() throws Exception { + + // Create an Address message + Address address = Address.newBuilder() + .setSocketAddress(SocketAddress.newBuilder() + .setAddress("192.168.1.1") + .setPortValue(8080) + .build()) + .build(); + + // Wrap the Address in Any + Any addressMetadata = Any.newBuilder() + .setTypeUrl("type.googleapis.com/envoy.config.core.v3.Address") + .setValue(address.toByteString()) + .build(); + + Struct filterMetadata = 
Struct.newBuilder() + .putFields("key1", Value.newBuilder().setStringValue("value1").build()) + .putFields("key2", Value.newBuilder().setNumberValue(42).build()) + .build(); + + Metadata metadata = Metadata.newBuilder() + .putTypedFilterMetadata("ADDRESS_METADATA", addressMetadata) + .putFilterMetadata("FILTER_METADATA", filterMetadata) + .build(); + + Cluster cluster = Cluster.newBuilder() + .setName("cluster-foo.googleapis.com") + .setType(DiscoveryType.EDS) + .setEdsClusterConfig( + EdsClusterConfig.newBuilder() + .setEdsConfig( + ConfigSource.newBuilder() + .setAds(AggregatedConfigSource.getDefaultInstance())) + .setServiceName("service-foo.googleapis.com")) + .setLbPolicy(LbPolicy.ROUND_ROBIN) + .setMetadata(metadata) + .build(); + + CdsUpdate update = XdsClusterResource.processCluster( + cluster, null, LRS_SERVER_INFO, + LoadBalancerRegistry.getDefaultRegistry()); + + ImmutableMap expectedParsedMetadata = ImmutableMap.of( + "ADDRESS_METADATA", new InetSocketAddress("192.168.1.1", 8080), + "FILTER_METADATA", ImmutableMap.of( + "key1", "value1", + "key2", 42.0)); + + assertThat(update.parsedMetadata()).isEqualTo(expectedParsedMetadata); + } + @Test public void processCluster_metadataKeyCollision_resolvesToTypedMetadata() throws ResourceInvalidException, InvalidProtocolBufferException { @@ -2512,6 +2574,40 @@ public Object parse(Any value) { metadataRegistry.removeParser(testParser); } + @Test + public void parseNonAggregateCluster_withHttp11ProxyTransportSocket() + throws ResourceInvalidException, InvalidProtocolBufferException { + XdsClusterResource.isEnabledXdsHttpConnect = true; + + Http11ProxyUpstreamTransport http11ProxyUpstreamTransport = + Http11ProxyUpstreamTransport.newBuilder() + .setTransportSocket(TransportSocket.getDefaultInstance()) + .build(); + + TransportSocket transportSocket = TransportSocket.newBuilder() + .setName(TRANSPORT_SOCKET_NAME_HTTP11_PROXY) + .setTypedConfig(Any.pack(http11ProxyUpstreamTransport)) + .build(); + + Cluster cluster = Cluster.newBuilder() + .setName("cluster-http11-proxy.googleapis.com") + .setType(DiscoveryType.EDS) + .setEdsClusterConfig( + EdsClusterConfig.newBuilder() + .setEdsConfig( + ConfigSource.newBuilder().setAds(AggregatedConfigSource.getDefaultInstance())) + .setServiceName("service-http11-proxy.googleapis.com")) + .setLbPolicy(LbPolicy.ROUND_ROBIN) + .setTransportSocket(transportSocket) + .build(); + + CdsUpdate result = + XdsClusterResource.processCluster(cluster, null, LRS_SERVER_INFO, + LoadBalancerRegistry.getDefaultRegistry()); + + assertThat(result).isNotNull(); + assertThat(result.isHttp11ProxyAvailable()).isTrue(); + } @Test public void parseServerSideListener_invalidTrafficDirection() throws ResourceInvalidException { diff --git a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java index 00fbfe669af..51c07cb3537 100644 --- a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java +++ b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java @@ -607,9 +607,9 @@ private void validateGoldenClusterLoadAssignment(EdsUpdate edsUpdate) { Locality.create("region1", "zone1", "subzone1"), LocalityLbEndpoints.create( ImmutableList.of(LbEndpoint.create("192.168.0.1", 8080, 2, true, - "endpoint-host-name")), 1, 0), + "endpoint-host-name", ImmutableMap.of())), 1, 0, ImmutableMap.of()), Locality.create("region3", "zone3", "subzone3"), - LocalityLbEndpoints.create(ImmutableList.of(), 2, 1)); + LocalityLbEndpoints.create(ImmutableList.of(), 2, 1, 
ImmutableMap.of())); } /** @@ -3246,7 +3246,9 @@ public void edsResourceUpdated() { Locality.create("region2", "zone2", "subzone2"), LocalityLbEndpoints.create( ImmutableList.of( - LbEndpoint.create("172.44.2.2", 8000, 3, true, "endpoint-host-name")), 2, 0)); + LbEndpoint.create("172.44.2.2", 8000, 3, + true, "endpoint-host-name", ImmutableMap.of())), + 2, 0, ImmutableMap.of())); verifyResourceMetadataAcked(EDS, EDS_RESOURCE, updatedClusterLoadAssignment, VERSION_2, TIME_INCREMENT * 2); verifySubscribedResourcesMetadataSizes(0, 0, 0, 1); @@ -3416,7 +3418,9 @@ public void multipleEdsWatchers() { Locality.create("region2", "zone2", "subzone2"), LocalityLbEndpoints.create( ImmutableList.of( - LbEndpoint.create("172.44.2.2", 8000, 3, true, "endpoint-host-name")), 2, 0)); + LbEndpoint.create("172.44.2.2", 8000, 3, + true, "endpoint-host-name", ImmutableMap.of())), + 2, 0, ImmutableMap.of())); verify(watcher2).onChanged(edsUpdateCaptor.capture()); edsUpdate = edsUpdateCaptor.getValue(); assertThat(edsUpdate.clusterName).isEqualTo(edsResourceTwo); @@ -3426,7 +3430,9 @@ public void multipleEdsWatchers() { Locality.create("region2", "zone2", "subzone2"), LocalityLbEndpoints.create( ImmutableList.of( - LbEndpoint.create("172.44.2.2", 8000, 3, true, "endpoint-host-name")), 2, 0)); + LbEndpoint.create("172.44.2.2", 8000, 3, + true, "endpoint-host-name", ImmutableMap.of())), + 2, 0, ImmutableMap.of())); verifyNoMoreInteractions(edsResourceWatcher); verifyResourceMetadataAcked( EDS, edsResourceTwo, clusterLoadAssignmentTwo, VERSION_2, TIME_INCREMENT * 2); diff --git a/xds/src/test/java/io/grpc/xds/XdsTestUtils.java b/xds/src/test/java/io/grpc/xds/XdsTestUtils.java index ea28734ec6a..d0580ae2667 100644 --- a/xds/src/test/java/io/grpc/xds/XdsTestUtils.java +++ b/xds/src/test/java/io/grpc/xds/XdsTestUtils.java @@ -257,17 +257,17 @@ static XdsConfig getDefaultXdsConfig(String serverHostName) // Need to create endpoints to create locality endpoints map to create edsUpdate Map lbEndpointsMap = new HashMap<>(); - LbEndpoint lbEndpoint = - LbEndpoint.create(serverHostName, ENDPOINT_PORT, 0, true, ENDPOINT_HOSTNAME); + LbEndpoint lbEndpoint = LbEndpoint.create( + serverHostName, ENDPOINT_PORT, 0, true, ENDPOINT_HOSTNAME, ImmutableMap.of()); lbEndpointsMap.put( Locality.create("", "", ""), - LocalityLbEndpoints.create(ImmutableList.of(lbEndpoint), 10, 0)); + LocalityLbEndpoints.create(ImmutableList.of(lbEndpoint), 10, 0, ImmutableMap.of())); // Need to create EdsUpdate to create CdsUpdate to create XdsClusterConfig for builder XdsEndpointResource.EdsUpdate edsUpdate = new XdsEndpointResource.EdsUpdate( EDS_NAME, lbEndpointsMap, Collections.emptyList()); XdsClusterResource.CdsUpdate cdsUpdate = XdsClusterResource.CdsUpdate.forEds( - CLUSTER_NAME, EDS_NAME, serverInfo, null, null, null) + CLUSTER_NAME, EDS_NAME, serverInfo, null, null, null, false) .lbPolicyConfig(getWrrLbConfigAsMap()).build(); XdsConfig.XdsClusterConfig clusterConfig = new XdsConfig.XdsClusterConfig( CLUSTER_NAME, cdsUpdate, new EndpointConfig(StatusOr.fromValue(edsUpdate))); diff --git a/xds/third_party/envoy/import.sh b/xds/third_party/envoy/import.sh index dbe6f81b1a8..7a6b33871b3 100755 --- a/xds/third_party/envoy/import.sh +++ b/xds/third_party/envoy/import.sh @@ -86,6 +86,7 @@ envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto envoy/extensions/load_balancing_policies/round_robin/v3/round_robin.proto 
envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto +envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto envoy/extensions/transport_sockets/tls/v3/cert.proto envoy/extensions/transport_sockets/tls/v3/common.proto envoy/extensions/transport_sockets/tls/v3/secret.proto diff --git a/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto b/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto new file mode 100644 index 00000000000..2c9b5333f41 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.http_11_proxy.v3; + +import "envoy/config/core/v3/base.proto"; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.http_11_proxy.v3"; +option java_outer_classname = "UpstreamHttp11ConnectProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/http_11_proxy/v3;http_11_proxyv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Upstream HTTP/1.1 Proxy] +// [#extension: envoy.transport_sockets.http_11_proxy] + +// HTTP/1.1 proxy transport socket establishes an upstream connection to a proxy address +// instead of the target host's address. This behavior is triggered when the transport +// socket is configured and proxy information is provided. +// +// Behavior when proxying: +// ======================= +// When an upstream connection is established, instead of connecting directly to the endpoint +// address, the client will connect to the specified proxy address, send an HTTP/1.1 ``CONNECT`` request +// indicating the endpoint address, and process the response. If the response has HTTP status 200, +// the connection will be passed down to the underlying transport socket. +// +// Configuring proxy information: +// ============================== +// Set ``typed_filter_metadata`` in :ref:`LbEndpoint.Metadata ` or :ref:`LocalityLbEndpoints.Metadata `. +// using the key ``envoy.http11_proxy_transport_socket.proxy_address`` and the +// proxy address in ``config::core::v3::Address`` format. +// +message Http11ProxyUpstreamTransport { + // The underlying transport socket being wrapped. Defaults to plaintext (raw_buffer) if unset. + config.core.v3.TransportSocket transport_socket = 1; +} From 80ea4e9e6981daadb1e175a2a60ce78fd5fab1ed Mon Sep 17 00:00:00 2001 From: MV Shiva Date: Mon, 26 May 2025 11:22:24 +0530 Subject: [PATCH 10/15] xds: ensure server interceptors are created in a sync context (#11930) (#12102) `XdsServerWrapper#generatePerRouteInterceptors` was always intended to be executed within a sync context. This PR ensures that by calling `syncContext.throwIfNotInThisSynchronizationContext()`. 
This change is needed for upcoming xDS filter state retention because the new tests in XdsServerWrapperTest flake with this NPE: > `Cannot invoke "io.grpc.xds.client.XdsClient$ResourceWatcher.onChanged(io.grpc.xds.client.XdsClient$ResourceUpdate)" because "this.ldsWatcher" is null` Co-authored-by: Sergii Tkachenko --- .../java/io/grpc/xds/XdsServerWrapper.java | 4 +- .../java/io/grpc/xds/XdsServerTestHelper.java | 83 +++++++++++++++---- .../io/grpc/xds/XdsServerWrapperTest.java | 28 +++---- 3 files changed, 79 insertions(+), 36 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java b/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java index bbb17d9b616..e5b25ae458b 100644 --- a/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java +++ b/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java @@ -524,9 +524,7 @@ private AtomicReference generateRoutingConfig(FilterChain f private ImmutableMap generatePerRouteInterceptors( @Nullable List filterConfigs, List virtualHosts) { - // This should always be called from the sync context. - // Ideally we'd want to throw otherwise, but this breaks the tests now. - // syncContext.throwIfNotInThisSynchronizationContext(); + syncContext.throwIfNotInThisSynchronizationContext(); ImmutableMap.Builder perRouteInterceptors = new ImmutableMap.Builder<>(); diff --git a/xds/src/test/java/io/grpc/xds/XdsServerTestHelper.java b/xds/src/test/java/io/grpc/xds/XdsServerTestHelper.java index a27c2917712..0508b11c205 100644 --- a/xds/src/test/java/io/grpc/xds/XdsServerTestHelper.java +++ b/xds/src/test/java/io/grpc/xds/XdsServerTestHelper.java @@ -38,6 +38,7 @@ import io.grpc.xds.client.XdsClient; import io.grpc.xds.client.XdsInitializationException; import io.grpc.xds.client.XdsResourceType; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -45,7 +46,10 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import javax.annotation.Nullable; /** @@ -174,12 +178,18 @@ public List getTargets() { } } + // Implementation details: + // 1. Use `synchronized` in methods where XdsClientImpl uses its own `syncContext`. + // 2. Use `serverExecutor` via `execute()` in methods where XdsClientImpl uses watcher's executor. 
static final class FakeXdsClient extends XdsClient { - boolean shutdown; - SettableFuture ldsResource = SettableFuture.create(); - ResourceWatcher ldsWatcher; - CountDownLatch rdsCount = new CountDownLatch(1); + public static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(5); + + private boolean shutdown; + @Nullable SettableFuture ldsResource = SettableFuture.create(); + @Nullable ResourceWatcher ldsWatcher; + private CountDownLatch rdsCount = new CountDownLatch(1); final Map> rdsWatchers = new HashMap<>(); + @Nullable private volatile Executor serverExecutor; @Override public TlsContextManager getSecurityConfig() { @@ -193,14 +203,20 @@ public BootstrapInfo getBootstrapInfo() { @Override @SuppressWarnings("unchecked") - public void watchXdsResource(XdsResourceType resourceType, - String resourceName, - ResourceWatcher watcher, - Executor syncContext) { + public synchronized void watchXdsResource( + XdsResourceType resourceType, + String resourceName, + ResourceWatcher watcher, + Executor executor) { + if (serverExecutor != null) { + assertThat(executor).isEqualTo(serverExecutor); + } + switch (resourceType.typeName()) { case "LDS": assertThat(ldsWatcher).isNull(); ldsWatcher = (ResourceWatcher) watcher; + serverExecutor = executor; ldsResource.set(resourceName); break; case "RDS": @@ -213,14 +229,14 @@ public void watchXdsResource(XdsResourceType resou } @Override - public void cancelXdsResourceWatch(XdsResourceType type, - String resourceName, - ResourceWatcher watcher) { + public synchronized void cancelXdsResourceWatch( + XdsResourceType type, String resourceName, ResourceWatcher watcher) { switch (type.typeName()) { case "LDS": assertThat(ldsWatcher).isNotNull(); ldsResource = null; ldsWatcher = null; + serverExecutor = null; break; case "RDS": rdsWatchers.remove(resourceName); @@ -230,27 +246,58 @@ public void cancelXdsResourceWatch(XdsResourceType } @Override - public void shutdown() { + public synchronized void shutdown() { shutdown = true; } @Override - public boolean isShutDown() { + public synchronized boolean isShutDown() { return shutdown; } + public void awaitRds(Duration timeout) throws InterruptedException, TimeoutException { + if (!rdsCount.await(timeout.toMillis(), TimeUnit.MILLISECONDS)) { + throw new TimeoutException("Timeout " + timeout + " waiting for RDSs"); + } + } + + public void setExpectedRdsCount(int count) { + rdsCount = new CountDownLatch(count); + } + + private void execute(Runnable action) { + // This method ensures that all watcher updates: + // - Happen after the server started watching LDS. + // - Are executed within the sync context of the server. + // + // Note that this doesn't guarantee that any of the RDS watchers are created. + // Tests should use setExpectedRdsCount(int) and awaitRds() for that. 
+ if (ldsResource == null) { + throw new IllegalStateException("xDS resource update after watcher cancel"); + } + try { + ldsResource.get(DEFAULT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); + } catch (ExecutionException | TimeoutException e) { + throw new RuntimeException("Can't resolve LDS resource name in " + DEFAULT_TIMEOUT, e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + serverExecutor.execute(action); + } + void deliverLdsUpdate(List filterChains, FilterChain defaultFilterChain) { - ldsWatcher.onChanged(LdsUpdate.forTcpListener(Listener.create( - "listener", "0.0.0.0:1", ImmutableList.copyOf(filterChains), defaultFilterChain))); + deliverLdsUpdate(LdsUpdate.forTcpListener(Listener.create( + "listener", "0.0.0.0:1", ImmutableList.copyOf(filterChains), defaultFilterChain))); } void deliverLdsUpdate(LdsUpdate ldsUpdate) { - ldsWatcher.onChanged(ldsUpdate); + execute(() -> ldsWatcher.onChanged(ldsUpdate)); } - void deliverRdsUpdate(String rdsName, List virtualHosts) { - rdsWatchers.get(rdsName).onChanged(new RdsUpdate(virtualHosts)); + void deliverRdsUpdate(String resourceName, List virtualHosts) { + execute(() -> rdsWatchers.get(resourceName).onChanged(new RdsUpdate(virtualHosts))); } } } diff --git a/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java b/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java index 41f005ba583..388052a3dc8 100644 --- a/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java @@ -74,7 +74,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -252,7 +251,7 @@ public void run() { FilterChain f0 = createFilterChain("filter-chain-0", hcm_virtual); FilterChain f1 = createFilterChain("filter-chain-1", createRds("rds")); xdsClient.deliverLdsUpdate(Collections.singletonList(f0), f1); - xdsClient.rdsCount.await(5, TimeUnit.SECONDS); + xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); xdsClient.deliverRdsUpdate("rds", Collections.singletonList(createVirtualHost("virtual-host-1"))); verify(listener, timeout(5000)).onServing(); @@ -261,7 +260,7 @@ public void run() { xdsServerWrapper.shutdown(); assertThat(xdsServerWrapper.isShutdown()).isTrue(); assertThat(xdsClient.ldsResource).isNull(); - assertThat(xdsClient.shutdown).isTrue(); + assertThat(xdsClient.isShutDown()).isTrue(); verify(mockServer).shutdown(); assertThat(f0.sslContextProviderSupplier().isShutdown()).isTrue(); assertThat(f1.sslContextProviderSupplier().isShutdown()).isTrue(); @@ -303,7 +302,7 @@ public void run() { verify(mockServer, never()).start(); assertThat(xdsServerWrapper.isShutdown()).isTrue(); assertThat(xdsClient.ldsResource).isNull(); - assertThat(xdsClient.shutdown).isTrue(); + assertThat(xdsClient.isShutDown()).isTrue(); verify(mockServer).shutdown(); assertThat(f0.sslContextProviderSupplier().isShutdown()).isTrue(); assertThat(f1.sslContextProviderSupplier().isShutdown()).isTrue(); @@ -342,7 +341,7 @@ public void run() { xdsServerWrapper.shutdown(); assertThat(xdsServerWrapper.isShutdown()).isTrue(); assertThat(xdsClient.ldsResource).isNull(); - assertThat(xdsClient.shutdown).isTrue(); + assertThat(xdsClient.isShutDown()).isTrue(); verify(mockBuilder, times(1)).build(); verify(mockServer, times(1)).shutdown(); xdsServerWrapper.awaitTermination(1, 
TimeUnit.SECONDS); @@ -367,7 +366,7 @@ public void run() { FilterChain filterChain = createFilterChain("filter-chain-1", createRds("rds")); SslContextProviderSupplier sslSupplier = filterChain.sslContextProviderSupplier(); xdsClient.deliverLdsUpdate(Collections.singletonList(filterChain), null); - xdsClient.rdsCount.await(5, TimeUnit.SECONDS); + xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); xdsClient.deliverRdsUpdate("rds", Collections.singletonList(createVirtualHost("virtual-host-1"))); try { @@ -434,7 +433,7 @@ public void run() { xdsClient.ldsResource.get(5, TimeUnit.SECONDS); FilterChain filterChain = createFilterChain("filter-chain-1", createRds("rds")); xdsClient.deliverLdsUpdate(Collections.singletonList(filterChain), null); - xdsClient.rdsCount.await(5, TimeUnit.SECONDS); + xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); xdsClient.deliverRdsUpdate("rds", Collections.singletonList(createVirtualHost("virtual-host-1"))); try { @@ -544,7 +543,7 @@ public void run() { 0L, Collections.singletonList(virtualHost), new ArrayList()); EnvoyServerProtoData.FilterChain f0 = createFilterChain("filter-chain-0", hcmVirtual); EnvoyServerProtoData.FilterChain f1 = createFilterChain("filter-chain-1", createRds("r0")); - xdsClient.rdsCount = new CountDownLatch(3); + xdsClient.setExpectedRdsCount(3); xdsClient.deliverLdsUpdate(Arrays.asList(f0, f1), null); assertThat(start.isDone()).isFalse(); assertThat(selectorManager.getSelectorToUpdateSelector()).isNull(); @@ -556,7 +555,7 @@ public void run() { xdsClient.deliverLdsUpdate(Arrays.asList(f0, f2), f3); verify(mockServer, never()).start(); verify(listener, never()).onServing(); - xdsClient.rdsCount.await(5, TimeUnit.SECONDS); + xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); xdsClient.deliverRdsUpdate("r1", Collections.singletonList(createVirtualHost("virtual-host-1"))); @@ -602,12 +601,11 @@ public void run() { EnvoyServerProtoData.FilterChain f1 = createFilterChain("filter-chain-1", createRds("r0")); EnvoyServerProtoData.FilterChain f2 = createFilterChain("filter-chain-2", createRds("r0")); - xdsClient.rdsCount = new CountDownLatch(1); xdsClient.deliverLdsUpdate(Arrays.asList(f0, f1), f2); assertThat(start.isDone()).isFalse(); assertThat(selectorManager.getSelectorToUpdateSelector()).isNull(); - xdsClient.rdsCount.await(5, TimeUnit.SECONDS); + xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); xdsClient.deliverRdsUpdate("r0", Collections.singletonList(createVirtualHost("virtual-host-0"))); start.get(5000, TimeUnit.MILLISECONDS); @@ -633,9 +631,9 @@ public void run() { EnvoyServerProtoData.FilterChain f3 = createFilterChain("filter-chain-3", createRds("r0")); EnvoyServerProtoData.FilterChain f4 = createFilterChain("filter-chain-4", createRds("r1")); EnvoyServerProtoData.FilterChain f5 = createFilterChain("filter-chain-4", createRds("r1")); - xdsClient.rdsCount = new CountDownLatch(1); + xdsClient.setExpectedRdsCount(1); xdsClient.deliverLdsUpdate(Arrays.asList(f5, f3), f4); - xdsClient.rdsCount.await(5, TimeUnit.SECONDS); + xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); xdsClient.deliverRdsUpdate("r1", Collections.singletonList(createVirtualHost("virtual-host-1"))); xdsClient.deliverRdsUpdate("r0", @@ -688,7 +686,7 @@ public void run() { EnvoyServerProtoData.FilterChain f0 = createFilterChain("filter-chain-0", hcmVirtual); EnvoyServerProtoData.FilterChain f1 = createFilterChain("filter-chain-1", createRds("r0")); xdsClient.deliverLdsUpdate(Arrays.asList(f0, f1), null); - xdsClient.rdsCount.await(); + 
xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); xdsClient.rdsWatchers.get("r0").onError(Status.CANCELLED); start.get(5000, TimeUnit.MILLISECONDS); assertThat(selectorManager.getSelectorToUpdateSelector().getRoutingConfigs().size()) @@ -1235,7 +1233,7 @@ public ServerCall.Listener interceptCall(ServerCall Date: Mon, 2 Jun 2025 20:00:15 +0530 Subject: [PATCH 11/15] Revert "xds: xDS-based HTTP CONNECT configuration (#12099)" (#12120) This reverts commit 7776d3712085d6cd8abd2780e5602c8b00801358. --- xds/BUILD.bazel | 1 - .../java/io/grpc/xds/CdsLoadBalancer2.java | 4 +- .../grpc/xds/ClusterResolverLoadBalancer.java | 46 +-- .../ClusterResolverLoadBalancerProvider.java | 9 +- xds/src/main/java/io/grpc/xds/Endpoints.java | 20 +- .../io/grpc/xds/GcpAuthenticationFilter.java | 12 +- .../java/io/grpc/xds/MetadataRegistry.java | 60 +--- .../java/io/grpc/xds/XdsClusterResource.java | 126 ++++---- .../java/io/grpc/xds/XdsEndpointResource.java | 74 +---- .../io/grpc/xds/CdsLoadBalancer2Test.java | 45 ++- .../xds/ClusterResolverLoadBalancerTest.java | 305 ++++++------------ .../grpc/xds/GrpcXdsClientImplDataTest.java | 120 +------ .../grpc/xds/GrpcXdsClientImplTestBase.java | 16 +- .../test/java/io/grpc/xds/XdsTestUtils.java | 8 +- xds/third_party/envoy/import.sh | 1 - .../v3/upstream_http_11_connect.proto | 38 --- 16 files changed, 220 insertions(+), 665 deletions(-) delete mode 100644 xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto diff --git a/xds/BUILD.bazel b/xds/BUILD.bazel index 53fac28b2da..b235a79c526 100644 --- a/xds/BUILD.bazel +++ b/xds/BUILD.bazel @@ -85,7 +85,6 @@ java_proto_library( "@envoy_api//envoy/extensions/load_balancing_policies/ring_hash/v3:pkg", "@envoy_api//envoy/extensions/load_balancing_policies/round_robin/v3:pkg", "@envoy_api//envoy/extensions/load_balancing_policies/wrr_locality/v3:pkg", - "@envoy_api//envoy/extensions/transport_sockets/http_11_proxy/v3:pkg", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg", "@envoy_api//envoy/service/discovery/v3:pkg", "@envoy_api//envoy/service/load_stats/v3:pkg", diff --git a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer2.java b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer2.java index bb44071a484..04b7663fd35 100644 --- a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer2.java +++ b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer2.java @@ -243,9 +243,7 @@ private void handleClusterDiscovered() { } ClusterResolverConfig config = new ClusterResolverConfig( - Collections.unmodifiableList(instances), - configOrError.getConfig(), - root.result.isHttp11ProxyAvailable()); + Collections.unmodifiableList(instances), configOrError.getConfig()); if (childLb == null) { childLb = lbRegistry.getProvider(CLUSTER_RESOLVER_POLICY_NAME).newLoadBalancer(helper); } diff --git a/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java b/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java index 0fb7cf15909..aff61cf7ada 100644 --- a/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancer.java @@ -25,7 +25,6 @@ import com.google.protobuf.Struct; import io.grpc.Attributes; import io.grpc.EquivalentAddressGroup; -import io.grpc.HttpConnectProxiedSocketAddress; import io.grpc.InternalLogId; import io.grpc.LoadBalancer; import io.grpc.LoadBalancerProvider; @@ -61,8 +60,6 @@ import io.grpc.xds.client.XdsClient.ResourceWatcher; import io.grpc.xds.client.XdsLogger; import 
io.grpc.xds.client.XdsLogger.XdsLogLevel; -import java.net.InetSocketAddress; -import java.net.SocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; @@ -434,18 +431,8 @@ public void run() { .set(XdsAttributes.ATTR_SERVER_WEIGHT, weight) .set(XdsAttributes.ATTR_ADDRESS_NAME, endpoint.hostname()) .build(); - - EquivalentAddressGroup eag; - if (config.isHttp11ProxyAvailable()) { - List rewrittenAddresses = new ArrayList<>(); - for (SocketAddress addr : endpoint.eag().getAddresses()) { - rewrittenAddresses.add(rewriteAddress( - addr, endpoint.endpointMetadata(), localityLbInfo.localityMetadata())); - } - eag = new EquivalentAddressGroup(rewrittenAddresses, attr); - } else { - eag = new EquivalentAddressGroup(endpoint.eag().getAddresses(), attr); - } + EquivalentAddressGroup eag = new EquivalentAddressGroup( + endpoint.eag().getAddresses(), attr); eag = AddressFilter.setPathFilter(eag, Arrays.asList(priorityName, localityName)); addresses.add(eag); } @@ -483,35 +470,6 @@ public void run() { new EndpointsUpdated().run(); } - private SocketAddress rewriteAddress(SocketAddress addr, - ImmutableMap endpointMetadata, - ImmutableMap localityMetadata) { - if (!(addr instanceof InetSocketAddress)) { - return addr; - } - - SocketAddress proxyAddress; - try { - proxyAddress = (SocketAddress) endpointMetadata.get( - "envoy.http11_proxy_transport_socket.proxy_address"); - if (proxyAddress == null) { - proxyAddress = (SocketAddress) localityMetadata.get( - "envoy.http11_proxy_transport_socket.proxy_address"); - } - } catch (ClassCastException e) { - return addr; - } - - if (proxyAddress == null) { - return addr; - } - - return HttpConnectProxiedSocketAddress.newBuilder() - .setTargetAddress((InetSocketAddress) addr) - .setProxyAddress(proxyAddress) - .build(); - } - private List generatePriorityNames(String name, Map localityLbEndpoints) { TreeMap> todo = new TreeMap<>(); diff --git a/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancerProvider.java b/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancerProvider.java index b5dcb271368..2301cb670e0 100644 --- a/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancerProvider.java +++ b/xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancerProvider.java @@ -74,17 +74,10 @@ static final class ClusterResolverConfig { final List discoveryMechanisms; // GracefulSwitch configuration final Object lbConfig; - private final boolean isHttp11ProxyAvailable; - ClusterResolverConfig(List discoveryMechanisms, Object lbConfig, - boolean isHttp11ProxyAvailable) { + ClusterResolverConfig(List discoveryMechanisms, Object lbConfig) { this.discoveryMechanisms = checkNotNull(discoveryMechanisms, "discoveryMechanisms"); this.lbConfig = checkNotNull(lbConfig, "lbConfig"); - this.isHttp11ProxyAvailable = isHttp11ProxyAvailable; - } - - boolean isHttp11ProxyAvailable() { - return isHttp11ProxyAvailable; } @Override diff --git a/xds/src/main/java/io/grpc/xds/Endpoints.java b/xds/src/main/java/io/grpc/xds/Endpoints.java index b0d97d42c11..7d7aa3e386d 100644 --- a/xds/src/main/java/io/grpc/xds/Endpoints.java +++ b/xds/src/main/java/io/grpc/xds/Endpoints.java @@ -21,7 +21,6 @@ import com.google.auto.value.AutoValue; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import io.grpc.EquivalentAddressGroup; import java.net.InetSocketAddress; import java.util.List; @@ -42,13 +41,11 @@ abstract static class LocalityLbEndpoints { // 
Locality's priority level. abstract int priority(); - abstract ImmutableMap localityMetadata(); - static LocalityLbEndpoints create(List endpoints, int localityWeight, - int priority, ImmutableMap localityMetadata) { + int priority) { checkArgument(localityWeight > 0, "localityWeight must be greater than 0"); return new AutoValue_Endpoints_LocalityLbEndpoints( - ImmutableList.copyOf(endpoints), localityWeight, priority, localityMetadata); + ImmutableList.copyOf(endpoints), localityWeight, priority); } } @@ -66,20 +63,17 @@ abstract static class LbEndpoint { abstract String hostname(); - abstract ImmutableMap endpointMetadata(); - static LbEndpoint create(EquivalentAddressGroup eag, int loadBalancingWeight, - boolean isHealthy, String hostname, ImmutableMap endpointMetadata) { - return new AutoValue_Endpoints_LbEndpoint( - eag, loadBalancingWeight, isHealthy, hostname, endpointMetadata); + boolean isHealthy, String hostname) { + return new AutoValue_Endpoints_LbEndpoint(eag, loadBalancingWeight, isHealthy, hostname); } // Only for testing. @VisibleForTesting - static LbEndpoint create(String address, int port, int loadBalancingWeight, boolean isHealthy, - String hostname, ImmutableMap endpointMetadata) { + static LbEndpoint create( + String address, int port, int loadBalancingWeight, boolean isHealthy, String hostname) { return LbEndpoint.create(new EquivalentAddressGroup(new InetSocketAddress(address, port)), - loadBalancingWeight, isHealthy, hostname, endpointMetadata); + loadBalancingWeight, isHealthy, hostname); } } diff --git a/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java b/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java index 41687817c47..7ed617c9843 100644 --- a/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java +++ b/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java @@ -36,7 +36,6 @@ import io.grpc.Status; import io.grpc.auth.MoreCallCredentials; import io.grpc.xds.MetadataRegistry.MetadataValueParser; -import io.grpc.xds.client.XdsResourceType.ResourceInvalidException; import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.ScheduledExecutorService; @@ -241,16 +240,11 @@ public String getTypeUrl() { } @Override - public String parse(Any any) throws ResourceInvalidException { - Audience audience; - try { - audience = any.unpack(Audience.class); - } catch (InvalidProtocolBufferException ex) { - throw new ResourceInvalidException("Invalid Resource in address proto", ex); - } + public String parse(Any any) throws InvalidProtocolBufferException { + Audience audience = any.unpack(Audience.class); String url = audience.getUrl(); if (url.isEmpty()) { - throw new ResourceInvalidException( + throw new InvalidProtocolBufferException( "Audience URL is empty. 
Metadata value must contain a valid URL."); } return url; diff --git a/xds/src/main/java/io/grpc/xds/MetadataRegistry.java b/xds/src/main/java/io/grpc/xds/MetadataRegistry.java index b79a61a261a..8243b6a6f0f 100644 --- a/xds/src/main/java/io/grpc/xds/MetadataRegistry.java +++ b/xds/src/main/java/io/grpc/xds/MetadataRegistry.java @@ -17,14 +17,9 @@ package io.grpc.xds; import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableMap; import com.google.protobuf.Any; -import com.google.protobuf.Struct; -import io.envoyproxy.envoy.config.core.v3.Metadata; +import com.google.protobuf.InvalidProtocolBufferException; import io.grpc.xds.GcpAuthenticationFilter.AudienceMetadataParser; -import io.grpc.xds.XdsEndpointResource.AddressMetadataParser; -import io.grpc.xds.client.XdsResourceType.ResourceInvalidException; -import io.grpc.xds.internal.ProtobufJsonConverter; import java.util.HashMap; import java.util.Map; @@ -41,7 +36,6 @@ final class MetadataRegistry { private MetadataRegistry() { registerParser(new AudienceMetadataParser()); - registerParser(new AddressMetadataParser()); } static MetadataRegistry getInstance() { @@ -61,54 +55,6 @@ void removeParser(MetadataValueParser parser) { supportedParsers.remove(parser.getTypeUrl()); } - /** - * Parses cluster metadata into a structured map. - * - *

Values in {@code typed_filter_metadata} take precedence over - * {@code filter_metadata} when keys overlap, following Envoy API behavior. See - * - * Envoy metadata documentation for details. - * - * @param metadata the {@link Metadata} containing the fields to parse. - * @return an immutable map of parsed metadata. - * @throws ResourceInvalidException if parsing {@code typed_filter_metadata} fails. - */ - public ImmutableMap parseMetadata(Metadata metadata) - throws ResourceInvalidException { - ImmutableMap.Builder parsedMetadata = ImmutableMap.builder(); - - // Process typed_filter_metadata - for (Map.Entry entry : metadata.getTypedFilterMetadataMap().entrySet()) { - String key = entry.getKey(); - Any value = entry.getValue(); - MetadataValueParser parser = findParser(value.getTypeUrl()); - if (parser != null) { - try { - Object parsedValue = parser.parse(value); - parsedMetadata.put(key, parsedValue); - } catch (ResourceInvalidException e) { - throw new ResourceInvalidException( - String.format("Failed to parse metadata key: %s, type: %s. Error: %s", - key, value.getTypeUrl(), e.getMessage()), e); - } - } - } - // building once to reuse in the next loop - ImmutableMap intermediateParsedMetadata = parsedMetadata.build(); - - // Process filter_metadata for remaining keys - for (Map.Entry entry : metadata.getFilterMetadataMap().entrySet()) { - String key = entry.getKey(); - if (!intermediateParsedMetadata.containsKey(key)) { - Struct structValue = entry.getValue(); - Object jsonValue = ProtobufJsonConverter.convertToJson(structValue); - parsedMetadata.put(key, jsonValue); - } - } - - return parsedMetadata.build(); - } - interface MetadataValueParser { String getTypeUrl(); @@ -118,8 +64,8 @@ interface MetadataValueParser { * * @param any the {@link Any} object to parse. * @return the parsed metadata value. - * @throws ResourceInvalidException if the parsing fails. + * @throws InvalidProtocolBufferException if the parsing fails. 
*/ - Object parse(Any any) throws ResourceInvalidException; + Object parse(Any any) throws InvalidProtocolBufferException; } } diff --git a/xds/src/main/java/io/grpc/xds/XdsClusterResource.java b/xds/src/main/java/io/grpc/xds/XdsClusterResource.java index cfc74f3ca70..626d61c1f55 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClusterResource.java +++ b/xds/src/main/java/io/grpc/xds/XdsClusterResource.java @@ -25,6 +25,7 @@ import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import com.google.protobuf.Any; import com.google.protobuf.Duration; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Message; @@ -32,11 +33,10 @@ import com.google.protobuf.util.Durations; import io.envoyproxy.envoy.config.cluster.v3.CircuitBreakers.Thresholds; import io.envoyproxy.envoy.config.cluster.v3.Cluster; +import io.envoyproxy.envoy.config.core.v3.Metadata; import io.envoyproxy.envoy.config.core.v3.RoutingPriority; import io.envoyproxy.envoy.config.core.v3.SocketAddress; -import io.envoyproxy.envoy.config.core.v3.TransportSocket; import io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment; -import io.envoyproxy.envoy.extensions.transport_sockets.http_11_proxy.v3.Http11ProxyUpstreamTransport; import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext; import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext; import io.grpc.LoadBalancerRegistry; @@ -46,12 +46,15 @@ import io.grpc.internal.ServiceConfigUtil.LbConfig; import io.grpc.xds.EnvoyServerProtoData.OutlierDetection; import io.grpc.xds.EnvoyServerProtoData.UpstreamTlsContext; +import io.grpc.xds.MetadataRegistry.MetadataValueParser; import io.grpc.xds.XdsClusterResource.CdsUpdate; import io.grpc.xds.client.XdsClient.ResourceUpdate; import io.grpc.xds.client.XdsResourceType; +import io.grpc.xds.internal.ProtobufJsonConverter; import io.grpc.xds.internal.security.CommonTlsContextUtil; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Set; import javax.annotation.Nullable; @@ -64,8 +67,6 @@ class XdsClusterResource extends XdsResourceType { @VisibleForTesting public static boolean enableSystemRootCerts = GrpcUtil.getFlag("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false); - static boolean isEnabledXdsHttpConnect = - GrpcUtil.getFlag("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false); @VisibleForTesting static final String AGGREGATE_CLUSTER_TYPE_NAME = "envoy.clusters.aggregate"; @@ -77,9 +78,6 @@ class XdsClusterResource extends XdsResourceType { "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; private static final String TYPE_URL_UPSTREAM_TLS_CONTEXT_V2 = "type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext"; - static final String TRANSPORT_SOCKET_NAME_HTTP11_PROXY = - "type.googleapis.com/envoy.extensions.transport_sockets.http_11_proxy.v3" - + ".Http11ProxyUpstreamTransport"; private final LoadBalancerRegistry loadBalancerRegistry = LoadBalancerRegistry.getDefaultRegistry(); @@ -179,11 +177,10 @@ static CdsUpdate processCluster(Cluster cluster, ImmutableMap.copyOf(cluster.getMetadata().getFilterMetadataMap())); try { - MetadataRegistry registry = MetadataRegistry.getInstance(); ImmutableMap parsedFilterMetadata = - registry.parseMetadata(cluster.getMetadata()); + parseClusterMetadata(cluster.getMetadata()); updateBuilder.parsedMetadata(parsedFilterMetadata); - } catch (ResourceInvalidException e) { + } 
catch (InvalidProtocolBufferException e) { throw new ResourceInvalidException( "Failed to parse xDS filter metadata for cluster '" + cluster.getName() + "': " + e.getMessage(), e); @@ -192,6 +189,49 @@ static CdsUpdate processCluster(Cluster cluster, return updateBuilder.build(); } + /** + * Parses cluster metadata into a structured map. + * + *

Values in {@code typed_filter_metadata} take precedence over + * {@code filter_metadata} when keys overlap, following Envoy API behavior. See + * + * Envoy metadata documentation for details. + * + * @param metadata the {@link Metadata} containing the fields to parse. + * @return an immutable map of parsed metadata. + * @throws InvalidProtocolBufferException if parsing {@code typed_filter_metadata} fails. + */ + private static ImmutableMap parseClusterMetadata(Metadata metadata) + throws InvalidProtocolBufferException { + ImmutableMap.Builder parsedMetadata = ImmutableMap.builder(); + + MetadataRegistry registry = MetadataRegistry.getInstance(); + // Process typed_filter_metadata + for (Map.Entry entry : metadata.getTypedFilterMetadataMap().entrySet()) { + String key = entry.getKey(); + Any value = entry.getValue(); + MetadataValueParser parser = registry.findParser(value.getTypeUrl()); + if (parser != null) { + Object parsedValue = parser.parse(value); + parsedMetadata.put(key, parsedValue); + } + } + // building once to reuse in the next loop + ImmutableMap intermediateParsedMetadata = parsedMetadata.build(); + + // Process filter_metadata for remaining keys + for (Map.Entry entry : metadata.getFilterMetadataMap().entrySet()) { + String key = entry.getKey(); + if (!intermediateParsedMetadata.containsKey(key)) { + Struct structValue = entry.getValue(); + Object jsonValue = ProtobufJsonConverter.convertToJson(structValue); + parsedMetadata.put(key, jsonValue); + } + } + + return parsedMetadata.build(); + } + private static StructOrError parseAggregateCluster(Cluster cluster) { String clusterName = cluster.getName(); Cluster.CustomClusterType customType = cluster.getClusterType(); @@ -219,7 +259,6 @@ private static StructOrError parseNonAggregateCluster( Long maxConcurrentRequests = null; UpstreamTlsContext upstreamTlsContext = null; OutlierDetection outlierDetection = null; - boolean isHttp11ProxyAvailable = false; if (cluster.hasLrsServer()) { if (!cluster.getLrsServer().hasSelf()) { return StructOrError.fromError( @@ -242,43 +281,17 @@ private static StructOrError parseNonAggregateCluster( return StructOrError.fromError("Cluster " + clusterName + ": transport-socket-matches not supported."); } - boolean hasTransportSocket = cluster.hasTransportSocket(); - TransportSocket transportSocket = cluster.getTransportSocket(); - - if (hasTransportSocket && !TRANSPORT_SOCKET_NAME_TLS.equals(transportSocket.getName()) - && !(isEnabledXdsHttpConnect - && TRANSPORT_SOCKET_NAME_HTTP11_PROXY.equals(transportSocket.getName()))) { - return StructOrError.fromError( - "transport-socket with name " + transportSocket.getName() + " not supported."); - } - - if (hasTransportSocket && isEnabledXdsHttpConnect - && TRANSPORT_SOCKET_NAME_HTTP11_PROXY.equals(transportSocket.getName())) { - isHttp11ProxyAvailable = true; - try { - Http11ProxyUpstreamTransport wrappedTransportSocket = transportSocket - .getTypedConfig().unpack(io.envoyproxy.envoy.extensions.transport_sockets - .http_11_proxy.v3.Http11ProxyUpstreamTransport.class); - hasTransportSocket = wrappedTransportSocket.hasTransportSocket(); - transportSocket = wrappedTransportSocket.getTransportSocket(); - } catch (InvalidProtocolBufferException e) { - return StructOrError.fromError( - "Cluster " + clusterName + ": malformed Http11ProxyUpstreamTransport: " + e); - } catch (ClassCastException e) { - return StructOrError.fromError( - "Cluster " + clusterName - + ": invalid transport_socket type in Http11ProxyUpstreamTransport"); + if 
(cluster.hasTransportSocket()) { + if (!TRANSPORT_SOCKET_NAME_TLS.equals(cluster.getTransportSocket().getName())) { + return StructOrError.fromError("transport-socket with name " + + cluster.getTransportSocket().getName() + " not supported."); } - } - - if (hasTransportSocket && TRANSPORT_SOCKET_NAME_TLS.equals(transportSocket.getName())) { try { upstreamTlsContext = UpstreamTlsContext.fromEnvoyProtoUpstreamTlsContext( validateUpstreamTlsContext( - unpackCompatibleType(transportSocket.getTypedConfig(), - io.envoyproxy.envoy.extensions - .transport_sockets.tls.v3.UpstreamTlsContext.class, - TYPE_URL_UPSTREAM_TLS_CONTEXT, TYPE_URL_UPSTREAM_TLS_CONTEXT_V2), + unpackCompatibleType(cluster.getTransportSocket().getTypedConfig(), + io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.class, + TYPE_URL_UPSTREAM_TLS_CONTEXT, TYPE_URL_UPSTREAM_TLS_CONTEXT_V2), certProviderInstances)); } catch (InvalidProtocolBufferException | ResourceInvalidException e) { return StructOrError.fromError( @@ -316,10 +329,9 @@ private static StructOrError parseNonAggregateCluster( return StructOrError.fromError( "EDS service_name must be set when Cluster resource has an xdstp name"); } - return StructOrError.fromStruct(CdsUpdate.forEds( clusterName, edsServiceName, lrsServerInfo, maxConcurrentRequests, upstreamTlsContext, - outlierDetection, isHttp11ProxyAvailable)); + outlierDetection)); } else if (type.equals(Cluster.DiscoveryType.LOGICAL_DNS)) { if (!cluster.hasLoadAssignment()) { return StructOrError.fromError( @@ -354,8 +366,7 @@ private static StructOrError parseNonAggregateCluster( String dnsHostName = String.format( Locale.US, "%s:%d", socketAddress.getAddress(), socketAddress.getPortValue()); return StructOrError.fromStruct(CdsUpdate.forLogicalDns( - clusterName, dnsHostName, lrsServerInfo, maxConcurrentRequests, - upstreamTlsContext, isHttp11ProxyAvailable)); + clusterName, dnsHostName, lrsServerInfo, maxConcurrentRequests, upstreamTlsContext)); } return StructOrError.fromError( "Cluster " + clusterName + ": unsupported built-in discovery type: " + type); @@ -609,8 +620,6 @@ abstract static class CdsUpdate implements ResourceUpdate { @Nullable abstract UpstreamTlsContext upstreamTlsContext(); - abstract boolean isHttp11ProxyAvailable(); - // List of underlying clusters making of this aggregate cluster. // Only valid for AGGREGATE cluster. 
@Nullable @@ -631,8 +640,7 @@ private static Builder newBuilder(String clusterName) { .maxRingSize(0) .choiceCount(0) .filterMetadata(ImmutableMap.of()) - .parsedMetadata(ImmutableMap.of()) - .isHttp11ProxyAvailable(false); + .parsedMetadata(ImmutableMap.of()); } static Builder forAggregate(String clusterName, List prioritizedClusterNames) { @@ -645,30 +653,26 @@ static Builder forAggregate(String clusterName, List prioritizedClusterN static Builder forEds(String clusterName, @Nullable String edsServiceName, @Nullable ServerInfo lrsServerInfo, @Nullable Long maxConcurrentRequests, @Nullable UpstreamTlsContext upstreamTlsContext, - @Nullable OutlierDetection outlierDetection, - boolean isHttp11ProxyAvailable) { + @Nullable OutlierDetection outlierDetection) { return newBuilder(clusterName) .clusterType(ClusterType.EDS) .edsServiceName(edsServiceName) .lrsServerInfo(lrsServerInfo) .maxConcurrentRequests(maxConcurrentRequests) .upstreamTlsContext(upstreamTlsContext) - .outlierDetection(outlierDetection) - .isHttp11ProxyAvailable(isHttp11ProxyAvailable); + .outlierDetection(outlierDetection); } static Builder forLogicalDns(String clusterName, String dnsHostName, @Nullable ServerInfo lrsServerInfo, @Nullable Long maxConcurrentRequests, - @Nullable UpstreamTlsContext upstreamTlsContext, - boolean isHttp11ProxyAvailable) { + @Nullable UpstreamTlsContext upstreamTlsContext) { return newBuilder(clusterName) .clusterType(ClusterType.LOGICAL_DNS) .dnsHostName(dnsHostName) .lrsServerInfo(lrsServerInfo) .maxConcurrentRequests(maxConcurrentRequests) - .upstreamTlsContext(upstreamTlsContext) - .isHttp11ProxyAvailable(isHttp11ProxyAvailable); + .upstreamTlsContext(upstreamTlsContext); } enum ClusterType { @@ -745,8 +749,6 @@ Builder leastRequestLbPolicy(Integer choiceCount) { // Private, use one of the static factory methods instead. protected abstract Builder maxConcurrentRequests(Long maxConcurrentRequests); - protected abstract Builder isHttp11ProxyAvailable(boolean isHttp11ProxyAvailable); - // Private, use one of the static factory methods instead. 
protected abstract Builder upstreamTlsContext(UpstreamTlsContext upstreamTlsContext); diff --git a/xds/src/main/java/io/grpc/xds/XdsEndpointResource.java b/xds/src/main/java/io/grpc/xds/XdsEndpointResource.java index 11111fa51ca..6a3cd35bd59 100644 --- a/xds/src/main/java/io/grpc/xds/XdsEndpointResource.java +++ b/xds/src/main/java/io/grpc/xds/XdsEndpointResource.java @@ -20,14 +20,9 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; -import com.google.common.collect.ImmutableMap; -import com.google.common.net.InetAddresses; -import com.google.protobuf.Any; -import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Message; import io.envoyproxy.envoy.config.core.v3.Address; import io.envoyproxy.envoy.config.core.v3.HealthStatus; -import io.envoyproxy.envoy.config.core.v3.SocketAddress; import io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment; import io.envoyproxy.envoy.config.endpoint.v3.Endpoint; import io.envoyproxy.envoy.type.v3.FractionalPercent; @@ -35,7 +30,6 @@ import io.grpc.internal.GrpcUtil; import io.grpc.xds.Endpoints.DropOverload; import io.grpc.xds.Endpoints.LocalityLbEndpoints; -import io.grpc.xds.MetadataRegistry.MetadataValueParser; import io.grpc.xds.XdsEndpointResource.EdsUpdate; import io.grpc.xds.client.Locality; import io.grpc.xds.client.XdsClient.ResourceUpdate; @@ -191,8 +185,7 @@ private static int getRatePerMillion(FractionalPercent percent) { @VisibleForTesting @Nullable static StructOrError parseLocalityLbEndpoints( - io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto) - throws ResourceInvalidException { + io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto) { // Filter out localities without or with 0 weight. if (!proto.hasLoadBalancingWeight() || proto.getLoadBalancingWeight().getValue() < 1) { return null; @@ -200,15 +193,6 @@ static StructOrError parseLocalityLbEndpoints( if (proto.getPriority() < 0) { return StructOrError.fromError("negative priority"); } - - ImmutableMap localityMetadata; - MetadataRegistry registry = MetadataRegistry.getInstance(); - try { - localityMetadata = registry.parseMetadata(proto.getMetadata()); - } catch (ResourceInvalidException e) { - throw new ResourceInvalidException("Failed to parse Locality Endpoint metadata: " - + e.getMessage(), e); - } List endpoints = new ArrayList<>(proto.getLbEndpointsCount()); for (io.envoyproxy.envoy.config.endpoint.v3.LbEndpoint endpoint : proto.getLbEndpointsList()) { // The endpoint field of each lb_endpoints must be set. 
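[Editor's aside] The parseClusterMetadata javadoc restored above describes the merge rule for cluster metadata: entries parsed from `typed_filter_metadata` take precedence, and `filter_metadata` only contributes keys that are not already present. Below is a minimal, illustrative plain-Java sketch of that precedence rule only; it uses ordinary maps and hypothetical key names rather than the Envoy `Metadata` proto or the grpc-java parsers.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public final class MetadataPrecedenceSketch {
  // Entries already produced from typed_filter_metadata win; filter_metadata
  // values (here pre-converted to plain objects) only fill the remaining keys.
  static Map<String, Object> merge(
      Map<String, Object> typedFilterMetadata, Map<String, Object> filterMetadata) {
    Map<String, Object> merged = new LinkedHashMap<>(typedFilterMetadata);
    for (Map.Entry<String, Object> e : filterMetadata.entrySet()) {
      merged.putIfAbsent(e.getKey(), e.getValue());
    }
    return merged;
  }

  public static void main(String[] args) {
    Map<String, Object> typed = Map.of("envoy.example.audience", "https://example.test");
    Map<String, Object> untyped = Map.of(
        "envoy.example.audience", "ignored-because-typed-wins",
        "envoy.example.extra", "kept");
    // The audience key keeps the typed value; the extra key is carried over.
    System.out.println(merge(typed, untyped));
  }
}
```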
@@ -216,13 +200,6 @@ static StructOrError parseLocalityLbEndpoints( if (!endpoint.hasEndpoint() || !endpoint.getEndpoint().hasAddress()) { return StructOrError.fromError("LbEndpoint with no endpoint/address"); } - ImmutableMap endpointMetadata; - try { - endpointMetadata = registry.parseMetadata(endpoint.getMetadata()); - } catch (ResourceInvalidException e) { - throw new ResourceInvalidException("Failed to parse Endpoint metadata: " - + e.getMessage(), e); - } List addresses = new ArrayList<>(); addresses.add(getInetSocketAddress(endpoint.getEndpoint().getAddress())); @@ -237,12 +214,10 @@ static StructOrError parseLocalityLbEndpoints( endpoints.add(Endpoints.LbEndpoint.create( new EquivalentAddressGroup(addresses), endpoint.getLoadBalancingWeight().getValue(), isHealthy, - endpoint.getEndpoint().getHostname(), - endpointMetadata)); + endpoint.getEndpoint().getHostname())); } return StructOrError.fromStruct(Endpoints.LocalityLbEndpoints.create( - endpoints, proto.getLoadBalancingWeight().getValue(), - proto.getPriority(), localityMetadata)); + endpoints, proto.getLoadBalancingWeight().getValue(), proto.getPriority())); } private static InetSocketAddress getInetSocketAddress(Address address) { @@ -295,47 +270,4 @@ public String toString() { .toString(); } } - - public static class AddressMetadataParser implements MetadataValueParser { - - @Override - public String getTypeUrl() { - return "type.googleapis.com/envoy.config.core.v3.Address"; - } - - @Override - public java.net.SocketAddress parse(Any any) throws ResourceInvalidException { - SocketAddress socketAddress; - try { - socketAddress = any.unpack(Address.class).getSocketAddress(); - } catch (InvalidProtocolBufferException ex) { - throw new ResourceInvalidException("Invalid Resource in address proto", ex); - } - validateAddress(socketAddress); - - String ip = socketAddress.getAddress(); - int port = socketAddress.getPortValue(); - - try { - return new InetSocketAddress(InetAddresses.forString(ip), port); - } catch (IllegalArgumentException e) { - throw createException("Invalid IP address or port: " + ip + ":" + port); - } - } - - private void validateAddress(SocketAddress socketAddress) throws ResourceInvalidException { - if (socketAddress.getAddress().isEmpty()) { - throw createException("Address field is empty or invalid."); - } - long port = Integer.toUnsignedLong(socketAddress.getPortValue()); - if (port > 65535) { - throw createException(String.format("Port value %d out of range 1-65535.", port)); - } - } - - private ResourceInvalidException createException(String message) { - return new ResourceInvalidException( - "Failed to parse envoy.config.core.v3.Address: " + message); - } - } } diff --git a/xds/src/test/java/io/grpc/xds/CdsLoadBalancer2Test.java b/xds/src/test/java/io/grpc/xds/CdsLoadBalancer2Test.java index 479bde76ce5..82a61e79abf 100644 --- a/xds/src/test/java/io/grpc/xds/CdsLoadBalancer2Test.java +++ b/xds/src/test/java/io/grpc/xds/CdsLoadBalancer2Test.java @@ -179,7 +179,7 @@ public void tearDown() { public void discoverTopLevelEdsCluster() { CdsUpdate update = CdsUpdate.forEds(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, upstreamTlsContext, - outlierDetection, false) + outlierDetection) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(childBalancers).hasSize(1); @@ -198,8 +198,7 @@ public void discoverTopLevelEdsCluster() { @Test public void discoverTopLevelLogicalDnsCluster() { CdsUpdate update = - CdsUpdate.forLogicalDns(CLUSTER, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, 
upstreamTlsContext, - false) + CdsUpdate.forLogicalDns(CLUSTER, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, upstreamTlsContext) .leastRequestLbPolicy(3).build(); xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(childBalancers).hasSize(1); @@ -233,7 +232,7 @@ public void nonAggregateCluster_resourceNotExist_returnErrorPicker() { @Test public void nonAggregateCluster_resourceUpdate() { CdsUpdate update = - CdsUpdate.forEds(CLUSTER, null, null, 100L, upstreamTlsContext, outlierDetection, false) + CdsUpdate.forEds(CLUSTER, null, null, 100L, upstreamTlsContext, outlierDetection) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(childBalancers).hasSize(1); @@ -244,7 +243,7 @@ public void nonAggregateCluster_resourceUpdate() { 100L, upstreamTlsContext, outlierDetection); update = CdsUpdate.forEds(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, 200L, null, - outlierDetection, false).roundRobinLbPolicy().build(); + outlierDetection).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); childLbConfig = (ClusterResolverConfig) childBalancer.config; instance = Iterables.getOnlyElement(childLbConfig.discoveryMechanisms); @@ -255,8 +254,7 @@ public void nonAggregateCluster_resourceUpdate() { @Test public void nonAggregateCluster_resourceRevoked() { CdsUpdate update = - CdsUpdate.forLogicalDns(CLUSTER, DNS_HOST_NAME, null, 100L, upstreamTlsContext, - false) + CdsUpdate.forLogicalDns(CLUSTER, DNS_HOST_NAME, null, 100L, upstreamTlsContext) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(childBalancers).hasSize(1); @@ -300,16 +298,16 @@ public void discoverAggregateCluster() { CLUSTER, cluster1, cluster2, cluster3, cluster4); assertThat(childBalancers).isEmpty(); CdsUpdate update3 = CdsUpdate.forEds(cluster3, EDS_SERVICE_NAME, LRS_SERVER_INFO, 200L, - upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster3, update3); assertThat(childBalancers).isEmpty(); CdsUpdate update2 = - CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, null, 100L, null, false) + CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, null, 100L, null) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster2, update2); assertThat(childBalancers).isEmpty(); CdsUpdate update4 = - CdsUpdate.forEds(cluster4, null, LRS_SERVER_INFO, 300L, null, outlierDetection, false) + CdsUpdate.forEds(cluster4, null, LRS_SERVER_INFO, 300L, null, outlierDetection) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster4, update4); assertThat(childBalancers).hasSize(1); // all non-aggregate clusters discovered @@ -364,11 +362,10 @@ public void aggregateCluster_descendantClustersRevoked() { xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(xdsClient.watchers.keySet()).containsExactly(CLUSTER, cluster1, cluster2); CdsUpdate update1 = CdsUpdate.forEds(cluster1, EDS_SERVICE_NAME, LRS_SERVER_INFO, 200L, - upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster1, update1); CdsUpdate update2 = - CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, null, - false) + CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, null) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster2, update2); FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); 
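[Editor's aside] The test updates in this hunk track the reverted `CdsUpdate.forEds`/`forLogicalDns` signatures, which drop the HTTP CONNECT flag. For reference, the removed feature (see the deleted `rewriteAddress` code in ClusterResolverLoadBalancer earlier in this patch) wrapped each endpoint address in an `HttpConnectProxiedSocketAddress` when xDS metadata supplied a proxy address. The sketch below is a rough, hypothetical illustration of that wrapping, not the grpc-java implementation; the metadata key mirrors the one used in the deleted code.

```java
import io.grpc.HttpConnectProxiedSocketAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.Map;

final class ProxyRewriteSketch {
  // Wrap the endpoint address so the transport first issues an HTTP CONNECT to
  // the proxy, when a proxy address is present in the (already parsed) metadata.
  static SocketAddress maybeWrap(SocketAddress endpointAddr, Map<String, Object> metadata) {
    Object proxy = metadata.get("envoy.http11_proxy_transport_socket.proxy_address");
    if (!(endpointAddr instanceof InetSocketAddress) || !(proxy instanceof SocketAddress)) {
      return endpointAddr;  // nothing to rewrite
    }
    return HttpConnectProxiedSocketAddress.newBuilder()
        .setTargetAddress((InetSocketAddress) endpointAddr)
        .setProxyAddress((SocketAddress) proxy)
        .build();
  }
}
```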
@@ -415,11 +412,10 @@ public void aggregateCluster_rootClusterRevoked() { xdsClient.deliverCdsUpdate(CLUSTER, update); assertThat(xdsClient.watchers.keySet()).containsExactly(CLUSTER, cluster1, cluster2); CdsUpdate update1 = CdsUpdate.forEds(cluster1, EDS_SERVICE_NAME, LRS_SERVER_INFO, 200L, - upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster1, update1); CdsUpdate update2 = - CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, null, - false) + CdsUpdate.forLogicalDns(cluster2, DNS_HOST_NAME, LRS_SERVER_INFO, 100L, null) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster2, update2); FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); @@ -471,7 +467,7 @@ public void aggregateCluster_intermediateClusterChanges() { xdsClient.deliverCdsUpdate(cluster2, update2); assertThat(xdsClient.watchers.keySet()).containsExactly(CLUSTER, cluster2, cluster3); CdsUpdate update3 = CdsUpdate.forEds(cluster3, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, - upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster3, update3); FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); ClusterResolverConfig childLbConfig = (ClusterResolverConfig) childBalancer.config; @@ -522,7 +518,7 @@ public void aggregateCluster_withLoops() { reset(helper); CdsUpdate update3 = CdsUpdate.forEds(cluster3, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, - upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster3, update3); verify(helper).updateBalancingState( eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture()); @@ -557,7 +553,7 @@ public void aggregateCluster_withLoops_afterEds() { .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster2, update2); CdsUpdate update3 = CdsUpdate.forEds(cluster3, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, - upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster3, update3); // cluster2 (aggr.) 
-> [cluster3 (EDS)] @@ -606,7 +602,7 @@ public void aggregateCluster_duplicateChildren() { // Define EDS cluster CdsUpdate update3 = CdsUpdate.forEds(cluster3, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, - upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster3, update3); // cluster4 (agg) -> [cluster3 (EDS)] with dups (3 copies) @@ -653,8 +649,7 @@ public void aggregateCluster_discoveryErrorAfterChildLbCreated_propagateToChildL .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); CdsUpdate update1 = - CdsUpdate.forLogicalDns(cluster1, DNS_HOST_NAME, LRS_SERVER_INFO, 200L, null, - false) + CdsUpdate.forLogicalDns(cluster1, DNS_HOST_NAME, LRS_SERVER_INFO, 200L, null) .roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(cluster1, update1); FakeLoadBalancer childLb = Iterables.getOnlyElement(childBalancers); @@ -681,7 +676,7 @@ public void handleNameResolutionErrorFromUpstream_beforeChildLbCreated_returnErr @Test public void handleNameResolutionErrorFromUpstream_afterChildLbCreated_fallThrough() { CdsUpdate update = CdsUpdate.forEds(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, - upstreamTlsContext, outlierDetection, false).roundRobinLbPolicy().build(); + upstreamTlsContext, outlierDetection).roundRobinLbPolicy().build(); xdsClient.deliverCdsUpdate(CLUSTER, update); FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); assertThat(childBalancer.shutdown).isFalse(); @@ -697,7 +692,7 @@ public void unknownLbProvider() { try { xdsClient.deliverCdsUpdate(CLUSTER, CdsUpdate.forEds(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, upstreamTlsContext, - outlierDetection, false) + outlierDetection) .lbPolicyConfig(ImmutableMap.of("unknownLb", ImmutableMap.of("foo", "bar"))).build()); } catch (Exception e) { assertThat(e).hasMessageThat().contains("unknownLb"); @@ -711,7 +706,7 @@ public void invalidLbConfig() { try { xdsClient.deliverCdsUpdate(CLUSTER, CdsUpdate.forEds(CLUSTER, EDS_SERVICE_NAME, LRS_SERVER_INFO, 100L, upstreamTlsContext, - outlierDetection, false).lbPolicyConfig( + outlierDetection).lbPolicyConfig( ImmutableMap.of("ring_hash_experimental", ImmutableMap.of("minRingSize", "-1"))) .build()); } catch (Exception e) { diff --git a/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java index 2ae05a7dbf3..2a8617912ea 100644 --- a/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java @@ -36,7 +36,6 @@ import io.grpc.ChannelLogger; import io.grpc.ConnectivityState; import io.grpc.EquivalentAddressGroup; -import io.grpc.HttpConnectProxiedSocketAddress; import io.grpc.InsecureChannelCredentials; import io.grpc.LoadBalancer; import io.grpc.LoadBalancer.Helper; @@ -84,7 +83,6 @@ import io.grpc.xds.client.XdsClient; import io.grpc.xds.client.XdsResourceType; import io.grpc.xds.internal.security.CommonTlsContextTestsUtil; -import java.net.InetSocketAddress; import java.net.SocketAddress; import java.net.URI; import java.net.URISyntaxException; @@ -244,7 +242,7 @@ public void tearDown() { @Test public void edsClustersWithRingHashEndpointLbPolicy() { ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanism1), ringHash, false); + Collections.singletonList(edsDiscoveryMechanism1), ringHash); deliverLbConfig(config); 
assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); assertThat(childBalancers).isEmpty(); @@ -256,18 +254,14 @@ public void edsClustersWithRingHashEndpointLbPolicy() { LocalityLbEndpoints localityLbEndpoints1 = LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(endpoint1, 0 /* loadBalancingWeight */, - true, "hostname1", ImmutableMap.of()), - LbEndpoint.create(endpoint2, 0 /* loadBalancingWeight */, - true, "hostname2", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + LbEndpoint.create(endpoint1, 0 /* loadBalancingWeight */, true, "hostname1"), + LbEndpoint.create(endpoint2, 0 /* loadBalancingWeight */, true, "hostname2")), + 10 /* localityWeight */, 1 /* priority */); LocalityLbEndpoints localityLbEndpoints2 = LocalityLbEndpoints.create( Collections.singletonList( - LbEndpoint.create( - endpoint3, 60 /* loadBalancingWeight */, true, - "hostname3", ImmutableMap.of())), - 50 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + LbEndpoint.create(endpoint3, 60 /* loadBalancingWeight */, true, "hostname3")), + 50 /* localityWeight */, 1 /* priority */); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, ImmutableMap.of(locality1, localityLbEndpoints1, locality2, localityLbEndpoints2)); @@ -310,7 +304,7 @@ public void edsClustersWithRingHashEndpointLbPolicy() { @Test public void edsClustersWithLeastRequestEndpointLbPolicy() { ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanism1), leastRequest, false); + Collections.singletonList(edsDiscoveryMechanism1), leastRequest); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); assertThat(childBalancers).isEmpty(); @@ -320,9 +314,8 @@ public void edsClustersWithLeastRequestEndpointLbPolicy() { LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, - "hostname1", ImmutableMap.of())), - 100 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, "hostname1")), + 100 /* localityWeight */, 1 /* priority */); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, ImmutableMap.of(locality1, localityLbEndpoints)); @@ -357,7 +350,7 @@ public void edsClustersWithLeastRequestEndpointLbPolicy() { @Test public void edsClustersEndpointHostname_addedToAddressAttribute() { ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanismWithOutlierDetection), leastRequest, false); + Collections.singletonList(edsDiscoveryMechanismWithOutlierDetection), leastRequest); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); assertThat(childBalancers).isEmpty(); @@ -367,9 +360,8 @@ public void edsClustersEndpointHostname_addedToAddressAttribute() { LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, - "hostname1", ImmutableMap.of())), - 100 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, "hostname1")), + 100 /* localityWeight */, 1 /* priority */); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, ImmutableMap.of(locality1, localityLbEndpoints)); @@ -381,104 +373,11 @@ public void edsClustersEndpointHostname_addedToAddressAttribute() { 
.get(XdsAttributes.ATTR_ADDRESS_NAME)).isEqualTo("hostname1"); } - @Test - public void endpointAddressRewritten_whenProxyMetadataIsInEndpointMetadata() { - ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanismWithOutlierDetection), leastRequest, true); - deliverLbConfig(config); - assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); - assertThat(childBalancers).isEmpty(); - - EquivalentAddressGroup endpoint = - new EquivalentAddressGroup(InetSocketAddress.createUnresolved("127.0.0.1", 8080)); - - // Proxy address in endpointMetadata (use FakeSocketAddress directly) - SocketAddress proxyAddress = new FakeSocketAddress("127.0.0.2"); - ImmutableMap endpointMetadata = - ImmutableMap.of("envoy.http11_proxy_transport_socket.proxy_address", proxyAddress); - - // No proxy in locality metadata - ImmutableMap localityMetadata = ImmutableMap.of(); - - LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( - Arrays.asList( - LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, - "hostname1", endpointMetadata)), - 100 /* localityWeight */, 1 /* priority */, localityMetadata); - - xdsClient.deliverClusterLoadAssignment( - EDS_SERVICE_NAME1, - ImmutableMap.of(locality1, localityLbEndpoints)); - - assertThat(childBalancers).hasSize(1); - FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); - - // Get the rewritten address - SocketAddress rewrittenAddress = - childBalancer.addresses.get(0).getAddresses().get(0); - assertThat(rewrittenAddress).isInstanceOf(HttpConnectProxiedSocketAddress.class); - HttpConnectProxiedSocketAddress proxiedSocket = - (HttpConnectProxiedSocketAddress) rewrittenAddress; - - // Assert that the target address is the original address - assertThat(proxiedSocket.getTargetAddress()) - .isEqualTo(endpoint.getAddresses().get(0)); - - // Assert that the proxy address is correctly set - assertThat(proxiedSocket.getProxyAddress()).isEqualTo(proxyAddress); - } - - @Test - public void endpointAddressRewritten_whenProxyMetadataIsInLocalityMetadata() { - ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanismWithOutlierDetection), leastRequest, true); - deliverLbConfig(config); - assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); - assertThat(childBalancers).isEmpty(); - - EquivalentAddressGroup endpoint = - new EquivalentAddressGroup(InetSocketAddress.createUnresolved("127.0.0.2", 8080)); - - // No proxy in endpointMetadata - ImmutableMap endpointMetadata = ImmutableMap.of(); - - // Proxy address is now in localityMetadata - SocketAddress proxyAddress = new FakeSocketAddress("proxy-addr"); - ImmutableMap localityMetadata = - ImmutableMap.of("envoy.http11_proxy_transport_socket.proxy_address", proxyAddress); - - LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( - Arrays.asList( - LbEndpoint.create(endpoint, 0 /* loadBalancingWeight */, true, - "hostname2", endpointMetadata)), - 100 /* localityWeight */, 1 /* priority */, localityMetadata); - - xdsClient.deliverClusterLoadAssignment( - EDS_SERVICE_NAME1, - ImmutableMap.of(locality1, localityLbEndpoints)); - - assertThat(childBalancers).hasSize(1); - FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); - - // Get the rewritten address - SocketAddress rewrittenAddress = childBalancer.addresses.get(0).getAddresses().get(0); - - // Assert that the address was rewritten - 
assertThat(rewrittenAddress).isInstanceOf(HttpConnectProxiedSocketAddress.class); - HttpConnectProxiedSocketAddress proxiedSocket = - (HttpConnectProxiedSocketAddress) rewrittenAddress; - - // Assert that the target address is the original address - assertThat(proxiedSocket.getTargetAddress()).isEqualTo(endpoint.getAddresses().get(0)); - - // Assert that the proxy address is correctly set from locality metadata - assertThat(proxiedSocket.getProxyAddress()).isEqualTo(proxyAddress); - } @Test public void onlyEdsClusters_receivedEndpoints() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1, EDS_SERVICE_NAME2); assertThat(childBalancers).isEmpty(); @@ -492,21 +391,17 @@ public void onlyEdsClusters_receivedEndpoints() { LocalityLbEndpoints localityLbEndpoints1 = LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(endpoint1, 100, - true, "hostname1", ImmutableMap.of()), - LbEndpoint.create(endpoint2, 100, - true, "hostname1", ImmutableMap.of())), - 70 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + LbEndpoint.create(endpoint1, 100, true, "hostname1"), + LbEndpoint.create(endpoint2, 100, true, "hostname1")), + 70 /* localityWeight */, 1 /* priority */); LocalityLbEndpoints localityLbEndpoints2 = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint3, 100, true, - "hostname2", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + Collections.singletonList(LbEndpoint.create(endpoint3, 100, true, "hostname2")), + 10 /* localityWeight */, 1 /* priority */); LocalityLbEndpoints localityLbEndpoints3 = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint4, 100, true, - "hostname3", ImmutableMap.of())), - 20 /* localityWeight */, 2 /* priority */, ImmutableMap.of()); + Collections.singletonList(LbEndpoint.create(endpoint4, 100, true, "hostname3")), + 20 /* localityWeight */, 2 /* priority */); String priority1 = CLUSTER2 + "[child1]"; String priority2 = CLUSTER2 + "[child2]"; String priority3 = CLUSTER1 + "[child1]"; @@ -594,7 +489,7 @@ public void onlyEdsClusters_receivedEndpoints() { private void verifyEdsPriorityNames(List want, Map... 
updates) { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism2), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism2), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME2); assertThat(childBalancers).isEmpty(); @@ -660,17 +555,15 @@ locality2, createEndpoints(1) private LocalityLbEndpoints createEndpoints(int priority) { return LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(makeAddress("endpoint-addr-1"), 100, - true, "hostname1", ImmutableMap.of()), - LbEndpoint.create(makeAddress("endpoint-addr-2"), 100, - true, "hostname2", ImmutableMap.of())), - 70 /* localityWeight */, priority /* priority */, ImmutableMap.of()); + LbEndpoint.create(makeAddress("endpoint-addr-1"), 100, true, "hostname1"), + LbEndpoint.create(makeAddress("endpoint-addr-2"), 100, true, "hostname2")), + 70 /* localityWeight */, priority /* priority */); } @Test public void onlyEdsClusters_resourceNeverExist_returnErrorPicker() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1, EDS_SERVICE_NAME2); assertThat(childBalancers).isEmpty(); @@ -692,7 +585,7 @@ public void onlyEdsClusters_resourceNeverExist_returnErrorPicker() { @Test public void onlyEdsClusters_allResourcesRevoked_shutDownChildLbPolicy() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1, edsDiscoveryMechanism2), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1, EDS_SERVICE_NAME2); assertThat(childBalancers).isEmpty(); @@ -701,14 +594,12 @@ public void onlyEdsClusters_allResourcesRevoked_shutDownChildLbPolicy() { EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr-2"); LocalityLbEndpoints localityLbEndpoints1 = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint1, 100, true, - "hostname1", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + Collections.singletonList(LbEndpoint.create(endpoint1, 100, true, "hostname1")), + 10 /* localityWeight */, 1 /* priority */); LocalityLbEndpoints localityLbEndpoints2 = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint2, 100, true, - "hostname2", ImmutableMap.of())), - 20 /* localityWeight */, 2 /* priority */, ImmutableMap.of()); + Collections.singletonList(LbEndpoint.create(endpoint2, 100, true, "hostname2")), + 20 /* localityWeight */, 2 /* priority */); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints1)); xdsClient.deliverClusterLoadAssignment( @@ -729,19 +620,17 @@ public void onlyEdsClusters_allResourcesRevoked_shutDownChildLbPolicy() { @Test public void handleEdsResource_ignoreUnhealthyEndpoints() { - ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanism1), roundRobin, false); + ClusterResolverConfig config = + new ClusterResolverConfig(Collections.singletonList(edsDiscoveryMechanism1), roundRobin); deliverLbConfig(config); EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr-1"); 
EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr-2"); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( Arrays.asList( - LbEndpoint.create(endpoint1, 100, false /* isHealthy */, - "hostname1", ImmutableMap.of()), - LbEndpoint.create(endpoint2, 100, true /* isHealthy */, - "hostname2", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + LbEndpoint.create(endpoint1, 100, false /* isHealthy */, "hostname1"), + LbEndpoint.create(endpoint2, 100, true /* isHealthy */, "hostname2")), + 10 /* localityWeight */, 1 /* priority */); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers); @@ -751,21 +640,21 @@ public void handleEdsResource_ignoreUnhealthyEndpoints() { @Test public void handleEdsResource_ignoreLocalitiesWithNoHealthyEndpoints() { - ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanism1), roundRobin, false); + ClusterResolverConfig config = + new ClusterResolverConfig(Collections.singletonList(edsDiscoveryMechanism1), roundRobin); deliverLbConfig(config); EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr-1"); EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr-2"); LocalityLbEndpoints localityLbEndpoints1 = LocalityLbEndpoints.create( Collections.singletonList(LbEndpoint.create(endpoint1, 100, false /* isHealthy */, - "hostname1", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + "hostname1")), + 10 /* localityWeight */, 1 /* priority */); LocalityLbEndpoints localityLbEndpoints2 = LocalityLbEndpoints.create( Collections.singletonList(LbEndpoint.create(endpoint2, 100, true /* isHealthy */, - "hostname2", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + "hostname2")), + 10 /* localityWeight */, 1 /* priority */); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, ImmutableMap.of(locality1, localityLbEndpoints1, locality2, localityLbEndpoints2)); @@ -778,21 +667,21 @@ public void handleEdsResource_ignoreLocalitiesWithNoHealthyEndpoints() { @Test public void handleEdsResource_ignorePrioritiesWithNoHealthyEndpoints() { - ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanism1), roundRobin, false); + ClusterResolverConfig config = + new ClusterResolverConfig(Collections.singletonList(edsDiscoveryMechanism1), roundRobin); deliverLbConfig(config); EquivalentAddressGroup endpoint1 = makeAddress("endpoint-addr-1"); EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr-2"); LocalityLbEndpoints localityLbEndpoints1 = LocalityLbEndpoints.create( Collections.singletonList(LbEndpoint.create(endpoint1, 100, false /* isHealthy */, - "hostname1", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + "hostname1")), + 10 /* localityWeight */, 1 /* priority */); LocalityLbEndpoints localityLbEndpoints2 = LocalityLbEndpoints.create( Collections.singletonList(LbEndpoint.create(endpoint2, 200, true /* isHealthy */, - "hostname2", ImmutableMap.of())), - 10 /* localityWeight */, 2 /* priority */, ImmutableMap.of()); + "hostname2")), + 10 /* localityWeight */, 2 /* priority */); String priority2 = CLUSTER1 + "[child2]"; xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, @@ -804,15 +693,15 @@ public void 
handleEdsResource_ignorePrioritiesWithNoHealthyEndpoints() { @Test public void handleEdsResource_noHealthyEndpoint() { - ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(edsDiscoveryMechanism1), roundRobin, false); + ClusterResolverConfig config = + new ClusterResolverConfig(Collections.singletonList(edsDiscoveryMechanism1), roundRobin); deliverLbConfig(config); EquivalentAddressGroup endpoint = makeAddress("endpoint-addr-1"); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( Collections.singletonList(LbEndpoint.create(endpoint, 100, false /* isHealthy */, - "hostname1", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + "hostname1")), + 10 /* localityWeight */, 1 /* priority */); xdsClient.deliverClusterLoadAssignment(EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); // single endpoint, unhealthy @@ -840,7 +729,7 @@ public void oldListenerCallback_onlyLogicalDnsCluster_endpointsResolved() { void do_onlyLogicalDnsCluster_endpointsResolved() { ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin, false); + Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); assertThat(childBalancers).isEmpty(); @@ -872,7 +761,7 @@ void do_onlyLogicalDnsCluster_endpointsResolved() { @Test public void onlyLogicalDnsCluster_handleRefreshNameResolution() { ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin, false); + Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); assertThat(childBalancers).isEmpty(); @@ -901,7 +790,7 @@ void do_onlyLogicalDnsCluster_resolutionError_backoffAndRefresh() { InOrder inOrder = Mockito.inOrder(helper, backoffPolicyProvider, backoffPolicy1, backoffPolicy2); ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin, false); + Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); assertThat(childBalancers).isEmpty(); @@ -947,7 +836,7 @@ void do_onlyLogicalDnsCluster_resolutionError_backoffAndRefresh() { public void onlyLogicalDnsCluster_refreshNameResolutionRaceWithResolutionError() { InOrder inOrder = Mockito.inOrder(backoffPolicyProvider, backoffPolicy1, backoffPolicy2); ClusterResolverConfig config = new ClusterResolverConfig( - Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin, false); + Collections.singletonList(logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); assertThat(childBalancers).isEmpty(); @@ -985,7 +874,7 @@ public void onlyLogicalDnsCluster_refreshNameResolutionRaceWithResolutionError() @Test public void edsClustersAndLogicalDnsCluster_receivedEndpoints() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); 
FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -996,9 +885,8 @@ public void edsClustersAndLogicalDnsCluster_receivedEndpoints() { resolver.deliverEndpointAddresses(Arrays.asList(endpoint1, endpoint2)); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint3, 100, true, - "hostname3", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + Collections.singletonList(LbEndpoint.create(endpoint3, 100, true, "hostname3")), + 10 /* localityWeight */, 1 /* priority */); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); @@ -1021,7 +909,7 @@ public void edsClustersAndLogicalDnsCluster_receivedEndpoints() { @Test public void noEdsResourceExists_useDnsResolutionResults() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -1045,7 +933,7 @@ public void noEdsResourceExists_useDnsResolutionResults() { @Test public void edsResourceRevoked_dnsResolutionError_shutDownChildLbPolicyAndReturnErrorPicker() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -1054,9 +942,8 @@ public void edsResourceRevoked_dnsResolutionError_shutDownChildLbPolicyAndReturn EquivalentAddressGroup endpoint = makeAddress("endpoint-addr-1"); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint, 100, true, - "hostname1", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + Collections.singletonList(LbEndpoint.create(endpoint, 100, true, "hostname1")), + 10 /* localityWeight */, 1 /* priority */); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); resolver.deliverError(Status.UNKNOWN.withDescription("I am lost")); @@ -1077,7 +964,7 @@ public void edsResourceRevoked_dnsResolutionError_shutDownChildLbPolicyAndReturn @Test public void resolutionErrorAfterChildLbCreated_propagateErrorIfAllClustersEncounterError() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -1086,9 +973,8 @@ public void resolutionErrorAfterChildLbCreated_propagateErrorIfAllClustersEncoun EquivalentAddressGroup endpoint = makeAddress("endpoint-addr-1"); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint, 100, true, - "hostname1", ImmutableMap.of())), - 10 /* localityWeight 
*/, 1 /* priority */, ImmutableMap.of()); + Collections.singletonList(LbEndpoint.create(endpoint, 100, true, "hostname1")), + 10 /* localityWeight */, 1 /* priority */); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); assertThat(childBalancers).isEmpty(); // not created until all clusters resolved. @@ -1113,7 +999,7 @@ public void resolutionErrorAfterChildLbCreated_propagateErrorIfAllClustersEncoun @Test public void resolutionErrorBeforeChildLbCreated_returnErrorPickerIfAllClustersEncounterError() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -1136,7 +1022,7 @@ public void resolutionErrorBeforeChildLbCreated_returnErrorPickerIfAllClustersEn @Test public void resolutionErrorBeforeChildLbCreated_edsOnly_returnErrorPicker() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); assertThat(childBalancers).isEmpty(); @@ -1154,7 +1040,7 @@ public void resolutionErrorBeforeChildLbCreated_edsOnly_returnErrorPicker() { @Test public void handleNameResolutionErrorFromUpstream_beforeChildLbCreated_returnErrorPicker() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); assertResolverCreated("/" + DNS_HOST_NAME); @@ -1170,7 +1056,7 @@ public void handleNameResolutionErrorFromUpstream_beforeChildLbCreated_returnErr @Test public void handleNameResolutionErrorFromUpstream_afterChildLbCreated_fallThrough() { ClusterResolverConfig config = new ClusterResolverConfig( - Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin, false); + Arrays.asList(edsDiscoveryMechanism1, logicalDnsDiscoveryMechanism), roundRobin); deliverLbConfig(config); assertThat(xdsClient.watchers.keySet()).containsExactly(EDS_SERVICE_NAME1); FakeNameResolver resolver = assertResolverCreated("/" + DNS_HOST_NAME); @@ -1180,9 +1066,8 @@ public void handleNameResolutionErrorFromUpstream_afterChildLbCreated_fallThroug EquivalentAddressGroup endpoint2 = makeAddress("endpoint-addr-2"); LocalityLbEndpoints localityLbEndpoints = LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create(endpoint1, 100, true, - "hostname1", ImmutableMap.of())), - 10 /* localityWeight */, 1 /* priority */, ImmutableMap.of()); + Collections.singletonList(LbEndpoint.create(endpoint1, 100, true, "hostname1")), + 10 /* localityWeight */, 1 /* priority */); xdsClient.deliverClusterLoadAssignment( EDS_SERVICE_NAME1, Collections.singletonMap(locality1, localityLbEndpoints)); resolver.deliverEndpointAddresses(Collections.singletonList(endpoint2)); @@ -1256,37 +1141,37 @@ private static void assertAddressesEqual( } private static EquivalentAddressGroup makeAddress(final String name) { - return new 
EquivalentAddressGroup(new FakeSocketAddress(name)); - } - - static class FakeSocketAddress extends SocketAddress { - private final String name; + class FakeSocketAddress extends SocketAddress { + private final String name; - private FakeSocketAddress(String name) { - this.name = name; - } + private FakeSocketAddress(String name) { + this.name = name; + } - @Override - public int hashCode() { - return Objects.hash(name); - } + @Override + public int hashCode() { + return Objects.hash(name); + } - @Override - public boolean equals(Object o) { - if (this == o) { - return true; + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof FakeSocketAddress)) { + return false; + } + FakeSocketAddress that = (FakeSocketAddress) o; + return Objects.equals(name, that.name); } - if (!(o instanceof FakeSocketAddress)) { - return false; + + @Override + public String toString() { + return name; } - FakeSocketAddress that = (FakeSocketAddress) o; - return Objects.equals(name, that.name); } - @Override - public String toString() { - return name; - } + return new EquivalentAddressGroup(new FakeSocketAddress(name)); } private static final class FakeXdsClient extends XdsClient { diff --git a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java index 7fac666f983..610d147ccf9 100644 --- a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java +++ b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java @@ -18,7 +18,6 @@ import static com.google.common.truth.Truth.assertThat; import static io.envoyproxy.envoy.config.route.v3.RouteAction.ClusterSpecifierCase.CLUSTER_SPECIFIER_PLUGIN; -import static io.grpc.xds.XdsClusterResource.TRANSPORT_SOCKET_NAME_HTTP11_PROXY; import static io.grpc.xds.XdsEndpointResource.GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS; import static org.junit.Assert.fail; @@ -94,7 +93,6 @@ import io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.Rds; import io.envoyproxy.envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin; import io.envoyproxy.envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality; -import io.envoyproxy.envoy.extensions.transport_sockets.http_11_proxy.v3.Http11ProxyUpstreamTransport; import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance; import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext; import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext; @@ -1057,7 +1055,7 @@ public void parseClusterWeight() { } @Test - public void parseLocalityLbEndpoints_withHealthyEndpoints() throws ResourceInvalidException { + public void parseLocalityLbEndpoints_withHealthyEndpoints() { io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto = io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints.newBuilder() .setLocality(Locality.newBuilder() @@ -1077,14 +1075,12 @@ public void parseLocalityLbEndpoints_withHealthyEndpoints() throws ResourceInval assertThat(struct.getErrorDetail()).isNull(); assertThat(struct.getStruct()).isEqualTo( LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, - 20, true, "", ImmutableMap.of())), - 100, 1, ImmutableMap.of())); + Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, 20, true, "")), + 100, 1)); } @Test - public void parseLocalityLbEndpoints_treatUnknownHealthAsHealthy() - 
throws ResourceInvalidException { + public void parseLocalityLbEndpoints_treatUnknownHealthAsHealthy() { io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto = io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints.newBuilder() .setLocality(Locality.newBuilder() @@ -1104,13 +1100,12 @@ public void parseLocalityLbEndpoints_treatUnknownHealthAsHealthy() assertThat(struct.getErrorDetail()).isNull(); assertThat(struct.getStruct()).isEqualTo( LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, - 20, true, "", ImmutableMap.of())), - 100, 1, ImmutableMap.of())); + Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, 20, true, "")), 100, + 1)); } @Test - public void parseLocalityLbEndpoints_withUnHealthyEndpoints() throws ResourceInvalidException { + public void parseLocalityLbEndpoints_withUnHealthyEndpoints() { io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto = io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints.newBuilder() .setLocality(Locality.newBuilder() @@ -1130,13 +1125,12 @@ public void parseLocalityLbEndpoints_withUnHealthyEndpoints() throws ResourceInv assertThat(struct.getErrorDetail()).isNull(); assertThat(struct.getStruct()).isEqualTo( LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, 20, - false, "", ImmutableMap.of())), - 100, 1, ImmutableMap.of())); + Collections.singletonList(LbEndpoint.create("172.14.14.5", 8888, 20, false, "")), 100, + 1)); } @Test - public void parseLocalityLbEndpoints_ignorZeroWeightLocality() throws ResourceInvalidException { + public void parseLocalityLbEndpoints_ignorZeroWeightLocality() { io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto = io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints.newBuilder() .setLocality(Locality.newBuilder() @@ -1193,10 +1187,7 @@ public void parseLocalityLbEndpoints_withDualStackEndpoints() { EquivalentAddressGroup expectedEag = new EquivalentAddressGroup(socketAddressList); assertThat(struct.getStruct()).isEqualTo( LocalityLbEndpoints.create( - Collections.singletonList(LbEndpoint.create( - expectedEag, 20, true, "", ImmutableMap.of())), 100, 1, ImmutableMap.of())); - } catch (ResourceInvalidException e) { - throw new RuntimeException(e); + Collections.singletonList(LbEndpoint.create(expectedEag, 20, true, "")), 100, 1)); } finally { if (originalDualStackProp != null) { System.setProperty(GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS, originalDualStackProp); @@ -1207,7 +1198,7 @@ public void parseLocalityLbEndpoints_withDualStackEndpoints() { } @Test - public void parseLocalityLbEndpoints_invalidPriority() throws ResourceInvalidException { + public void parseLocalityLbEndpoints_invalidPriority() { io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto = io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints.newBuilder() .setLocality(Locality.newBuilder() @@ -2465,59 +2456,6 @@ public void processCluster_parsesAudienceMetadata() assertThat(update.parsedMetadata()).isEqualTo(expectedParsedMetadata); } - @Test - public void processCluster_parsesAddressMetadata() throws Exception { - - // Create an Address message - Address address = Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("192.168.1.1") - .setPortValue(8080) - .build()) - .build(); - - // Wrap the Address in Any - Any addressMetadata = Any.newBuilder() - .setTypeUrl("type.googleapis.com/envoy.config.core.v3.Address") - .setValue(address.toByteString()) - .build(); - 
- Struct filterMetadata = Struct.newBuilder() - .putFields("key1", Value.newBuilder().setStringValue("value1").build()) - .putFields("key2", Value.newBuilder().setNumberValue(42).build()) - .build(); - - Metadata metadata = Metadata.newBuilder() - .putTypedFilterMetadata("ADDRESS_METADATA", addressMetadata) - .putFilterMetadata("FILTER_METADATA", filterMetadata) - .build(); - - Cluster cluster = Cluster.newBuilder() - .setName("cluster-foo.googleapis.com") - .setType(DiscoveryType.EDS) - .setEdsClusterConfig( - EdsClusterConfig.newBuilder() - .setEdsConfig( - ConfigSource.newBuilder() - .setAds(AggregatedConfigSource.getDefaultInstance())) - .setServiceName("service-foo.googleapis.com")) - .setLbPolicy(LbPolicy.ROUND_ROBIN) - .setMetadata(metadata) - .build(); - - CdsUpdate update = XdsClusterResource.processCluster( - cluster, null, LRS_SERVER_INFO, - LoadBalancerRegistry.getDefaultRegistry()); - - ImmutableMap expectedParsedMetadata = ImmutableMap.of( - "ADDRESS_METADATA", new InetSocketAddress("192.168.1.1", 8080), - "FILTER_METADATA", ImmutableMap.of( - "key1", "value1", - "key2", 42.0)); - - assertThat(update.parsedMetadata()).isEqualTo(expectedParsedMetadata); - } - @Test public void processCluster_metadataKeyCollision_resolvesToTypedMetadata() throws ResourceInvalidException, InvalidProtocolBufferException { @@ -2574,40 +2512,6 @@ public Object parse(Any value) { metadataRegistry.removeParser(testParser); } - @Test - public void parseNonAggregateCluster_withHttp11ProxyTransportSocket() - throws ResourceInvalidException, InvalidProtocolBufferException { - XdsClusterResource.isEnabledXdsHttpConnect = true; - - Http11ProxyUpstreamTransport http11ProxyUpstreamTransport = - Http11ProxyUpstreamTransport.newBuilder() - .setTransportSocket(TransportSocket.getDefaultInstance()) - .build(); - - TransportSocket transportSocket = TransportSocket.newBuilder() - .setName(TRANSPORT_SOCKET_NAME_HTTP11_PROXY) - .setTypedConfig(Any.pack(http11ProxyUpstreamTransport)) - .build(); - - Cluster cluster = Cluster.newBuilder() - .setName("cluster-http11-proxy.googleapis.com") - .setType(DiscoveryType.EDS) - .setEdsClusterConfig( - EdsClusterConfig.newBuilder() - .setEdsConfig( - ConfigSource.newBuilder().setAds(AggregatedConfigSource.getDefaultInstance())) - .setServiceName("service-http11-proxy.googleapis.com")) - .setLbPolicy(LbPolicy.ROUND_ROBIN) - .setTransportSocket(transportSocket) - .build(); - - CdsUpdate result = - XdsClusterResource.processCluster(cluster, null, LRS_SERVER_INFO, - LoadBalancerRegistry.getDefaultRegistry()); - - assertThat(result).isNotNull(); - assertThat(result.isHttp11ProxyAvailable()).isTrue(); - } @Test public void parseServerSideListener_invalidTrafficDirection() throws ResourceInvalidException { diff --git a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java index 51c07cb3537..00fbfe669af 100644 --- a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java +++ b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplTestBase.java @@ -607,9 +607,9 @@ private void validateGoldenClusterLoadAssignment(EdsUpdate edsUpdate) { Locality.create("region1", "zone1", "subzone1"), LocalityLbEndpoints.create( ImmutableList.of(LbEndpoint.create("192.168.0.1", 8080, 2, true, - "endpoint-host-name", ImmutableMap.of())), 1, 0, ImmutableMap.of()), + "endpoint-host-name")), 1, 0), Locality.create("region3", "zone3", "subzone3"), - LocalityLbEndpoints.create(ImmutableList.of(), 2, 1, ImmutableMap.of())); + 
LocalityLbEndpoints.create(ImmutableList.of(), 2, 1)); } /** @@ -3246,9 +3246,7 @@ public void edsResourceUpdated() { Locality.create("region2", "zone2", "subzone2"), LocalityLbEndpoints.create( ImmutableList.of( - LbEndpoint.create("172.44.2.2", 8000, 3, - true, "endpoint-host-name", ImmutableMap.of())), - 2, 0, ImmutableMap.of())); + LbEndpoint.create("172.44.2.2", 8000, 3, true, "endpoint-host-name")), 2, 0)); verifyResourceMetadataAcked(EDS, EDS_RESOURCE, updatedClusterLoadAssignment, VERSION_2, TIME_INCREMENT * 2); verifySubscribedResourcesMetadataSizes(0, 0, 0, 1); @@ -3418,9 +3416,7 @@ public void multipleEdsWatchers() { Locality.create("region2", "zone2", "subzone2"), LocalityLbEndpoints.create( ImmutableList.of( - LbEndpoint.create("172.44.2.2", 8000, 3, - true, "endpoint-host-name", ImmutableMap.of())), - 2, 0, ImmutableMap.of())); + LbEndpoint.create("172.44.2.2", 8000, 3, true, "endpoint-host-name")), 2, 0)); verify(watcher2).onChanged(edsUpdateCaptor.capture()); edsUpdate = edsUpdateCaptor.getValue(); assertThat(edsUpdate.clusterName).isEqualTo(edsResourceTwo); @@ -3430,9 +3426,7 @@ public void multipleEdsWatchers() { Locality.create("region2", "zone2", "subzone2"), LocalityLbEndpoints.create( ImmutableList.of( - LbEndpoint.create("172.44.2.2", 8000, 3, - true, "endpoint-host-name", ImmutableMap.of())), - 2, 0, ImmutableMap.of())); + LbEndpoint.create("172.44.2.2", 8000, 3, true, "endpoint-host-name")), 2, 0)); verifyNoMoreInteractions(edsResourceWatcher); verifyResourceMetadataAcked( EDS, edsResourceTwo, clusterLoadAssignmentTwo, VERSION_2, TIME_INCREMENT * 2); diff --git a/xds/src/test/java/io/grpc/xds/XdsTestUtils.java b/xds/src/test/java/io/grpc/xds/XdsTestUtils.java index d0580ae2667..ea28734ec6a 100644 --- a/xds/src/test/java/io/grpc/xds/XdsTestUtils.java +++ b/xds/src/test/java/io/grpc/xds/XdsTestUtils.java @@ -257,17 +257,17 @@ static XdsConfig getDefaultXdsConfig(String serverHostName) // Need to create endpoints to create locality endpoints map to create edsUpdate Map lbEndpointsMap = new HashMap<>(); - LbEndpoint lbEndpoint = LbEndpoint.create( - serverHostName, ENDPOINT_PORT, 0, true, ENDPOINT_HOSTNAME, ImmutableMap.of()); + LbEndpoint lbEndpoint = + LbEndpoint.create(serverHostName, ENDPOINT_PORT, 0, true, ENDPOINT_HOSTNAME); lbEndpointsMap.put( Locality.create("", "", ""), - LocalityLbEndpoints.create(ImmutableList.of(lbEndpoint), 10, 0, ImmutableMap.of())); + LocalityLbEndpoints.create(ImmutableList.of(lbEndpoint), 10, 0)); // Need to create EdsUpdate to create CdsUpdate to create XdsClusterConfig for builder XdsEndpointResource.EdsUpdate edsUpdate = new XdsEndpointResource.EdsUpdate( EDS_NAME, lbEndpointsMap, Collections.emptyList()); XdsClusterResource.CdsUpdate cdsUpdate = XdsClusterResource.CdsUpdate.forEds( - CLUSTER_NAME, EDS_NAME, serverInfo, null, null, null, false) + CLUSTER_NAME, EDS_NAME, serverInfo, null, null, null) .lbPolicyConfig(getWrrLbConfigAsMap()).build(); XdsConfig.XdsClusterConfig clusterConfig = new XdsConfig.XdsClusterConfig( CLUSTER_NAME, cdsUpdate, new EndpointConfig(StatusOr.fromValue(edsUpdate))); diff --git a/xds/third_party/envoy/import.sh b/xds/third_party/envoy/import.sh index 7a6b33871b3..dbe6f81b1a8 100755 --- a/xds/third_party/envoy/import.sh +++ b/xds/third_party/envoy/import.sh @@ -86,7 +86,6 @@ envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto envoy/extensions/load_balancing_policies/round_robin/v3/round_robin.proto 
envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto -envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto envoy/extensions/transport_sockets/tls/v3/cert.proto envoy/extensions/transport_sockets/tls/v3/common.proto envoy/extensions/transport_sockets/tls/v3/secret.proto diff --git a/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto b/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto deleted file mode 100644 index 2c9b5333f41..00000000000 --- a/xds/third_party/envoy/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.http_11_proxy.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.http_11_proxy.v3"; -option java_outer_classname = "UpstreamHttp11ConnectProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/http_11_proxy/v3;http_11_proxyv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Upstream HTTP/1.1 Proxy] -// [#extension: envoy.transport_sockets.http_11_proxy] - -// HTTP/1.1 proxy transport socket establishes an upstream connection to a proxy address -// instead of the target host's address. This behavior is triggered when the transport -// socket is configured and proxy information is provided. -// -// Behavior when proxying: -// ======================= -// When an upstream connection is established, instead of connecting directly to the endpoint -// address, the client will connect to the specified proxy address, send an HTTP/1.1 ``CONNECT`` request -// indicating the endpoint address, and process the response. If the response has HTTP status 200, -// the connection will be passed down to the underlying transport socket. -// -// Configuring proxy information: -// ============================== -// Set ``typed_filter_metadata`` in :ref:`LbEndpoint.Metadata ` or :ref:`LocalityLbEndpoints.Metadata `. -// using the key ``envoy.http11_proxy_transport_socket.proxy_address`` and the -// proxy address in ``config::core::v3::Address`` format. -// -message Http11ProxyUpstreamTransport { - // The underlying transport socket being wrapped. Defaults to plaintext (raw_buffer) if unset. - config.core.v3.TransportSocket transport_socket = 1; -} From 5e2f4d54ab97a47769123b9254edfb478890e9e8 Mon Sep 17 00:00:00 2001 From: MV Shiva Date: Mon, 2 Jun 2025 20:00:29 +0530 Subject: [PATCH 12/15] =?UTF-8?q?Revert=20"xds:=20ensure=20server=20interc?= =?UTF-8?q?eptors=20are=20created=20in=20a=20sync=20context=20(#11930?= =?UTF-8?q?=E2=80=A6"=20(#12119)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 80ea4e9e6981daadb1e175a2a60ce78fd5fab1ed. 
--- .../java/io/grpc/xds/XdsServerWrapper.java | 4 +- .../java/io/grpc/xds/XdsServerTestHelper.java | 83 ++++--------------- .../io/grpc/xds/XdsServerWrapperTest.java | 28 ++++--- 3 files changed, 36 insertions(+), 79 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java b/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java index e5b25ae458b..bbb17d9b616 100644 --- a/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java +++ b/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java @@ -524,7 +524,9 @@ private AtomicReference generateRoutingConfig(FilterChain f private ImmutableMap generatePerRouteInterceptors( @Nullable List filterConfigs, List virtualHosts) { - syncContext.throwIfNotInThisSynchronizationContext(); + // This should always be called from the sync context. + // Ideally we'd want to throw otherwise, but this breaks the tests now. + // syncContext.throwIfNotInThisSynchronizationContext(); ImmutableMap.Builder perRouteInterceptors = new ImmutableMap.Builder<>(); diff --git a/xds/src/test/java/io/grpc/xds/XdsServerTestHelper.java b/xds/src/test/java/io/grpc/xds/XdsServerTestHelper.java index 0508b11c205..a27c2917712 100644 --- a/xds/src/test/java/io/grpc/xds/XdsServerTestHelper.java +++ b/xds/src/test/java/io/grpc/xds/XdsServerTestHelper.java @@ -38,7 +38,6 @@ import io.grpc.xds.client.XdsClient; import io.grpc.xds.client.XdsInitializationException; import io.grpc.xds.client.XdsResourceType; -import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -46,10 +45,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import javax.annotation.Nullable; /** @@ -178,18 +174,12 @@ public List getTargets() { } } - // Implementation details: - // 1. Use `synchronized` in methods where XdsClientImpl uses its own `syncContext`. - // 2. Use `serverExecutor` via `execute()` in methods where XdsClientImpl uses watcher's executor. 
static final class FakeXdsClient extends XdsClient { - public static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(5); - - private boolean shutdown; - @Nullable SettableFuture ldsResource = SettableFuture.create(); - @Nullable ResourceWatcher ldsWatcher; - private CountDownLatch rdsCount = new CountDownLatch(1); + boolean shutdown; + SettableFuture ldsResource = SettableFuture.create(); + ResourceWatcher ldsWatcher; + CountDownLatch rdsCount = new CountDownLatch(1); final Map> rdsWatchers = new HashMap<>(); - @Nullable private volatile Executor serverExecutor; @Override public TlsContextManager getSecurityConfig() { @@ -203,20 +193,14 @@ public BootstrapInfo getBootstrapInfo() { @Override @SuppressWarnings("unchecked") - public synchronized void watchXdsResource( - XdsResourceType resourceType, - String resourceName, - ResourceWatcher watcher, - Executor executor) { - if (serverExecutor != null) { - assertThat(executor).isEqualTo(serverExecutor); - } - + public void watchXdsResource(XdsResourceType resourceType, + String resourceName, + ResourceWatcher watcher, + Executor syncContext) { switch (resourceType.typeName()) { case "LDS": assertThat(ldsWatcher).isNull(); ldsWatcher = (ResourceWatcher) watcher; - serverExecutor = executor; ldsResource.set(resourceName); break; case "RDS": @@ -229,14 +213,14 @@ public synchronized void watchXdsResource( } @Override - public synchronized void cancelXdsResourceWatch( - XdsResourceType type, String resourceName, ResourceWatcher watcher) { + public void cancelXdsResourceWatch(XdsResourceType type, + String resourceName, + ResourceWatcher watcher) { switch (type.typeName()) { case "LDS": assertThat(ldsWatcher).isNotNull(); ldsResource = null; ldsWatcher = null; - serverExecutor = null; break; case "RDS": rdsWatchers.remove(resourceName); @@ -246,58 +230,27 @@ public synchronized void cancelXdsResourceWatch( } @Override - public synchronized void shutdown() { + public void shutdown() { shutdown = true; } @Override - public synchronized boolean isShutDown() { + public boolean isShutDown() { return shutdown; } - public void awaitRds(Duration timeout) throws InterruptedException, TimeoutException { - if (!rdsCount.await(timeout.toMillis(), TimeUnit.MILLISECONDS)) { - throw new TimeoutException("Timeout " + timeout + " waiting for RDSs"); - } - } - - public void setExpectedRdsCount(int count) { - rdsCount = new CountDownLatch(count); - } - - private void execute(Runnable action) { - // This method ensures that all watcher updates: - // - Happen after the server started watching LDS. - // - Are executed within the sync context of the server. - // - // Note that this doesn't guarantee that any of the RDS watchers are created. - // Tests should use setExpectedRdsCount(int) and awaitRds() for that. 
- if (ldsResource == null) { - throw new IllegalStateException("xDS resource update after watcher cancel"); - } - try { - ldsResource.get(DEFAULT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); - } catch (ExecutionException | TimeoutException e) { - throw new RuntimeException("Can't resolve LDS resource name in " + DEFAULT_TIMEOUT, e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new RuntimeException(e); - } - serverExecutor.execute(action); - } - void deliverLdsUpdate(List filterChains, FilterChain defaultFilterChain) { - deliverLdsUpdate(LdsUpdate.forTcpListener(Listener.create( - "listener", "0.0.0.0:1", ImmutableList.copyOf(filterChains), defaultFilterChain))); + ldsWatcher.onChanged(LdsUpdate.forTcpListener(Listener.create( + "listener", "0.0.0.0:1", ImmutableList.copyOf(filterChains), defaultFilterChain))); } void deliverLdsUpdate(LdsUpdate ldsUpdate) { - execute(() -> ldsWatcher.onChanged(ldsUpdate)); + ldsWatcher.onChanged(ldsUpdate); } - void deliverRdsUpdate(String resourceName, List virtualHosts) { - execute(() -> rdsWatchers.get(resourceName).onChanged(new RdsUpdate(virtualHosts))); + void deliverRdsUpdate(String rdsName, List virtualHosts) { + rdsWatchers.get(rdsName).onChanged(new RdsUpdate(virtualHosts)); } } } diff --git a/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java b/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java index 388052a3dc8..41f005ba583 100644 --- a/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java @@ -74,6 +74,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -251,7 +252,7 @@ public void run() { FilterChain f0 = createFilterChain("filter-chain-0", hcm_virtual); FilterChain f1 = createFilterChain("filter-chain-1", createRds("rds")); xdsClient.deliverLdsUpdate(Collections.singletonList(f0), f1); - xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); + xdsClient.rdsCount.await(5, TimeUnit.SECONDS); xdsClient.deliverRdsUpdate("rds", Collections.singletonList(createVirtualHost("virtual-host-1"))); verify(listener, timeout(5000)).onServing(); @@ -260,7 +261,7 @@ public void run() { xdsServerWrapper.shutdown(); assertThat(xdsServerWrapper.isShutdown()).isTrue(); assertThat(xdsClient.ldsResource).isNull(); - assertThat(xdsClient.isShutDown()).isTrue(); + assertThat(xdsClient.shutdown).isTrue(); verify(mockServer).shutdown(); assertThat(f0.sslContextProviderSupplier().isShutdown()).isTrue(); assertThat(f1.sslContextProviderSupplier().isShutdown()).isTrue(); @@ -302,7 +303,7 @@ public void run() { verify(mockServer, never()).start(); assertThat(xdsServerWrapper.isShutdown()).isTrue(); assertThat(xdsClient.ldsResource).isNull(); - assertThat(xdsClient.isShutDown()).isTrue(); + assertThat(xdsClient.shutdown).isTrue(); verify(mockServer).shutdown(); assertThat(f0.sslContextProviderSupplier().isShutdown()).isTrue(); assertThat(f1.sslContextProviderSupplier().isShutdown()).isTrue(); @@ -341,7 +342,7 @@ public void run() { xdsServerWrapper.shutdown(); assertThat(xdsServerWrapper.isShutdown()).isTrue(); assertThat(xdsClient.ldsResource).isNull(); - assertThat(xdsClient.isShutDown()).isTrue(); + assertThat(xdsClient.shutdown).isTrue(); verify(mockBuilder, times(1)).build(); verify(mockServer, times(1)).shutdown(); xdsServerWrapper.awaitTermination(1, 
TimeUnit.SECONDS); @@ -366,7 +367,7 @@ public void run() { FilterChain filterChain = createFilterChain("filter-chain-1", createRds("rds")); SslContextProviderSupplier sslSupplier = filterChain.sslContextProviderSupplier(); xdsClient.deliverLdsUpdate(Collections.singletonList(filterChain), null); - xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); + xdsClient.rdsCount.await(5, TimeUnit.SECONDS); xdsClient.deliverRdsUpdate("rds", Collections.singletonList(createVirtualHost("virtual-host-1"))); try { @@ -433,7 +434,7 @@ public void run() { xdsClient.ldsResource.get(5, TimeUnit.SECONDS); FilterChain filterChain = createFilterChain("filter-chain-1", createRds("rds")); xdsClient.deliverLdsUpdate(Collections.singletonList(filterChain), null); - xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); + xdsClient.rdsCount.await(5, TimeUnit.SECONDS); xdsClient.deliverRdsUpdate("rds", Collections.singletonList(createVirtualHost("virtual-host-1"))); try { @@ -543,7 +544,7 @@ public void run() { 0L, Collections.singletonList(virtualHost), new ArrayList()); EnvoyServerProtoData.FilterChain f0 = createFilterChain("filter-chain-0", hcmVirtual); EnvoyServerProtoData.FilterChain f1 = createFilterChain("filter-chain-1", createRds("r0")); - xdsClient.setExpectedRdsCount(3); + xdsClient.rdsCount = new CountDownLatch(3); xdsClient.deliverLdsUpdate(Arrays.asList(f0, f1), null); assertThat(start.isDone()).isFalse(); assertThat(selectorManager.getSelectorToUpdateSelector()).isNull(); @@ -555,7 +556,7 @@ public void run() { xdsClient.deliverLdsUpdate(Arrays.asList(f0, f2), f3); verify(mockServer, never()).start(); verify(listener, never()).onServing(); - xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); + xdsClient.rdsCount.await(5, TimeUnit.SECONDS); xdsClient.deliverRdsUpdate("r1", Collections.singletonList(createVirtualHost("virtual-host-1"))); @@ -601,11 +602,12 @@ public void run() { EnvoyServerProtoData.FilterChain f1 = createFilterChain("filter-chain-1", createRds("r0")); EnvoyServerProtoData.FilterChain f2 = createFilterChain("filter-chain-2", createRds("r0")); + xdsClient.rdsCount = new CountDownLatch(1); xdsClient.deliverLdsUpdate(Arrays.asList(f0, f1), f2); assertThat(start.isDone()).isFalse(); assertThat(selectorManager.getSelectorToUpdateSelector()).isNull(); - xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); + xdsClient.rdsCount.await(5, TimeUnit.SECONDS); xdsClient.deliverRdsUpdate("r0", Collections.singletonList(createVirtualHost("virtual-host-0"))); start.get(5000, TimeUnit.MILLISECONDS); @@ -631,9 +633,9 @@ public void run() { EnvoyServerProtoData.FilterChain f3 = createFilterChain("filter-chain-3", createRds("r0")); EnvoyServerProtoData.FilterChain f4 = createFilterChain("filter-chain-4", createRds("r1")); EnvoyServerProtoData.FilterChain f5 = createFilterChain("filter-chain-4", createRds("r1")); - xdsClient.setExpectedRdsCount(1); + xdsClient.rdsCount = new CountDownLatch(1); xdsClient.deliverLdsUpdate(Arrays.asList(f5, f3), f4); - xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); + xdsClient.rdsCount.await(5, TimeUnit.SECONDS); xdsClient.deliverRdsUpdate("r1", Collections.singletonList(createVirtualHost("virtual-host-1"))); xdsClient.deliverRdsUpdate("r0", @@ -686,7 +688,7 @@ public void run() { EnvoyServerProtoData.FilterChain f0 = createFilterChain("filter-chain-0", hcmVirtual); EnvoyServerProtoData.FilterChain f1 = createFilterChain("filter-chain-1", createRds("r0")); xdsClient.deliverLdsUpdate(Arrays.asList(f0, f1), null); - xdsClient.awaitRds(FakeXdsClient.DEFAULT_TIMEOUT); + 
xdsClient.rdsCount.await(); xdsClient.rdsWatchers.get("r0").onError(Status.CANCELLED); start.get(5000, TimeUnit.MILLISECONDS); assertThat(selectorManager.getSelectorToUpdateSelector().getRoutingConfigs().size()) @@ -1233,7 +1235,7 @@ public ServerCall.Listener interceptCall(ServerCall Date: Mon, 2 Jun 2025 20:47:26 +0530 Subject: [PATCH 13/15] =?UTF-8?q?Revert=20"xds:=20Change=20how=20xDS=20fil?= =?UTF-8?q?ters=20are=20created=20by=20introducing=20Filter.Provide?= =?UTF-8?q?=E2=80=A6"=20(#12124)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 2fd84478a6e45bfb07215dac1f1a49733d24b2e8. --- .../main/java/io/grpc/xds/FaultFilter.java | 170 ++++++++---------- xds/src/main/java/io/grpc/xds/Filter.java | 90 +++------- .../main/java/io/grpc/xds/FilterRegistry.java | 16 +- .../io/grpc/xds/GcpAuthenticationFilter.java | 78 ++++---- .../java/io/grpc/xds/InternalRbacFilter.java | 7 +- xds/src/main/java/io/grpc/xds/RbacFilter.java | 158 ++++++++-------- .../main/java/io/grpc/xds/RouterFilter.java | 65 +++---- .../java/io/grpc/xds/XdsListenerResource.java | 29 +-- .../java/io/grpc/xds/XdsNameResolver.java | 31 ++-- .../grpc/xds/XdsRouteConfigureResource.java | 6 +- .../java/io/grpc/xds/XdsServerWrapper.java | 66 +++---- .../java/io/grpc/xds/FaultFilterTest.java | 19 +- .../grpc/xds/GcpAuthenticationFilterTest.java | 25 ++- .../grpc/xds/GrpcXdsClientImplDataTest.java | 78 ++++---- .../test/java/io/grpc/xds/RbacFilterTest.java | 35 ++-- .../java/io/grpc/xds/RouterFilterTest.java | 36 ---- .../java/io/grpc/xds/XdsNameResolverTest.java | 19 +- .../io/grpc/xds/XdsServerWrapperTest.java | 61 +++---- 18 files changed, 410 insertions(+), 579 deletions(-) delete mode 100644 xds/src/test/java/io/grpc/xds/RouterFilterTest.java diff --git a/xds/src/main/java/io/grpc/xds/FaultFilter.java b/xds/src/main/java/io/grpc/xds/FaultFilter.java index 2012fd36b62..c66861a9f15 100644 --- a/xds/src/main/java/io/grpc/xds/FaultFilter.java +++ b/xds/src/main/java/io/grpc/xds/FaultFilter.java @@ -45,6 +45,7 @@ import io.grpc.internal.GrpcUtil; import io.grpc.xds.FaultConfig.FaultAbort; import io.grpc.xds.FaultConfig.FaultDelay; +import io.grpc.xds.Filter.ClientInterceptorBuilder; import io.grpc.xds.ThreadSafeRandom.ThreadSafeRandomImpl; import java.util.Locale; import java.util.concurrent.Executor; @@ -55,11 +56,10 @@ import javax.annotation.Nullable; /** HttpFault filter implementation. 
*/ -final class FaultFilter implements Filter { +final class FaultFilter implements Filter, ClientInterceptorBuilder { - private static final FaultFilter INSTANCE = + static final FaultFilter INSTANCE = new FaultFilter(ThreadSafeRandomImpl.instance, new AtomicLong()); - @VisibleForTesting static final Metadata.Key HEADER_DELAY_KEY = Metadata.Key.of("x-envoy-fault-delay-request", Metadata.ASCII_STRING_MARSHALLER); @@ -87,110 +87,98 @@ final class FaultFilter implements Filter { this.activeFaultCounter = activeFaultCounter; } - static final class Provider implements Filter.Provider { - @Override - public String[] typeUrls() { - return new String[]{TYPE_URL}; - } + @Override + public String[] typeUrls() { + return new String[] { TYPE_URL }; + } - @Override - public boolean isClientFilter() { - return true; + @Override + public ConfigOrError parseFilterConfig(Message rawProtoMessage) { + HTTPFault httpFaultProto; + if (!(rawProtoMessage instanceof Any)) { + return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); } - - @Override - public FaultFilter newInstance() { - return INSTANCE; - } - - @Override - public ConfigOrError parseFilterConfig(Message rawProtoMessage) { - HTTPFault httpFaultProto; - if (!(rawProtoMessage instanceof Any)) { - return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); - } - Any anyMessage = (Any) rawProtoMessage; - try { - httpFaultProto = anyMessage.unpack(HTTPFault.class); - } catch (InvalidProtocolBufferException e) { - return ConfigOrError.fromError("Invalid proto: " + e); - } - return parseHttpFault(httpFaultProto); + Any anyMessage = (Any) rawProtoMessage; + try { + httpFaultProto = anyMessage.unpack(HTTPFault.class); + } catch (InvalidProtocolBufferException e) { + return ConfigOrError.fromError("Invalid proto: " + e); } + return parseHttpFault(httpFaultProto); + } - @Override - public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { - return parseFilterConfig(rawProtoMessage); + private static ConfigOrError parseHttpFault(HTTPFault httpFault) { + FaultDelay faultDelay = null; + FaultAbort faultAbort = null; + if (httpFault.hasDelay()) { + faultDelay = parseFaultDelay(httpFault.getDelay()); } - - private static ConfigOrError parseHttpFault(HTTPFault httpFault) { - FaultDelay faultDelay = null; - FaultAbort faultAbort = null; - if (httpFault.hasDelay()) { - faultDelay = parseFaultDelay(httpFault.getDelay()); + if (httpFault.hasAbort()) { + ConfigOrError faultAbortOrError = parseFaultAbort(httpFault.getAbort()); + if (faultAbortOrError.errorDetail != null) { + return ConfigOrError.fromError( + "HttpFault contains invalid FaultAbort: " + faultAbortOrError.errorDetail); } - if (httpFault.hasAbort()) { - ConfigOrError faultAbortOrError = parseFaultAbort(httpFault.getAbort()); - if (faultAbortOrError.errorDetail != null) { - return ConfigOrError.fromError( - "HttpFault contains invalid FaultAbort: " + faultAbortOrError.errorDetail); - } - faultAbort = faultAbortOrError.config; - } - Integer maxActiveFaults = null; - if (httpFault.hasMaxActiveFaults()) { - maxActiveFaults = httpFault.getMaxActiveFaults().getValue(); - if (maxActiveFaults < 0) { - maxActiveFaults = Integer.MAX_VALUE; - } + faultAbort = faultAbortOrError.config; + } + Integer maxActiveFaults = null; + if (httpFault.hasMaxActiveFaults()) { + maxActiveFaults = httpFault.getMaxActiveFaults().getValue(); + if (maxActiveFaults < 0) { + maxActiveFaults = Integer.MAX_VALUE; } - return 
ConfigOrError.fromConfig(FaultConfig.create(faultDelay, faultAbort, maxActiveFaults)); } + return ConfigOrError.fromConfig(FaultConfig.create(faultDelay, faultAbort, maxActiveFaults)); + } - private static FaultDelay parseFaultDelay( - io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultDelay faultDelay) { - FaultConfig.FractionalPercent percent = parsePercent(faultDelay.getPercentage()); - if (faultDelay.hasHeaderDelay()) { - return FaultDelay.forHeader(percent); - } - return FaultDelay.forFixedDelay(Durations.toNanos(faultDelay.getFixedDelay()), percent); + private static FaultDelay parseFaultDelay( + io.envoyproxy.envoy.extensions.filters.common.fault.v3.FaultDelay faultDelay) { + FaultConfig.FractionalPercent percent = parsePercent(faultDelay.getPercentage()); + if (faultDelay.hasHeaderDelay()) { + return FaultDelay.forHeader(percent); } + return FaultDelay.forFixedDelay(Durations.toNanos(faultDelay.getFixedDelay()), percent); + } - @VisibleForTesting - static ConfigOrError parseFaultAbort( - io.envoyproxy.envoy.extensions.filters.http.fault.v3.FaultAbort faultAbort) { - FaultConfig.FractionalPercent percent = parsePercent(faultAbort.getPercentage()); - switch (faultAbort.getErrorTypeCase()) { - case HEADER_ABORT: - return ConfigOrError.fromConfig(FaultAbort.forHeader(percent)); - case HTTP_STATUS: - return ConfigOrError.fromConfig(FaultAbort.forStatus( - GrpcUtil.httpStatusToGrpcStatus(faultAbort.getHttpStatus()), percent)); - case GRPC_STATUS: - return ConfigOrError.fromConfig(FaultAbort.forStatus( - Status.fromCodeValue(faultAbort.getGrpcStatus()), percent)); - case ERRORTYPE_NOT_SET: - default: - return ConfigOrError.fromError( - "Unknown error type case: " + faultAbort.getErrorTypeCase()); - } + @VisibleForTesting + static ConfigOrError parseFaultAbort( + io.envoyproxy.envoy.extensions.filters.http.fault.v3.FaultAbort faultAbort) { + FaultConfig.FractionalPercent percent = parsePercent(faultAbort.getPercentage()); + switch (faultAbort.getErrorTypeCase()) { + case HEADER_ABORT: + return ConfigOrError.fromConfig(FaultAbort.forHeader(percent)); + case HTTP_STATUS: + return ConfigOrError.fromConfig(FaultAbort.forStatus( + GrpcUtil.httpStatusToGrpcStatus(faultAbort.getHttpStatus()), percent)); + case GRPC_STATUS: + return ConfigOrError.fromConfig(FaultAbort.forStatus( + Status.fromCodeValue(faultAbort.getGrpcStatus()), percent)); + case ERRORTYPE_NOT_SET: + default: + return ConfigOrError.fromError( + "Unknown error type case: " + faultAbort.getErrorTypeCase()); } + } - private static FaultConfig.FractionalPercent parsePercent(FractionalPercent proto) { - switch (proto.getDenominator()) { - case HUNDRED: - return FaultConfig.FractionalPercent.perHundred(proto.getNumerator()); - case TEN_THOUSAND: - return FaultConfig.FractionalPercent.perTenThousand(proto.getNumerator()); - case MILLION: - return FaultConfig.FractionalPercent.perMillion(proto.getNumerator()); - case UNRECOGNIZED: - default: - throw new IllegalArgumentException("Unknown denominator type: " + proto.getDenominator()); - } + private static FaultConfig.FractionalPercent parsePercent(FractionalPercent proto) { + switch (proto.getDenominator()) { + case HUNDRED: + return FaultConfig.FractionalPercent.perHundred(proto.getNumerator()); + case TEN_THOUSAND: + return FaultConfig.FractionalPercent.perTenThousand(proto.getNumerator()); + case MILLION: + return FaultConfig.FractionalPercent.perMillion(proto.getNumerator()); + case UNRECOGNIZED: + default: + throw new IllegalArgumentException("Unknown denominator type: " + 
proto.getDenominator()); } } + @Override + public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { + return parseFilterConfig(rawProtoMessage); + } + @Nullable @Override public ClientInterceptor buildClientInterceptor( diff --git a/xds/src/main/java/io/grpc/xds/Filter.java b/xds/src/main/java/io/grpc/xds/Filter.java index ab61ba2b570..29f8cc4e337 100644 --- a/xds/src/main/java/io/grpc/xds/Filter.java +++ b/xds/src/main/java/io/grpc/xds/Filter.java @@ -25,82 +25,48 @@ import javax.annotation.Nullable; /** - * Defines the parsing functionality of an HTTP filter. - * - *

A Filter may optionally implement either {@link Filter#buildClientInterceptor} or - * {@link Filter#buildServerInterceptor} or both, and return true from corresponding - * {@link Provider#isClientFilter()}, {@link Provider#isServerFilter()} to indicate that the filter - * is capable of working on the client side or server side or both, respectively. + * Defines the parsing functionality of an HTTP filter. A Filter may optionally implement either + * {@link ClientInterceptorBuilder} or {@link ServerInterceptorBuilder} or both, indicating it is + * capable of working on the client side or server side or both, respectively. */ interface Filter { - /** Represents an opaque data structure holding configuration for a filter. */ - interface FilterConfig { - String typeUrl(); - } - /** - * Common interface for filter providers. + * The proto message types supported by this filter. A filter will be registered by each of its + * supported message types. */ - interface Provider { - /** - * The proto message types supported by this filter. A filter will be registered by each of its - * supported message types. - */ - String[] typeUrls(); - - /** - * Whether the filter can be installed on the client side. - * - *

Returns true if the filter implements {@link Filter#buildClientInterceptor}. - */ - default boolean isClientFilter() { - return false; - } + String[] typeUrls(); - /** - * Whether the filter can be installed into xDS-enabled servers. - * - *

Returns true if the filter implements {@link Filter#buildServerInterceptor}. - */ - default boolean isServerFilter() { - return false; - } - - /** - * Creates a new instance of the filter. - * - *

Returns a filter instance registered with the same typeUrls as the provider, - * capable of working with the same FilterConfig type returned by provider's parse functions. - */ - Filter newInstance(); + /** + * Parses the top-level filter config from raw proto message. The message may be either a {@link + * com.google.protobuf.Any} or a {@link com.google.protobuf.Struct}. + */ + ConfigOrError parseFilterConfig(Message rawProtoMessage); - /** - * Parses the top-level filter config from raw proto message. The message may be either a {@link - * com.google.protobuf.Any} or a {@link com.google.protobuf.Struct}. - */ - ConfigOrError parseFilterConfig(Message rawProtoMessage); + /** + * Parses the per-filter override filter config from raw proto message. The message may be either + * a {@link com.google.protobuf.Any} or a {@link com.google.protobuf.Struct}. + */ + ConfigOrError parseFilterConfigOverride(Message rawProtoMessage); - /** - * Parses the per-filter override filter config from raw proto message. The message may be - * either a {@link com.google.protobuf.Any} or a {@link com.google.protobuf.Struct}. - */ - ConfigOrError parseFilterConfigOverride(Message rawProtoMessage); + /** Represents an opaque data structure holding configuration for a filter. */ + interface FilterConfig { + String typeUrl(); } /** Uses the FilterConfigs produced above to produce an HTTP filter interceptor for clients. */ - @Nullable - default ClientInterceptor buildClientInterceptor( - FilterConfig config, @Nullable FilterConfig overrideConfig, - ScheduledExecutorService scheduler) { - return null; + interface ClientInterceptorBuilder { + @Nullable + ClientInterceptor buildClientInterceptor( + FilterConfig config, @Nullable FilterConfig overrideConfig, + ScheduledExecutorService scheduler); } /** Uses the FilterConfigs produced above to produce an HTTP filter interceptor for the server. */ - @Nullable - default ServerInterceptor buildServerInterceptor( - FilterConfig config, @Nullable FilterConfig overrideConfig) { - return null; + interface ServerInterceptorBuilder { + @Nullable + ServerInterceptor buildServerInterceptor( + FilterConfig config, @Nullable FilterConfig overrideConfig); } /** Filter config with instance name. */ diff --git a/xds/src/main/java/io/grpc/xds/FilterRegistry.java b/xds/src/main/java/io/grpc/xds/FilterRegistry.java index 426c6d1b3f6..7f1fe82c6c3 100644 --- a/xds/src/main/java/io/grpc/xds/FilterRegistry.java +++ b/xds/src/main/java/io/grpc/xds/FilterRegistry.java @@ -23,21 +23,21 @@ /** * A registry for all supported {@link Filter}s. Filters can be queried from the registry - * by any of the {@link Filter.Provider#typeUrls() type URLs}. + * by any of the {@link Filter#typeUrls() type URLs}. */ final class FilterRegistry { private static FilterRegistry instance; - private final Map supportedFilters = new HashMap<>(); + private final Map supportedFilters = new HashMap<>(); private FilterRegistry() {} static synchronized FilterRegistry getDefaultRegistry() { if (instance == null) { instance = newRegistry().register( - new FaultFilter.Provider(), - new RouterFilter.Provider(), - new RbacFilter.Provider()); + FaultFilter.INSTANCE, + RouterFilter.INSTANCE, + RbacFilter.INSTANCE); } return instance; } @@ -48,8 +48,8 @@ static FilterRegistry newRegistry() { } @VisibleForTesting - FilterRegistry register(Filter.Provider... filters) { - for (Filter.Provider filter : filters) { + FilterRegistry register(Filter... 
filters) { + for (Filter filter : filters) { for (String typeUrl : filter.typeUrls()) { supportedFilters.put(typeUrl, filter); } @@ -58,7 +58,7 @@ FilterRegistry register(Filter.Provider... filters) { } @Nullable - Filter.Provider get(String typeUrl) { + Filter get(String typeUrl) { return supportedFilters.get(typeUrl); } } diff --git a/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java b/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java index 7ed617c9843..f73494d74db 100644 --- a/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java +++ b/xds/src/main/java/io/grpc/xds/GcpAuthenticationFilter.java @@ -35,6 +35,7 @@ import io.grpc.MethodDescriptor; import io.grpc.Status; import io.grpc.auth.MoreCallCredentials; +import io.grpc.xds.Filter.ClientInterceptorBuilder; import io.grpc.xds.MetadataRegistry.MetadataValueParser; import java.util.LinkedHashMap; import java.util.Map; @@ -46,63 +47,50 @@ * A {@link Filter} that injects a {@link CallCredentials} to handle * authentication for xDS credentials. */ -final class GcpAuthenticationFilter implements Filter { +final class GcpAuthenticationFilter implements Filter, ClientInterceptorBuilder { static final String TYPE_URL = "type.googleapis.com/envoy.extensions.filters.http.gcp_authn.v3.GcpAuthnFilterConfig"; - static final class Provider implements Filter.Provider { - @Override - public String[] typeUrls() { - return new String[]{TYPE_URL}; - } + @Override + public String[] typeUrls() { + return new String[] { TYPE_URL }; + } - @Override - public boolean isClientFilter() { - return true; + @Override + public ConfigOrError parseFilterConfig(Message rawProtoMessage) { + GcpAuthnFilterConfig gcpAuthnProto; + if (!(rawProtoMessage instanceof Any)) { + return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); } + Any anyMessage = (Any) rawProtoMessage; - @Override - public GcpAuthenticationFilter newInstance() { - return new GcpAuthenticationFilter(); + try { + gcpAuthnProto = anyMessage.unpack(GcpAuthnFilterConfig.class); + } catch (InvalidProtocolBufferException e) { + return ConfigOrError.fromError("Invalid proto: " + e); } - @Override - public ConfigOrError parseFilterConfig(Message rawProtoMessage) { - GcpAuthnFilterConfig gcpAuthnProto; - if (!(rawProtoMessage instanceof Any)) { - return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); - } - Any anyMessage = (Any) rawProtoMessage; - - try { - gcpAuthnProto = anyMessage.unpack(GcpAuthnFilterConfig.class); - } catch (InvalidProtocolBufferException e) { - return ConfigOrError.fromError("Invalid proto: " + e); + long cacheSize = 10; + // Validate cache_config + if (gcpAuthnProto.hasCacheConfig()) { + TokenCacheConfig cacheConfig = gcpAuthnProto.getCacheConfig(); + cacheSize = cacheConfig.getCacheSize().getValue(); + if (cacheSize == 0) { + return ConfigOrError.fromError( + "cache_config.cache_size must be greater than zero"); } - - long cacheSize = 10; - // Validate cache_config - if (gcpAuthnProto.hasCacheConfig()) { - TokenCacheConfig cacheConfig = gcpAuthnProto.getCacheConfig(); - cacheSize = cacheConfig.getCacheSize().getValue(); - if (cacheSize == 0) { - return ConfigOrError.fromError( - "cache_config.cache_size must be greater than zero"); - } - // LruCache's size is an int and briefly exceeds its maximum size before evicting entries - cacheSize = UnsignedLongs.min(cacheSize, Integer.MAX_VALUE - 1); - } - - GcpAuthenticationConfig config = new GcpAuthenticationConfig((int) cacheSize); - return 
ConfigOrError.fromConfig(config); + // LruCache's size is an int and briefly exceeds its maximum size before evicting entries + cacheSize = UnsignedLongs.min(cacheSize, Integer.MAX_VALUE - 1); } - @Override - public ConfigOrError parseFilterConfigOverride( - Message rawProtoMessage) { - return parseFilterConfig(rawProtoMessage); - } + GcpAuthenticationConfig config = new GcpAuthenticationConfig((int) cacheSize); + return ConfigOrError.fromConfig(config); + } + + @Override + public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { + return parseFilterConfig(rawProtoMessage); } @Nullable diff --git a/xds/src/main/java/io/grpc/xds/InternalRbacFilter.java b/xds/src/main/java/io/grpc/xds/InternalRbacFilter.java index cedb3f4c85b..54e6c748cd5 100644 --- a/xds/src/main/java/io/grpc/xds/InternalRbacFilter.java +++ b/xds/src/main/java/io/grpc/xds/InternalRbacFilter.java @@ -19,6 +19,8 @@ import io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC; import io.grpc.Internal; import io.grpc.ServerInterceptor; +import io.grpc.xds.RbacConfig; +import io.grpc.xds.RbacFilter; /** This class exposes some functionality in RbacFilter to other packages. */ @Internal @@ -28,12 +30,11 @@ private InternalRbacFilter() {} /** Parses RBAC filter config and creates AuthorizationServerInterceptor. */ public static ServerInterceptor createInterceptor(RBAC rbac) { - ConfigOrError filterConfig = RbacFilter.Provider.parseRbacConfig(rbac); + ConfigOrError filterConfig = RbacFilter.parseRbacConfig(rbac); if (filterConfig.errorDetail != null) { throw new IllegalArgumentException( String.format("Failed to parse Rbac policy: %s", filterConfig.errorDetail)); } - return new RbacFilter.Provider().newInstance() - .buildServerInterceptor(filterConfig.config, null); + return new RbacFilter().buildServerInterceptor(filterConfig.config, null); } } diff --git a/xds/src/main/java/io/grpc/xds/RbacFilter.java b/xds/src/main/java/io/grpc/xds/RbacFilter.java index 2bc4eeb846b..6a55f7f193e 100644 --- a/xds/src/main/java/io/grpc/xds/RbacFilter.java +++ b/xds/src/main/java/io/grpc/xds/RbacFilter.java @@ -18,6 +18,7 @@ import static com.google.common.base.Preconditions.checkNotNull; +import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.Any; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Message; @@ -33,6 +34,7 @@ import io.grpc.ServerCallHandler; import io.grpc.ServerInterceptor; import io.grpc.Status; +import io.grpc.xds.Filter.ServerInterceptorBuilder; import io.grpc.xds.internal.MatcherParser; import io.grpc.xds.internal.Matchers; import io.grpc.xds.internal.rbac.engine.GrpcAuthorizationEngine; @@ -64,10 +66,10 @@ import javax.annotation.Nullable; /** RBAC Http filter implementation. 
*/ -final class RbacFilter implements Filter { +final class RbacFilter implements Filter, ServerInterceptorBuilder { private static final Logger logger = Logger.getLogger(RbacFilter.class.getName()); - private static final RbacFilter INSTANCE = new RbacFilter(); + static final RbacFilter INSTANCE = new RbacFilter(); static final String TYPE_URL = "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC"; @@ -75,99 +77,87 @@ final class RbacFilter implements Filter { private static final String TYPE_URL_OVERRIDE_CONFIG = "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute"; - private RbacFilter() {} + RbacFilter() {} - static final class Provider implements Filter.Provider { - @Override - public String[] typeUrls() { - return new String[] {TYPE_URL, TYPE_URL_OVERRIDE_CONFIG}; - } + @Override + public String[] typeUrls() { + return new String[] { TYPE_URL, TYPE_URL_OVERRIDE_CONFIG }; + } - @Override - public boolean isServerFilter() { - return true; + @Override + public ConfigOrError parseFilterConfig(Message rawProtoMessage) { + RBAC rbacProto; + if (!(rawProtoMessage instanceof Any)) { + return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); } - - @Override - public RbacFilter newInstance() { - return INSTANCE; + Any anyMessage = (Any) rawProtoMessage; + try { + rbacProto = anyMessage.unpack(RBAC.class); + } catch (InvalidProtocolBufferException e) { + return ConfigOrError.fromError("Invalid proto: " + e); } + return parseRbacConfig(rbacProto); + } - @Override - public ConfigOrError parseFilterConfig(Message rawProtoMessage) { - RBAC rbacProto; - if (!(rawProtoMessage instanceof Any)) { - return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); - } - Any anyMessage = (Any) rawProtoMessage; - try { - rbacProto = anyMessage.unpack(RBAC.class); - } catch (InvalidProtocolBufferException e) { - return ConfigOrError.fromError("Invalid proto: " + e); - } - return parseRbacConfig(rbacProto); + @VisibleForTesting + static ConfigOrError parseRbacConfig(RBAC rbac) { + if (!rbac.hasRules()) { + return ConfigOrError.fromConfig(RbacConfig.create(null)); } - - @Override - public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { - RBACPerRoute rbacPerRoute; - if (!(rawProtoMessage instanceof Any)) { - return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); - } - Any anyMessage = (Any) rawProtoMessage; - try { - rbacPerRoute = anyMessage.unpack(RBACPerRoute.class); - } catch (InvalidProtocolBufferException e) { - return ConfigOrError.fromError("Invalid proto: " + e); - } - if (rbacPerRoute.hasRbac()) { - return parseRbacConfig(rbacPerRoute.getRbac()); - } else { + io.envoyproxy.envoy.config.rbac.v3.RBAC rbacConfig = rbac.getRules(); + GrpcAuthorizationEngine.Action authAction; + switch (rbacConfig.getAction()) { + case ALLOW: + authAction = GrpcAuthorizationEngine.Action.ALLOW; + break; + case DENY: + authAction = GrpcAuthorizationEngine.Action.DENY; + break; + case LOG: return ConfigOrError.fromConfig(RbacConfig.create(null)); - } + case UNRECOGNIZED: + default: + return ConfigOrError.fromError("Unknown rbacConfig action type: " + rbacConfig.getAction()); } - - static ConfigOrError parseRbacConfig(RBAC rbac) { - if (!rbac.hasRules()) { - return ConfigOrError.fromConfig(RbacConfig.create(null)); - } - io.envoyproxy.envoy.config.rbac.v3.RBAC rbacConfig = rbac.getRules(); - GrpcAuthorizationEngine.Action authAction; - switch (rbacConfig.getAction()) { - case ALLOW: - 
authAction = GrpcAuthorizationEngine.Action.ALLOW; - break; - case DENY: - authAction = GrpcAuthorizationEngine.Action.DENY; - break; - case LOG: - return ConfigOrError.fromConfig(RbacConfig.create(null)); - case UNRECOGNIZED: - default: + List policyMatchers = new ArrayList<>(); + List> sortedPolicyEntries = rbacConfig.getPoliciesMap().entrySet() + .stream() + .sorted((a,b) -> a.getKey().compareTo(b.getKey())) + .collect(Collectors.toList()); + for (Map.Entry entry: sortedPolicyEntries) { + try { + Policy policy = entry.getValue(); + if (policy.hasCondition() || policy.hasCheckedCondition()) { return ConfigOrError.fromError( - "Unknown rbacConfig action type: " + rbacConfig.getAction()); - } - List policyMatchers = new ArrayList<>(); - List> sortedPolicyEntries = rbacConfig.getPoliciesMap().entrySet() - .stream() - .sorted((a,b) -> a.getKey().compareTo(b.getKey())) - .collect(Collectors.toList()); - for (Map.Entry entry: sortedPolicyEntries) { - try { - Policy policy = entry.getValue(); - if (policy.hasCondition() || policy.hasCheckedCondition()) { - return ConfigOrError.fromError( - "Policy.condition and Policy.checked_condition must not set: " + entry.getKey()); - } - policyMatchers.add(PolicyMatcher.create(entry.getKey(), - parsePermissionList(policy.getPermissionsList()), - parsePrincipalList(policy.getPrincipalsList()))); - } catch (Exception e) { - return ConfigOrError.fromError("Encountered error parsing policy: " + e); + "Policy.condition and Policy.checked_condition must not set: " + entry.getKey()); } + policyMatchers.add(PolicyMatcher.create(entry.getKey(), + parsePermissionList(policy.getPermissionsList()), + parsePrincipalList(policy.getPrincipalsList()))); + } catch (Exception e) { + return ConfigOrError.fromError("Encountered error parsing policy: " + e); } - return ConfigOrError.fromConfig(RbacConfig.create( - AuthConfig.create(policyMatchers, authAction))); + } + return ConfigOrError.fromConfig(RbacConfig.create( + AuthConfig.create(policyMatchers, authAction))); + } + + @Override + public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { + RBACPerRoute rbacPerRoute; + if (!(rawProtoMessage instanceof Any)) { + return ConfigOrError.fromError("Invalid config type: " + rawProtoMessage.getClass()); + } + Any anyMessage = (Any) rawProtoMessage; + try { + rbacPerRoute = anyMessage.unpack(RBACPerRoute.class); + } catch (InvalidProtocolBufferException e) { + return ConfigOrError.fromError("Invalid proto: " + e); + } + if (rbacPerRoute.hasRbac()) { + return parseRbacConfig(rbacPerRoute.getRbac()); + } else { + return ConfigOrError.fromConfig(RbacConfig.create(null)); } } diff --git a/xds/src/main/java/io/grpc/xds/RouterFilter.java b/xds/src/main/java/io/grpc/xds/RouterFilter.java index 939bd0b12ab..8038c1b98ae 100644 --- a/xds/src/main/java/io/grpc/xds/RouterFilter.java +++ b/xds/src/main/java/io/grpc/xds/RouterFilter.java @@ -17,12 +17,18 @@ package io.grpc.xds; import com.google.protobuf.Message; +import io.grpc.ClientInterceptor; +import io.grpc.ServerInterceptor; +import io.grpc.xds.Filter.ClientInterceptorBuilder; +import io.grpc.xds.Filter.ServerInterceptorBuilder; +import java.util.concurrent.ScheduledExecutorService; +import javax.annotation.Nullable; /** * Router filter implementation. Currently this filter does not parse any field in the config. 
*/ -final class RouterFilter implements Filter { - private static final RouterFilter INSTANCE = new RouterFilter(); +enum RouterFilter implements Filter, ClientInterceptorBuilder, ServerInterceptorBuilder { + INSTANCE; static final String TYPE_URL = "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"; @@ -30,7 +36,7 @@ final class RouterFilter implements Filter { static final FilterConfig ROUTER_CONFIG = new FilterConfig() { @Override public String typeUrl() { - return TYPE_URL; + return RouterFilter.TYPE_URL; } @Override @@ -39,38 +45,33 @@ public String toString() { } }; - static final class Provider implements Filter.Provider { - @Override - public String[] typeUrls() { - return new String[]{TYPE_URL}; - } - - @Override - public boolean isClientFilter() { - return true; - } - - @Override - public boolean isServerFilter() { - return true; - } + @Override + public String[] typeUrls() { + return new String[] { TYPE_URL }; + } - @Override - public RouterFilter newInstance() { - return INSTANCE; - } + @Override + public ConfigOrError parseFilterConfig(Message rawProtoMessage) { + return ConfigOrError.fromConfig(ROUTER_CONFIG); + } - @Override - public ConfigOrError parseFilterConfig(Message rawProtoMessage) { - return ConfigOrError.fromConfig(ROUTER_CONFIG); - } + @Override + public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { + return ConfigOrError.fromError("Router Filter should not have override config"); + } - @Override - public ConfigOrError parseFilterConfigOverride( - Message rawProtoMessage) { - return ConfigOrError.fromError("Router Filter should not have override config"); - } + @Nullable + @Override + public ClientInterceptor buildClientInterceptor( + FilterConfig config, @Nullable FilterConfig overrideConfig, + ScheduledExecutorService scheduler) { + return null; } - private RouterFilter() {} + @Nullable + @Override + public ServerInterceptor buildServerInterceptor( + FilterConfig config, @Nullable Filter.FilterConfig overrideConfig) { + return null; + } } diff --git a/xds/src/main/java/io/grpc/xds/XdsListenerResource.java b/xds/src/main/java/io/grpc/xds/XdsListenerResource.java index 4b554be1743..a18b093e38f 100644 --- a/xds/src/main/java/io/grpc/xds/XdsListenerResource.java +++ b/xds/src/main/java/io/grpc/xds/XdsListenerResource.java @@ -575,8 +575,12 @@ static StructOrError parseHttpFilter( String filterName = httpFilter.getName(); boolean isOptional = httpFilter.getIsOptional(); if (!httpFilter.hasTypedConfig()) { - return isOptional ? null : StructOrError.fromError( - "HttpFilter [" + filterName + "] is not optional and has no typed config"); + if (isOptional) { + return null; + } else { + return StructOrError.fromError( + "HttpFilter [" + filterName + "] is not optional and has no typed config"); + } } Message rawConfig = httpFilter.getTypedConfig(); String typeUrl = httpFilter.getTypedConfig().getTypeUrl(); @@ -596,17 +600,18 @@ static StructOrError parseHttpFilter( return StructOrError.fromError( "HttpFilter [" + filterName + "] contains invalid proto: " + e); } - - Filter.Provider provider = filterRegistry.get(typeUrl); - if (provider == null - || (isForClient && !provider.isClientFilter()) - || (!isForClient && !provider.isServerFilter())) { - // Filter type not supported. - return isOptional ? null : StructOrError.fromError( - "HttpFilter [" + filterName + "](" + typeUrl + ") is required but unsupported for " + ( - isForClient ? 
"client" : "server")); + Filter filter = filterRegistry.get(typeUrl); + if ((isForClient && !(filter instanceof Filter.ClientInterceptorBuilder)) + || (!isForClient && !(filter instanceof Filter.ServerInterceptorBuilder))) { + if (isOptional) { + return null; + } else { + return StructOrError.fromError( + "HttpFilter [" + filterName + "](" + typeUrl + ") is required but unsupported for " + + (isForClient ? "client" : "server")); + } } - ConfigOrError filterConfig = provider.parseFilterConfig(rawConfig); + ConfigOrError filterConfig = filter.parseFilterConfig(rawConfig); if (filterConfig.errorDetail != null) { return StructOrError.fromError( "Invalid filter config for HttpFilter [" + filterName + "]: " + filterConfig.errorDetail); diff --git a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java index b7b1ed0bdba..21f5d5efce6 100644 --- a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java +++ b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java @@ -49,6 +49,7 @@ import io.grpc.internal.GrpcUtil; import io.grpc.internal.ObjectPool; import io.grpc.xds.ClusterSpecifierPlugin.PluginConfig; +import io.grpc.xds.Filter.ClientInterceptorBuilder; import io.grpc.xds.Filter.FilterConfig; import io.grpc.xds.Filter.NamedFilterConfig; import io.grpc.xds.RouteLookupServiceClusterSpecifierPlugin.RlsPluginConfig; @@ -826,36 +827,26 @@ private ClientInterceptor createFilters( if (filterConfigs == null) { return new PassthroughClientInterceptor(); } - Map selectedOverrideConfigs = new HashMap<>(virtualHost.filterConfigOverrides()); selectedOverrideConfigs.putAll(route.filterConfigOverrides()); if (weightedCluster != null) { selectedOverrideConfigs.putAll(weightedCluster.filterConfigOverrides()); } - ImmutableList.Builder filterInterceptors = ImmutableList.builder(); for (NamedFilterConfig namedFilter : filterConfigs) { - FilterConfig config = namedFilter.filterConfig; - String name = namedFilter.name; - String typeUrl = config.typeUrl(); - - Filter.Provider provider = filterRegistry.get(typeUrl); - if (provider == null || !provider.isClientFilter()) { - continue; - } - - Filter filter = provider.newInstance(); - - ClientInterceptor interceptor = - filter.buildClientInterceptor(config, selectedOverrideConfigs.get(name), scheduler); - if (interceptor != null) { - filterInterceptors.add(interceptor); + FilterConfig filterConfig = namedFilter.filterConfig; + Filter filter = filterRegistry.get(filterConfig.typeUrl()); + if (filter instanceof ClientInterceptorBuilder) { + ClientInterceptor interceptor = ((ClientInterceptorBuilder) filter) + .buildClientInterceptor( + filterConfig, selectedOverrideConfigs.get(namedFilter.name), + scheduler); + if (interceptor != null) { + filterInterceptors.add(interceptor); + } } } - - // Combine interceptors produced by different filters into a single one that executes - // them sequentially. The order is preserved. 
return combineInterceptors(filterInterceptors.build()); } diff --git a/xds/src/main/java/io/grpc/xds/XdsRouteConfigureResource.java b/xds/src/main/java/io/grpc/xds/XdsRouteConfigureResource.java index 80a77cbb1d4..c5ca8d45cb3 100644 --- a/xds/src/main/java/io/grpc/xds/XdsRouteConfigureResource.java +++ b/xds/src/main/java/io/grpc/xds/XdsRouteConfigureResource.java @@ -245,8 +245,8 @@ static StructOrError> parseOverrideFilterConfigs( return StructOrError.fromError( "FilterConfig [" + name + "] contains invalid proto: " + e); } - Filter.Provider provider = filterRegistry.get(typeUrl); - if (provider == null) { + Filter filter = filterRegistry.get(typeUrl); + if (filter == null) { if (isOptional) { continue; } @@ -254,7 +254,7 @@ static StructOrError> parseOverrideFilterConfigs( "HttpFilter [" + name + "](" + typeUrl + ") is required but unsupported"); } ConfigOrError filterConfig = - provider.parseFilterConfigOverride(rawConfig); + filter.parseFilterConfigOverride(rawConfig); if (filterConfig.errorDetail != null) { return StructOrError.fromError( "Invalid filter config for HttpFilter [" + name + "]: " + filterConfig.errorDetail); diff --git a/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java b/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java index bbb17d9b616..3a9b98ee321 100644 --- a/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java +++ b/xds/src/main/java/io/grpc/xds/XdsServerWrapper.java @@ -47,6 +47,7 @@ import io.grpc.xds.EnvoyServerProtoData.FilterChain; import io.grpc.xds.Filter.FilterConfig; import io.grpc.xds.Filter.NamedFilterConfig; +import io.grpc.xds.Filter.ServerInterceptorBuilder; import io.grpc.xds.FilterChainMatchingProtocolNegotiators.FilterChainMatchingHandler.FilterChainSelector; import io.grpc.xds.ThreadSafeRandom.ThreadSafeRandomImpl; import io.grpc.xds.VirtualHost.Route; @@ -523,56 +524,37 @@ private AtomicReference generateRoutingConfig(FilterChain f } private ImmutableMap generatePerRouteInterceptors( - @Nullable List filterConfigs, List virtualHosts) { - // This should always be called from the sync context. - // Ideally we'd want to throw otherwise, but this breaks the tests now. - // syncContext.throwIfNotInThisSynchronizationContext(); - + List namedFilterConfigs, List virtualHosts) { ImmutableMap.Builder perRouteInterceptors = new ImmutableMap.Builder<>(); - for (VirtualHost virtualHost : virtualHosts) { for (Route route : virtualHost.routes()) { - // Short circuit. - if (filterConfigs == null) { - perRouteInterceptors.put(route, noopInterceptor); - continue; - } - - // Override vhost filter configs with more specific per-route configs. - Map perRouteOverrides = ImmutableMap.builder() - .putAll(virtualHost.filterConfigOverrides()) - .putAll(route.filterConfigOverrides()) - .buildKeepingLast(); - - // Interceptors for this vhost/route combo. 
- List interceptors = new ArrayList<>(filterConfigs.size()); - - for (NamedFilterConfig namedFilter : filterConfigs) { - FilterConfig config = namedFilter.filterConfig; - String name = namedFilter.name; - String typeUrl = config.typeUrl(); - - Filter.Provider provider = filterRegistry.get(typeUrl); - if (provider == null || !provider.isServerFilter()) { - logger.warning("HttpFilter[" + name + "]: not supported on server-side: " + typeUrl); - continue; - } - - Filter filter = provider.newInstance(); - ServerInterceptor interceptor = - filter.buildServerInterceptor(config, perRouteOverrides.get(name)); - if (interceptor != null) { - interceptors.add(interceptor); + List filterInterceptors = new ArrayList<>(); + Map selectedOverrideConfigs = + new HashMap<>(virtualHost.filterConfigOverrides()); + selectedOverrideConfigs.putAll(route.filterConfigOverrides()); + if (namedFilterConfigs != null) { + for (NamedFilterConfig namedFilterConfig : namedFilterConfigs) { + FilterConfig filterConfig = namedFilterConfig.filterConfig; + Filter filter = filterRegistry.get(filterConfig.typeUrl()); + if (filter instanceof ServerInterceptorBuilder) { + ServerInterceptor interceptor = + ((ServerInterceptorBuilder) filter).buildServerInterceptor( + filterConfig, selectedOverrideConfigs.get(namedFilterConfig.name)); + if (interceptor != null) { + filterInterceptors.add(interceptor); + } + } else { + logger.log(Level.WARNING, "HttpFilterConfig(type URL: " + + filterConfig.typeUrl() + ") is not supported on server-side. " + + "Probably a bug at ClientXdsClient verification."); + } } } - - // Combine interceptors produced by different filters into a single one that executes - // them sequentially. The order is preserved. - perRouteInterceptors.put(route, combineInterceptors(interceptors)); + ServerInterceptor interceptor = combineInterceptors(filterInterceptors); + perRouteInterceptors.put(route, interceptor); } } - return perRouteInterceptors.buildOrThrow(); } diff --git a/xds/src/test/java/io/grpc/xds/FaultFilterTest.java b/xds/src/test/java/io/grpc/xds/FaultFilterTest.java index 8f0a33951b0..f85f29ec0a3 100644 --- a/xds/src/test/java/io/grpc/xds/FaultFilterTest.java +++ b/xds/src/test/java/io/grpc/xds/FaultFilterTest.java @@ -33,23 +33,16 @@ /** Tests for {@link FaultFilter}. 
*/ @RunWith(JUnit4.class) public class FaultFilterTest { - private static final FaultFilter.Provider FILTER_PROVIDER = new FaultFilter.Provider(); - - @Test - public void filterType_clientOnly() { - assertThat(FILTER_PROVIDER.isClientFilter()).isTrue(); - assertThat(FILTER_PROVIDER.isServerFilter()).isFalse(); - } @Test public void parseFaultAbort_convertHttpStatus() { Any rawConfig = Any.pack( HTTPFault.newBuilder().setAbort(FaultAbort.newBuilder().setHttpStatus(404)).build()); - FaultConfig faultConfig = FILTER_PROVIDER.parseFilterConfig(rawConfig).config; + FaultConfig faultConfig = FaultFilter.INSTANCE.parseFilterConfig(rawConfig).config; assertThat(faultConfig.faultAbort().status().getCode()) .isEqualTo(GrpcUtil.httpStatusToGrpcStatus(404).getCode()); - - FaultConfig faultConfigOverride = FILTER_PROVIDER.parseFilterConfigOverride(rawConfig).config; + FaultConfig faultConfigOverride = + FaultFilter.INSTANCE.parseFilterConfigOverride(rawConfig).config; assertThat(faultConfigOverride.faultAbort().status().getCode()) .isEqualTo(GrpcUtil.httpStatusToGrpcStatus(404).getCode()); } @@ -61,7 +54,7 @@ public void parseFaultAbort_withHeaderAbort() { .setPercentage(FractionalPercent.newBuilder() .setNumerator(20).setDenominator(DenominatorType.HUNDRED)) .setHeaderAbort(HeaderAbort.getDefaultInstance()).build(); - FaultConfig.FaultAbort faultAbort = FaultFilter.Provider.parseFaultAbort(proto).config; + FaultConfig.FaultAbort faultAbort = FaultFilter.parseFaultAbort(proto).config; assertThat(faultAbort.headerAbort()).isTrue(); assertThat(faultAbort.percent().numerator()).isEqualTo(20); assertThat(faultAbort.percent().denominatorType()) @@ -75,7 +68,7 @@ public void parseFaultAbort_withHttpStatus() { .setPercentage(FractionalPercent.newBuilder() .setNumerator(100).setDenominator(DenominatorType.TEN_THOUSAND)) .setHttpStatus(400).build(); - FaultConfig.FaultAbort res = FaultFilter.Provider.parseFaultAbort(proto).config; + FaultConfig.FaultAbort res = FaultFilter.parseFaultAbort(proto).config; assertThat(res.percent().numerator()).isEqualTo(100); assertThat(res.percent().denominatorType()) .isEqualTo(FaultConfig.FractionalPercent.DenominatorType.TEN_THOUSAND); @@ -89,7 +82,7 @@ public void parseFaultAbort_withGrpcStatus() { .setPercentage(FractionalPercent.newBuilder() .setNumerator(600).setDenominator(DenominatorType.MILLION)) .setGrpcStatus(Code.DEADLINE_EXCEEDED.value()).build(); - FaultConfig.FaultAbort faultAbort = FaultFilter.Provider.parseFaultAbort(proto).config; + FaultConfig.FaultAbort faultAbort = FaultFilter.parseFaultAbort(proto).config; assertThat(faultAbort.percent().numerator()).isEqualTo(600); assertThat(faultAbort.percent().denominatorType()) .isEqualTo(FaultConfig.FractionalPercent.DenominatorType.MILLION); diff --git a/xds/src/test/java/io/grpc/xds/GcpAuthenticationFilterTest.java b/xds/src/test/java/io/grpc/xds/GcpAuthenticationFilterTest.java index 52efaf9bd7b..3ca240ab7c7 100644 --- a/xds/src/test/java/io/grpc/xds/GcpAuthenticationFilterTest.java +++ b/xds/src/test/java/io/grpc/xds/GcpAuthenticationFilterTest.java @@ -35,7 +35,6 @@ import io.grpc.ClientInterceptor; import io.grpc.MethodDescriptor; import io.grpc.testing.TestMethodDescriptors; -import io.grpc.xds.GcpAuthenticationFilter.GcpAuthenticationConfig; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -44,14 +43,6 @@ @RunWith(JUnit4.class) public class GcpAuthenticationFilterTest { - private static final GcpAuthenticationFilter.Provider FILTER_PROVIDER = - new 
GcpAuthenticationFilter.Provider(); - - @Test - public void filterType_clientOnly() { - assertThat(FILTER_PROVIDER.isClientFilter()).isTrue(); - assertThat(FILTER_PROVIDER.isServerFilter()).isFalse(); - } @Test public void testParseFilterConfig_withValidConfig() { @@ -60,11 +51,13 @@ public void testParseFilterConfig_withValidConfig() { .build(); Any anyMessage = Any.pack(config); - ConfigOrError result = FILTER_PROVIDER.parseFilterConfig(anyMessage); + GcpAuthenticationFilter filter = new GcpAuthenticationFilter(); + ConfigOrError result = filter.parseFilterConfig(anyMessage); assertNotNull(result.config); assertNull(result.errorDetail); - assertEquals(20L, result.config.getCacheSize()); + assertEquals(20L, + ((GcpAuthenticationFilter.GcpAuthenticationConfig) result.config).getCacheSize()); } @Test @@ -74,7 +67,8 @@ public void testParseFilterConfig_withZeroCacheSize() { .build(); Any anyMessage = Any.pack(config); - ConfigOrError result = FILTER_PROVIDER.parseFilterConfig(anyMessage); + GcpAuthenticationFilter filter = new GcpAuthenticationFilter(); + ConfigOrError result = filter.parseFilterConfig(anyMessage); assertNull(result.config); assertNotNull(result.errorDetail); @@ -83,9 +77,9 @@ public void testParseFilterConfig_withZeroCacheSize() { @Test public void testParseFilterConfig_withInvalidMessageType() { + GcpAuthenticationFilter filter = new GcpAuthenticationFilter(); Message invalidMessage = Empty.getDefaultInstance(); - ConfigOrError result = - FILTER_PROVIDER.parseFilterConfig(invalidMessage); + ConfigOrError result = filter.parseFilterConfig(invalidMessage); assertNull(result.config); assertThat(result.errorDetail).contains("Invalid config type"); @@ -93,7 +87,8 @@ public void testParseFilterConfig_withInvalidMessageType() { @Test public void testClientInterceptor_createsAndReusesCachedCredentials() { - GcpAuthenticationConfig config = new GcpAuthenticationConfig(10); + GcpAuthenticationFilter.GcpAuthenticationConfig config = + new GcpAuthenticationFilter.GcpAuthenticationConfig(10); GcpAuthenticationFilter filter = new GcpAuthenticationFilter(); // Create interceptor diff --git a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java index 610d147ccf9..314b2094480 100644 --- a/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java +++ b/xds/src/test/java/io/grpc/xds/GrpcXdsClientImplDataTest.java @@ -110,6 +110,7 @@ import io.envoyproxy.envoy.type.v3.FractionalPercent; import io.envoyproxy.envoy.type.v3.FractionalPercent.DenominatorType; import io.envoyproxy.envoy.type.v3.Int64Range; +import io.grpc.ClientInterceptor; import io.grpc.EquivalentAddressGroup; import io.grpc.InsecureChannelCredentials; import io.grpc.LoadBalancerRegistry; @@ -149,7 +150,9 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -162,10 +165,6 @@ @RunWith(JUnit4.class) public class GrpcXdsClientImplDataTest { - private static final FaultFilter.Provider FAULT_FILTER_PROVIDER = new FaultFilter.Provider(); - private static final RbacFilter.Provider RBAC_FILTER_PROVIDER = new RbacFilter.Provider(); - private static final RouterFilter.Provider ROUTER_FILTER_PROVIDER = new RouterFilter.Provider(); - private static final ServerInfo LRS_SERVER_INFO = ServerInfo.create("lrs.googleapis.com", 
InsecureChannelCredentials.create()); private static final String GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE = @@ -1244,39 +1243,36 @@ public String typeUrl() { } } - private static class TestFilter implements io.grpc.xds.Filter { - - static final class Provider implements io.grpc.xds.Filter.Provider { - @Override - public String[] typeUrls() { - return new String[]{"test-url"}; - } - - @Override - public boolean isClientFilter() { - return true; - } + private static class TestFilter implements io.grpc.xds.Filter, + io.grpc.xds.Filter.ClientInterceptorBuilder { + @Override + public String[] typeUrls() { + return new String[]{"test-url"}; + } - @Override - public TestFilter newInstance() { - return new TestFilter(); - } + @Override + public ConfigOrError parseFilterConfig(Message rawProtoMessage) { + return ConfigOrError.fromConfig(new SimpleFilterConfig(rawProtoMessage)); + } - @Override - public ConfigOrError parseFilterConfig(Message rawProtoMessage) { - return ConfigOrError.fromConfig(new SimpleFilterConfig(rawProtoMessage)); - } + @Override + public ConfigOrError parseFilterConfigOverride( + Message rawProtoMessage) { + return ConfigOrError.fromConfig(new SimpleFilterConfig(rawProtoMessage)); + } - @Override - public ConfigOrError parseFilterConfigOverride(Message rawProtoMessage) { - return ConfigOrError.fromConfig(new SimpleFilterConfig(rawProtoMessage)); - } + @Nullable + @Override + public ClientInterceptor buildClientInterceptor(FilterConfig config, + @Nullable FilterConfig overrideConfig, + ScheduledExecutorService scheduler) { + return null; } } @Test public void parseHttpFilter_typedStructMigration() { - filterRegistry.register(new TestFilter.Provider()); + filterRegistry.register(new TestFilter()); Struct rawStruct = Struct.newBuilder() .putFields("name", Value.newBuilder().setStringValue("default").build()) .build(); @@ -1305,7 +1301,7 @@ public void parseHttpFilter_typedStructMigration() { @Test public void parseOverrideHttpFilter_typedStructMigration() { - filterRegistry.register(new TestFilter.Provider()); + filterRegistry.register(new TestFilter()); Struct rawStruct0 = Struct.newBuilder() .putFields("name", Value.newBuilder().setStringValue("default0").build()) .build(); @@ -1346,7 +1342,7 @@ public void parseHttpFilter_unsupportedAndRequired() { @Test public void parseHttpFilter_routerFilterForClient() { - filterRegistry.register(ROUTER_FILTER_PROVIDER); + filterRegistry.register(RouterFilter.INSTANCE); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1360,7 +1356,7 @@ public void parseHttpFilter_routerFilterForClient() { @Test public void parseHttpFilter_routerFilterForServer() { - filterRegistry.register(ROUTER_FILTER_PROVIDER); + filterRegistry.register(RouterFilter.INSTANCE); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1374,7 +1370,7 @@ public void parseHttpFilter_routerFilterForServer() { @Test public void parseHttpFilter_faultConfigForClient() { - filterRegistry.register(FAULT_FILTER_PROVIDER); + filterRegistry.register(FaultFilter.INSTANCE); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1401,7 +1397,7 @@ public void parseHttpFilter_faultConfigForClient() { @Test public void parseHttpFilter_faultConfigUnsupportedForServer() { - filterRegistry.register(FAULT_FILTER_PROVIDER); + filterRegistry.register(FaultFilter.INSTANCE); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1430,7 +1426,7 @@ public void parseHttpFilter_faultConfigUnsupportedForServer() { @Test 
public void parseHttpFilter_rbacConfigForServer() { - filterRegistry.register(RBAC_FILTER_PROVIDER); + filterRegistry.register(RbacFilter.INSTANCE); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1457,7 +1453,7 @@ public void parseHttpFilter_rbacConfigForServer() { @Test public void parseHttpFilter_rbacConfigUnsupportedForClient() { - filterRegistry.register(RBAC_FILTER_PROVIDER); + filterRegistry.register(RbacFilter.INSTANCE); HttpFilter httpFilter = HttpFilter.newBuilder() .setIsOptional(false) @@ -1486,7 +1482,7 @@ public void parseHttpFilter_rbacConfigUnsupportedForClient() { @Test public void parseOverrideRbacFilterConfig() { - filterRegistry.register(RBAC_FILTER_PROVIDER); + filterRegistry.register(RbacFilter.INSTANCE); RBACPerRoute rbacPerRoute = RBACPerRoute.newBuilder() .setRbac( @@ -1512,7 +1508,7 @@ public void parseOverrideRbacFilterConfig() { @Test public void parseOverrideFilterConfigs_unsupportedButOptional() { - filterRegistry.register(FAULT_FILTER_PROVIDER); + filterRegistry.register(FaultFilter.INSTANCE); HTTPFault httpFault = HTTPFault.newBuilder() .setDelay(FaultDelay.newBuilder().setFixedDelay(Durations.fromNanos(3000))) .build(); @@ -1532,7 +1528,7 @@ public void parseOverrideFilterConfigs_unsupportedButOptional() { @Test public void parseOverrideFilterConfigs_unsupportedAndRequired() { - filterRegistry.register(FAULT_FILTER_PROVIDER); + filterRegistry.register(FaultFilter.INSTANCE); HTTPFault httpFault = HTTPFault.newBuilder() .setDelay(FaultDelay.newBuilder().setFixedDelay(Durations.fromNanos(3000))) .build(); @@ -1624,7 +1620,7 @@ public void parseHttpConnectionManager_duplicateHttpFilters() throws ResourceInv @Test public void parseHttpConnectionManager_lastNotTerminal() throws ResourceInvalidException { - filterRegistry.register(FAULT_FILTER_PROVIDER); + filterRegistry.register(FaultFilter.INSTANCE); HttpConnectionManager hcm = HttpConnectionManager.newBuilder() .addHttpFilters( @@ -1642,7 +1638,7 @@ public void parseHttpConnectionManager_lastNotTerminal() throws ResourceInvalidE @Test public void parseHttpConnectionManager_terminalNotLast() throws ResourceInvalidException { - filterRegistry.register(ROUTER_FILTER_PROVIDER); + filterRegistry.register(RouterFilter.INSTANCE); HttpConnectionManager hcm = HttpConnectionManager.newBuilder() .addHttpFilters( diff --git a/xds/src/test/java/io/grpc/xds/RbacFilterTest.java b/xds/src/test/java/io/grpc/xds/RbacFilterTest.java index 7f195693d84..013b21e3f45 100644 --- a/xds/src/test/java/io/grpc/xds/RbacFilterTest.java +++ b/xds/src/test/java/io/grpc/xds/RbacFilterTest.java @@ -78,13 +78,6 @@ public class RbacFilterTest { private static final String PATH = "auth"; private static final StringMatcher STRING_MATCHER = StringMatcher.newBuilder().setExact("/" + PATH).setIgnoreCase(true).build(); - private static final RbacFilter.Provider FILTER_PROVIDER = new RbacFilter.Provider(); - - @Test - public void filterType_serverOnly() { - assertThat(FILTER_PROVIDER.isClientFilter()).isFalse(); - assertThat(FILTER_PROVIDER.isServerFilter()).isTrue(); - } @Test @SuppressWarnings({"unchecked", "deprecation"}) @@ -259,7 +252,7 @@ public void testAuthorizationInterceptor() { OrMatcher.create(AlwaysTrueMatcher.INSTANCE)); AuthConfig authconfig = AuthConfig.create(Collections.singletonList(policyMatcher), GrpcAuthorizationEngine.Action.ALLOW); - FILTER_PROVIDER.newInstance().buildServerInterceptor(RbacConfig.create(authconfig), null) + new RbacFilter().buildServerInterceptor(RbacConfig.create(authconfig), null) 
.interceptCall(mockServerCall, new Metadata(), mockHandler); verify(mockHandler, never()).startCall(eq(mockServerCall), any(Metadata.class)); ArgumentCaptor captor = ArgumentCaptor.forClass(Status.class); @@ -271,7 +264,7 @@ public void testAuthorizationInterceptor() { authconfig = AuthConfig.create(Collections.singletonList(policyMatcher), GrpcAuthorizationEngine.Action.DENY); - FILTER_PROVIDER.newInstance().buildServerInterceptor(RbacConfig.create(authconfig), null) + new RbacFilter().buildServerInterceptor(RbacConfig.create(authconfig), null) .interceptCall(mockServerCall, new Metadata(), mockHandler); verify(mockHandler).startCall(eq(mockServerCall), any(Metadata.class)); } @@ -297,7 +290,7 @@ public void handleException() { .putPolicies("policy-name", Policy.newBuilder().setCondition(Expr.newBuilder().build()).build()) .build()).build(); - result = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto)); + result = new RbacFilter().parseFilterConfig(Any.pack(rawProto)); assertThat(result.errorDetail).isNotNull(); } @@ -319,10 +312,10 @@ public void overrideConfig() { RbacConfig original = RbacConfig.create(authconfig); RBACPerRoute rbacPerRoute = RBACPerRoute.newBuilder().build(); - RbacConfig override = FILTER_PROVIDER.parseFilterConfigOverride(Any.pack(rbacPerRoute)).config; + RbacConfig override = + new RbacFilter().parseFilterConfigOverride(Any.pack(rbacPerRoute)).config; assertThat(override).isEqualTo(RbacConfig.create(null)); - ServerInterceptor interceptor = - FILTER_PROVIDER.newInstance().buildServerInterceptor(original, override); + ServerInterceptor interceptor = new RbacFilter().buildServerInterceptor(original, override); assertThat(interceptor).isNull(); policyMatcher = PolicyMatcher.create("policy-matcher-override", @@ -332,7 +325,7 @@ public void overrideConfig() { GrpcAuthorizationEngine.Action.ALLOW); override = RbacConfig.create(authconfig); - FILTER_PROVIDER.newInstance().buildServerInterceptor(original, override) + new RbacFilter().buildServerInterceptor(original, override) .interceptCall(mockServerCall, new Metadata(), mockHandler); verify(mockHandler).startCall(eq(mockServerCall), any(Metadata.class)); verify(mockServerCall).getAttributes(); @@ -344,22 +337,22 @@ public void ignoredConfig() { Message rawProto = io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC.newBuilder() .setRules(RBAC.newBuilder().setAction(Action.LOG) .putPolicies("policy-name", Policy.newBuilder().build()).build()).build(); - ConfigOrError result = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto)); + ConfigOrError result = new RbacFilter().parseFilterConfig(Any.pack(rawProto)); assertThat(result.config).isEqualTo(RbacConfig.create(null)); } @Test public void testOrderIndependenceOfPolicies() { Message rawProto = buildComplexRbac(ImmutableList.of(1, 2, 3, 4, 5, 6), true); - ConfigOrError ascFirst = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto)); + ConfigOrError ascFirst = new RbacFilter().parseFilterConfig(Any.pack(rawProto)); rawProto = buildComplexRbac(ImmutableList.of(1, 2, 3, 4, 5, 6), false); - ConfigOrError ascLast = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto)); + ConfigOrError ascLast = new RbacFilter().parseFilterConfig(Any.pack(rawProto)); assertThat(ascFirst.config).isEqualTo(ascLast.config); rawProto = buildComplexRbac(ImmutableList.of(6, 5, 4, 3, 2, 1), true); - ConfigOrError decFirst = FILTER_PROVIDER.parseFilterConfig(Any.pack(rawProto)); + ConfigOrError decFirst = new RbacFilter().parseFilterConfig(Any.pack(rawProto)); 
assertThat(ascFirst.config).isEqualTo(decFirst.config); } @@ -381,14 +374,14 @@ private MethodDescriptor.Builder method() { private ConfigOrError parse(List permissionList, List principalList) { - return RbacFilter.Provider.parseRbacConfig(buildRbac(permissionList, principalList)); + return RbacFilter.parseRbacConfig(buildRbac(permissionList, principalList)); } private ConfigOrError parseRaw(List permissionList, List principalList) { Message rawProto = buildRbac(permissionList, principalList); Any proto = Any.pack(rawProto); - return FILTER_PROVIDER.parseFilterConfig(proto); + return new RbacFilter().parseFilterConfig(proto); } private io.envoyproxy.envoy.extensions.filters.http.rbac.v3.RBAC buildRbac( @@ -456,6 +449,6 @@ private ConfigOrError parseOverride(List permissionList, RBACPerRoute rbacPerRoute = RBACPerRoute.newBuilder().setRbac( buildRbac(permissionList, principalList)).build(); Any proto = Any.pack(rbacPerRoute); - return FILTER_PROVIDER.parseFilterConfigOverride(proto); + return new RbacFilter().parseFilterConfigOverride(proto); } } diff --git a/xds/src/test/java/io/grpc/xds/RouterFilterTest.java b/xds/src/test/java/io/grpc/xds/RouterFilterTest.java deleted file mode 100644 index 30fd8a6dc38..00000000000 --- a/xds/src/test/java/io/grpc/xds/RouterFilterTest.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2025 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.grpc.xds; - -import static com.google.common.truth.Truth.assertThat; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -/** Tests for {@link RouterFilter}. 
*/ -@RunWith(JUnit4.class) -public class RouterFilterTest { - private static final RouterFilter.Provider FILTER_PROVIDER = new RouterFilter.Provider(); - - @Test - public void filterType_clientAndServer() { - assertThat(FILTER_PROVIDER.isClientFilter()).isTrue(); - assertThat(FILTER_PROVIDER.isServerFilter()).isTrue(); - } - -} diff --git a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java index f7309051f92..d895cecdb10 100644 --- a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java @@ -22,12 +22,10 @@ import static io.grpc.xds.FaultFilter.HEADER_ABORT_PERCENTAGE_KEY; import static io.grpc.xds.FaultFilter.HEADER_DELAY_KEY; import static io.grpc.xds.FaultFilter.HEADER_DELAY_PERCENTAGE_KEY; -import static org.mockito.AdditionalAnswers.delegatesTo; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.reset; @@ -132,9 +130,6 @@ public class XdsNameResolverTest { private static final String RDS_RESOURCE_NAME = "route-configuration.googleapis.com"; private static final String FAULT_FILTER_INSTANCE_NAME = "envoy.fault"; private static final String ROUTER_FILTER_INSTANCE_NAME = "envoy.router"; - private static final FaultFilter.Provider FAULT_FILTER_PROVIDER = new FaultFilter.Provider(); - private static final RouterFilter.Provider ROUTER_FILTER_PROVIDER = new RouterFilter.Provider(); - @Rule public final MockitoRule mocks = MockitoJUnit.rule(); private final SynchronizationContext syncContext = new SynchronizationContext( @@ -189,19 +184,9 @@ public void setUp() { originalEnableTimeout = XdsNameResolver.enableTimeout; XdsNameResolver.enableTimeout = true; - - // Replace FaultFilter.Provider with the one returning FaultFilter injected with mockRandom. - Filter.Provider faultFilterProvider = - mock(Filter.Provider.class, delegatesTo(FAULT_FILTER_PROVIDER)); - // Lenient: suppress [MockitoHint] Unused warning, only used in resolved_fault* tests. 
- lenient() - .doReturn(new FaultFilter(mockRandom, new AtomicLong())) - .when(faultFilterProvider).newInstance(); - FilterRegistry filterRegistry = FilterRegistry.newRegistry().register( - ROUTER_FILTER_PROVIDER, - faultFilterProvider); - + new FaultFilter(mockRandom, new AtomicLong()), + RouterFilter.INSTANCE); resolver = new XdsNameResolver(targetUri, null, AUTHORITY, null, serviceConfigParser, syncContext, scheduler, xdsClientPoolFactory, mockRandom, filterRegistry, null, metricRecorder); diff --git a/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java b/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java index 41f005ba583..66ac1475d8e 100644 --- a/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsServerWrapperTest.java @@ -31,6 +31,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -52,6 +53,7 @@ import io.grpc.xds.EnvoyServerProtoData.FilterChain; import io.grpc.xds.Filter.FilterConfig; import io.grpc.xds.Filter.NamedFilterConfig; +import io.grpc.xds.Filter.ServerInterceptorBuilder; import io.grpc.xds.FilterChainMatchingProtocolNegotiators.FilterChainMatchingHandler.FilterChainSelector; import io.grpc.xds.VirtualHost.Route; import io.grpc.xds.VirtualHost.Route.RouteMatch; @@ -955,11 +957,9 @@ public void run() { new AtomicReference<>(routingConfig)).build()); when(serverCall.getAuthority()).thenReturn("not-match.google.com"); - Filter.Provider filterProvider = mock(Filter.Provider.class); - when(filterProvider.typeUrls()).thenReturn(new String[]{"filter-type-url"}); - when(filterProvider.isServerFilter()).thenReturn(true); - filterRegistry.register(filterProvider); - + Filter filter = mock(Filter.class); + when(filter.typeUrls()).thenReturn(new String[]{"filter-type-url"}); + filterRegistry.register(filter); ServerCallHandler next = mock(ServerCallHandler.class); interceptor.interceptCall(serverCall, new Metadata(), next); verify(next, never()).startCall(any(ServerCall.class), any(Metadata.class)); @@ -998,11 +998,9 @@ public void run() { when(serverCall.getMethodDescriptor()).thenReturn(createMethod("NotMatchMethod")); when(serverCall.getAuthority()).thenReturn("foo.google.com"); - Filter.Provider filterProvider = mock(Filter.Provider.class); - when(filterProvider.typeUrls()).thenReturn(new String[]{"filter-type-url"}); - when(filterProvider.isServerFilter()).thenReturn(true); - filterRegistry.register(filterProvider); - + Filter filter = mock(Filter.class); + when(filter.typeUrls()).thenReturn(new String[]{"filter-type-url"}); + filterRegistry.register(filter); ServerCallHandler next = mock(ServerCallHandler.class); interceptor.interceptCall(serverCall, new Metadata(), next); verify(next, never()).startCall(any(ServerCall.class), any(Metadata.class)); @@ -1046,11 +1044,9 @@ public void run() { when(serverCall.getMethodDescriptor()).thenReturn(createMethod("FooService/barMethod")); when(serverCall.getAuthority()).thenReturn("foo.google.com"); - Filter.Provider filterProvider = mock(Filter.Provider.class); - when(filterProvider.typeUrls()).thenReturn(new String[]{"filter-type-url"}); - when(filterProvider.isServerFilter()).thenReturn(true); - filterRegistry.register(filterProvider); - + Filter filter = mock(Filter.class); + when(filter.typeUrls()).thenReturn(new String[]{"filter-type-url"}); + 
filterRegistry.register(filter); ServerCallHandler next = mock(ServerCallHandler.class); interceptor.interceptCall(serverCall, new Metadata(), next); verify(next, never()).startCall(any(ServerCall.class), any(Metadata.class)); @@ -1117,14 +1113,10 @@ public void run() { RouteMatch.create( PathMatcher.fromPath("/FooService/barMethod", true), Collections.emptyList(), null); - - Filter filter = mock(Filter.class); - Filter.Provider filterProvider = mock(Filter.Provider.class); - when(filterProvider.typeUrls()).thenReturn(new String[]{"filter-type-url"}); - when(filterProvider.isServerFilter()).thenReturn(true); - when(filterProvider.newInstance()).thenReturn(filter); - filterRegistry.register(filterProvider); - + Filter filter = mock(Filter.class, withSettings() + .extraInterfaces(ServerInterceptorBuilder.class)); + when(filter.typeUrls()).thenReturn(new String[]{"filter-type-url"}); + filterRegistry.register(filter); FilterConfig f0 = mock(FilterConfig.class); FilterConfig f0Override = mock(FilterConfig.class); when(f0.typeUrl()).thenReturn("filter-type-url"); @@ -1145,8 +1137,10 @@ public ServerCall.Listener interceptCall(ServerCallof()); VirtualHost virtualHost = VirtualHost.create( @@ -1191,13 +1185,10 @@ public void run() { }); xdsClient.ldsResource.get(5, TimeUnit.SECONDS); - Filter filter = mock(Filter.class); - Filter.Provider filterProvider = mock(Filter.Provider.class); - when(filterProvider.typeUrls()).thenReturn(new String[]{"filter-type-url"}); - when(filterProvider.isServerFilter()).thenReturn(true); - when(filterProvider.newInstance()).thenReturn(filter); - filterRegistry.register(filterProvider); - + Filter filter = mock(Filter.class, withSettings() + .extraInterfaces(ServerInterceptorBuilder.class)); + when(filter.typeUrls()).thenReturn(new String[]{"filter-type-url"}); + filterRegistry.register(filter); FilterConfig f0 = mock(FilterConfig.class); FilterConfig f0Override = mock(FilterConfig.class); when(f0.typeUrl()).thenReturn("filter-type-url"); @@ -1218,8 +1209,10 @@ public ServerCall.Listener interceptCall(ServerCall Date: Mon, 23 Jun 2025 12:00:25 -0700 Subject: [PATCH 14/15] download maven using archive/permalink url (#12169) --- buildscripts/grpc-java-artifacts/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildscripts/grpc-java-artifacts/Dockerfile b/buildscripts/grpc-java-artifacts/Dockerfile index 736babe9d8e..bf71a710d74 100644 --- a/buildscripts/grpc-java-artifacts/Dockerfile +++ b/buildscripts/grpc-java-artifacts/Dockerfile @@ -28,6 +28,6 @@ RUN mkdir -p "$ANDROID_HOME/cmdline-tools" && \ yes | "$ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager" --licenses # Install Maven -RUN curl -Ls https://dlcdn.apache.org/maven/maven-3/3.8.8/binaries/apache-maven-3.8.8-bin.tar.gz | \ +RUN curl -Ls https://archive.apache.org/dist/maven/maven-3/3.8.8/binaries/apache-maven-3.8.8-bin.tar.gz | \ tar xz -C /var/local ENV PATH /var/local/apache-maven-3.8.8/bin:$PATH From 43eedd0b3926bda1f49c39f2904171bfee12a3d1 Mon Sep 17 00:00:00 2001 From: eshitachandwani <59800922+eshitachandwani@users.noreply.github.com> Date: Thu, 26 Jun 2025 14:23:05 +0530 Subject: [PATCH 15/15] kokoro: add spiffe tests config (1.71.x backport) Backport of #12133 --- buildscripts/kokoro/psm-spiffe.cfg | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 buildscripts/kokoro/psm-spiffe.cfg diff --git a/buildscripts/kokoro/psm-spiffe.cfg b/buildscripts/kokoro/psm-spiffe.cfg new file mode 100644 index 00000000000..b04d715fca1 --- /dev/null +++ 
b/buildscripts/kokoro/psm-spiffe.cfg @@ -0,0 +1,17 @@ +# Config file for internal CI + +# Location of the continuous shell script in repository. +build_file: "grpc-java/buildscripts/kokoro/psm-interop-test-java.sh" +timeout_mins: 240 + +action { + define_artifacts { + regex: "artifacts/**/*sponge_log.xml" + regex: "artifacts/**/*.log" + strip_prefix: "artifacts" + } +} +env_vars { + key: "PSM_TEST_SUITE" + value: "spiffe" +}
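
Taken together, the xds patches above revert the 1.71.x branch to the pre-`Filter.Provider` shape: a filter registers itself in the registry under its type URLs, and the client/server wiring decides what it can do with an `instanceof` check against `ClientInterceptorBuilder`/`ServerInterceptorBuilder` instead of asking a provider for capabilities and instances. The snippet below is a small, self-contained model of that lookup-and-check pattern for reviewers; the names mirror the hunks above, but it is an illustrative sketch only, not the package-private `io.grpc.xds` API, and `EchoFilter` plus the string stand-in for an interceptor are made up for the example.

```java
import java.util.HashMap;
import java.util.Map;

/** Illustrative sketch only: models the registry + instanceof pattern restored above. */
public final class FilterPatternSketch {

  /** Stand-in for io.grpc.xds.Filter: identified by the type URLs it can parse. */
  interface Filter {
    String[] typeUrls();
  }

  /** Stand-in for Filter.ServerInterceptorBuilder: only some filters implement it. */
  interface ServerInterceptorBuilder {
    /** May return null, meaning "no interceptor needed" (as RouterFilter does above). */
    String buildServerInterceptor(String config);
  }

  /** Stand-in for FilterRegistry: maps each supported type URL to the filter itself. */
  static final class FilterRegistry {
    private final Map<String, Filter> filters = new HashMap<>();

    void register(Filter filter) {
      for (String typeUrl : filter.typeUrls()) {
        filters.put(typeUrl, filter);
      }
    }

    Filter get(String typeUrl) {
      return filters.get(typeUrl);
    }
  }

  public static void main(String[] args) {
    FilterRegistry registry = new FilterRegistry();

    // A filter that can also build server interceptors, registered directly
    // (no Provider/newInstance indirection).
    class EchoFilter implements Filter, ServerInterceptorBuilder {
      @Override public String[] typeUrls() {
        return new String[] {"type.example.com/echo"};
      }

      @Override public String buildServerInterceptor(String config) {
        return "interceptor(" + config + ")";
      }
    }
    registry.register(new EchoFilter());

    // Server-side wiring: look up by type URL, then keep the filter only if it
    // actually supports the server side -- the same instanceof check used in
    // the XdsServerWrapper and XdsListenerResource hunks above.
    Filter filter = registry.get("type.example.com/echo");
    if (filter instanceof ServerInterceptorBuilder) {
      String interceptor = ((ServerInterceptorBuilder) filter).buildServerInterceptor("cfg");
      if (interceptor != null) {
        System.out.println("installing " + interceptor);
      }
    }
  }
}
```

On the client side the analogous check is made against `ClientInterceptorBuilder` in XdsNameResolver, with the interceptor built from the per-route override config when one is present, as shown in the corresponding hunk earlier in this series.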