diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 706dbc076c..90f3915414 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -18,6 +18,7 @@ jobs: - msrv - miri - features + - deprecated - ffi - ffi-header - doc @@ -29,21 +30,19 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 - name: Install Rust - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@stable with: - profile: minimal - toolchain: stable - override: true components: rustfmt - name: cargo fmt --check - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check + run: | + if ! rustfmt --check --edition 2018 $(git ls-files '*.rs'); then + printf "Please run \`rustfmt --edition 2018 \$(git ls-files '*.rs')\` to fix rustfmt errors.\nSee CONTRIBUTING.md for more details.\n" >&2 + exit 1 + fi test: name: Test ${{ matrix.rust }} on ${{ matrix.os }} @@ -62,38 +61,32 @@ jobs: include: - rust: stable - features: "--features full" + features: "--features full,backports,deprecated" - rust: beta - features: "--features full" + features: "--features full,backports" - rust: nightly - features: "--features full,nightly" + features: "--features full,nightly,backports" benches: true runs-on: ${{ matrix.os }} steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 - name: Install Rust (${{ matrix.rust }}) - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: ${{ matrix.rust }} - override: true + + - uses: Swatinem/rust-cache@v2 - name: Test - uses: actions-rs/cargo@v1 - with: - command: test - args: ${{ matrix.features }} + run: cargo test ${{ matrix.features }} - name: Test all benches if: matrix.benches - uses: actions-rs/cargo@v1 - with: - command: test - args: --benches ${{ matrix.features }} + run: cargo test --benches ${{ matrix.features }} msrv: name: Check MSRV (${{ matrix.rust }}) @@ -101,7 +94,7 @@ jobs: strategy: matrix: rust: - - 1.49 # keep in sync with MSRV.md dev doc + - 1.63 # keep in sync with MSRV.md dev doc os: - ubuntu-latest @@ -110,20 +103,23 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 - name: Install Rust (${{ matrix.rust }}) - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: ${{ matrix.rust }} - override: true + + - uses: Swatinem/rust-cache@v2 + + - name: Pin some dependencies + run: | + cargo update -p tokio --precise 1.38.1 + cargo update -p tokio-util --precise 0.7.11 + cargo update -p hashbrown --precise 0.15.0 - name: Check - uses: actions-rs/cargo@v1 - with: - command: check - args: --features full + run: cargo check --features full miri: name: Test with Miri @@ -132,15 +128,12 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 - name: Install Rust - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@nightly with: - profile: minimal - toolchain: nightly components: miri - override: true - name: Test # Can't enable tcp feature since Miri does not support the tokio runtime @@ -152,20 +145,43 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly - override: true + uses: dtolnay/rust-toolchain@stable - name: Install cargo-hack - run: cargo install cargo-hack + uses: taiki-e/install-action@cargo-hack + + - uses: 
Swatinem/rust-cache@v2 - name: check --feature-powerset - run: cargo hack check --feature-powerset --depth 2 --skip ffi -Z avoid-dev-deps + run: cargo hack --no-dev-deps check --feature-powerset --depth 2 --skip ffi + + deprecated: + name: Check deprecated on ${{ matrix.rust }} + needs: [style] + strategy: + matrix: + rust: + - stable + - beta + + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Rust (${{ matrix.rust }}) + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.rust }} + + - uses: Swatinem/rust-cache@v2 + + - name: Check + run: cargo check --features full,backports,deprecated ffi: name: Test C API (FFI) @@ -173,69 +189,57 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly - override: true + uses: dtolnay/rust-toolchain@stable - name: Install cbindgen - uses: actions-rs/cargo@v1 + uses: taiki-e/cache-cargo-install-action@v1 with: - command: install - args: cbindgen + tool: cbindgen + + - uses: Swatinem/rust-cache@v2 - name: Build FFI - uses: actions-rs/cargo@v1 + run: cargo rustc --features client,http1,http2,ffi --crate-type cdylib env: RUSTFLAGS: --cfg hyper_unstable_ffi - with: - command: rustc - args: --features client,http1,http2,ffi -Z unstable-options --crate-type cdylib - name: Make Examples run: cd capi/examples && make client - name: Run FFI unit tests - uses: actions-rs/cargo@v1 + run: cargo test --features full,ffi --lib env: RUSTFLAGS: --cfg hyper_unstable_ffi - with: - command: test - args: --features full,ffi --lib ffi-header: name: Verify hyper.h is up to date runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly - default: true - override: true - components: cargo + uses: dtolnay/rust-toolchain@stable - name: Install cbindgen - uses: actions-rs/cargo@v1 + uses: taiki-e/cache-cargo-install-action@v1 + with: + tool: cbindgen + + - name: Install cargo-expand + uses: taiki-e/cache-cargo-install-action@v1 with: - command: install - args: cbindgen + tool: cargo-expand + + - uses: Swatinem/rust-cache@v2 - name: Build FFI - uses: actions-rs/cargo@v1 + run: cargo build --features client,http1,http2,ffi env: RUSTFLAGS: --cfg hyper_unstable_ffi - with: - command: build - args: --features client,http1,http2,ffi - name: Ensure that hyper.h is up to date run: ./capi/gen_header.sh --verify @@ -246,17 +250,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly - override: true + uses: dtolnay/rust-toolchain@nightly - name: cargo doc - uses: actions-rs/cargo@v1 - with: - command: rustdoc - args: --features full,ffi -- --cfg docsrs --cfg hyper_unstable_ffi -D broken-intra-doc-links + run: cargo rustdoc --features full,ffi -- --cfg docsrs --cfg hyper_unstable_ffi -D broken-intra-doc-links diff --git a/CHANGELOG.md b/CHANGELOG.md index d0d25bdefc..f0a6e55fd4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,161 @@ +### v0.14.32 (2024-12-16) + +#### Features + +* **server:** add `Builder::max_pending_accept_reset_streams(num)` option ([a24f0c0](https://github.com/hyperium/hyper/commit/a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6)) + +#### Bug Fixes 
+ +* **http1:** fix intermittent panic parsing partial headers ([0f274ae](https://github.com/hyperium/hyper/commit/0f274ae653841e0a58b2835fd3edf47a08311e50)) + +### v0.14.31 (2024-10-15) + + +#### Bug Fixes + +* **http1:** improve performance of parsing sequentially partial messages ([97b595e](https://github.com/hyperium/hyper/commit/97b595e5892c239a195b199f9e7910f582351c44)) + + +### v0.14.30 (2024-07-09) + + +#### Bug Fixes + +* **http1:** reject final chunked if missing 0 ([4a51b2af](https://github.com/hyperium/hyper/commit/4a51b2afefcc1373c2a5b834fa0ae8d935dbff46)) + + +### v0.14.29 (2024-06-03) + + +#### Bug Fixes + +* **http1:** start header read timeout immediately (#3305) ([b5c2592f](https://github.com/hyperium/hyper/commit/b5c2592fde5e20d29c69428c85aef3d682ee36bc)) + + +#### Features + +* **http2:** add config for `max_local_error_reset_streams` in server (#3528) ([dedcb674](https://github.com/hyperium/hyper/commit/dedcb674f35eaec765a42b550caabe6f694d86d1)) + + +### v0.14.28 (2023-12-18) + + +#### Bug Fixes + +* **client:** + * panic when pool idle timeout set to zero (#3365) ([34d38008](https://github.com/hyperium/hyper/commit/34d38008499de37d9b5b65440b3123ccd05c7510)) + * divide by zero error when DNS returns no addrs (#3355) ([41eaf204](https://github.com/hyperium/hyper/commit/41eaf2042b8169d3dd067d49cfdbdaaf36678903)) + * Do not strip `path` and `scheme` components from URIs for HTTP/2 Extended CONNEC ([45aa6249](https://github.com/hyperium/hyper/commit/45aa62494127066c63c987a57cc5eae2c5361886)) + * early respond from server shouldn't propagate reset error (#3274) ([aac6760e](https://github.com/hyperium/hyper/commit/aac6760e032050dd47f5dbd32f852bf1ede9312b), closes [#2872](https://github.com/hyperium/hyper/issues/2872)) +* **http1:** + * add internal limit for chunked extensions (#3495) ([344a8782](https://github.com/hyperium/hyper/commit/344a87822951a46d252843ccc0b48e62988fc85b)) + * reject chunked headers missing a digit (#3494) ([5eca028f](https://github.com/hyperium/hyper/commit/5eca028f4142e3e73f6d6188a4076f4db292b252)) + + +#### Features + +* **body:** deprecate to_bytes() and aggregate() (#3466) ([7f382ad6](https://github.com/hyperium/hyper/commit/7f382ad64326e1470912feb310d348fd79099c44)) +* **client:** add `conn::http1::Connection::without_shutdown()` method (#3431) ([ad504977](https://github.com/hyperium/hyper/commit/ad504977b520a9582e5516a08b2f1028ef1b5e45)) +* **server:** add `Builder::local_addr()` (#3278) ([d342c2c7](https://github.com/hyperium/hyper/commit/d342c2c714498d33891fa285a3c9ae991dc34769)) + + +### v0.14.27 (2023-06-26) + + +#### Bug Fixes + +* **http1:** + * send error on Incoming body when connection errors (#3256) ([b107655f](https://github.com/hyperium/hyper/commit/b107655ff8557d001bb8e558752f5f2247381e98), closes [#3253](https://github.com/hyperium/hyper/issues/3253)) + * properly end chunked bodies when it was known to be empty (#3254) ([32422c47](https://github.com/hyperium/hyper/commit/32422c47ec35e7405873277c87de14c18dbb98bd), closes [#3252](https://github.com/hyperium/hyper/issues/3252)) + + +#### Features + +* **client:** include connection info in `Client::send_request` errors (#2749) + + +### v0.14.26 (2023-04-13) + + +#### Features + +* **http2:** add `max_pending_accept_reset_streams` configuration option (#3201) ([a6f7571a](https://github.com/hyperium/hyper/commit/a6f7571a5299793aef8f1aa4194574438b9df64c)) + + +### v0.14.25 (2023-03-10) + + +#### Features + +* **client:** + * deprecate `client::conn` types (#3156) 
([0ced15d3](https://github.com/hyperium/hyper/commit/0ced15d3cc10ace477ebda13ead8e6857b51867e)) + * add 1.0 compatible client conn API (#3155) ([253cc74d](https://github.com/hyperium/hyper/commit/253cc74d86b082067aa884a0a63a089d7d19401d), closes [#3053](https://github.com/hyperium/hyper/issues/3053)) + * add `client::connect::capture_connection()` (#3144) ([c8493399](https://github.com/hyperium/hyper/commit/c8493399b2929a86f3020ae77304a00e43cfd161)) + * add `poison` to `Connected` (#3145) ([37ed5a2e](https://github.com/hyperium/hyper/commit/37ed5a2e3cab76a11092823a80afd8fe2f2a9693)) +* **server:** + * deprecate server conn structs (#3161) ([02fe20f2](https://github.com/hyperium/hyper/commit/02fe20f232a7c3cf24d505b121ce4d428a93254d)) + * backport the split server conn modules from 1.0 (#3102) ([84881c9e](https://github.com/hyperium/hyper/commit/84881c9e5160167a89d18d30c0ef6856dc859839), closes [#3079](https://github.com/hyperium/hyper/issues/3079)) + * remove some `Unpin` and `'static` constraints (#3119) ([0368a41a](https://github.com/hyperium/hyper/commit/0368a41a6cc1a5c6f1eada0d88e38b7dce261587)) + + +### v0.14.24 (2023-02-02) + + +#### Bug Fixes + +* **body:** set an internal max to reserve in `to_bytes` ([4d89adce](https://github.com/hyperium/hyper/commit/4d89adce6122af1650165337d9d814314e7ee409)) +* **server:** prevent sending 100-continue if user drops request body (#3138) ([92443d7e](https://github.com/hyperium/hyper/commit/92443d7ef57ed474f0add7dd1f114c81a3faa8fe)) + + +#### Features + +* **http2:** add `http2_max_header_list_size` to `hyper::server::Builder` (#3006) ([031425f0](https://github.com/hyperium/hyper/commit/031425f087219f02a87eea3d01b14e75e35a5209)) + + +### v0.14.23 (2022-11-07) + + +#### Bug Fixes + +* **http2:** Fix race condition in client dispatcher (#3041) ([2f1c0b72](https://github.com/hyperium/hyper/commit/2f1c0b720da4553fff216a38018a78ecafe23d60), closes [#2419](https://github.com/hyperium/hyper/issues/2419)) + + +### v0.14.22 (2022-10-31) + + +#### Bug Fixes + +* **server:** fix compile-time cfgs for TCP keepalive options (#3039) ([e8765e0f](https://github.com/hyperium/hyper/commit/e8765e0febd0267472799dcd1109af75944c2637), closes [#3038](https://github.com/hyperium/hyper/issues/3038)) + + +### v0.14.21 (2022-10-31) + + +#### Bug Fixes + +* **client:** send an error back to client when dispatch misbehaves () ([9fa36382](https://github.com/hyperium/hyper/commit/9fa363829ced232acb18c31ebab8ffb93f691ecc), closes [#2649](https://github.com/hyperium/hyper/issues/2649)) +* **http1:** fix `http1_header_read_timeout` to use same future (#2891) ([c5a14e7c](https://github.com/hyperium/hyper/commit/c5a14e7c087424001223aaeb2dad532ba4ee6063)) + + +#### Features + +* **http1:** allow ignoring invalid header lines in requests ([73dd4746](https://github.com/hyperium/hyper/commit/73dd474652f5e71fe8a87baa6f9b2490ae746eb3)) +* **server:** add `Server::tcp_keepalive_interval` and `Server::tcp_keepalive_retries` (#2991) ([287d7124](https://github.com/hyperium/hyper/commit/287d712483aec6671427438d60ed2a72f856fd9f)) + + +### v0.14.20 (2022-07-07) + + +#### Bug Fixes + +* **http1:** fix `http1_header_read_timeout` to use same future (#2891) ([c5a14e7c](https://github.com/hyperium/hyper/commit/c5a14e7c087424001223aaeb2dad532ba4ee6063)) + + +#### Features + +* **ext:** support non-canonical HTTP/1 reason phrases (#2792) ([b2052a43](https://github.com/hyperium/hyper/commit/b2052a433fd151d7d745ee9c5b27a2031db1dc32)) + + ### v0.14.19 (2022-05-27) diff --git a/Cargo.toml b/Cargo.toml index 
8288584ace..b187de594a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.19" +version = "0.14.32" description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs/" @@ -27,19 +27,19 @@ futures-util = { version = "0.3", default-features = false } http = "0.2" http-body = "0.4" httpdate = "1.0" -httparse = "1.6" -h2 = { version = "0.3.9", optional = true } +httparse = "1.8" +h2 = { version = "0.3.24", optional = true } itoa = "1" tracing = { version = "0.1", default-features = false, features = ["std"] } pin-project-lite = "0.2.4" tower-service = "0.3" -tokio = { version = "1", features = ["sync"] } +tokio = { version = "1.27", features = ["sync"] } want = "0.3" # Optional libc = { version = "0.2", optional = true } -socket2 = { version = "0.4", optional = true } +socket2 = { version = ">=0.4.7, <0.6.0", optional = true, features = ["all"] } [dev-dependencies] futures-util = { version = "0.3", default-features = false, features = ["alloc"] } @@ -49,7 +49,7 @@ pretty_env_logger = "0.4" spmc = "0.3" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -tokio = { version = "1", features = [ +tokio = { version = "1.27", features = [ "fs", "macros", "io-std", @@ -62,7 +62,7 @@ tokio = { version = "1", features = [ ] } tokio-test = "0.4" tokio-util = { version = "0.7", features = ["codec"] } -tower = { version = "0.4", features = ["make", "util"] } +tower = { version = "0.4", default-features = false, features = ["make", "util"] } url = "2.2" [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies] @@ -109,6 +109,12 @@ tcp = [ # C-API support (currently unstable (no semver)) ffi = ["libc"] +# enable 1.0 backports +backports = [] + +# whether or not to display deprecation warnings +deprecated = [] + # internal features used in CI nightly = [] __internal_happy_eyeballs_tests = [] @@ -191,7 +197,7 @@ required-features = ["full"] [[example]] name = "tower_client" path = "examples/tower_client.rs" -required-features = ["full"] +required-features = ["full", "backports"] [[example]] name = "tower_server" diff --git a/README.md b/README.md index c3c73d7ed7..328f47bd36 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ libraries and applications. If you are looking for a convenient HTTP client, then you may wish to consider [reqwest](https://github.com/seanmonstar/reqwest). If you are looking for a -convenient HTTP server, then you may wish to consider [warp](https://github.com/seanmonstar/warp). +convenient HTTP server, then you may wish to consider [Axum](https://github.com/tokio-rs/tokio). Both are built on top of this library. ## Contributing diff --git a/capi/gen_header.sh b/capi/gen_header.sh index d0b9c13a32..7a08d3e6ff 100755 --- a/capi/gen_header.sh +++ b/capi/gen_header.sh @@ -6,101 +6,44 @@ set -e CAPI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -WORK_DIR=$(mktemp -d) - -# check if tmp dir was created -if [[ ! "$WORK_DIR" || ! 
-d "$WORK_DIR" ]]; then - echo "Could not create temp dir" - exit 1 -fi - header_file_backup="$CAPI_DIR/include/hyper.h.backup" function cleanup { - rm -rf "$WORK_DIR" + rm -rf "$WORK_DIR" || true rm "$header_file_backup" || true } trap cleanup EXIT -mkdir "$WORK_DIR/src" - -# Fake a library -cat > "$WORK_DIR/src/lib.rs" << EOF -#[path = "$CAPI_DIR/../src/ffi/mod.rs"] -pub mod ffi; -EOF - -# And its Cargo.toml -cat > "$WORK_DIR/Cargo.toml" << EOF -[package] -name = "hyper" -version = "0.0.0" -edition = "2018" -publish = false - -[dependencies] -# Determined which dependencies we need by running the "cargo rustc" command -# below and watching the compile error output for references to unknown imports, -# until we didn't get any errors. -bytes = "1" -futures-channel = "0.3" -futures-util = { version = "0.3", default-features = false, features = ["alloc"] } -libc = { version = "0.2", optional = true } -http = "0.2" -http-body = "0.4" -tokio = { version = "1", features = ["rt"] } - -[features] -default = [ - "client", - "ffi", - "http1", -] +WORK_DIR=$(mktemp -d) -http1 = [] -client = [] -ffi = ["libc", "tokio/rt"] -EOF +# check if tmp dir was created +if [[ ! "$WORK_DIR" || ! -d "$WORK_DIR" ]]; then + echo "Could not create temp dir" + exit 1 +fi cp "$CAPI_DIR/include/hyper.h" "$header_file_backup" -#cargo metadata --no-default-features --features ffi --format-version 1 > "$WORK_DIR/metadata.json" - -cd "${WORK_DIR}" || exit 2 - # Expand just the ffi module -if ! output=$(RUSTFLAGS='--cfg hyper_unstable_ffi' cargo rustc -- -Z unpretty=expanded 2>&1 > expanded.rs); then - # As of April 2021 the script above prints a lot of warnings/errors, and - # exits with a nonzero return code, but hyper.h still gets generated. - # - # However, on Github Actions, this will result in automatic "annotations" - # being added to files not related to a PR, so if this is `--verify` mode, - # then don't show it. - # - # But yes show it when using it locally. - if [[ "--verify" != "$1" ]]; then - echo "$output" - fi +if ! RUSTFLAGS='--cfg hyper_unstable_ffi' cargo expand --features client,http1,http2,ffi ::ffi 2> $WORK_DIR/expand_stderr.err > $WORK_DIR/expanded.rs; then + cat $WORK_DIR/expand_stderr.err fi -# Replace the previous copy with the single expanded file -rm -rf ./src -mkdir src -mv expanded.rs src/lib.rs - - # Bindgen! if ! cbindgen \ --config "$CAPI_DIR/cbindgen.toml" \ --lockfile "$CAPI_DIR/../Cargo.lock" \ --output "$CAPI_DIR/include/hyper.h" \ - "${@}"; then + "${@}"\ + $WORK_DIR/expanded.rs 2> $WORK_DIR/cbindgen_stderr.err; then bindgen_exit_code=$? if [[ "--verify" == "$1" ]]; then - echo "diff generated (<) vs backup (>)" - diff "$CAPI_DIR/include/hyper.h" "$header_file_backup" + echo "Changes from previous header (old < > new)" + diff -u "$header_file_backup" "$CAPI_DIR/include/hyper.h" + else + echo "cbindgen failed:" + cat $WORK_DIR/cbindgen_stderr.err fi exit $bindgen_exit_code fi diff --git a/capi/include/hyper.h b/capi/include/hyper.h index 1f938b8714..2b96b98103 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -773,7 +773,7 @@ void hyper_waker_free(struct hyper_waker *waker); void hyper_waker_wake(struct hyper_waker *waker); #ifdef __cplusplus -} // extern "C" -#endif // __cplusplus +} // extern "C" +#endif // __cplusplus -#endif /* _HYPER_H */ +#endif /* _HYPER_H */ diff --git a/docs/MSRV.md b/docs/MSRV.md index ddc8534e2e..70752c9138 100644 --- a/docs/MSRV.md +++ b/docs/MSRV.md @@ -6,4 +6,4 @@ hyper. 
It is possible that an older compiler can work, but that is not guaranteed. We try to increase the MSRV responsibly, only when a significant new feature is needed. -The current MSRV is: **1.49**. +The current MSRV is: **1.63**. diff --git a/examples/client_json.rs b/examples/client_json.rs index ef92f14b10..04ca6f7d91 100644 --- a/examples/client_json.rs +++ b/examples/client_json.rs @@ -28,6 +28,7 @@ async fn fetch_json(url: hyper::Uri) -> Result> { let res = client.get(url).await?; // asynchronously aggregate the chunks of the body + #[allow(deprecated)] let body = hyper::body::aggregate(res).await?; // try to parse as json with serde_json diff --git a/examples/echo.rs b/examples/echo.rs index ff7573049e..ff13085004 100644 --- a/examples/echo.rs +++ b/examples/echo.rs @@ -34,6 +34,7 @@ async fn echo(req: Request) -> Result, hyper::Error> { // So here we do `.await` on the future, waiting on concatenating the full body, // then afterwards the content can be reversed. Only then can we return a `Response`. (&Method::POST, "/echo/reversed") => { + #[allow(deprecated)] let whole_body = hyper::body::to_bytes(req.into_body()).await?; let reversed_body = whole_body.iter().rev().cloned().collect::>(); diff --git a/examples/params.rs b/examples/params.rs index 87c2368928..8c9d923788 100644 --- a/examples/params.rs +++ b/examples/params.rs @@ -17,6 +17,7 @@ async fn param_example(req: Request) -> Result, hyper::Erro (&Method::GET, "/") | (&Method::GET, "/post") => Ok(Response::new(INDEX.into())), (&Method::POST, "/post") => { // Concatenate the body... + #[allow(deprecated)] let b = hyper::body::to_bytes(req).await?; // Parse the request body. form_urlencoded::parse // always succeeds, but in general parsing may diff --git a/examples/tower_client.rs b/examples/tower_client.rs index 5a2a6e78df..f733fe0d35 100644 --- a/examples/tower_client.rs +++ b/examples/tower_client.rs @@ -1,20 +1,20 @@ #![deny(warnings)] -use hyper::client::conn::Builder; -use hyper::client::connect::HttpConnector; -use hyper::client::service::Connect; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + use hyper::service::Service; -use hyper::{Body, Request}; +use hyper::{Body, Request, Response}; +use tokio::net::TcpStream; #[tokio::main] -async fn main() -> Result<(), Box> { +async fn main() -> Result<(), Box> { pretty_env_logger::init(); - let mut mk_svc = Connect::new(HttpConnector::new(), Builder::new()); - let uri = "http://127.0.0.1:8080/".parse::()?; - let mut svc = mk_svc.call(uri.clone()).await?; + let mut svc = Connector; let body = Body::empty(); @@ -25,3 +25,35 @@ async fn main() -> Result<(), Box> { Ok(()) } + +struct Connector; + +impl Service> for Connector { + type Response = Response; + type Error = Box; + type Future = Pin>>>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> std::task::Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + Box::pin(async move { + let host = req.uri().host().expect("no host in uri"); + let port = req.uri().port_u16().expect("no port in uri"); + + let stream = TcpStream::connect(format!("{}:{}", host, port)).await?; + + let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?; + + tokio::task::spawn(async move { + if let Err(err) = conn.await { + println!("Connection error: {:?}", err); + } + }); + + let res = sender.send_request(req).await?; + Ok(res) + }) + } +} diff --git a/examples/web_api.rs b/examples/web_api.rs index 5226249b35..6e6fea311a 100644 --- 
a/examples/web_api.rs +++ b/examples/web_api.rs @@ -40,6 +40,7 @@ async fn client_request_response(client: &Client) -> Result) -> Result> { // Aggregate the body... + #[allow(deprecated)] let whole_body = hyper::body::aggregate(req).await?; // Decode as JSON... let mut data: serde_json::Value = serde_json::from_reader(whole_body.reader())?; diff --git a/src/body/aggregate.rs b/src/body/aggregate.rs index 99662419d3..4bce1767ff 100644 --- a/src/body/aggregate.rs +++ b/src/body/aggregate.rs @@ -13,6 +13,13 @@ use crate::common::buf::BufList; /// Care needs to be taken if the remote is untrusted. The function doesn't implement any length /// checks and an malicious peer might make it consume arbitrary amounts of memory. Checking the /// `Content-Length` is a possibility, but it is not strictly mandated to be present. +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This function has been replaced by a method on the `hyper::body::HttpBody` trait. Use `.collect().await?.aggregate()` instead." + ) +)] +#[cfg_attr(feature = "deprecated", allow(deprecated))] pub async fn aggregate(body: T) -> Result where T: HttpBody, diff --git a/src/body/body.rs b/src/body/body.rs index 9dc1a034f9..7df87404f6 100644 --- a/src/body/body.rs +++ b/src/body/body.rs @@ -1,7 +1,12 @@ use std::borrow::Cow; +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] +use std::convert::Infallible; #[cfg(feature = "stream")] use std::error::Error as StdError; use std::fmt; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; use bytes::Bytes; use futures_channel::mpsc; @@ -15,10 +20,7 @@ use http_body::{Body as HttpBody, SizeHint}; use super::DecodedLength; #[cfg(feature = "stream")] use crate::common::sync_wrapper::SyncWrapper; -use crate::common::Future; -#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] -use crate::common::Never; -use crate::common::{task, watch, Pin, Poll}; +use crate::common::watch; #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] use crate::proto::h2::ping; @@ -30,8 +32,8 @@ type TrailersSender = oneshot::Sender; /// A good default [`HttpBody`](crate::body::HttpBody) to use in many /// applications. /// -/// Note: To read the full body, use [`body::to_bytes`](crate::body::to_bytes) -/// or [`body::aggregate`](crate::body::aggregate). +/// Note: To read the full body, use [`body::to_bytes`](crate::body::to_bytes()) +/// or [`body::aggregate`](crate::body::aggregate()). #[must_use = "streams do nothing unless polled"] pub struct Body { kind: Kind, @@ -77,7 +79,7 @@ struct Extra { } #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] -type DelayEofUntil = oneshot::Receiver; +type DelayEofUntil = oneshot::Receiver; enum DelayEof { /// Initial state, stream hasn't seen EOF yet. 
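The `aggregate.rs` hunk above (and the matching `to_bytes.rs` change further down) only raises a deprecation warning when the new `deprecated` cargo feature is enabled, which is why the example diffs add `#[allow(deprecated)]` at call sites. A minimal sketch of what an opted-in 0.14 handler looks like — the handler itself is hypothetical, but the attribute placement mirrors the updated `echo.rs` and `params.rs` examples:

```rust
use hyper::{Body, Request, Response};

// Hypothetical handler: `to_bytes` still works on 0.14, and the attribute only
// silences the warning that appears once the `deprecated` feature is enabled.
async fn collect_body(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
    #[allow(deprecated)]
    let whole_body = hyper::body::to_bytes(req.into_body()).await?;
    Ok(Response::new(Body::from(whole_body)))
}
```

Nothing changes at runtime; the feature only toggles the warning, so libraries can leave `deprecated` off and let applications opt in.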
@@ -239,7 +241,7 @@ impl Body { .get_or_insert_with(|| Box::new(Extra { delayed_eof: None })) } - fn poll_eof(&mut self, cx: &mut task::Context<'_>) -> Poll>> { + fn poll_eof(&mut self, cx: &mut Context<'_>) -> Poll>> { match self.take_delayed_eof() { #[cfg(any(feature = "http1", feature = "http2"))] #[cfg(feature = "client")] @@ -292,7 +294,7 @@ impl Body { } } - fn poll_inner(&mut self, cx: &mut task::Context<'_>) -> Poll>> { + fn poll_inner(&mut self, cx: &mut Context<'_>) -> Poll>> { match self.kind { Kind::Once(ref mut val) => Poll::Ready(val.take().map(Ok)), Kind::Chan { @@ -323,7 +325,12 @@ impl Body { ping.record_data(bytes.len()); Poll::Ready(Some(Ok(bytes))) } - Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_body(e)))), + Some(Err(e)) => match e.reason() { + // These reasons should cause stop of body reading, but nor fail it. + // The same logic as for `AsyncRead for H2Upgraded` is applied here. + Some(h2::Reason::NO_ERROR) | Some(h2::Reason::CANCEL) => Poll::Ready(None), + _ => Poll::Ready(Some(Err(crate::Error::new_body(e)))), + }, None => Poll::Ready(None), }, @@ -362,14 +369,14 @@ impl HttpBody for Body { fn poll_data( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, ) -> Poll>> { self.poll_eof(cx) } fn poll_trailers( #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut self: Pin<&mut Self>, - #[cfg_attr(not(feature = "http2"), allow(unused))] cx: &mut task::Context<'_>, + #[cfg_attr(not(feature = "http2"), allow(unused))] cx: &mut Context<'_>, ) -> Poll, Self::Error>> { match self.kind { #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))] @@ -465,7 +472,7 @@ impl fmt::Debug for Body { impl Stream for Body { type Item = crate::Result; - fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { HttpBody::poll_data(self, cx) } } @@ -545,7 +552,7 @@ impl From> for Body { impl Sender { /// Check to see if this `Sender` can send more data. - pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { // Check if the receiver end has tried polling for the body yet ready!(self.poll_want(cx)?); self.data_tx @@ -553,7 +560,7 @@ impl Sender { .map_err(|_| crate::Error::new_closed()) } - fn poll_want(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_want(&mut self, cx: &mut Context<'_>) -> Poll> { match self.want_rx.load(cx) { WANT_READY => Poll::Ready(Ok(())), WANT_PENDING => Poll::Pending, @@ -602,17 +609,16 @@ impl Sender { } /// Aborts the body in an abnormal fashion. 
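The `abort`/`send_error` consolidation that follows this doc comment routes user-initiated aborts through the same plumbing used for internal errors. For context, a hedged sketch of the public channel API these methods sit behind — the producer task and its error type are illustrative, not part of the diff:

```rust
use hyper::body::{Body, Bytes, Sender};

// Illustrative producer: stream chunks into a channel-backed `Body`, aborting
// if the source fails. `abort` surfaces a "body write aborted" error to
// whoever is polling the `Body`; after this change it goes through `send_error`.
async fn stream_chunks(mut tx: Sender, chunks: Vec<Result<Bytes, ()>>) {
    for chunk in chunks {
        match chunk {
            Ok(data) => {
                if tx.send_data(data).await.is_err() {
                    // The receiving `Body` (and its connection) went away.
                    return;
                }
            }
            Err(()) => {
                tx.abort();
                return;
            }
        }
    }
}

// `Body::channel()` pairs the `Sender` above with a streaming `Body` that can
// be used as a request or response body.
fn make_streaming_body() -> (Sender, Body) {
    Body::channel()
}
```

Behavior for callers is unchanged: `abort` still produces the same error kind; the refactor removes the `http1`-only gate on `send_error` and has `abort` reuse it.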
- pub fn abort(self) { + pub fn abort(mut self) { + self.send_error(crate::Error::new_body_write_aborted()); + } + + pub(crate) fn send_error(&mut self, err: crate::Error) { let _ = self .data_tx // clone so the send works even if buffer is full .clone() - .try_send(Err(crate::Error::new_body_write_aborted())); - } - - #[cfg(feature = "http1")] - pub(crate) fn send_error(&mut self, err: crate::Error) { - let _ = self.data_tx.try_send(Err(err)); + .try_send(Err(err)); } } diff --git a/src/body/mod.rs b/src/body/mod.rs index 5e2181e941..109b1e6b72 100644 --- a/src/body/mod.rs +++ b/src/body/mod.rs @@ -19,9 +19,11 @@ pub use bytes::{Buf, Bytes}; pub use http_body::Body as HttpBody; pub use http_body::SizeHint; +#[cfg_attr(feature = "deprecated", allow(deprecated))] pub use self::aggregate::aggregate; pub use self::body::{Body, Sender}; pub(crate) use self::length::DecodedLength; +#[cfg_attr(feature = "deprecated", allow(deprecated))] pub use self::to_bytes::to_bytes; mod aggregate; diff --git a/src/body/to_bytes.rs b/src/body/to_bytes.rs index 62b15a54a9..2e398d250a 100644 --- a/src/body/to_bytes.rs +++ b/src/body/to_bytes.rs @@ -44,6 +44,13 @@ use super::HttpBody; /// # Ok(()) /// # } /// ``` +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This function has been replaced by a method on the `hyper::body::HttpBody` trait. Use `.collect().await?.to_bytes()` instead." + ) +)] +#[cfg_attr(feature = "deprecated", allow(deprecated))] pub async fn to_bytes(body: T) -> Result where T: HttpBody, @@ -63,8 +70,13 @@ where return Ok(first.copy_to_bytes(first.remaining())); }; + // Don't pre-emptively reserve *too* much. + let rest = (body.size_hint().lower() as usize).min(1024 * 16); + let cap = first + .remaining() + .saturating_add(second.remaining()) + .saturating_add(rest); // With more than 1 buf, we gotta flatten into a Vec first. - let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize; let mut vec = Vec::with_capacity(cap); vec.put(first); vec.put(second); diff --git a/src/client/client.rs b/src/client/client.rs index cfdd267a11..8195554bd7 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -1,6 +1,10 @@ use std::error::Error as StdError; use std::fmt; +use std::future::Future; +use std::marker::Unpin; use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll}; use std::time::Duration; use futures_channel::oneshot; @@ -10,6 +14,13 @@ use http::uri::{Port, Scheme}; use http::{Method, Request, Response, Uri, Version}; use tracing::{debug, trace, warn}; +use crate::body::{Body, HttpBody}; +use crate::client::connect::CaptureConnectionExtension; +use crate::common::{exec::BoxSendFuture, lazy as hyper_lazy, sync_wrapper::SyncWrapper, Lazy}; +#[cfg(feature = "http2")] +use crate::ext::Protocol; +use crate::rt::Executor; + use super::conn; use super::connect::{self, sealed::Connect, Alpn, Connected, Connection}; use super::pool::{ @@ -17,9 +28,6 @@ use super::pool::{ }; #[cfg(feature = "tcp")] use super::HttpConnector; -use crate::body::{Body, HttpBody}; -use crate::common::{exec::BoxSendFuture, sync_wrapper::SyncWrapper, lazy as hyper_lazy, task, Future, Lazy, Pin, Poll}; -use crate::rt::Executor; /// A Client to make outgoing HTTP requests. 
/// @@ -28,6 +36,7 @@ use crate::rt::Executor; #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Client { config: Config, + #[cfg_attr(feature = "deprecated", allow(deprecated))] conn_builder: conn::Builder, connector: C, pool: Pool>, @@ -238,12 +247,15 @@ where }) } }; - + req.extensions_mut() + .get_mut::() + .map(|conn| conn.set(&pooled.conn_info)); if pooled.is_http1() { if req.version() == Version::HTTP_2 { warn!("Connection is HTTP/1, but request requires HTTP/2"); return Err(ClientError::Normal( - crate::Error::new_user_unsupported_version(), + crate::Error::new_user_unsupported_version() + .with_client_connect_info(pooled.conn_info.clone()), )); } @@ -270,21 +282,29 @@ where origin_form(req.uri_mut()); } } else if req.method() == Method::CONNECT { + #[cfg(not(feature = "http2"))] authority_form(req.uri_mut()); + + #[cfg(feature = "http2")] + if req.extensions().get::().is_none() { + authority_form(req.uri_mut()); + } } - let fut = pooled - .send_request_retryable(req) - .map_err(ClientError::map_with_reused(pooled.is_reused())); + let mut res = match pooled.send_request_retryable(req).await { + Err((err, orig_req)) => { + return Err(ClientError::map_with_reused(pooled.is_reused())(( + err.with_client_connect_info(pooled.conn_info.clone()), + orig_req, + ))); + } + Ok(res) => res, + }; // If the Connector included 'extra' info, add to Response... - let extra_info = pooled.conn_info.extra.clone(); - let fut = fut.map_ok(move |mut res| { - if let Some(extra) = extra_info { - extra.set(res.extensions_mut()); - } - res - }); + if let Some(extra) = &pooled.conn_info.extra { + extra.set(res.extensions_mut()); + } // As of futures@0.1.21, there is a race condition in the mpsc // channel, such that sending when the receiver is closing can @@ -294,11 +314,9 @@ where // To counteract this, we must check if our senders 'want' channel // has been closed after having tried to send. If so, error out... if pooled.is_closed() { - return fut.await; + return Ok(res); } - let mut res = fut.await?; - // If pooled is HTTP/2, we can toss this reference immediately. // // when pooled is dropped, it will try to insert back into the @@ -320,12 +338,14 @@ where drop(delayed_tx); }); + #[cfg_attr(feature = "deprecated", allow(deprecated))] self.conn_builder.exec.execute(on_idle); } else { // There's no body to delay, but the connection isn't // ready yet. Only re-insert when it's ready let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); + #[cfg_attr(feature = "deprecated", allow(deprecated))] self.conn_builder.exec.execute(on_idle); } @@ -379,6 +399,7 @@ where }); // An execute error here isn't important, we're just trying // to prevent a waste of a socket... 
+ #[cfg_attr(feature = "deprecated", allow(deprecated))] self.conn_builder.exec.execute(bg); } Ok(checked_out) @@ -423,6 +444,7 @@ where &self, pool_key: PoolKey, ) -> impl Lazy>>> + Unpin { + #[cfg_attr(feature = "deprecated", allow(deprecated))] let executor = self.conn_builder.exec.clone(); let pool = self.pool.clone(); #[cfg(not(feature = "http2"))] @@ -532,7 +554,7 @@ where type Error = crate::Error; type Future = ResponseFuture; - fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } @@ -552,7 +574,7 @@ where type Error = crate::Error; type Future = ResponseFuture; - fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } @@ -586,7 +608,7 @@ impl ResponseFuture { F: Future>> + Send + 'static, { Self { - inner: SyncWrapper::new(Box::pin(value)) + inner: SyncWrapper::new(Box::pin(value)), } } @@ -607,7 +629,7 @@ impl fmt::Debug for ResponseFuture { impl Future for ResponseFuture { type Output = crate::Result>; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.inner.get_mut().as_mut().poll(cx) } } @@ -622,13 +644,14 @@ struct PoolClient { } enum PoolTx { + #[cfg_attr(feature = "deprecated", allow(deprecated))] Http1(conn::SendRequest), #[cfg(feature = "http2")] Http2(conn::Http2SendRequest), } impl PoolClient { - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { match self.tx { PoolTx::Http1(ref mut tx) => tx.poll_ready(cx), #[cfg(feature = "http2")] @@ -689,6 +712,13 @@ where B: Send + 'static, { fn is_open(&self) -> bool { + if self.conn_info.poisoned.poisoned() { + trace!( + "marking {:?} as closed because it was poisoned", + self.conn_info + ); + return false; + } match self.tx { PoolTx::Http1(ref tx) => tx.is_ready(), #[cfg(feature = "http2")] @@ -894,6 +924,7 @@ fn is_schema_secure(uri: &Uri) -> bool { #[derive(Clone)] pub struct Builder { client_config: Config, + #[cfg_attr(feature = "deprecated", allow(deprecated))] conn_builder: conn::Builder, pool_config: pool::Config, } @@ -906,6 +937,7 @@ impl Default for Builder { set_host: true, ver: Ver::Auto, }, + #[cfg_attr(feature = "deprecated", allow(deprecated))] conn_builder: conn::Builder::new(), pool_config: pool::Config { idle_timeout: Some(Duration::from_secs(90)), @@ -1063,6 +1095,36 @@ impl Builder { self } + /// Sets whether invalid header lines should be silently ignored in HTTP/1 responses. + /// + /// This mimicks the behaviour of major browsers. You probably don't want this. + /// You should only want this if you are implementing a proxy whose main + /// purpose is to sit in front of browsers whose users access arbitrary content + /// which may be malformed, and they expect everything that works without + /// the proxy to keep working with the proxy. + /// + /// This option will prevent Hyper's client from returning an error encountered + /// when parsing a header, except if the error was caused by the character NUL + /// (ASCII code 0), as Chrome specifically always reject those. 
+ /// + /// The ignorable errors are: + /// * empty header names; + /// * characters that are not allowed in header names, except for `\0` and `\r`; + /// * when `allow_spaces_after_header_name_in_responses` is not enabled, + /// spaces and tabs between the header name and the colon; + /// * missing colon between header name and colon; + /// * characters that are not allowed in header values except for `\0` and `\r`. + /// + /// If an ignorable error is encountered, the parser tries to find the next + /// line in the input to resume parsing the rest of the headers. An error + /// will be emitted nonetheless if it finds `\0` or a lone `\r` while + /// looking for the next line. + pub fn http1_ignore_invalid_headers_in_responses(&mut self, val: bool) -> &mut Builder { + self.conn_builder + .http1_ignore_invalid_headers_in_responses(val); + self + } + /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. /// @@ -1337,6 +1399,7 @@ impl Builder { B: HttpBody + Send, B::Data: Send, { + #[cfg_attr(feature = "deprecated", allow(deprecated))] Client { config: self.client_config, conn_builder: self.conn_builder.clone(), diff --git a/src/client/conn.rs b/src/client/conn.rs index a44bc3ccaf..8da457da64 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -54,11 +54,22 @@ //! # } //! ``` +#[cfg(all(feature = "backports", feature = "http1"))] +pub mod http1; +#[cfg(all(feature = "backports", feature = "http2"))] +pub mod http2; + +#[cfg(not(all(feature = "http1", feature = "http2")))] +use std::convert::Infallible; use std::error::Error as StdError; use std::fmt; +use std::future::Future; #[cfg(not(all(feature = "http1", feature = "http2")))] use std::marker::PhantomData; +use std::marker::Unpin; +use std::pin::Pin; use std::sync::Arc; +use std::task::{Context, Poll}; #[cfg(all(feature = "runtime", feature = "http2"))] use std::time::Duration; @@ -72,12 +83,7 @@ use tracing::{debug, trace}; use super::dispatch; use crate::body::HttpBody; -#[cfg(not(all(feature = "http1", feature = "http2")))] -use crate::common::Never; -use crate::common::{ - exec::{BoxSendFuture, Exec}, - task, Future, Pin, Poll, -}; +use crate::common::exec::{BoxSendFuture, Exec}; use crate::proto; use crate::rt::Executor; #[cfg(feature = "http1")] @@ -89,13 +95,13 @@ type Http1Dispatcher = proto::dispatch::Dispatcher, B, T, proto::h1::ClientTransaction>; #[cfg(not(feature = "http1"))] -type Http1Dispatcher = (Never, PhantomData<(T, Pin>)>); +type Http1Dispatcher = (Infallible, PhantomData<(T, Pin>)>); #[cfg(feature = "http2")] type Http2ClientTask = proto::h2::ClientTask; #[cfg(not(feature = "http2"))] -type Http2ClientTask = (Never, PhantomData>>); +type Http2ClientTask = (Infallible, PhantomData>>); pin_project! { #[project = ProtoClientProj] @@ -118,16 +124,30 @@ pin_project! { /// /// This is a shortcut for `Builder::new().handshake(io)`. /// See [`client::conn`](crate::client::conn) for more. +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This function will be replaced with `client::conn::http1::handshake` and `client::conn::http2::handshake` in 1.0, enable the \"backports\" feature to use them now." + ) +)] +#[cfg_attr(feature = "deprecated", allow(deprecated))] pub async fn handshake( io: T, ) -> crate::Result<(SendRequest, Connection)> where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { + #[allow(deprecated)] Builder::new().handshake(io).await } /// The sender side of an established connection. 
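The `Client` builder's new `http1_ignore_invalid_headers_in_responses` simply forwards to the connection builder, as the hunk above shows. A hedged usage sketch — the proxy-style scenario is the one the docs call out, and this assumes the `client`, `http1`, and `tcp` features:

```rust
use hyper::{client::HttpConnector, Body, Client};

// Sketch only: a lenient client for proxy-like deployments that must tolerate
// malformed upstream response headers. Most clients should leave this disabled.
fn lenient_client() -> Client<HttpConnector, Body> {
    Client::builder()
        .http1_ignore_invalid_headers_in_responses(true)
        .build_http()
}
```

As the doc comment stresses, this is a niche escape hatch rather than a default worth enabling.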
+#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This type will be replaced with `client::conn::http1::SendRequest` and `client::conn::http2::SendRequest` in 1.0, enable the \"backports\" feature to use them now." + ) +)] pub struct SendRequest { dispatch: dispatch::Sender, Response>, } @@ -137,6 +157,12 @@ pub struct SendRequest { /// In most cases, this should just be spawned into an executor, so that it /// can process incoming and outgoing messages, notice hangups, and the like. #[must_use = "futures do nothing unless polled"] +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This type will be replaced with `client::conn::http1::Connection` and `client::conn::http2::Connection` in 1.0, enable the \"backports\" feature to use them now." + ) +)] pub struct Connection where T: AsyncRead + AsyncWrite + Send + 'static, @@ -149,6 +175,12 @@ where /// /// After setting options, the builder is used to create a handshake future. #[derive(Clone, Debug)] +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This type will be replaced with `client::conn::http1::Builder` and `client::conn::http2::Builder` in 1.0, enable the \"backports\" feature to use them now." + ) +)] pub struct Builder { pub(super) exec: Exec, h09_responses: bool, @@ -221,11 +253,12 @@ pub(super) struct Http2SendRequest { // ===== impl SendRequest +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl SendRequest { /// Polls to determine whether this sender can be used yet for a request. /// /// If the associated connection is closed, this returns an Error. - pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.dispatch.poll_ready(cx) } @@ -254,6 +287,7 @@ impl SendRequest { } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl SendRequest where B: HttpBody + 'static, @@ -339,6 +373,7 @@ where } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl Service> for SendRequest where B: HttpBody + 'static, @@ -347,7 +382,7 @@ where type Error = crate::Error; type Future = ResponseFuture; - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.poll_ready(cx) } @@ -356,6 +391,7 @@ where } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl fmt::Debug for SendRequest { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SendRequest").finish() @@ -425,6 +461,7 @@ impl Clone for Http2SendRequest { // ===== impl Connection +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl Connection where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, @@ -466,7 +503,7 @@ where /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html) /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html) /// to work with this function; or use the `without_shutdown` wrapper. 
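The deprecation notes above all point at the new `backports` modules. Before the rest of the old `Connection` impl continues below, here is a hedged sketch of that replacement path — address, headers, and error handling are illustrative, and it assumes the `client`, `http1`, and `backports` features:

```rust
use hyper::{Body, Request};
use tokio::net::TcpStream;

// Sketch of `client::conn::http1::handshake`, the backported replacement for
// the deprecated version-agnostic `client::conn::handshake`.
async fn fetch() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let stream = TcpStream::connect("127.0.0.1:8080").await?;
    let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?;

    // The connection future must be driven for `send_request` to make progress.
    tokio::spawn(async move {
        if let Err(err) = conn.await {
            eprintln!("connection error: {err}");
        }
    });

    // Unlike `Client`, the low-level API does not add a Host header for you.
    let req = Request::builder()
        .uri("/")
        .header("host", "127.0.0.1")
        .body(Body::empty())?;

    let res = sender.send_request(req).await?;
    println!("status: {}", res.status());
    Ok(())
}
```

This is essentially what the reworked `examples/tower_client.rs` in this diff does inside its `Service::call`.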
- pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { match *self.inner.as_mut().expect("already upgraded") { #[cfg(feature = "http1")] ProtoClient::H1 { ref mut h1 } => h1.poll_without_shutdown(cx), @@ -508,16 +545,17 @@ where } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl Future for Connection where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: AsyncRead + AsyncWrite + Unpin + Send, B: HttpBody + Send + 'static, B::Data: Send, B::Error: Into>, { type Output = crate::Result<()>; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? { proto::Dispatched::Shutdown => Poll::Ready(Ok(())), #[cfg(feature = "http1")] @@ -536,6 +574,7 @@ where } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl fmt::Debug for Connection where T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, @@ -548,6 +587,7 @@ where // ===== impl Builder +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl Builder { /// Creates a new connection builder. #[inline] @@ -662,6 +702,21 @@ impl Builder { self } + /// Set whether HTTP/1 connections will silently ignored malformed header lines. + /// + /// If this is enabled and and a header line does not start with a valid header + /// name, or does not include a colon at all, the line will be silently ignored + /// and no error will be reported. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + pub fn http1_ignore_invalid_headers_in_responses(&mut self, enabled: bool) -> &mut Builder { + self.h1_parser_config + .ignore_invalid_headers_in_responses(enabled); + self + } + /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. /// @@ -1013,7 +1068,7 @@ impl Builder { impl Future for ResponseFuture { type Output = crate::Result>; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.inner { ResponseFutureState::Waiting(ref mut rx) => { Pin::new(rx).poll(cx).map(|res| match res { @@ -1047,7 +1102,7 @@ where { type Output = crate::Result; - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.project() { #[cfg(feature = "http1")] ProtoClientProj::H1 { h1 } => h1.poll(cx), @@ -1067,9 +1122,11 @@ where trait AssertSend: Send {} trait AssertSendSync: Send + Sync {} +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[doc(hidden)] impl AssertSendSync for SendRequest {} +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[doc(hidden)] impl AssertSend for Connection where @@ -1079,6 +1136,7 @@ where { } +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[doc(hidden)] impl AssertSendSync for Connection where @@ -1088,6 +1146,7 @@ where { } +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[doc(hidden)] impl AssertSendSync for Builder {} diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs new file mode 100644 index 0000000000..37eda04067 --- /dev/null +++ b/src/client/conn/http1.rs @@ -0,0 +1,541 @@ +//! 
HTTP/1 client connections + +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use bytes::Bytes; +use http::{Request, Response}; +use httparse::ParserConfig; +use tokio::io::{AsyncRead, AsyncWrite}; + +use super::super::dispatch; +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::proto; +use crate::upgrade::Upgraded; + +type Dispatcher = + proto::dispatch::Dispatcher, B, T, proto::h1::ClientTransaction>; + +/// The sender side of an established connection. +pub struct SendRequest { + dispatch: dispatch::Sender, Response>, +} + +/// Deconstructed parts of a `Connection`. +/// +/// This allows taking apart a `Connection` at a later time, in order to +/// reclaim the IO object, and additional related pieces. +#[derive(Debug)] +pub struct Parts { + /// The original IO object used in the handshake. + pub io: T, + /// A buffer of bytes that have been read but not processed as HTTP. + /// + /// For instance, if the `Connection` is used for an HTTP upgrade request, + /// it is possible the server sent back the first bytes of the new protocol + /// along with the response upgrade. + /// + /// You will want to check for any existing bytes if you plan to continue + /// communicating on the IO object. + pub read_buf: Bytes, + _inner: (), +} + +/// A future that processes all HTTP state for the IO object. +/// +/// In most cases, this should just be spawned into an executor, so that it +/// can process incoming and outgoing messages, notice hangups, and the like. +#[must_use = "futures do nothing unless polled"] +pub struct Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Body + 'static, +{ + inner: Option>, +} + +impl Connection +where + T: AsyncRead + AsyncWrite + Send + Unpin + 'static, + B: Body + 'static, + B::Error: Into>, +{ + /// Return the inner IO object, and additional information. + /// + /// Only works for HTTP/1 connections. HTTP/2 connections will panic. + pub fn into_parts(self) -> Parts { + let (io, read_buf, _) = self.inner.expect("already upgraded").into_inner(); + Parts { + io, + read_buf, + _inner: (), + } + } + + /// Poll the connection for completion, but without calling `shutdown` + /// on the underlying IO. + /// + /// This is useful to allow running a connection while doing an HTTP + /// upgrade. Once the upgrade is completed, the connection would be "done", + /// but it is not desired to actually shutdown the IO object. Instead you + /// would take it back using `into_parts`. + /// + /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html) + /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html) + /// to work with this function; or use the `without_shutdown` wrapper. + pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner + .as_mut() + .expect("algready upgraded") + .poll_without_shutdown(cx) + } + + /// Prevent shutdown of the underlying IO object at the end of service the request, + /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. + pub fn without_shutdown(self) -> impl Future>> { + let mut conn = Some(self); + futures_util::future::poll_fn(move |cx| -> Poll>> { + ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?; + Poll::Ready(Ok(conn.take().unwrap().into_parts())) + }) + } +} + +/// A builder to configure an HTTP connection. 
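`Connection::without_shutdown` above exists so callers can get the IO object back through `Parts` once the HTTP exchange is done (for example around an upgrade). A hedged sketch of that pattern — the endpoint is illustrative and a real upgrade flow would need more care:

```rust
use hyper::{Body, Request};
use tokio::net::TcpStream;

// Illustrative only: run one request over the backported HTTP/1 connection,
// skip the final socket shutdown, and reclaim the stream from `Parts`.
async fn reclaim_io() -> Result<TcpStream, Box<dyn std::error::Error + Send + Sync>> {
    let stream = TcpStream::connect("127.0.0.1:8080").await?;
    let (mut sender, conn) =
        hyper::client::conn::http1::handshake::<_, Body>(stream).await?;

    // Drive the connection, but do not shut the IO down when it completes.
    let conn_task = tokio::spawn(conn.without_shutdown());

    let req = Request::builder()
        .uri("/")
        .header("host", "127.0.0.1")
        .body(Body::empty())?;
    let res = sender.send_request(req).await?;

    // Read the response to completion so the connection can finish cleanly.
    #[allow(deprecated)]
    let _body = hyper::body::to_bytes(res.into_body()).await?;
    drop(sender);

    let parts = conn_task.await??;
    Ok(parts.io)
}
```

`parts.read_buf` would also need to be checked before reusing the stream, since the peer may have already sent bytes for the next protocol.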
+/// +/// After setting options, the builder is used to create a handshake future. +#[derive(Clone, Debug)] +pub struct Builder { + h09_responses: bool, + h1_parser_config: ParserConfig, + h1_writev: Option, + h1_title_case_headers: bool, + h1_preserve_header_case: bool, + #[cfg(feature = "ffi")] + h1_preserve_header_order: bool, + h1_read_buf_exact_size: Option, + h1_max_buf_size: Option, +} + +/// Returns a handshake future over some IO. +/// +/// This is a shortcut for `Builder::new().handshake(io)`. +/// See [`client::conn`](crate::client::conn) for more. +pub async fn handshake(io: T) -> crate::Result<(SendRequest, Connection)> +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, +{ + Builder::new().handshake(io).await +} + +// ===== impl SendRequest + +impl SendRequest { + /// Polls to determine whether this sender can be used yet for a request. + /// + /// If the associated connection is closed, this returns an Error. + pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.dispatch.poll_ready(cx) + } + + /// Waits until the dispatcher is ready + /// + /// If the associated connection is closed, this returns an Error. + pub async fn ready(&mut self) -> crate::Result<()> { + futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await + } + + /* + pub(super) async fn when_ready(self) -> crate::Result { + let mut me = Some(self); + future::poll_fn(move |cx| { + ready!(me.as_mut().unwrap().poll_ready(cx))?; + Poll::Ready(Ok(me.take().unwrap())) + }) + .await + } + + pub(super) fn is_ready(&self) -> bool { + self.dispatch.is_ready() + } + + pub(super) fn is_closed(&self) -> bool { + self.dispatch.is_closed() + } + */ +} + +impl SendRequest +where + B: Body + 'static, +{ + /// Sends a `Request` on the associated connection. + /// + /// Returns a future that if successful, yields the `Response`. + /// + /// # Note + /// + /// There are some key differences in what automatic things the `Client` + /// does for you that will not be done here: + /// + /// - `Client` requires absolute-form `Uri`s, since the scheme and + /// authority are needed to connect. They aren't required here. + /// - Since the `Client` requires absolute-form `Uri`s, it can add + /// the `Host` header based on it. You must add a `Host` header yourself + /// before calling this method. + /// - Since absolute-form `Uri`s are not required, if received, they will + /// be serialized as-is. + pub fn send_request( + &mut self, + req: Request, + ) -> impl Future>> { + let sent = self.dispatch.send(req); + + async move { + match sent { + Ok(rx) => match rx.await { + Ok(Ok(resp)) => Ok(resp), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_canceled) => panic!("dispatch dropped without returning error"), + }, + Err(_req) => { + tracing::debug!("connection was not ready"); + + Err(crate::Error::new_canceled().with("connection was not ready")) + } + } + } + } + + /* + pub(super) fn send_request_retryable( + &mut self, + req: Request, + ) -> impl Future, (crate::Error, Option>)>> + Unpin + where + B: Send, + { + match self.dispatch.try_send(req) { + Ok(rx) => { + Either::Left(rx.then(move |res| { + match res { + Ok(Ok(res)) => future::ok(res), + Ok(Err(err)) => future::err(err), + // this is definite bug if it happens, but it shouldn't happen! 
+ Err(_) => panic!("dispatch dropped without returning error"), + } + })) + } + Err(req) => { + tracing::debug!("connection was not ready"); + let err = crate::Error::new_canceled().with("connection was not ready"); + Either::Right(future::err((err, Some(req)))) + } + } + } + */ +} + +impl fmt::Debug for SendRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SendRequest").finish() + } +} + +// ===== impl Connection + +impl fmt::Debug for Connection +where + T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + B: Body + 'static, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Future for Connection +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? { + proto::Dispatched::Shutdown => Poll::Ready(Ok(())), + proto::Dispatched::Upgrade(pending) => match self.inner.take() { + Some(h1) => { + let (io, buf, _) = h1.into_inner(); + pending.fulfill(Upgraded::new(io, buf)); + Poll::Ready(Ok(())) + } + _ => { + drop(pending); + unreachable!("Upgraded twice"); + } + }, + } + } +} + +// ===== impl Builder + +impl Builder { + /// Creates a new connection builder. + #[inline] + pub fn new() -> Builder { + Builder { + h09_responses: false, + h1_writev: None, + h1_read_buf_exact_size: None, + h1_parser_config: Default::default(), + h1_title_case_headers: false, + h1_preserve_header_case: false, + #[cfg(feature = "ffi")] + h1_preserve_header_order: false, + h1_max_buf_size: None, + } + } + + /// Set whether HTTP/0.9 responses should be tolerated. + /// + /// Default is false. + pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder { + self.h09_responses = enabled; + self + } + + /// Set whether HTTP/1 connections will accept spaces between header names + /// and the colon that follow them in responses. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > No whitespace is allowed between the header field-name and colon. In + /// > the past, differences in the handling of such whitespace have led to + /// > security vulnerabilities in request routing and response handling. A + /// > server MUST reject any received request message that contains + /// > whitespace between a header field-name and colon with a response code + /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a + /// > response message before forwarding the message downstream. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn allow_spaces_after_header_name_in_responses(&mut self, enabled: bool) -> &mut Builder { + self.h1_parser_config + .allow_spaces_after_header_name_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections will accept obsolete line folding for + /// header values. + /// + /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when + /// parsing. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] 
has + /// to say about it: + /// + /// > A server that receives an obs-fold in a request message that is not + /// > within a message/http container MUST either reject the message by + /// > sending a 400 (Bad Request), preferably with a representation + /// > explaining that obsolete line folding is unacceptable, or replace + /// > each received obs-fold with one or more SP octets prior to + /// > interpreting the field value or forwarding the message downstream. + /// + /// > A proxy or gateway that receives an obs-fold in a response message + /// > that is not within a message/http container MUST either discard the + /// > message and replace it with a 502 (Bad Gateway) response, preferably + /// > with a representation explaining that unacceptable line folding was + /// > received, or replace each received obs-fold with one or more SP + /// > octets prior to interpreting the field value or forwarding the + /// > message downstream. + /// + /// > A user agent that receives an obs-fold in a response message that is + /// > not within a message/http container MUST replace each received + /// > obs-fold with one or more SP octets prior to interpreting the field + /// > value. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn allow_obsolete_multiline_headers_in_responses(&mut self, enabled: bool) -> &mut Builder { + self.h1_parser_config + .allow_obsolete_multiline_headers_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections will silently ignored malformed header lines. + /// + /// If this is enabled and and a header line does not start with a valid header + /// name, or does not include a colon at all, the line will be silently ignored + /// and no error will be reported. + /// + /// Default is false. + pub fn ignore_invalid_headers_in_responses(&mut self, enabled: bool) -> &mut Builder { + self.h1_parser_config + .ignore_invalid_headers_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. In this mode hyper will try to guess which + /// mode to use + pub fn writev(&mut self, enabled: bool) -> &mut Builder { + self.h1_writev = Some(enabled); + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Default is false. + pub fn title_case_headers(&mut self, enabled: bool) -> &mut Builder { + self.h1_title_case_headers = enabled; + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Default is false. 
+ pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Builder { + self.h1_preserve_header_case = enabled; + self + } + + /// Set whether to support preserving original header order. + /// + /// Currently, this will record the order in which headers are received, and store this + /// ordering in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. + /// + /// Default is false. + #[cfg(feature = "ffi")] + pub fn preserve_header_order(&mut self, enabled: bool) -> &mut Builder { + self.h1_preserve_header_order = enabled; + self + } + + /// Sets the exact size of the read buffer to *always* use. + /// + /// Note that setting this option unsets the `max_buf_size` option. + /// + /// Default is an adaptive read buffer. + pub fn read_buf_exact_size(&mut self, sz: Option) -> &mut Builder { + self.h1_read_buf_exact_size = sz; + self.h1_max_buf_size = None; + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// Note that setting this option unsets the `read_exact_buf_size` option. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + pub fn max_buf_size(&mut self, max: usize) -> &mut Self { + assert!( + max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, + "the max_buf_size cannot be smaller than the minimum that h1 specifies." + ); + + self.h1_max_buf_size = Some(max); + self.h1_read_buf_exact_size = None; + self + } + + /// Constructs a connection with the configured options and IO. + /// See [`client::conn`](crate::client::conn) for more. + /// + /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will + /// do nothing. + pub fn handshake( + &self, + io: T, + ) -> impl Future, Connection)>> + where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, + { + let opts = self.clone(); + + async move { + tracing::trace!("client handshake HTTP/1"); + + let (tx, rx) = dispatch::channel(); + let mut conn = proto::Conn::new(io); + conn.set_h1_parser_config(opts.h1_parser_config); + if let Some(writev) = opts.h1_writev { + if writev { + conn.set_write_strategy_queue(); + } else { + conn.set_write_strategy_flatten(); + } + } + if opts.h1_title_case_headers { + conn.set_title_case_headers(); + } + if opts.h1_preserve_header_case { + conn.set_preserve_header_case(); + } + #[cfg(feature = "ffi")] + if opts.h1_preserve_header_order { + conn.set_preserve_header_order(); + } + + if opts.h09_responses { + conn.set_h09_responses(); + } + + if let Some(sz) = opts.h1_read_buf_exact_size { + conn.set_read_buf_exact_size(sz); + } + if let Some(max) = opts.h1_max_buf_size { + conn.set_max_buf_size(max); + } + let cd = proto::h1::dispatch::Client::new(rx); + let proto = proto::h1::Dispatcher::new(cd, conn); + + Ok(( + SendRequest { dispatch: tx }, + Connection { inner: Some(proto) }, + )) + } + } +} diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs new file mode 100644 index 0000000000..5697e9ee47 --- /dev/null +++ b/src/client/conn/http2.rs @@ -0,0 +1,420 @@ +//! 
HTTP/2 client connections + +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::marker::PhantomData; +use std::marker::Unpin; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; + +use http::{Request, Response}; +use tokio::io::{AsyncRead, AsyncWrite}; + +use super::super::dispatch; +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::common::exec::{BoxSendFuture, Exec}; +use crate::proto; +use crate::rt::Executor; + +/// The sender side of an established connection. +pub struct SendRequest { + dispatch: dispatch::UnboundedSender, Response>, +} + +impl Clone for SendRequest { + fn clone(&self) -> SendRequest { + SendRequest { + dispatch: self.dispatch.clone(), + } + } +} + +/// A future that processes all HTTP state for the IO object. +/// +/// In most cases, this should just be spawned into an executor, so that it +/// can process incoming and outgoing messages, notice hangups, and the like. +#[must_use = "futures do nothing unless polled"] +pub struct Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Body + 'static, +{ + inner: (PhantomData, proto::h2::ClientTask), +} + +/// A builder to configure an HTTP connection. +/// +/// After setting options, the builder is used to create a handshake future. +#[derive(Clone, Debug)] +pub struct Builder { + pub(super) exec: Exec, + h2_builder: proto::h2::client::Config, +} + +/// Returns a handshake future over some IO. +/// +/// This is a shortcut for `Builder::new().handshake(io)`. +/// See [`client::conn`](crate::client::conn) for more. +pub async fn handshake(exec: E, io: T) -> crate::Result<(SendRequest, Connection)> +where + E: Executor + Send + Sync + 'static, + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, +{ + Builder::new(exec).handshake(io).await +} + +// ===== impl SendRequest + +impl SendRequest { + /// Polls to determine whether this sender can be used yet for a request. + /// + /// If the associated connection is closed, this returns an Error. + pub fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + if self.is_closed() { + Poll::Ready(Err(crate::Error::new_closed())) + } else { + Poll::Ready(Ok(())) + } + } + + /// Waits until the dispatcher is ready + /// + /// If the associated connection is closed, this returns an Error. + pub async fn ready(&mut self) -> crate::Result<()> { + futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await + } + + /* + pub(super) async fn when_ready(self) -> crate::Result { + let mut me = Some(self); + future::poll_fn(move |cx| { + ready!(me.as_mut().unwrap().poll_ready(cx))?; + Poll::Ready(Ok(me.take().unwrap())) + }) + .await + } + + pub(super) fn is_ready(&self) -> bool { + self.dispatch.is_ready() + } + */ + + pub(super) fn is_closed(&self) -> bool { + self.dispatch.is_closed() + } +} + +impl SendRequest +where + B: Body + 'static, +{ + /// Sends a `Request` on the associated connection. + /// + /// Returns a future that if successful, yields the `Response`. + /// + /// # Note + /// + /// There are some key differences in what automatic things the `Client` + /// does for you that will not be done here: + /// + /// - `Client` requires absolute-form `Uri`s, since the scheme and + /// authority are needed to connect. They aren't required here. + /// - Since the `Client` requires absolute-form `Uri`s, it can add + /// the `Host` header based on it. 
You must add a `Host` header yourself + /// before calling this method. + /// - Since absolute-form `Uri`s are not required, if received, they will + /// be serialized as-is. + pub fn send_request( + &mut self, + req: Request, + ) -> impl Future>> { + let sent = self.dispatch.send(req); + + async move { + match sent { + Ok(rx) => match rx.await { + Ok(Ok(resp)) => Ok(resp), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_canceled) => panic!("dispatch dropped without returning error"), + }, + Err(_req) => { + tracing::debug!("connection was not ready"); + + Err(crate::Error::new_canceled().with("connection was not ready")) + } + } + } + } + + /* + pub(super) fn send_request_retryable( + &mut self, + req: Request, + ) -> impl Future, (crate::Error, Option>)>> + Unpin + where + B: Send, + { + match self.dispatch.try_send(req) { + Ok(rx) => { + Either::Left(rx.then(move |res| { + match res { + Ok(Ok(res)) => future::ok(res), + Ok(Err(err)) => future::err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_) => panic!("dispatch dropped without returning error"), + } + })) + } + Err(req) => { + tracing::debug!("connection was not ready"); + let err = crate::Error::new_canceled().with("connection was not ready"); + Either::Right(future::err((err, Some(req)))) + } + } + } + */ +} + +impl fmt::Debug for SendRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SendRequest").finish() + } +} + +// ===== impl Connection + +impl Connection +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + Unpin + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + /// Returns whether the [extended CONNECT protocol][1] is enabled or not. + /// + /// This setting is configured by the server peer by sending the + /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. + /// This method returns the currently acknowledged value received from the + /// remote. + /// + /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3 + pub fn is_extended_connect_protocol_enabled(&self) -> bool { + self.inner.1.is_extended_connect_protocol_enabled() + } +} + +impl fmt::Debug for Connection +where + T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + B: Body + 'static, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Future for Connection +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.inner.1).poll(cx))? { + proto::Dispatched::Shutdown => Poll::Ready(Ok(())), + #[cfg(feature = "http1")] + proto::Dispatched::Upgrade(_pending) => unreachable!("http2 cannot upgrade"), + } + } +} + +// ===== impl Builder + +impl Builder { + /// Creates a new connection builder. + #[inline] + pub fn new(exec: E) -> Builder + where + E: Executor + Send + Sync + 'static, + { + use std::sync::Arc; + Builder { + exec: Exec::Executor(Arc::new(exec)), + h2_builder: Default::default(), + } + } + + /// Provide an executor to execute background HTTP2 tasks. 
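Unlike the HTTP/1 builder, this HTTP/2 builder takes an executor so background stream tasks can be spawned. The sketch below (editorial, not part of the patch) shows one way to satisfy `hyper::rt::Executor` on a Tokio runtime; `TokioExecutor` is an illustrative name, not a type provided by hyper or this patch.

```rust
// Editorial sketch, not part of the patch: a trivial `hyper::rt::Executor`
// backed by `tokio::spawn`, suitable for passing to the HTTP/2 builder.
use std::future::Future;

#[derive(Clone, Copy, Debug)]
struct TokioExecutor;

impl<F> hyper::rt::Executor<F> for TokioExecutor
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    fn execute(&self, fut: F) {
        // Background HTTP/2 work (connection management, pings, etc.)
        // is spawned onto the ambient Tokio runtime.
        tokio::spawn(fut);
    }
}

// Hypothetical usage, with the module path hedged (this patch adds the
// http2 submodule; on existing 0.14 releases the equivalent is
// `client::conn::Builder::new().executor(...).http2_only(true)`):
//
//     let (tx, conn) = Builder::new(TokioExecutor).handshake(io).await?;
```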
+ pub fn executor(&mut self, exec: E) -> &mut Builder + where + E: Executor + Send + Sync + 'static, + { + self.exec = Exec::Executor(Arc::new(exec)); + self + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_stream_window_size = sz; + } + self + } + + /// Sets the max connection-level flow control for HTTP2 + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_conn_window_size = sz; + } + self + } + + /// Sets whether to use an adaptive flow control. + /// + /// Enabling this will override the limits set in + /// `initial_stream_window_size` and + /// `initial_connection_window_size`. + pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { + use proto::h2::SPEC_WINDOW_SIZE; + + self.h2_builder.adaptive_window = enabled; + if enabled { + self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; + self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; + } + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.max_frame_size = sz; + } + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. + #[cfg(feature = "runtime")] + pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { + self.h2_builder.keep_alive_interval = interval.into(); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + #[cfg(feature = "runtime")] + pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.h2_builder.keep_alive_timeout = timeout; + self + } + + /// Sets whether HTTP2 keep-alive should apply while the connection is idle. + /// + /// If disabled, keep-alive pings are only sent while there are open + /// request/responses streams. If enabled, pings are also sent when no + /// streams are active. Does nothing if `keep_alive_interval` is + /// disabled. + /// + /// Default is `false`. + #[cfg(feature = "runtime")] + pub fn keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { + self.h2_builder.keep_alive_while_idle = enabled; + self + } + + /// Sets the maximum number of HTTP2 concurrent locally reset streams. + /// + /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more + /// details. + /// + /// The default value is determined by the `h2` crate. 
+ /// + /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams + pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.h2_builder.max_concurrent_reset_streams = Some(max); + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently 1MB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.h2_builder.max_send_buffer_size = max; + self + } + + /// Constructs a connection with the configured options and IO. + /// See [`client::conn`](crate::client::conn) for more. + /// + /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will + /// do nothing. + pub fn handshake( + &self, + io: T, + ) -> impl Future, Connection)>> + where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, + { + let opts = self.clone(); + + async move { + tracing::trace!("client handshake HTTP/1"); + + let (tx, rx) = dispatch::channel(); + let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec).await?; + Ok(( + SendRequest { + dispatch: tx.unbound(), + }, + Connection { + inner: (PhantomData, h2), + }, + )) + } + } +} diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs index e4465078b3..50245de68d 100644 --- a/src/client/connect/dns.rs +++ b/src/client/connect/dns.rs @@ -26,7 +26,7 @@ use std::future::Future; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}; use std::pin::Pin; use std::str::FromStr; -use std::task::{self, Poll}; +use std::task::{Context, Poll}; use std::{fmt, io, vec}; use tokio::task::JoinHandle; @@ -113,7 +113,7 @@ impl Service for GaiResolver { type Error = io::Error; type Future = GaiFuture; - fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } @@ -138,7 +138,7 @@ impl fmt::Debug for GaiResolver { impl Future for GaiFuture { type Output = Result; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { Pin::new(&mut self.inner).poll(cx).map(|res| match res { Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }), Ok(Err(err)) => Err(err), @@ -286,7 +286,7 @@ impl Service for TokioThreadpoolGaiResolver { type Error = io::Error; type Future = TokioThreadpoolGaiFuture; - fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } @@ -299,7 +299,7 @@ impl Service for TokioThreadpoolGaiResolver { impl Future for TokioThreadpoolGaiFuture { type Output = Result; - fn poll(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { match ready!(tokio_executor::threadpool::blocking(|| ( self.name.as_str(), 0 @@ -318,8 +318,10 @@ impl Future for TokioThreadpoolGaiFuture { */ mod sealed { - use super::{SocketAddr, Name}; - use crate::common::{task, Future, Poll}; + use std::future::Future; + use std::task::{Context, Poll}; + + use super::{Name, SocketAddr}; use tower_service::Service; // "Trait alias" for `Service` @@ -328,7 +330,7 @@ mod sealed { type Error: Into>; type Future: Future>; - fn poll_ready(&mut self, cx: &mut 
task::Context<'_>) -> Poll>; + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; fn resolve(&mut self, name: Name) -> Self::Future; } @@ -342,7 +344,7 @@ mod sealed { type Error = S::Error; type Future = S::Future; - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { Service::poll_ready(self, cx) } diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs index afe7b155eb..ee09afe9a3 100644 --- a/src/client/connect/http.rs +++ b/src/client/connect/http.rs @@ -362,7 +362,10 @@ impl Connection for TcpStream { fn connected(&self) -> Connected { let connected = Connected::new(); if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) { - connected.extra(HttpInfo { remote_addr, local_addr }) + connected.extra(HttpInfo { + remote_addr, + local_addr, + }) } else { connected } @@ -521,7 +524,9 @@ struct ConnectingTcpRemote { impl ConnectingTcpRemote { fn new(addrs: dns::SocketAddrs, connect_timeout: Option) -> Self { - let connect_timeout = connect_timeout.map(|t| t / (addrs.len() as u32)); + let connect_timeout = connect_timeout + .map(|t| t.checked_div(addrs.len() as u32)) + .flatten(); Self { addrs, diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs index 862a0e65c1..4c29dd3a3e 100644 --- a/src/client/connect/mod.rs +++ b/src/client/connect/mod.rs @@ -80,8 +80,13 @@ //! [`AsyncWrite`]: tokio::io::AsyncWrite //! [`Connection`]: Connection use std::fmt; +use std::fmt::{Debug, Formatter}; +use std::ops::Deref; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; use ::http::Extensions; +use tokio::sync::watch; cfg_feature! { #![feature = "tcp"] @@ -113,6 +118,147 @@ pub struct Connected { pub(super) alpn: Alpn, pub(super) is_proxied: bool, pub(super) extra: Option, + pub(super) poisoned: PoisonPill, +} + +#[derive(Clone)] +pub(crate) struct PoisonPill { + poisoned: Arc, +} + +impl Debug for PoisonPill { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + // print the address of the pill—this makes debugging issues much easier + write!( + f, + "PoisonPill@{:p} {{ poisoned: {} }}", + self.poisoned, + self.poisoned.load(Ordering::Relaxed) + ) + } +} + +impl PoisonPill { + pub(crate) fn healthy() -> Self { + Self { + poisoned: Arc::new(AtomicBool::new(false)), + } + } + pub(crate) fn poison(&self) { + self.poisoned.store(true, Ordering::Relaxed) + } + + pub(crate) fn poisoned(&self) -> bool { + self.poisoned.load(Ordering::Relaxed) + } +} + +/// [`CaptureConnection`] allows callers to capture [`Connected`] information +/// +/// To capture a connection for a request, use [`capture_connection`]. +#[derive(Debug, Clone)] +pub struct CaptureConnection { + rx: watch::Receiver>, +} + +/// Capture the connection for a given request +/// +/// When making a request with Hyper, the underlying connection must implement the [`Connection`] trait. +/// [`capture_connection`] allows a caller to capture the returned [`Connected`] structure as soon +/// as the connection is established. +/// +/// *Note*: If establishing a connection fails, [`CaptureConnection::connection_metadata`] will always return none. +/// +/// # Examples +/// +/// **Synchronous access**: +/// The [`CaptureConnection::connection_metadata`] method allows callers to check if a connection has been +/// established. This is ideal for situations where you are certain the connection has already +/// been established (e.g. after the response future has already completed). 
+/// ```rust +/// use hyper::client::connect::{capture_connection, CaptureConnection}; +/// let mut request = http::Request::builder() +/// .uri("http://foo.com/") +/// .body(()) +/// .unwrap(); +/// +/// let captured_connection = capture_connection(&mut request); +/// // some time later after the request has been sent... +/// let connection_info = captured_connection.connection_metadata(); +/// println!("we are connected! {:?}", connection_info.as_ref()); +/// ``` +/// +/// **Asynchronous access**: +/// The [`CaptureConnection::wait_for_connection_metadata`] method returns a future resolves as soon as the +/// connection is available. +/// +/// ```rust +/// # #[cfg(feature = "runtime")] +/// # async fn example() { +/// use hyper::client::connect::{capture_connection, CaptureConnection}; +/// let mut request = http::Request::builder() +/// .uri("http://foo.com/") +/// .body(hyper::Body::empty()) +/// .unwrap(); +/// +/// let mut captured = capture_connection(&mut request); +/// tokio::task::spawn(async move { +/// let connection_info = captured.wait_for_connection_metadata().await; +/// println!("we are connected! {:?}", connection_info.as_ref()); +/// }); +/// +/// let client = hyper::Client::new(); +/// client.request(request).await.expect("request failed"); +/// # } +/// ``` +pub fn capture_connection(request: &mut crate::http::Request) -> CaptureConnection { + let (tx, rx) = CaptureConnection::new(); + request.extensions_mut().insert(tx); + rx +} + +/// TxSide for [`CaptureConnection`] +/// +/// This is inserted into `Extensions` to allow Hyper to back channel connection info +#[derive(Clone)] +pub(crate) struct CaptureConnectionExtension { + tx: Arc>>, +} + +impl CaptureConnectionExtension { + pub(crate) fn set(&self, connected: &Connected) { + self.tx.send_replace(Some(connected.clone())); + } +} + +impl CaptureConnection { + /// Internal API to create the tx and rx half of [`CaptureConnection`] + pub(crate) fn new() -> (CaptureConnectionExtension, Self) { + let (tx, rx) = watch::channel(None); + ( + CaptureConnectionExtension { tx: Arc::new(tx) }, + CaptureConnection { rx }, + ) + } + + /// Retrieve the connection metadata, if available + pub fn connection_metadata(&self) -> impl Deref> + '_ { + self.rx.borrow() + } + + /// Wait for the connection to be established + /// + /// If a connection was established, this will always return `Some(...)`. If the request never + /// successfully connected (e.g. DNS resolution failure), this method will never return. + pub async fn wait_for_connection_metadata( + &mut self, + ) -> impl Deref> + '_ { + if self.rx.borrow().is_some() { + return self.rx.borrow(); + } + let _ = self.rx.changed().await; + self.rx.borrow() + } } pub(super) struct Extra(Box); @@ -130,6 +276,7 @@ impl Connected { alpn: Alpn::None, is_proxied: false, extra: None, + poisoned: PoisonPill::healthy(), } } @@ -189,14 +336,24 @@ impl Connected { self.alpn == Alpn::H2 } + /// Poison this connection + /// + /// A poisoned connection will not be reused for subsequent requests by the pool + pub fn poison(&self) { + self.poisoned.poison(); + tracing::debug!( + poison_pill = ?self.poisoned, "connection was poisoned" + ); + } + // Don't public expose that `Connected` is `Clone`, unsure if we want to // keep that contract... 
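Taken together, `capture_connection` and `Connected::poison` let an application evict one specific pooled connection based on what it observes in a response. A minimal sketch follows (editorial, not part of the patch), assuming hyper 0.14 with the `client`, `http1`, and `tcp` features; the URL and the 5xx heuristic are placeholders.

```rust
// Editorial sketch, not part of the patch: poison a pooled connection when
// the application decides a response means the connection is bad.
use hyper::client::connect::capture_connection;
use hyper::client::HttpConnector;
use hyper::{Body, Client, Request};

async fn fetch_and_maybe_poison(client: &Client<HttpConnector>) -> hyper::Result<()> {
    let mut req = Request::builder()
        .uri("http://example.com/")
        .body(Body::empty())
        .expect("valid request");

    // Grab a handle to the connection info before the request is sent.
    let captured = capture_connection(&mut req);

    let res = client.request(req).await?;

    // Placeholder heuristic: treat a 5xx as "this connection is suspect",
    // so the pool will not hand it out again.
    if res.status().is_server_error() {
        if let Some(connected) = captured.connection_metadata().as_ref() {
            connected.poison();
        }
    }
    Ok(())
}
```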
- #[cfg(feature = "http2")] pub(super) fn clone(&self) -> Connected { Connected { alpn: self.alpn.clone(), is_proxied: self.is_proxied, extra: self.extra.clone(), + poisoned: self.poisoned.clone(), } } } @@ -270,12 +427,13 @@ where #[cfg(any(feature = "http1", feature = "http2"))] pub(super) mod sealed { use std::error::Error as StdError; + use std::future::Future; + use std::marker::Unpin; use ::http::Uri; use tokio::io::{AsyncRead, AsyncWrite}; use super::Connection; - use crate::common::{Future, Unpin}; /// Connect to a destination, returning an IO transport. /// @@ -296,6 +454,7 @@ pub(super) mod sealed { fn connect(self, internal_only: Internal, dst: Uri) -> ::Future; } + #[allow(unreachable_pub)] pub trait ConnectSvc { type Connection: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static; type Error: Into>; @@ -351,6 +510,7 @@ pub(super) mod sealed { #[cfg(test)] mod tests { use super::Connected; + use crate::client::connect::CaptureConnection; #[derive(Clone, Debug, PartialEq)] struct Ex1(usize); @@ -409,4 +569,72 @@ mod tests { assert_eq!(ex2.get::(), Some(&Ex1(99))); assert_eq!(ex2.get::(), Some(&Ex2("hiccup"))); } + + #[test] + fn test_sync_capture_connection() { + let (tx, rx) = CaptureConnection::new(); + assert!( + rx.connection_metadata().is_none(), + "connection has not been set" + ); + tx.set(&Connected::new().proxy(true)); + assert_eq!( + rx.connection_metadata() + .as_ref() + .expect("connected should be set") + .is_proxied(), + true + ); + + // ensure it can be called multiple times + assert_eq!( + rx.connection_metadata() + .as_ref() + .expect("connected should be set") + .is_proxied(), + true + ); + } + + #[tokio::test] + async fn async_capture_connection() { + let (tx, mut rx) = CaptureConnection::new(); + assert!( + rx.connection_metadata().is_none(), + "connection has not been set" + ); + let test_task = tokio::spawn(async move { + assert_eq!( + rx.wait_for_connection_metadata() + .await + .as_ref() + .expect("connection should be set") + .is_proxied(), + true + ); + // can be awaited multiple times + assert!( + rx.wait_for_connection_metadata().await.is_some(), + "should be awaitable multiple times" + ); + + assert_eq!(rx.connection_metadata().is_some(), true); + }); + // can't be finished, we haven't set the connection yet + assert_eq!(test_task.is_finished(), false); + tx.set(&Connected::new().proxy(true)); + + assert!(test_task.await.is_ok()); + } + + #[tokio::test] + async fn capture_connection_sender_side_dropped() { + let (tx, mut rx) = CaptureConnection::new(); + assert!( + rx.connection_metadata().is_none(), + "connection has not been set" + ); + drop(tx); + assert!(rx.wait_for_connection_metadata().await.is_none()); + } } diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs index 1d2b87eb00..a1a93ea964 100644 --- a/src/client/dispatch.rs +++ b/src/client/dispatch.rs @@ -1,13 +1,13 @@ #[cfg(feature = "http2")] use std::future::Future; +use std::marker::Unpin; +#[cfg(feature = "http2")] +use std::pin::Pin; +use std::task::{Context, Poll}; use futures_util::FutureExt; use tokio::sync::{mpsc, oneshot}; -#[cfg(feature = "http2")] -use crate::common::Pin; -use crate::common::{task, Poll}; - pub(crate) type RetryPromise = oneshot::Receiver)>>; pub(crate) type Promise = oneshot::Receiver>; @@ -53,7 +53,7 @@ pub(crate) struct UnboundedSender { } impl Sender { - pub(crate) fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.giver .poll_want(cx) .map_err(|_| 
crate::Error::new_closed()) @@ -86,7 +86,7 @@ impl Sender { } let (tx, rx) = oneshot::channel(); self.inner - .send(Envelope(Some((val, Callback::Retry(tx))))) + .send(Envelope(Some((val, Callback::Retry(Some(tx)))))) .map(move |_| rx) .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } @@ -97,7 +97,7 @@ impl Sender { } let (tx, rx) = oneshot::channel(); self.inner - .send(Envelope(Some((val, Callback::NoRetry(tx))))) + .send(Envelope(Some((val, Callback::NoRetry(Some(tx)))))) .map(move |_| rx) .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } @@ -124,7 +124,16 @@ impl UnboundedSender { pub(crate) fn try_send(&mut self, val: T) -> Result, T> { let (tx, rx) = oneshot::channel(); self.inner - .send(Envelope(Some((val, Callback::Retry(tx))))) + .send(Envelope(Some((val, Callback::Retry(Some(tx)))))) + .map(move |_| rx) + .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) + } + + #[cfg(all(feature = "backports", feature = "http2"))] + pub(crate) fn send(&mut self, val: T) -> Result, T> { + let (tx, rx) = oneshot::channel(); + self.inner + .send(Envelope(Some((val, Callback::NoRetry(Some(tx)))))) .map(move |_| rx) .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } @@ -146,10 +155,7 @@ pub(crate) struct Receiver { } impl Receiver { - pub(crate) fn poll_recv( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll)>> { + pub(crate) fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll)>> { match self.inner.poll_recv(cx) { Poll::Ready(item) => { Poll::Ready(item.map(|mut env| env.0.take().expect("envelope not dropped"))) @@ -198,33 +204,59 @@ impl Drop for Envelope { } pub(crate) enum Callback { - Retry(oneshot::Sender)>>), - NoRetry(oneshot::Sender>), + Retry(Option)>>>), + NoRetry(Option>>), +} + +impl Drop for Callback { + fn drop(&mut self) { + // FIXME(nox): What errors do we want here? 
+ let error = crate::Error::new_user_dispatch_gone().with(if std::thread::panicking() { + "user code panicked" + } else { + "runtime dropped the dispatch task" + }); + + match self { + Callback::Retry(tx) => { + if let Some(tx) = tx.take() { + let _ = tx.send(Err((error, None))); + } + } + Callback::NoRetry(tx) => { + if let Some(tx) = tx.take() { + let _ = tx.send(Err(error)); + } + } + } + } } impl Callback { #[cfg(feature = "http2")] pub(crate) fn is_canceled(&self) -> bool { match *self { - Callback::Retry(ref tx) => tx.is_closed(), - Callback::NoRetry(ref tx) => tx.is_closed(), + Callback::Retry(Some(ref tx)) => tx.is_closed(), + Callback::NoRetry(Some(ref tx)) => tx.is_closed(), + _ => unreachable!(), } } - pub(crate) fn poll_canceled(&mut self, cx: &mut task::Context<'_>) -> Poll<()> { + pub(crate) fn poll_canceled(&mut self, cx: &mut Context<'_>) -> Poll<()> { match *self { - Callback::Retry(ref mut tx) => tx.poll_closed(cx), - Callback::NoRetry(ref mut tx) => tx.poll_closed(cx), + Callback::Retry(Some(ref mut tx)) => tx.poll_closed(cx), + Callback::NoRetry(Some(ref mut tx)) => tx.poll_closed(cx), + _ => unreachable!(), } } - pub(crate) fn send(self, val: Result)>) { + pub(crate) fn send(mut self, val: Result)>) { match self { - Callback::Retry(tx) => { - let _ = tx.send(val); + Callback::Retry(ref mut tx) => { + let _ = tx.take().unwrap().send(val); } - Callback::NoRetry(tx) => { - let _ = tx.send(val.map_err(|e| e.0)); + Callback::NoRetry(ref mut tx) => { + let _ = tx.take().unwrap().send(val.map_err(|e| e.0)); } } } diff --git a/src/client/pool.rs b/src/client/pool.rs index b9772d688d..1dfd6ba3d3 100644 --- a/src/client/pool.rs +++ b/src/client/pool.rs @@ -1,8 +1,12 @@ use std::collections::{HashMap, HashSet, VecDeque}; use std::error::Error as StdError; use std::fmt; +use std::future::Future; +use std::marker::Unpin; use std::ops::{Deref, DerefMut}; +use std::pin::Pin; use std::sync::{Arc, Mutex, Weak}; +use std::task::{Context, Poll}; #[cfg(not(feature = "runtime"))] use std::time::{Duration, Instant}; @@ -13,7 +17,7 @@ use tokio::time::{Duration, Instant, Interval}; use tracing::{debug, trace}; use super::client::Ver; -use crate::common::{exec::Exec, task, Future, Pin, Poll, Unpin}; +use crate::common::exec::Exec; // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] @@ -79,7 +83,7 @@ struct PoolInner { // A oneshot channel is used to allow the interval to be notified when // the Pool completely drops. That way, the interval can cancel immediately. 
#[cfg(feature = "runtime")] - idle_interval_ref: Option>, + idle_interval_ref: Option>, #[cfg(feature = "runtime")] exec: Exec, timeout: Option, @@ -113,7 +117,7 @@ impl Pool { waiters: HashMap::new(), #[cfg(feature = "runtime")] exec: __exec.clone(), - timeout: config.idle_timeout, + timeout: config.idle_timeout.filter(|&t| t > Duration::ZERO), }))) } else { None @@ -576,10 +580,7 @@ impl fmt::Display for CheckoutIsClosedError { } impl Checkout { - fn poll_waiter( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll>>> { + fn poll_waiter(&mut self, cx: &mut Context<'_>) -> Poll>>> { if let Some(mut rx) = self.waiter.take() { match Pin::new(&mut rx).poll(cx) { Poll::Ready(Ok(value)) => { @@ -604,7 +605,7 @@ impl Checkout { } } - fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option> { + fn checkout(&mut self, cx: &mut Context<'_>) -> Option> { let entry = { let mut inner = self.pool.inner.as_ref()?.lock().unwrap(); let expiration = Expiration::new(inner.timeout); @@ -657,7 +658,7 @@ impl Checkout { impl Future for Checkout { type Output = crate::Result>; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { if let Some(pooled) = ready!(self.poll_waiter(cx)?) { return Poll::Ready(Ok(pooled)); } @@ -740,7 +741,7 @@ pin_project_lite::pin_project! { // Pool is fully dropped, and shutdown. This channel is never sent on, // but Err(Canceled) will be received when the Pool is dropped. #[pin] - pool_drop_notifier: oneshot::Receiver, + pool_drop_notifier: oneshot::Receiver, } } @@ -748,7 +749,7 @@ pin_project_lite::pin_project! { impl Future for IdleTask { type Output = (); - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut this = self.project(); loop { match this.pool_drop_notifier.as_mut().poll(cx) { @@ -790,11 +791,14 @@ impl WeakOpt { #[cfg(test)] mod tests { + use std::future::Future; + use std::pin::Pin; + use std::task::Context; use std::task::Poll; use std::time::Duration; use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; - use crate::common::{exec::Exec, task, Future, Pin}; + use crate::common::exec::Exec; /// Test unique reservations. #[derive(Debug, PartialEq, Eq)] @@ -864,7 +868,7 @@ mod tests { { type Output = Option<()>; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match Pin::new(&mut self.0).poll(cx) { Poll::Ready(Ok(_)) => Poll::Ready(Some(())), Poll::Ready(Err(_)) => Poll::Ready(Some(())), diff --git a/src/client/service.rs b/src/client/service.rs index 406f61edc9..047dd98766 100644 --- a/src/client/service.rs +++ b/src/client/service.rs @@ -5,13 +5,15 @@ use std::error::Error as StdError; use std::future::Future; use std::marker::PhantomData; +use std::pin::Pin; +use std::task::{Context, Poll}; use tracing::debug; +#[cfg_attr(feature = "deprecated", allow(deprecated))] use super::conn::{Builder, SendRequest}; use crate::{ body::HttpBody, - common::{task, Pin, Poll}, service::{MakeConnection, Service}, }; @@ -23,6 +25,7 @@ use crate::{ #[derive(Debug)] pub struct Connect { inner: C, + #[cfg_attr(feature = "deprecated", allow(deprecated))] builder: Builder, _pd: PhantomData, } @@ -30,6 +33,7 @@ pub struct Connect { impl Connect { /// Create a new `Connect` with some inner connector `C` and a connection /// builder. 
+ #[cfg_attr(feature = "deprecated", allow(deprecated))] pub fn new(inner: C, builder: Builder) -> Self { Self { inner, @@ -49,12 +53,13 @@ where B::Data: Send + Unpin, B::Error: Into>, { + #[cfg_attr(feature = "deprecated", allow(deprecated))] type Response = SendRequest; type Error = crate::Error; type Future = Pin> + Send + 'static>>; - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.inner .poll_ready(cx) .map_err(|e| crate::Error::new(crate::error::Kind::Connect).with(e.into())) @@ -68,6 +73,7 @@ where match io.await { Ok(io) => match builder.handshake(io).await { Ok((sr, conn)) => { + #[cfg_attr(feature = "deprecated", allow(deprecated))] builder.exec.execute(async move { if let Err(e) = conn.await { debug!("connection error: {:?}", e); diff --git a/src/common/drain.rs b/src/common/drain.rs index 174da876df..c8562d3c98 100644 --- a/src/common/drain.rs +++ b/src/common/drain.rs @@ -1,10 +1,11 @@ +use std::future::Future; use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll}; use pin_project_lite::pin_project; use tokio::sync::watch; -use super::{task, Future, Pin, Poll}; - pub(crate) fn channel() -> (Signal, Watch) { let (tx, rx) = watch::channel(()); (Signal { tx }, Watch { rx }) @@ -47,7 +48,7 @@ impl Signal { impl Future for Draining { type Output = (); - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { Pin::new(&mut self.as_mut().0).poll(cx) } } @@ -80,7 +81,7 @@ where { type Output = F::Output; - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut me = self.project(); loop { match mem::replace(me.state, State::Draining) { @@ -115,7 +116,7 @@ mod tests { impl Future for TestMe { type Output = (); - fn poll(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll { self.poll_cnt += 1; if self.finished { Poll::Ready(()) diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs index 0afef5f7ea..9ed7c42fea 100644 --- a/src/common/io/rewind.rs +++ b/src/common/io/rewind.rs @@ -1,11 +1,11 @@ use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; use std::{cmp, io}; use bytes::{Buf, Bytes}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use crate::common::{task, Pin, Poll}; - /// Combine a buffer with an IO, rewinding reads to use the buffer. 
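`Rewind` is crate-internal, but the idea is simple: bytes that were read ahead of where the caller stopped are replayed before the underlying IO is touched again. A blocking-`Read` analogue of the pattern is sketched below; it is illustrative only, not hyper's implementation, which is async and buffers with `Bytes`.

```rust
// Editorial sketch, not part of the patch: the "rewind" idea with blocking
// std::io::Read for brevity. Reads are served from a stashed prefix first,
// then fall through to the inner stream.
use std::io::{self, Read};

struct Rewind<R> {
    prefix: Vec<u8>, // bytes read ahead that must be replayed first
    inner: R,
}

impl<R: Read> Read for Rewind<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if !self.prefix.is_empty() {
            let n = self.prefix.len().min(buf.len());
            buf[..n].copy_from_slice(&self.prefix[..n]);
            self.prefix.drain(..n);
            return Ok(n);
        }
        self.inner.read(buf)
    }
}
```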
#[derive(Debug)] pub(crate) struct Rewind { @@ -50,7 +50,7 @@ where { fn poll_read( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { if let Some(mut prefix) = self.pre.take() { @@ -78,7 +78,7 @@ where { fn poll_write( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(&mut self.inner).poll_write(cx, buf) @@ -86,17 +86,17 @@ where fn poll_write_vectored( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll> { Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) } - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_flush(cx) } - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_shutdown(cx) } diff --git a/src/common/lazy.rs b/src/common/lazy.rs index 2722077303..df2c07d596 100644 --- a/src/common/lazy.rs +++ b/src/common/lazy.rs @@ -1,6 +1,9 @@ -use pin_project_lite::pin_project; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; -use super::{task, Future, Pin, Poll}; +use pin_project_lite::pin_project; pub(crate) trait Started: Future { fn started(&self) -> bool; @@ -55,7 +58,7 @@ where { type Output = R::Output; - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut this = self.project(); if let InnerProj::Fut { fut } = this.inner.as_mut().project() { diff --git a/src/common/mod.rs b/src/common/mod.rs index e38c6f5c7a..3d83946243 100644 --- a/src/common/mod.rs +++ b/src/common/mod.rs @@ -17,23 +17,14 @@ pub(crate) mod exec; pub(crate) mod io; #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] mod lazy; -mod never; #[cfg(any( feature = "stream", all(feature = "client", any(feature = "http1", feature = "http2")) ))] pub(crate) mod sync_wrapper; +#[cfg(feature = "http1")] pub(crate) mod task; pub(crate) mod watch; #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] pub(crate) use self::lazy::{lazy, Started as Lazy}; -#[cfg(any(feature = "http1", feature = "http2", feature = "runtime"))] -pub(crate) use self::never::Never; -pub(crate) use self::task::Poll; - -// group up types normally needed for `Future` -cfg_proto! { - pub(crate) use std::marker::Unpin; -} -pub(crate) use std::{future::Future, pin::Pin}; diff --git a/src/common/never.rs b/src/common/never.rs deleted file mode 100644 index f143caf60f..0000000000 --- a/src/common/never.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! An uninhabitable type meaning it can never happen. -//! -//! To be replaced with `!` once it is stable. 
- -use std::error::Error; -use std::fmt; - -#[derive(Debug)] -pub(crate) enum Never {} - -impl fmt::Display for Never { - fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self {} - } -} - -impl Error for Never { - fn description(&self) -> &str { - match *self {} - } -} diff --git a/src/common/task.rs b/src/common/task.rs index ec70c957d6..0ac047a462 100644 --- a/src/common/task.rs +++ b/src/common/task.rs @@ -1,12 +1,12 @@ -#[cfg(feature = "http1")] -use super::Never; -pub(crate) use std::task::{Context, Poll}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; /// A function to help "yield" a future, such that it is re-scheduled immediately. /// /// Useful for spin counts, so a future doesn't hog too much time. -#[cfg(feature = "http1")] -pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll { +pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll { cx.waker().wake_by_ref(); Poll::Pending } diff --git a/src/error.rs b/src/error.rs index 20acf3a7a5..5beedeb8b2 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,7 @@ //! Error and Result module. + +#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] +use crate::client::connect::Connected; use std::error::Error as StdError; use std::fmt; @@ -15,6 +18,8 @@ pub struct Error { struct ErrorImpl { kind: Kind, cause: Option, + #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] + connect_info: Option, } #[derive(Debug)] @@ -137,6 +142,10 @@ pub(super) enum User { #[cfg(feature = "server")] WithoutShutdownNonHttp1, + /// The dispatch task is gone. + #[cfg(feature = "client")] + DispatchGone, + /// User aborted in an FFI callback. #[cfg(feature = "ffi")] AbortedByCallback, @@ -206,9 +215,20 @@ impl Error { self.inner.cause } + /// Returns the info of the client connection on which this error occurred. 
+ #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] + pub fn client_connect_info(&self) -> Option<&Connected> { + self.inner.connect_info.as_ref() + } + pub(super) fn new(kind: Kind) -> Error { Error { - inner: Box::new(ErrorImpl { kind, cause: None }), + inner: Box::new(ErrorImpl { + kind, + cause: None, + #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] + connect_info: None, + }), } } @@ -217,6 +237,12 @@ impl Error { self } + #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] + pub(super) fn with_client_connect_info(mut self, connect_info: Connected) -> Error { + self.inner.connect_info = Some(connect_info); + self + } + #[cfg(any(all(feature = "http1", feature = "server"), feature = "ffi"))] pub(super) fn kind(&self) -> &Kind { &self.inner.kind @@ -387,6 +413,11 @@ impl Error { Error::new_user(User::AbortedByCallback) } + #[cfg(feature = "client")] + pub(super) fn new_user_dispatch_gone() -> Error { + Error::new(Kind::User(User::DispatchGone)) + } + #[cfg(feature = "http2")] pub(super) fn new_h2(cause: ::h2::Error) -> Error { if cause.is_io() { @@ -483,6 +514,8 @@ impl Error { Kind::User(User::WithoutShutdownNonHttp1) => { "without_shutdown() called on a non-HTTP/1 connection" } + #[cfg(feature = "client")] + Kind::User(User::DispatchGone) => "dispatch task is gone", #[cfg(feature = "ffi")] Kind::User(User::AbortedByCallback) => "operation aborted by an application callback", } diff --git a/src/ext.rs b/src/ext.rs index 6c54821e9d..224206dd66 100644 --- a/src/ext.rs +++ b/src/ext.rs @@ -40,6 +40,7 @@ impl Protocol { self.inner.as_str() } + #[cfg(feature = "server")] pub(crate) fn from_inner(inner: h2::ext::Protocol) -> Self { Self { inner } } diff --git a/src/ffi/client.rs b/src/ffi/client.rs index 4cdb257e30..670f77d141 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -93,6 +93,7 @@ unsafe impl AsTaskType for hyper_clientconn { ffi_fn! { /// Creates a new set of HTTP clientconn options to be used in a handshake. fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options { + #[allow(deprecated)] let builder = conn::Builder::new(); Box::into_raw(Box::new(hyper_clientconn_options { diff --git a/src/headers.rs b/src/headers.rs index 8407be185f..2e5e5db0f2 100644 --- a/src/headers.rs +++ b/src/headers.rs @@ -53,15 +53,15 @@ pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue> return None; } } else { - return None + return None; } } } else { - return None + return None; } } - return content_length + content_length } fn from_digits(bytes: &[u8]) -> Option { @@ -80,7 +80,7 @@ fn from_digits(bytes: &[u8]) -> Option { b'0'..=b'9' => { result = result.checked_mul(RADIX)?; result = result.checked_add((b - b'0') as u64)?; - }, + } _ => { // not a DIGIT, get outta here! return None; diff --git a/src/lib.rs b/src/lib.rs index 3a2202dff6..064a18ec30 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,7 +2,8 @@ #![deny(missing_debug_implementations)] #![cfg_attr(test, deny(rust_2018_idioms))] #![cfg_attr(all(test, feature = "full"), deny(unreachable_pub))] -#![cfg_attr(all(test, feature = "full"), deny(warnings))] +// 0.14.x is not actively developed, new warnings just get in the way. +//#![cfg_attr(all(test, feature = "full", not(feature = "nightly")), deny(warnings))] #![cfg_attr(all(test, feature = "nightly"), feature(test))] #![cfg_attr(docsrs, feature(doc_cfg))] @@ -53,6 +54,8 @@ //! connectors and acceptors for TCP, and a default executor. //! 
- `tcp`: Enables convenient implementations over TCP (using tokio). //! - `stream`: Provides `futures::Stream` capabilities. +//! - `backports`: 1.0 functionality backported to 0.14. +//! - `deprecated`: opt-in to deprecation warnings to prepare you for 1.0. //! //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index 37ab380f8b..5ab72f264e 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -1,6 +1,9 @@ use std::fmt; use std::io; use std::marker::PhantomData; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; #[cfg(all(feature = "server", feature = "runtime"))] use std::time::Duration; @@ -16,7 +19,6 @@ use tracing::{debug, error, trace}; use super::io::Buffered; use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants}; use crate::body::DecodedLength; -use crate::common::{task, Pin, Poll, Unpin}; use crate::headers::connection_keep_alive; use crate::proto::{BodyLength, MessageHead}; @@ -185,7 +187,7 @@ where pub(super) fn poll_read_head( &mut self, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, ) -> Poll, DecodedLength, Wants)>>> { debug_assert!(self.can_read_head()); trace!("Conn::read_head"); @@ -286,7 +288,7 @@ where pub(crate) fn poll_read_body( &mut self, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, ) -> Poll>> { debug_assert!(self.can_read_body()); @@ -347,10 +349,7 @@ where ret } - pub(crate) fn poll_read_keep_alive( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll> { + pub(crate) fn poll_read_keep_alive(&mut self, cx: &mut Context<'_>) -> Poll> { debug_assert!(!self.can_read_head() && !self.can_read_body()); if self.is_read_closed() { @@ -373,7 +372,7 @@ where // // This should only be called for Clients wanting to enter the idle // state. - fn require_empty_read(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn require_empty_read(&mut self, cx: &mut Context<'_>) -> Poll> { debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed()); debug_assert!(!self.is_mid_message()); debug_assert!(T::is_client()); @@ -406,7 +405,7 @@ where Poll::Ready(Err(crate::Error::new_unexpected_message())) } - fn mid_message_detect_eof(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn mid_message_detect_eof(&mut self, cx: &mut Context<'_>) -> Poll> { debug_assert!(!self.can_read_head() && !self.can_read_body() && !self.is_read_closed()); debug_assert!(self.is_mid_message()); @@ -425,7 +424,7 @@ where } } - fn force_io_read(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn force_io_read(&mut self, cx: &mut Context<'_>) -> Poll> { debug_assert!(!self.state.is_read_closed()); let result = ready!(self.io.poll_read_from_io(cx)); @@ -436,7 +435,7 @@ where })) } - fn maybe_notify(&mut self, cx: &mut task::Context<'_>) { + fn maybe_notify(&mut self, cx: &mut Context<'_>) { // its possible that we returned NotReady from poll() without having // exhausted the underlying Io. 
We would have done this when we // determined we couldn't keep reading until we knew how writing @@ -483,7 +482,7 @@ where } } - fn try_keep_alive(&mut self, cx: &mut task::Context<'_>) { + fn try_keep_alive(&mut self, cx: &mut Context<'_>) { self.state.try_keep_alive::(); self.maybe_notify(cx); } @@ -726,14 +725,14 @@ where Err(err) } - pub(crate) fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { ready!(Pin::new(&mut self.io).poll_flush(cx))?; self.try_keep_alive(cx); trace!("flushed({}): {:?}", T::LOG, self.state); Poll::Ready(Ok(())) } - pub(crate) fn poll_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) { Ok(()) => { trace!("shut down IO complete"); @@ -747,7 +746,13 @@ where } /// If the read side can be cheaply drained, do so. Otherwise, close. - pub(super) fn poll_drain_or_close_read(&mut self, cx: &mut task::Context<'_>) { + pub(super) fn poll_drain_or_close_read(&mut self, cx: &mut Context<'_>) { + if let Reading::Continue(ref decoder) = self.state.reading { + // skip sending the 100-continue + // just move forward to a read, in case a tiny body was included + self.state.reading = Reading::Body(decoder.clone()); + } + let _ = self.poll_read_body(cx); // If still in Reading::Body, just give up diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs index 1e3a38effc..3206863530 100644 --- a/src/proto/h1/decode.rs +++ b/src/proto/h1/decode.rs @@ -1,18 +1,22 @@ use std::error::Error as StdError; use std::fmt; use std::io; +use std::task::{Context, Poll}; use std::usize; use bytes::Bytes; use tracing::{debug, trace}; -use crate::common::{task, Poll}; - use super::io::MemRead; use super::DecodedLength; use self::Kind::{Chunked, Eof, Length}; +/// Maximum amount of bytes allowed in chunked extensions. +/// +/// This limit is currentlty applied for the entire body, not per chunk. +const CHUNKED_EXTENSIONS_LIMIT: u64 = 1024 * 16; + /// Decoders to handle different Transfer-Encodings. /// /// If a message body does not include a Transfer-Encoding, it *should* @@ -27,7 +31,11 @@ enum Kind { /// A Reader used when a Content-Length header is passed with a positive integer. Length(u64), /// A Reader used when Transfer-Encoding is `chunked`. - Chunked(ChunkedState, u64), + Chunked { + state: ChunkedState, + chunk_len: u64, + extensions_cnt: u64, + }, /// A Reader used for responses that don't indicate a length or chunked. /// /// The bool tracks when EOF is seen on the transport. @@ -49,6 +57,7 @@ enum Kind { #[derive(Debug, PartialEq, Clone, Copy)] enum ChunkedState { + Start, Size, SizeLws, Extension, @@ -74,7 +83,11 @@ impl Decoder { pub(crate) fn chunked() -> Decoder { Decoder { - kind: Kind::Chunked(ChunkedState::Size, 0), + kind: Kind::Chunked { + state: ChunkedState::new(), + chunk_len: 0, + extensions_cnt: 0, + }, } } @@ -95,12 +108,20 @@ impl Decoder { // methods pub(crate) fn is_eof(&self) -> bool { - matches!(self.kind, Length(0) | Chunked(ChunkedState::End, _) | Eof(true)) + matches!( + self.kind, + Length(0) + | Chunked { + state: ChunkedState::End, + .. 
+ } + | Eof(true) + ) } pub(crate) fn decode( &mut self, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, body: &mut R, ) -> Poll> { trace!("decode; state={:?}", self.kind); @@ -125,11 +146,15 @@ impl Decoder { Poll::Ready(Ok(buf)) } } - Chunked(ref mut state, ref mut size) => { + Chunked { + ref mut state, + ref mut chunk_len, + ref mut extensions_cnt, + } => { loop { let mut buf = None; // advances the chunked state - *state = ready!(state.step(cx, body, size, &mut buf))?; + *state = ready!(state.step(cx, body, chunk_len, extensions_cnt, &mut buf))?; if *state == ChunkedState::End { trace!("end of chunked"); return Poll::Ready(Ok(Bytes::new())); @@ -179,19 +204,36 @@ macro_rules! byte ( }) ); +macro_rules! or_overflow { + ($e:expr) => ( + match $e { + Some(val) => val, + None => return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "invalid chunk size: overflow", + ))), + } + ) +} + impl ChunkedState { + fn new() -> ChunkedState { + ChunkedState::Start + } fn step( &self, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, body: &mut R, size: &mut u64, + extensions_cnt: &mut u64, buf: &mut Option, ) -> Poll> { use self::ChunkedState::*; match *self { + Start => ChunkedState::read_start(cx, body, size), Size => ChunkedState::read_size(cx, body, size), SizeLws => ChunkedState::read_size_lws(cx, body), - Extension => ChunkedState::read_extension(cx, body), + Extension => ChunkedState::read_extension(cx, body, extensions_cnt), SizeLf => ChunkedState::read_size_lf(cx, body, *size), Body => ChunkedState::read_body(cx, body, size, buf), BodyCr => ChunkedState::read_body_cr(cx, body), @@ -203,25 +245,46 @@ impl ChunkedState { End => Poll::Ready(Ok(ChunkedState::End)), } } - fn read_size( - cx: &mut task::Context<'_>, + + fn read_start( + cx: &mut Context<'_>, rdr: &mut R, size: &mut u64, ) -> Poll> { - trace!("Read chunk hex size"); + trace!("Read chunk start"); - macro_rules! or_overflow { - ($e:expr) => ( - match $e { - Some(val) => val, - None => return Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidData, - "invalid chunk size: overflow", - ))), - } - ) + let radix = 16; + match byte!(rdr, cx) { + b @ b'0'..=b'9' => { + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b - b'0') as u64)); + } + b @ b'a'..=b'f' => { + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b + 10 - b'a') as u64)); + } + b @ b'A'..=b'F' => { + *size = or_overflow!(size.checked_mul(radix)); + *size = or_overflow!(size.checked_add((b + 10 - b'A') as u64)); + } + _ => { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid chunk size line: missing size digit", + ))); + } } + Poll::Ready(Ok(ChunkedState::Size)) + } + + fn read_size( + cx: &mut Context<'_>, + rdr: &mut R, + size: &mut u64, + ) -> Poll> { + trace!("Read chunk hex size"); + let radix = 16; match byte!(rdr, cx) { b @ b'0'..=b'9' => { @@ -249,7 +312,7 @@ impl ChunkedState { Poll::Ready(Ok(ChunkedState::Size)) } fn read_size_lws( - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, rdr: &mut R, ) -> Poll> { trace!("read_size_lws"); @@ -265,8 +328,9 @@ impl ChunkedState { } } fn read_extension( - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, rdr: &mut R, + extensions_cnt: &mut u64, ) -> Poll> { trace!("read_extension"); // We don't care about extensions really at all. Just ignore them. 
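The new `Start` state and the `or_overflow!` macro enforce two things: a chunk-size line must begin with at least one hex digit, and the accumulated size must not overflow a `u64`. The standalone sketch below (an illustrative helper, not hyper's code) shows the checked hex accumulation for a size line such as `1a;foo=bar\r\n`; the bytes after `;` are chunk extensions, which are ignored but now count toward a total cap across the body.

```rust
// Editorial sketch, not part of the patch: checked accumulation of a hex
// chunk size, returning None on overflow (mapped to InvalidData by the
// decoder) and None for a non-hex byte (end of the size digits).
fn push_hex_digit(size: u64, b: u8) -> Option<u64> {
    let digit = match b {
        b'0'..=b'9' => (b - b'0') as u64,
        b'a'..=b'f' => (b - b'a') as u64 + 10,
        b'A'..=b'F' => (b - b'A') as u64 + 10,
        _ => return None, // not a hex digit
    };
    size.checked_mul(16)?.checked_add(digit)
}

fn main() {
    // "1a" => 26 bytes of chunk data follow the CRLF.
    let mut size = 0u64;
    for &b in b"1a" {
        size = push_hex_digit(size, b).expect("no overflow");
    }
    assert_eq!(size, 26);
}
```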
@@ -281,11 +345,21 @@ impl ChunkedState { io::ErrorKind::InvalidData, "invalid chunk extension contains newline", ))), - _ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions + _ => { + *extensions_cnt += 1; + if *extensions_cnt >= CHUNKED_EXTENSIONS_LIMIT { + Poll::Ready(Err(io::Error::new( + io::ErrorKind::InvalidData, + "chunk extensions over limit", + ))) + } else { + Poll::Ready(Ok(ChunkedState::Extension)) + } + } // no supported extensions } } fn read_size_lf( - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, rdr: &mut R, size: u64, ) -> Poll> { @@ -307,7 +381,7 @@ impl ChunkedState { } fn read_body( - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, rdr: &mut R, rem: &mut u64, buf: &mut Option, @@ -341,7 +415,7 @@ impl ChunkedState { } } fn read_body_cr( - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, rdr: &mut R, ) -> Poll> { match byte!(rdr, cx) { @@ -353,11 +427,11 @@ impl ChunkedState { } } fn read_body_lf( - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, rdr: &mut R, ) -> Poll> { match byte!(rdr, cx) { - b'\n' => Poll::Ready(Ok(ChunkedState::Size)), + b'\n' => Poll::Ready(Ok(ChunkedState::Start)), _ => Poll::Ready(Err(io::Error::new( io::ErrorKind::InvalidInput, "Invalid chunk body LF", @@ -366,7 +440,7 @@ impl ChunkedState { } fn read_trailer( - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, rdr: &mut R, ) -> Poll> { trace!("read_trailer"); @@ -376,7 +450,7 @@ impl ChunkedState { } } fn read_trailer_lf( - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, rdr: &mut R, ) -> Poll> { match byte!(rdr, cx) { @@ -389,7 +463,7 @@ impl ChunkedState { } fn read_end_cr( - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, rdr: &mut R, ) -> Poll> { match byte!(rdr, cx) { @@ -398,7 +472,7 @@ impl ChunkedState { } } fn read_end_lf( - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, rdr: &mut R, ) -> Poll> { match byte!(rdr, cx) { @@ -430,7 +504,7 @@ mod tests { use tokio::io::{AsyncRead, ReadBuf}; impl<'a> MemRead for &'a [u8] { - fn read_mem(&mut self, _: &mut task::Context<'_>, len: usize) -> Poll> { + fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll> { let n = std::cmp::min(len, self.len()); if n > 0 { let (a, b) = self.split_at(n); @@ -444,7 +518,7 @@ mod tests { } impl<'a> MemRead for &'a mut (dyn AsyncRead + Unpin) { - fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll> { + fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll> { let mut v = vec![0; len]; let mut buf = ReadBuf::new(&mut v); ready!(Pin::new(self).poll_read(cx, &mut buf)?); @@ -452,9 +526,8 @@ mod tests { } } - #[cfg(feature = "nightly")] impl MemRead for Bytes { - fn read_mem(&mut self, _: &mut task::Context<'_>, len: usize) -> Poll> { + fn read_mem(&mut self, _: &mut Context<'_>, len: usize) -> Poll> { let n = std::cmp::min(len, self.len()); let ret = self.split_to(n); Poll::Ready(Ok(ret)) @@ -476,13 +549,15 @@ mod tests { use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof}; async fn read(s: &str) -> u64 { - let mut state = ChunkedState::Size; + let mut state = ChunkedState::new(); let rdr = &mut s.as_bytes(); let mut size = 0; + let mut ext_cnt = 0; loop { - let result = - futures_util::future::poll_fn(|cx| state.step(cx, rdr, &mut size, &mut None)) - .await; + let result = futures_util::future::poll_fn(|cx| { + state.step(cx, rdr, &mut size, &mut ext_cnt, &mut None) + }) + .await; let desc = format!("read_size failed for {:?}", s); state = result.expect(desc.as_str()); if state == ChunkedState::Body || 
state == ChunkedState::EndCr { @@ -493,13 +568,15 @@ mod tests { } async fn read_err(s: &str, expected_err: io::ErrorKind) { - let mut state = ChunkedState::Size; + let mut state = ChunkedState::new(); let rdr = &mut s.as_bytes(); let mut size = 0; + let mut ext_cnt = 0; loop { - let result = - futures_util::future::poll_fn(|cx| state.step(cx, rdr, &mut size, &mut None)) - .await; + let result = futures_util::future::poll_fn(|cx| { + state.step(cx, rdr, &mut size, &mut ext_cnt, &mut None) + }) + .await; state = match result { Ok(s) => s, Err(e) => { @@ -530,6 +607,9 @@ mod tests { // Missing LF or CRLF read_err("F\rF", InvalidInput).await; read_err("F", UnexpectedEof).await; + // Missing digit + read_err("\r\n\r\n", InvalidInput).await; + read_err("\r\n", InvalidInput).await; // Invalid hex digit read_err("X\r\n", InvalidInput).await; read_err("1X\r\n", InvalidInput).await; @@ -586,6 +666,48 @@ mod tests { assert_eq!("1234567890abcdef", &result); } + #[tokio::test] + async fn test_read_chunked_with_missing_zero_digit() { + // After reading a valid chunk, the ending is missing a zero. + let mut mock_buf = &b"1\r\nZ\r\n\r\n\r\n"[..]; + let mut decoder = Decoder::chunked(); + let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode"); + assert_eq!("Z", buf); + + let err = decoder + .decode_fut(&mut mock_buf) + .await + .expect_err("decode 2"); + assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + } + + #[tokio::test] + async fn test_read_chunked_extensions_over_limit() { + // construct a chunked body where each individual chunked extension + // is totally fine, but combined is over the limit. + let per_chunk = super::CHUNKED_EXTENSIONS_LIMIT * 2 / 3; + let mut scratch = vec![]; + for _ in 0..2 { + scratch.extend(b"1;"); + scratch.extend(b"x".repeat(per_chunk as usize)); + scratch.extend(b"\r\nA\r\n"); + } + scratch.extend(b"0\r\n\r\n"); + let mut mock_buf = Bytes::from(scratch); + + let mut decoder = Decoder::chunked(); + let buf1 = decoder.decode_fut(&mut mock_buf).await.expect("decode1"); + assert_eq!(&buf1[..], b"A"); + + let err = decoder + .decode_fut(&mut mock_buf) + .await + .expect_err("decode2"); + assert_eq!(err.kind(), io::ErrorKind::InvalidData); + assert_eq!(err.to_string(), "chunk extensions over limit"); + } + + #[cfg(not(miri))] #[tokio::test] async fn test_read_chunked_trailer_with_missing_lf() { let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..]; diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index 677131bfdd..3516d7ad21 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -1,4 +1,8 @@ use std::error::Error as StdError; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; use bytes::{Buf, Bytes}; use http::Request; @@ -7,10 +11,8 @@ use tracing::{debug, trace}; use super::{Http1Transaction, Wants}; use crate::body::{Body, DecodedLength, HttpBody}; -use crate::common::{task, Future, Pin, Poll, Unpin}; -use crate::proto::{ - BodyLength, Conn, Dispatched, MessageHead, RequestHead, -}; +use crate::common; +use crate::proto::{BodyLength, Conn, Dispatched, MessageHead, RequestHead}; use crate::upgrade::OnUpgrade; pub(crate) struct Dispatcher { @@ -28,10 +30,10 @@ pub(crate) trait Dispatch { type RecvItem; fn poll_msg( self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, ) -> Poll>>; fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()>; - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; + fn 
poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; fn should_poll(&self) -> bool; } @@ -60,10 +62,10 @@ cfg_client! { impl Dispatcher where D: Dispatch< - PollItem = MessageHead, - PollBody = Bs, - RecvItem = MessageHead, - > + Unpin, + PollItem = MessageHead, + PollBody = Bs, + RecvItem = MessageHead, + > + Unpin, D::PollError: Into>, I: AsyncRead + AsyncWrite + Unpin, T: Http1Transaction + Unpin, @@ -98,10 +100,7 @@ where /// /// This is useful for old-style HTTP upgrades, but ignores /// newer-style upgrade API. - pub(crate) fn poll_without_shutdown( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll> + pub(crate) fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> where Self: Unpin, { @@ -114,10 +113,14 @@ where fn poll_catch( &mut self, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, should_shutdown: bool, ) -> Poll> { Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| { + // Be sure to alert a streaming body of the failure. + if let Some(mut body) = self.body_tx.take() { + body.send_error(crate::Error::new_body("connection error")); + } // An error means we're shutting down either way. // We just try to give the error to the user, // and close the connection with an Ok. If we @@ -129,7 +132,7 @@ where fn poll_inner( &mut self, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, should_shutdown: bool, ) -> Poll> { T::update_date(); @@ -150,7 +153,7 @@ where } } - fn poll_loop(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_loop(&mut self, cx: &mut Context<'_>) -> Poll> { // Limit the looping on this connection, in case it is ready far too // often, so that other futures don't starve. // @@ -177,10 +180,10 @@ where trace!("poll_loop yielding (self = {:p})", self); - task::yield_now(cx).map(|never| match never {}) + common::task::yield_now(cx).map(|never| match never {}) } - fn poll_read(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_read(&mut self, cx: &mut Context<'_>) -> Poll> { loop { if self.is_closing { return Poll::Ready(Ok(())); @@ -234,7 +237,7 @@ where } } - fn poll_read_head(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_read_head(&mut self, cx: &mut Context<'_>) -> Poll> { // can dispatch receive, or does it still care about, an incoming message? 
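The `poll_catch` change above now hands a connection-level failure to any in-flight streaming body instead of leaving its reader pending forever. A rough user-level sketch of that behaviour, using `Body::channel` with `abort()` as a public stand-in for the internal `send_error` call (assumes hyper 0.14 default features plus tokio with `rt` and `macros`):

```rust
use hyper::body::HttpBody as _; // for Body::data()

#[tokio::main]
async fn main() {
    let (sender, mut body) = hyper::Body::channel();

    // Simulate the dispatcher tearing down an in-flight body when the
    // connection errors out.
    sender.abort();

    // The body reader now observes an error instead of waiting forever.
    let chunk = body.data().await.expect("a final item is yielded");
    assert!(chunk.is_err());
}
```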
match ready!(self.dispatch.poll_ready(cx)) { Ok(()) => (), @@ -258,7 +261,10 @@ where if wants.contains(Wants::UPGRADE) { let upgrade = self.conn.on_upgrade(); debug_assert!(!upgrade.is_none(), "empty upgrade"); - debug_assert!(head.extensions.get::().is_none(), "OnUpgrade already set"); + debug_assert!( + head.extensions.get::().is_none(), + "OnUpgrade already set" + ); head.extensions.insert(upgrade); } self.dispatch.recv_msg(Ok((head, body)))?; @@ -286,7 +292,7 @@ where } } - fn poll_write(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_write(&mut self, cx: &mut Context<'_>) -> Poll> { loop { if self.is_closing { return Poll::Ready(Ok(())); @@ -367,13 +373,18 @@ where self.conn.end_body()?; } } else { - return Poll::Pending; + // If there's no body_rx, end the body + if self.conn.can_write_body() { + self.conn.end_body()?; + } else { + return Poll::Pending; + } } } } } - fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { self.conn.poll_flush(cx).map_err(|err| { debug!("error writing: {}", err); crate::Error::new_body_write(err) @@ -407,10 +418,10 @@ where impl Future for Dispatcher where D: Dispatch< - PollItem = MessageHead, - PollBody = Bs, - RecvItem = MessageHead, - > + Unpin, + PollItem = MessageHead, + PollBody = Bs, + RecvItem = MessageHead, + > + Unpin, D::PollError: Into>, I: AsyncRead + AsyncWrite + Unpin, T: Http1Transaction + Unpin, @@ -420,7 +431,7 @@ where type Output = crate::Result; #[inline] - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.poll_catch(cx, true) } } @@ -484,7 +495,7 @@ cfg_server! { fn poll_msg( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, ) -> Poll>> { let mut this = self.as_mut(); let ret = if let Some(ref mut fut) = this.in_flight.as_mut().as_pin_mut() { @@ -519,7 +530,7 @@ cfg_server! { Ok(()) } - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { if self.in_flight.is_some() { Poll::Pending } else { @@ -555,13 +566,13 @@ cfg_client! { { type PollItem = RequestHead; type PollBody = B; - type PollError = crate::common::Never; + type PollError = std::convert::Infallible; type RecvItem = crate::proto::ResponseHead; fn poll_msg( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>> { + cx: &mut Context<'_>, + ) -> Poll>> { let mut this = self.as_mut(); debug_assert!(!this.rx_closed); match this.rx.poll_recv(cx) { @@ -631,7 +642,7 @@ cfg_client! { } } - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { match self.callback { Some(ref mut cb) => match cb.poll_canceled(cx) { Poll::Ready(()) => { diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index 1d251e2c84..88ce9dac5f 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -5,6 +5,8 @@ use std::future::Future; use std::io::{self, IoSlice}; use std::marker::Unpin; use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::{Context, Poll}; #[cfg(all(feature = "server", feature = "runtime"))] use std::time::Duration; @@ -16,7 +18,6 @@ use tracing::{debug, trace}; use super::{Http1Transaction, ParseContext, ParsedMessage}; use crate::common::buf::BufList; -use crate::common::{task, Pin, Poll}; /// The initial buffer size allocated before trying to read from IO. 
pub(crate) const INIT_BUFFER_SIZE: usize = 8192; @@ -39,6 +40,7 @@ const MAX_BUF_LIST_BUFFERS: usize = 16; pub(crate) struct Buffered { flush_pipeline: bool, io: T, + partial_len: Option, read_blocked: bool, read_buf: BytesMut, read_buf_strategy: ReadStrategy, @@ -72,6 +74,7 @@ where Buffered { flush_pipeline: false, io, + partial_len: None, read_blocked: false, read_buf: BytesMut::with_capacity(0), read_buf_strategy: ReadStrategy::default(), @@ -174,7 +177,7 @@ where pub(super) fn parse( &mut self, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, parse_ctx: ParseContext<'_>, ) -> Poll>> where @@ -183,6 +186,7 @@ where loop { match super::role::parse_headers::( &mut self.read_buf, + self.partial_len, ParseContext { cached_headers: parse_ctx.cached_headers, req_method: parse_ctx.req_method, @@ -219,11 +223,13 @@ where .reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60)); } } + self.partial_len = None; return Poll::Ready(Ok(msg)); } None => { let max = self.read_buf_strategy.max(); - if self.read_buf.len() >= max { + let curr_len = self.read_buf.len(); + if curr_len >= max { debug!("max_buf_size ({}) reached, closing", max); return Poll::Ready(Err(crate::Error::new_too_large())); } @@ -241,6 +247,13 @@ where } } } + if curr_len > 0 { + trace!("partial headers; {} bytes so far", curr_len); + self.partial_len = Some(curr_len); + } else { + // 1xx gobled some bytes + self.partial_len = None; + } } } if ready!(self.poll_read_from_io(cx)).map_err(crate::Error::new_io)? == 0 { @@ -250,10 +263,7 @@ where } } - pub(crate) fn poll_read_from_io( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll> { + pub(crate) fn poll_read_from_io(&mut self, cx: &mut Context<'_>) -> Poll> { self.read_blocked = false; let next = self.read_buf_strategy.next(); if self.read_buf_remaining_mut() < next { @@ -296,7 +306,7 @@ where self.read_blocked } - pub(crate) fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll> { + pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { if self.flush_pipeline && !self.read_buf.is_empty() { Poll::Ready(Ok(())) } else if self.write_buf.remaining() == 0 { @@ -336,7 +346,7 @@ where /// /// Since all buffered bytes are flattened into the single headers buffer, /// that skips some bookkeeping around using multiple buffers. - fn poll_flush_flattened(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_flush_flattened(&mut self, cx: &mut Context<'_>) -> Poll> { loop { let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.chunk()))?; debug!("flushed {} bytes", n); @@ -366,7 +376,7 @@ impl Unpin for Buffered {} // TODO: This trait is old... at least rename to PollBytes or something... pub(crate) trait MemRead { - fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll>; + fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll>; } impl MemRead for Buffered @@ -374,7 +384,7 @@ where T: AsyncRead + AsyncWrite + Unpin, B: Buf, { - fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll> { + fn read_mem(&mut self, cx: &mut Context<'_>, len: usize) -> Poll> { if !self.read_buf.is_empty() { let n = std::cmp::min(len, self.read_buf.len()); Poll::Ready(Ok(self.read_buf.split_to(n).freeze())) diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index d6272eeab2..1c00d7445d 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -62,24 +62,20 @@ macro_rules! 
maybe_panic { pub(super) fn parse_headers( bytes: &mut BytesMut, + prev_len: Option, ctx: ParseContext<'_>, ) -> ParseResult where T: Http1Transaction, { - // If the buffer is empty, don't bother entering the span, it's just noise. - if bytes.is_empty() { - return Ok(None); - } - - let span = trace_span!("parse_headers"); - let _s = span.enter(); - #[cfg(all(feature = "server", feature = "runtime"))] if !*ctx.h1_header_read_timeout_running { if let Some(h1_header_read_timeout) = ctx.h1_header_read_timeout { - let deadline = Instant::now() + h1_header_read_timeout; + let span = trace_span!("parse_headers"); + let _s = span.enter(); + let deadline = Instant::now() + h1_header_read_timeout; + *ctx.h1_header_read_timeout_running = true; match ctx.h1_header_read_timeout_fut { Some(h1_header_read_timeout_fut) => { debug!("resetting h1 header read timeout timer"); @@ -94,9 +90,45 @@ where } } + // If the buffer is empty, don't bother entering the span, it's just noise. + if bytes.is_empty() { + return Ok(None); + } + + let span = trace_span!("parse_headers"); + let _s = span.enter(); + + if let Some(prev_len) = prev_len { + if !is_complete_fast(bytes, prev_len) { + return Ok(None); + } + } + T::parse(bytes, ctx) } +/// A fast scan for the end of a message. +/// Used when there was a partial read, to skip full parsing on a +/// a slow connection. +fn is_complete_fast(bytes: &[u8], prev_len: usize) -> bool { + let start = if prev_len < 3 { 0 } else { prev_len - 3 }; + let bytes = &bytes[start..]; + + for (i, b) in bytes.iter().copied().enumerate() { + if b == b'\r' { + if bytes[i + 1..].chunks(3).next() == Some(&b"\n\r\n"[..]) { + return true; + } + } else if b == b'\n' { + if bytes.get(i + 1) == Some(&b'\n') { + return true; + } + } + } + + false +} + pub(super) fn encode_headers( enc: Encode<'_, T::Outgoing>, dst: &mut Vec, @@ -2632,6 +2664,28 @@ mod tests { assert_eq!(parsed.head.headers["server"], "hello\tworld"); } + #[test] + fn test_is_complete_fast() { + let s = b"GET / HTTP/1.1\r\na: b\r\n\r\n"; + for n in 0..s.len() { + assert!(is_complete_fast(s, n), "{:?}; {}", s, n); + } + let s = b"GET / HTTP/1.1\na: b\n\n"; + for n in 0..s.len() { + assert!(is_complete_fast(s, n)); + } + + // Not + let s = b"GET / HTTP/1.1\r\na: b\r\n\r"; + for n in 0..s.len() { + assert!(!is_complete_fast(s, n)); + } + let s = b"GET / HTTP/1.1\na: b\n"; + for n in 0..s.len() { + assert!(!is_complete_fast(s, n)); + } + } + #[test] fn test_write_headers_orig_case_empty_value() { let mut headers = HeaderMap::new(); diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs index 013f6fb5a8..8c2a4d2e0f 100644 --- a/src/proto/h2/client.rs +++ b/src/proto/h2/client.rs @@ -1,4 +1,9 @@ +use std::convert::Infallible; use std::error::Error as StdError; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; #[cfg(feature = "runtime")] use std::time::Duration; @@ -7,29 +12,32 @@ use futures_channel::{mpsc, oneshot}; use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; use futures_util::stream::StreamExt as _; use h2::client::{Builder, SendRequest}; +use h2::SendStream; use http::{Method, StatusCode}; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::{debug, trace, warn}; use super::{ping, H2Upgraded, PipeToSendStream, SendBuf}; use crate::body::HttpBody; -use crate::common::{exec::Exec, task, Future, Never, Pin, Poll}; +use crate::client::dispatch::Callback; +use crate::common::exec::Exec; use crate::ext::Protocol; use crate::headers; use 
crate::proto::h2::UpgradedSendStream; use crate::proto::Dispatched; use crate::upgrade::Upgraded; use crate::{Body, Request, Response}; +use h2::client::ResponseFuture; type ClientRx = crate::client::dispatch::Receiver, Response>; ///// An mpsc channel is used to help notify the `Connection` task when *all* ///// other handles to it have been dropped, so that it can shutdown. -type ConnDropRef = mpsc::Sender; +type ConnDropRef = mpsc::Sender; ///// A oneshot channel watches the `Connection` task, and when it completes, ///// the "dispatch" task will be notified and can shutdown sooner. -type ConnEof = oneshot::Receiver; +type ConnEof = oneshot::Receiver; // Our defaults are chosen for the "majority" case, which usually are not // resource constrained, and so the spec default of 64kb can be too limiting @@ -170,10 +178,11 @@ where executor: exec, h2_tx, req_rx, + fut_ctx: None, }) } -async fn conn_task(conn: C, drop_rx: D, cancel_tx: oneshot::Sender) +async fn conn_task(conn: C, drop_rx: D, cancel_tx: oneshot::Sender) where C: Future + Unpin, D: Future + Unpin, @@ -193,6 +202,20 @@ where } } +struct FutCtx +where + B: HttpBody, +{ + is_connect: bool, + eos: bool, + fut: ResponseFuture, + body_tx: SendStream>, + body: B, + cb: Callback, Response>, +} + +impl Unpin for FutCtx {} + pub(crate) struct ClientTask where B: HttpBody, @@ -203,6 +226,7 @@ where executor: Exec, h2_tx: SendRequest>, req_rx: ClientRx, + fut_ctx: Option>, } impl ClientTask @@ -214,6 +238,99 @@ where } } +impl ClientTask +where + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + fn poll_pipe(&mut self, f: FutCtx, cx: &mut Context<'_>) { + let ping = self.ping.clone(); + let send_stream = if !f.is_connect { + if !f.eos { + let mut pipe = Box::pin(PipeToSendStream::new(f.body, f.body_tx)).map(|res| { + if let Err(e) = res { + debug!("client request body error: {}", e); + } + }); + + // eagerly see if the body pipe is ready and + // can thus skip allocating in the executor + match Pin::new(&mut pipe).poll(cx) { + Poll::Ready(_) => (), + Poll::Pending => { + let conn_drop_ref = self.conn_drop_ref.clone(); + // keep the ping recorder's knowledge of an + // "open stream" alive while this body is + // still sending... 
+ let ping = ping.clone(); + let pipe = pipe.map(move |x| { + drop(conn_drop_ref); + drop(ping); + x + }); + // Clear send task + self.executor.execute(pipe); + } + } + } + + None + } else { + Some(f.body_tx) + }; + + let fut = f.fut.map(move |result| match result { + Ok(res) => { + // record that we got the response headers + ping.record_non_data(); + + let content_length = headers::content_length_parse_all(res.headers()); + if let (Some(mut send_stream), StatusCode::OK) = (send_stream, res.status()) { + if content_length.map_or(false, |len| len != 0) { + warn!("h2 connect response with non-zero body not supported"); + + send_stream.send_reset(h2::Reason::INTERNAL_ERROR); + return Err(( + crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), + None, + )); + } + let (parts, recv_stream) = res.into_parts(); + let mut res = Response::from_parts(parts, Body::empty()); + + let (pending, on_upgrade) = crate::upgrade::pending(); + let io = H2Upgraded { + ping, + send_stream: unsafe { UpgradedSendStream::new(send_stream) }, + recv_stream, + buf: Bytes::new(), + }; + let upgraded = Upgraded::new(io, Bytes::new()); + + pending.fulfill(upgraded); + res.extensions_mut().insert(on_upgrade); + + Ok(res) + } else { + let res = res.map(|stream| { + let ping = ping.for_stream(&stream); + crate::Body::h2(stream, content_length.into(), ping) + }); + Ok(res) + } + } + Err(err) => { + ping.ensure_not_timed_out().map_err(|e| (e, None))?; + + debug!("client response error: {}", err); + Err((crate::Error::new_h2(err), None)) + } + }); + self.executor.execute(f.cb.send_when(fut)); + } +} + impl Future for ClientTask where B: HttpBody + Send + 'static, @@ -222,7 +339,7 @@ where { type Output = crate::Result; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { match ready!(self.h2_tx.poll_ready(cx)) { Ok(()) => (), @@ -237,6 +354,16 @@ where } }; + match self.fut_ctx.take() { + // If we were waiting on pending open + // continue where we left off. + Some(f) => { + self.poll_pipe(f, cx); + continue; + } + None => (), + } + match self.req_rx.poll_recv(cx) { Poll::Ready(Some((req, cb))) => { // check that future hasn't been canceled already @@ -255,7 +382,6 @@ where let is_connect = req.method() == Method::CONNECT; let eos = body.is_end_stream(); - let ping = self.ping.clone(); if is_connect { if headers::content_length_parse_all(req.headers()) @@ -283,90 +409,31 @@ where } }; - let send_stream = if !is_connect { - if !eos { - let mut pipe = - Box::pin(PipeToSendStream::new(body, body_tx)).map(|res| { - if let Err(e) = res { - debug!("client request body error: {}", e); - } - }); - - // eagerly see if the body pipe is ready and - // can thus skip allocating in the executor - match Pin::new(&mut pipe).poll(cx) { - Poll::Ready(_) => (), - Poll::Pending => { - let conn_drop_ref = self.conn_drop_ref.clone(); - // keep the ping recorder's knowledge of an - // "open stream" alive while this body is - // still sending... 
- let ping = ping.clone(); - let pipe = pipe.map(move |x| { - drop(conn_drop_ref); - drop(ping); - x - }); - self.executor.execute(pipe); - } - } - } - - None - } else { - Some(body_tx) + let f = FutCtx { + is_connect, + eos, + fut, + body_tx, + body, + cb, }; - let fut = fut.map(move |result| match result { - Ok(res) => { - // record that we got the response headers - ping.record_non_data(); - - let content_length = headers::content_length_parse_all(res.headers()); - if let (Some(mut send_stream), StatusCode::OK) = - (send_stream, res.status()) - { - if content_length.map_or(false, |len| len != 0) { - warn!("h2 connect response with non-zero body not supported"); - - send_stream.send_reset(h2::Reason::INTERNAL_ERROR); - return Err(( - crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), - None, - )); - } - let (parts, recv_stream) = res.into_parts(); - let mut res = Response::from_parts(parts, Body::empty()); - - let (pending, on_upgrade) = crate::upgrade::pending(); - let io = H2Upgraded { - ping, - send_stream: unsafe { UpgradedSendStream::new(send_stream) }, - recv_stream, - buf: Bytes::new(), - }; - let upgraded = Upgraded::new(io, Bytes::new()); - - pending.fulfill(upgraded); - res.extensions_mut().insert(on_upgrade); - - Ok(res) - } else { - let res = res.map(|stream| { - let ping = ping.for_stream(&stream); - crate::Body::h2(stream, content_length.into(), ping) - }); - Ok(res) - } + // Check poll_ready() again. + // If the call to send_request() resulted in the new stream being pending open + // we have to wait for the open to complete before accepting new requests. + match self.h2_tx.poll_ready(cx) { + Poll::Pending => { + // Save Context + self.fut_ctx = Some(f); + return Poll::Pending; } - Err(err) => { - ping.ensure_not_timed_out().map_err(|e| (e, None))?; - - debug!("client response error: {}", err); - Err((crate::Error::new_h2(err), None)) + Poll::Ready(Ok(())) => (), + Poll::Ready(Err(err)) => { + f.cb.send(Err((crate::Error::new_h2(err), None))); + continue; } - }); - self.executor.execute(cb.send_when(fut)); + } + self.poll_pipe(f, cx); continue; } diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index 5857c919d1..d50850d0a0 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -4,14 +4,15 @@ use http::header::{HeaderName, CONNECTION, TE, TRAILER, TRANSFER_ENCODING, UPGRA use http::HeaderMap; use pin_project_lite::pin_project; use std::error::Error as StdError; +use std::future::Future; use std::io::{self, Cursor, IoSlice}; use std::mem; -use std::task::Context; +use std::pin::Pin; +use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tracing::{debug, trace, warn}; use crate::body::HttpBody; -use crate::common::{task, Future, Pin, Poll}; use crate::proto::h2::ping::Recorder; pub(crate) mod ping; @@ -116,7 +117,7 @@ where { type Output = crate::Result<()>; - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut me = self.project(); loop { if !*me.data_done { @@ -383,14 +384,12 @@ where cx: &mut Context<'_>, ) -> Poll> { if self.send_stream.write(&[], true).is_ok() { - return Poll::Ready(Ok(())) + return Poll::Ready(Ok(())); } Poll::Ready(Err(h2_to_io_error( match ready!(self.send_stream.poll_reset(cx)) { - Ok(Reason::NO_ERROR) => { - return Poll::Ready(Ok(())) - } + Ok(Reason::NO_ERROR) => return Poll::Ready(Ok(())), Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) } diff --git 
a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs index 1e8386497c..d830c93eda 100644 --- a/src/proto/h2/ping.rs +++ b/src/proto/h2/ping.rs @@ -328,7 +328,7 @@ impl Ponger { } } - if let Some(ref mut bdp) = self.bdp { + if let Some(ref mut bdp) = self.bdp { let bytes = locked.bytes.expect("bdp enabled implies bytes"); locked.bytes = Some(0); // reset trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt); @@ -336,7 +336,7 @@ impl Ponger { let update = bdp.calculate(bytes, rtt); locked.next_bdp_at = Some(now + bdp.ping_delay); if let Some(update) = update { - return Poll::Ready(Ponged::SizeUpdate(update)) + return Poll::Ready(Ponged::SizeUpdate(update)); } } } diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs index d24e6bac5f..b7bff590ff 100644 --- a/src/proto/h2/server.rs +++ b/src/proto/h2/server.rs @@ -1,5 +1,8 @@ use std::error::Error as StdError; +use std::future::Future; use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; #[cfg(feature = "runtime")] use std::time::Duration; @@ -13,8 +16,8 @@ use tracing::{debug, trace, warn}; use super::{ping, PipeToSendStream, SendBuf}; use crate::body::HttpBody; +use crate::common::date; use crate::common::exec::ConnStreamExec; -use crate::common::{date, task, Future, Pin, Poll}; use crate::ext::Protocol; use crate::headers; use crate::proto::h2::ping::Recorder; @@ -35,8 +38,8 @@ const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024; // 1mb const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400; // 400kb -// 16 MB "sane default" taken from golang http2 -const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 16 << 20; +const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 16 << 20; // 16 MB "sane default" taken from golang http2 +const DEFAULT_MAX_LOCAL_ERROR_RESET_STREAMS: usize = 1024; #[derive(Clone, Debug)] pub(crate) struct Config { @@ -46,6 +49,8 @@ pub(crate) struct Config { pub(crate) max_frame_size: u32, pub(crate) enable_connect_protocol: bool, pub(crate) max_concurrent_streams: Option, + pub(crate) max_pending_accept_reset_streams: Option, + pub(crate) max_local_error_reset_streams: Option, #[cfg(feature = "runtime")] pub(crate) keep_alive_interval: Option, #[cfg(feature = "runtime")] @@ -63,6 +68,8 @@ impl Default for Config { max_frame_size: DEFAULT_MAX_FRAME_SIZE, enable_connect_protocol: false, max_concurrent_streams: None, + max_pending_accept_reset_streams: None, + max_local_error_reset_streams: Some(DEFAULT_MAX_LOCAL_ERROR_RESET_STREAMS), #[cfg(feature = "runtime")] keep_alive_interval: None, #[cfg(feature = "runtime")] @@ -121,10 +128,14 @@ where .initial_connection_window_size(config.initial_conn_window_size) .max_frame_size(config.max_frame_size) .max_header_list_size(config.max_header_list_size) + .max_local_error_reset_streams(config.max_local_error_reset_streams) .max_send_buffer_size(config.max_send_buffer_size); if let Some(max) = config.max_concurrent_streams { builder.max_concurrent_streams(max); } + if let Some(max) = config.max_pending_accept_reset_streams { + builder.max_pending_accept_reset_streams(max); + } if config.enable_connect_protocol { builder.enable_connect_protocol(); } @@ -188,7 +199,7 @@ where { type Output = crate::Result; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let me = &mut *self; loop { let next = match me.state { @@ -231,7 +242,7 @@ where { fn poll_server( &mut self, - cx: 
&mut task::Context<'_>, + cx: &mut Context<'_>, service: &mut S, exec: &mut E, ) -> Poll> @@ -351,7 +362,7 @@ where Poll::Ready(Err(self.closing.take().expect("polled after error"))) } - fn poll_ping(&mut self, cx: &mut task::Context<'_>) { + fn poll_ping(&mut self, cx: &mut Context<'_>) { if let Some((_, ref mut estimator)) = self.ping { match estimator.poll(cx) { Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => { @@ -442,7 +453,7 @@ where B::Error: Into>, E: Into>, { - fn poll2(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + fn poll2(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut me = self.project(); loop { let next = match me.state.as_mut().project() { @@ -503,7 +514,6 @@ where } } - if !body.is_end_stream() { // automatically set Content-Length from body... if let Some(len) = body.size_hint().exact() { @@ -538,7 +548,7 @@ where { type Output = (); - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.poll2(cx).map(|res| { if let Err(e) = res { debug!("stream error: {}", e); diff --git a/src/proto/mod.rs b/src/proto/mod.rs index f938bf532b..3628576dc1 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -50,7 +50,7 @@ pub(crate) enum BodyLength { Unknown, } -/// Status of when a Disaptcher future completes. +/// Status of when a Dispatcher future completes. pub(crate) enum Dispatched { /// Dispatcher completely shutdown connection. Shutdown, diff --git a/src/server/accept.rs b/src/server/accept.rs index 4b7a1487dd..07dcd62524 100644 --- a/src/server/accept.rs +++ b/src/server/accept.rs @@ -6,16 +6,14 @@ //! connections. //! - Utilities like `poll_fn` to ease creating a custom `Accept`. +use std::pin::Pin; +use std::task::{Context, Poll}; + #[cfg(feature = "stream")] use futures_core::Stream; #[cfg(feature = "stream")] use pin_project_lite::pin_project; -use crate::common::{ - task::{self, Poll}, - Pin, -}; - /// Asynchronously accept incoming connections. pub trait Accept { /// The connection type that can be accepted. @@ -26,7 +24,7 @@ pub trait Accept { /// Poll to accept the next connection. fn poll_accept( self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, ) -> Poll>>; } @@ -51,7 +49,7 @@ pub trait Accept { /// ``` pub fn poll_fn(func: F) -> impl Accept where - F: FnMut(&mut task::Context<'_>) -> Poll>>, + F: FnMut(&mut Context<'_>) -> Poll>>, { struct PollFn(F); @@ -60,13 +58,13 @@ where impl Accept for PollFn where - F: FnMut(&mut task::Context<'_>) -> Poll>>, + F: FnMut(&mut Context<'_>) -> Poll>>, { type Conn = IO; type Error = E; fn poll_accept( self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, ) -> Poll>> { (self.get_mut().0)(cx) } @@ -101,7 +99,7 @@ where type Error = E; fn poll_accept( self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, ) -> Poll>> { self.project().stream.poll_next(cx) } diff --git a/src/server/conn.rs b/src/server/conn.rs index d5370b0f14..951c9ee5cd 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -58,11 +58,22 @@ use crate::error::{Kind, Parse}; #[cfg(feature = "http1")] use crate::upgrade::Upgraded; +#[cfg(all(feature = "backports", feature = "http1"))] +pub mod http1; +#[cfg(all(feature = "backports", feature = "http2"))] +pub mod http2; + cfg_feature! 
{ #![any(feature = "http1", feature = "http2")] use std::error::Error as StdError; use std::fmt; + use std::task::{Context, Poll}; + use std::pin::Pin; + use std::future::Future; + use std::marker::Unpin; + #[cfg(not(all(feature = "http1", feature = "http2")))] + use std::convert::Infallible; use bytes::Bytes; use pin_project_lite::pin_project; @@ -71,9 +82,6 @@ cfg_feature! { pub use super::server::Connecting; use crate::body::{Body, HttpBody}; - use crate::common::{task, Future, Pin, Poll, Unpin}; - #[cfg(not(all(feature = "http1", feature = "http2")))] - use crate::common::Never; use crate::common::exec::{ConnStreamExec, Exec}; use crate::proto; use crate::service::HttpService; @@ -93,6 +101,12 @@ pub use super::tcp::{AddrIncoming, AddrStream}; #[derive(Clone, Debug)] #[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This struct will be replaced with `server::conn::http1::Builder` and `server::conn::http2::Builder` in 1.0, enable the \"backports\" feature to use them now." + ) +)] pub struct Http { pub(crate) exec: E, h1_half_close: bool, @@ -145,14 +159,14 @@ type Http1Dispatcher = proto::h1::Dispatcher, B, T, proto::ServerTransaction>; #[cfg(all(not(feature = "http1"), feature = "http2"))] -type Http1Dispatcher = (Never, PhantomData<(T, Box>, Box>)>); +type Http1Dispatcher = (Infallible, PhantomData<(T, Box>, Box>)>); #[cfg(feature = "http2")] type Http2Server = proto::h2::Server, S, B, E>; #[cfg(all(not(feature = "http2"), feature = "http1"))] type Http2Server = ( - Never, + Infallible, PhantomData<(T, Box>, Box>, Box>)>, ); @@ -208,6 +222,12 @@ impl Unpin for Fallback {} #[derive(Debug)] #[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This struct will be replaced with `server::conn::http1::Parts` in 1.0, enable the \"backports\" feature to use them now." + ) +)] pub struct Parts { /// The original IO object used in the handshake. pub io: T, @@ -227,6 +247,7 @@ pub struct Parts { // ===== impl Http ===== +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[cfg(any(feature = "http1", feature = "http2"))] impl Http { /// Creates a new instance of the HTTP protocol, ready to spawn a server or @@ -250,6 +271,7 @@ impl Http { } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[cfg(any(feature = "http1", feature = "http2"))] impl Http { /// Sets whether HTTP1 is required. @@ -327,7 +349,7 @@ impl Http { self } - /// Set a timeout for reading client request headers. If a client does not + /// Set a timeout for reading client request headers. If a client does not /// transmit the entire header within this time, the connection is closed. /// /// Default is None. @@ -375,6 +397,40 @@ impl Http { self } + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). + /// As of v0.3.17, it is 20. + /// + /// See for more information. 
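For reference, a hedged usage sketch of this new knob on the connection-level builder (assumes hyper 0.14 with the `http1`, `http2`, `server`, `tcp`, and `runtime` features; the deprecation above only fires when the `deprecated` feature is enabled):

```rust
use std::convert::Infallible;

use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpListener;

async fn hello(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
    Ok(Response::new(Body::from("hello")))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let listener = TcpListener::bind("127.0.0.1:3000").await?;

    let mut http = Http::new();
    // Cap streams that were reset while still pending accept; `None` keeps
    // h2's own default (20 as of h2 v0.3.17).
    http.http2_max_pending_accept_reset_streams(Some(30));

    loop {
        let (stream, _) = listener.accept().await?;
        let http = http.clone();
        tokio::spawn(async move {
            if let Err(e) = http.serve_connection(stream, service_fn(hello)).await {
                eprintln!("connection error: {}", e);
            }
        });
    }
}
```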
+ #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_pending_accept_reset_streams( + &mut self, + max: impl Into>, + ) -> &mut Self { + self.h2_builder.max_pending_accept_reset_streams = max.into(); + + self + } + + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). + /// As of v0.3.17, it is 20. + /// + /// See for more information. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_local_error_reset_streams( + &mut self, + max: impl Into>, + ) -> &mut Self { + self.h2_builder.max_local_error_reset_streams = max.into(); + + self + } + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// @@ -733,6 +789,7 @@ where /// /// # Panics /// This method will panic if this connection is using an h2 protocol. + #[cfg_attr(feature = "deprecated", allow(deprecated))] pub fn into_parts(self) -> Parts { self.try_into_parts() .unwrap_or_else(|| panic!("h2 cannot into_inner")) @@ -741,6 +798,7 @@ where /// Return the inner IO object, and additional information, if available. /// /// This method will return a `None` if this connection is using an h2 protocol. + #[cfg_attr(feature = "deprecated", allow(deprecated))] pub fn try_into_parts(self) -> Option> { match self.conn.unwrap() { #[cfg(feature = "http1")] @@ -767,12 +825,7 @@ where /// upgrade. Once the upgrade is completed, the connection would be "done", /// but it is not desired to actually shutdown the IO object. Instead you /// would take it back using `into_parts`. - pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> - where - S: Unpin, - S::Future: Unpin, - B: Unpin, - { + pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> { loop { match *self.conn.as_mut().unwrap() { #[cfg(feature = "http1")] @@ -808,16 +861,17 @@ where /// # Error /// /// This errors if the underlying connection protocol is not HTTP/1. - pub fn without_shutdown(self) -> impl Future>> - where - S: Unpin, - S::Future: Unpin, - B: Unpin, - { + #[cfg_attr(feature = "deprecated", allow(deprecated))] + pub fn without_shutdown(self) -> impl Future>> { let mut conn = Some(self); futures_util::future::poll_fn(move |cx| { ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?; - Poll::Ready(conn.take().unwrap().try_into_parts().ok_or_else(crate::Error::new_without_shutdown_not_h1)) + Poll::Ready( + conn.take() + .unwrap() + .try_into_parts() + .ok_or_else(crate::Error::new_without_shutdown_not_h1), + ) }) } @@ -860,14 +914,14 @@ impl Future for Connection where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + 'static, + I: AsyncRead + AsyncWrite + Unpin, B: HttpBody + 'static, B::Error: Into>, E: ConnStreamExec, { type Output = crate::Result<()>; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { match ready!(Pin::new(self.conn.as_mut().unwrap()).poll(cx)) { Ok(done) => { @@ -946,7 +1000,7 @@ where { type Output = crate::Result; - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.project() { #[cfg(feature = "http1")] ProtoServerProj::H1 { h1, .. 
} => h1.poll(cx), @@ -1007,7 +1061,7 @@ mod upgrades { { type Output = crate::Result<()>; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { match ready!(Pin::new(self.inner.conn.as_mut().unwrap()).poll(cx)) { Ok(proto::Dispatched::Shutdown) => return Poll::Ready(Ok(())), diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs new file mode 100644 index 0000000000..ab833b938b --- /dev/null +++ b/src/server/conn/http1.rs @@ -0,0 +1,449 @@ +//! HTTP/1 Server Connections + +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; + +use bytes::Bytes; +use tokio::io::{AsyncRead, AsyncWrite}; + +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::proto; +use crate::service::HttpService; + +type Http1Dispatcher = proto::h1::Dispatcher< + proto::h1::dispatch::Server, + B, + T, + proto::ServerTransaction, +>; + +pin_project_lite::pin_project! { + /// A future binding an http1 connection with a Service. + /// + /// Polling this future will drive HTTP forward. + #[must_use = "futures do nothing unless polled"] + pub struct Connection + where + S: HttpService, + { + conn: Http1Dispatcher, + } +} + +/// A configuration builder for HTTP/1 server connections. +#[derive(Clone, Debug)] +pub struct Builder { + h1_half_close: bool, + h1_keep_alive: bool, + h1_title_case_headers: bool, + h1_preserve_header_case: bool, + h1_header_read_timeout: Option, + h1_writev: Option, + max_buf_size: Option, + pipeline_flush: bool, +} + +/// Deconstructed parts of a `Connection`. +/// +/// This allows taking apart a `Connection` at a later time, in order to +/// reclaim the IO object, and additional related pieces. +#[derive(Debug)] +pub struct Parts { + /// The original IO object used in the handshake. + pub io: T, + /// A buffer of bytes that have been read but not processed as HTTP. + /// + /// If the client sent additional bytes after its last request, and + /// this connection "ended" with an upgrade, the read buffer will contain + /// those bytes. + /// + /// You will want to check for any existing bytes if you plan to continue + /// communicating on the IO object. + pub read_buf: Bytes, + /// The `Service` used to serve this connection. + pub service: S, + _inner: (), +} + +// ===== impl Connection ===== + +impl fmt::Debug for Connection +where + S: HttpService, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + B: Body + 'static, + B::Error: Into>, +{ + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + /// + /// # Note + /// + /// This should only be called while the `Connection` future is still + /// pending. If called after `Connection::poll` has resolved, this does + /// nothing. + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + self.conn.disable_keep_alive(); + } + + /// Return the inner IO object, and additional information. + /// + /// If the IO object has been "rewound" the io will not contain those bytes rewound. + /// This should only be called after `poll_without_shutdown` signals + /// that the connection is "done". Otherwise, it may not have finished + /// flushing all necessary HTTP bytes. 
+ /// + /// # Panics + /// This method will panic if this connection is using an h2 protocol. + pub fn into_parts(self) -> Parts { + let (io, read_buf, dispatch) = self.conn.into_inner(); + Parts { + io, + read_buf, + service: dispatch.into_service(), + _inner: (), + } + } + + /// Poll the connection for completion, but without calling `shutdown` + /// on the underlying IO. + /// + /// This is useful to allow running a connection while doing an HTTP + /// upgrade. Once the upgrade is completed, the connection would be "done", + /// but it is not desired to actually shutdown the IO object. Instead you + /// would take it back using `into_parts`. + pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll> + where + S: Unpin, + S::Future: Unpin, + B: Unpin, + { + self.conn.poll_without_shutdown(cx) + } + + /// Prevent shutdown of the underlying IO object at the end of service the request, + /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. + /// + /// # Error + /// + /// This errors if the underlying connection protocol is not HTTP/1. + pub fn without_shutdown(self) -> impl Future>> + where + S: Unpin, + S::Future: Unpin, + B: Unpin, + { + let mut zelf = Some(self); + futures_util::future::poll_fn(move |cx| { + ready!(zelf.as_mut().unwrap().conn.poll_without_shutdown(cx))?; + Poll::Ready(Ok(zelf.take().unwrap().into_parts())) + }) + } + + /// Enable this connection to support higher-level HTTP upgrades. + /// + /// See [the `upgrade` module](crate::upgrade) for more. + pub fn with_upgrades(self) -> upgrades::UpgradeableConnection + where + I: Send, + { + upgrades::UpgradeableConnection { inner: Some(self) } + } +} + +impl Future for Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin + 'static, + B: Body + 'static, + B::Error: Into>, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.conn).poll(cx)) { + Ok(done) => { + match done { + proto::Dispatched::Shutdown => {} + proto::Dispatched::Upgrade(pending) => { + // With no `Send` bound on `I`, we can't try to do + // upgrades here. In case a user was trying to use + // `Body::on_upgrade` with this API, send a special + // error letting them know about that. + pending.manual(); + } + }; + return Poll::Ready(Ok(())); + } + Err(e) => Poll::Ready(Err(e)), + } + } +} + +// ===== impl Builder ===== + +impl Builder { + /// Create a new connection builder. + pub fn new() -> Self { + Self { + h1_half_close: false, + h1_keep_alive: true, + h1_title_case_headers: false, + h1_preserve_header_case: false, + h1_header_read_timeout: None, + h1_writev: None, + max_buf_size: None, + pipeline_flush: false, + } + } + /// Set whether HTTP/1 connections should support half-closures. + /// + /// Clients can chose to shutdown their write-side while waiting + /// for the server to respond. Setting this to `true` will + /// prevent closing the connection immediately if `read` + /// detects an EOF in the middle of a request. + /// + /// Default is `false`. + pub fn half_close(&mut self, val: bool) -> &mut Self { + self.h1_half_close = val; + self + } + + /// Enables or disables HTTP/1 keep-alive. + /// + /// Default is true. + pub fn keep_alive(&mut self, val: bool) -> &mut Self { + self.h1_keep_alive = val; + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Default is false. 
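As a usage sketch of this backported builder (assumes hyper 0.14 with the new `backports` feature plus `http1`, `server`, `tcp`, and `runtime`; the option names are the ones defined in this file):

```rust
use std::convert::Infallible;
use std::time::Duration;

use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpListener;

async fn hello(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
    Ok(Response::new(Body::from("hello")))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let listener = TcpListener::bind("127.0.0.1:3000").await?;

    // Configure the per-connection HTTP/1 options shown in this file.
    let mut builder = http1::Builder::new();
    builder
        .half_close(true)
        .header_read_timeout(Duration::from_secs(30))
        .title_case_headers(true);

    loop {
        let (stream, _) = listener.accept().await?;
        let conn = builder.serve_connection(stream, service_fn(hello));
        tokio::spawn(async move {
            if let Err(e) = conn.await {
                eprintln!("connection error: {}", e);
            }
        });
    }
}
```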
+ pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self { + self.h1_title_case_headers = enabled; + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Request`. It will also look for and use + /// such an extension in any provided `Response`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Default is false. + pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self { + self.h1_preserve_header_case = enabled; + self + } + + /// Set a timeout for reading client request headers. If a client does not + /// transmit the entire header within this time, the connection is closed. + /// + /// Default is None. + pub fn header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self { + self.h1_header_read_timeout = Some(read_timeout); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. In this mode hyper will try to guess which + /// mode to use + pub fn writev(&mut self, val: bool) -> &mut Self { + self.h1_writev = Some(val); + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + pub fn max_buf_size(&mut self, max: usize) -> &mut Self { + assert!( + max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, + "the max_buf_size cannot be smaller than the minimum that h1 specifies." + ); + self.max_buf_size = Some(max); + self + } + + /// Aggregates flushes to better support pipelined responses. + /// + /// Experimental, may have bugs. + /// + /// Default is false. + pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self { + self.pipeline_flush = enabled; + self + } + + // /// Set the timer used in background tasks. + // pub fn timer(&mut self, timer: M) -> &mut Self + // where + // M: Timer + Send + Sync + 'static, + // { + // self.timer = Time::Timer(Arc::new(timer)); + // self + // } + + /// Bind a connection together with a [`Service`](crate::service::Service). + /// + /// This returns a Future that must be polled in order for HTTP to be + /// driven on the connection. 
+ /// + /// # Example + /// + /// ``` + /// # use hyper::{Body as Incoming, Request, Response}; + /// # use hyper::service::Service; + /// # use hyper::server::conn::http1::Builder; + /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # async fn run(some_io: I, some_service: S) + /// # where + /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + /// # S: Service, Response=hyper::Response> + Send + 'static, + /// # S::Error: Into>, + /// # S::Future: Send, + /// # { + /// let http = Builder::new(); + /// let conn = http.serve_connection(some_io, some_service); + /// + /// if let Err(e) = conn.await { + /// eprintln!("server connection error: {}", e); + /// } + /// # } + /// # fn main() {} + /// ``` + pub fn serve_connection(&self, io: I, service: S) -> Connection + where + S: HttpService, + S::Error: Into>, + S::ResBody: 'static, + ::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + { + let mut conn = proto::Conn::new(io); + if !self.h1_keep_alive { + conn.disable_keep_alive(); + } + if self.h1_half_close { + conn.set_allow_half_close(); + } + if self.h1_title_case_headers { + conn.set_title_case_headers(); + } + if self.h1_preserve_header_case { + conn.set_preserve_header_case(); + } + if let Some(header_read_timeout) = self.h1_header_read_timeout { + conn.set_http1_header_read_timeout(header_read_timeout); + } + if let Some(writev) = self.h1_writev { + if writev { + conn.set_write_strategy_queue(); + } else { + conn.set_write_strategy_flatten(); + } + } + conn.set_flush_pipeline(self.pipeline_flush); + if let Some(max) = self.max_buf_size { + conn.set_max_buf_size(max); + } + let sd = proto::h1::dispatch::Server::new(service); + let proto = proto::h1::Dispatcher::new(sd, conn); + Connection { conn: proto } + } +} + +mod upgrades { + use crate::upgrade::Upgraded; + + use super::*; + + // A future binding a connection with a Service with Upgrade support. + // + // This type is unnameable outside the crate. + #[must_use = "futures do nothing unless polled"] + #[allow(missing_debug_implementations)] + pub struct UpgradeableConnection + where + S: HttpService, + { + pub(super) inner: Option>, + } + + impl UpgradeableConnection + where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + B: Body + 'static, + B::Error: Into>, + { + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown() + } + } + + impl Future for UpgradeableConnection + where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Error: Into>, + { + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.inner.as_mut().unwrap().conn).poll(cx)) { + Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())), + Ok(proto::Dispatched::Upgrade(pending)) => { + let (io, buf, _) = self.inner.take().unwrap().conn.into_inner(); + pending.fulfill(Upgraded::new(io, buf)); + Poll::Ready(Ok(())) + } + Err(e) => Poll::Ready(Err(e)), + } + } + } +} diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs new file mode 100644 index 0000000000..f08536c7f1 --- /dev/null +++ b/src/server/conn/http2.rs @@ -0,0 +1,277 @@ +//! 
HTTP/2 Server Connections + +use std::error::Error as StdError; +use std::fmt; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; + +use pin_project_lite::pin_project; +use tokio::io::{AsyncRead, AsyncWrite}; + +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::common::exec::ConnStreamExec; +use crate::proto; +use crate::service::HttpService; + +pin_project! { + /// A future binding an HTTP/2 connection with a Service. + /// + /// Polling this future will drive HTTP forward. + #[must_use = "futures do nothing unless polled"] + pub struct Connection + where + S: HttpService, + { + conn: proto::h2::Server, + } +} + +/// A configuration builder for HTTP/2 server connections. +#[derive(Clone, Debug)] +pub struct Builder { + exec: E, + h2_builder: proto::h2::server::Config, +} + +// ===== impl Connection ===== + +impl fmt::Debug for Connection +where + S: HttpService, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + B: Body + 'static, + B::Error: Into>, + E: ConnStreamExec, +{ + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + /// + /// # Note + /// + /// This should only be called while the `Connection` future is still + /// pending. If called after `Connection::poll` has resolved, this does + /// nothing. + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + self.conn.graceful_shutdown(); + } +} + +impl Future for Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin + 'static, + B: Body + 'static, + B::Error: Into>, + E: ConnStreamExec, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.conn).poll(cx)) { + Ok(_done) => { + //TODO: the proto::h2::Server no longer needs to return + //the Dispatched enum + Poll::Ready(Ok(())) + } + Err(e) => Poll::Ready(Err(e)), + } + } +} + +// ===== impl Builder ===== + +impl Builder { + /// Create a new connection builder. + /// + /// This starts with the default options, and an executor. + pub fn new(exec: E) -> Self { + Self { + exec: exec, + h2_builder: Default::default(), + } + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_stream_window_size = sz; + } + self + } + + /// Sets the max connection-level flow control for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_conn_window_size = sz; + } + self + } + + /// Sets whether to use an adaptive flow control. + /// + /// Enabling this will override the limits set in + /// `initial_stream_window_size` and + /// `initial_connection_window_size`. 
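Unlike the HTTP/1 builder, this one is constructed with an executor for spawning stream tasks. A hedged sketch (assumes the `backports`, `http2`, `server`, and `runtime` features; `TokioExecutor` here is an illustrative helper, not a hyper type):

```rust
use std::convert::Infallible;
use std::future::Future;

use hyper::server::conn::http2;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpListener;

// A minimal executor that spawns HTTP/2 stream tasks onto the tokio runtime.
#[derive(Clone)]
struct TokioExecutor;

impl<F> hyper::rt::Executor<F> for TokioExecutor
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    fn execute(&self, fut: F) {
        tokio::spawn(fut);
    }
}

async fn hello(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
    Ok(Response::new(Body::from("hello")))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let listener = TcpListener::bind("127.0.0.1:3000").await?;

    let mut builder = http2::Builder::new(TokioExecutor);
    builder.adaptive_window(true).max_concurrent_streams(Some(250));

    loop {
        // Note: on a plaintext socket like this, clients must use HTTP/2
        // prior knowledge, since there is no ALPN negotiation.
        let (stream, _) = listener.accept().await?;
        let conn = builder.serve_connection(stream, service_fn(hello));
        tokio::spawn(async move {
            if let Err(e) = conn.await {
                eprintln!("connection error: {}", e);
            }
        });
    }
}
```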
+ pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { + use proto::h2::SPEC_WINDOW_SIZE; + + self.h2_builder.adaptive_window = enabled; + if enabled { + self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; + self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; + } + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.max_frame_size = sz; + } + self + } + + /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 + /// connections. + /// + /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS + pub fn max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { + self.h2_builder.max_concurrent_streams = max.into(); + self + } + + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). + /// As of v0.3.17, it is 20. + /// + /// See for more information. + pub fn max_pending_accept_reset_streams(&mut self, max: impl Into>) -> &mut Self { + self.h2_builder.max_pending_accept_reset_streams = max.into(); + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. + #[cfg(feature = "runtime")] + #[cfg_attr(docsrs, doc(cfg(feature = "runtime")))] + pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { + self.h2_builder.keep_alive_interval = interval.into(); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + /// + /// # Cargo Feature + /// + /// Requires the `runtime` cargo feature to be enabled. + #[cfg(feature = "runtime")] + #[cfg_attr(docsrs, doc(cfg(feature = "runtime")))] + pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.h2_builder.keep_alive_timeout = timeout; + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently ~400KB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.h2_builder.max_send_buffer_size = max; + self + } + + /// Enables the [extended CONNECT protocol]. + /// + /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + pub fn enable_connect_protocol(&mut self) -> &mut Self { + self.h2_builder.enable_connect_protocol = true; + self + } + + /// Sets the max size of received header frames. + /// + /// Default is currently ~16MB, but may change. + pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { + self.h2_builder.max_header_list_size = max; + self + } + + // /// Set the timer used in background tasks. 
+ // pub fn timer(&mut self, timer: M) -> &mut Self + // where + // M: Timer + Send + Sync + 'static, + // { + // self.timer = Time::Timer(Arc::new(timer)); + // self + // } + + /// Bind a connection together with a [`Service`](crate::service::Service). + /// + /// This returns a Future that must be polled in order for HTTP to be + /// driven on the connection. + pub fn serve_connection(&self, io: I, service: S) -> Connection + where + S: HttpService, + S::Error: Into>, + Bd: Body + 'static, + Bd::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + E: ConnStreamExec, + { + let proto = proto::h2::Server::new(io, service, &self.h2_builder, self.exec.clone()); + Connection { conn: proto } + } +} diff --git a/src/server/mod.rs b/src/server/mod.rs index e763d0e7c0..65eb7063e5 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -158,6 +158,7 @@ pub use self::server::Server; cfg_feature! { #![any(feature = "http1", feature = "http2")] + #[cfg_attr(feature = "deprecated", allow(deprecated))] pub(crate) mod server; pub use self::server::Builder; diff --git a/src/server/server.rs b/src/server/server.rs index e3058da4bb..4cccedd98a 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -1,11 +1,16 @@ use std::error::Error as StdError; use std::fmt; +use std::future::Future; +use std::marker::Unpin; #[cfg(feature = "tcp")] use std::net::{SocketAddr, TcpListener as StdTcpListener}; -#[cfg(any(feature = "tcp", feature = "http1"))] +use std::pin::Pin; +use std::task::{Context, Poll}; +#[cfg(feature = "tcp")] use std::time::Duration; use pin_project_lite::pin_project; + use tokio::io::{AsyncRead, AsyncWrite}; use tracing::trace; @@ -15,7 +20,6 @@ use super::tcp::AddrIncoming; use crate::body::{Body, HttpBody}; use crate::common::exec::Exec; use crate::common::exec::{ConnStreamExec, NewSvcExec}; -use crate::common::{task, Future, Pin, Poll, Unpin}; // Renamed `Http` as `Http_` for now so that people upgrading don't see an // error that `hyper::server::Http` is private... use super::conn::{Connection, Http as Http_, UpgradeableConnection}; @@ -160,7 +164,7 @@ where fn poll_next_( self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, ) -> Poll>>> { let me = self.project(); match ready!(me.make_service.poll_ready_ref(cx)) { @@ -186,7 +190,7 @@ where pub(super) fn poll_watch( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, watcher: &W, ) -> Poll> where @@ -219,7 +223,7 @@ where { type Output = crate::Result<()>; - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.poll_watch(cx, &NoopWatcher) } } @@ -371,6 +375,33 @@ impl Builder { self } + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to whatever the default in h2 is. As of v0.3.17, it is 20. + /// + /// See for more information. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_pending_accept_reset_streams(mut self, max: impl Into>) -> Self { + self.protocol.http2_max_pending_accept_reset_streams(max); + self + } + + /// Configures the maximum number of local reset streams allowed before a GOAWAY will be sent. + /// + /// If not set, hyper will use a default, currently of 1024. + /// + /// If `None` is supplied, hyper will not apply any limit. + /// This is not advised, as it can potentially expose servers to DOS vulnerabilities. + /// + /// See for more information. 
+ #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_local_error_reset_streams(mut self, max: impl Into>) -> Self { + self.protocol.http2_max_local_error_reset_streams(max); + self + } + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// @@ -423,6 +454,16 @@ impl Builder { self } + /// Sets the max size of received header frames. + /// + /// Default is currently ~16MB, but may change. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_header_list_size(mut self, max: u32) -> Self { + self.protocol.http2_max_header_list_size(max); + self + } + /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 /// connections. /// @@ -559,16 +600,27 @@ impl Builder { doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))) )] impl Builder { - /// Set whether TCP keepalive messages are enabled on accepted connections. + /// Set the duration to remain idle before sending TCP keepalive probes. /// - /// If `None` is specified, keepalive is disabled, otherwise the duration - /// specified will be the time to remain idle before sending TCP keepalive - /// probes. + /// If `None` is specified, keepalive is disabled. pub fn tcp_keepalive(mut self, keepalive: Option) -> Self { self.incoming.set_keepalive(keepalive); self } + /// Set the duration between two successive TCP keepalive retransmissions, + /// if acknowledgement to the previous keepalive transmission is not received. + pub fn tcp_keepalive_interval(mut self, interval: Option) -> Self { + self.incoming.set_keepalive_interval(interval); + self + } + + /// Set the number of retransmissions to be carried out before declaring that remote end is not available. + pub fn tcp_keepalive_retries(mut self, retries: Option) -> Self { + self.incoming.set_keepalive_retries(retries); + self + } + /// Set the value of `TCP_NODELAY` option for accepted connections. pub fn tcp_nodelay(mut self, enabled: bool) -> Self { self.incoming.set_nodelay(enabled); @@ -594,6 +646,14 @@ impl Builder { self.incoming.set_sleep_on_errors(val); self } + + /// Returns the local address that the server will be bound to. + /// + /// This might be useful when knowing the address is required before calling `Builder::serve`, + /// but the address is not otherwise available (for e.g. when binding to port 0). + pub fn local_addr(&self) -> SocketAddr { + self.incoming.local_addr() + } } // Used by `Server` to optionally watch a `Connection` future. @@ -632,13 +692,17 @@ where // used by exec.rs pub(crate) mod new_svc { use std::error::Error as StdError; + use std::future::Future; + use std::marker::Unpin; + use std::pin::Pin; + use std::task::{Context, Poll}; + use tokio::io::{AsyncRead, AsyncWrite}; use tracing::debug; use super::{Connecting, Watcher}; use crate::body::{Body, HttpBody}; use crate::common::exec::ConnStreamExec; - use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::HttpService; use pin_project_lite::pin_project; @@ -699,7 +763,7 @@ pub(crate) mod new_svc { { type Output = (); - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { // If it weren't for needing to name this type so the `Send` bounds // could be projected to the `Serve` executor, this could just be // an `async fn`, and much safer. Woe is me. 
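A minimal usage sketch of the server `Builder` options added above, assuming the `tcp` and `http2` cargo features (e.g. `full`); the address, keepalive values, limits, and echo body are placeholders:

use std::convert::Infallible;
use std::time::Duration;

use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};

async fn run() -> Result<(), hyper::Error> {
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("hello")))
        }))
    });

    let builder = Server::bind(&([127, 0, 0, 1], 0).into())
        // Idle time before the first TCP keepalive probe, the gap between probes,
        // and how many unanswered probes to send before giving up.
        .tcp_keepalive(Some(Duration::from_secs(60)))
        .tcp_keepalive_interval(Some(Duration::from_secs(10)))
        .tcp_keepalive_retries(Some(3))
        // New HTTP/2 limits; `http2_max_local_error_reset_streams` is the companion knob.
        .http2_max_pending_accept_reset_streams(Some(20))
        .http2_max_header_list_size(16 * 1024);

    // `local_addr()` is now available before `serve()`, which helps when binding to port 0.
    println!("listening on {}", builder.local_addr());

    builder.serve(make_svc).await
}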
@@ -767,7 +831,7 @@ where { type Output = Result, FE>; - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut me = self.project(); let service = ready!(me.future.poll(cx))?; let io = Option::take(&mut me.io).expect("polled after complete"); diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs index 96937d0827..be858481c5 100644 --- a/src/server/shutdown.rs +++ b/src/server/shutdown.rs @@ -1,4 +1,8 @@ use std::error::Error as StdError; +use std::future::Future; +use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; @@ -10,7 +14,6 @@ use super::server::{Server, Watcher}; use crate::body::{Body, HttpBody}; use crate::common::drain::{self, Draining, Signal, Watch, Watching}; use crate::common::exec::{ConnStreamExec, NewSvcExec}; -use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::{HttpService, MakeServiceRef}; pin_project! { @@ -63,7 +66,7 @@ where { type Output = crate::Result<()>; - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut me = self.project(); loop { let next = { diff --git a/src/server/tcp.rs b/src/server/tcp.rs index 7e70ce3ac3..edbddcd94b 100644 --- a/src/server/tcp.rs +++ b/src/server/tcp.rs @@ -1,25 +1,117 @@ +use socket2::TcpKeepalive; use std::fmt; +use std::future::Future; use std::io; use std::net::{SocketAddr, TcpListener as StdTcpListener}; +use std::pin::Pin; +use std::task::{Context, Poll}; use std::time::Duration; use tokio::net::TcpListener; use tokio::time::Sleep; use tracing::{debug, error, trace}; -use crate::common::{task, Future, Pin, Poll}; - #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 pub use self::addr_stream::AddrStream; use super::accept::Accept; +#[derive(Default, Debug, Clone, Copy)] +struct TcpKeepaliveConfig { + time: Option, + interval: Option, + retries: Option, +} + +impl TcpKeepaliveConfig { + /// Converts into a `socket2::TcpKeealive` if there is any keep alive configuration. 
+ fn into_socket2(self) -> Option { + let mut dirty = false; + let mut ka = TcpKeepalive::new(); + if let Some(time) = self.time { + ka = ka.with_time(time); + dirty = true + } + if let Some(interval) = self.interval { + ka = Self::ka_with_interval(ka, interval, &mut dirty) + }; + if let Some(retries) = self.retries { + ka = Self::ka_with_retries(ka, retries, &mut dirty) + }; + if dirty { + Some(ka) + } else { + None + } + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + windows, + ))] + fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive { + *dirty = true; + ka.with_interval(interval) + } + + #[cfg(not(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + windows, + )))] + fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive { + ka // no-op as keepalive interval is not supported on this platform + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ))] + fn ka_with_retries(ka: TcpKeepalive, retries: u32, dirty: &mut bool) -> TcpKeepalive { + *dirty = true; + ka.with_retries(retries) + } + + #[cfg(not(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + )))] + fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive { + ka // no-op as keepalive retries is not supported on this platform + } +} + /// A stream of connections from binding to an address. #[must_use = "streams do nothing unless polled"] pub struct AddrIncoming { addr: SocketAddr, listener: TcpListener, sleep_on_errors: bool, - tcp_keepalive_timeout: Option, + tcp_keepalive_config: TcpKeepaliveConfig, tcp_nodelay: bool, timeout: Option>>, } @@ -52,7 +144,7 @@ impl AddrIncoming { listener, addr, sleep_on_errors: true, - tcp_keepalive_timeout: None, + tcp_keepalive_config: TcpKeepaliveConfig::default(), tcp_nodelay: false, timeout: None, }) @@ -63,13 +155,24 @@ impl AddrIncoming { self.addr } - /// Set whether TCP keepalive messages are enabled on accepted connections. + /// Set the duration to remain idle before sending TCP keepalive probes. /// - /// If `None` is specified, keepalive is disabled, otherwise the duration - /// specified will be the time to remain idle before sending TCP keepalive - /// probes. - pub fn set_keepalive(&mut self, keepalive: Option) -> &mut Self { - self.tcp_keepalive_timeout = keepalive; + /// If `None` is specified, keepalive is disabled. + pub fn set_keepalive(&mut self, time: Option) -> &mut Self { + self.tcp_keepalive_config.time = time; + self + } + + /// Set the duration between two successive TCP keepalive retransmissions, + /// if acknowledgement to the previous keepalive transmission is not received. + pub fn set_keepalive_interval(&mut self, interval: Option) -> &mut Self { + self.tcp_keepalive_config.interval = interval; + self + } + + /// Set the number of retransmissions to be carried out before declaring that remote end is not available. 
+ pub fn set_keepalive_retries(&mut self, retries: Option) -> &mut Self { + self.tcp_keepalive_config.retries = retries; self } @@ -98,7 +201,7 @@ impl AddrIncoming { self.sleep_on_errors = val; } - fn poll_next_(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_next_(&mut self, cx: &mut Context<'_>) -> Poll> { // Check if a previous timeout is active that was set by IO errors. if let Some(ref mut to) = self.timeout { ready!(Pin::new(to).poll(cx)); @@ -108,10 +211,9 @@ impl AddrIncoming { loop { match ready!(self.listener.poll_accept(cx)) { Ok((socket, remote_addr)) => { - if let Some(dur) = self.tcp_keepalive_timeout { - let socket = socket2::SockRef::from(&socket); - let conf = socket2::TcpKeepalive::new().with_time(dur); - if let Err(e) = socket.set_tcp_keepalive(&conf) { + if let Some(tcp_keepalive) = &self.tcp_keepalive_config.into_socket2() { + let sock_ref = socket2::SockRef::from(&socket); + if let Err(e) = sock_ref.set_tcp_keepalive(tcp_keepalive) { trace!("error trying to set TCP keepalive: {}", e); } } @@ -160,7 +262,7 @@ impl Accept for AddrIncoming { fn poll_accept( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, ) -> Poll>> { let result = ready!(self.poll_next_(cx)); Poll::Ready(Some(result)) @@ -188,7 +290,7 @@ impl fmt::Debug for AddrIncoming { f.debug_struct("AddrIncoming") .field("addr", &self.addr) .field("sleep_on_errors", &self.sleep_on_errors) - .field("tcp_keepalive_timeout", &self.tcp_keepalive_timeout) + .field("tcp_keepalive_config", &self.tcp_keepalive_config) .field("tcp_nodelay", &self.tcp_nodelay) .finish() } @@ -199,11 +301,11 @@ mod addr_stream { use std::net::SocketAddr; #[cfg(unix)] use std::os::unix::io::{AsRawFd, RawFd}; + use std::pin::Pin; + use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio::net::TcpStream; - use crate::common::{task, Pin, Poll}; - pin_project_lite::pin_project! { /// A transport returned yieled by `AddrIncoming`. #[derive(Debug)] @@ -251,7 +353,7 @@ mod addr_stream { /// not yet available. 
pub fn poll_peek( &mut self, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, buf: &mut tokio::io::ReadBuf<'_>, ) -> Poll> { self.inner.poll_peek(cx, buf) @@ -262,7 +364,7 @@ mod addr_stream { #[inline] fn poll_read( self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { self.project().inner.poll_read(cx, buf) @@ -273,7 +375,7 @@ mod addr_stream { #[inline] fn poll_write( self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { self.project().inner.poll_write(cx, buf) @@ -282,20 +384,20 @@ mod addr_stream { #[inline] fn poll_write_vectored( self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll> { self.project().inner.poll_write_vectored(cx, bufs) } #[inline] - fn poll_flush(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { // TCP flush is a noop Poll::Ready(Ok(())) } #[inline] - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().inner.poll_shutdown(cx) } @@ -316,3 +418,68 @@ mod addr_stream { } } } + +#[cfg(test)] +mod tests { + use crate::server::tcp::TcpKeepaliveConfig; + use std::time::Duration; + + #[test] + fn no_tcp_keepalive_config() { + assert!(TcpKeepaliveConfig::default().into_socket2().is_none()); + } + + #[test] + fn tcp_keepalive_time_config() { + let mut kac = TcpKeepaliveConfig::default(); + kac.time = Some(Duration::from_secs(60)); + if let Some(tcp_keepalive) = kac.into_socket2() { + assert!(format!("{tcp_keepalive:?}").contains("time: Some(60s)")); + } else { + panic!("test failed"); + } + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + windows, + ))] + #[test] + fn tcp_keepalive_interval_config() { + let mut kac = TcpKeepaliveConfig::default(); + kac.interval = Some(Duration::from_secs(1)); + if let Some(tcp_keepalive) = kac.into_socket2() { + assert!(format!("{tcp_keepalive:?}").contains("interval: Some(1s)")); + } else { + panic!("test failed"); + } + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ))] + #[test] + fn tcp_keepalive_retries_config() { + let mut kac = TcpKeepaliveConfig::default(); + kac.retries = Some(3); + if let Some(tcp_keepalive) = kac.into_socket2() { + assert!(format!("{tcp_keepalive:?}").contains("retries: Some(3)")); + } else { + panic!("test failed"); + } + } +} diff --git a/src/service/http.rs b/src/service/http.rs index 81a20c80b5..d0586d8bd2 100644 --- a/src/service/http.rs +++ b/src/service/http.rs @@ -1,7 +1,8 @@ use std::error::Error as StdError; +use std::future::Future; +use std::task::{Context, Poll}; use crate::body::HttpBody; -use crate::common::{task, Future, Poll}; use crate::{Request, Response}; /// An asynchronous function from `Request` to `Response`. 
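The conversion above only yields a `socket2::TcpKeepalive` when at least one knob is set, and the accept loop then applies it through a `SockRef`. A minimal sketch of that pattern in isolation, with a placeholder duration (the interval/retries setters are additionally platform-gated, as the `cfg` lists above show):

use std::time::Duration;

use socket2::{SockRef, TcpKeepalive};
use tokio::net::TcpStream;

fn apply_keepalive(socket: &TcpStream) {
    // Only the idle time is set here; hyper builds the same struct from
    // `TcpKeepaliveConfig` and skips this call entirely when nothing is configured.
    let ka = TcpKeepalive::new().with_time(Duration::from_secs(60));

    // `SockRef::from` borrows the raw fd/handle without taking ownership of the stream.
    if let Err(e) = SockRef::from(socket).set_tcp_keepalive(&ka) {
        // hyper logs this case with `trace!` rather than printing.
        eprintln!("error trying to set TCP keepalive: {}", e);
    }
}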
@@ -20,7 +21,7 @@ pub trait HttpService: sealed::Sealed { type Future: Future, Self::Error>>; #[doc(hidden)] - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; #[doc(hidden)] fn call(&mut self, req: Request) -> Self::Future; @@ -37,7 +38,7 @@ where type Error = T::Error; type Future = T::Future; - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { tower_service::Service::poll_ready(self, cx) } diff --git a/src/service/make.rs b/src/service/make.rs index 63e6f298f1..188e4f4c32 100644 --- a/src/service/make.rs +++ b/src/service/make.rs @@ -1,11 +1,12 @@ use std::error::Error as StdError; use std::fmt; +use std::future::Future; +use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite}; use super::{HttpService, Service}; use crate::body::HttpBody; -use crate::common::{task, Future, Poll}; // The same "trait alias" as tower::MakeConnection, but inlined to reduce // dependencies. @@ -14,7 +15,7 @@ pub trait MakeConnection: self::sealed::Sealed<(Target,)> { type Error; type Future: Future>; - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; fn make_connection(&mut self, target: Target) -> Self::Future; } @@ -29,7 +30,7 @@ where type Error = S::Error; type Future = S::Future; - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { Service::poll_ready(self, cx) } @@ -58,7 +59,7 @@ pub trait MakeServiceRef: self::sealed::Sealed<(Target, ReqBody // if necessary. type __DontNameMe: self::sealed::CantImpl; - fn poll_ready_ref(&mut self, cx: &mut task::Context<'_>) -> Poll>; + fn poll_ready_ref(&mut self, cx: &mut Context<'_>) -> Poll>; fn make_service_ref(&mut self, target: &Target) -> Self::Future; } @@ -81,7 +82,7 @@ where type __DontNameMe = self::sealed::CantName; - fn poll_ready_ref(&mut self, cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready_ref(&mut self, cx: &mut Context<'_>) -> Poll> { self.poll_ready(cx) } @@ -159,7 +160,7 @@ where type Response = Svc; type Future = Ret; - fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } diff --git a/src/service/oneshot.rs b/src/service/oneshot.rs index 2697af8f4c..5e2ca47630 100644 --- a/src/service/oneshot.rs +++ b/src/service/oneshot.rs @@ -1,10 +1,12 @@ // TODO: Eventually to be replaced with tower_util::Oneshot. +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + use pin_project_lite::pin_project; use tower_service::Service; -use crate::common::{task, Future, Pin, Poll}; - pub(crate) fn oneshot(svc: S, req: Req) -> Oneshot where S: Service, @@ -47,7 +49,7 @@ where { type Output = Result; - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut me = self.project(); loop { diff --git a/src/service/util.rs b/src/service/util.rs index 7cba1206f1..59760a6858 100644 --- a/src/service/util.rs +++ b/src/service/util.rs @@ -1,9 +1,10 @@ use std::error::Error as StdError; use std::fmt; +use std::future::Future; use std::marker::PhantomData; +use std::task::{Context, Poll}; use crate::body::HttpBody; -use crate::common::{task, Future, Poll}; use crate::{Request, Response}; /// Create a `Service` from a function. 
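The service traits in this area now take `std::task::Context` and `std::future::Future` directly instead of the `crate::common::task` re-exports. A minimal sketch of a `tower_service::Service` written against those std types; the echo behavior and the direct `tower-service` dependency are assumptions of the example:

use std::convert::Infallible;
use std::future::{ready, Ready};
use std::task::{Context, Poll};

use hyper::{Body, Request, Response};

struct Echo;

impl tower_service::Service<Request<Body>> for Echo {
    type Response = Response<Body>;
    type Error = Infallible;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    // Always ready: this service applies no backpressure of its own.
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    // Echo the request body back in the response.
    fn call(&mut self, req: Request<Body>) -> Self::Future {
        ready(Ok(Response::new(req.into_body())))
    }
}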
@@ -54,7 +55,7 @@ where type Error = E; type Future = Ret; - fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } diff --git a/src/upgrade.rs b/src/upgrade.rs index 1c7b5b01cd..a46a8d224d 100644 --- a/src/upgrade.rs +++ b/src/upgrade.rs @@ -42,8 +42,11 @@ use std::any::TypeId; use std::error::Error as StdError; use std::fmt; +use std::future::Future; use std::io; use std::marker::Unpin; +use std::pin::Pin; +use std::task::{Context, Poll}; use bytes::Bytes; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; @@ -52,7 +55,6 @@ use tokio::sync::oneshot; use tracing::trace; use crate::common::io::Rewind; -use crate::common::{task, Future, Pin, Poll}; /// An upgraded HTTP connection. /// @@ -151,7 +153,7 @@ impl Upgraded { impl AsyncRead for Upgraded { fn poll_read( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { Pin::new(&mut self.io).poll_read(cx, buf) @@ -161,7 +163,7 @@ impl AsyncRead for Upgraded { impl AsyncWrite for Upgraded { fn poll_write( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(&mut self.io).poll_write(cx, buf) @@ -169,17 +171,17 @@ impl AsyncWrite for Upgraded { fn poll_write_vectored( mut self: Pin<&mut Self>, - cx: &mut task::Context<'_>, + cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll> { Pin::new(&mut self.io).poll_write_vectored(cx, bufs) } - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.io).poll_flush(cx) } - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.io).poll_shutdown(cx) } @@ -210,7 +212,7 @@ impl OnUpgrade { impl Future for OnUpgrade { type Output = Result; - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { match self.rx { Some(ref mut rx) => Pin::new(rx).poll(cx).map(|res| match res { Ok(Ok(upgraded)) => Ok(upgraded), @@ -351,7 +353,7 @@ mod tests { impl AsyncRead for Mock { fn poll_read( self: Pin<&mut Self>, - _cx: &mut task::Context<'_>, + _cx: &mut Context<'_>, _buf: &mut ReadBuf<'_>, ) -> Poll> { unreachable!("Mock::poll_read") @@ -361,21 +363,18 @@ mod tests { impl AsyncWrite for Mock { fn poll_write( self: Pin<&mut Self>, - _: &mut task::Context<'_>, + _: &mut Context<'_>, buf: &[u8], ) -> Poll> { // panic!("poll_write shouldn't be called"); Poll::Ready(Ok(buf.len())) } - fn poll_flush(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { unreachable!("Mock::poll_flush") } - fn poll_shutdown( - self: Pin<&mut Self>, - _cx: &mut task::Context<'_>, - ) -> Poll> { + fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { unreachable!("Mock::poll_shutdown") } } diff --git a/tests/client.rs b/tests/client.rs index 88b3ee0d4f..a5fc79da8c 100644 --- a/tests/client.rs +++ b/tests/client.rs @@ -1,5 +1,6 @@ #![deny(warnings)] #![warn(rust_2018_idioms)] +#![cfg_attr(feature = "deprecated", allow(deprecated))] #[macro_use] extern crate matches; @@ -11,6 +12,7 @@ use std::task::{Context, Poll}; use std::thread; use std::time::Duration; +#[allow(deprecated)] use hyper::body::to_bytes as concat; use 
hyper::{Body, Client, Method, Request, StatusCode}; @@ -1121,10 +1123,11 @@ mod dispatch_impl { use http::Uri; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio::net::TcpStream; + use tokio_test::block_on; use super::support; use hyper::body::HttpBody; - use hyper::client::connect::{Connected, Connection, HttpConnector}; + use hyper::client::connect::{capture_connection, Connected, Connection, HttpConnector}; use hyper::Client; #[test] @@ -1533,6 +1536,37 @@ mod dispatch_impl { assert_eq!(connects.load(Ordering::Relaxed), 0); } + #[test] + fn capture_connection_on_client() { + let _ = pretty_env_logger::try_init(); + + let _rt = support::runtime(); + let connector = DebugConnector::new(); + + let client = Client::builder().build(connector); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + //drop(server); + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 1"); + }); + let mut req = Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Body::empty()) + .unwrap(); + let captured_conn = capture_connection(&mut req); + block_on(client.request(req)).expect("200 OK"); + assert!(captured_conn.connection_metadata().is_some()); + } + #[test] fn client_keep_alive_0() { let _ = pretty_env_logger::try_init(); @@ -2149,6 +2183,7 @@ mod dispatch_impl { } } +#[allow(deprecated)] mod conn { use std::io::{self, Read, Write}; use std::net::{SocketAddr, TcpListener}; @@ -2214,6 +2249,131 @@ mod conn { future::join(server, client).await; } + #[deny(deprecated)] + #[cfg(feature = "backports")] + mod backports { + use super::*; + #[tokio::test] + async fn get() { + let _ = ::pretty_env_logger::try_init(); + let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))) + .await + .unwrap(); + let addr = listener.local_addr().unwrap(); + + let server = async move { + let mut sock = listener.accept().await.unwrap().0; + let mut buf = [0; 4096]; + let n = sock.read(&mut buf).await.expect("read 1"); + + // Notably: + // - Just a path, since just a path was set + // - No host, since no host was set + let expected = "GET /a HTTP/1.1\r\n\r\n"; + assert_eq!(s(&buf[..n]), expected); + + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .await + .unwrap(); + }; + + let client = async move { + let tcp = tcp_connect(&addr).await.expect("connect"); + let (mut client, conn) = conn::http1::handshake(tcp).await.expect("handshake"); + + tokio::task::spawn(async move { + conn.await.expect("http conn"); + }); + + let req: Request = Request::builder() + .uri("/a") + .body(Default::default()) + .unwrap(); + let mut res = client.send_request(req).await.expect("send_request"); + assert_eq!(res.status(), hyper::StatusCode::OK); + assert!(res.body_mut().next().await.is_none()); + }; + + future::join(server, client).await; + } + + #[tokio::test] + async fn http2_detect_conn_eof() { + use futures_util::future; + use hyper::service::{make_service_fn, service_fn}; + use hyper::{Response, Server}; + + let _ = pretty_env_logger::try_init(); + + let server = Server::bind(&([127, 0, 0, 1], 0).into()) + .http2_only(true) + .serve(make_service_fn(|_| async move { + Ok::<_, hyper::Error>(service_fn(|_req| { + future::ok::<_, 
hyper::Error>(Response::new(Body::empty())) + })) + })); + let addr = server.local_addr(); + let (shdn_tx, shdn_rx) = oneshot::channel(); + tokio::task::spawn(async move { + server + .with_graceful_shutdown(async move { + let _ = shdn_rx.await; + }) + .await + .expect("server") + }); + + struct TokioExec; + impl hyper::rt::Executor for TokioExec + where + F: std::future::Future + Send + 'static, + F::Output: Send + 'static, + { + fn execute(&self, fut: F) { + tokio::spawn(fut); + } + } + + let io = tcp_connect(&addr).await.expect("tcp connect"); + let (mut client, conn) = conn::http2::Builder::new(TokioExec) + .handshake::<_, Body>(io) + .await + .expect("http handshake"); + + tokio::task::spawn(async move { + conn.await.expect("client conn"); + }); + + // Sanity check that client is ready + future::poll_fn(|ctx| client.poll_ready(ctx)) + .await + .expect("client poll ready sanity"); + + let req = Request::builder() + .uri(format!("http://{}/", addr)) + .body(Body::empty()) + .expect("request builder"); + + client.send_request(req).await.expect("req1 send"); + + // Sanity check that client is STILL ready + future::poll_fn(|ctx| client.poll_ready(ctx)) + .await + .expect("client poll ready after"); + + // Trigger the server shutdown... + let _ = shdn_tx.send(()); + + // Allow time for graceful shutdown roundtrips... + tokio::time::sleep(Duration::from_millis(100)).await; + + // After graceful shutdown roundtrips, the client should be closed... + future::poll_fn(|ctx| client.poll_ready(ctx)) + .await + .expect_err("client should be closed"); + } + } + #[tokio::test] async fn get_obsolete_line_folding() { let _ = ::pretty_env_logger::try_init(); @@ -2748,6 +2908,63 @@ mod conn { assert_eq!(vec, b"bar=foo"); } + #[tokio::test] + async fn client_100_then_http09() { + let _ = ::pretty_env_logger::try_init(); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write_all( + b"\ + HTTP/1.1 100 Continue\r\n\ + Content-Type: text/plain\r\n\ + Server: BaseHTTP/0.6 Python/3.12.5\r\n\ + Date: Mon, 16 Dec 2024 03:08:27 GMT\r\n\ + ", + ) + .unwrap(); + // That it's separate writes is important to this test + thread::sleep(Duration::from_millis(50)); + sock.write_all( + b"\ + \r\n\ + ", + ) + .expect("write 2"); + thread::sleep(Duration::from_millis(50)); + sock.write_all( + b"\ + This is a sample text/plain document, without final headers.\ + \n\n\ + ", + ) + .expect("write 3"); + }); + + let tcp = tcp_connect(&addr).await.unwrap(); + + let (mut client, conn) = conn::Builder::new() + .http09_responses(true) + .handshake(tcp) + .await + .unwrap(); + + tokio::spawn(async move { + let _ = conn.await; + }); + + let req = Request::builder().uri("/a").body(Body::empty()).unwrap(); + let _res = client.send_request(req).await.expect("send_request"); + } + #[tokio::test] async fn http2_detect_conn_eof() { use futures_util::future; @@ -2995,6 +3212,61 @@ mod conn { .expect("client should be open"); } + #[tokio::test] + async fn http2_responds_before_consuming_request_body() { + // Test that a early-response from server works correctly (request body wasn't fully consumed). 
+ // https://github.com/hyperium/hyper/issues/2872 + use hyper::service::service_fn; + + let _ = pretty_env_logger::try_init(); + + let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))) + .await + .unwrap(); + let addr = listener.local_addr().unwrap(); + + // Spawn an HTTP2 server that responds before reading the whole request body. + // It's normal case to decline the request due to headers or size of the body. + tokio::spawn(async move { + let sock = listener.accept().await.unwrap().0; + hyper::server::conn::Http::new() + .http2_only(true) + .serve_connection( + sock, + service_fn(|_req| async move { + Ok::<_, hyper::Error>(http::Response::new(hyper::Body::from( + "No bread for you!", + ))) + }), + ) + .await + .expect("serve_connection"); + }); + + let io = tcp_connect(&addr).await.expect("tcp connect"); + let (mut client, conn) = conn::Builder::new() + .http2_only(true) + .handshake::<_, Body>(io) + .await + .expect("http handshake"); + + tokio::spawn(async move { + conn.await.expect("client conn shouldn't error"); + }); + + // Use a channel to keep request stream open + let (_tx, body) = hyper::Body::channel(); + let req = Request::post("/a").body(body).unwrap(); + let resp = client.send_request(req).await.expect("send_request"); + assert!(resp.status().is_success()); + + let body = concat(resp.into_body()) + .await + .expect("get response body with no error"); + + assert_eq!(body.as_ref(), b"No bread for you!"); + } + #[tokio::test] async fn h2_connect() { let _ = pretty_env_logger::try_init(); @@ -3114,6 +3386,44 @@ mod conn { done_tx.send(()).unwrap(); } + #[tokio::test] + async fn test_body_panics() { + use hyper::body::HttpBody; + + let _ = pretty_env_logger::try_init(); + + let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))) + .await + .unwrap(); + let addr = listener.local_addr().unwrap(); + + // spawn a server that reads but doesn't write + tokio::spawn(async move { + let sock = listener.accept().await.unwrap().0; + drain_til_eof(sock).await.expect("server read"); + }); + + let io = tcp_connect(&addr).await.expect("tcp connect"); + + let (mut client, conn) = conn::Builder::new().handshake(io).await.expect("handshake"); + + tokio::spawn(async move { + conn.await.expect("client conn shouldn't error"); + }); + + let req = Request::post("/a") + .body(Body::from("baguette").map_data::<_, &[u8]>(|_| panic!("oopsie"))) + .unwrap(); + + let error = client.send_request(req).await.unwrap_err(); + + assert!(error.is_user()); + assert_eq!( + error.to_string(), + "dispatch task is gone: user code panicked" + ); + } + async fn drain_til_eof(mut sock: T) -> io::Result<()> { let mut buf = [0u8; 1024]; loop { diff --git a/tests/integration.rs b/tests/integration.rs index 2deee443f8..9e094cc713 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -305,6 +305,48 @@ t! { ; } +t! { + h2_connect_authority_form, + client: + request: + method: "CONNECT", + // http2 should strip scheme and path from URI (authority-form) + uri: "/connect_normal", + ; + response: + ; + server: + request: + method: "CONNECT", + // path should be stripped + uri: "", + ; + response: + ; +} + +t! { + h2_only; + h2_extended_connect_full_uri, + client: + request: + method: "CONNECT", + // http2 should not strip scheme and path from URI for extended CONNECT requests + uri: "/connect_extended", + protocol: "the-bread-protocol", + ; + response: + ; + server: + request: + method: "CONNECT", + // path should not be stripped + uri: "/connect_extended", + ; + response: + ; +} + t! 
{ get_2, client: diff --git a/tests/server.rs b/tests/server.rs index af5b5e9961..09f0dffb8d 100644 --- a/tests/server.rs +++ b/tests/server.rs @@ -1,5 +1,6 @@ #![deny(warnings)] #![deny(rust_2018_idioms)] +#![cfg_attr(feature = "deprecated", allow(deprecated))] use std::convert::TryInto; use std::future::Future; @@ -92,6 +93,7 @@ mod response_body_lengths { } fn run_test(case: TestCase) { + let _ = pretty_env_logger::try_init(); assert!( case.version == 0 || case.version == 1, "TestCase.version must 0 or 1" @@ -156,18 +158,22 @@ mod response_body_lengths { let n = body.find("\r\n\r\n").unwrap() + 4; if case.expects_chunked { - let len = body.len(); - assert_eq!( - &body[n + 1..n + 3], - "\r\n", - "expected body chunk size header" - ); - assert_eq!(&body[n + 3..len - 7], body_str, "expected body"); - assert_eq!( - &body[len - 7..], - "\r\n0\r\n\r\n", - "expected body final chunk size header" - ); + if body_str.len() > 0 { + let len = body.len(); + assert_eq!( + &body[n + 1..n + 3], + "\r\n", + "expected body chunk size header" + ); + assert_eq!(&body[n + 3..len - 7], body_str, "expected body"); + assert_eq!( + &body[len - 7..], + "\r\n0\r\n\r\n", + "expected body final chunk size header" + ); + } else { + assert_eq!(&body[n..], "0\r\n\r\n"); + } } else { assert_eq!(&body[n..], body_str, "expected body"); } @@ -218,6 +224,17 @@ mod response_body_lengths { }); } + #[test] + fn chunked_response_known_empty() { + run_test(TestCase { + version: 1, + headers: &[("transfer-encoding", "chunked")], + body: Bd::Known(""), + expects_chunked: true, // should still send chunked, and 0\r\n\r\n + expects_con_len: false, + }); + } + #[test] fn chunked_response_unknown() { run_test(TestCase { @@ -568,6 +585,29 @@ fn post_with_incomplete_body() { req.read(&mut [0; 256]).expect("read"); } +#[test] +fn post_with_chunked_missing_final_digit() { + let _ = pretty_env_logger::try_init(); + let server = serve(); + let mut req = connect(server.addr()); + req.write_all( + b"\ + POST / HTTP/1.1\r\n\ + Host: example.domain\r\n\ + transfer-encoding: chunked\r\n\ + \r\n\ + 1\r\n\ + Z\r\n\ + \r\n\r\n\ + ", + ) + .expect("write"); + + server.body_err(); + + req.read(&mut [0; 256]).expect("read"); +} + #[test] fn head_response_can_send_content_length() { let _ = pretty_env_logger::try_init(); @@ -973,9 +1013,8 @@ async fn expect_continue_waits_for_body_poll() { service_fn(|req| { assert_eq!(req.headers()["expect"], "100-continue"); // But! We're never going to poll the body! 
+ drop(req); tokio::time::sleep(Duration::from_millis(50)).map(move |_| { - // Move and drop the req, so we don't auto-close - drop(req); Response::builder() .status(StatusCode::BAD_REQUEST) .body(hyper::Body::empty()) @@ -2537,6 +2576,7 @@ async fn http2_keep_alive_with_responsive_client() { }); let tcp = connect_async(addr).await; + #[allow(deprecated)] let (mut client, conn) = hyper::client::conn::Builder::new() .http2_only(true) .handshake::<_, Body>(tcp) @@ -2641,6 +2681,146 @@ async fn http2_keep_alive_count_server_pings() { .expect("timed out waiting for pings"); } +// Tests for backported 1.0 APIs +#[deny(deprecated)] +#[cfg(feature = "backports")] +mod backports { + use super::*; + use hyper::server::conn::{http1, http2}; + + #[tokio::test] + async fn http_connect() { + let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = listener.local_addr().unwrap(); + + let (tx, rx) = oneshot::channel(); + + thread::spawn(move || { + let mut tcp = connect(&addr); + tcp.write_all( + b"\ + CONNECT localhost:80 HTTP/1.1\r\n\ + \r\n\ + eagerly optimistic\ + ", + ) + .expect("write 1"); + let mut buf = [0; 256]; + tcp.read(&mut buf).expect("read 1"); + + let expected = "HTTP/1.1 200 OK\r\n"; + assert_eq!(s(&buf[..expected.len()]), expected); + let _ = tx.send(()); + + let n = tcp.read(&mut buf).expect("read 2"); + assert_eq!(s(&buf[..n]), "foo=bar"); + tcp.write_all(b"bar=foo").expect("write 2"); + }); + + let (socket, _) = listener.accept().await.unwrap(); + let conn = http1::Builder::new().serve_connection( + socket, + service_fn(|_| { + // In 1.0 we would use `http_body_util::Empty::::new()` to construct + // an empty body + let res = Response::builder().status(200).body(Body::empty()).unwrap(); + future::ready(Ok::<_, hyper::Error>(res)) + }), + ); + + let parts = conn.without_shutdown().await.unwrap(); + assert_eq!(parts.read_buf, "eagerly optimistic"); + + // wait so that we don't write until other side saw 101 response + rx.await.unwrap(); + + let mut io = parts.io; + io.write_all(b"foo=bar").await.unwrap(); + let mut vec = vec![]; + io.read_to_end(&mut vec).await.unwrap(); + assert_eq!(vec, b"bar=foo"); + } + + #[tokio::test] + async fn h2_connect() { + let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); + let addr = listener.local_addr().unwrap(); + + let conn = connect_async(addr).await; + + let (h2, connection) = h2::client::handshake(conn).await.unwrap(); + tokio::spawn(async move { + connection.await.unwrap(); + }); + let mut h2 = h2.ready().await.unwrap(); + + async fn connect_and_recv_bread( + h2: &mut SendRequest, + ) -> (RecvStream, SendStream) { + let request = Request::connect("localhost").body(()).unwrap(); + let (response, send_stream) = h2.send_request(request, false).unwrap(); + let response = response.await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let mut body = response.into_body(); + let bytes = body.data().await.unwrap().unwrap(); + assert_eq!(&bytes[..], b"Bread?"); + let _ = body.flow_control().release_capacity(bytes.len()); + + (body, send_stream) + } + + tokio::spawn(async move { + let (mut recv_stream, mut send_stream) = connect_and_recv_bread(&mut h2).await; + + send_stream.send_data("Baguette!".into(), true).unwrap(); + + assert!(recv_stream.data().await.unwrap().unwrap().is_empty()); + }); + + // In 1.0 the `Body` struct is renamed to `IncomingBody` + let svc = service_fn(move |req: Request| { + let on_upgrade = hyper::upgrade::on(req); + + tokio::spawn(async move { + let mut upgraded = 
on_upgrade.await.expect("on_upgrade"); + upgraded.write_all(b"Bread?").await.unwrap(); + + let mut vec = vec![]; + upgraded.read_to_end(&mut vec).await.unwrap(); + assert_eq!(s(&vec), "Baguette!"); + + upgraded.shutdown().await.unwrap(); + }); + + future::ok::<_, hyper::Error>( + // In 1.0 we would use `http_body_util::Empty::::new()` to construct + // an empty body + Response::builder().status(200).body(Body::empty()).unwrap(), + ) + }); + + let (socket, _) = listener.accept().await.unwrap(); + http2::Builder::new(TokioExecutor) + .serve_connection(socket, svc) + .await + .unwrap(); + } + + #[derive(Clone)] + /// An Executor that uses the tokio runtime. + pub struct TokioExecutor; + + impl hyper::rt::Executor for TokioExecutor + where + F: std::future::Future + Send + 'static, + F::Output: Send + 'static, + { + fn execute(&self, fut: F) { + tokio::task::spawn(fut); + } + } +} // ------------------------------------------------- // the Server that is used to run all the tests with // ------------------------------------------------- diff --git a/tests/support/mod.rs b/tests/support/mod.rs index 6b3c8f4472..5a641faaf8 100644 --- a/tests/support/mod.rs +++ b/tests/support/mod.rs @@ -13,14 +13,18 @@ use hyper::{Body, Client, Request, Response, Server, Version}; pub use futures_util::{ future, FutureExt as _, StreamExt as _, TryFutureExt as _, TryStreamExt as _, }; -pub use hyper::{HeaderMap, StatusCode}; +pub use hyper::{ext::Protocol, HeaderMap}; +#[allow(unused_imports)] +pub use hyper::{http::Extensions, StatusCode}; pub use std::net::SocketAddr; #[allow(unused_macros)] macro_rules! t { ( + @impl $name:ident, - parallel: $range:expr + parallel: $range:expr, + $(h2_only: $_h2_only:expr)? ) => ( #[test] fn $name() { @@ -75,6 +79,7 @@ macro_rules! t { } ); ( + @impl $name:ident, client: $( request: $( @@ -91,7 +96,8 @@ macro_rules! t { response: $( $s_res_prop:ident: $s_res_val:tt, )*; - )* + )*, + h2_only: $h2_only:expr ) => ( #[test] fn $name() { @@ -116,15 +122,17 @@ macro_rules! t { } ),)*]; - __run_test(__TestConfig { - client_version: 1, - client_msgs: c.clone(), - server_version: 1, - server_msgs: s.clone(), - parallel: false, - connections: 1, - proxy: false, - }); + if !$h2_only { + __run_test(__TestConfig { + client_version: 1, + client_msgs: c.clone(), + server_version: 1, + server_msgs: s.clone(), + parallel: false, + connections: 1, + proxy: false, + }); + } __run_test(__TestConfig { client_version: 2, @@ -136,15 +144,17 @@ macro_rules! t { proxy: false, }); - __run_test(__TestConfig { - client_version: 1, - client_msgs: c.clone(), - server_version: 1, - server_msgs: s.clone(), - parallel: false, - connections: 1, - proxy: true, - }); + if !$h2_only { + __run_test(__TestConfig { + client_version: 1, + client_msgs: c.clone(), + server_version: 1, + server_msgs: s.clone(), + parallel: false, + connections: 1, + proxy: true, + }); + } __run_test(__TestConfig { client_version: 2, @@ -157,6 +167,12 @@ macro_rules! t { }); } ); + (h2_only; $($t:tt)*) => { + t!(@impl $($t)*, h2_only: true); + }; + ($($t:tt)*) => { + t!(@impl $($t)*, h2_only: false); + }; } macro_rules! 
__internal_map_prop { @@ -245,6 +261,7 @@ pub struct __CReq { pub uri: &'static str, pub headers: HeaderMap, pub body: Vec, + pub protocol: Option<&'static str>, } impl Default for __CReq { @@ -254,6 +271,7 @@ impl Default for __CReq { uri: "/", headers: HeaderMap::new(), body: Vec::new(), + protocol: None, } } } @@ -356,6 +374,7 @@ async fn async_test(cfg: __TestConfig) { func(&req.headers()); } let sbody = sreq.body; + #[allow(deprecated)] hyper::body::to_bytes(req).map_ok(move |body| { assert_eq!(body.as_ref(), sbody.as_slice(), "client body"); @@ -371,6 +390,7 @@ async fn async_test(cfg: __TestConfig) { let server = hyper::Server::bind(&SocketAddr::from(([127, 0, 0, 1], 0))) .http2_only(cfg.server_version == 2) + .http2_enable_connect_protocol() .serve(new_service); let mut addr = server.local_addr(); @@ -398,6 +418,9 @@ async fn async_test(cfg: __TestConfig) { //.headers(creq.headers) .body(creq.body.into()) .expect("Request::build"); + if let Some(protocol) = creq.protocol { + req.extensions_mut().insert(Protocol::from_static(protocol)); + } *req.headers_mut() = creq.headers; let cstatus = cres.status; let cheaders = cres.headers; @@ -411,6 +434,7 @@ async fn async_test(cfg: __TestConfig) { for func in &cheaders { func(&res.headers()); } + #[allow(deprecated)] hyper::body::to_bytes(res) }) .map_ok(move |body| { @@ -458,18 +482,20 @@ fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future) { let max_connections = cfg.connections; let counter = AtomicUsize::new(0); - let srv = Server::bind(&([127, 0, 0, 1], 0).into()).serve(make_service_fn(move |_| { - let prev = counter.fetch_add(1, Ordering::Relaxed); - assert!(max_connections > prev, "proxy max connections"); - let client = client.clone(); - future::ok::<_, hyper::Error>(service_fn(move |mut req| { - let uri = format!("http://{}{}/", dst_addr, req.uri().path()) - .parse() - .expect("proxy new uri parse"); - *req.uri_mut() = uri; - client.request(req) - })) - })); + let srv = Server::bind(&([127, 0, 0, 1], 0).into()) + .http2_enable_connect_protocol() + .serve(make_service_fn(move |_| { + let prev = counter.fetch_add(1, Ordering::Relaxed); + assert!(max_connections > prev, "proxy max connections"); + let client = client.clone(); + future::ok::<_, hyper::Error>(service_fn(move |mut req| { + let uri = format!("http://{}{}/", dst_addr, req.uri().path()) + .parse() + .expect("proxy new uri parse"); + *req.uri_mut() = uri; + client.request(req) + })) + })); let proxy_addr = srv.local_addr(); (proxy_addr, srv.map(|res| res.expect("proxy error"))) }
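A minimal sketch of the extended-CONNECT pattern the new `h2_extended_connect_full_uri` case exercises: the client marks the request with an `ext::Protocol`, and the server side must enable the CONNECT protocol (`http2_enable_connect_protocol()`, as the harness above now does). The URI, protocol name, and HTTP/2-only client are placeholder assumptions:

use hyper::client::HttpConnector;
use hyper::ext::Protocol;
use hyper::{Body, Client, Method, Request};

async fn extended_connect() -> hyper::Result<()> {
    // Extended CONNECT (RFC 8441) only exists on HTTP/2 connections.
    let client: Client<HttpConnector> = Client::builder().http2_only(true).build_http();

    let mut req = Request::builder()
        .method(Method::CONNECT)
        // For extended CONNECT the full URI (scheme and path) is kept, unlike plain CONNECT.
        .uri("http://127.0.0.1:3000/connect_extended")
        .body(Body::empty())
        .expect("request builder");

    // hyper reads the `:protocol` pseudo-header from the request extensions.
    req.extensions_mut()
        .insert(Protocol::from_static("the-bread-protocol"));

    let _res = client.request(req).await?;
    Ok(())
}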