From 6f0d5720501bd9018bed72c03f8beb3c508870be Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Wed, 30 Oct 2024 15:47:43 +0200 Subject: [PATCH 01/19] Add rpc types --- Cargo.lock | 244 ++++- Cargo.toml | 15 +- src/lib.rs | 3 + src/rpc.rs | 3 + src/rpc/client.rs | 20 + src/rpc/client/blobs.rs | 1671 +++++++++++++++++++++++++++++++++ src/rpc/client/blobs/batch.rs | 476 ++++++++++ src/rpc/client/tags.rs | 64 ++ src/rpc/proto.rs | 34 + src/rpc/proto/blobs.rs | 320 +++++++ src/rpc/proto/tags.rs | 110 +++ src/util.rs | 1 + src/util/fs.rs | 435 +++++++++ 13 files changed, 3356 insertions(+), 40 deletions(-) create mode 100644 src/rpc.rs create mode 100644 src/rpc/client.rs create mode 100644 src/rpc/client/blobs.rs create mode 100644 src/rpc/client/blobs/batch.rs create mode 100644 src/rpc/client/tags.rs create mode 100644 src/rpc/proto.rs create mode 100644 src/rpc/proto/blobs.rs create mode 100644 src/rpc/proto/tags.rs create mode 100644 src/util/fs.rs diff --git a/Cargo.lock b/Cargo.lock index 88703ab08..88bb1bc45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,7 +106,7 @@ checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", "synstructure", ] @@ -118,7 +118,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -141,7 +141,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -152,7 +152,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -320,6 +320,37 @@ dependencies = [ "serde", ] +[[package]] +name = "camino" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", +] + [[package]] name = "cc" version = "1.1.6" @@ -547,7 +578,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -602,7 +633,7 @@ checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -632,7 +663,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", "unicode-xid", ] @@ -662,7 +693,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -749,6 +780,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "educe" +version = "0.4.23" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "elliptic-curve" version = "0.13.8" @@ -789,7 +832,20 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", +] + +[[package]] +name = "enum-ordinalize" +version = "3.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.85", ] [[package]] @@ -809,7 +865,7 @@ checksum = "de0d48a183585823424a4ce1aa132d174a6a81bd540895822eb4c8373a8e49e8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -1056,7 +1112,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -1656,17 +1712,22 @@ dependencies = [ "iroh-quinn", "iroh-router", "iroh-test", + "nested_enum_utils", "num_cpus", "oneshot", "parking_lot", "pin-project", + "portable-atomic", "postcard", "proptest", + "quic-rpc", + "quic-rpc-derive", "rand", "range-collections", "rcgen", "redb 1.5.1", "redb 2.1.1", + "ref-cast", "reflink-copy", "rustls", "self_cell", @@ -1675,7 +1736,9 @@ dependencies = [ "serde_json", "serde_test", "smallvec", + "strum", "tempfile", + "testdir", "testresult", "thiserror", "tokio", @@ -1683,6 +1746,7 @@ dependencies = [ "tracing", "tracing-futures", "tracing-subscriber", + "walkdir", ] [[package]] @@ -2083,6 +2147,18 @@ dependencies = [ "getrandom", ] +[[package]] +name = "nested_enum_utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f256ef99e7ac37428ef98c89bef9d84b590172de4bbfbe81b68a4cd3abadb32" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "netdev" version = "0.30.0" @@ -2204,6 +2280,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2305,7 +2390,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -2495,7 +2580,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -2526,7 +2611,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -2606,7 +2691,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -2643,9 +2728,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "positioned-io" @@ -2779,9 +2864,9 @@ checksum = 
"dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] @@ -2806,7 +2891,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -2844,6 +2929,39 @@ dependencies = [ "winapi", ] +[[package]] +name = "quic-rpc" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b0ea1bd0b3124538bb71ed8cedbe92608fd1cf227e4f5ff53fb28746737b794" +dependencies = [ + "anyhow", + "derive_more", + "educe", + "flume", + "futures-lite 2.3.0", + "futures-sink", + "futures-util", + "hex", + "pin-project", + "serde", + "slab", + "tokio", + "tracing", +] + +[[package]] +name = "quic-rpc-derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94b91a3f7a42657cbfbd0c2499c1f037738eff45bb7f59c6ce3d3d9e890d141c" +dependencies = [ + "proc-macro2", + "quic-rpc", + "quote", + "syn 1.0.109", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -2898,9 +3016,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -3037,7 +3155,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -3460,6 +3578,9 @@ name = "semver" version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +dependencies = [ + "serde", +] [[package]] name = "serde" @@ -3506,7 +3627,7 @@ checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -3744,7 +3865,7 @@ dependencies = [ "proc-macro2", "quote", "struct_iterable_internal", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -3772,7 +3893,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -3834,9 +3955,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.72" +version = "2.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" dependencies = [ "proc-macro2", "quote", @@ -3871,7 +3992,21 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", +] + +[[package]] +name = "sysinfo" +version = "0.26.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c18a6156d1f27a9592ee18c1a846ca8dd5c258b7179fc193ae87c74ebb666f5" +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "winapi", ] [[package]] @@ -3913,6 +4048,20 @@ dependencies = [ "windows-sys 
0.52.0", ] +[[package]] +name = "testdir" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee79e927b64d193f5abb60d20a0eb56be0ee5a242fdeb8ce3bf054177006de52" +dependencies = [ + "anyhow", + "backtrace", + "cargo_metadata", + "once_cell", + "sysinfo", + "whoami", +] + [[package]] name = "testresult" version = "0.4.1" @@ -3936,7 +4085,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -4021,7 +4170,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -4156,7 +4305,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -4397,6 +4546,12 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" version = "0.2.92" @@ -4418,7 +4573,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", "wasm-bindgen-shared", ] @@ -4452,7 +4607,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4494,6 +4649,17 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "whoami" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" +dependencies = [ + "redox_syscall", + "wasite", + "web-sys", +] + [[package]] name = "widestring" version = "1.1.0" @@ -4611,7 +4777,7 @@ checksum = "12168c33176773b86799be25e2a2ba07c7aab9968b37541f1094dbd7a60c8946" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -4622,7 +4788,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -4633,7 +4799,7 @@ checksum = "9d8dc32e0095a7eeccebd0e3f09e9509365ecb3fc6ac4d6f5f14a3f6392942d1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -4644,7 +4810,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] @@ -4931,7 +5097,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.85", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 12a50028e..9d917bf5f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ chrono = "0.4.31" derive_more = { version = "1.0.0", features = ["debug", "display", "deref", "deref_mut", "from", "try_into", "into"] } futures-buffered = "0.2.4" futures-lite = "2.3" +futures-util = { version = "0.3.30", optional = true } genawaiter = { version = "0.99.1", features = ["futures03"] } hashlink = { version = "0.9.0", optional = true } hex = "0.4.3" @@ -29,27 +30,34 
@@ iroh-io = { version = "0.6.0", features = ["stats"] }
 iroh-metrics = { version = "0.27.0", default-features = false }
 iroh-net = { version = "0.27.0" }
 iroh-router = "0.27.0"
+nested_enum_utils = { version = "0.1.0", optional = true }
 num_cpus = "1.15.0"
 oneshot = "0.1.8"
 parking_lot = { version = "0.12.1", optional = true }
 pin-project = "1.1.5"
+portable-atomic = { version = "1.9.0", optional = true }
 postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] }
+quic-rpc = { version = "0.13.0", optional = true }
+quic-rpc-derive = { version = "0.13.0", optional = true }
 quinn = { package = "iroh-quinn", version = "0.11", features = ["ring"] }
 rand = "0.8"
 range-collections = "0.4.0"
 redb = { version = "2.0.0", optional = true }
 redb_v1 = { package = "redb", version = "1.5.1", optional = true }
+ref-cast = { version = "1.0.23", optional = true }
 reflink-copy = { version = "0.1.8", optional = true }
 self_cell = "1.0.1"
 serde = { version = "1", features = ["derive"] }
 serde-error = "0.1.3"
 smallvec = { version = "1.10.0", features = ["serde", "const_new"] }
+strum = { version = "0.26.3", optional = true }
 tempfile = { version = "3.10.0", optional = true }
 thiserror = "1"
 tokio = { version = "1", features = ["fs"] }
 tokio-util = { version = "0.7", features = ["io-util", "io"] }
 tracing = "0.1"
 tracing-futures = "0.2.5"
+walkdir = { version = "2.5.0", optional = true }

 [dev-dependencies]
 http-body = "0.4.5"
@@ -65,13 +73,18 @@ rcgen = "0.12.0"
 rustls = { version = "0.23", default-features = false, features = ["ring"] }
 tempfile = "3.10.0"
 futures-util = "0.3.30"
+testdir = "0.9.1"

 [features]
-default = ["fs-store"]
+default = ["fs-store", "rpc"]
 downloader = ["dep:parking_lot", "tokio-util/time", "dep:hashlink"]
 fs-store = ["dep:reflink-copy", "redb", "dep:redb_v1", "dep:tempfile"]
 metrics = ["iroh-metrics/metrics"]
 redb = ["dep:redb"]
+rpc = ["dep:quic-rpc", "dep:quic-rpc-derive", "dep:nested_enum_utils", "dep:strum", "dep:futures-util", "dep:ref-cast", "dep:portable-atomic", "dep:walkdir", "downloader"]
+ref-cast = ["dep:ref-cast"]
+portable-atomic = ["dep:portable-atomic"]
+walkdir = ["dep:walkdir"]

 [package.metadata.docs.rs]
 all-features = true
diff --git a/src/lib.rs b/src/lib.rs
index b6358fabc..8b561c504 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -40,6 +40,9 @@ pub mod metrics;
 pub mod net_protocol;
 pub mod protocol;
 pub mod provider;
+#[cfg(feature = "rpc")]
+#[cfg_attr(iroh_docsrs, doc(cfg(feature = "rpc")))]
+pub mod rpc;
 pub mod store;
 pub mod util;
diff --git a/src/rpc.rs b/src/rpc.rs
new file mode 100644
index 000000000..17bcf4730
--- /dev/null
+++ b/src/rpc.rs
@@ -0,0 +1,3 @@
+//! Provides an RPC protocol as well as a client for that protocol.
+pub mod client;
+pub mod proto;
diff --git a/src/rpc/client.rs b/src/rpc/client.rs
new file mode 100644
index 000000000..4b11fdc19
--- /dev/null
+++ b/src/rpc/client.rs
@@ -0,0 +1,20 @@
+//! Iroh blobs and tags client
+use anyhow::Result;
+use futures_util::{Stream, StreamExt};
+
+pub mod blobs;
+pub mod tags;
+
+fn flatten<T, E1, E2>(
+    s: impl Stream<Item = Result<Result<T, E1>, E2>>,
+) -> impl Stream<Item = Result<T>>
+where
+    E1: std::error::Error + Send + Sync + 'static,
+    E2: std::error::Error + Send + Sync + 'static,
+{
+    s.map(|res| match res {
+        Ok(Ok(res)) => Ok(res),
+        Ok(Err(err)) => Err(err.into()),
+        Err(err) => Err(err.into()),
+    })
+}
diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs
new file mode 100644
index 000000000..7a31983e0
--- /dev/null
+++ b/src/rpc/client/blobs.rs
@@ -0,0 +1,1671 @@
+//! API for blobs management.
+//!
+//! The main entry point is the [`Client`].
+//!
+//! You obtain a [`Client`] via [`Iroh::blobs()`](crate::client::Iroh::blobs).
+//!
+//! ## Interacting with the local blob store
+//!
+//! ### Importing data
+//!
+//! There are several ways to import data into the local blob store:
+//!
+//! - [`add_bytes`](Client::add_bytes)
+//!   imports in-memory data.
+//! - [`add_stream`](Client::add_stream)
+//!   imports data from a stream of bytes.
+//! - [`add_reader`](Client::add_reader)
+//!   imports data from an [async reader](tokio::io::AsyncRead).
+//! - [`add_from_path`](Client::add_from_path)
+//!   imports data from a file.
+//!
+//! The last method imports data directly from a file on the local filesystem
+//! and is the most efficient way to import large amounts of data.
+//!
+//! ### Exporting data
+//!
+//! There are several ways to export data from the local blob store:
+//!
+//! - [`read_to_bytes`](Client::read_to_bytes) reads data into memory.
+//! - [`read`](Client::read) creates a [reader](Reader) to read data from.
+//! - [`export`](Client::export) exports data to a file on the local filesystem.
+//!
+//! ## Interacting with remote nodes
+//!
+//! - [`download`](Client::download) downloads data from a remote node.
+//! - [`share`](Client::share) allows creating a ticket to share data with a
+//!   remote node.
+//!
+//! ## Interacting with the blob store itself
+//!
+//! These are more advanced operations that are usually not needed in normal
+//! operation.
+//!
+//! - [`consistency_check`](Client::consistency_check) checks the internal
+//!   consistency of the local blob store.
+//! - [`validate`](Client::validate) validates the locally stored data against
+//!   their BLAKE3 hashes.
+//! - [`delete_blob`](Client::delete_blob) deletes a blob from the local store.
+//!
+//! ### Batch operations
+//!
+//! For complex update operations, there is a [`batch`](Client::batch) API that
+//! allows you to add multiple blobs in a single logical batch.
+//!
+//! Operations in a batch return [temporary tags](crate::blobs::TempTag) that
+//! protect the added data from garbage collection as long as the batch is
+//! alive.
+//!
+//! To store the data permanently, a temp tag needs to be upgraded to a
+//! permanent tag using [`persist`](crate::client::blobs::Batch::persist) or
+//! [`persist_to`](crate::client::blobs::Batch::persist_to).
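+//!
+//! ### Example
+//!
+//! A minimal sketch of a write/read roundtrip through the local store,
+//! assuming a `client` of type [`Client`] has already been obtained from a
+//! running node:
+//!
+//! ```ignore
+//! // import some in-memory data and get back its hash
+//! let outcome = client.add_bytes(b"hello iroh".to_vec()).await?;
+//! // read it back into memory
+//! let bytes = client.read_to_bytes(outcome.hash).await?;
+//! assert_eq!(&bytes[..], b"hello iroh");
+//! ```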
+use std::{
+    future::Future,
+    io,
+    path::PathBuf,
+    pin::Pin,
+    sync::Arc,
+    task::{Context, Poll},
+};
+
+pub use crate::net_protocol::DownloadMode;
+use crate::{
+    export::ExportProgress as BytesExportProgress,
+    format::collection::{Collection, SimpleStore},
+    get::db::DownloadProgress as BytesDownloadProgress,
+    net_protocol::BlobDownloadRequest,
+    store::{BaoBlobSize, ConsistencyCheckProgress, ExportFormat, ExportMode, ValidateProgress},
+    util::SetTagOption,
+    BlobFormat, Hash, Tag,
+};
+use anyhow::{anyhow, Context as _, Result};
+use bytes::Bytes;
+use futures_lite::{Stream, StreamExt};
+use futures_util::SinkExt;
+use genawaiter::sync::{Co, Gen};
+use iroh_net::NodeAddr;
+use portable_atomic::{AtomicU64, Ordering};
+use quic_rpc::{client::BoxStreamSync, RpcClient};
+use ref_cast::RefCast;
+use serde::{Deserialize, Serialize};
+use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
+use tokio_util::io::{ReaderStream, StreamReader};
+use tracing::warn;
+
+mod batch;
+pub use batch::{AddDirOpts, AddFileOpts, AddReaderOpts, Batch};
+
+use super::{flatten, tags};
+use crate::rpc::proto::blobs::{
+    AddPathRequest, AddStreamRequest, AddStreamUpdate, BatchCreateRequest, BatchCreateResponse,
+    BlobStatusRequest, ConsistencyCheckRequest, CreateCollectionRequest, CreateCollectionResponse,
+    DeleteRequest, ExportRequest, ListIncompleteRequest, ListRequest, ReadAtRequest,
+    ReadAtResponse, ValidateRequest,
+};
+
+/// Iroh blobs client.
+#[derive(Debug, Clone, RefCast)]
+#[repr(transparent)]
+pub struct Client<S, C> {
+    pub(super) rpc: RpcClient<S, C>,
+}
+
+impl<S, C> Client<S, C>
+where
+    S: quic_rpc::Service,
+    C: quic_rpc::ServiceConnection<S>,
+{
+    /// Check if a blob is completely stored on the node.
+    ///
+    /// Note that this will return false for blobs that are partially stored on
+    /// the node.
+    pub async fn status(&self, hash: Hash) -> Result<BlobStatus> {
+        let status = self.rpc.rpc(BlobStatusRequest { hash }).await??;
+        Ok(status.0)
+    }
+
+    /// Check if a blob is completely stored on the node.
+    ///
+    /// This is just a convenience wrapper around `status` that returns a boolean.
+    pub async fn has(&self, hash: Hash) -> Result<bool> {
+        match self.status(hash).await {
+            Ok(BlobStatus::Complete { .. }) => Ok(true),
+            Ok(_) => Ok(false),
+            Err(err) => Err(err),
+        }
+    }
+
+    /// Create a new batch for adding data.
+    ///
+    /// A batch is a context in which temp tags are created and data is added to the node. Temp tags
+    /// are automatically deleted when the batch is dropped, leading to the data being garbage collected
+    /// unless a permanent tag is created for it.
+    pub async fn batch(&self) -> Result<Batch<S, C>> {
+        let (updates, mut stream) = self.rpc.bidi(BatchCreateRequest).await?;
+        let BatchCreateResponse::Id(batch) = stream.next().await.context("expected scope id")??;
+        let rpc = self.rpc.clone();
+        Ok(Batch::new(batch, rpc, updates, 1024))
+    }
+
+    /// Stream the contents of a single blob.
+    ///
+    /// Returns a [`Reader`], which can report the size of the blob before reading it.
+    pub async fn read(&self, hash: Hash) -> Result<Reader> {
+        Reader::from_rpc_read(&self.rpc, hash).await
+    }
+
+    /// Read offset + len from a single blob.
+    ///
+    /// If `len` is [`ReadAtLen::All`] it will read the full blob.
+    pub async fn read_at(&self, hash: Hash, offset: u64, len: ReadAtLen) -> Result<Reader> {
+        Reader::from_rpc_read_at(&self.rpc, hash, offset, len).await
+    }
+
+    /// Read all bytes of a single blob.
+    ///
+    /// This allocates a buffer for the full blob. Use only if you know that the blob you're
+    /// reading is small.
+    /// If not sure, use [`Self::read`] and check the size with
+    /// [`Reader::size`] before calling [`Reader::read_to_bytes`].
+    pub async fn read_to_bytes(&self, hash: Hash) -> Result<Bytes> {
+        Reader::from_rpc_read(&self.rpc, hash)
+            .await?
+            .read_to_bytes()
+            .await
+    }
+
+    /// Read all bytes of a single blob at `offset` for length `len`.
+    ///
+    /// This allocates a buffer for the full length.
+    pub async fn read_at_to_bytes(&self, hash: Hash, offset: u64, len: ReadAtLen) -> Result<Bytes> {
+        Reader::from_rpc_read_at(&self.rpc, hash, offset, len)
+            .await?
+            .read_to_bytes()
+            .await
+    }
+
+    /// Import a blob from a filesystem path.
+    ///
+    /// `path` should be an absolute path valid for the file system on which
+    /// the node runs.
+    /// If `in_place` is true, Iroh will assume that the data will not change and will share it in
+    /// place without copying to the Iroh data directory.
+    pub async fn add_from_path(
+        &self,
+        path: PathBuf,
+        in_place: bool,
+        tag: SetTagOption,
+        wrap: WrapOption,
+    ) -> Result<AddProgress> {
+        let stream = self
+            .rpc
+            .server_streaming(AddPathRequest {
+                path,
+                in_place,
+                tag,
+                wrap,
+            })
+            .await?;
+        Ok(AddProgress::new(stream))
+    }
+
+    /// Create a collection from already existing blobs.
+    ///
+    /// To automatically clear the tags for the passed in blobs you can set
+    /// `tags_to_delete` to those tags, and they will be deleted once the collection is created.
+    pub async fn create_collection(
+        &self,
+        collection: Collection,
+        tag: SetTagOption,
+        tags_to_delete: Vec<Tag>,
+    ) -> anyhow::Result<(Hash, Tag)> {
+        let CreateCollectionResponse { hash, tag } = self
+            .rpc
+            .rpc(CreateCollectionRequest {
+                collection,
+                tag,
+                tags_to_delete,
+            })
+            .await??;
+        Ok((hash, tag))
+    }
+
+    /// Write a blob by passing an async reader.
+    pub async fn add_reader(
+        &self,
+        reader: impl AsyncRead + Unpin + Send + 'static,
+        tag: SetTagOption,
+    ) -> anyhow::Result<AddProgress> {
+        const CAP: usize = 1024 * 64; // send 64KB per request by default
+        let input = ReaderStream::with_capacity(reader, CAP);
+        self.add_stream(input, tag).await
+    }
+
+    /// Write a blob by passing a stream of bytes.
+    pub async fn add_stream(
+        &self,
+        input: impl Stream<Item = io::Result<Bytes>> + Send + Unpin + 'static,
+        tag: SetTagOption,
+    ) -> anyhow::Result<AddProgress> {
+        let (mut sink, progress) = self.rpc.bidi(AddStreamRequest { tag }).await?;
+        let mut input = input.map(|chunk| match chunk {
+            Ok(chunk) => Ok(AddStreamUpdate::Chunk(chunk)),
+            Err(err) => {
+                warn!("Abort send, reason: failed to read from source stream: {err:?}");
+                Ok(AddStreamUpdate::Abort)
+            }
+        });
+        tokio::spawn(async move {
+            // TODO: Is it important to catch this error? It should also result in an error on the
+            // response stream. If we deem it important, we could one-shot send it into the
+            // BlobAddProgress and return from there. Not sure.
+            if let Err(err) = sink.send_all(&mut input).await {
+                warn!("Failed to send input stream to remote: {err:?}");
+            }
+        });
+
+        Ok(AddProgress::new(progress))
+    }
+
+    /// Write a blob by passing bytes.
+    pub async fn add_bytes(&self, bytes: impl Into<Bytes>) -> anyhow::Result<AddOutcome> {
+        let input = futures_lite::stream::once(Ok(bytes.into()));
+        self.add_stream(input, SetTagOption::Auto).await?.await
+    }
+
+    /// Write a blob by passing bytes, setting an explicit tag name.
+    pub async fn add_bytes_named(
+        &self,
+        bytes: impl Into<Bytes>,
+        name: impl Into<Tag>,
+    ) -> anyhow::Result<AddOutcome> {
+        let input = futures_lite::stream::once(Ok(bytes.into()));
+        self.add_stream(input, SetTagOption::Named(name.into()))
+            .await?
+            .await
+    }
+
+    /// Validate hashes on the running node.
+    ///
+    /// If `repair` is true, repair the store by removing invalid data.
+    pub async fn validate(
+        &self,
+        repair: bool,
+    ) -> Result<impl Stream<Item = Result<ValidateProgress>>> {
+        let stream = self
+            .rpc
+            .server_streaming(ValidateRequest { repair })
+            .await?;
+        Ok(stream.map(|res| res.map_err(anyhow::Error::from)))
+    }
+
+    /// Check the internal consistency of the blob store on the running node.
+    ///
+    /// If `repair` is true, repair the store by removing invalid data.
+    pub async fn consistency_check(
+        &self,
+        repair: bool,
+    ) -> Result<impl Stream<Item = Result<ConsistencyCheckProgress>>> {
+        let stream = self
+            .rpc
+            .server_streaming(ConsistencyCheckRequest { repair })
+            .await?;
+        Ok(stream.map(|r| r.map_err(anyhow::Error::from)))
+    }
+
+    /// Download a blob from another node and add it to the local database.
+    pub async fn download(&self, hash: Hash, node: NodeAddr) -> Result<DownloadProgress> {
+        self.download_with_opts(
+            hash,
+            DownloadOptions {
+                format: BlobFormat::Raw,
+                nodes: vec![node],
+                tag: SetTagOption::Auto,
+                mode: DownloadMode::Queued,
+            },
+        )
+        .await
+    }
+
+    /// Download a hash sequence from another node and add it to the local database.
+    pub async fn download_hash_seq(&self, hash: Hash, node: NodeAddr) -> Result<DownloadProgress> {
+        self.download_with_opts(
+            hash,
+            DownloadOptions {
+                format: BlobFormat::HashSeq,
+                nodes: vec![node],
+                tag: SetTagOption::Auto,
+                mode: DownloadMode::Queued,
+            },
+        )
+        .await
+    }
+
+    /// Download a blob, with additional options.
+    pub async fn download_with_opts(
+        &self,
+        hash: Hash,
+        opts: DownloadOptions,
+    ) -> Result<DownloadProgress> {
+        let DownloadOptions {
+            format,
+            nodes,
+            tag,
+            mode,
+        } = opts;
+        let stream = self
+            .rpc
+            .server_streaming(BlobDownloadRequest {
+                hash,
+                format,
+                nodes,
+                tag,
+                mode,
+            })
+            .await?;
+        Ok(DownloadProgress::new(
+            stream.map(|res| res.map_err(anyhow::Error::from)),
+        ))
+    }
+
+    /// Export a blob from the internal blob store to a path on the node's filesystem.
+    ///
+    /// `destination` should be a writeable, absolute path on the local node's filesystem.
+    ///
+    /// If `format` is set to [`ExportFormat::Collection`], and the `hash` refers to a collection,
+    /// all children of the collection will be exported. See [`ExportFormat`] for details.
+    ///
+    /// The `mode` argument defines if the blob should be copied to the target location or moved out of
+    /// the internal store into the target location. See [`ExportMode`] for details.
+    pub async fn export(
+        &self,
+        hash: Hash,
+        destination: PathBuf,
+        format: ExportFormat,
+        mode: ExportMode,
+    ) -> Result<ExportProgress> {
+        let req = ExportRequest {
+            hash,
+            path: destination,
+            format,
+            mode,
+        };
+        let stream = self.rpc.server_streaming(req).await?;
+        Ok(ExportProgress::new(
+            stream.map(|r| r.map_err(anyhow::Error::from)),
+        ))
+    }
+
+    /// List all complete blobs.
+    pub async fn list(&self) -> Result<impl Stream<Item = Result<BlobInfo>>> {
+        let stream = self.rpc.server_streaming(ListRequest).await?;
+        Ok(flatten(stream))
+    }
+
+    /// List all incomplete (partial) blobs.
+    pub async fn list_incomplete(&self) -> Result<impl Stream<Item = Result<IncompleteBlobInfo>>> {
+        let stream = self.rpc.server_streaming(ListIncompleteRequest).await?;
+        Ok(flatten(stream))
+    }
+
+    /// Read the content of a collection.
+    pub async fn get_collection(&self, hash: Hash) -> Result<Collection> {
+        Collection::load(hash, self).await
+    }
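+
+    // A sketch of a typical download flow built from the methods above,
+    // assuming `client`, a `hash`, and a `NodeAddr` of the providing node
+    // are in scope:
+    //
+    //     let outcome = client.download(hash, node_addr).await?.finish().await?;
+    //     println!("downloaded {} new bytes", outcome.downloaded_size);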
+
+    /// List all collections.
+    pub fn list_collections(&self) -> Result<impl Stream<Item = Result<CollectionInfo>>> {
+        let this = self.clone();
+        Ok(Gen::new(|co| async move {
+            if let Err(cause) = this.list_collections_impl(&co).await {
+                co.yield_(Err(cause)).await;
+            }
+        }))
+    }
+
+    async fn list_collections_impl(&self, co: &Co<Result<CollectionInfo>>) -> Result<()> {
+        let tags = self.tags_client();
+        let mut tags = tags.list_hash_seq().await?;
+        while let Some(tag) = tags.next().await {
+            let tag = tag?;
+            if let Ok(collection) = self.get_collection(tag.hash).await {
+                let info = CollectionInfo {
+                    tag: tag.name,
+                    hash: tag.hash,
+                    total_blobs_count: Some(collection.len() as u64 + 1),
+                    total_blobs_size: Some(0),
+                };
+                co.yield_(Ok(info)).await;
+            }
+        }
+        Ok(())
+    }
+
+    /// Delete a blob.
+    ///
+    /// **Warning**: this operation deletes the blob from the local store even
+    /// if it is tagged. You should usually not do this manually, but rely on the
+    /// node to remove data that is not tagged.
+    pub async fn delete_blob(&self, hash: Hash) -> Result<()> {
+        self.rpc.rpc(DeleteRequest { hash }).await??;
+        Ok(())
+    }
+
+    fn tags_client(&self) -> tags::Client<S, C> {
+        tags::Client {
+            rpc: self.rpc.clone(),
+        }
+    }
+}
+
+impl<S, C> SimpleStore for Client<S, C>
+where
+    S: quic_rpc::Service,
+    C: quic_rpc::ServiceConnection<S>,
+{
+    async fn load(&self, hash: Hash) -> anyhow::Result<Bytes> {
+        self.read_to_bytes(hash).await
+    }
+}
+
+/// Defines the way to read bytes.
+#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy)]
+pub enum ReadAtLen {
+    /// Reads all available bytes.
+    #[default]
+    All,
+    /// Reads exactly this many bytes, erroring out on larger or smaller.
+    Exact(u64),
+    /// Reads at most this many bytes.
+    AtMost(u64),
+}
+
+impl ReadAtLen {
+    pub(crate) fn as_result_len(&self, size_remaining: u64) -> u64 {
+        match self {
+            ReadAtLen::All => size_remaining,
+            ReadAtLen::Exact(len) => *len,
+            ReadAtLen::AtMost(len) => std::cmp::min(*len, size_remaining),
+        }
+    }
+}
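+
+// A sketch of how the `ReadAtLen` variants behave, assuming `client` and
+// `hash` refer to a stored blob of at least 120 bytes:
+//
+//     // exactly 100 bytes starting at offset 20, errors if out of bounds
+//     let chunk = client.read_at_to_bytes(hash, 20, ReadAtLen::Exact(100)).await?;
+//     // at most 100 bytes, truncated at the end of the blob
+//     let rest = client.read_at_to_bytes(hash, 20, ReadAtLen::AtMost(100)).await?;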
+
+/// Whether to wrap the added data in a collection.
+#[derive(Debug, Serialize, Deserialize, Default, Clone)]
+pub enum WrapOption {
+    /// Do not wrap the file or directory.
+    #[default]
+    NoWrap,
+    /// Wrap the file or directory in a collection.
+    Wrap {
+        /// Override the filename in the wrapping collection.
+        name: Option<String>,
+    },
+}
+
+/// Status information about a blob.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub enum BlobStatus {
+    /// The blob is not stored at all.
+    NotFound,
+    /// The blob is only stored partially.
+    Partial {
+        /// The size of the currently stored partial blob.
+        size: BaoBlobSize,
+    },
+    /// The blob is stored completely.
+    Complete {
+        /// The size of the blob.
+        size: u64,
+    },
+}
+
+/// Outcome of a blob add operation.
+#[derive(Debug, Clone)]
+pub struct AddOutcome {
+    /// The hash of the blob
+    pub hash: Hash,
+    /// The format of the blob
+    pub format: BlobFormat,
+    /// The size of the blob
+    pub size: u64,
+    /// The tag of the blob
+    pub tag: Tag,
+}
+
+/// Information about a stored collection.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct CollectionInfo {
+    /// Tag of the collection
+    pub tag: Tag,
+
+    /// Hash of the collection
+    pub hash: Hash,
+    /// Number of children in the collection
+    ///
+    /// This is an optional field, because the data is not always available.
+    pub total_blobs_count: Option<u64>,
+    /// Total size of the raw data referred to by all links
+    ///
+    /// This is an optional field, because the data is not always available.
+    pub total_blobs_size: Option<u64>,
+}
+
+/// Information about a complete blob.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct BlobInfo {
+    /// Location of the blob
+    pub path: String,
+    /// The hash of the blob
+    pub hash: Hash,
+    /// The size of the blob
+    pub size: u64,
+}
+
+/// Information about an incomplete blob.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct IncompleteBlobInfo {
+    /// The size we got
+    pub size: u64,
+    /// The size we expect
+    pub expected_size: u64,
+    /// The hash of the blob
+    pub hash: Hash,
+}
+
+/// Progress stream for blob add operations.
+#[derive(derive_more::Debug)]
+pub struct AddProgress {
+    #[debug(skip)]
+    stream:
+        Pin<Box<dyn Stream<Item = Result<crate::provider::AddProgress>> + Send + Unpin + 'static>>,
+    current_total_size: Arc<AtomicU64>,
+}
+
+impl AddProgress {
+    fn new(
+        stream: (impl Stream<
+            Item = Result<impl Into<crate::provider::AddProgress>, impl Into<anyhow::Error>>,
+        > + Send
+            + Unpin
+            + 'static),
+    ) -> Self {
+        let current_total_size = Arc::new(AtomicU64::new(0));
+        let total_size = current_total_size.clone();
+        let stream = stream.map(move |item| match item {
+            Ok(item) => {
+                let item = item.into();
+                if let crate::provider::AddProgress::Found { size, .. } = &item {
+                    total_size.fetch_add(*size, Ordering::Relaxed);
+                }
+                Ok(item)
+            }
+            Err(err) => Err(err.into()),
+        });
+        Self {
+            stream: Box::pin(stream),
+            current_total_size,
+        }
+    }
+
+    /// Finish writing the stream, ignoring all intermediate progress events.
+    ///
+    /// Returns an [`AddOutcome`] which contains a tag, format, hash and a size.
+    /// When importing a single blob, this is the hash and size of that blob.
+    /// When importing a collection, the hash is the hash of the collection and the size
+    /// is the total size of all imported blobs (but excluding the size of the collection blob
+    /// itself).
+    pub async fn finish(self) -> Result<AddOutcome> {
+        self.await
+    }
+}
+
+impl Stream for AddProgress {
+    type Item = Result<crate::provider::AddProgress>;
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        Pin::new(&mut self.stream).poll_next(cx)
+    }
+}
+
+impl Future for AddProgress {
+    type Output = Result<AddOutcome>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        loop {
+            match Pin::new(&mut self.stream).poll_next(cx) {
+                Poll::Pending => return Poll::Pending,
+                Poll::Ready(None) => {
+                    return Poll::Ready(Err(anyhow!("Response stream ended prematurely")))
+                }
+                Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err)),
+                Poll::Ready(Some(Ok(msg))) => match msg {
+                    crate::provider::AddProgress::AllDone { hash, format, tag } => {
+                        let outcome = AddOutcome {
+                            hash,
+                            format,
+                            tag,
+                            size: self.current_total_size.load(Ordering::Relaxed),
+                        };
+                        return Poll::Ready(Ok(outcome));
+                    }
+                    crate::provider::AddProgress::Abort(err) => {
+                        return Poll::Ready(Err(err.into()));
+                    }
+                    _ => {}
+                },
+            }
+        }
+    }
+}
+
+/// Outcome of a blob download operation.
+#[derive(Debug, Clone)]
+pub struct DownloadOutcome {
+    /// The size of the data we already had locally
+    pub local_size: u64,
+    /// The size of the data we downloaded from the network
+    pub downloaded_size: u64,
+    /// Statistics about the download
+    pub stats: crate::get::Stats,
+}
+
+/// Progress stream for blob download operations.
+#[derive(derive_more::Debug)]
+pub struct DownloadProgress {
+    #[debug(skip)]
+    stream: Pin<Box<dyn Stream<Item = Result<BytesDownloadProgress>> + Send + Unpin + 'static>>,
+    current_local_size: Arc<AtomicU64>,
+    current_network_size: Arc<AtomicU64>,
+}
+
+impl DownloadProgress {
+    /// Create a [`DownloadProgress`] that polls the [`BytesDownloadProgress`] stream
+    /// from your download until it finishes or errors.
+    pub fn new(
+        stream: (impl Stream<
+            Item = Result<impl Into<BytesDownloadProgress>, impl Into<anyhow::Error>>,
+        > + Send
+            + Unpin
+            + 'static),
+    ) -> Self {
+        let current_local_size = Arc::new(AtomicU64::new(0));
+        let current_network_size = Arc::new(AtomicU64::new(0));
+
+        let local_size = current_local_size.clone();
+        let network_size = current_network_size.clone();
+
+        let stream = stream.map(move |item| match item {
+            Ok(item) => {
+                let item = item.into();
+                match &item {
+                    BytesDownloadProgress::FoundLocal { size, .. } => {
+                        local_size.fetch_add(size.value(), Ordering::Relaxed);
+                    }
+                    BytesDownloadProgress::Found { size, .. } => {
+                        network_size.fetch_add(*size, Ordering::Relaxed);
+                    }
+                    _ => {}
+                }
+
+                Ok(item)
+            }
+            Err(err) => Err(err.into()),
+        });
+        Self {
+            stream: Box::pin(stream),
+            current_local_size,
+            current_network_size,
+        }
+    }
+
+    /// Finish writing the stream, ignoring all intermediate progress events.
+    ///
+    /// Returns a [`DownloadOutcome`] which contains the size of the content we downloaded and the size of the content we already had locally.
+    /// When importing a single blob, this is the size of that blob.
+    /// When importing a collection, this is the total size of all imported blobs (but excluding the size of the collection blob itself).
+    pub async fn finish(self) -> Result<DownloadOutcome> {
+        self.await
+    }
+}
+
+impl Stream for DownloadProgress {
+    type Item = Result<BytesDownloadProgress>;
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        Pin::new(&mut self.stream).poll_next(cx)
+    }
+}
+
+impl Future for DownloadProgress {
+    type Output = Result<DownloadOutcome>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        loop {
+            match Pin::new(&mut self.stream).poll_next(cx) {
+                Poll::Pending => return Poll::Pending,
+                Poll::Ready(None) => {
+                    return Poll::Ready(Err(anyhow!("Response stream ended prematurely")))
+                }
+                Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err)),
+                Poll::Ready(Some(Ok(msg))) => match msg {
+                    BytesDownloadProgress::AllDone(stats) => {
+                        let outcome = DownloadOutcome {
+                            local_size: self.current_local_size.load(Ordering::Relaxed),
+                            downloaded_size: self.current_network_size.load(Ordering::Relaxed),
+                            stats,
+                        };
+                        return Poll::Ready(Ok(outcome));
+                    }
+                    BytesDownloadProgress::Abort(err) => {
+                        return Poll::Ready(Err(err.into()));
+                    }
+                    _ => {}
+                },
+            }
+        }
+    }
+}
+
+/// Outcome of a blob export operation.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct ExportOutcome {
+    /// The total size of the exported data.
+    total_size: u64,
+}
+
+/// Progress stream for blob export operations.
+#[derive(derive_more::Debug)]
+pub struct ExportProgress {
+    #[debug(skip)]
+    stream: Pin<Box<dyn Stream<Item = Result<BytesExportProgress>> + Send + Unpin + 'static>>,
+    current_total_size: Arc<AtomicU64>,
+}
+
+impl ExportProgress {
+    /// Create an [`ExportProgress`] that polls the [`BytesExportProgress`] stream from your
+    /// export until it finishes or errors.
+    pub fn new(
+        stream: (impl Stream<
+            Item = Result<impl Into<BytesExportProgress>, impl Into<anyhow::Error>>,
+        > + Send
+            + Unpin
+            + 'static),
+    ) -> Self {
+        let current_total_size = Arc::new(AtomicU64::new(0));
+        let total_size = current_total_size.clone();
+        let stream = stream.map(move |item| match item {
+            Ok(item) => {
+                let item = item.into();
+                if let BytesExportProgress::Found { size, .. } = &item {
+                    let size = size.value();
+                    total_size.fetch_add(size, Ordering::Relaxed);
+                }
+
+                Ok(item)
+            }
+            Err(err) => Err(err.into()),
+        });
+        Self {
+            stream: Box::pin(stream),
+            current_total_size,
+        }
+    }
+
+    /// Finish writing the stream, ignoring all intermediate progress events.
+    ///
+    /// Returns an [`ExportOutcome`] which contains the size of the content we exported.
+    pub async fn finish(self) -> Result<ExportOutcome> {
+        self.await
+    }
+}
+
+impl Stream for ExportProgress {
+    type Item = Result<BytesExportProgress>;
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        Pin::new(&mut self.stream).poll_next(cx)
+    }
+}
+
+impl Future for ExportProgress {
+    type Output = Result<ExportOutcome>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        loop {
+            match Pin::new(&mut self.stream).poll_next(cx) {
+                Poll::Pending => return Poll::Pending,
+                Poll::Ready(None) => {
+                    return Poll::Ready(Err(anyhow!("Response stream ended prematurely")))
+                }
+                Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err)),
+                Poll::Ready(Some(Ok(msg))) => match msg {
+                    BytesExportProgress::AllDone => {
+                        let outcome = ExportOutcome {
+                            total_size: self.current_total_size.load(Ordering::Relaxed),
+                        };
+                        return Poll::Ready(Ok(outcome));
+                    }
+                    BytesExportProgress::Abort(err) => {
+                        return Poll::Ready(Err(err.into()));
+                    }
+                    _ => {}
+                },
+            }
+        }
+    }
+}
+
+/// Data reader for a single blob.
+///
+/// Implements [`AsyncRead`].
+#[derive(derive_more::Debug)]
+pub struct Reader {
+    size: u64,
+    response_size: u64,
+    is_complete: bool,
+    #[debug("StreamReader")]
+    stream: tokio_util::io::StreamReader<BoxStreamSync<'static, io::Result<Bytes>>, Bytes>,
+}
+
+impl Reader {
+    fn new(
+        size: u64,
+        response_size: u64,
+        is_complete: bool,
+        stream: BoxStreamSync<'static, io::Result<Bytes>>,
+    ) -> Self {
+        Self {
+            size,
+            response_size,
+            is_complete,
+            stream: StreamReader::new(stream),
+        }
+    }
+
+    pub(crate) async fn from_rpc_read<S, C>(
+        rpc: &RpcClient<S, C>,
+        hash: Hash,
+    ) -> anyhow::Result<Self>
+    where
+        C: quic_rpc::ServiceConnection<S>,
+        S: quic_rpc::Service,
+    {
+        Self::from_rpc_read_at(rpc, hash, 0, ReadAtLen::All).await
+    }
+
+    async fn from_rpc_read_at<S, C>(
+        rpc: &RpcClient<S, C>,
+        hash: Hash,
+        offset: u64,
+        len: ReadAtLen,
+    ) -> anyhow::Result<Self>
+    where
+        C: quic_rpc::ServiceConnection<S>,
+        S: quic_rpc::Service,
+    {
+        let stream = rpc
+            .server_streaming(ReadAtRequest { hash, offset, len })
+            .await?;
+        let mut stream = flatten(stream);
+
+        let (size, is_complete) = match stream.next().await {
+            Some(Ok(ReadAtResponse::Entry { size, is_complete })) => (size, is_complete),
+            Some(Err(err)) => return Err(err),
+            Some(Ok(_)) => return Err(anyhow!("Expected header frame, but got data frame")),
+            None => return Err(anyhow!("Expected header frame, but RPC stream was dropped")),
+        };
+
+        let stream = stream.map(|item| match item {
+            Ok(ReadAtResponse::Data { chunk }) => Ok(chunk),
+            Ok(_) => Err(io::Error::new(io::ErrorKind::Other, "Expected data frame")),
+            Err(err) => Err(io::Error::new(io::ErrorKind::Other, format!("{err}"))),
+        });
+        let len = len.as_result_len(size.value() - offset);
+        Ok(Self::new(size.value(), len, is_complete, Box::pin(stream)))
+    }
+
+    /// Total size of this blob.
+    pub fn size(&self) -> u64 {
+        self.size
+    }
+
+    /// Whether this blob has been downloaded completely.
+    ///
+    /// Returns false for partial blobs for which some chunks are missing.
+    pub fn is_complete(&self) -> bool {
+        self.is_complete
+    }
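+
+    // A sketch of streaming a blob straight to disk instead of buffering it
+    // in memory, assuming `reader` was obtained via `Client::read` and `path`
+    // is a hypothetical destination:
+    //
+    //     let mut file = tokio::fs::File::create(path).await?;
+    //     tokio::io::copy(&mut reader, &mut file).await?;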
+
+    /// Read all bytes of the blob.
+    pub async fn read_to_bytes(&mut self) -> anyhow::Result<Bytes> {
+        let mut buf = Vec::with_capacity(self.response_size as usize);
+        self.read_to_end(&mut buf).await?;
+        Ok(buf.into())
+    }
+}
+
+impl AsyncRead for Reader {
+    fn poll_read(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut ReadBuf<'_>,
+    ) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.stream).poll_read(cx, buf)
+    }
+}
+
+impl Stream for Reader {
+    type Item = io::Result<Bytes>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        Pin::new(&mut self.stream).get_pin_mut().poll_next(cx)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.stream.get_ref().size_hint()
+    }
+}
+
+/// Options to configure a download request.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DownloadOptions {
+    /// The format of the data to download.
+    pub format: BlobFormat,
+    /// Source nodes to download from.
+    ///
+    /// If set to more than a single node, they will all be tried. If `mode` is set to
+    /// [`DownloadMode::Direct`], they will be tried sequentially until a download succeeds.
+    /// If `mode` is set to [`DownloadMode::Queued`], the nodes may be dialed in parallel,
+    /// if the concurrency limits permit.
+    pub nodes: Vec<NodeAddr>,
+    /// Optional tag to tag the data with.
+    pub tag: SetTagOption,
+    /// Whether to directly start the download or add it to the download queue.
+    pub mode: DownloadMode,
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::hashseq::HashSeq;
+    use iroh_net::NodeId;
+    use rand::RngCore;
+    use testresult::TestResult;
+    use tokio::{io::AsyncWriteExt, sync::mpsc};
+
+    use super::*;
+
+    #[tokio::test]
+    async fn test_blob_create_collection() -> Result<()> {
+        let _guard = iroh_test::logging::setup();
+
+        let node = crate::node::Node::memory().spawn().await?;
+
+        // create temp file
+        let temp_dir = tempfile::tempdir().context("tempdir")?;
+
+        let in_root = temp_dir.path().join("in");
+        tokio::fs::create_dir_all(in_root.clone())
+            .await
+            .context("create dir all")?;
+
+        let mut paths = Vec::new();
+        for i in 0..5 {
+            let path = in_root.join(format!("test-{i}"));
+            let size = 100;
+            let mut buf = vec![0u8; size];
+            rand::thread_rng().fill_bytes(&mut buf);
+            let mut file = tokio::fs::File::create(path.clone())
+                .await
+                .context("create file")?;
+            file.write_all(&buf.clone()).await.context("write_all")?;
+            file.flush().await.context("flush")?;
+            paths.push(path);
+        }
+
+        let client = node.client();
+
+        let mut collection = Collection::default();
+        let mut tags = Vec::new();
+        // import files
+        for path in &paths {
+            let import_outcome = client
+                .blobs()
+                .add_from_path(
+                    path.to_path_buf(),
+                    false,
+                    SetTagOption::Auto,
+                    WrapOption::NoWrap,
+                )
+                .await
+                .context("import file")?
+                .finish()
+                .await
+                .context("import finish")?;
+
+            collection.push(
+                path.file_name().unwrap().to_str().unwrap().to_string(),
+                import_outcome.hash,
+            );
+            tags.push(import_outcome.tag);
+        }
+
+        let (hash, tag) = client
+            .blobs()
+            .create_collection(collection, SetTagOption::Auto, tags)
+            .await?;
+
+        let collections: Vec<_> = client.blobs().list_collections()?.try_collect().await?;
+
+        assert_eq!(collections.len(), 1);
+        {
+            let CollectionInfo {
+                tag: collection_tag,
+                hash: collection_hash,
+                total_blobs_count,
+                ..
+            } = &collections[0];
+            assert_eq!(collection_tag, &tag);
+            assert_eq!(collection_hash, &hash);
+            // 5 blobs + 1 meta
+            assert_eq!(total_blobs_count, &Some(5 + 1));
+        }
+
+        // check that "temp" tags have been deleted
+        let tags: Vec<_> = client.tags().list().await?.try_collect().await?;
+        assert_eq!(tags.len(), 1);
+        assert_eq!(tags[0].hash, hash);
+        assert_eq!(tags[0].name, tag);
+        assert_eq!(tags[0].format, BlobFormat::HashSeq);
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_blob_read_at() -> Result<()> {
+        // let _guard = iroh_test::logging::setup();
+
+        let node = crate::node::Node::memory().spawn().await?;
+
+        // create temp file
+        let temp_dir = tempfile::tempdir().context("tempdir")?;
+
+        let in_root = temp_dir.path().join("in");
+        tokio::fs::create_dir_all(in_root.clone())
+            .await
+            .context("create dir all")?;
+
+        let path = in_root.join("test-blob");
+        let size = 1024 * 128;
+        let buf: Vec<u8> = (0..size).map(|i| i as u8).collect();
+        let mut file = tokio::fs::File::create(path.clone())
+            .await
+            .context("create file")?;
+        file.write_all(&buf.clone()).await.context("write_all")?;
+        file.flush().await.context("flush")?;
+
+        let client = node.client();
+
+        let import_outcome = client
+            .blobs()
+            .add_from_path(
+                path.to_path_buf(),
+                false,
+                SetTagOption::Auto,
+                WrapOption::NoWrap,
+            )
+            .await
+            .context("import file")?
+            .finish()
+            .await
+            .context("import finish")?;
+
+        let hash = import_outcome.hash;
+
+        // Read everything
+        let res = client.blobs().read_to_bytes(hash).await?;
+        assert_eq!(&res, &buf[..]);
+
+        // Read at smaller than blob_get_chunk_size
+        let res = client
+            .blobs()
+            .read_at_to_bytes(hash, 0, ReadAtLen::Exact(100))
+            .await?;
+        assert_eq!(res.len(), 100);
+        assert_eq!(&res[..], &buf[0..100]);
+
+        let res = client
+            .blobs()
+            .read_at_to_bytes(hash, 20, ReadAtLen::Exact(120))
+            .await?;
+        assert_eq!(res.len(), 120);
+        assert_eq!(&res[..], &buf[20..140]);
+
+        // Read at equal to blob_get_chunk_size
+        let res = client
+            .blobs()
+            .read_at_to_bytes(hash, 0, ReadAtLen::Exact(1024 * 64))
+            .await?;
+        assert_eq!(res.len(), 1024 * 64);
+        assert_eq!(&res[..], &buf[0..1024 * 64]);
+
+        let res = client
+            .blobs()
+            .read_at_to_bytes(hash, 20, ReadAtLen::Exact(1024 * 64))
+            .await?;
+        assert_eq!(res.len(), 1024 * 64);
+        assert_eq!(&res[..], &buf[20..(20 + 1024 * 64)]);
+
+        // Read at larger than blob_get_chunk_size
+        let res = client
+            .blobs()
+            .read_at_to_bytes(hash, 0, ReadAtLen::Exact(10 + 1024 * 64))
+            .await?;
+        assert_eq!(res.len(), 10 + 1024 * 64);
+        assert_eq!(&res[..], &buf[0..(10 + 1024 * 64)]);
+
+        let res = client
+            .blobs()
+            .read_at_to_bytes(hash, 20, ReadAtLen::Exact(10 + 1024 * 64))
+            .await?;
+        assert_eq!(res.len(), 10 + 1024 * 64);
+        assert_eq!(&res[..], &buf[20..(20 + 10 + 1024 * 64)]);
+
+        // full length
+        let res = client
+            .blobs()
+            .read_at_to_bytes(hash, 20, ReadAtLen::All)
+            .await?;
+        assert_eq!(res.len(), 1024 * 128 - 20);
+        assert_eq!(&res[..], &buf[20..]);
+
+        // size should be total
+        let reader = client
+            .blobs()
+            .read_at(hash, 0, ReadAtLen::Exact(20))
+            .await?;
+        assert_eq!(reader.size(), 1024 * 128);
+        assert_eq!(reader.response_size, 20);
+
+        // last chunk - exact
+        let res = client
+            .blobs()
+            .read_at_to_bytes(hash, 1024 * 127, ReadAtLen::Exact(1024))
+            .await?;
+        assert_eq!(res.len(), 1024);
+        assert_eq!(res, &buf[1024 * 127..]);
+
+        // last chunk - open
+        let res = client
+            .blobs()
+            .read_at_to_bytes(hash, 1024 * 127, ReadAtLen::All)
+            .await?;
+        assert_eq!(res.len(), 1024);
+        assert_eq!(res, &buf[1024 * 127..]);
+
+        // last chunk - larger
+        let mut res = client
+            .blobs()
+            .read_at(hash, 1024 * 127, ReadAtLen::AtMost(2048))
+            .await?;
+        assert_eq!(res.size, 1024 * 128);
+        assert_eq!(res.response_size, 1024);
+        let res = res.read_to_bytes().await?;
+        assert_eq!(res.len(), 1024);
+        assert_eq!(res, &buf[1024 * 127..]);
+
+        // out of bounds - too long
+        let res = client
+            .blobs()
+            .read_at(hash, 0, ReadAtLen::Exact(1024 * 128 + 1))
+            .await;
+        let err = res.unwrap_err();
+        assert!(err.to_string().contains("out of bound"));
+
+        // out of bounds - offset larger than blob
+        let res = client
+            .blobs()
+            .read_at(hash, 1024 * 128 + 1, ReadAtLen::All)
+            .await;
+        let err = res.unwrap_err();
+        assert!(err.to_string().contains("out of range"));
+
+        // out of bounds - offset + length too large
+        let res = client
+            .blobs()
+            .read_at(hash, 1024 * 127, ReadAtLen::Exact(1025))
+            .await;
+        let err = res.unwrap_err();
+        assert!(err.to_string().contains("out of bound"));
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_blob_get_collection() -> Result<()> {
+        let _guard = iroh_test::logging::setup();
+
+        let node = crate::node::Node::memory().spawn().await?;
+
+        // create temp file
+        let temp_dir = tempfile::tempdir().context("tempdir")?;
+
+        let in_root = temp_dir.path().join("in");
+        tokio::fs::create_dir_all(in_root.clone())
+            .await
+            .context("create dir all")?;
+
+        let mut paths = Vec::new();
+        for i in 0..5 {
+            let path = in_root.join(format!("test-{i}"));
+            let size = 100;
+            let mut buf = vec![0u8; size];
+            rand::thread_rng().fill_bytes(&mut buf);
+            let mut file = tokio::fs::File::create(path.clone())
+                .await
+                .context("create file")?;
+            file.write_all(&buf.clone()).await.context("write_all")?;
+            file.flush().await.context("flush")?;
+            paths.push(path);
+        }
+
+        let client = node.client();
+
+        let mut collection = Collection::default();
+        let mut tags = Vec::new();
+        // import files
+        for path in &paths {
+            let import_outcome = client
+                .blobs()
+                .add_from_path(
+                    path.to_path_buf(),
+                    false,
+                    SetTagOption::Auto,
+                    WrapOption::NoWrap,
+                )
+                .await
+                .context("import file")?
+                .finish()
+                .await
+                .context("import finish")?;
+
+            collection.push(
+                path.file_name().unwrap().to_str().unwrap().to_string(),
+                import_outcome.hash,
+            );
+            tags.push(import_outcome.tag);
+        }
+
+        let (hash, _tag) = client
+            .blobs()
+            .create_collection(collection, SetTagOption::Auto, tags)
+            .await?;
+
+        let collection = client.blobs().get_collection(hash).await?;
+
+        // 5 blobs
+        assert_eq!(collection.len(), 5);
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_blob_share() -> Result<()> {
+        let _guard = iroh_test::logging::setup();
+
+        let node = crate::node::Node::memory().spawn().await?;
+
+        // create temp file
+        let temp_dir = tempfile::tempdir().context("tempdir")?;
+
+        let in_root = temp_dir.path().join("in");
+        tokio::fs::create_dir_all(in_root.clone())
+            .await
+            .context("create dir all")?;
+
+        let path = in_root.join("test-blob");
+        let size = 1024 * 128;
+        let buf: Vec<u8> = (0..size).map(|i| i as u8).collect();
+        let mut file = tokio::fs::File::create(path.clone())
+            .await
+            .context("create file")?;
+        file.write_all(&buf.clone()).await.context("write_all")?;
+        file.flush().await.context("flush")?;
+
+        let client = node.client();
+
+        let import_outcome = client
+            .blobs()
+            .add_from_path(
+                path.to_path_buf(),
+                false,
+                SetTagOption::Auto,
+                WrapOption::NoWrap,
+            )
+            .await
+            .context("import file")?
+            .finish()
+            .await
+            .context("import finish")?;
+
+        let ticket = client
+            .blobs()
+            .share(import_outcome.hash, BlobFormat::Raw, Default::default())
+            .await?;
+        assert_eq!(ticket.hash(), import_outcome.hash);
+
+        let status = client.blobs().status(import_outcome.hash).await?;
+        assert_eq!(status, BlobStatus::Complete { size });
+
+        Ok(())
+    }
+
+    #[derive(Debug, Clone)]
+    struct BlobEvents {
+        sender: mpsc::Sender<crate::provider::Event>,
+    }
+
+    impl BlobEvents {
+        fn new(cap: usize) -> (Self, mpsc::Receiver<crate::provider::Event>) {
+            let (s, r) = mpsc::channel(cap);
+            (Self { sender: s }, r)
+        }
+    }
+
+    impl crate::provider::CustomEventSender for BlobEvents {
+        fn send(&self, event: crate::provider::Event) -> futures_lite::future::Boxed<()> {
+            let sender = self.sender.clone();
+            Box::pin(async move {
+                sender.send(event).await.ok();
+            })
+        }
+
+        fn try_send(&self, event: crate::provider::Event) {
+            self.sender.try_send(event).ok();
+        }
+    }
+
+    #[tokio::test]
+    async fn test_blob_provide_events() -> Result<()> {
+        let _guard = iroh_test::logging::setup();
+
+        let (node1_events, mut node1_events_r) = BlobEvents::new(16);
+        let node1 = crate::node::Node::memory()
+            .blobs_events(node1_events)
+            .spawn()
+            .await?;
+
+        let (node2_events, mut node2_events_r) = BlobEvents::new(16);
+        let node2 = crate::node::Node::memory()
+            .blobs_events(node2_events)
+            .spawn()
+            .await?;
+
+        let import_outcome = node1.blobs().add_bytes(&b"hello world"[..]).await?;
+
+        // Download in node2
+        let node1_addr = node1.net().node_addr().await?;
+        let res = node2
+            .blobs()
+            .download(import_outcome.hash, node1_addr)
+            .await?
+            .await?;
+        dbg!(&res);
+        assert_eq!(res.local_size, 0);
+        assert_eq!(res.downloaded_size, 11);
+
+        node1.shutdown().await?;
+        node2.shutdown().await?;
+
+        let mut ev1 = Vec::new();
+        while let Some(ev) = node1_events_r.recv().await {
+            ev1.push(ev);
+        }
+        // assert_eq!(ev1.len(), 3);
+        assert!(matches!(
+            ev1[0],
+            crate::provider::Event::ClientConnected { .. }
+        ));
+        assert!(matches!(
+            ev1[1],
+            crate::provider::Event::GetRequestReceived { .. }
+        ));
+        assert!(matches!(
+            ev1[2],
+            crate::provider::Event::TransferProgress { .. }
+        ));
+        assert!(matches!(
+            ev1[3],
+            crate::provider::Event::TransferCompleted { .. }
+        ));
+        dbg!(&ev1);
+
+        let mut ev2 = Vec::new();
+        while let Some(ev) = node2_events_r.recv().await {
+            ev2.push(ev);
+        }
+
+        // Node 2 did not provide anything
+        assert!(ev2.is_empty());
+        Ok(())
+    }
+
+    /// Download an existing blob from oneself
+    #[tokio::test]
+    async fn test_blob_get_self_existing() -> TestResult<()> {
+        let _guard = iroh_test::logging::setup();
+
+        let node = crate::node::Node::memory().spawn().await?;
+        let node_id = node.node_id();
+        let client = node.client();
+
+        let AddOutcome { hash, size, .. } = client.blobs().add_bytes("foo").await?;
+
+        // Direct
+        let res = client
+            .blobs()
+            .download_with_opts(
+                hash,
+                DownloadOptions {
+                    format: BlobFormat::Raw,
+                    nodes: vec![node_id.into()],
+                    tag: SetTagOption::Auto,
+                    mode: DownloadMode::Direct,
+                },
+            )
+            .await?
+            .await?;
+
+        assert_eq!(res.local_size, size);
+        assert_eq!(res.downloaded_size, 0);
+
+        // Queued
+        let res = client
+            .blobs()
+            .download_with_opts(
+                hash,
+                DownloadOptions {
+                    format: BlobFormat::Raw,
+                    nodes: vec![node_id.into()],
+                    tag: SetTagOption::Auto,
+                    mode: DownloadMode::Queued,
+                },
+            )
+            .await?
+            .await?;
+
+        assert_eq!(res.local_size, size);
+        assert_eq!(res.downloaded_size, 0);
+
+        Ok(())
+    }
+
+    /// Download a missing blob from oneself
+    #[tokio::test]
+    async fn test_blob_get_self_missing() -> TestResult<()> {
+        let _guard = iroh_test::logging::setup();
+
+        let node = crate::node::Node::memory().spawn().await?;
+        let node_id = node.node_id();
+        let client = node.client();
+
+        let hash = Hash::from_bytes([0u8; 32]);
+
+        // Direct
+        let res = client
+            .blobs()
+            .download_with_opts(
+                hash,
+                DownloadOptions {
+                    format: BlobFormat::Raw,
+                    nodes: vec![node_id.into()],
+                    tag: SetTagOption::Auto,
+                    mode: DownloadMode::Direct,
+                },
+            )
+            .await?
+            .await;
+        assert!(res.is_err());
+        assert_eq!(
+            res.err().unwrap().to_string().as_str(),
+            "No nodes to download from provided"
+        );
+
+        // Queued
+        let res = client
+            .blobs()
+            .download_with_opts(
+                hash,
+                DownloadOptions {
+                    format: BlobFormat::Raw,
+                    nodes: vec![node_id.into()],
+                    tag: SetTagOption::Auto,
+                    mode: DownloadMode::Queued,
+                },
+            )
+            .await?
+            .await;
+        assert!(res.is_err());
+        assert_eq!(
+            res.err().unwrap().to_string().as_str(),
+            "No provider nodes found"
+        );
+
+        Ok(())
+    }
+
+    /// Download an existing collection. Check that things succeed and no download is performed.
+    #[tokio::test]
+    async fn test_blob_get_existing_collection() -> TestResult<()> {
+        let _guard = iroh_test::logging::setup();
+
+        let node = crate::node::Node::memory().spawn().await?;
+        // We use a nonexistent node id because we just want to check that this succeeds without
+        // hitting the network.
+        let node_id = NodeId::from_bytes(&[0u8; 32])?;
+        let client = node.client();
+
+        let mut collection = Collection::default();
+        let mut tags = Vec::new();
+        let mut size = 0;
+        for value in ["iroh", "is", "cool"] {
+            let import_outcome = client.blobs().add_bytes(value).await.context("add bytes")?;
+            collection.push(value.to_string(), import_outcome.hash);
+            tags.push(import_outcome.tag);
+            size += import_outcome.size;
+        }
+
+        let (hash, _tag) = client
+            .blobs()
+            .create_collection(collection, SetTagOption::Auto, tags)
+            .await?;
+
+        // load the hashseq and collection header manually to calculate our expected size
+        let hashseq_bytes = client.blobs().read_to_bytes(hash).await?;
+        size += hashseq_bytes.len() as u64;
+        let hashseq = HashSeq::try_from(hashseq_bytes)?;
+        let collection_header_bytes = client
+            .blobs()
+            .read_to_bytes(hashseq.into_iter().next().expect("header to exist"))
+            .await?;
+        size += collection_header_bytes.len() as u64;
+
+        // Direct
+        let res = client
+            .blobs()
+            .download_with_opts(
+                hash,
+                DownloadOptions {
+                    format: BlobFormat::HashSeq,
+                    nodes: vec![node_id.into()],
+                    tag: SetTagOption::Auto,
+                    mode: DownloadMode::Direct,
+                },
+            )
+            .await?
+            .await
+            .context("direct (download)")?;
+
+        assert_eq!(res.local_size, size);
+        assert_eq!(res.downloaded_size, 0);
+
+        // Queued
+        let res = client
+            .blobs()
+            .download_with_opts(
+                hash,
+                DownloadOptions {
+                    format: BlobFormat::HashSeq,
+                    nodes: vec![node_id.into()],
+                    tag: SetTagOption::Auto,
+                    mode: DownloadMode::Queued,
+                },
+            )
+            .await?
+ .await + .context("queued")?; + + assert_eq!(res.local_size, size); + assert_eq!(res.downloaded_size, 0); + + Ok(()) + } + + #[tokio::test] + #[cfg_attr(target_os = "windows", ignore = "flaky")] + async fn test_blob_delete_mem() -> Result<()> { + let _guard = iroh_test::logging::setup(); + + let node = crate::node::Node::memory().spawn().await?; + + let res = node.blobs().add_bytes(&b"hello world"[..]).await?; + + let hashes: Vec<_> = node.blobs().list().await?.try_collect().await?; + assert_eq!(hashes.len(), 1); + assert_eq!(hashes[0].hash, res.hash); + + // delete + node.blobs().delete_blob(res.hash).await?; + + let hashes: Vec<_> = node.blobs().list().await?.try_collect().await?; + assert!(hashes.is_empty()); + + Ok(()) + } + + #[tokio::test] + async fn test_blob_delete_fs() -> Result<()> { + let _guard = iroh_test::logging::setup(); + + let dir = tempfile::tempdir()?; + let node = crate::node::Node::persistent(dir.path()) + .await? + .spawn() + .await?; + + let res = node.blobs().add_bytes(&b"hello world"[..]).await?; + + let hashes: Vec<_> = node.blobs().list().await?.try_collect().await?; + assert_eq!(hashes.len(), 1); + assert_eq!(hashes[0].hash, res.hash); + + // delete + node.blobs().delete_blob(res.hash).await?; + + let hashes: Vec<_> = node.blobs().list().await?.try_collect().await?; + assert!(hashes.is_empty()); + + Ok(()) + } +} diff --git a/src/rpc/client/blobs/batch.rs b/src/rpc/client/blobs/batch.rs new file mode 100644 index 000000000..942e1dbd3 --- /dev/null +++ b/src/rpc/client/blobs/batch.rs @@ -0,0 +1,476 @@ +use std::{ + io, + path::PathBuf, + sync::{Arc, Mutex}, +}; + +use crate::{ + format::collection::Collection, + net_protocol::BatchId, + provider::BatchAddPathProgress, + store::ImportMode, + util::{SetTagOption, TagDrop}, + BlobFormat, HashAndFormat, Tag, TempTag, +}; +use anyhow::{anyhow, Context, Result}; +use bytes::Bytes; +use futures_buffered::BufferedStreamExt; +use futures_lite::StreamExt; +use futures_util::{sink::Buffer, FutureExt, SinkExt, Stream}; +use quic_rpc::{client::UpdateSink, RpcClient}; +use tokio::io::AsyncRead; +use tokio_util::io::ReaderStream; +use tracing::{debug, warn}; + +use super::WrapOption; +use crate::rpc::proto::{ + blobs::{ + BatchAddPathRequest, BatchAddStreamRequest, BatchAddStreamResponse, BatchAddStreamUpdate, + BatchCreateTempTagRequest, BatchUpdate, + }, + tags::{self, SyncMode}, +}; + +/// A scope in which blobs can be added. +#[derive(derive_more::Debug)] +struct BatchInner +where + C: quic_rpc::ServiceConnection, + S: quic_rpc::Service, +{ + /// The id of the scope. + batch: BatchId, + /// The rpc client. + rpc: RpcClient, + /// The stream to send drop + #[debug(skip)] + updates: + Mutex, BatchUpdate>>, +} + +/// A batch for write operations. +/// +/// This serves mostly as a scope for temporary tags. +/// +/// It is not a transaction, so things in a batch are not atomic. Also, there is +/// no isolation between batches. +#[derive(derive_more::Debug)] +pub struct Batch(Arc>) +where + C: quic_rpc::ServiceConnection, + S: quic_rpc::Service; + +impl TagDrop for BatchInner +where + C: quic_rpc::ServiceConnection, + S: quic_rpc::Service, +{ + fn on_drop(&self, content: &HashAndFormat) { + let mut updates = self.updates.lock().unwrap(); + // make a spirited attempt to notify the server that we are dropping the content + // + // this will occasionally fail, but that's acceptable. The temp tags for the batch + // will be cleaned up as soon as the entire batch is dropped. + // + // E.g. 
a typical scenario is that you create a large array of temp tags, and then + // store them in a hash sequence and then drop the array. You will get many drops + // at the same time, and might get a send failure here. + // + // But that just means that the server will clean up the temp tags when the batch is + // dropped. + updates.feed(BatchUpdate::Drop(*content)).now_or_never(); + updates.flush().now_or_never(); + } +} + +/// Options for adding a file as a blob +#[derive(Debug, Clone, Copy, Default)] +pub struct AddFileOpts { + /// The import mode + pub import_mode: ImportMode, + /// The format of the blob + pub format: BlobFormat, +} + +/// Options for adding a directory as a collection +#[derive(Debug, Clone)] +pub struct AddDirOpts { + /// The import mode + pub import_mode: ImportMode, + /// Whether to preserve the directory name + pub wrap: WrapOption, + /// Io parallelism + pub io_parallelism: usize, +} + +impl Default for AddDirOpts { + fn default() -> Self { + Self { + import_mode: ImportMode::TryReference, + wrap: WrapOption::NoWrap, + io_parallelism: 4, + } + } +} + +/// Options for adding a directory as a collection +#[derive(Debug, Clone)] +pub struct AddReaderOpts { + /// The format of the blob + pub format: BlobFormat, + /// Size of the chunks to send + pub chunk_size: usize, +} + +impl Default for AddReaderOpts { + fn default() -> Self { + Self { + format: BlobFormat::Raw, + chunk_size: 1024 * 64, + } + } +} + +impl Batch +where + C: quic_rpc::ServiceConnection, + S: quic_rpc::Service, +{ + pub(super) fn new( + batch: BatchId, + rpc: RpcClient, + updates: UpdateSink, + buffer_size: usize, + ) -> Self { + let updates = updates.buffer(buffer_size); + Self(Arc::new(BatchInner { + batch, + rpc, + updates: updates.into(), + })) + } + + /// Write a blob by passing bytes. + pub async fn add_bytes(&self, bytes: impl Into) -> Result { + self.add_bytes_with_opts(bytes, Default::default()).await + } + + /// Import a blob from a filesystem path, using the default options. + /// + /// For more control, use [`Self::add_file_with_opts`]. + pub async fn add_file(&self, path: PathBuf) -> Result<(TempTag, u64)> { + self.add_file_with_opts(path, AddFileOpts::default()).await + } + + /// Add a directory as a hashseq in iroh collection format + pub async fn add_dir(&self, root: PathBuf) -> Result { + self.add_dir_with_opts(root, Default::default()).await + } + + /// Write a blob by passing an async reader. + /// + /// This will consume the stream in 64KB chunks, and use a format of [BlobFormat::Raw]. + /// + /// For more options, see [`Self::add_reader_with_opts`]. + pub async fn add_reader( + &self, + reader: impl AsyncRead + Unpin + Send + 'static, + ) -> anyhow::Result { + self.add_reader_with_opts(reader, Default::default()).await + } + + /// Write a blob by passing a stream of bytes. + pub async fn add_stream( + &self, + input: impl Stream> + Send + Unpin + 'static, + ) -> Result { + self.add_stream_with_opts(input, Default::default()).await + } + + /// Creates a temp tag to protect some content (blob or hashseq) from being deleted. + /// + /// This is a lower-level API. The other functions in [`Batch`] already create [`TempTag`]s automatically. + /// + /// [`TempTag`]s allow you to protect some data from deletion while a download is ongoing, + /// even if you don't want to protect it permanently. 
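+    ///
+    /// A rough usage sketch (hedged: `batch` and `hash` are assumed to already
+    /// exist in scope; this is illustrative, not a verbatim API transcript):
+    ///
+    /// ```ignore
+    /// use iroh_blobs::{BlobFormat, HashAndFormat};
+    ///
+    /// // keep `hash` alive while we assemble data that references it
+    /// let tt = batch
+    ///     .temp_tag(HashAndFormat { hash, format: BlobFormat::Raw })
+    ///     .await?;
+    /// // ... work that relies on the content not being garbage collected ...
+    /// drop(tt); // the content becomes eligible for deletion again
+    /// ```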
+ pub async fn temp_tag(&self, content: HashAndFormat) -> Result { + // Notify the server that we want one temp tag for the given content + self.0 + .rpc + .rpc(BatchCreateTempTagRequest { + batch: self.0.batch, + content, + }) + .await??; + // Only after success of the above call, we can create the corresponding local temp tag + Ok(self.local_temp_tag(content, None)) + } + + /// Write a blob by passing an async reader. + /// + /// This consumes the stream in chunks using `opts.chunk_size`. A good default is 64KB. + pub async fn add_reader_with_opts( + &self, + reader: impl AsyncRead + Unpin + Send + 'static, + opts: AddReaderOpts, + ) -> anyhow::Result { + let AddReaderOpts { format, chunk_size } = opts; + let input = ReaderStream::with_capacity(reader, chunk_size); + self.add_stream_with_opts(input, format).await + } + + /// Write a blob by passing bytes. + pub async fn add_bytes_with_opts( + &self, + bytes: impl Into, + format: BlobFormat, + ) -> Result { + let input = futures_lite::stream::once(Ok(bytes.into())); + self.add_stream_with_opts(input, format).await + } + + /// Import a blob from a filesystem path. + /// + /// `path` should be an absolute path valid for the file system on which + /// the node runs, which refers to a file. + /// + /// If you use [`ImportMode::TryReference`], Iroh will assume that the data will not + /// change and will share it in place without copying to the Iroh data directory + /// if appropriate. However, for tiny files, Iroh will copy the data. + /// + /// If you use [`ImportMode::Copy`], Iroh will always copy the data. + /// + /// Will return a temp tag for the added blob, as well as the size of the file. + pub async fn add_file_with_opts( + &self, + path: PathBuf, + opts: AddFileOpts, + ) -> Result<(TempTag, u64)> { + let AddFileOpts { + import_mode, + format, + } = opts; + anyhow::ensure!( + path.is_absolute(), + "Path must be absolute, but got: {:?}", + path + ); + anyhow::ensure!(path.is_file(), "Path does not refer to a file: {:?}", path); + let mut stream = self + .0 + .rpc + .server_streaming(BatchAddPathRequest { + path, + import_mode, + format, + batch: self.0.batch, + }) + .await?; + let mut res_hash = None; + let mut res_size = None; + while let Some(item) = stream.next().await { + match item?.0 { + BatchAddPathProgress::Abort(cause) => { + Err(cause)?; + } + BatchAddPathProgress::Done { hash } => { + res_hash = Some(hash); + } + BatchAddPathProgress::Found { size } => { + res_size = Some(size); + } + _ => {} + } + } + let hash = res_hash.context("Missing hash")?; + let size = res_size.context("Missing size")?; + Ok(( + self.local_temp_tag(HashAndFormat { hash, format }, Some(size)), + size, + )) + } + + /// Add a directory as a hashseq in iroh collection format + /// + /// This can also be used to add a single file as a collection, if + /// wrap is set to [WrapOption::Wrap]. + /// + /// However, if you want to add a single file as a raw blob, use add_file instead. 
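+    ///
+    /// Sketch of wrapping a single file into a one-entry collection (the path
+    /// and option values are illustrative assumptions, not prescriptions):
+    ///
+    /// ```ignore
+    /// use iroh_blobs::store::ImportMode;
+    ///
+    /// let opts = AddDirOpts {
+    ///     import_mode: ImportMode::TryReference,
+    ///     wrap: WrapOption::Wrap { name: None },
+    ///     io_parallelism: 4,
+    /// };
+    /// let tag = batch.add_dir_with_opts("/data/report.pdf".into(), opts).await?;
+    /// ```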
+ pub async fn add_dir_with_opts(&self, root: PathBuf, opts: AddDirOpts) -> Result { + let AddDirOpts { + import_mode, + wrap, + io_parallelism, + } = opts; + anyhow::ensure!(root.is_absolute(), "Path must be absolute"); + + // let (send, recv) = flume::bounded(32); + // let import_progress = FlumeProgressSender::new(send); + + // import all files below root recursively + let data_sources = crate::util::fs::scan_path(root, wrap)?; + let opts = AddFileOpts { + import_mode, + format: BlobFormat::Raw, + }; + let result: Vec<_> = futures_lite::stream::iter(data_sources) + .map(|source| { + // let import_progress = import_progress.clone(); + async move { + let name = source.name().to_string(); + let (tag, size) = self + .add_file_with_opts(source.path().to_owned(), opts) + .await?; + let hash = *tag.hash(); + anyhow::Ok((name, hash, size, tag)) + } + }) + .buffered_ordered(io_parallelism) + .try_collect() + .await?; + + // create a collection + let (collection, child_tags): (Collection, Vec<_>) = result + .into_iter() + .map(|(name, hash, _, tag)| ((name, hash), tag)) + .unzip(); + + let tag = self.add_collection(collection).await?; + drop(child_tags); + Ok(tag) + } + + /// Write a blob by passing a stream of bytes. + /// + /// For convenient interop with common sources of data, this function takes a stream of `io::Result`. + /// If you have raw bytes, you need to wrap them in `io::Result::Ok`. + pub async fn add_stream_with_opts( + &self, + mut input: impl Stream> + Send + Unpin + 'static, + format: BlobFormat, + ) -> Result { + let (mut sink, mut stream) = self + .0 + .rpc + .bidi(BatchAddStreamRequest { + batch: self.0.batch, + format, + }) + .await?; + let mut size = 0u64; + while let Some(item) = input.next().await { + match item { + Ok(chunk) => { + size += chunk.len() as u64; + sink.send(BatchAddStreamUpdate::Chunk(chunk)) + .await + .map_err(|err| anyhow!("Failed to send input stream to remote: {err:?}"))?; + } + Err(err) => { + warn!("Abort send, reason: failed to read from source stream: {err:?}"); + sink.send(BatchAddStreamUpdate::Abort) + .await + .map_err(|err| anyhow!("Failed to send input stream to remote: {err:?}"))?; + break; + } + } + } + // this is needed for the remote to notice that the stream is closed + drop(sink); + let mut res = None; + while let Some(item) = stream.next().await { + match item? { + BatchAddStreamResponse::Abort(cause) => { + Err(cause)?; + } + BatchAddStreamResponse::Result { hash } => { + res = Some(hash); + } + _ => {} + } + } + let hash = res.context("Missing answer")?; + Ok(self.local_temp_tag(HashAndFormat { hash, format }, Some(size))) + } + + /// Add a collection. + /// + /// This is a convenience function that converts the collection into two blobs + /// (the metadata and the hash sequence) and adds them, returning a temp tag for + /// the hash sequence. + /// + /// Note that this does not guarantee that the data that the collection refers to + /// actually exists. It will just create 2 blobs, the metadata and the hash sequence + /// itself. + pub async fn add_collection(&self, collection: Collection) -> Result { + self.add_blob_seq(collection.to_blobs()).await + } + + /// Add a sequence of blobs, where the last is a hash sequence. + /// + /// It is a common pattern in iroh to have a hash sequence with one or more + /// blobs of metadata, and the remaining blobs being the actual data. E.g. + /// a collection is a hash sequence where the first child is the metadata. 
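+    ///
+    /// A hedged sketch: `meta` and `payload` are illustrative byte blobs, and
+    /// the final element must be the serialized hash sequence referencing them:
+    ///
+    /// ```ignore
+    /// let blobs = vec![meta, payload, hash_seq_bytes].into_iter();
+    /// // returns a temp tag for the trailing hash sequence
+    /// let tt = batch.add_blob_seq(blobs).await?;
+    /// ```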
+ pub async fn add_blob_seq(&self, iter: impl Iterator) -> Result { + let mut blobs = iter.peekable(); + // put the tags somewhere + let mut tags = vec![]; + loop { + let blob = blobs.next().context("Failed to get next blob")?; + if blobs.peek().is_none() { + return self.add_bytes_with_opts(blob, BlobFormat::HashSeq).await; + } else { + tags.push(self.add_bytes(blob).await?); + } + } + } + + /// Upgrades a temp tag to a persistent tag. + pub async fn persist(&self, tt: TempTag) -> Result { + let tag = self + .0 + .rpc + .rpc(tags::CreateRequest { + value: tt.hash_and_format(), + batch: Some(self.0.batch), + sync: SyncMode::Full, + }) + .await??; + Ok(tag) + } + + /// Upgrades a temp tag to a persistent tag with a specific name. + pub async fn persist_to(&self, tt: TempTag, tag: Tag) -> Result<()> { + self.0 + .rpc + .rpc(tags::SetRequest { + name: tag, + value: Some(tt.hash_and_format()), + batch: Some(self.0.batch), + sync: SyncMode::Full, + }) + .await??; + Ok(()) + } + + /// Upgrades a temp tag to a persistent tag with either a specific name or + /// an automatically generated name. + pub async fn persist_with_opts(&self, tt: TempTag, opts: SetTagOption) -> Result { + match opts { + SetTagOption::Auto => self.persist(tt).await, + SetTagOption::Named(tag) => { + self.persist_to(tt, tag.clone()).await?; + Ok(tag) + } + } + } + + /// Creates a temp tag for the given hash and format, without notifying the server. + /// + /// Caution: only do this for data for which you know the server side has created a temp tag. + fn local_temp_tag(&self, inner: HashAndFormat, _size: Option) -> TempTag { + let on_drop: Arc = self.0.clone(); + let on_drop = Some(Arc::downgrade(&on_drop)); + TempTag::new(inner, on_drop) + } +} diff --git a/src/rpc/client/tags.rs b/src/rpc/client/tags.rs new file mode 100644 index 000000000..6b4ac2598 --- /dev/null +++ b/src/rpc/client/tags.rs @@ -0,0 +1,64 @@ +//! API for tag management. +//! +//! The purpose of tags is to mark information as important to prevent it +//! from being garbage-collected (if the garbage collector is turned on). +//! Currently this is used for blobs. +//! +//! The main entry point is the [`Client`]. +//! +//! You obtain a [`Client`] via [`Iroh::tags()`](crate::client::Iroh::tags). +//! +//! [`Client::list`] can be used to list all tags. +//! [`Client::list_hash_seq`] can be used to list all tags with a hash_seq format. +//! +//! [`Client::delete`] can be used to delete a tag. +use crate::{BlobFormat, Hash, Tag}; +use anyhow::Result; +use futures_lite::{Stream, StreamExt}; +use quic_rpc::RpcClient; +use ref_cast::RefCast; +use serde::{Deserialize, Serialize}; + +use crate::rpc::proto::tags::{DeleteRequest, ListRequest}; + +/// Iroh tags client. +#[derive(Debug, Clone, RefCast)] +#[repr(transparent)] +pub struct Client { + pub(super) rpc: RpcClient, +} + +impl Client +where + C: quic_rpc::ServiceConnection, + S: quic_rpc::Service, +{ + /// Lists all tags. + pub async fn list(&self) -> Result>> { + let stream = self.rpc.server_streaming(ListRequest::all()).await?; + Ok(stream.map(|res| res.map_err(anyhow::Error::from))) + } + + /// Lists all tags with a hash_seq format. + pub async fn list_hash_seq(&self) -> Result>> { + let stream = self.rpc.server_streaming(ListRequest::hash_seq()).await?; + Ok(stream.map(|res| res.map_err(anyhow::Error::from))) + } + + /// Deletes a tag. + pub async fn delete(&self, name: Tag) -> Result<()> { + self.rpc.rpc(DeleteRequest { name }).await??; + Ok(()) + } +} + +/// Information about a tag. 
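+///
+/// Listing tags could look roughly like this (sketch only; assumes an iroh
+/// `client` is already in scope):
+///
+/// ```ignore
+/// use futures_lite::StreamExt;
+///
+/// let mut tags = client.tags().list().await?;
+/// while let Some(info) = tags.next().await {
+///     let info: TagInfo = info?;
+///     println!("{} -> {} ({:?})", info.name, info.hash, info.format);
+/// }
+/// ```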
+#[derive(Debug, Serialize, Deserialize)] +pub struct TagInfo { + /// Name of the tag + pub name: Tag, + /// Format of the data + pub format: BlobFormat, + /// Hash of the data + pub hash: Hash, +} diff --git a/src/rpc/proto.rs b/src/rpc/proto.rs new file mode 100644 index 000000000..26434ad16 --- /dev/null +++ b/src/rpc/proto.rs @@ -0,0 +1,34 @@ +//! RPC protocol for the iroh-blobs service +use nested_enum_utils::enum_conversions; +use serde::{Deserialize, Serialize}; + +pub mod blobs; +pub mod tags; + +/// quic-rpc service for iroh blobs +#[derive(Debug, Clone)] +pub struct RpcService; + +impl quic_rpc::Service for RpcService { + type Req = Request; + type Res = Response; +} + +#[allow(missing_docs)] +#[enum_conversions] +#[derive(Debug, Serialize, Deserialize)] +pub enum Request { + Blobs(blobs::Request), + Tags(tags::Request), +} + +#[allow(missing_docs)] +#[enum_conversions] +#[derive(Debug, Serialize, Deserialize)] +pub enum Response { + Blobs(blobs::Response), + Tags(tags::Response), +} + +type RpcError = serde_error::Error; +type RpcResult = Result; diff --git a/src/rpc/proto/blobs.rs b/src/rpc/proto/blobs.rs new file mode 100644 index 000000000..113971ad7 --- /dev/null +++ b/src/rpc/proto/blobs.rs @@ -0,0 +1,320 @@ +//! RPC requests and responses for the blob service. +use std::path::PathBuf; + +use bytes::Bytes; +use iroh_base::hash::Hash; +use nested_enum_utils::enum_conversions; +use quic_rpc_derive::rpc_requests; +use serde::{Deserialize, Serialize}; + +use crate::{ + export::ExportProgress, + format::collection::Collection, + get::db::DownloadProgress, + net_protocol::{BatchId, BlobDownloadRequest}, + provider::{AddProgress, BatchAddPathProgress}, + store::{ + BaoBlobSize, ConsistencyCheckProgress, ExportFormat, ExportMode, ImportMode, + ValidateProgress, + }, + util::SetTagOption, + BlobFormat, HashAndFormat, Tag, +}; + +use super::{RpcError, RpcResult, RpcService}; +use crate::rpc::client::blobs::{BlobInfo, BlobStatus, IncompleteBlobInfo, ReadAtLen, WrapOption}; + +#[allow(missing_docs)] +#[derive(strum::Display, Debug, Serialize, Deserialize)] +#[enum_conversions(super::Request)] +#[rpc_requests(RpcService)] +pub enum Request { + #[server_streaming(response = RpcResult)] + ReadAt(ReadAtRequest), + #[bidi_streaming(update = AddStreamUpdate, response = AddStreamResponse)] + AddStream(AddStreamRequest), + AddStreamUpdate(AddStreamUpdate), + #[server_streaming(response = AddPathResponse)] + AddPath(AddPathRequest), + #[server_streaming(response = DownloadResponse)] + Download(BlobDownloadRequest), + #[server_streaming(response = ExportResponse)] + Export(ExportRequest), + #[server_streaming(response = RpcResult)] + List(ListRequest), + #[server_streaming(response = RpcResult)] + ListIncomplete(ListIncompleteRequest), + #[rpc(response = RpcResult<()>)] + Delete(DeleteRequest), + #[server_streaming(response = ValidateProgress)] + Validate(ValidateRequest), + #[server_streaming(response = ConsistencyCheckProgress)] + Fsck(ConsistencyCheckRequest), + #[rpc(response = RpcResult)] + CreateCollection(CreateCollectionRequest), + #[rpc(response = RpcResult)] + BlobStatus(BlobStatusRequest), + + #[bidi_streaming(update = BatchUpdate, response = BatchCreateResponse)] + BatchCreate(BatchCreateRequest), + BatchUpdate(BatchUpdate), + #[bidi_streaming(update = BatchAddStreamUpdate, response = BatchAddStreamResponse)] + BatchAddStream(BatchAddStreamRequest), + BatchAddStreamUpdate(BatchAddStreamUpdate), + #[server_streaming(response = BatchAddPathResponse)] + 
BatchAddPath(BatchAddPathRequest), + #[rpc(response = RpcResult<()>)] + BatchCreateTempTag(BatchCreateTempTagRequest), +} + +#[allow(missing_docs)] +#[derive(strum::Display, Debug, Serialize, Deserialize)] +#[enum_conversions(super::Response)] +pub enum Response { + ReadAt(RpcResult), + AddStream(AddStreamResponse), + AddPath(AddPathResponse), + List(RpcResult), + ListIncomplete(RpcResult), + Download(DownloadResponse), + Fsck(ConsistencyCheckProgress), + Export(ExportResponse), + Validate(ValidateProgress), + CreateCollection(RpcResult), + BlobStatus(RpcResult), + BatchCreate(BatchCreateResponse), + BatchAddStream(BatchAddStreamResponse), + BatchAddPath(BatchAddPathResponse), +} + +/// A request to the node to provide the data at the given path +/// +/// Will produce a stream of [`AddProgress`] messages. +#[derive(Debug, Serialize, Deserialize)] +pub struct AddPathRequest { + /// The path to the data to provide. + /// + /// This should be an absolute path valid for the file system on which + /// the node runs. Usually the cli will run on the same machine as the + /// node, so this should be an absolute path on the cli machine. + pub path: PathBuf, + /// True if the provider can assume that the data will not change, so it + /// can be shared in place. + pub in_place: bool, + /// Tag to tag the data with. + pub tag: SetTagOption, + /// Whether to wrap the added data in a collection + pub wrap: WrapOption, +} + +/// Wrapper around [`AddProgress`]. +#[derive(Debug, Serialize, Deserialize, derive_more::Into)] +pub struct AddPathResponse(pub AddProgress); + +/// Progress response for [`BlobDownloadRequest`] +#[derive(Debug, Clone, Serialize, Deserialize, derive_more::From, derive_more::Into)] +pub struct DownloadResponse(pub DownloadProgress); + +/// A request to the node to download and share the data specified by the hash. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ExportRequest { + /// The hash of the blob to export. + pub hash: Hash, + /// The filepath to where the data should be saved + /// + /// This should be an absolute path valid for the file system on which + /// the node runs. + pub path: PathBuf, + /// Set to [`ExportFormat::Collection`] if the `hash` refers to a [`Collection`] and you want + /// to export all children of the collection into individual files. + pub format: ExportFormat, + /// The mode of exporting. + /// + /// The default is [`ExportMode::Copy`]. See [`ExportMode`] for details. 
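+    ///
+    /// A request could be assembled roughly like this (sketch; the path and
+    /// format values are illustrative):
+    ///
+    /// ```ignore
+    /// let req = ExportRequest {
+    ///     hash,
+    ///     path: "/tmp/exported-blob".into(),
+    ///     format: ExportFormat::Blob,
+    ///     mode: ExportMode::Copy,
+    /// };
+    /// ```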
+ pub mode: ExportMode, +} + +/// Progress response for [`ExportRequest`] +#[derive(Debug, Clone, Serialize, Deserialize, derive_more::From, derive_more::Into)] +pub struct ExportResponse(pub ExportProgress); + +/// A request to the node to validate the integrity of all provided data +#[derive(Debug, Serialize, Deserialize)] +pub struct ConsistencyCheckRequest { + /// repair the store by dropping inconsistent blobs + pub repair: bool, +} + +/// A request to the node to validate the integrity of all provided data +#[derive(Debug, Serialize, Deserialize)] +pub struct ValidateRequest { + /// repair the store by downgrading blobs from complete to partial + pub repair: bool, +} + +/// List all blobs, including collections +#[derive(Debug, Serialize, Deserialize)] +pub struct ListRequest; + +/// List all blobs, including collections +#[derive(Debug, Serialize, Deserialize)] +pub struct ListIncompleteRequest; + +/// Get the bytes for a hash +#[derive(Serialize, Deserialize, Debug)] +pub struct ReadAtRequest { + /// Hash to get bytes for + pub hash: Hash, + /// Offset to start reading at + pub offset: u64, + /// Length of the data to get + pub len: ReadAtLen, +} + +/// Response to [`ReadAtRequest`] +#[derive(Serialize, Deserialize, Debug)] +pub enum ReadAtResponse { + /// The entry header. + Entry { + /// The size of the blob + size: BaoBlobSize, + /// Whether the blob is complete + is_complete: bool, + }, + /// Chunks of entry data. + Data { + /// The data chunk + chunk: Bytes, + }, +} + +/// Write a blob from a byte stream +#[derive(Serialize, Deserialize, Debug)] +pub struct AddStreamRequest { + /// Tag to tag the data with. + pub tag: SetTagOption, +} + +/// Write a blob from a byte stream +#[derive(Serialize, Deserialize, Debug)] +pub enum AddStreamUpdate { + /// A chunk of stream data + Chunk(Bytes), + /// Abort the request due to an error on the client side + Abort, +} + +/// Wrapper around [`AddProgress`]. +#[derive(Debug, Serialize, Deserialize, derive_more::Into)] +pub struct AddStreamResponse(pub AddProgress); + +/// Delete a blob +#[derive(Debug, Serialize, Deserialize)] +pub struct DeleteRequest { + /// Name of the tag + pub hash: Hash, +} + +/// Create a collection. +#[derive(Debug, Serialize, Deserialize)] +pub struct CreateCollectionRequest { + /// The collection + pub collection: Collection, + /// Tag option. + pub tag: SetTagOption, + /// Tags that should be deleted after creation. + pub tags_to_delete: Vec, +} + +/// A response to a create collection request +#[derive(Debug, Serialize, Deserialize)] +pub struct CreateCollectionResponse { + /// The resulting hash. + pub hash: Hash, + /// The resulting tag. 
+ pub tag: Tag, +} + +/// Request to get the status of a blob +#[derive(Debug, Serialize, Deserialize)] +pub struct BlobStatusRequest { + /// The hash of the blob + pub hash: Hash, +} + +/// The response to a status request +#[derive(Debug, Serialize, Deserialize, derive_more::From, derive_more::Into)] +pub struct BlobStatusResponse(pub BlobStatus); + +/// Request to create a new scope for temp tags +#[derive(Debug, Serialize, Deserialize)] +pub struct BatchCreateRequest; + +/// Update to a temp tag scope +#[derive(Debug, Serialize, Deserialize)] +pub enum BatchUpdate { + /// Drop of a remote temp tag + Drop(HashAndFormat), + /// Message to check that the connection is still alive + Ping, +} + +/// Response to a temp tag scope request +#[derive(Debug, Serialize, Deserialize)] +pub enum BatchCreateResponse { + /// We got the id of the scope + Id(BatchId), +} + +/// Create a temp tag with a given hash and format +#[derive(Debug, Serialize, Deserialize)] +pub struct BatchCreateTempTagRequest { + /// Content to protect + pub content: HashAndFormat, + /// Batch to create the temp tag in + pub batch: BatchId, +} + +/// Write a blob from a byte stream +#[derive(Serialize, Deserialize, Debug)] +pub struct BatchAddStreamRequest { + /// What format to use for the blob + pub format: BlobFormat, + /// Batch to create the temp tag in + pub batch: BatchId, +} + +/// Write a blob from a byte stream +#[derive(Serialize, Deserialize, Debug)] +pub enum BatchAddStreamUpdate { + /// A chunk of stream data + Chunk(Bytes), + /// Abort the request due to an error on the client side + Abort, +} + +/// Wrapper around [`AddProgress`]. +#[allow(missing_docs)] +#[derive(Debug, Serialize, Deserialize)] +pub enum BatchAddStreamResponse { + Abort(RpcError), + OutboardProgress { offset: u64 }, + Result { hash: Hash }, +} + +/// Write a blob from a byte stream +#[derive(Serialize, Deserialize, Debug)] +pub struct BatchAddPathRequest { + /// The path to the data to provide. + pub path: PathBuf, + /// Add the data in place + pub import_mode: ImportMode, + /// What format to use for the blob + pub format: BlobFormat, + /// Batch to create the temp tag in + pub batch: BatchId, +} + +/// Response to a batch add path request +#[derive(Serialize, Deserialize, Debug)] +pub struct BatchAddPathResponse(pub BatchAddPathProgress); diff --git a/src/rpc/proto/tags.rs b/src/rpc/proto/tags.rs new file mode 100644 index 000000000..71bf29f7b --- /dev/null +++ b/src/rpc/proto/tags.rs @@ -0,0 +1,110 @@ +//! 
Tags RPC protocol +use nested_enum_utils::enum_conversions; +use quic_rpc_derive::rpc_requests; +use serde::{Deserialize, Serialize}; + +use super::{RpcResult, RpcService}; +use crate::rpc::client::tags::TagInfo; +use crate::{net_protocol::BatchId, HashAndFormat, Tag}; + +#[allow(missing_docs)] +#[derive(strum::Display, Debug, Serialize, Deserialize)] +#[enum_conversions(super::Request)] +#[rpc_requests(RpcService)] +pub enum Request { + #[rpc(response = RpcResult)] + Create(CreateRequest), + #[rpc(response = RpcResult<()>)] + Set(SetRequest), + #[rpc(response = RpcResult<()>)] + DeleteTag(DeleteRequest), + #[server_streaming(response = TagInfo)] + ListTags(ListRequest), +} + +#[allow(missing_docs)] +#[derive(strum::Display, Debug, Serialize, Deserialize)] +#[enum_conversions(super::Response)] +pub enum Response { + Create(RpcResult), + ListTags(TagInfo), + DeleteTag(RpcResult<()>), +} + +/// Determine how to sync the db after a modification operation +#[derive(Debug, Serialize, Deserialize, Default)] +pub enum SyncMode { + /// Fully sync the db + #[default] + Full, + /// Do not sync the db + None, +} + +/// Create a tag +#[derive(Debug, Serialize, Deserialize)] +pub struct CreateRequest { + /// Value of the tag + pub value: HashAndFormat, + /// Batch to use, none for global + pub batch: Option, + /// Sync mode + pub sync: SyncMode, +} + +/// Set or delete a tag +#[derive(Debug, Serialize, Deserialize)] +pub struct SetRequest { + /// Name of the tag + pub name: Tag, + /// Value of the tag, None to delete + pub value: Option, + /// Batch to use, none for global + pub batch: Option, + /// Sync mode + pub sync: SyncMode, +} + +/// List all collections +/// +/// Lists all collections that have been explicitly added to the database. +#[derive(Debug, Serialize, Deserialize)] +pub struct ListRequest { + /// List raw tags + pub raw: bool, + /// List hash seq tags + pub hash_seq: bool, +} + +impl ListRequest { + /// List all tags + pub fn all() -> Self { + Self { + raw: true, + hash_seq: true, + } + } + + /// List raw tags + pub fn raw() -> Self { + Self { + raw: true, + hash_seq: false, + } + } + + /// List hash seq tags + pub fn hash_seq() -> Self { + Self { + raw: false, + hash_seq: true, + } + } +} + +/// Delete a tag +#[derive(Debug, Serialize, Deserialize)] +pub struct DeleteRequest { + /// Name of the tag + pub name: Tag, +} diff --git a/src/util.rs b/src/util.rs index 0fb2b1b1f..735a9feb1 100644 --- a/src/util.rs +++ b/src/util.rs @@ -15,6 +15,7 @@ use serde::{Deserialize, Serialize}; use crate::{BlobFormat, Hash, HashAndFormat, IROH_BLOCK_SIZE}; +pub mod fs; pub mod io; mod mem_or_file; pub mod progress; diff --git a/src/util/fs.rs b/src/util/fs.rs new file mode 100644 index 000000000..71dc4caff --- /dev/null +++ b/src/util/fs.rs @@ -0,0 +1,435 @@ +//! Utilities for filesystem operations. +use std::{ + borrow::Cow, + fs::read_dir, + path::{Component, Path, PathBuf}, +}; + +use anyhow::{bail, Context}; +use bytes::Bytes; +use iroh_net::key::SecretKey; +use tokio::io::AsyncWriteExt; +use walkdir::WalkDir; + +use crate::rpc::client::blobs::WrapOption; + +/// A data source +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct DataSource { + /// Custom name + name: String, + /// Path to the file + path: PathBuf, +} + +impl DataSource { + /// Creates a new [`DataSource`] from a [`PathBuf`]. 
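+    ///
+    /// Sketch (the path is illustrative):
+    ///
+    /// ```ignore
+    /// use std::path::PathBuf;
+    ///
+    /// let source = DataSource::new(PathBuf::from("/data/photo.jpg"));
+    /// assert_eq!(source.name(), "photo.jpg");
+    /// ```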
+    pub fn new(path: PathBuf) -> Self {
+        let name = path
+            .file_name()
+            .map(|s| s.to_string_lossy().to_string())
+            .unwrap_or_default();
+        DataSource { path, name }
+    }
+    /// Creates a new [`DataSource`] from a [`PathBuf`] and a custom name.
+    pub fn with_name(path: PathBuf, name: String) -> Self {
+        DataSource { path, name }
+    }
+
+    /// Returns blob name for this data source.
+    ///
+    /// If no name was provided when created it is derived from the path name.
+    pub fn name(&self) -> Cow<'_, str> {
+        Cow::Borrowed(&self.name)
+    }
+
+    /// Returns the path of this data source.
+    pub fn path(&self) -> &Path {
+        &self.path
+    }
+}
+
+impl From<PathBuf> for DataSource {
+    fn from(value: PathBuf) -> Self {
+        DataSource::new(value)
+    }
+}
+
+impl From<&std::path::Path> for DataSource {
+    fn from(value: &std::path::Path) -> Self {
+        DataSource::new(value.to_path_buf())
+    }
+}
+
+/// Create data sources from a path.
+pub fn scan_path(path: PathBuf, wrap: WrapOption) -> anyhow::Result<Vec<DataSource>> {
+    if path.is_dir() {
+        scan_dir(path, wrap)
+    } else {
+        let name = match wrap {
+            WrapOption::NoWrap => bail!("Cannot scan a file without wrapping"),
+            WrapOption::Wrap { name: None } => file_name(&path)?,
+            WrapOption::Wrap { name: Some(name) } => name,
+        };
+        Ok(vec![DataSource { name, path }])
+    }
+}
+
+fn file_name(path: &Path) -> anyhow::Result<String> {
+    relative_canonicalized_path_to_string(path.file_name().context("path is invalid")?)
+}
+
+/// Create data sources from a directory.
+pub fn scan_dir(root: PathBuf, wrap: WrapOption) -> anyhow::Result<Vec<DataSource>> {
+    if !root.is_dir() {
+        bail!("Expected {} to be a directory", root.to_string_lossy());
+    }
+    let prefix = match wrap {
+        WrapOption::NoWrap => None,
+        WrapOption::Wrap { name: None } => Some(file_name(&root)?),
+        WrapOption::Wrap { name: Some(name) } => Some(name),
+    };
+    let files = WalkDir::new(&root).into_iter();
+    let data_sources = files
+        .map(|entry| {
+            let entry = entry?;
+            if !entry.file_type().is_file() {
+                // Skip symlinks. Directories are handled by WalkDir.
+                return Ok(None);
+            }
+            let path = entry.into_path();
+            let mut name = relative_canonicalized_path_to_string(path.strip_prefix(&root)?)?;
+            if let Some(prefix) = &prefix {
+                name = format!("{prefix}/{name}");
+            }
+            anyhow::Ok(Some(DataSource { name, path }))
+        })
+        .filter_map(Result::transpose);
+    let data_sources: Vec<anyhow::Result<DataSource>> = data_sources.collect::<Vec<_>>();
+    data_sources.into_iter().collect::<anyhow::Result<Vec<_>>>()
+}
+
+/// This function converts a canonicalized relative path to a string, returning
+/// an error if the path is not valid unicode.
+///
+/// This function will also fail if the path is non canonical, i.e. contains
+/// `..` or `.`, or if the path components contain any windows or unix path
+/// separators.
+pub fn relative_canonicalized_path_to_string(path: impl AsRef<Path>) -> anyhow::Result<String> {
+    canonicalized_path_to_string(path, true)
+}
+
+/// Loads a [`SecretKey`] from the provided file, or stores a newly generated one
+/// at the given location.
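+///
+/// A hedged sketch (the path is illustrative):
+///
+/// ```ignore
+/// use std::path::PathBuf;
+///
+/// // loads the key if the file exists, otherwise generates and persists one
+/// let secret_key = load_secret_key(PathBuf::from("/var/lib/iroh/keyfile")).await?;
+/// println!("node id: {}", secret_key.public());
+/// ```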
+pub async fn load_secret_key(key_path: PathBuf) -> anyhow::Result<SecretKey> {
+    if key_path.exists() {
+        let keystr = tokio::fs::read(key_path).await?;
+        let secret_key = SecretKey::try_from_openssh(keystr).context("invalid keyfile")?;
+        Ok(secret_key)
+    } else {
+        let secret_key = SecretKey::generate();
+        let ser_key = secret_key.to_openssh()?;
+
+        // Try to canonicalize if possible
+        let key_path = key_path.canonicalize().unwrap_or(key_path);
+        let key_path_parent = key_path.parent().ok_or_else(|| {
+            anyhow::anyhow!("no parent directory found for '{}'", key_path.display())
+        })?;
+        tokio::fs::create_dir_all(&key_path_parent).await?;
+
+        // write to tempfile
+        let (file, temp_file_path) = tempfile::NamedTempFile::new_in(key_path_parent)
+            .context("unable to create tempfile")?
+            .into_parts();
+        let mut file = tokio::fs::File::from_std(file);
+        file.write_all(ser_key.as_bytes())
+            .await
+            .context("unable to write keyfile")?;
+        file.flush().await?;
+        drop(file);
+
+        // move file
+        tokio::fs::rename(temp_file_path, key_path)
+            .await
+            .context("failed to rename keyfile")?;
+
+        Ok(secret_key)
+    }
+}
+
+/// Information about the content on a path
+#[derive(Debug, Clone)]
+pub struct PathContent {
+    /// total size of all the files in the directory
+    pub size: u64,
+    /// total number of files in the directory
+    pub files: u64,
+}
+
+/// Walks the directory to get the total size and number of files in directory or file
+///
+// TODO: possibly combine with `scan_dir`
+pub fn path_content_info(path: impl AsRef<Path>) -> anyhow::Result<PathContent> {
+    path_content_info0(path)
+}
+
+fn path_content_info0(path: impl AsRef<Path>) -> anyhow::Result<PathContent> {
+    let mut files = 0;
+    let mut size = 0;
+    let path = path.as_ref();
+
+    if path.is_dir() {
+        for entry in read_dir(path)? {
+            let path0 = entry?.path();
+
+            match path_content_info0(path0) {
+                Ok(path_content) => {
+                    size += path_content.size;
+                    files += path_content.files;
+                }
+                Err(e) => bail!(e),
+            }
+        }
+    } else {
+        match path.try_exists() {
+            Ok(true) => {
+                size = path
+                    .metadata()
+                    .context(format!("Error reading metadata for {path:?}"))?
+                    .len();
+                files = 1;
+            }
+            Ok(false) => {
+                tracing::warn!("Not including broken symlink at {path:?}");
+            }
+            Err(e) => {
+                bail!(e);
+            }
+        }
+    }
+    Ok(PathContent { size, files })
+}
+
+/// Helper function that translates a key that was derived from the [`path_to_key`] function back
+/// into a path.
+///
+/// If `prefix` exists, it will be stripped before converting back to a path
+/// If `root` exists, will add the root as a parent to the created path
+/// Removes any null byte that has been appended to the key
+pub fn key_to_path(
+    key: impl AsRef<[u8]>,
+    prefix: Option<String>,
+    root: Option<PathBuf>,
+) -> anyhow::Result<PathBuf> {
+    let mut key = key.as_ref();
+    if key.is_empty() {
+        return Ok(PathBuf::new());
+    }
+    // if the last element is the null byte, remove it
+    if b'\0' == key[key.len() - 1] {
+        key = &key[..key.len() - 1]
+    }
+
+    let key = if let Some(prefix) = prefix {
+        let prefix = prefix.into_bytes();
+        if prefix[..] == key[..prefix.len()] {
+            &key[prefix.len()..]
+        } else {
+            anyhow::bail!("key {:?} does not begin with prefix {:?}", key, prefix);
+        }
+    } else {
+        key
+    };
+
+    let mut path = if key[0] == b'/' {
+        PathBuf::from("/")
+    } else {
+        PathBuf::new()
+    };
+    for component in key
+        .split(|c| c == &b'/')
+        .map(|c| String::from_utf8(c.into()).context("key contains invalid data"))
+    {
+        let component = component?;
+        path = path.join(component);
+    }
+
+    // add root if it exists
+    let path = if let Some(root) = root {
+        root.join(path)
+    } else {
+        path
+    };
+
+    Ok(path)
+}
+
+/// Helper function that creates a document key from a canonicalized path, removing the `root` and adding the `prefix`, if they exist
+///
+/// Appends the null byte to the end of the key.
+pub fn path_to_key(
+    path: impl AsRef<Path>,
+    prefix: Option<String>,
+    root: Option<PathBuf>,
+) -> anyhow::Result<Bytes> {
+    let path = path.as_ref();
+    let path = if let Some(root) = root {
+        path.strip_prefix(root)?
+    } else {
+        path
+    };
+    let suffix = canonicalized_path_to_string(path, false)?.into_bytes();
+    let mut key = if let Some(prefix) = prefix {
+        prefix.into_bytes().to_vec()
+    } else {
+        Vec::new()
+    };
+    key.extend(suffix);
+    key.push(b'\0');
+    Ok(key.into())
+}
+
+/// This function converts an already canonicalized path to a string.
+///
+/// If `must_be_relative` is true, the function will fail if any component of the path is
+/// `Component::RootDir`
+///
+/// This function will also fail if the path is non canonical, i.e. contains
+/// `..` or `.`, or if the path components contain any windows or unix path
+/// separators.
+pub fn canonicalized_path_to_string(
+    path: impl AsRef<Path>,
+    must_be_relative: bool,
+) -> anyhow::Result<String> {
+    let mut path_str = String::new();
+    let parts = path
+        .as_ref()
+        .components()
+        .filter_map(|c| match c {
+            Component::Normal(x) => {
+                let c = match x.to_str() {
+                    Some(c) => c,
+                    None => return Some(Err(anyhow::anyhow!("invalid character in path"))),
+                };
+
+                if !c.contains('/') && !c.contains('\\') {
+                    Some(Ok(c))
+                } else {
+                    Some(Err(anyhow::anyhow!("invalid path component {:?}", c)))
+                }
+            }
+            Component::RootDir => {
+                if must_be_relative {
+                    Some(Err(anyhow::anyhow!("invalid path component {:?}", c)))
+                } else {
+                    path_str.push('/');
+                    None
+                }
+            }
+            _ => Some(Err(anyhow::anyhow!("invalid path component {:?}", c))),
+        })
+        .collect::<anyhow::Result<Vec<_>>>()?;
+    let parts = parts.join("/");
+    path_str.push_str(&parts);
+    Ok(path_str)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_path_to_key_roundtrip() {
+        let path = PathBuf::from("/foo/bar");
+        let expect_path = PathBuf::from("/foo/bar");
+        let key = b"/foo/bar\0";
+        let expect_key = Bytes::from(&key[..]);
+
+        let got_key = path_to_key(path.clone(), None, None).unwrap();
+        let got_path = key_to_path(got_key.clone(), None, None).unwrap();
+
+        assert_eq!(expect_key, got_key);
+        assert_eq!(expect_path, got_path);
+
+        // including prefix
+        let prefix = String::from("prefix:");
+        let key = b"prefix:/foo/bar\0";
+        let expect_key = Bytes::from(&key[..]);
+        let got_key = path_to_key(path.clone(), Some(prefix.clone()), None).unwrap();
+        assert_eq!(expect_key, got_key);
+        let got_path = key_to_path(got_key, Some(prefix.clone()), None).unwrap();
+        assert_eq!(expect_path, got_path);
+
+        // including root
+        let root = PathBuf::from("/foo");
+        let key = b"prefix:bar\0";
+        let expect_key = Bytes::from(&key[..]);
+        let got_key = path_to_key(path, Some(prefix.clone()), Some(root.clone())).unwrap();
+        assert_eq!(expect_key, got_key);
+        let got_path = key_to_path(got_key, Some(prefix), Some(root)).unwrap();
assert_eq!(expect_path, got_path); + } + + #[test] + fn test_canonicalized_path_to_string() { + assert_eq!( + canonicalized_path_to_string("foo/bar", true).unwrap(), + "foo/bar" + ); + assert_eq!(canonicalized_path_to_string("", true).unwrap(), ""); + assert_eq!( + canonicalized_path_to_string("foo bar/baz/bat", true).unwrap(), + "foo bar/baz/bat" + ); + assert_eq!( + canonicalized_path_to_string("/foo/bar", true).map_err(|e| e.to_string()), + Err("invalid path component RootDir".to_string()) + ); + + assert_eq!( + canonicalized_path_to_string("/foo/bar", false).unwrap(), + "/foo/bar" + ); + let path = PathBuf::from("/").join("Ü").join("⁰€™■・�").join("東京"); + assert_eq!( + canonicalized_path_to_string(path, false).unwrap(), + "/Ü/⁰€™■・�/東京" + ) + } + + #[test] + fn test_get_path_content() { + let dir = testdir::testdir!(); + let PathContent { size, files } = path_content_info(&dir).unwrap(); + assert_eq!(0, size); + assert_eq!(0, files); + let foo = b"hello_world"; + let bar = b"ipsum lorem"; + let bat = b"happy birthday"; + let expect_size = foo.len() + bar.len() + bat.len(); + std::fs::write(dir.join("foo.txt"), foo).unwrap(); + std::fs::write(dir.join("bar.txt"), bar).unwrap(); + std::fs::write(dir.join("bat.txt"), bat).unwrap(); + let PathContent { size, files } = path_content_info(&dir).unwrap(); + assert_eq!(expect_size as u64, size); + assert_eq!(3, files); + + // create nested empty dirs + std::fs::create_dir(dir.join("1")).unwrap(); + std::fs::create_dir(dir.join("2")).unwrap(); + let dir3 = dir.join("3"); + std::fs::create_dir(&dir3).unwrap(); + + // create a nested dir w/ content + let dir4 = dir3.join("4"); + std::fs::create_dir(&dir4).unwrap(); + std::fs::write(dir4.join("foo.txt"), foo).unwrap(); + std::fs::write(dir4.join("bar.txt"), bar).unwrap(); + std::fs::write(dir4.join("bat.txt"), bat).unwrap(); + + let expect_size = expect_size * 2; + let PathContent { size, files } = path_content_info(&dir).unwrap(); + assert_eq!(expect_size as u64, size); + assert_eq!(6, files); + } +} From 325042fb775cffe2138666c7ebbde71f21387931 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Wed, 30 Oct 2024 16:16:02 +0200 Subject: [PATCH 02/19] Avoid portable-atomic conflict --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 9d917bf5f..ea65a2da0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,7 @@ num_cpus = "1.15.0" oneshot = "0.1.8" parking_lot = { version = "0.12.1", optional = true } pin-project = "1.1.5" -portable-atomic = { version = "1.9.0", optional = true } +portable-atomic = { version = "1", optional = true } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } quic-rpc = { version = "0.13.0", optional = true } quic-rpc-derive = { version = "0.13.0", optional = true } From b3d0c58e80f15dbe1bb148793efe3fd164d91054 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Thu, 31 Oct 2024 12:21:47 +0200 Subject: [PATCH 03/19] Add rpc handler --- src/net_protocol.rs | 11 + src/rpc.rs | 858 ++++++++++++++++++++++++++++++++++++++++ src/rpc/client/blobs.rs | 21 +- src/rpc/client/tags.rs | 8 +- src/rpc/proto.rs | 6 +- 5 files changed, 891 insertions(+), 13 deletions(-) diff --git a/src/net_protocol.rs b/src/net_protocol.rs index d1de1da65..917dc2f07 100644 --- a/src/net_protocol.rs +++ b/src/net_protocol.rs @@ -73,6 +73,7 @@ pub struct Blobs { events: EventSender, downloader: Downloader, batches: tokio::sync::Mutex, + endpoint: Endpoint, } /// Name used for logging 
when new node addresses are added from gossip. @@ -135,12 +136,14 @@ impl Blobs { rt: LocalPoolHandle, events: EventSender, downloader: Downloader, + endpoint: Endpoint, ) -> Self { Self { rt, store, events, downloader, + endpoint, batches: Default::default(), } } @@ -149,6 +152,14 @@ impl Blobs { &self.store } + pub(crate) fn rt(&self) -> LocalPoolHandle { + self.rt.clone() + } + + pub(crate) fn endpoint(&self) -> &Endpoint { + &self.endpoint + } + pub async fn batches(&self) -> tokio::sync::MutexGuard<'_, BlobBatches> { self.batches.lock().await } diff --git a/src/rpc.rs b/src/rpc.rs index 17bcf4730..70cafe780 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,3 +1,861 @@ //! Provides a rpc protocol as well as a client for the protocol + +use std::{ + io, + sync::{Arc, Mutex}, +}; + +use anyhow::anyhow; +use client::{ + blobs::{BlobInfo, BlobStatus, IncompleteBlobInfo, WrapOption}, + tags::TagInfo, +}; +use futures_buffered::BufferedStreamExt; +use futures_lite::StreamExt; +use futures_util::{FutureExt, Stream}; +use genawaiter::sync::{Co, Gen}; +use iroh_base::hash::{BlobFormat, HashAndFormat}; +use iroh_io::AsyncSliceReader; +use proto::{ + blobs::{ + AddPathRequest, AddPathResponse, AddStreamRequest, AddStreamResponse, AddStreamUpdate, BatchAddPathRequest, BatchAddPathResponse, BatchAddStreamRequest, BatchAddStreamResponse, BatchAddStreamUpdate, BatchCreateRequest, BatchCreateResponse, BatchCreateTempTagRequest, BatchUpdate, BlobStatusRequest, BlobStatusResponse, ConsistencyCheckRequest, CreateCollectionRequest, CreateCollectionResponse, DeleteRequest, DownloadResponse, ExportRequest, ExportResponse, ListIncompleteRequest, ListRequest, ReadAtRequest, ReadAtResponse, ValidateRequest + }, + tags::SyncMode, + RpcError, RpcResult, +}; +use quic_rpc::server::{RpcChannel, RpcServerError}; + +use crate::{ + export::ExportProgress, + format::collection::Collection, + get::db::DownloadProgress, + net_protocol::{BlobDownloadRequest, Blobs}, + provider::{AddProgress, BatchAddPathProgress}, + store::{ConsistencyCheckProgress, ImportProgress, MapEntry, ValidateProgress}, + util::{ + progress::{AsyncChannelProgressSender, ProgressSender}, + SetTagOption, + }, + Tag, +}; +use proto::tags::{ + CreateRequest as TagsCreateRequest, DeleteRequest as TagDeleteRequest, + ListRequest as TagListRequest, SetRequest as TagsSetRequest, +}; pub mod client; pub mod proto; + +/// Chunk size for getting blobs over RPC +const RPC_BLOB_GET_CHUNK_SIZE: usize = 1024 * 64; +/// Channel cap for getting blobs over RPC +const RPC_BLOB_GET_CHANNEL_CAP: usize = 2; + +impl Blobs { + + /// Handle an RPC request + pub async fn handle_rpc_request(self: Arc, + msg: crate::rpc::proto::Request, + chan: RpcChannel + ) -> std::result::Result<(), RpcServerError> + where + S: quic_rpc::Service, + C: quic_rpc::ServiceEndpoint, + { + use crate::rpc::proto::Request::*; + match msg { + Blobs(msg) => self.handle_blobs_request(msg, chan).await, + Tags(msg) => self.handle_tags_request(msg, chan).await, + } + } + + /// Handle a tags request + pub async fn handle_tags_request( + self: Arc, + msg: proto::tags::Request, + chan: RpcChannel + ) -> std::result::Result<(), RpcServerError> + where + S: quic_rpc::Service, + C: quic_rpc::ServiceEndpoint, + { + use proto::tags::Request::*; + match msg { + Create(msg) => chan.rpc(msg, self, Self::tags_create).await, + Set(msg) => chan.rpc(msg, self, Self::tags_set).await, + DeleteTag(msg) => chan.rpc(msg, self, Self::blob_delete_tag).await, + ListTags(msg) => chan.server_streaming(msg, self, 
Self::blob_list_tags).await, + } + } + + /// Handle a blobs request + pub async fn handle_blobs_request( + self: Arc, + msg: proto::blobs::Request, + chan: RpcChannel + ) -> std::result::Result<(), RpcServerError> + where + Sv: quic_rpc::Service, + C: quic_rpc::ServiceEndpoint, + { + use proto::blobs::Request::*; + match msg { + List(msg) => chan.server_streaming(msg, self, Self::blob_list).await, + ListIncomplete(msg) => { + chan.server_streaming(msg, self, Self::blob_list_incomplete) + .await + } + CreateCollection(msg) => chan.rpc(msg, self, Self::create_collection).await, + Delete(msg) => chan.rpc(msg, self, Self::blob_delete_blob).await, + AddPath(msg) => { + chan.server_streaming(msg, self, Self::blob_add_from_path) + .await + } + Download(msg) => chan.server_streaming(msg, self, Self::blob_download).await, + Export(msg) => chan.server_streaming(msg, self, Self::blob_export).await, + Validate(msg) => chan.server_streaming(msg, self, Self::blob_validate).await, + Fsck(msg) => { + chan.server_streaming(msg, self, Self::blob_consistency_check) + .await + } + ReadAt(msg) => chan.server_streaming(msg, self, Self::blob_read_at).await, + AddStream(msg) => chan.bidi_streaming(msg, self, Self::blob_add_stream).await, + AddStreamUpdate(_msg) => Err(RpcServerError::UnexpectedUpdateMessage), + BlobStatus(msg) => chan.rpc(msg, self, Self::blob_status).await, + BatchCreate(msg) => chan.bidi_streaming(msg, self, Self::batch_create).await, + BatchUpdate(_) => Err(RpcServerError::UnexpectedStartMessage), + BatchAddStream(msg) => chan.bidi_streaming(msg, self, Self::batch_add_stream).await, + BatchAddStreamUpdate(_) => Err(RpcServerError::UnexpectedStartMessage), + BatchAddPath(msg) => { + chan.server_streaming(msg, self, Self::batch_add_from_path) + .await + } + BatchCreateTempTag(msg) => chan.rpc(msg, self, Self::batch_create_temp_tag).await, + } + } + + async fn blob_status(self: Arc, msg: BlobStatusRequest) -> RpcResult { + let blobs = self; + let entry = blobs + .store() + .get(&msg.hash) + .await + .map_err(|e| RpcError::new(&e))?; + Ok(BlobStatusResponse(match entry { + Some(entry) => { + if entry.is_complete() { + BlobStatus::Complete { + size: entry.size().value(), + } + } else { + BlobStatus::Partial { size: entry.size() } + } + } + None => BlobStatus::NotFound, + })) + } + + async fn blob_list_impl(self: Arc, co: &Co>) -> io::Result<()> { + use bao_tree::io::fsm::Outboard; + + let blobs = self; + let db = blobs.store(); + for blob in db.blobs().await? { + let blob = blob?; + let Some(entry) = db.get(&blob).await? else { + continue; + }; + let hash = entry.hash(); + let size = entry.outboard().await?.tree().size(); + let path = "".to_owned(); + co.yield_(Ok(BlobInfo { hash, size, path })).await; + } + Ok(()) + } + + async fn blob_list_incomplete_impl( + self: Arc, + co: &Co>, + ) -> io::Result<()> { + let blobs = self; + let db = blobs.store(); + for hash in db.partial_blobs().await? 
{ + let hash = hash?; + let Ok(Some(entry)) = db.get_mut(&hash).await else { + continue; + }; + if entry.is_complete() { + continue; + } + let size = 0; + let expected_size = entry.size().value(); + co.yield_(Ok(IncompleteBlobInfo { + hash, + size, + expected_size, + })) + .await; + } + Ok(()) + } + + fn blob_list( + self: Arc, + _msg: ListRequest, + ) -> impl Stream> + Send + 'static { + Gen::new(|co| async move { + if let Err(e) = self.blob_list_impl(&co).await { + co.yield_(Err(RpcError::new(&e))).await; + } + }) + } + + fn blob_list_incomplete( + self: Arc, + _msg: ListIncompleteRequest, + ) -> impl Stream> + Send + 'static { + Gen::new(move |co| async move { + if let Err(e) = self.blob_list_incomplete_impl(&co).await { + co.yield_(Err(RpcError::new(&e))).await; + } + }) + } + + async fn blob_delete_tag(self: Arc, msg: TagDeleteRequest) -> RpcResult<()> { + self.store() + .set_tag(msg.name, None) + .await + .map_err(|e| RpcError::new(&e))?; + Ok(()) + } + + async fn blob_delete_blob(self: Arc, msg: DeleteRequest) -> RpcResult<()> { + self.store() + .delete(vec![msg.hash]) + .await + .map_err(|e| RpcError::new(&e))?; + Ok(()) + } + + fn blob_list_tags(self: Arc, msg: TagListRequest) -> impl Stream + Send + 'static { + tracing::info!("blob_list_tags"); + let blobs = self; + Gen::new(|co| async move { + let tags = blobs.store().tags().await.unwrap(); + #[allow(clippy::manual_flatten)] + for item in tags { + if let Ok((name, HashAndFormat { hash, format })) = item { + if (format.is_raw() && msg.raw) || (format.is_hash_seq() && msg.hash_seq) { + co.yield_(TagInfo { name, hash, format }).await; + } + } + } + }) + } + + /// Invoke validate on the database and stream out the result + fn blob_validate( + self: Arc, + msg: ValidateRequest, + ) -> impl Stream + Send + 'static { + let (tx, rx) = async_channel::bounded(1); + let tx2 = tx.clone(); + let blobs = self; + tokio::task::spawn(async move { + if let Err(e) = blobs + .store() + .validate(msg.repair, AsyncChannelProgressSender::new(tx).boxed()) + .await + { + tx2.send(ValidateProgress::Abort(RpcError::new(&e))) + .await + .ok(); + } + }); + rx + } + + /// Invoke validate on the database and stream out the result + fn blob_consistency_check( + self: Arc, + msg: ConsistencyCheckRequest, + ) -> impl Stream + Send + 'static { + let (tx, rx) = async_channel::bounded(1); + let tx2 = tx.clone(); + let blobs = self; + tokio::task::spawn(async move { + if let Err(e) = blobs + .store() + .consistency_check(msg.repair, AsyncChannelProgressSender::new(tx).boxed()) + .await + { + tx2.send(ConsistencyCheckProgress::Abort(RpcError::new(&e))) + .await + .ok(); + } + }); + rx + } + + fn blob_add_from_path( + self: Arc, + msg: AddPathRequest, + ) -> impl Stream { + // provide a little buffer so that we don't slow down the sender + let (tx, rx) = async_channel::bounded(32); + let tx2 = tx.clone(); + self.rt().spawn_detached(|| async move { + if let Err(e) = self.blob_add_from_path0(msg, tx).await { + tx2.send(AddProgress::Abort(RpcError::new(&*e))).await.ok(); + } + }); + rx.map(AddPathResponse) + } + + async fn tags_set(self: Arc, msg: TagsSetRequest) -> RpcResult<()> { + let blobs = self; + blobs + .store() + .set_tag(msg.name, msg.value) + .await + .map_err(|e| RpcError::new(&e))?; + if let SyncMode::Full = msg.sync { + blobs.store().sync().await.map_err(|e| RpcError::new(&e))?; + } + if let Some(batch) = msg.batch { + if let Some(content) = msg.value.as_ref() { + blobs + .batches() + .await + .remove_one(batch, content) + .map_err(|e| RpcError::new(&*e))?; 
+ } + } + Ok(()) + } + + async fn tags_create(self: Arc, msg: TagsCreateRequest) -> RpcResult { + let blobs = self; + let tag = blobs + .store() + .create_tag(msg.value) + .await + .map_err(|e| RpcError::new(&e))?; + if let SyncMode::Full = msg.sync { + blobs.store().sync().await.map_err(|e| RpcError::new(&e))?; + } + if let Some(batch) = msg.batch { + blobs + .batches() + .await + .remove_one(batch, &msg.value) + .map_err(|e| RpcError::new(&*e))?; + } + Ok(tag) + } + + fn blob_download(self: Arc, msg: BlobDownloadRequest) -> impl Stream { + let (sender, receiver) = async_channel::bounded(1024); + let endpoint = self.endpoint().clone(); + let progress = AsyncChannelProgressSender::new(sender); + + let blobs_protocol = self.clone(); + + self.rt().spawn_detached(move || async move { + if let Err(err) = blobs_protocol + .download(endpoint, msg, progress.clone()) + .await + { + progress + .send(DownloadProgress::Abort(RpcError::new(&*err))) + .await + .ok(); + } + }); + + receiver.map(DownloadResponse) + } + + fn blob_export(self: Arc, msg: ExportRequest) -> impl Stream { + let (tx, rx) = async_channel::bounded(1024); + let progress = AsyncChannelProgressSender::new(tx); + self.rt().spawn_detached(move || async move { + let res = crate::export::export( + self.store(), + msg.hash, + msg.path, + msg.format, + msg.mode, + progress.clone(), + ) + .await; + match res { + Ok(()) => progress.send(ExportProgress::AllDone).await.ok(), + Err(err) => progress + .send(ExportProgress::Abort(RpcError::new(&*err))) + .await + .ok(), + }; + }); + rx.map(ExportResponse) + } + + async fn blob_add_from_path0( + self: Arc, + msg: AddPathRequest, + progress: async_channel::Sender, + ) -> anyhow::Result<()> { + use std::collections::BTreeMap; + + use crate::store::ImportMode; + + let blobs = self.clone(); + let progress = AsyncChannelProgressSender::new(progress); + let names = Arc::new(Mutex::new(BTreeMap::new())); + // convert import progress to provide progress + let import_progress = progress.clone().with_filter_map(move |x| match x { + ImportProgress::Found { id, name } => { + names.lock().unwrap().insert(id, name); + None + } + ImportProgress::Size { id, size } => { + let name = names.lock().unwrap().remove(&id)?; + Some(AddProgress::Found { id, name, size }) + } + ImportProgress::OutboardProgress { id, offset } => { + Some(AddProgress::Progress { id, offset }) + } + ImportProgress::OutboardDone { hash, id } => Some(AddProgress::Done { hash, id }), + _ => None, + }); + let AddPathRequest { + wrap, + path: root, + in_place, + tag, + } = msg; + // Check that the path is absolute and exists. + anyhow::ensure!(root.is_absolute(), "path must be absolute"); + anyhow::ensure!( + root.exists(), + "trying to add missing path: {}", + root.display() + ); + + let import_mode = match in_place { + true => ImportMode::TryReference, + false => ImportMode::Copy, + }; + + let create_collection = match wrap { + WrapOption::Wrap { .. 
} => true, + WrapOption::NoWrap => root.is_dir(), + }; + + let temp_tag = if create_collection { + // import all files below root recursively + let data_sources = crate::util::fs::scan_path(root, wrap)?; + let blobs = self; + + const IO_PARALLELISM: usize = 4; + let result: Vec<_> = futures_lite::stream::iter(data_sources) + .map(|source| { + let import_progress = import_progress.clone(); + let blobs = blobs.clone(); + async move { + let name = source.name().to_string(); + let (tag, size) = blobs + .store() + .import_file( + source.path().to_owned(), + import_mode, + BlobFormat::Raw, + import_progress, + ) + .await?; + let hash = *tag.hash(); + io::Result::Ok((name, hash, size, tag)) + } + }) + .buffered_ordered(IO_PARALLELISM) + .try_collect() + .await?; + + // create a collection + let (collection, _child_tags): (Collection, Vec<_>) = result + .into_iter() + .map(|(name, hash, _, tag)| ((name, hash), tag)) + .unzip(); + + collection.store(blobs.store()).await? + } else { + // import a single file + let (tag, _size) = blobs + .store() + .import_file(root, import_mode, BlobFormat::Raw, import_progress) + .await?; + tag + }; + + let hash_and_format = temp_tag.inner(); + let HashAndFormat { hash, format } = *hash_and_format; + let tag = match tag { + SetTagOption::Named(tag) => { + blobs + .store() + .set_tag(tag.clone(), Some(*hash_and_format)) + .await?; + tag + } + SetTagOption::Auto => blobs.store().create_tag(*hash_and_format).await?, + }; + progress + .send(AddProgress::AllDone { + hash, + format, + tag: tag.clone(), + }) + .await?; + Ok(()) + } + + async fn batch_create_temp_tag(self: Arc, msg: BatchCreateTempTagRequest) -> RpcResult<()> { + let blobs = self; + let tag = blobs.store().temp_tag(msg.content); + blobs.batches().await.store(msg.batch, tag); + Ok(()) + } + + fn batch_add_stream( + self: Arc, + msg: BatchAddStreamRequest, + stream: impl Stream + Send + Unpin + 'static, + ) -> impl Stream { + let (tx, rx) = async_channel::bounded(32); + let this = self.clone(); + + self.rt().spawn_detached(|| async move { + if let Err(err) = this.batch_add_stream0(msg, stream, tx.clone()).await { + tx.send(BatchAddStreamResponse::Abort(RpcError::new(&*err))) + .await + .ok(); + } + }); + rx + } + + fn batch_add_from_path( + self: Arc, + msg: BatchAddPathRequest, + ) -> impl Stream { + // provide a little buffer so that we don't slow down the sender + let (tx, rx) = async_channel::bounded(32); + let tx2 = tx.clone(); + let this = self.clone(); + self.rt().spawn_detached(|| async move { + if let Err(e) = this.batch_add_from_path0(msg, tx).await { + tx2.send(BatchAddPathProgress::Abort(RpcError::new(&*e))) + .await + .ok(); + } + }); + rx.map(BatchAddPathResponse) + } + + async fn batch_add_stream0( + self: Arc, + msg: BatchAddStreamRequest, + stream: impl Stream + Send + Unpin + 'static, + progress: async_channel::Sender, + ) -> anyhow::Result<()> { + let blobs = self; + let progress = AsyncChannelProgressSender::new(progress); + + let stream = stream.map(|item| match item { + BatchAddStreamUpdate::Chunk(chunk) => Ok(chunk), + BatchAddStreamUpdate::Abort => { + Err(io::Error::new(io::ErrorKind::Interrupted, "Remote abort")) + } + }); + + let import_progress = progress.clone().with_filter_map(move |x| match x { + ImportProgress::OutboardProgress { offset, .. 
} => { + Some(BatchAddStreamResponse::OutboardProgress { offset }) + } + _ => None, + }); + let (temp_tag, _len) = blobs + .store() + .import_stream(stream, msg.format, import_progress) + .await?; + let hash = temp_tag.inner().hash; + blobs.batches().await.store(msg.batch, temp_tag); + progress + .send(BatchAddStreamResponse::Result { hash }) + .await?; + Ok(()) + } + + async fn batch_add_from_path0( + self: Arc, + msg: BatchAddPathRequest, + progress: async_channel::Sender, + ) -> anyhow::Result<()> { + let progress = AsyncChannelProgressSender::new(progress); + // convert import progress to provide progress + let import_progress = progress.clone().with_filter_map(move |x| match x { + ImportProgress::Size { size, .. } => Some(BatchAddPathProgress::Found { size }), + ImportProgress::OutboardProgress { offset, .. } => { + Some(BatchAddPathProgress::Progress { offset }) + } + ImportProgress::OutboardDone { hash, .. } => Some(BatchAddPathProgress::Done { hash }), + _ => None, + }); + let BatchAddPathRequest { + path: root, + import_mode, + format, + batch, + } = msg; + // Check that the path is absolute and exists. + anyhow::ensure!(root.is_absolute(), "path must be absolute"); + anyhow::ensure!( + root.exists(), + "trying to add missing path: {}", + root.display() + ); + let blobs = self; + let (tag, _) = blobs + .store() + .import_file(root, import_mode, format, import_progress) + .await?; + let hash = *tag.hash(); + blobs.batches().await.store(batch, tag); + + progress.send(BatchAddPathProgress::Done { hash }).await?; + Ok(()) + } + + fn blob_add_stream( + self: Arc, + msg: AddStreamRequest, + stream: impl Stream + Send + Unpin + 'static, + ) -> impl Stream { + let (tx, rx) = async_channel::bounded(32); + let this = self.clone(); + + self.rt().spawn_detached(|| async move { + if let Err(err) = this.blob_add_stream0(msg, stream, tx.clone()).await { + tx.send(AddProgress::Abort(RpcError::new(&*err))).await.ok(); + } + }); + + rx.map(AddStreamResponse) + } + + async fn blob_add_stream0( + self: Arc, + msg: AddStreamRequest, + stream: impl Stream + Send + Unpin + 'static, + progress: async_channel::Sender, + ) -> anyhow::Result<()> { + let progress = AsyncChannelProgressSender::new(progress); + + let stream = stream.map(|item| match item { + AddStreamUpdate::Chunk(chunk) => Ok(chunk), + AddStreamUpdate::Abort => { + Err(io::Error::new(io::ErrorKind::Interrupted, "Remote abort")) + } + }); + + let name_cache = Arc::new(Mutex::new(None)); + let import_progress = progress.clone().with_filter_map(move |x| match x { + ImportProgress::Found { id: _, name } => { + let _ = name_cache.lock().unwrap().insert(name); + None + } + ImportProgress::Size { id, size } => { + let name = name_cache.lock().unwrap().take()?; + Some(AddProgress::Found { id, name, size }) + } + ImportProgress::OutboardProgress { id, offset } => { + Some(AddProgress::Progress { id, offset }) + } + ImportProgress::OutboardDone { hash, id } => Some(AddProgress::Done { hash, id }), + _ => None, + }); + let blobs = self; + let (temp_tag, _len) = blobs + .store() + .import_stream(stream, BlobFormat::Raw, import_progress) + .await?; + let hash_and_format = *temp_tag.inner(); + let HashAndFormat { hash, format } = hash_and_format; + let tag = match msg.tag { + SetTagOption::Named(tag) => { + blobs + .store() + .set_tag(tag.clone(), Some(hash_and_format)) + .await?; + tag + } + SetTagOption::Auto => blobs.store().create_tag(hash_and_format).await?, + }; + progress + .send(AddProgress::AllDone { hash, tag, format }) + .await?; + Ok(()) + } + 
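Editorial aside: `blob_add_stream0` above, like `batch_add_stream0` before it, uses one adapter trick worth calling out. The incoming update stream interleaves data chunks with a client-driven abort marker, and mapping the marker to an `io::ErrorKind::Interrupted` error turns cancellation into an ordinary stream error, so the store's `import_stream` needs no separate cancellation path. A reduced sketch of the adapter, with `Update` standing in for the `AddStreamUpdate` protocol enum:

```rust
use std::io;

use bytes::Bytes;
use futures_lite::{Stream, StreamExt};

// Stand-in for the `AddStreamUpdate` protocol enum: either a data chunk
// or a client-initiated abort marker.
enum Update {
    Chunk(Bytes),
    Abort,
}

// Convert the protocol stream into the io::Result stream that the store's
// import machinery consumes; an abort surfaces as an Interrupted error.
fn updates_to_bytes(
    updates: impl Stream<Item = Update> + Send + Unpin + 'static,
) -> impl Stream<Item = io::Result<Bytes>> + Send + Unpin + 'static {
    updates.map(|item| match item {
        Update::Chunk(chunk) => Ok(chunk),
        Update::Abort => Err(io::Error::new(io::ErrorKind::Interrupted, "Remote abort")),
    })
}
```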
+ fn blob_read_at( + self: Arc, + req: ReadAtRequest, + ) -> impl Stream> + Send + 'static { + let (tx, rx) = async_channel::bounded(RPC_BLOB_GET_CHANNEL_CAP); + let db = self.store().clone(); + self.rt().spawn_detached(move || async move { + if let Err(err) = read_loop(req, db, tx.clone(), RPC_BLOB_GET_CHUNK_SIZE).await { + tx.send(RpcResult::Err(RpcError::new(&*err))).await.ok(); + } + }); + + async fn read_loop( + req: ReadAtRequest, + db: D, + tx: async_channel::Sender>, + max_chunk_size: usize, + ) -> anyhow::Result<()> { + let entry = db.get(&req.hash).await?; + let entry = entry.ok_or_else(|| anyhow!("Blob not found"))?; + let size = entry.size(); + + anyhow::ensure!( + req.offset <= size.value(), + "requested offset is out of range: {} > {:?}", + req.offset, + size + ); + + let len: usize = req + .len + .as_result_len(size.value() - req.offset) + .try_into()?; + + anyhow::ensure!( + req.offset + len as u64 <= size.value(), + "requested range is out of bounds: offset: {}, len: {} > {:?}", + req.offset, + len, + size + ); + + tx.send(Ok(ReadAtResponse::Entry { + size, + is_complete: entry.is_complete(), + })) + .await?; + let mut reader = entry.data_reader().await?; + + let (num_chunks, chunk_size) = if len <= max_chunk_size { + (1, len) + } else { + let num_chunks = len / max_chunk_size + (len % max_chunk_size != 0) as usize; + (num_chunks, max_chunk_size) + }; + + let mut read = 0u64; + for i in 0..num_chunks { + let chunk_size = if i == num_chunks - 1 { + // last chunk might be smaller + len - read as usize + } else { + chunk_size + }; + let chunk = reader.read_at(req.offset + read, chunk_size).await?; + let chunk_len = chunk.len(); + if !chunk.is_empty() { + tx.send(Ok(ReadAtResponse::Data { chunk })).await?; + } + if chunk_len < chunk_size { + break; + } else { + read += chunk_len as u64; + } + } + Ok(()) + } + + rx + } + + fn batch_create( + self: Arc, + _: BatchCreateRequest, + mut updates: impl Stream + Send + Unpin + 'static, + ) -> impl Stream { + let blobs = self; + async move { + let batch = blobs.batches().await.create(); + tokio::spawn(async move { + while let Some(item) = updates.next().await { + match item { + BatchUpdate::Drop(content) => { + // this can not fail, since we keep the batch alive. + // therefore it is safe to ignore the result. + let _ = blobs.batches().await.remove_one(batch, &content); + } + BatchUpdate::Ping => {} + } + } + blobs.batches().await.remove(batch); + }); + BatchCreateResponse::Id(batch) + } + .into_stream() + } + + + async fn create_collection( + self: Arc, + req: CreateCollectionRequest, + ) -> RpcResult { + let CreateCollectionRequest { + collection, + tag, + tags_to_delete, + } = req; + + let blobs = self; + + let temp_tag = collection + .store(blobs.store()) + .await + .map_err(|e| RpcError::new(&*e))?; + let hash_and_format = temp_tag.inner(); + let HashAndFormat { hash, .. 
} = *hash_and_format; + let tag = match tag { + SetTagOption::Named(tag) => { + blobs + .store() + .set_tag(tag.clone(), Some(*hash_and_format)) + .await + .map_err(|e| RpcError::new(&e))?; + tag + } + SetTagOption::Auto => blobs + .store() + .create_tag(*hash_and_format) + .await + .map_err(|e| RpcError::new(&e))?, + }; + + for tag in tags_to_delete { + blobs + .store() + .set_tag(tag, None) + .await + .map_err(|e| RpcError::new(&e))?; + } + + Ok(CreateCollectionResponse { hash, tag }) + } +} diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index 7a31983e0..c5625f5bb 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs @@ -86,7 +86,6 @@ use genawaiter::sync::{Co, Gen}; use iroh_net::NodeAddr; use portable_atomic::{AtomicU64, Ordering}; use quic_rpc::{client::BoxStreamSync, RpcClient}; -use ref_cast::RefCast; use serde::{Deserialize, Serialize}; use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf}; use tokio_util::io::{ReaderStream, StreamReader}; @@ -104,8 +103,7 @@ use crate::rpc::proto::blobs::{ }; /// Iroh blobs client. -#[derive(Debug, Clone, RefCast)] -#[repr(transparent)] +#[derive(Debug, Clone)] pub struct Client { pub(super) rpc: RpcClient, } @@ -115,6 +113,11 @@ where S: quic_rpc::Service, C: quic_rpc::ServiceConnection, { + /// Create a new client + pub fn new(rpc: RpcClient) -> Self { + Self { rpc } + } + /// Check if a blob is completely stored on the node. /// /// Note that this will return false for blobs that are partially stored on @@ -213,7 +216,7 @@ where /// For automatically clearing the tags for the passed in blobs you can set /// `tags_to_delete` to those tags, and they will be deleted once the collection is created. pub async fn create_collection( - &self, + self, collection: Collection, tag: SetTagOption, tags_to_delete: Vec, @@ -451,9 +454,7 @@ where } fn tags_client(&self) -> tags::Client { - tags::Client { - rpc: self.rpc.clone(), - } + tags::Client::new(self.rpc.clone()) } } @@ -480,7 +481,8 @@ pub enum ReadAtLen { } impl ReadAtLen { - pub(crate) fn as_result_len(&self, size_remaining: u64) -> u64 { + /// todo make private again + pub fn as_result_len(&self, size_remaining: u64) -> u64 { match self { ReadAtLen::All => size_remaining, ReadAtLen::Exact(len) => *len, @@ -875,7 +877,8 @@ impl Reader { } } - pub(crate) async fn from_rpc_read( + /// todo make private again + pub async fn from_rpc_read( rpc: &RpcClient, hash: Hash, ) -> anyhow::Result diff --git a/src/rpc/client/tags.rs b/src/rpc/client/tags.rs index 6b4ac2598..fa63a0481 100644 --- a/src/rpc/client/tags.rs +++ b/src/rpc/client/tags.rs @@ -16,13 +16,12 @@ use crate::{BlobFormat, Hash, Tag}; use anyhow::Result; use futures_lite::{Stream, StreamExt}; use quic_rpc::RpcClient; -use ref_cast::RefCast; use serde::{Deserialize, Serialize}; use crate::rpc::proto::tags::{DeleteRequest, ListRequest}; /// Iroh tags client. -#[derive(Debug, Clone, RefCast)] +#[derive(Debug, Clone)] #[repr(transparent)] pub struct Client { pub(super) rpc: RpcClient, @@ -33,6 +32,11 @@ where C: quic_rpc::ServiceConnection, S: quic_rpc::Service, { + /// Creates a new client + pub fn new(rpc: RpcClient) -> Self { + Self { rpc } + } + /// Lists all tags. 
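Editorial aside: the chunk arithmetic in `read_loop` (inside `blob_read_at` earlier in this patch) is easy to get wrong at the boundaries, so here it is extracted into a standalone function with the edge cases spelled out. `num_chunks` is a ceiling division of the requested length by the chunk size, and only the final chunk may be short; the constant matches this patch's `RPC_BLOB_GET_CHUNK_SIZE` of 64 KiB.

```rust
// The (num_chunks, chunk_size) computation from `read_loop`, same in shape:
// one short chunk for small reads, otherwise ceiling division with a
// full-sized chunk everywhere but possibly the tail.
fn chunking(len: usize, max_chunk_size: usize) -> (usize, usize) {
    if len <= max_chunk_size {
        (1, len)
    } else {
        let num_chunks = len / max_chunk_size + (len % max_chunk_size != 0) as usize;
        (num_chunks, max_chunk_size)
    }
}

fn main() {
    let max = 1024 * 64; // RPC_BLOB_GET_CHUNK_SIZE
    assert_eq!(chunking(100, max), (1, 100)); // small read: one short chunk
    assert_eq!(chunking(max, max), (1, max)); // exactly one full chunk
    assert_eq!(chunking(max + 10, max), (2, max)); // full chunk plus a 10 byte tail
    assert_eq!(chunking(3 * max, max), (3, max)); // evenly divisible, no extra chunk
}
```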
pub async fn list(&self) -> Result>> { let stream = self.rpc.server_streaming(ListRequest::all()).await?; diff --git a/src/rpc/proto.rs b/src/rpc/proto.rs index 26434ad16..174b0a80c 100644 --- a/src/rpc/proto.rs +++ b/src/rpc/proto.rs @@ -30,5 +30,7 @@ pub enum Response { Tags(tags::Response), } -type RpcError = serde_error::Error; -type RpcResult = Result; +/// Error type for RPC operations +pub type RpcError = serde_error::Error; +/// Result type for RPC operations +pub type RpcResult = Result; From 1ca13d0119bc991824c5fa0cb51618ac7a40efb0 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Thu, 31 Oct 2024 15:30:18 +0200 Subject: [PATCH 04/19] fmt --- src/rpc.rs | 125 +++++++++++++++++++--------------- src/rpc/client/blobs.rs | 23 ++++--- src/rpc/client/blobs/batch.rs | 26 +++---- src/rpc/client/tags.rs | 6 +- src/rpc/proto/blobs.rs | 5 +- src/rpc/proto/tags.rs | 3 +- 6 files changed, 101 insertions(+), 87 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 70cafe780..b764f948d 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -18,9 +18,18 @@ use iroh_base::hash::{BlobFormat, HashAndFormat}; use iroh_io::AsyncSliceReader; use proto::{ blobs::{ - AddPathRequest, AddPathResponse, AddStreamRequest, AddStreamResponse, AddStreamUpdate, BatchAddPathRequest, BatchAddPathResponse, BatchAddStreamRequest, BatchAddStreamResponse, BatchAddStreamUpdate, BatchCreateRequest, BatchCreateResponse, BatchCreateTempTagRequest, BatchUpdate, BlobStatusRequest, BlobStatusResponse, ConsistencyCheckRequest, CreateCollectionRequest, CreateCollectionResponse, DeleteRequest, DownloadResponse, ExportRequest, ExportResponse, ListIncompleteRequest, ListRequest, ReadAtRequest, ReadAtResponse, ValidateRequest + AddPathRequest, AddPathResponse, AddStreamRequest, AddStreamResponse, AddStreamUpdate, + BatchAddPathRequest, BatchAddPathResponse, BatchAddStreamRequest, BatchAddStreamResponse, + BatchAddStreamUpdate, BatchCreateRequest, BatchCreateResponse, BatchCreateTempTagRequest, + BatchUpdate, BlobStatusRequest, BlobStatusResponse, ConsistencyCheckRequest, + CreateCollectionRequest, CreateCollectionResponse, DeleteRequest, DownloadResponse, + ExportRequest, ExportResponse, ListIncompleteRequest, ListRequest, ReadAtRequest, + ReadAtResponse, ValidateRequest, + }, + tags::{ + CreateRequest as TagsCreateRequest, DeleteRequest as TagDeleteRequest, + ListRequest as TagListRequest, SetRequest as TagsSetRequest, SyncMode, }, - tags::SyncMode, RpcError, RpcResult, }; use quic_rpc::server::{RpcChannel, RpcServerError}; @@ -38,10 +47,6 @@ use crate::{ }, Tag, }; -use proto::tags::{ - CreateRequest as TagsCreateRequest, DeleteRequest as TagDeleteRequest, - ListRequest as TagListRequest, SetRequest as TagsSetRequest, -}; pub mod client; pub mod proto; @@ -51,15 +56,15 @@ const RPC_BLOB_GET_CHUNK_SIZE: usize = 1024 * 64; const RPC_BLOB_GET_CHANNEL_CAP: usize = 2; impl Blobs { - /// Handle an RPC request - pub async fn handle_rpc_request(self: Arc, + pub async fn handle_rpc_request( + self: Arc, msg: crate::rpc::proto::Request, - chan: RpcChannel + chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> - where - S: quic_rpc::Service, - C: quic_rpc::ServiceEndpoint, + where + S: quic_rpc::Service, + C: quic_rpc::ServiceEndpoint, { use crate::rpc::proto::Request::*; match msg { @@ -72,11 +77,11 @@ impl Blobs { pub async fn handle_tags_request( self: Arc, msg: proto::tags::Request, - chan: RpcChannel + chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> - where - S: quic_rpc::Service, - C: quic_rpc::ServiceEndpoint, + 
where + S: quic_rpc::Service, + C: quic_rpc::ServiceEndpoint, { use proto::tags::Request::*; match msg { @@ -91,46 +96,46 @@ impl Blobs { pub async fn handle_blobs_request( self: Arc, msg: proto::blobs::Request, - chan: RpcChannel + chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> - where - Sv: quic_rpc::Service, - C: quic_rpc::ServiceEndpoint, + where + Sv: quic_rpc::Service, + C: quic_rpc::ServiceEndpoint, { use proto::blobs::Request::*; - match msg { - List(msg) => chan.server_streaming(msg, self, Self::blob_list).await, - ListIncomplete(msg) => { - chan.server_streaming(msg, self, Self::blob_list_incomplete) - .await - } - CreateCollection(msg) => chan.rpc(msg, self, Self::create_collection).await, - Delete(msg) => chan.rpc(msg, self, Self::blob_delete_blob).await, - AddPath(msg) => { - chan.server_streaming(msg, self, Self::blob_add_from_path) - .await - } - Download(msg) => chan.server_streaming(msg, self, Self::blob_download).await, - Export(msg) => chan.server_streaming(msg, self, Self::blob_export).await, - Validate(msg) => chan.server_streaming(msg, self, Self::blob_validate).await, - Fsck(msg) => { - chan.server_streaming(msg, self, Self::blob_consistency_check) - .await - } - ReadAt(msg) => chan.server_streaming(msg, self, Self::blob_read_at).await, - AddStream(msg) => chan.bidi_streaming(msg, self, Self::blob_add_stream).await, - AddStreamUpdate(_msg) => Err(RpcServerError::UnexpectedUpdateMessage), - BlobStatus(msg) => chan.rpc(msg, self, Self::blob_status).await, - BatchCreate(msg) => chan.bidi_streaming(msg, self, Self::batch_create).await, - BatchUpdate(_) => Err(RpcServerError::UnexpectedStartMessage), - BatchAddStream(msg) => chan.bidi_streaming(msg, self, Self::batch_add_stream).await, - BatchAddStreamUpdate(_) => Err(RpcServerError::UnexpectedStartMessage), - BatchAddPath(msg) => { - chan.server_streaming(msg, self, Self::batch_add_from_path) - .await + match msg { + List(msg) => chan.server_streaming(msg, self, Self::blob_list).await, + ListIncomplete(msg) => { + chan.server_streaming(msg, self, Self::blob_list_incomplete) + .await + } + CreateCollection(msg) => chan.rpc(msg, self, Self::create_collection).await, + Delete(msg) => chan.rpc(msg, self, Self::blob_delete_blob).await, + AddPath(msg) => { + chan.server_streaming(msg, self, Self::blob_add_from_path) + .await + } + Download(msg) => chan.server_streaming(msg, self, Self::blob_download).await, + Export(msg) => chan.server_streaming(msg, self, Self::blob_export).await, + Validate(msg) => chan.server_streaming(msg, self, Self::blob_validate).await, + Fsck(msg) => { + chan.server_streaming(msg, self, Self::blob_consistency_check) + .await + } + ReadAt(msg) => chan.server_streaming(msg, self, Self::blob_read_at).await, + AddStream(msg) => chan.bidi_streaming(msg, self, Self::blob_add_stream).await, + AddStreamUpdate(_msg) => Err(RpcServerError::UnexpectedUpdateMessage), + BlobStatus(msg) => chan.rpc(msg, self, Self::blob_status).await, + BatchCreate(msg) => chan.bidi_streaming(msg, self, Self::batch_create).await, + BatchUpdate(_) => Err(RpcServerError::UnexpectedStartMessage), + BatchAddStream(msg) => chan.bidi_streaming(msg, self, Self::batch_add_stream).await, + BatchAddStreamUpdate(_) => Err(RpcServerError::UnexpectedStartMessage), + BatchAddPath(msg) => { + chan.server_streaming(msg, self, Self::batch_add_from_path) + .await + } + BatchCreateTempTag(msg) => chan.rpc(msg, self, Self::batch_create_temp_tag).await, } - BatchCreateTempTag(msg) => chan.rpc(msg, self, 
Self::batch_create_temp_tag).await, - } } async fn blob_status(self: Arc, msg: BlobStatusRequest) -> RpcResult { @@ -236,7 +241,10 @@ impl Blobs { Ok(()) } - fn blob_list_tags(self: Arc, msg: TagListRequest) -> impl Stream + Send + 'static { + fn blob_list_tags( + self: Arc, + msg: TagListRequest, + ) -> impl Stream + Send + 'static { tracing::info!("blob_list_tags"); let blobs = self; Gen::new(|co| async move { @@ -353,7 +361,10 @@ impl Blobs { Ok(tag) } - fn blob_download(self: Arc, msg: BlobDownloadRequest) -> impl Stream { + fn blob_download( + self: Arc, + msg: BlobDownloadRequest, + ) -> impl Stream { let (sender, receiver) = async_channel::bounded(1024); let endpoint = self.endpoint().clone(); let progress = AsyncChannelProgressSender::new(sender); @@ -518,7 +529,10 @@ impl Blobs { Ok(()) } - async fn batch_create_temp_tag(self: Arc, msg: BatchCreateTempTagRequest) -> RpcResult<()> { + async fn batch_create_temp_tag( + self: Arc, + msg: BatchCreateTempTagRequest, + ) -> RpcResult<()> { let blobs = self; let tag = blobs.store().temp_tag(msg.content); blobs.batches().await.store(msg.batch, tag); @@ -813,7 +827,6 @@ impl Blobs { .into_stream() } - async fn create_collection( self: Arc, req: CreateCollectionRequest, diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index c5625f5bb..9b9cfe988 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs @@ -68,16 +68,6 @@ use std::{ task::{Context, Poll}, }; -pub use crate::net_protocol::DownloadMode; -use crate::{ - export::ExportProgress as BytesExportProgress, - format::collection::{Collection, SimpleStore}, - get::db::DownloadProgress as BytesDownloadProgress, - net_protocol::BlobDownloadRequest, - store::{BaoBlobSize, ConsistencyCheckProgress, ExportFormat, ExportMode, ValidateProgress}, - util::SetTagOption, - BlobFormat, Hash, Tag, -}; use anyhow::{anyhow, Context as _, Result}; use bytes::Bytes; use futures_lite::{Stream, StreamExt}; @@ -91,6 +81,17 @@ use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf}; use tokio_util::io::{ReaderStream, StreamReader}; use tracing::warn; +pub use crate::net_protocol::DownloadMode; +use crate::{ + export::ExportProgress as BytesExportProgress, + format::collection::{Collection, SimpleStore}, + get::db::DownloadProgress as BytesDownloadProgress, + net_protocol::BlobDownloadRequest, + store::{BaoBlobSize, ConsistencyCheckProgress, ExportFormat, ExportMode, ValidateProgress}, + util::SetTagOption, + BlobFormat, Hash, Tag, +}; + mod batch; pub use batch::{AddDirOpts, AddFileOpts, AddReaderOpts, Batch}; @@ -982,13 +983,13 @@ pub struct DownloadOptions { #[cfg(test)] mod tests { - use crate::hashseq::HashSeq; use iroh_net::NodeId; use rand::RngCore; use testresult::TestResult; use tokio::{io::AsyncWriteExt, sync::mpsc}; use super::*; + use crate::hashseq::HashSeq; #[tokio::test] async fn test_blob_create_collection() -> Result<()> { diff --git a/src/rpc/client/blobs/batch.rs b/src/rpc/client/blobs/batch.rs index 942e1dbd3..6b08e9268 100644 --- a/src/rpc/client/blobs/batch.rs +++ b/src/rpc/client/blobs/batch.rs @@ -4,14 +4,6 @@ use std::{ sync::{Arc, Mutex}, }; -use crate::{ - format::collection::Collection, - net_protocol::BatchId, - provider::BatchAddPathProgress, - store::ImportMode, - util::{SetTagOption, TagDrop}, - BlobFormat, HashAndFormat, Tag, TempTag, -}; use anyhow::{anyhow, Context, Result}; use bytes::Bytes; use futures_buffered::BufferedStreamExt; @@ -23,12 +15,20 @@ use tokio_util::io::ReaderStream; use tracing::{debug, warn}; use super::WrapOption; -use 
crate::rpc::proto::{ - blobs::{ - BatchAddPathRequest, BatchAddStreamRequest, BatchAddStreamResponse, BatchAddStreamUpdate, - BatchCreateTempTagRequest, BatchUpdate, +use crate::{ + format::collection::Collection, + net_protocol::BatchId, + provider::BatchAddPathProgress, + rpc::proto::{ + blobs::{ + BatchAddPathRequest, BatchAddStreamRequest, BatchAddStreamResponse, + BatchAddStreamUpdate, BatchCreateTempTagRequest, BatchUpdate, + }, + tags::{self, SyncMode}, }, - tags::{self, SyncMode}, + store::ImportMode, + util::{SetTagOption, TagDrop}, + BlobFormat, HashAndFormat, Tag, TempTag, }; /// A scope in which blobs can be added. diff --git a/src/rpc/client/tags.rs b/src/rpc/client/tags.rs index fa63a0481..b4fe680c2 100644 --- a/src/rpc/client/tags.rs +++ b/src/rpc/client/tags.rs @@ -12,13 +12,15 @@ //! [`Client::list_hash_seq`] can be used to list all tags with a hash_seq format. //! //! [`Client::delete`] can be used to delete a tag. -use crate::{BlobFormat, Hash, Tag}; use anyhow::Result; use futures_lite::{Stream, StreamExt}; use quic_rpc::RpcClient; use serde::{Deserialize, Serialize}; -use crate::rpc::proto::tags::{DeleteRequest, ListRequest}; +use crate::{ + rpc::proto::tags::{DeleteRequest, ListRequest}, + BlobFormat, Hash, Tag, +}; /// Iroh tags client. #[derive(Debug, Clone)] diff --git a/src/rpc/proto/blobs.rs b/src/rpc/proto/blobs.rs index 113971ad7..2788027d6 100644 --- a/src/rpc/proto/blobs.rs +++ b/src/rpc/proto/blobs.rs @@ -7,12 +7,14 @@ use nested_enum_utils::enum_conversions; use quic_rpc_derive::rpc_requests; use serde::{Deserialize, Serialize}; +use super::{RpcError, RpcResult, RpcService}; use crate::{ export::ExportProgress, format::collection::Collection, get::db::DownloadProgress, net_protocol::{BatchId, BlobDownloadRequest}, provider::{AddProgress, BatchAddPathProgress}, + rpc::client::blobs::{BlobInfo, BlobStatus, IncompleteBlobInfo, ReadAtLen, WrapOption}, store::{ BaoBlobSize, ConsistencyCheckProgress, ExportFormat, ExportMode, ImportMode, ValidateProgress, @@ -21,9 +23,6 @@ use crate::{ BlobFormat, HashAndFormat, Tag, }; -use super::{RpcError, RpcResult, RpcService}; -use crate::rpc::client::blobs::{BlobInfo, BlobStatus, IncompleteBlobInfo, ReadAtLen, WrapOption}; - #[allow(missing_docs)] #[derive(strum::Display, Debug, Serialize, Deserialize)] #[enum_conversions(super::Request)] diff --git a/src/rpc/proto/tags.rs b/src/rpc/proto/tags.rs index 71bf29f7b..54d35f625 100644 --- a/src/rpc/proto/tags.rs +++ b/src/rpc/proto/tags.rs @@ -4,8 +4,7 @@ use quic_rpc_derive::rpc_requests; use serde::{Deserialize, Serialize}; use super::{RpcResult, RpcService}; -use crate::rpc::client::tags::TagInfo; -use crate::{net_protocol::BatchId, HashAndFormat, Tag}; +use crate::{net_protocol::BatchId, rpc::client::tags::TagInfo, HashAndFormat, Tag}; #[allow(missing_docs)] #[derive(strum::Display, Debug, Serialize, Deserialize)] From f0a163277609709071f77493cda92fa62063b9f4 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Thu, 31 Oct 2024 16:37:01 +0200 Subject: [PATCH 05/19] Add mini node for testing --- src/lib.rs | 2 + src/node.rs | 161 ++++++++++++++++++++++++++++++++++++++++ src/rpc/client/blobs.rs | 143 ++++++++++++++--------------------- src/rpc/client/tags.rs | 7 +- 4 files changed, 225 insertions(+), 88 deletions(-) create mode 100644 src/node.rs diff --git a/src/lib.rs b/src/lib.rs index 8b561c504..fa790aa18 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -46,6 +46,8 @@ pub mod rpc; pub mod store; pub mod util; +pub mod node; + use bao_tree::BlockSize; pub use 
iroh_base::hash::{BlobFormat, Hash, HashAndFormat}; diff --git a/src/node.rs b/src/node.rs new file mode 100644 index 000000000..3fd1d4beb --- /dev/null +++ b/src/node.rs @@ -0,0 +1,161 @@ +//! An iroh node that just has the blobs transport +use std::{path::Path, sync::Arc}; + +use iroh_net::{NodeAddr, NodeId}; +use quic_rpc::client::BoxedServiceConnection; +use tokio_util::task::AbortOnDropHandle; + +use crate::{ + provider::{CustomEventSender, EventSender}, + rpc::client::{blobs, tags}, + util::local_pool::LocalPool, +}; + +type RpcClient = quic_rpc::RpcClient< + crate::rpc::proto::RpcService, + BoxedServiceConnection, + crate::rpc::proto::RpcService, +>; + +/// An iroh node that just has the blobs transport +#[derive(Debug)] +pub struct Node { + router: iroh_router::Router, + client: RpcClient, + _local_pool: LocalPool, + _rpc_task: AbortOnDropHandle<()>, +} + +/// An iroh node builder +#[derive(Debug)] +pub struct Builder { + store: S, + events: EventSender, +} + +impl Builder { + /// Sets the event sender + pub fn blobs_events(self, events: impl CustomEventSender) -> Self { + Builder { + store: self.store, + events: events.into(), + } + } + + /// Spawns the node + pub async fn spawn(self) -> anyhow::Result { + let (client, router, rpc_task, _local_pool) = setup_router(self.store, self.events).await?; + Ok(Node { + router, + client, + _rpc_task: AbortOnDropHandle::new(rpc_task), + _local_pool, + }) + } +} + +impl Node { + /// Creates a new node with memory storage + pub fn memory() -> Builder { + Builder { + store: crate::store::mem::Store::new(), + events: Default::default(), + } + } + + /// Creates a new node with persistent storage + pub async fn persistent( + path: impl AsRef, + ) -> anyhow::Result> { + Ok(Builder { + store: crate::store::fs::Store::load(path).await?, + events: Default::default(), + }) + } + + /// Returns the node id + pub fn node_id(&self) -> NodeId { + self.router.endpoint().node_id() + } + + /// Returns the node address + pub async fn node_addr(&self) -> anyhow::Result { + self.router.endpoint().node_addr().await + } + + /// Shuts down the node + pub async fn shutdown(self) -> anyhow::Result<()> { + self.router.shutdown().await + } + + /// Returns an in-memory blobs client + pub fn blobs(&self) -> blobs::Client { + blobs::Client::new(self.client.clone()) + } + + /// Returns an in-memory tags client + pub fn tags(&self) -> tags::Client { + tags::Client::new(self.client.clone()) + } +} + +async fn setup_router( + store: S, + events: EventSender, +) -> anyhow::Result<( + RpcClient, + iroh_router::Router, + tokio::task::JoinHandle<()>, + LocalPool, +)> { + let endpoint = iroh_net::Endpoint::builder().bind().await?; + let local_pool = LocalPool::single(); + let mut router = iroh_router::Router::builder(endpoint.clone()); + + // Setup blobs + let downloader = crate::downloader::Downloader::new( + store.clone(), + endpoint.clone(), + local_pool.handle().clone(), + ); + let blobs = Arc::new(crate::net_protocol::Blobs::new_with_events( + store.clone(), + local_pool.handle().clone(), + events, + downloader, + endpoint.clone(), + )); + router = router.accept(crate::protocol::ALPN.to_vec(), blobs.clone()); + + // Build the router + let router = router.spawn().await?; + + // Setup RPC + let (internal_rpc, controller) = + quic_rpc::transport::flume::service_connection::(32); + let controller = quic_rpc::transport::boxed::Connection::new(controller); + let internal_rpc = quic_rpc::transport::boxed::ServerEndpoint::new(internal_rpc); + let internal_rpc = 
quic_rpc::RpcServer::new(internal_rpc); + + let rpc_server_task: tokio::task::JoinHandle<()> = tokio::task::spawn(async move { + loop { + let request = internal_rpc.accept().await; + match request { + Ok(accepting) => { + let blobs = blobs.clone(); + tokio::task::spawn(async move { + let (msg, chan) = accepting.read_first().await.unwrap(); + blobs.handle_rpc_request(msg, chan).await.unwrap(); + }); + } + Err(err) => { + tracing::warn!("rpc error: {:?}", err); + } + } + } + }); + + let client = quic_rpc::RpcClient::new(controller); + + Ok((client, router, rpc_server_task, local_pool)) +} diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index 9b9cfe988..82c890b7f 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs @@ -75,7 +75,10 @@ use futures_util::SinkExt; use genawaiter::sync::{Co, Gen}; use iroh_net::NodeAddr; use portable_atomic::{AtomicU64, Ordering}; -use quic_rpc::{client::BoxStreamSync, RpcClient}; +use quic_rpc::{ + client::{BoxStreamSync, BoxedServiceConnection}, + RpcClient, +}; use serde::{Deserialize, Serialize}; use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf}; use tokio_util::io::{ReaderStream, StreamReader}; @@ -105,7 +108,10 @@ use crate::rpc::proto::blobs::{ /// Iroh blobs client. #[derive(Debug, Clone)] -pub struct Client { +pub struct Client< + C = BoxedServiceConnection, + S = crate::rpc::proto::RpcService, +> { pub(super) rpc: RpcClient, } @@ -217,7 +223,7 @@ where /// For automatically clearing the tags for the passed in blobs you can set /// `tags_to_delete` to those tags, and they will be deleted once the collection is created. pub async fn create_collection( - self, + &self, collection: Collection, tag: SetTagOption, tags_to_delete: Vec, @@ -1019,14 +1025,13 @@ mod tests { paths.push(path); } - let client = node.client(); + let blobs = node.blobs(); let mut collection = Collection::default(); let mut tags = Vec::new(); // import files for path in &paths { - let import_outcome = client - .blobs() + let import_outcome = blobs .add_from_path( path.to_path_buf(), false, @@ -1046,12 +1051,11 @@ mod tests { tags.push(import_outcome.tag); } - let (hash, tag) = client - .blobs() + let (hash, tag) = blobs .create_collection(collection, SetTagOption::Auto, tags) .await?; - let collections: Vec<_> = client.blobs().list_collections()?.try_collect().await?; + let collections: Vec<_> = blobs.list_collections()?.try_collect().await?; assert_eq!(collections.len(), 1); { @@ -1068,7 +1072,7 @@ mod tests { } // check that "temp" tags have been deleted - let tags: Vec<_> = client.tags().list().await?.try_collect().await?; + let tags: Vec<_> = node.tags().list().await?.try_collect().await?; assert_eq!(tags.len(), 1); assert_eq!(tags[0].hash, hash); assert_eq!(tags[0].name, tag); @@ -1100,10 +1104,9 @@ mod tests { file.write_all(&buf.clone()).await.context("write_all")?; file.flush().await.context("flush")?; - let client = node.client(); + let blobs = node.blobs(); - let import_outcome = client - .blobs() + let import_outcome = blobs .add_from_path( path.to_path_buf(), false, @@ -1119,89 +1122,74 @@ mod tests { let hash = import_outcome.hash; // Read everything - let res = client.blobs().read_to_bytes(hash).await?; + let res = blobs.read_to_bytes(hash).await?; assert_eq!(&res, &buf[..]); // Read at smaller than blob_get_chunk_size - let res = client - .blobs() + let res = blobs .read_at_to_bytes(hash, 0, ReadAtLen::Exact(100)) .await?; assert_eq!(res.len(), 100); assert_eq!(&res[..], &buf[0..100]); - let res = client - .blobs() + let res = blobs 
.read_at_to_bytes(hash, 20, ReadAtLen::Exact(120)) .await?; assert_eq!(res.len(), 120); assert_eq!(&res[..], &buf[20..140]); // Read at equal to blob_get_chunk_size - let res = client - .blobs() + let res = blobs .read_at_to_bytes(hash, 0, ReadAtLen::Exact(1024 * 64)) .await?; assert_eq!(res.len(), 1024 * 64); assert_eq!(&res[..], &buf[0..1024 * 64]); - let res = client - .blobs() + let res = blobs .read_at_to_bytes(hash, 20, ReadAtLen::Exact(1024 * 64)) .await?; assert_eq!(res.len(), 1024 * 64); assert_eq!(&res[..], &buf[20..(20 + 1024 * 64)]); // Read at larger than blob_get_chunk_size - let res = client - .blobs() + let res = blobs .read_at_to_bytes(hash, 0, ReadAtLen::Exact(10 + 1024 * 64)) .await?; assert_eq!(res.len(), 10 + 1024 * 64); assert_eq!(&res[..], &buf[0..(10 + 1024 * 64)]); - let res = client - .blobs() + let res = blobs .read_at_to_bytes(hash, 20, ReadAtLen::Exact(10 + 1024 * 64)) .await?; assert_eq!(res.len(), 10 + 1024 * 64); assert_eq!(&res[..], &buf[20..(20 + 10 + 1024 * 64)]); // full length - let res = client - .blobs() - .read_at_to_bytes(hash, 20, ReadAtLen::All) - .await?; + let res = blobs.read_at_to_bytes(hash, 20, ReadAtLen::All).await?; assert_eq!(res.len(), 1024 * 128 - 20); assert_eq!(&res[..], &buf[20..]); // size should be total - let reader = client - .blobs() - .read_at(hash, 0, ReadAtLen::Exact(20)) - .await?; + let reader = blobs.read_at(hash, 0, ReadAtLen::Exact(20)).await?; assert_eq!(reader.size(), 1024 * 128); assert_eq!(reader.response_size, 20); // last chunk - exact - let res = client - .blobs() + let res = blobs .read_at_to_bytes(hash, 1024 * 127, ReadAtLen::Exact(1024)) .await?; assert_eq!(res.len(), 1024); assert_eq!(res, &buf[1024 * 127..]); // last chunk - open - let res = client - .blobs() + let res = blobs .read_at_to_bytes(hash, 1024 * 127, ReadAtLen::All) .await?; assert_eq!(res.len(), 1024); assert_eq!(res, &buf[1024 * 127..]); // last chunk - larger - let mut res = client - .blobs() + let mut res = blobs .read_at(hash, 1024 * 127, ReadAtLen::AtMost(2048)) .await?; assert_eq!(res.size, 1024 * 128); @@ -1211,24 +1199,19 @@ mod tests { assert_eq!(res, &buf[1024 * 127..]); // out of bounds - too long - let res = client - .blobs() + let res = blobs .read_at(hash, 0, ReadAtLen::Exact(1024 * 128 + 1)) .await; let err = res.unwrap_err(); assert!(err.to_string().contains("out of bound")); // out of bounds - offset larger than blob - let res = client - .blobs() - .read_at(hash, 1024 * 128 + 1, ReadAtLen::All) - .await; + let res = blobs.read_at(hash, 1024 * 128 + 1, ReadAtLen::All).await; let err = res.unwrap_err(); assert!(err.to_string().contains("out of range")); // out of bounds - offset + length too large - let res = client - .blobs() + let res = blobs .read_at(hash, 1024 * 127, ReadAtLen::Exact(1025)) .await; let err = res.unwrap_err(); @@ -1265,14 +1248,13 @@ mod tests { paths.push(path); } - let client = node.client(); + let blobs = node.blobs(); let mut collection = Collection::default(); let mut tags = Vec::new(); // import files for path in &paths { - let import_outcome = client - .blobs() + let import_outcome = blobs .add_from_path( path.to_path_buf(), false, @@ -1292,12 +1274,11 @@ mod tests { tags.push(import_outcome.tag); } - let (hash, _tag) = client - .blobs() + let (hash, _tag) = blobs .create_collection(collection, SetTagOption::Auto, tags) .await?; - let collection = client.blobs().get_collection(hash).await?; + let collection = blobs.get_collection(hash).await?; // 5 blobs assert_eq!(collection.len(), 5); @@ -1328,10 
+1309,9 @@ mod tests { file.write_all(&buf.clone()).await.context("write_all")?; file.flush().await.context("flush")?; - let client = node.client(); + let blobs = node.blobs(); - let import_outcome = client - .blobs() + let import_outcome = blobs .add_from_path( path.to_path_buf(), false, @@ -1344,13 +1324,12 @@ mod tests { .await .context("import finish")?; - let ticket = client - .blobs() - .share(import_outcome.hash, BlobFormat::Raw, Default::default()) - .await?; - assert_eq!(ticket.hash(), import_outcome.hash); + // let ticket = blobs + // .share(import_outcome.hash, BlobFormat::Raw, Default::default()) + // .await?; + // assert_eq!(ticket.hash(), import_outcome.hash); - let status = client.blobs().status(import_outcome.hash).await?; + let status = blobs.status(import_outcome.hash).await?; assert_eq!(status, BlobStatus::Complete { size }); Ok(()) @@ -1400,7 +1379,7 @@ mod tests { let import_outcome = node1.blobs().add_bytes(&b"hello world"[..]).await?; // Download in node2 - let node1_addr = node1.net().node_addr().await?; + let node1_addr = node1.node_addr().await?; let res = node2 .blobs() .download(import_outcome.hash, node1_addr) @@ -1452,13 +1431,12 @@ mod tests { let node = crate::node::Node::memory().spawn().await?; let node_id = node.node_id(); - let client = node.client(); + let blobs = node.blobs(); - let AddOutcome { hash, size, .. } = client.blobs().add_bytes("foo").await?; + let AddOutcome { hash, size, .. } = blobs.add_bytes("foo").await?; // Direct - let res = client - .blobs() + let res = blobs .download_with_opts( hash, DownloadOptions { @@ -1475,8 +1453,7 @@ mod tests { assert_eq!(res.downloaded_size, 0); // Queued - let res = client - .blobs() + let res = blobs .download_with_opts( hash, DownloadOptions { @@ -1502,13 +1479,12 @@ mod tests { let node = crate::node::Node::memory().spawn().await?; let node_id = node.node_id(); - let client = node.client(); + let blobs = node.blobs(); let hash = Hash::from_bytes([0u8; 32]); // Direct - let res = client - .blobs() + let res = blobs .download_with_opts( hash, DownloadOptions { @@ -1527,8 +1503,7 @@ mod tests { ); // Queued - let res = client - .blobs() + let res = blobs .download_with_opts( hash, DownloadOptions { @@ -1558,36 +1533,33 @@ mod tests { // We use a nonexisting node id because we just want to check that this succeeds without // hitting the network. 
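Editorial aside: the tests above all share the same setup, so for orientation here is the smallest complete round trip through the test-only `node` module introduced in this patch: spawn an in-memory node, add a blob through the RPC client, read it back, and shut down. A sketch under the same assumptions as the surrounding tests (the crate's `rpc` feature and a tokio runtime):

```rust
async fn roundtrip() -> anyhow::Result<()> {
    // In-memory store, fresh endpoint, flume-backed RPC loopback.
    let node = node::Node::memory().spawn().await?;
    let blobs = node.blobs();

    // Add a blob and read it back over the same RPC client.
    let outcome = blobs.add_bytes(&b"hello world"[..]).await?;
    let bytes = blobs.read_to_bytes(outcome.hash).await?;
    assert_eq!(&bytes[..], b"hello world");

    // add_bytes uses an auto tag, so exactly one tag should be listed.
    let tags: Vec<_> = node.tags().list().await?.try_collect().await?;
    assert_eq!(tags.len(), 1);

    node.shutdown().await?;
    Ok(())
}
```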
let node_id = NodeId::from_bytes(&[0u8; 32])?; - let client = node.client(); + let blobs = node.blobs(); let mut collection = Collection::default(); let mut tags = Vec::new(); let mut size = 0; for value in ["iroh", "is", "cool"] { - let import_outcome = client.blobs().add_bytes(value).await.context("add bytes")?; + let import_outcome = blobs.add_bytes(value).await.context("add bytes")?; collection.push(value.to_string(), import_outcome.hash); tags.push(import_outcome.tag); size += import_outcome.size; } - let (hash, _tag) = client - .blobs() + let (hash, _tag) = blobs .create_collection(collection, SetTagOption::Auto, tags) .await?; // load the hashseq and collection header manually to calculate our expected size - let hashseq_bytes = client.blobs().read_to_bytes(hash).await?; + let hashseq_bytes = blobs.read_to_bytes(hash).await?; size += hashseq_bytes.len() as u64; let hashseq = HashSeq::try_from(hashseq_bytes)?; - let collection_header_bytes = client - .blobs() + let collection_header_bytes = blobs .read_to_bytes(hashseq.into_iter().next().expect("header to exist")) .await?; size += collection_header_bytes.len() as u64; // Direct - let res = client - .blobs() + let res = blobs .download_with_opts( hash, DownloadOptions { @@ -1605,8 +1577,7 @@ mod tests { assert_eq!(res.downloaded_size, 0); // Queued - let res = client - .blobs() + let res = blobs .download_with_opts( hash, DownloadOptions { diff --git a/src/rpc/client/tags.rs b/src/rpc/client/tags.rs index b4fe680c2..87bd2c4ef 100644 --- a/src/rpc/client/tags.rs +++ b/src/rpc/client/tags.rs @@ -14,7 +14,7 @@ //! [`Client::delete`] can be used to delete a tag. use anyhow::Result; use futures_lite::{Stream, StreamExt}; -use quic_rpc::RpcClient; +use quic_rpc::{client::BoxedServiceConnection, RpcClient}; use serde::{Deserialize, Serialize}; use crate::{ @@ -25,7 +25,10 @@ use crate::{ /// Iroh tags client. 
#[derive(Debug, Clone)] #[repr(transparent)] -pub struct Client { +pub struct Client< + C = BoxedServiceConnection, + S = crate::rpc::proto::RpcService, +> { pub(super) rpc: RpcClient, } From 585589f93cb79f72e64441b56e2da3ff7957f572 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Thu, 31 Oct 2024 17:07:14 +0200 Subject: [PATCH 06/19] enable discovery for node --- src/node.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/node.rs b/src/node.rs index 3fd1d4beb..304d44175 100644 --- a/src/node.rs +++ b/src/node.rs @@ -108,7 +108,7 @@ async fn setup_router( tokio::task::JoinHandle<()>, LocalPool, )> { - let endpoint = iroh_net::Endpoint::builder().bind().await?; + let endpoint = iroh_net::Endpoint::builder().discovery_n0().bind().await?; let local_pool = LocalPool::single(); let mut router = iroh_router::Router::builder(endpoint.clone()); From 771541218ee03415c0ed80ad97cb806eaad3c038 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Mon, 4 Nov 2024 15:09:38 +0200 Subject: [PATCH 07/19] hide unused stuff depending on features --- src/lib.rs | 2 ++ src/util/fs.rs | 36 ++++++++++++++++++++++++------------ 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index fa790aa18..0ea4f40eb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -46,6 +46,8 @@ pub mod rpc; pub mod store; pub mod util; +#[cfg(feature = "rpc")] +#[cfg_attr(iroh_docsrs, doc(cfg(feature = "rpc")))] pub mod node; use bao_tree::BlockSize; diff --git a/src/util/fs.rs b/src/util/fs.rs index 71dc4caff..1b5a79ca3 100644 --- a/src/util/fs.rs +++ b/src/util/fs.rs @@ -7,12 +7,6 @@ use std::{ use anyhow::{bail, Context}; use bytes::Bytes; -use iroh_net::key::SecretKey; -use tokio::io::AsyncWriteExt; -use walkdir::WalkDir; - -use crate::rpc::client::blobs::WrapOption; - /// A data source #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] pub struct DataSource { @@ -62,7 +56,12 @@ impl From<&std::path::Path> for DataSource { } /// Create data sources from a path. -pub fn scan_path(path: PathBuf, wrap: WrapOption) -> anyhow::Result> { +#[cfg(feature = "rpc")] +pub fn scan_path( + path: PathBuf, + wrap: crate::rpc::client::blobs::WrapOption, +) -> anyhow::Result> { + use crate::rpc::client::blobs::WrapOption; if path.is_dir() { scan_dir(path, wrap) } else { @@ -75,12 +74,20 @@ pub fn scan_path(path: PathBuf, wrap: WrapOption) -> anyhow::Result anyhow::Result { relative_canonicalized_path_to_string(path.file_name().context("path is invalid")?) } /// Create data sources from a directory. -pub fn scan_dir(root: PathBuf, wrap: WrapOption) -> anyhow::Result> { +#[cfg(feature = "rpc")] +#[cfg_attr(iroh_docsrs, doc(cfg(feature = "rpc")))] +pub fn scan_dir( + root: PathBuf, + wrap: crate::rpc::client::blobs::WrapOption, +) -> anyhow::Result> { + use crate::rpc::client::blobs::WrapOption; if !root.is_dir() { bail!("Expected {} to be a file", root.to_string_lossy()); } @@ -89,7 +96,7 @@ pub fn scan_dir(root: PathBuf, wrap: WrapOption) -> anyhow::Result Some(file_name(&root)?), WrapOption::Wrap { name: Some(name) } => Some(name), }; - let files = WalkDir::new(&root).into_iter(); + let files = walkdir::WalkDir::new(&root).into_iter(); let data_sources = files .map(|entry| { let entry = entry?; @@ -121,13 +128,18 @@ pub fn relative_canonicalized_path_to_string(path: impl AsRef) -> anyhow:: /// Loads a [`SecretKey`] from the provided file, or stores a newly generated one /// at the given location. 
-pub async fn load_secret_key(key_path: PathBuf) -> anyhow::Result { +#[cfg(feature = "rpc")] +#[cfg_attr(iroh_docsrs, doc(cfg(feature = "rpc")))] +pub async fn load_secret_key(key_path: PathBuf) -> anyhow::Result { + use tokio::io::AsyncWriteExt; + if key_path.exists() { let keystr = tokio::fs::read(key_path).await?; - let secret_key = SecretKey::try_from_openssh(keystr).context("invalid keyfile")?; + let secret_key = + iroh_net::key::SecretKey::try_from_openssh(keystr).context("invalid keyfile")?; Ok(secret_key) } else { - let secret_key = SecretKey::generate(); + let secret_key = iroh_net::key::SecretKey::generate(); let ser_key = secret_key.to_openssh()?; // Try to canonicalize if possible From 414a5d59af59b23953628563af74b2c351134ad1 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Mon, 4 Nov 2024 15:20:51 +0200 Subject: [PATCH 08/19] Fix doc links --- src/rpc/client/blobs.rs | 9 +++------ src/rpc/client/tags.rs | 2 -- src/util/fs.rs | 2 +- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index 82c890b7f..a5889ab6c 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs @@ -2,8 +2,6 @@ //! //! The main entry point is the [`Client`]. //! -//! You obtain a [`Client`] via [`Iroh::blobs()`](crate::client::Iroh::blobs). -//! //! ## Interacting with the local blob store //! //! ### Importing data @@ -33,7 +31,6 @@ //! ## Interacting with remote nodes //! //! - [`download`](Client::download) downloads data from a remote node. -//! - [`share`](Client::share) allows creating a ticket to share data with a //! remote node. //! //! ## Interacting with the blob store itself @@ -52,13 +49,13 @@ //! For complex update operations, there is a [`batch`](Client::batch) API that //! allows you to add multiple blobs in a single logical batch. //! -//! Operations in a batch return [temporary tags](crate::blobs::TempTag) that +//! Operations in a batch return [temporary tags](crate::util::TempTag) that //! protect the added data from garbage collection as long as the batch is //! alive. //! //! To store the data permanently, a temp tag needs to be upgraded to a -//! permanent tag using [`persist`](crate::client::blobs::Batch::persist) or -//! [`persist_to`](crate::client::blobs::Batch::persist_to). +//! permanent tag using [`persist`](crate::rpc::client::blobs::Batch::persist) or +//! [`persist_to`](crate::rpc::client::blobs::Batch::persist_to). use std::{ future::Future, io, diff --git a/src/rpc/client/tags.rs b/src/rpc/client/tags.rs index 87bd2c4ef..51825fd1c 100644 --- a/src/rpc/client/tags.rs +++ b/src/rpc/client/tags.rs @@ -6,8 +6,6 @@ //! //! The main entry point is the [`Client`]. //! -//! You obtain a [`Client`] via [`Iroh::tags()`](crate::client::Iroh::tags). -//! //! [`Client::list`] can be used to list all tags. //! [`Client::list_hash_seq`] can be used to list all tags with a hash_seq format. //! diff --git a/src/util/fs.rs b/src/util/fs.rs index 1b5a79ca3..068ebadc9 100644 --- a/src/util/fs.rs +++ b/src/util/fs.rs @@ -126,7 +126,7 @@ pub fn relative_canonicalized_path_to_string(path: impl AsRef) -> anyhow:: canonicalized_path_to_string(path, true) } -/// Loads a [`SecretKey`] from the provided file, or stores a newly generated one +/// Loads a [`iroh_net::key::SecretKey`] from the provided file, or stores a newly generated one /// at the given location. 
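Editorial aside: the module docs touched above describe the batch workflow in prose, so a sketch of what that looks like for a caller may help. `Client::batch` and `Batch::persist` are named in those docs; `add_bytes` on the batch is an assumption based on the batch module's exports and may differ in detail.

```rust
// A minimal batch round trip, as described in the docs above: data added
// inside a batch is only protected from GC while the batch is alive, so
// the temp tag must be persisted before the batch is dropped.
async fn batch_example(
    blobs: &crate::rpc::client::blobs::Client,
) -> anyhow::Result<()> {
    let batch = blobs.batch().await?;
    // Returns a temp tag; the blob is safe from GC while `batch` lives.
    // (`add_bytes` is assumed here; see the lead-in.)
    let temp_tag = batch.add_bytes(&b"ephemeral data"[..]).await?;
    // Upgrade to a permanent tag before dropping the batch.
    let tag = batch.persist(temp_tag).await?;
    println!("blob stored under permanent tag {tag:?}");
    Ok(())
}
```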
#[cfg(feature = "rpc")] #[cfg_attr(iroh_docsrs, doc(cfg(feature = "rpc")))] From e34da9de1f5b9b908ae226a790a91e769da222e1 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Mon, 4 Nov 2024 17:12:19 +0200 Subject: [PATCH 09/19] move test node to tests --- src/lib.rs | 4 - src/node.rs | 161 ---------------------------------- src/rpc/client/blobs.rs | 190 +++++++++++++++++++++++++++++++++++++--- 3 files changed, 176 insertions(+), 179 deletions(-) delete mode 100644 src/node.rs diff --git a/src/lib.rs b/src/lib.rs index 0ea4f40eb..8b561c504 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -46,10 +46,6 @@ pub mod rpc; pub mod store; pub mod util; -#[cfg(feature = "rpc")] -#[cfg_attr(iroh_docsrs, doc(cfg(feature = "rpc")))] -pub mod node; - use bao_tree::BlockSize; pub use iroh_base::hash::{BlobFormat, Hash, HashAndFormat}; diff --git a/src/node.rs b/src/node.rs deleted file mode 100644 index 304d44175..000000000 --- a/src/node.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! An iroh node that just has the blobs transport -use std::{path::Path, sync::Arc}; - -use iroh_net::{NodeAddr, NodeId}; -use quic_rpc::client::BoxedServiceConnection; -use tokio_util::task::AbortOnDropHandle; - -use crate::{ - provider::{CustomEventSender, EventSender}, - rpc::client::{blobs, tags}, - util::local_pool::LocalPool, -}; - -type RpcClient = quic_rpc::RpcClient< - crate::rpc::proto::RpcService, - BoxedServiceConnection, - crate::rpc::proto::RpcService, ->; - -/// An iroh node that just has the blobs transport -#[derive(Debug)] -pub struct Node { - router: iroh_router::Router, - client: RpcClient, - _local_pool: LocalPool, - _rpc_task: AbortOnDropHandle<()>, -} - -/// An iroh node builder -#[derive(Debug)] -pub struct Builder { - store: S, - events: EventSender, -} - -impl Builder { - /// Sets the event sender - pub fn blobs_events(self, events: impl CustomEventSender) -> Self { - Builder { - store: self.store, - events: events.into(), - } - } - - /// Spawns the node - pub async fn spawn(self) -> anyhow::Result { - let (client, router, rpc_task, _local_pool) = setup_router(self.store, self.events).await?; - Ok(Node { - router, - client, - _rpc_task: AbortOnDropHandle::new(rpc_task), - _local_pool, - }) - } -} - -impl Node { - /// Creates a new node with memory storage - pub fn memory() -> Builder { - Builder { - store: crate::store::mem::Store::new(), - events: Default::default(), - } - } - - /// Creates a new node with persistent storage - pub async fn persistent( - path: impl AsRef, - ) -> anyhow::Result> { - Ok(Builder { - store: crate::store::fs::Store::load(path).await?, - events: Default::default(), - }) - } - - /// Returns the node id - pub fn node_id(&self) -> NodeId { - self.router.endpoint().node_id() - } - - /// Returns the node address - pub async fn node_addr(&self) -> anyhow::Result { - self.router.endpoint().node_addr().await - } - - /// Shuts down the node - pub async fn shutdown(self) -> anyhow::Result<()> { - self.router.shutdown().await - } - - /// Returns an in-memory blobs client - pub fn blobs(&self) -> blobs::Client { - blobs::Client::new(self.client.clone()) - } - - /// Returns an in-memory tags client - pub fn tags(&self) -> tags::Client { - tags::Client::new(self.client.clone()) - } -} - -async fn setup_router( - store: S, - events: EventSender, -) -> anyhow::Result<( - RpcClient, - iroh_router::Router, - tokio::task::JoinHandle<()>, - LocalPool, -)> { - let endpoint = iroh_net::Endpoint::builder().discovery_n0().bind().await?; - let local_pool = LocalPool::single(); - let mut router = 
iroh_router::Router::builder(endpoint.clone()); - - // Setup blobs - let downloader = crate::downloader::Downloader::new( - store.clone(), - endpoint.clone(), - local_pool.handle().clone(), - ); - let blobs = Arc::new(crate::net_protocol::Blobs::new_with_events( - store.clone(), - local_pool.handle().clone(), - events, - downloader, - endpoint.clone(), - )); - router = router.accept(crate::protocol::ALPN.to_vec(), blobs.clone()); - - // Build the router - let router = router.spawn().await?; - - // Setup RPC - let (internal_rpc, controller) = - quic_rpc::transport::flume::service_connection::(32); - let controller = quic_rpc::transport::boxed::Connection::new(controller); - let internal_rpc = quic_rpc::transport::boxed::ServerEndpoint::new(internal_rpc); - let internal_rpc = quic_rpc::RpcServer::new(internal_rpc); - - let rpc_server_task: tokio::task::JoinHandle<()> = tokio::task::spawn(async move { - loop { - let request = internal_rpc.accept().await; - match request { - Ok(accepting) => { - let blobs = blobs.clone(); - tokio::task::spawn(async move { - let (msg, chan) = accepting.read_first().await.unwrap(); - blobs.handle_rpc_request(msg, chan).await.unwrap(); - }); - } - Err(err) => { - tracing::warn!("rpc error: {:?}", err); - } - } - } - }); - - let client = quic_rpc::RpcClient::new(controller); - - Ok((client, router, rpc_server_task, local_pool)) -} diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index a5889ab6c..21cd82d2e 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs @@ -994,11 +994,176 @@ mod tests { use super::*; use crate::hashseq::HashSeq; + mod node { + //! An iroh node that just has the blobs transport + use std::{path::Path, sync::Arc}; + + use iroh_net::{NodeAddr, NodeId}; + use quic_rpc::client::BoxedServiceConnection; + use tokio_util::task::AbortOnDropHandle; + + use crate::{ + provider::{CustomEventSender, EventSender}, + rpc::client::{blobs, tags}, + util::local_pool::LocalPool, + }; + + type RpcClient = quic_rpc::RpcClient< + crate::rpc::proto::RpcService, + BoxedServiceConnection, + crate::rpc::proto::RpcService, + >; + + /// An iroh node that just has the blobs transport + #[derive(Debug)] + pub struct Node { + router: iroh_router::Router, + client: RpcClient, + _local_pool: LocalPool, + _rpc_task: AbortOnDropHandle<()>, + } + + /// An iroh node builder + #[derive(Debug)] + pub struct Builder { + store: S, + events: EventSender, + } + + impl Builder { + /// Sets the event sender + pub fn blobs_events(self, events: impl CustomEventSender) -> Self { + Builder { + store: self.store, + events: events.into(), + } + } + + /// Spawns the node + pub async fn spawn(self) -> anyhow::Result { + let (client, router, rpc_task, _local_pool) = + setup_router(self.store, self.events).await?; + Ok(Node { + router, + client, + _rpc_task: AbortOnDropHandle::new(rpc_task), + _local_pool, + }) + } + } + + impl Node { + /// Creates a new node with memory storage + pub fn memory() -> Builder { + Builder { + store: crate::store::mem::Store::new(), + events: Default::default(), + } + } + + /// Creates a new node with persistent storage + pub async fn persistent( + path: impl AsRef, + ) -> anyhow::Result> { + Ok(Builder { + store: crate::store::fs::Store::load(path).await?, + events: Default::default(), + }) + } + + /// Returns the node id + pub fn node_id(&self) -> NodeId { + self.router.endpoint().node_id() + } + + /// Returns the node address + pub async fn node_addr(&self) -> anyhow::Result { + self.router.endpoint().node_addr().await + } + + /// Shuts 
down the node + pub async fn shutdown(self) -> anyhow::Result<()> { + self.router.shutdown().await + } + + /// Returns an in-memory blobs client + pub fn blobs(&self) -> blobs::Client { + blobs::Client::new(self.client.clone()) + } + + /// Returns an in-memory tags client + pub fn tags(&self) -> tags::Client { + tags::Client::new(self.client.clone()) + } + } + + async fn setup_router( + store: S, + events: EventSender, + ) -> anyhow::Result<( + RpcClient, + iroh_router::Router, + tokio::task::JoinHandle<()>, + LocalPool, + )> { + let endpoint = iroh_net::Endpoint::builder().discovery_n0().bind().await?; + let local_pool = LocalPool::single(); + let mut router = iroh_router::Router::builder(endpoint.clone()); + + // Setup blobs + let downloader = crate::downloader::Downloader::new( + store.clone(), + endpoint.clone(), + local_pool.handle().clone(), + ); + let blobs = Arc::new(crate::net_protocol::Blobs::new_with_events( + store.clone(), + local_pool.handle().clone(), + events, + downloader, + endpoint.clone(), + )); + router = router.accept(crate::protocol::ALPN.to_vec(), blobs.clone()); + + // Build the router + let router = router.spawn().await?; + + // Setup RPC + let (internal_rpc, controller) = + quic_rpc::transport::flume::service_connection::(32); + let controller = quic_rpc::transport::boxed::Connection::new(controller); + let internal_rpc = quic_rpc::transport::boxed::ServerEndpoint::new(internal_rpc); + let internal_rpc = quic_rpc::RpcServer::new(internal_rpc); + + let rpc_server_task: tokio::task::JoinHandle<()> = tokio::task::spawn(async move { + loop { + let request = internal_rpc.accept().await; + match request { + Ok(accepting) => { + let blobs = blobs.clone(); + tokio::task::spawn(async move { + let (msg, chan) = accepting.read_first().await.unwrap(); + blobs.handle_rpc_request(msg, chan).await.unwrap(); + }); + } + Err(err) => { + tracing::warn!("rpc error: {:?}", err); + } + } + } + }); + + let client = quic_rpc::RpcClient::new(controller); + + Ok((client, router, rpc_server_task, local_pool)) + } + } + #[tokio::test] async fn test_blob_create_collection() -> Result<()> { let _guard = iroh_test::logging::setup(); - let node = crate::node::Node::memory().spawn().await?; + let node = node::Node::memory().spawn().await?; // create temp file let temp_dir = tempfile::tempdir().context("tempdir")?; @@ -1082,7 +1247,7 @@ mod tests { async fn test_blob_read_at() -> Result<()> { // let _guard = iroh_test::logging::setup(); - let node = crate::node::Node::memory().spawn().await?; + let node = node::Node::memory().spawn().await?; // create temp file let temp_dir = tempfile::tempdir().context("tempdir")?; @@ -1221,7 +1386,7 @@ mod tests { async fn test_blob_get_collection() -> Result<()> { let _guard = iroh_test::logging::setup(); - let node = crate::node::Node::memory().spawn().await?; + let node = node::Node::memory().spawn().await?; // create temp file let temp_dir = tempfile::tempdir().context("tempdir")?; @@ -1287,7 +1452,7 @@ mod tests { async fn test_blob_share() -> Result<()> { let _guard = iroh_test::logging::setup(); - let node = crate::node::Node::memory().spawn().await?; + let node = node::Node::memory().spawn().await?; // create temp file let temp_dir = tempfile::tempdir().context("tempdir")?; @@ -1362,13 +1527,13 @@ mod tests { let _guard = iroh_test::logging::setup(); let (node1_events, mut node1_events_r) = BlobEvents::new(16); - let node1 = crate::node::Node::memory() + let node1 = node::Node::memory() .blobs_events(node1_events) .spawn() .await?; let (node2_events, 
mut node2_events_r) = BlobEvents::new(16); - let node2 = crate::node::Node::memory() + let node2 = node::Node::memory() .blobs_events(node2_events) .spawn() .await?; @@ -1426,7 +1591,7 @@ mod tests { async fn test_blob_get_self_existing() -> TestResult<()> { let _guard = iroh_test::logging::setup(); - let node = crate::node::Node::memory().spawn().await?; + let node = node::Node::memory().spawn().await?; let node_id = node.node_id(); let blobs = node.blobs(); @@ -1474,7 +1639,7 @@ mod tests { async fn test_blob_get_self_missing() -> TestResult<()> { let _guard = iroh_test::logging::setup(); - let node = crate::node::Node::memory().spawn().await?; + let node = node::Node::memory().spawn().await?; let node_id = node.node_id(); let blobs = node.blobs(); @@ -1526,7 +1691,7 @@ mod tests { async fn test_blob_get_existing_collection() -> TestResult<()> { let _guard = iroh_test::logging::setup(); - let node = crate::node::Node::memory().spawn().await?; + let node = node::Node::memory().spawn().await?; // We use a nonexisting node id because we just want to check that this succeeds without // hitting the network. let node_id = NodeId::from_bytes(&[0u8; 32])?; @@ -1599,7 +1764,7 @@ mod tests { async fn test_blob_delete_mem() -> Result<()> { let _guard = iroh_test::logging::setup(); - let node = crate::node::Node::memory().spawn().await?; + let node = node::Node::memory().spawn().await?; let res = node.blobs().add_bytes(&b"hello world"[..]).await?; @@ -1621,10 +1786,7 @@ mod tests { let _guard = iroh_test::logging::setup(); let dir = tempfile::tempdir()?; - let node = crate::node::Node::persistent(dir.path()) - .await? - .spawn() - .await?; + let node = node::Node::persistent(dir.path()).await?.spawn().await?; let res = node.blobs().add_bytes(&b"hello world"[..]).await?; From 5845f6411a46d1581d229fd6868ffab62bc7157e Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Mon, 4 Nov 2024 17:50:16 +0200 Subject: [PATCH 10/19] try out the new simplified service mapping --- Cargo.lock | 52 +++++++++++++++++++++++++++++++++-- Cargo.toml | 2 +- src/rpc.rs | 21 ++++++-------- src/rpc/client/blobs.rs | 38 ++++++++++--------------- src/rpc/client/blobs/batch.rs | 29 ++++++++----------- src/rpc/client/tags.rs | 14 ++++------ 6 files changed, 92 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 88bb1bc45..f42e19b14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -245,6 +245,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597bb81c80a54b6a4381b23faba8d7774b144c94cbd1d6fe3f1329bd776554ab" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bit-set" version = "0.5.3" @@ -1720,7 +1729,7 @@ dependencies = [ "portable-atomic", "postcard", "proptest", - "quic-rpc", + "quic-rpc 0.13.0 (git+https://github.com/n0-computer/quic-rpc.git?branch=map-transports-not-services)", "quic-rpc-derive", "rand", "range-collections", @@ -2950,6 +2959,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "quic-rpc" +version = "0.13.0" +source = "git+https://github.com/n0-computer/quic-rpc.git?branch=map-transports-not-services#c30ba6a08c3ec0e1a05ff6f15a1681cd75ddcf0f" +dependencies = [ + "anyhow", + "bincode", + "derive_more", + "educe", + "flume", + "futures-lite 2.3.0", + "futures-sink", + "futures-util", + "hex", + "iroh-quinn", + "pin-project", + "serde", + "slab", + 
"tokio", + "tokio-serde", + "tokio-util", + "tracing", +] + [[package]] name = "quic-rpc-derive" version = "0.13.0" @@ -2957,7 +2990,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94b91a3f7a42657cbfbd0c2499c1f037738eff45bb7f59c6ce3d3d9e890d141c" dependencies = [ "proc-macro2", - "quic-rpc", + "quic-rpc 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", "quote", "syn 1.0.109", ] @@ -4184,6 +4217,21 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-serde" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" +dependencies = [ + "bincode", + "bytes", + "educe", + "futures-core", + "futures-sink", + "pin-project", + "serde", +] + [[package]] name = "tokio-stream" version = "0.1.15" diff --git a/Cargo.toml b/Cargo.toml index ea65a2da0..3f3871681 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ parking_lot = { version = "0.12.1", optional = true } pin-project = "1.1.5" portable-atomic = { version = "1", optional = true } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } -quic-rpc = { version = "0.13.0", optional = true } +quic-rpc = { git = "https://github.com/n0-computer/quic-rpc.git", branch = "map-transports-not-services", optional = true } quic-rpc-derive = { version = "0.13.0", optional = true } quinn = { package = "iroh-quinn", version = "0.11", features = ["ring"] } rand = "0.8" diff --git a/src/rpc.rs b/src/rpc.rs index b764f948d..382e9a934 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -57,14 +57,13 @@ const RPC_BLOB_GET_CHANNEL_CAP: usize = 2; impl Blobs { /// Handle an RPC request - pub async fn handle_rpc_request( + pub async fn handle_rpc_request( self: Arc, msg: crate::rpc::proto::Request, - chan: RpcChannel, + chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - S: quic_rpc::Service, - C: quic_rpc::ServiceEndpoint, + C: quic_rpc::ServiceEndpoint, { use crate::rpc::proto::Request::*; match msg { @@ -74,14 +73,13 @@ impl Blobs { } /// Handle a tags request - pub async fn handle_tags_request( + pub async fn handle_tags_request( self: Arc, msg: proto::tags::Request, - chan: RpcChannel, + chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - S: quic_rpc::Service, - C: quic_rpc::ServiceEndpoint, + C: quic_rpc::ServiceEndpoint, { use proto::tags::Request::*; match msg { @@ -93,14 +91,13 @@ impl Blobs { } /// Handle a blobs request - pub async fn handle_blobs_request( + pub async fn handle_blobs_request( self: Arc, msg: proto::blobs::Request, - chan: RpcChannel, + chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - Sv: quic_rpc::Service, - C: quic_rpc::ServiceEndpoint, + C: quic_rpc::ServiceEndpoint, { use proto::blobs::Request::*; match msg { diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index 21cd82d2e..9ec9d73a1 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs @@ -105,20 +105,16 @@ use crate::rpc::proto::blobs::{ /// Iroh blobs client. 
#[derive(Debug, Clone)] -pub struct Client< - C = BoxedServiceConnection, - S = crate::rpc::proto::RpcService, -> { - pub(super) rpc: RpcClient, +pub struct Client> { + pub(super) rpc: RpcClient, } -impl Client +impl Client where - S: quic_rpc::Service, - C: quic_rpc::ServiceConnection, + C: quic_rpc::ServiceConnection, { /// Create a new client - pub fn new(rpc: RpcClient) -> Self { + pub fn new(rpc: RpcClient) -> Self { Self { rpc } } @@ -147,7 +143,7 @@ where /// A batch is a context in which temp tags are created and data is added to the node. Temp tags /// are automatically deleted when the batch is dropped, leading to the data being garbage collected /// unless a permanent tag is created for it. - pub async fn batch(&self) -> Result> { + pub async fn batch(&self) -> Result> { let (updates, mut stream) = self.rpc.bidi(BatchCreateRequest).await?; let BatchCreateResponse::Id(batch) = stream.next().await.context("expected scope id")??; let rpc = self.rpc.clone(); @@ -457,15 +453,14 @@ where Ok(()) } - fn tags_client(&self) -> tags::Client { + fn tags_client(&self) -> tags::Client { tags::Client::new(self.rpc.clone()) } } -impl SimpleStore for Client +impl SimpleStore for Client where - S: quic_rpc::Service, - C: quic_rpc::ServiceConnection, + C: quic_rpc::ServiceConnection, { async fn load(&self, hash: Hash) -> anyhow::Result { self.read_to_bytes(hash).await @@ -882,26 +877,24 @@ impl Reader { } /// todo make private again - pub async fn from_rpc_read( - rpc: &RpcClient, + pub async fn from_rpc_read( + rpc: &RpcClient, hash: Hash, ) -> anyhow::Result where - C: quic_rpc::ServiceConnection, - S: quic_rpc::Service, + C: quic_rpc::ServiceConnection, { Self::from_rpc_read_at(rpc, hash, 0, ReadAtLen::All).await } - async fn from_rpc_read_at( - rpc: &RpcClient, + async fn from_rpc_read_at( + rpc: &RpcClient, hash: Hash, offset: u64, len: ReadAtLen, ) -> anyhow::Result where - C: quic_rpc::ServiceConnection, - S: quic_rpc::Service, + C: quic_rpc::ServiceConnection, { let stream = rpc .server_streaming(ReadAtRequest { hash, offset, len }) @@ -1011,7 +1004,6 @@ mod tests { type RpcClient = quic_rpc::RpcClient< crate::rpc::proto::RpcService, BoxedServiceConnection, - crate::rpc::proto::RpcService, >; /// An iroh node that just has the blobs transport diff --git a/src/rpc/client/blobs/batch.rs b/src/rpc/client/blobs/batch.rs index 6b08e9268..80e6c84ab 100644 --- a/src/rpc/client/blobs/batch.rs +++ b/src/rpc/client/blobs/batch.rs @@ -33,19 +33,17 @@ use crate::{ /// A scope in which blobs can be added. #[derive(derive_more::Debug)] -struct BatchInner +struct BatchInner where - C: quic_rpc::ServiceConnection, - S: quic_rpc::Service, + C: quic_rpc::ServiceConnection, { /// The id of the scope. batch: BatchId, /// The rpc client. - rpc: RpcClient, + rpc: RpcClient, /// The stream to send drop #[debug(skip)] - updates: - Mutex, BatchUpdate>>, + updates: Mutex, BatchUpdate>>, } /// A batch for write operations. @@ -55,15 +53,13 @@ where /// It is not a transaction, so things in a batch are not atomic. Also, there is /// no isolation between batches. 
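/// An illustrative sketch of the lifecycle described above (assumed usage,
/// not part of this diff; `add_bytes` and `persist_to` are batch methods of
/// this module, and `"my-tag".into()` stands in for any permanent tag name):
/// ```ignore
/// let batch = blobs.batch().await?;
/// // adding data only creates a temp tag that lives as long as the batch
/// let temp_tag = batch.add_bytes(b"hello").await?;
/// // upgrade to a permanent tag before the batch is dropped, otherwise
/// // the data becomes eligible for garbage collection
/// batch.persist_to(temp_tag, "my-tag".into()).await?;
/// ```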
#[derive(derive_more::Debug)] -pub struct Batch(Arc>) +pub struct Batch(Arc>) where - C: quic_rpc::ServiceConnection, - S: quic_rpc::Service; + C: quic_rpc::ServiceConnection; -impl TagDrop for BatchInner +impl TagDrop for BatchInner where - C: quic_rpc::ServiceConnection, - S: quic_rpc::Service, + C: quic_rpc::ServiceConnection, { fn on_drop(&self, content: &HashAndFormat) { let mut updates = self.updates.lock().unwrap(); @@ -131,15 +127,14 @@ impl Default for AddReaderOpts { } } -impl Batch +impl Batch where - C: quic_rpc::ServiceConnection, - S: quic_rpc::Service, + C: quic_rpc::ServiceConnection, { pub(super) fn new( batch: BatchId, - rpc: RpcClient, - updates: UpdateSink, + rpc: RpcClient, + updates: UpdateSink, buffer_size: usize, ) -> Self { let updates = updates.buffer(buffer_size); diff --git a/src/rpc/client/tags.rs b/src/rpc/client/tags.rs index 51825fd1c..ad39dae6a 100644 --- a/src/rpc/client/tags.rs +++ b/src/rpc/client/tags.rs @@ -23,20 +23,16 @@ use crate::{ /// Iroh tags client. #[derive(Debug, Clone)] #[repr(transparent)] -pub struct Client< - C = BoxedServiceConnection, - S = crate::rpc::proto::RpcService, -> { - pub(super) rpc: RpcClient, +pub struct Client> { + pub(super) rpc: RpcClient, } -impl Client +impl Client where - C: quic_rpc::ServiceConnection, - S: quic_rpc::Service, + C: quic_rpc::ServiceConnection, { /// Creates a new client - pub fn new(rpc: RpcClient) -> Self { + pub fn new(rpc: RpcClient) -> Self { Self { rpc } } From 932444cfac102ad9ea8cf7aae92c3e5a83dd409e Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Mon, 4 Nov 2024 18:02:24 +0200 Subject: [PATCH 11/19] Simplify bounds --- src/rpc.rs | 6 +++--- src/rpc/client/blobs.rs | 30 +++++++++++++++--------------- src/rpc/client/blobs/batch.rs | 15 ++++++++------- src/rpc/client/tags.rs | 15 +++++++++------ 4 files changed, 35 insertions(+), 31 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 382e9a934..bded6fdf0 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -30,7 +30,7 @@ use proto::{ CreateRequest as TagsCreateRequest, DeleteRequest as TagDeleteRequest, ListRequest as TagListRequest, SetRequest as TagsSetRequest, SyncMode, }, - RpcError, RpcResult, + RpcError, RpcResult, RpcService, }; use quic_rpc::server::{RpcChannel, RpcServerError}; @@ -60,10 +60,10 @@ impl Blobs { pub async fn handle_rpc_request( self: Arc, msg: crate::rpc::proto::Request, - chan: RpcChannel, + chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - C: quic_rpc::ServiceEndpoint, + C: quic_rpc::ServiceEndpoint, { use crate::rpc::proto::Request::*; match msg { diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index 9ec9d73a1..e9f8314dd 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs @@ -74,7 +74,7 @@ use iroh_net::NodeAddr; use portable_atomic::{AtomicU64, Ordering}; use quic_rpc::{ client::{BoxStreamSync, BoxedServiceConnection}, - RpcClient, + RpcClient, ServiceConnection, }; use serde::{Deserialize, Serialize}; use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf}; @@ -87,6 +87,7 @@ use crate::{ format::collection::{Collection, SimpleStore}, get::db::DownloadProgress as BytesDownloadProgress, net_protocol::BlobDownloadRequest, + rpc::proto::RpcService, store::{BaoBlobSize, ConsistencyCheckProgress, ExportFormat, ExportMode, ValidateProgress}, util::SetTagOption, BlobFormat, Hash, Tag, @@ -105,16 +106,16 @@ use crate::rpc::proto::blobs::{ /// Iroh blobs client.
#[derive(Debug, Clone)] -pub struct Client> { - pub(super) rpc: RpcClient, +pub struct Client> { + pub(super) rpc: RpcClient, } impl Client where - C: quic_rpc::ServiceConnection, + C: ServiceConnection, { /// Create a new client - pub fn new(rpc: RpcClient) -> Self { + pub fn new(rpc: RpcClient) -> Self { Self { rpc } } @@ -460,7 +461,7 @@ where impl SimpleStore for Client where - C: quic_rpc::ServiceConnection, + C: ServiceConnection, { async fn load(&self, hash: Hash) -> anyhow::Result { self.read_to_bytes(hash).await @@ -878,23 +879,23 @@ impl Reader { /// todo make private again pub async fn from_rpc_read( - rpc: &RpcClient, + rpc: &RpcClient, hash: Hash, ) -> anyhow::Result where - C: quic_rpc::ServiceConnection, + C: ServiceConnection, { Self::from_rpc_read_at(rpc, hash, 0, ReadAtLen::All).await } async fn from_rpc_read_at( - rpc: &RpcClient, + rpc: &RpcClient, hash: Hash, offset: u64, len: ReadAtLen, ) -> anyhow::Result where - C: quic_rpc::ServiceConnection, + C: ServiceConnection, { let stream = rpc .server_streaming(ReadAtRequest { hash, offset, len }) @@ -1001,10 +1002,9 @@ mod tests { util::local_pool::LocalPool, }; - type RpcClient = quic_rpc::RpcClient< - crate::rpc::proto::RpcService, - BoxedServiceConnection, - >; + use super::RpcService; + + type RpcClient = quic_rpc::RpcClient; /// An iroh node that just has the blobs transport #[derive(Debug)] @@ -1122,7 +1122,7 @@ mod tests { // Setup RPC let (internal_rpc, controller) = - quic_rpc::transport::flume::service_connection::(32); + quic_rpc::transport::flume::service_connection::(32); let controller = quic_rpc::transport::boxed::Connection::new(controller); let internal_rpc = quic_rpc::transport::boxed::ServerEndpoint::new(internal_rpc); let internal_rpc = quic_rpc::RpcServer::new(internal_rpc); diff --git a/src/rpc/client/blobs/batch.rs b/src/rpc/client/blobs/batch.rs index 80e6c84ab..ba1631e33 100644 --- a/src/rpc/client/blobs/batch.rs +++ b/src/rpc/client/blobs/batch.rs @@ -9,7 +9,7 @@ use bytes::Bytes; use futures_buffered::BufferedStreamExt; use futures_lite::StreamExt; use futures_util::{sink::Buffer, FutureExt, SinkExt, Stream}; -use quic_rpc::{client::UpdateSink, RpcClient}; +use quic_rpc::{client::UpdateSink, RpcClient, ServiceConnection}; use tokio::io::AsyncRead; use tokio_util::io::ReaderStream; use tracing::{debug, warn}; @@ -25,6 +25,7 @@ use crate::{ BatchAddStreamUpdate, BatchCreateTempTagRequest, BatchUpdate, }, tags::{self, SyncMode}, + RpcService, }, store::ImportMode, util::{SetTagOption, TagDrop}, @@ -35,12 +36,12 @@ use crate::{ #[derive(derive_more::Debug)] struct BatchInner where - C: quic_rpc::ServiceConnection, + C: ServiceConnection, { /// The id of the scope. batch: BatchId, /// The rpc client. 
- rpc: RpcClient, + rpc: RpcClient, /// The stream to send drop #[debug(skip)] updates: Mutex, BatchUpdate>>, @@ -55,11 +56,11 @@ where #[derive(derive_more::Debug)] pub struct Batch(Arc>) where - C: quic_rpc::ServiceConnection; + C: ServiceConnection; impl TagDrop for BatchInner where - C: quic_rpc::ServiceConnection, + C: ServiceConnection, { fn on_drop(&self, content: &HashAndFormat) { let mut updates = self.updates.lock().unwrap(); @@ -129,11 +130,11 @@ impl Default for AddReaderOpts { impl Batch where - C: quic_rpc::ServiceConnection, + C: ServiceConnection, { pub(super) fn new( batch: BatchId, - rpc: RpcClient, + rpc: RpcClient, updates: UpdateSink, buffer_size: usize, ) -> Self { diff --git a/src/rpc/client/tags.rs b/src/rpc/client/tags.rs index ad39dae6a..d6b843522 100644 --- a/src/rpc/client/tags.rs +++ b/src/rpc/client/tags.rs @@ -12,27 +12,30 @@ //! [`Client::delete`] can be used to delete a tag. use anyhow::Result; use futures_lite::{Stream, StreamExt}; -use quic_rpc::{client::BoxedServiceConnection, RpcClient}; +use quic_rpc::{client::BoxedServiceConnection, RpcClient, ServiceConnection}; use serde::{Deserialize, Serialize}; use crate::{ - rpc::proto::tags::{DeleteRequest, ListRequest}, + rpc::proto::{ + tags::{DeleteRequest, ListRequest}, + RpcService, + }, BlobFormat, Hash, Tag, }; /// Iroh tags client. #[derive(Debug, Clone)] #[repr(transparent)] -pub struct Client> { - pub(super) rpc: RpcClient, +pub struct Client> { + pub(super) rpc: RpcClient, } impl Client where - C: quic_rpc::ServiceConnection, + C: ServiceConnection, { /// Creates a new client - pub fn new(rpc: RpcClient) -> Self { + pub fn new(rpc: RpcClient) -> Self { Self { rpc } } From a6043e49a03e9a1ab65df6e2fd9d45c0a4d25133 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Mon, 4 Nov 2024 18:07:06 +0200 Subject: [PATCH 12/19] unused import --- src/rpc/client/blobs.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index e9f8314dd..a583e7ea9 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs @@ -993,7 +993,6 @@ mod tests { use std::{path::Path, sync::Arc}; use iroh_net::{NodeAddr, NodeId}; - use quic_rpc::client::BoxedServiceConnection; use tokio_util::task::AbortOnDropHandle; use crate::{ From de6ced0e125604822f989500245a668f1e309622 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Mon, 4 Nov 2024 18:16:27 +0200 Subject: [PATCH 13/19] wip --- src/rpc.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index bded6fdf0..c1d29265c 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -25,12 +25,10 @@ use proto::{ CreateCollectionRequest, CreateCollectionResponse, DeleteRequest, DownloadResponse, ExportRequest, ExportResponse, ListIncompleteRequest, ListRequest, ReadAtRequest, ReadAtResponse, ValidateRequest, - }, - tags::{ + }, tags::{ CreateRequest as TagsCreateRequest, DeleteRequest as TagDeleteRequest, ListRequest as TagListRequest, SetRequest as TagsSetRequest, SyncMode, - }, - RpcError, RpcResult, RpcService, + }, Request, RpcError, RpcResult, RpcService }; use quic_rpc::server::{RpcChannel, RpcServerError}; @@ -59,13 +57,13 @@ impl Blobs { /// Handle an RPC request pub async fn handle_rpc_request( self: Arc, - msg: crate::rpc::proto::Request, + msg: Request, chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where C: quic_rpc::ServiceEndpoint, { - use crate::rpc::proto::Request::*; + use Request::*; match msg { Blobs(msg) => self.handle_blobs_request(msg, chan).await, Tags(msg) => 
self.handle_tags_request(msg, chan).await, From ec5ca15eeaa32d256958caed3e1691443aa215ad Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Mon, 4 Nov 2024 18:20:42 +0200 Subject: [PATCH 14/19] fmt --- src/rpc.rs | 6 ++++-- src/rpc/client/blobs.rs | 3 +-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index c1d29265c..41eeaba83 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -25,10 +25,12 @@ use proto::{ CreateCollectionRequest, CreateCollectionResponse, DeleteRequest, DownloadResponse, ExportRequest, ExportResponse, ListIncompleteRequest, ListRequest, ReadAtRequest, ReadAtResponse, ValidateRequest, - }, tags::{ + }, + tags::{ CreateRequest as TagsCreateRequest, DeleteRequest as TagDeleteRequest, ListRequest as TagListRequest, SetRequest as TagsSetRequest, SyncMode, - }, Request, RpcError, RpcResult, RpcService + }, + Request, RpcError, RpcResult, RpcService, }; use quic_rpc::server::{RpcChannel, RpcServerError}; diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index a583e7ea9..76e44bad4 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs @@ -995,14 +995,13 @@ mod tests { use iroh_net::{NodeAddr, NodeId}; use tokio_util::task::AbortOnDropHandle; + use super::RpcService; use crate::{ provider::{CustomEventSender, EventSender}, rpc::client::{blobs, tags}, util::local_pool::LocalPool, }; - use super::RpcService; - type RpcClient = quic_rpc::RpcClient; /// An iroh node that just has the blobs transport From fc2ce39f1fe1d65fcbc8fb80311777214f9884ca Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Tue, 5 Nov 2024 12:40:49 +0200 Subject: [PATCH 15/19] Relax constraints so we can use the various rpc fns with mapped channels. --- src/rpc.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rpc.rs b/src/rpc.rs index 41eeaba83..dbeecdce2 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -63,7 +63,7 @@ impl Blobs { chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - C: quic_rpc::ServiceEndpoint, + C: quic_rpc::ServiceChannel, { use Request::*; match msg { @@ -79,7 +79,7 @@ impl Blobs { chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - C: quic_rpc::ServiceEndpoint, + C: quic_rpc::ServiceChannel, { use proto::tags::Request::*; match msg { @@ -97,7 +97,7 @@ impl Blobs { chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - C: quic_rpc::ServiceEndpoint, + C: quic_rpc::ServiceChannel, { use proto::blobs::Request::*; match msg { From 62d09376e384b17c00d3b59aef08bd52209cd6c0 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Tue, 5 Nov 2024 15:28:39 +0200 Subject: [PATCH 16/19] apply naming bikeshedding --- Cargo.lock | 83 +++++++++++++++++++++++++++-------- Cargo.toml | 2 +- src/rpc.rs | 6 +-- src/rpc/client/blobs.rs | 19 ++++---- src/rpc/client/blobs/batch.rs | 10 ++--- src/rpc/client/tags.rs | 6 +-- 6 files changed, 86 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a4a2129f9..4714c3a3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -350,9 +350,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.34" +version = "1.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b9470d453346108f93a59222a9a1a5724db32d0a4727b7ab7ace4b4d822dc9" +checksum = "0f57c4b4da2a9d619dd035f27316d7a426305b75be93d09e92f2b9229c34feaf" dependencies = [ "shlex", ] @@ -1268,6 +1268,25 @@ dependencies = [ "subtle", ] +[[package]] +name = "h2" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "h2" version = "0.4.6" @@ -1486,6 +1505,30 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "hyper" +version = "0.14.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "hyper" version = "1.5.0" @@ -1495,7 +1538,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2", + "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "httparse", @@ -1515,7 +1558,7 @@ checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", - "hyper", + "hyper 1.5.0", "hyper-util", "rustls", "rustls-pki-types", @@ -1536,7 +1579,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper", + "hyper 1.5.0", "pin-project-lite", "socket2", "tokio", @@ -1728,7 +1771,7 @@ dependencies = [ "futures", "http 1.1.0", "http-body-util", - "hyper", + "hyper 1.5.0", "hyper-util", "log", "rand", @@ -1795,7 +1838,7 @@ checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "iroh-base" version = "0.28.0" -source = "git+https://github.com/n0-computer/iroh?branch=main#b17b1f20b4c5e584e1fa4219ce5e375b37e9dbf1" +source = "git+https://github.com/n0-computer/iroh?branch=main#0a7a534128bf1234a326fcfba134d878e796c377" dependencies = [ "aead", "anyhow", @@ -1864,7 +1907,7 @@ dependencies = [ "portable-atomic", "postcard", "proptest", - "quic-rpc 0.14.0 (git+https://github.com/n0-computer/quic-rpc.git?branch=map-transports-not-services)", + "quic-rpc 0.14.0 (git+https://github.com/n0-computer/quic-rpc.git?branch=naming-bikeshedding)", "quic-rpc-derive", "rand", "range-collections", @@ -1909,12 +1952,12 @@ dependencies = [ [[package]] name = "iroh-metrics" version = "0.28.0" -source = "git+https://github.com/n0-computer/iroh?branch=main#b17b1f20b4c5e584e1fa4219ce5e375b37e9dbf1" +source = "git+https://github.com/n0-computer/iroh?branch=main#0a7a534128bf1234a326fcfba134d878e796c377" dependencies = [ "anyhow", "erased_set", "http-body-util", - "hyper", + "hyper 1.5.0", "hyper-util", "once_cell", "prometheus-client", @@ -1929,7 +1972,7 @@ dependencies = [ [[package]] name = "iroh-net" version = "0.28.1" -source = "git+https://github.com/n0-computer/iroh?branch=main#b17b1f20b4c5e584e1fa4219ce5e375b37e9dbf1" +source = "git+https://github.com/n0-computer/iroh?branch=main#0a7a534128bf1234a326fcfba134d878e796c377" dependencies = [ "anyhow", "backoff", @@ -1951,7 +1994,7 @@ dependencies = [ "hostname", "http 1.1.0", "http-body-util", - "hyper", + "hyper 1.5.0", "hyper-util", "igd-next", "iroh-base", @@ -2056,7 +2099,7 @@ dependencies = [ [[package]] name = "iroh-router" version = "0.28.0" -source = "git+https://github.com/n0-computer/iroh?branch=main#b17b1f20b4c5e584e1fa4219ce5e375b37e9dbf1" +source = 
"git+https://github.com/n0-computer/iroh?branch=main#0a7a534128bf1234a326fcfba134d878e796c377" dependencies = [ "anyhow", "futures-buffered", @@ -2396,7 +2439,7 @@ dependencies = [ [[package]] name = "netwatch" version = "0.1.0" -source = "git+https://github.com/n0-computer/iroh?branch=main#b17b1f20b4c5e584e1fa4219ce5e375b37e9dbf1" +source = "git+https://github.com/n0-computer/iroh?branch=main#0a7a534128bf1234a326fcfba134d878e796c377" dependencies = [ "anyhow", "bytes", @@ -2915,7 +2958,7 @@ checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "portmapper" version = "0.1.0" -source = "git+https://github.com/n0-computer/iroh?branch=main#b17b1f20b4c5e584e1fa4219ce5e375b37e9dbf1" +source = "git+https://github.com/n0-computer/iroh?branch=main#0a7a534128bf1234a326fcfba134d878e796c377" dependencies = [ "anyhow", "base64", @@ -3164,10 +3207,11 @@ dependencies = [ [[package]] name = "quic-rpc" version = "0.14.0" -source = "git+https://github.com/n0-computer/quic-rpc.git?branch=map-transports-not-services#03f97ab0bc5287f873970984d783320350ad3d8a" +source = "git+https://github.com/n0-computer/quic-rpc.git?branch=naming-bikeshedding#067ae4b04bad16036571225904cd6970decb040b" dependencies = [ "anyhow", "bincode", + "bytes", "derive_more", "educe", "flume", @@ -3175,6 +3219,7 @@ dependencies = [ "futures-sink", "futures-util", "hex", + "hyper 0.14.31", "iroh-quinn", "pin-project", "serde", @@ -3240,9 +3285,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e346e016eacfff12233c243718197ca12f148c84e1e84268a896699b41c71780" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ "cfg_aliases", "libc", @@ -3464,7 +3509,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.5.0", "hyper-rustls", "hyper-util", "ipnet", diff --git a/Cargo.toml b/Cargo.toml index ca83f35ac..809920263 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,7 @@ pin-project = "1.1.5" portable-atomic = { version = "1", optional = true } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } quinn = { package = "iroh-quinn", version = "0.12", features = ["ring"] } -quic-rpc = { git = "https://github.com/n0-computer/quic-rpc.git", branch = "map-transports-not-services", optional = true } +quic-rpc = { git = "https://github.com/n0-computer/quic-rpc.git", branch = "naming-bikeshedding", optional = true } quic-rpc-derive = { version = "0.14", optional = true } rand = "0.8" range-collections = "0.4.0" diff --git a/src/rpc.rs b/src/rpc.rs index dbeecdce2..cc79f6a80 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -63,7 +63,7 @@ impl Blobs { chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - C: quic_rpc::ServiceChannel, + C: quic_rpc::ListenerTypes, { use Request::*; match msg { @@ -79,7 +79,7 @@ impl Blobs { chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - C: quic_rpc::ServiceChannel, + C: quic_rpc::ListenerTypes, { use proto::tags::Request::*; match msg { @@ -97,7 +97,7 @@ impl Blobs { chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - C: quic_rpc::ServiceChannel, + C: quic_rpc::ListenerTypes, { use proto::blobs::Request::*; match msg { diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index 76e44bad4..fcfd44c6d 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs 
@@ -73,8 +73,8 @@ use genawaiter::sync::{Co, Gen}; use iroh_net::NodeAddr; use portable_atomic::{AtomicU64, Ordering}; use quic_rpc::{ - client::{BoxStreamSync, BoxedServiceConnection}, - RpcClient, ServiceConnection, + client::{BoxStreamSync, BoxedConnector}, + Connector, RpcClient, }; use serde::{Deserialize, Serialize}; use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf}; @@ -106,13 +106,13 @@ use crate::rpc::proto::blobs::{ /// Iroh blobs client. #[derive(Debug, Clone)] -pub struct Client> { +pub struct Client> { pub(super) rpc: RpcClient, } impl Client where - C: ServiceConnection, + C: Connector, { /// Create a new client pub fn new(rpc: RpcClient) -> Self { @@ -461,7 +461,7 @@ where impl SimpleStore for Client where - C: ServiceConnection, + C: Connector, { async fn load(&self, hash: Hash) -> anyhow::Result { self.read_to_bytes(hash).await @@ -883,7 +883,7 @@ impl Reader { hash: Hash, ) -> anyhow::Result where - C: ServiceConnection, + C: Connector, { Self::from_rpc_read_at(rpc, hash, 0, ReadAtLen::All).await } @@ -895,7 +895,7 @@ impl Reader { len: ReadAtLen, ) -> anyhow::Result where - C: ServiceConnection, + C: Connector, { let stream = rpc .server_streaming(ReadAtRequest { hash, offset, len }) @@ -993,6 +993,7 @@ mod tests { use std::{path::Path, sync::Arc}; use iroh_net::{NodeAddr, NodeId}; + use quic_rpc::transport::{Connector, Listener}; use tokio_util::task::AbortOnDropHandle; use super::RpcService; @@ -1121,8 +1122,8 @@ mod tests { // Setup RPC let (internal_rpc, controller) = quic_rpc::transport::flume::service_connection::(32); - let controller = quic_rpc::transport::boxed::Connection::new(controller); - let internal_rpc = quic_rpc::transport::boxed::ServerEndpoint::new(internal_rpc); + let controller = controller.boxed(); + let internal_rpc = internal_rpc.boxed(); let internal_rpc = quic_rpc::RpcServer::new(internal_rpc); let rpc_server_task: tokio::task::JoinHandle<()> = tokio::task::spawn(async move { diff --git a/src/rpc/client/blobs/batch.rs b/src/rpc/client/blobs/batch.rs index ba1631e33..b82f17837 100644 --- a/src/rpc/client/blobs/batch.rs +++ b/src/rpc/client/blobs/batch.rs @@ -9,7 +9,7 @@ use bytes::Bytes; use futures_buffered::BufferedStreamExt; use futures_lite::StreamExt; use futures_util::{sink::Buffer, FutureExt, SinkExt, Stream}; -use quic_rpc::{client::UpdateSink, RpcClient, ServiceConnection}; +use quic_rpc::{client::UpdateSink, Connector, RpcClient}; use tokio::io::AsyncRead; use tokio_util::io::ReaderStream; use tracing::{debug, warn}; @@ -36,7 +36,7 @@ use crate::{ #[derive(derive_more::Debug)] struct BatchInner where - C: ServiceConnection, + C: Connector, { /// The id of the scope. batch: BatchId, @@ -56,11 +56,11 @@ where #[derive(derive_more::Debug)] pub struct Batch(Arc>) where - C: ServiceConnection; + C: Connector; impl TagDrop for BatchInner where - C: ServiceConnection, + C: Connector, { fn on_drop(&self, content: &HashAndFormat) { let mut updates = self.updates.lock().unwrap(); @@ -130,7 +130,7 @@ impl Default for AddReaderOpts { impl Batch where - C: ServiceConnection, + C: Connector, { pub(super) fn new( batch: BatchId, diff --git a/src/rpc/client/tags.rs b/src/rpc/client/tags.rs index d6b843522..2b7cbc04d 100644 --- a/src/rpc/client/tags.rs +++ b/src/rpc/client/tags.rs @@ -12,7 +12,7 @@ //! [`Client::delete`] can be used to delete a tag. 
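//! An illustrative sketch of the calls described above (assumed usage, not
//! part of this change; `rpc` is any connected tags RPC client):
//! ```ignore
//! let tags = tags::Client::new(rpc);
//! let mut stream = tags.list().await?;
//! while let Some(info) = stream.next().await {
//!     println!("{:?}", info?);
//! }
//! tags.delete(Tag::from("my-tag")).await?;
//! ```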
use anyhow::Result; use futures_lite::{Stream, StreamExt}; -use quic_rpc::{client::BoxedServiceConnection, RpcClient, ServiceConnection}; +use quic_rpc::{client::BoxedConnector, Connector, RpcClient}; use serde::{Deserialize, Serialize}; use crate::{ @@ -26,13 +26,13 @@ use crate::{ /// Iroh tags client. #[derive(Debug, Clone)] #[repr(transparent)] -pub struct Client> { +pub struct Client> { pub(super) rpc: RpcClient, } impl Client where - C: ServiceConnection, + C: Connector, { /// Creates a new client pub fn new(rpc: RpcClient) -> Self { From d60fc215bb3602a0346afbffa8be2b0374b49110 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Tue, 5 Nov 2024 17:56:16 +0200 Subject: [PATCH 17/19] use merged naming-bikeshedding --- Cargo.lock | 121 +++++----------------------------------- Cargo.toml | 4 +- src/rpc.rs | 8 +-- src/rpc/client/blobs.rs | 3 +- 4 files changed, 20 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4714c3a3e..dbebb9aee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -245,15 +245,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597bb81c80a54b6a4381b23faba8d7774b144c94cbd1d6fe3f1329bd776554ab" -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - [[package]] name = "bit-set" version = "0.5.3" @@ -1268,25 +1259,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "h2" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.12", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "h2" version = "0.4.6" @@ -1505,30 +1477,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" -[[package]] -name = "hyper" -version = "0.14.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "1.5.0" @@ -1538,7 +1486,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.6", + "h2", "http 1.1.0", "http-body 1.0.1", "httparse", @@ -1558,7 +1506,7 @@ checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.5.0", + "hyper", "hyper-util", "rustls", "rustls-pki-types", @@ -1579,7 +1527,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper 1.5.0", + "hyper", "pin-project-lite", "socket2", "tokio", @@ -1771,7 +1719,7 @@ dependencies = [ "futures", "http 1.1.0", "http-body-util", - "hyper 1.5.0", + "hyper", "hyper-util", "log", "rand", @@ -1907,7 +1855,7 @@ dependencies = [ "portable-atomic", "postcard", "proptest", - "quic-rpc 0.14.0 (git+https://github.com/n0-computer/quic-rpc.git?branch=naming-bikeshedding)", + "quic-rpc", "quic-rpc-derive", "rand", "range-collections", @@ -1957,7 
+1905,7 @@ dependencies = [ "anyhow", "erased_set", "http-body-util", - "hyper 1.5.0", + "hyper", "hyper-util", "once_cell", "prometheus-client", @@ -1994,7 +1942,7 @@ dependencies = [ "hostname", "http 1.1.0", "http-body-util", - "hyper 1.5.0", + "hyper", "hyper-util", "igd-next", "iroh-base", @@ -3185,9 +3133,8 @@ dependencies = [ [[package]] name = "quic-rpc" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d8431b2e7c22929347b61a354d4936d71fe7ab1e6b0475dc50e98276970dfec" +version = "0.15.0" +source = "git+https://github.com/n0-computer/quic-rpc.git?branch=map-transports-not-services#16b7c4942365b96fc750627459261e52e1190f6c" dependencies = [ "anyhow", "derive_more", @@ -3204,40 +3151,13 @@ dependencies = [ "tracing", ] -[[package]] -name = "quic-rpc" -version = "0.14.0" -source = "git+https://github.com/n0-computer/quic-rpc.git?branch=naming-bikeshedding#067ae4b04bad16036571225904cd6970decb040b" -dependencies = [ - "anyhow", - "bincode", - "bytes", - "derive_more", - "educe", - "flume", - "futures-lite 2.4.0", - "futures-sink", - "futures-util", - "hex", - "hyper 0.14.31", - "iroh-quinn", - "pin-project", - "serde", - "slab", - "tokio", - "tokio-serde", - "tokio-util", - "tracing", -] - [[package]] name = "quic-rpc-derive" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403bc8506c847468e00170dbbbfe2c54d13b090031bcbe474cd3faea021cbd9f" +version = "0.15.0" +source = "git+https://github.com/n0-computer/quic-rpc.git?branch=map-transports-not-services#16b7c4942365b96fc750627459261e52e1190f6c" dependencies = [ "proc-macro2", - "quic-rpc 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", + "quic-rpc", "quote", "syn 1.0.109", ] @@ -3509,7 +3429,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.0", + "hyper", "hyper-rustls", "hyper-util", "ipnet", @@ -4472,21 +4392,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-serde" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" -dependencies = [ - "bincode", - "bytes", - "educe", - "futures-core", - "futures-sink", - "pin-project", - "serde", -] - [[package]] name = "tokio-stream" version = "0.1.16" diff --git a/Cargo.toml b/Cargo.toml index 809920263..cefd5b756 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,8 +38,8 @@ pin-project = "1.1.5" portable-atomic = { version = "1", optional = true } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } quinn = { package = "iroh-quinn", version = "0.12", features = ["ring"] } -quic-rpc = { git = "https://github.com/n0-computer/quic-rpc.git", branch = "naming-bikeshedding", optional = true } -quic-rpc-derive = { version = "0.14", optional = true } +quic-rpc = { git = "https://github.com/n0-computer/quic-rpc.git", branch = "map-transports-not-services", optional = true } +quic-rpc-derive = { git = "https://github.com/n0-computer/quic-rpc.git", branch = "map-transports-not-services", optional = true } rand = "0.8" range-collections = "0.4.0" redb = { version = "2.0.0", optional = true } diff --git a/src/rpc.rs b/src/rpc.rs index cc79f6a80..7264f8ac5 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -32,7 +32,7 @@ use proto::{ }, Request, RpcError, RpcResult, RpcService, }; -use quic_rpc::server::{RpcChannel, RpcServerError}; +use quic_rpc::server::{ChannelTypes, RpcChannel, RpcServerError}; 
use crate::{ export::ExportProgress, @@ -63,7 +63,7 @@ impl Blobs { chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - C: quic_rpc::ListenerTypes, + C: ChannelTypes, { use Request::*; match msg { @@ -79,7 +79,7 @@ impl Blobs { chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - C: quic_rpc::ListenerTypes, + C: ChannelTypes, { use proto::tags::Request::*; match msg { @@ -97,7 +97,7 @@ impl Blobs { chan: RpcChannel, ) -> std::result::Result<(), RpcServerError> where - C: quic_rpc::ListenerTypes, + C: ChannelTypes, { use proto::blobs::Request::*; match msg { diff --git a/src/rpc/client/blobs.rs b/src/rpc/client/blobs.rs index fcfd44c6d..1f7ce7646 100644 --- a/src/rpc/client/blobs.rs +++ b/src/rpc/client/blobs.rs @@ -1120,8 +1120,7 @@ mod tests { let router = router.spawn().await?; // Setup RPC - let (internal_rpc, controller) = - quic_rpc::transport::flume::service_connection::(32); + let (internal_rpc, controller) = quic_rpc::transport::flume::channel(32); let controller = controller.boxed(); let internal_rpc = internal_rpc.boxed(); let internal_rpc = quic_rpc::RpcServer::new(internal_rpc); From b1d0e6c02f7ab9608745970ad06b2e6f3368b149 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Wed, 6 Nov 2024 12:10:23 +0200 Subject: [PATCH 18/19] use published quic-rpc --- Cargo.lock | 6 ++++-- Cargo.toml | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dbebb9aee..ca1acf205 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3134,7 +3134,8 @@ dependencies = [ [[package]] name = "quic-rpc" version = "0.15.0" -source = "git+https://github.com/n0-computer/quic-rpc.git?branch=map-transports-not-services#16b7c4942365b96fc750627459261e52e1190f6c" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61e131f594054d27d077162815db3b5e9ddd76a28fbb9091b68095971e75c286" dependencies = [ "anyhow", "derive_more", @@ -3154,7 +3155,8 @@ dependencies = [ [[package]] name = "quic-rpc-derive" version = "0.15.0" -source = "git+https://github.com/n0-computer/quic-rpc.git?branch=map-transports-not-services#16b7c4942365b96fc750627459261e52e1190f6c" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbef4c942978f74ef296ae40d43d4375c9d730b65a582688a358108cfd5c0cf7" dependencies = [ "proc-macro2", "quic-rpc", diff --git a/Cargo.toml b/Cargo.toml index 3213afbf0..0d2753f49 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,8 +37,8 @@ parking_lot = { version = "0.12.1", optional = true } pin-project = "1.1.5" portable-atomic = { version = "1", optional = true } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } -quic-rpc = { git = "https://github.com/n0-computer/quic-rpc.git", branch = "map-transports-not-services", optional = true } -quic-rpc-derive = { git = "https://github.com/n0-computer/quic-rpc.git", branch = "map-transports-not-services", optional = true } +quic-rpc = { version = "0.15.0", optional = true } +quic-rpc-derive = { version = "0.15.0", optional = true } quinn = { package = "iroh-quinn", version = "0.12", features = ["ring"] } rand = "0.8" range-collections = "0.4.0" From b7ea1da608e64f1172f00512ea10cd8f3bd99e04 Mon Sep 17 00:00:00 2001 From: Ruediger Klaehn Date: Wed, 6 Nov 2024 12:12:00 +0200 Subject: [PATCH 19/19] Add "Unicode-3.0" license --- deny.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/deny.toml b/deny.toml index 267ca7a65..f5669dbf3 100644 --- a/deny.toml +++ b/deny.toml @@ -21,6 +21,7 @@ allow = 
[ "Unicode-DFS-2016", "Zlib", "MPL-2.0", # https://fossa.com/blog/open-source-software-licenses-101-mozilla-public-license-2-0/ + "Unicode-3.0" ] [[licenses.clarify]]
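Taken together, the series leaves the blobs, tags, and batch clients generic over a single `Connector` and served by the published quic-rpc 0.15. A minimal sketch of the round trip the tests above perform, assuming the `node` test helper module added earlier in the series (error handling elided):

    let node = node::Node::memory().spawn().await?;
    let blobs = node.blobs();
    // add a blob, then read it back over the in-process RPC transport
    let res = blobs.add_bytes(&b"hello world"[..]).await?;
    let bytes = blobs.read_to_bytes(res.hash).await?;
    assert_eq!(&bytes[..], b"hello world");
    node.shutdown().await?;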