//! Example that shows how to create a collection and transfer it to another
//! node. It also shows patterns for defining a "Node" struct in higher-level
//! code that abstracts over these operations with an API that feels closer to
//! what an application would use.
//!
//! Run the entire example in one command:
//! $ cargo run --example transfer-collection
use std::collections::HashMap;

use anyhow::{Context, Result};
use iroh::{
    discovery::static_provider::StaticProvider, protocol::Router, Endpoint, NodeAddr, Watcher,
};
use iroh_blobs::{
    api::{downloader::Shuffled, Store, TempTag},
    format::collection::Collection,
    store::mem::MemStore,
    BlobsProtocol, Hash, HashAndFormat,
};

/// Node is something you'd define in your application. It can contain whatever
/// shared state you'd want to couple with network operations.
struct Node {
    store: Store,
    /// Router with the blobs protocol registered, to accept blob requests.
    /// We can always get the endpoint with `router.endpoint()`.
    router: Router,
}

impl Node {
    async fn new(disc: &StaticProvider) -> Result<Self> {
        let endpoint = Endpoint::builder()
            .add_discovery(disc.clone())
            .bind()
            .await?;

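        // MemStore keeps everything in memory, which is handy for examples and
        // tests; a persistent application would likely use the filesystem-backed
        // store from iroh-blobs instead; the rest of this code would stay the same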
        let store = MemStore::new();

        // this BlobsProtocol accepts connections from other nodes and serves blobs from the store
        // we pass None to skip subscribing to request events
        let blobs = BlobsProtocol::new(&store, endpoint.clone(), None);
        // Routers group one or more protocols together to accept connections from other nodes.
        // Here we register only the blobs protocol, but a real application could add more as needed.
        let router = Router::builder(endpoint)
            .accept(iroh_blobs::ALPN, blobs)
            .spawn();

        Ok(Self {
            store: store.into(),
            router,
        })
    }

    /// Get the address of this node. Has the side effect of waiting for the
    /// node to be online and ready to accept connections.
    async fn node_addr(&self) -> Result<NodeAddr> {
        let addr = self.router.endpoint().node_addr().initialized().await;
        Ok(addr)
    }

    async fn list_hashes(&self) -> Result<Vec<Hash>> {
        self.store
            .blobs()
            .list()
            .hashes()
            .await
            .context("Failed to list hashes")
    }

    /// Creates a collection from a given set of named blobs, adds it to the
    /// local store, and returns the hash of the collection.
    async fn create_collection(&self, named_blobs: Vec<(&str, Vec<u8>)>) -> Result<Hash> {
        let mut collection_items: HashMap<&str, TempTag> = HashMap::new();

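        // adding blobs through a batch gives us temp tags that protect the new
        // blobs from garbage collection while we assemble the collection below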
        let tx = self.store.batch().await?;
        for (name, data) in named_blobs {
            let tmp_tag = tx.add_bytes(data).await?;
            collection_items.insert(name, tmp_tag);
        }

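        // note: a HashMap iterates in arbitrary order, so the order of entries
        // in the resulting collection (and therefore its hash) is not stable
        // across runs; use a Vec or sort by name if a deterministic hash matters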
        let collection_items = collection_items
            .iter()
            .map(|(name, tag)| (name.to_string(), *tag.hash()))
            .collect::<Vec<_>>();

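        // a Collection maps names to blob hashes; it is stored as a metadata
        // blob plus a hash sequence, which is what lets a single root hash
        // refer to the whole set of files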
        let collection = Collection::from_iter(collection_items);

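        // storing the collection returns a temp tag; creating a persistent tag
        // from it keeps the collection and everything it references alive once
        // the temp tags and the batch go out of scope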
        let tt = collection.store(&self.store).await?;
        self.store.tags().create(*tt.hash_and_format()).await?;
        Ok(*tt.hash())
    }

    /// Retrieves an entire collection, identified by its hash, from the given provider.
    async fn get_collection(&self, hash: Hash, provider: NodeAddr) -> Result<()> {
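        // request the hash as a hash sequence, so the download fetches not just
        // the root blob but also the collection metadata and every file it references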
        let req = HashAndFormat::hash_seq(hash);
        let addrs = Shuffled::new(vec![provider.node_id]);
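        // Shuffled tries the given providers in random order; with a single
        // provider this simply downloads everything from that node into our store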
        self.store
            .downloader(self.router.endpoint())
            .download(req, addrs)
            .await?;
        Ok(())
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // create a local provider for nodes to discover each other.
    // outside of a development environment, production apps would
    // use `Endpoint::builder().discovery_n0()` or a similar method
    let disc = StaticProvider::new();

    // create a sending node
    let send_node = Node::new(&disc).await?;
    let send_node_addr = send_node.node_addr().await?;
    // add a collection with three files
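    // the hash this returns identifies the whole collection; it's what you'd
    // share with other nodes (e.g. via a ticket or another out-of-band channel)
    // so they can fetch it. Here we just keep it in a variable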
    let hash = send_node
        .create_collection(vec![
            ("a.txt", b"this is file a".into()),
            ("b.txt", b"this is file b".into()),
            ("c.txt", b"this is file c".into()),
        ])
        .await?;

    // create the receiving node
    let recv_node = Node::new(&disc).await?;

    // add the send node to the discovery provider so the recv node can find it
    disc.add_node_info(send_node_addr.clone());
    // fetch the collection and all contents
    recv_node.get_collection(hash, send_node_addr).await?;

    // when listing hashes, you'll see 5 hashes in total:
    // - one hash for each of the three files
    // - the hash of the collection's metadata blob (this is where filenames like "a.txt" live)
    // - the hash of the collection itself, which is just the above 4 hashes concatenated, then hashed
    let send_hashes = send_node.list_hashes().await?;
    let recv_hashes = recv_node.list_hashes().await?;
    assert_eq!(send_hashes.len(), recv_hashes.len());

    println!("Transfer complete!");
    Ok(())
}