From c7099825b11fc88d4e7ccc50a4ede43d4f87dc0d Mon Sep 17 00:00:00 2001
From: GrocerPublishAgent
Date: Mon, 10 Nov 2025 15:05:36 -0800
Subject: [PATCH] feat: add --jobs flag to limit concurrent file imports

Ran into a scenario on shared hosting where isolation is only by Linux
user. The process sees all 64 CPUs, but you shouldn't actually use them
all, since other users share the same server.

Added a `-j/--jobs` flag to set the number of parallel jobs during
import. It still uses `num_cpus::get()` if the flag is not provided, to
maintain existing behavior. Use a lower value on shared infrastructure
to avoid impacting other users.
---
 src/main.rs | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/main.rs b/src/main.rs
index 3103c21..9a60986 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -145,6 +145,12 @@ pub struct CommonArgs {
 
     #[clap(long)]
     pub show_secret: bool,
+
+    /// Number of parallel jobs to use while importing files.
+    ///
+    /// Defaults to the number of logical CPU cores.
+    #[clap(short = 'j', long)]
+    pub jobs: Option<usize>,
 }
 
 /// Available command line options for configuring relays.
@@ -367,8 +373,9 @@ async fn import(
     path: PathBuf,
     db: &Store,
     mp: &mut MultiProgress,
+    jobs: Option<usize>,
 ) -> anyhow::Result<(TempTag, u64, Collection)> {
-    let parallelism = num_cpus::get();
+    let parallelism = jobs.unwrap_or_else(num_cpus::get);
     let path = path.canonicalize()?;
     anyhow::ensure!(path.exists(), "path {} does not exist", path.display());
     let root = path.parent().context("context get parent")?;
@@ -711,7 +718,7 @@ async fn send(args: SendArgs) -> anyhow::Result<()> {
         )),
     );
 
-    let import_result = import(path2, blobs.store(), &mut mp).await?;
+    let import_result = import(path2, blobs.store(), &mut mp, args.common.jobs).await?;
     let dt = t0.elapsed();
 
     let router = iroh::protocol::Router::builder(endpoint)