diff --git a/magic-nix-cache/src/api.rs b/magic-nix-cache/src/api.rs
index 013ee92..c9437d2 100644
--- a/magic-nix-cache/src/api.rs
+++ b/magic-nix-cache/src/api.rs
@@ -12,14 +12,14 @@ use crate::error::{Error, Result};
 
 #[derive(Debug, Clone, Serialize)]
 struct WorkflowStartResponse {
-    num_original_paths: usize,
+    num_original_paths: Option<usize>,
 }
 
 #[derive(Debug, Clone, Serialize)]
 struct WorkflowFinishResponse {
-    num_original_paths: usize,
-    num_final_paths: usize,
-    num_new_paths: usize,
+    num_original_paths: Option<usize>,
+    num_final_paths: Option<usize>,
+    num_new_paths: Option<usize>,
 }
 
 pub fn get_router() -> Router {
@@ -33,18 +33,23 @@ pub fn get_router() -> Router {
 #[debug_handler]
 async fn workflow_start(Extension(state): Extension<State>) -> Result<Json<WorkflowStartResponse>> {
     tracing::info!("Workflow started");
-    let mut original_paths = state.original_paths.lock().await;
-    *original_paths = crate::util::get_store_paths(&state.store).await?;
+    let reply = if let Some(original_paths) = &state.original_paths {
+        let mut original_paths = original_paths.lock().await;
+        *original_paths = crate::util::get_store_paths(&state.store).await?;
 
-    let reply = WorkflowStartResponse {
-        num_original_paths: original_paths.len(),
+        let reply = WorkflowStartResponse {
+            num_original_paths: Some(original_paths.len()),
+        };
+
+        state.metrics.num_original_paths.set(original_paths.len());
+
+        reply
+    } else {
+        WorkflowStartResponse {
+            num_original_paths: None,
+        }
     };
 
-    state
-        .metrics
-        .num_original_paths
-        .set(reply.num_original_paths);
-
     Ok(Json(reply))
 }
 
@@ -54,22 +59,42 @@ async fn workflow_finish(
 ) -> Result<Json<WorkflowFinishResponse>> {
     tracing::info!("Workflow finished");
 
-    let original_paths = state.original_paths.lock().await;
-    let final_paths = crate::util::get_store_paths(&state.store).await?;
-    let new_paths = final_paths
-        .difference(&original_paths)
-        .cloned()
-        .map(|path| state.store.follow_store_path(path).map_err(Error::Attic))
-        .collect::<Result<Vec<_>>>()?;
+    let response = if let Some(original_paths) = &state.original_paths {
+        let original_paths = original_paths.lock().await;
+        let final_paths = crate::util::get_store_paths(&state.store).await?;
+        let new_paths = final_paths
+            .difference(&original_paths)
+            .cloned()
+            .map(|path| state.store.follow_store_path(path).map_err(Error::Attic))
+            .collect::<Result<Vec<_>>>()?;
 
-    let num_original_paths = original_paths.len();
-    let num_final_paths = final_paths.len();
-    let num_new_paths = new_paths.len();
+        let num_original_paths = original_paths.len();
+        let num_final_paths = final_paths.len();
+        let num_new_paths = new_paths.len();
 
-    // NOTE(cole-h): If we're substituting from an upstream cache, those paths won't have the
-    // post-build-hook run on it, so we diff the store to ensure we cache everything we can.
-    tracing::info!("Diffing the store and uploading any new paths before we shut down");
-    enqueue_paths(&state, new_paths).await?;
+        let reply = WorkflowFinishResponse {
+            num_original_paths: Some(num_original_paths),
+            num_final_paths: Some(num_final_paths),
+            num_new_paths: Some(num_new_paths),
+        };
+
+        state.metrics.num_original_paths.set(num_original_paths);
+        state.metrics.num_final_paths.set(num_final_paths);
+        state.metrics.num_new_paths.set(num_new_paths);
+
+        // NOTE(cole-h): If we're substituting from an upstream cache, those paths won't have the
+        // post-build-hook run on it, so we diff the store to ensure we cache everything we can.
+        tracing::info!("Diffing the store and uploading any new paths before we shut down");
+        enqueue_paths(&state, new_paths).await?;
+
+        reply
+    } else {
+        WorkflowFinishResponse {
+            num_original_paths: None,
+            num_final_paths: None,
+            num_new_paths: None,
+        }
+    };
 
     if let Some(gha_cache) = &state.gha_cache {
         tracing::info!("Waiting for GitHub action cache uploads to finish");
@@ -94,20 +119,7 @@ async fn workflow_finish(
         println!("\n{logfile_contents}\n");
     }
 
-    let reply = WorkflowFinishResponse {
-        num_original_paths,
-        num_final_paths,
-        num_new_paths,
-    };
-
-    state
-        .metrics
-        .num_original_paths
-        .set(reply.num_original_paths);
-    state.metrics.num_final_paths.set(reply.num_final_paths);
-    state.metrics.num_new_paths.set(reply.num_new_paths);
-
-    Ok(Json(reply))
+    Ok(Json(response))
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
diff --git a/magic-nix-cache/src/main.rs b/magic-nix-cache/src/main.rs
index 18b431c..d2cc09a 100644
--- a/magic-nix-cache/src/main.rs
+++ b/magic-nix-cache/src/main.rs
@@ -120,6 +120,10 @@ struct Args {
     /// File to write to when indicating startup.
     #[arg(long)]
     startup_notification_file: Option<PathBuf>,
+
+    /// Whether or not to diff the store before and after Magic Nix Cache runs
+    #[arg(long, default_value_t = false)]
+    diff_store: bool,
 }
 
 impl Args {
@@ -166,8 +170,8 @@ struct StateInner {
     /// Where all of tracing will log to when GitHub Actions is run in debug mode
     logfile: Option<PathBuf>,
 
-    /// The paths in the Nix store when Magic Nix Cache started.
-    original_paths: Mutex<HashSet<PathBuf>>,
+    /// The paths in the Nix store when Magic Nix Cache started, if store diffing is enabled.
+    original_paths: Option<Mutex<HashSet<PathBuf>>>,
 }
 
 async fn main_cli() -> Result<()> {
@@ -356,6 +360,7 @@ async fn main_cli() -> Result<()> {
 
     let (shutdown_sender, shutdown_receiver) = oneshot::channel();
 
+    let original_paths = args.diff_store.then_some(Mutex::new(HashSet::new()));
     let state = Arc::new(StateInner {
         gha_cache,
         upstream: args.upstream.clone(),
@@ -365,7 +370,7 @@ async fn main_cli() -> Result<()> {
         store,
         flakehub_state: RwLock::new(flakehub_state),
         logfile: guard.logfile,
-        original_paths: Mutex::new(HashSet::new()),
+        original_paths,
     });
 
     let app = Router::new()