Compare commits

...

21 commits

Author SHA1 Message Date
Cole Helbling 02061a5850 wip: use Nix's sqlite db "directly"
this might not be a safe assumption, but we're gonna try
2024-05-14 15:13:50 -07:00
Cole Helbling 008b537385 Don't skip -source when diffing store 2024-05-14 14:33:53 -07:00
Cole Helbling 938f17dd2d Bring back store diffing 2024-05-14 14:33:53 -07:00
Cole Helbling 507095f7ea Merge remote-tracking branch 'origin/main' into log-and-cat-after-workflow-finished 2024-05-14 10:43:37 -07:00
Cole Helbling 8b1b1c9302 fixup 2024-05-07 11:18:56 -07:00
Cole Helbling a0a35d7ff8 Merge remote-tracking branch 'origin/main' into log-and-cat-after-workflow-finished 2024-05-07 11:17:11 -07:00
Cole Helbling a9f32f83dd more debug logging, truncate the log file when we init logging 2024-05-07 11:11:39 -07:00
Cole Helbling 3bc019d996 wip: no, but do flakehub after shutting down the web server 2024-04-24 15:17:35 -07:00
Cole Helbling f06ed35360 wip: wait for flakehub pushes before shutting down? 2024-04-24 14:23:22 -07:00
Cole Helbling 1c83c37f4e wip: more tracing, sleep at first 2024-04-24 07:51:51 -07:00
Cole Helbling c481c9beb6 wip: more debugging 2024-04-24 07:30:32 -07:00
Cole Helbling 9947f4c5ed wip: add more debug lines 2024-04-23 15:35:10 -07:00
Cole Helbling e82dde9ac7 wip: debug logging to see why this still crosses devices 2024-04-23 14:11:02 -07:00
Cole Helbling 5552d61f5e fixup: create temporary netrc file right next to real one 2024-04-23 12:48:30 -07:00
Cole Helbling 3a61953dc1 wip: move logging init back into main cli only 2024-04-23 08:59:51 -07:00
Cole Helbling 1f543d7e7a Revert "wip: drop the file and keep the early-init logging"
This reverts commit 3b1a1e3c12.
2024-04-23 08:59:42 -07:00
Cole Helbling 5cb3d69802 Revert "wip: intentionally bust post-build-hook"
This reverts commit 1e258373b5.
2024-04-23 08:59:00 -07:00
Cole Helbling 1e258373b5 wip: intentionally bust post-build-hook 2024-04-23 08:41:58 -07:00
Cole Helbling 3b1a1e3c12 wip: drop the file and keep the early-init logging 2024-04-23 08:29:03 -07:00
Cole Helbling b983b25405 wip: thin CI so it runs faster 2024-04-23 08:10:38 -07:00
Cole Helbling 589099e43a wip: see if there's some logging we're missing 2024-04-23 07:55:32 -07:00
12 changed files with 256 additions and 313 deletions


@@ -5,56 +5,8 @@ on:
workflow_call:
jobs:
build-artifacts-ARM64-macOS:
runs-on: macos-latest-xlarge
permissions:
contents: read
id-token: write
steps:
- uses: actions/checkout@v3
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Build package
run: "nix build .# -L --fallback && nix-store --export $(nix-store -qR ./result) | xz -9 > magic-nix-cache.closure.xz"
- name: Upload a Build Artifact
uses: actions/upload-artifact@v3.1.2
with:
# Artifact name
name: magic-nix-cache-ARM64-macOS
path: magic-nix-cache.closure.xz
retention-days: 1
build-artifacts-X64-macOS:
runs-on: macos-12
permissions:
contents: read
id-token: write
steps:
- uses: actions/checkout@v3
- uses: DeterminateSystems/flake-checker-action@main
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Build package
run: "nix build .# -L --fallback && nix-store --export $(nix-store -qR ./result) | xz -9 > magic-nix-cache.closure.xz"
- name: Upload a Build Artifact
uses: actions/upload-artifact@v3.1.2
with:
# Artifact name
name: magic-nix-cache-X64-macOS
path: magic-nix-cache.closure.xz
retention-days: 1
build-artifacts-X64-Linux:
-runs-on: ubuntu-22.04
+runs-on: UbuntuLatest32Cores128G
permissions:
contents: read
id-token: write
@@ -77,28 +29,3 @@ jobs:
name: magic-nix-cache-X64-Linux
path: magic-nix-cache.closure.xz
retention-days: 1
build-artifacts-ARM64-Linux:
runs-on: namespace-profile-default-arm64
permissions:
contents: read
id-token: write
steps:
- uses: actions/checkout@v3
- uses: DeterminateSystems/flake-checker-action@main
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Build package
run: "nix build .# -L --fallback && nix-store --export $(nix-store -qR ./result) | xz -9 > magic-nix-cache.closure.xz"
- name: Upload a Build Artifact
uses: actions/upload-artifact@v3.1.2
with:
# Artifact name
name: magic-nix-cache-ARM64-Linux
path: magic-nix-cache.closure.xz
retention-days: 1


@@ -28,7 +28,3 @@ jobs:
- name: Clippy
run: nix develop --command cargo clippy
build:
uses: ./.github/workflows/build.yaml
secrets: inherit


@@ -1,32 +0,0 @@
name: Push dev shell to FlakeHub Cache
on:
push:
branches: [main]
jobs:
push-dev-shell-to-flakehub-cache:
env:
ACTIONS_STEP_DEBUG: true
runs-on: ${{ matrix.systems.runner }}
permissions:
id-token: "write"
contents: "read"
strategy:
matrix:
systems:
- nix-system: "aarch64-darwin"
runner: "macos-latest-xlarge"
- nix-system: "x86_64-darwin"
runner: "macos-12"
- nix-system: "x86_64-linux"
runner: "ubuntu-22.04"
steps:
- uses: actions/checkout@v3
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
with:
use-flakehub: true
- name: Build dev shell for ${{ matrix.systems.nix-system }} on ${{ matrix.systems.runner }}
run: |
nix build .#devShells.${{ matrix.systems.nix-system }}.default


@@ -1,67 +0,0 @@
name: Release Branch
on:
push:
branches:
# NOTE: make sure any branches here are also valid directory names,
# otherwise creating the directory and uploading to s3 will fail
- "main"
jobs:
build:
uses: ./.github/workflows/build.yaml
secrets: inherit
release:
needs: build
concurrency: release
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write # In order to request a JWT for AWS auth
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2
with:
role-to-assume: ${{ secrets.AWS_S3_UPLOAD_ROLE }}
aws-region: us-east-2
- name: Create the artifacts directory
run: rm -rf ./artifacts && mkdir ./artifacts
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-ARM64-macOS
path: cache-binary-ARM64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-X64-macOS
path: cache-binary-X64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-X64-Linux
path: cache-binary-X64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-ARM64-Linux
path: cache-binary-ARM64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-Linux
- name: Publish Release (Branch)
env:
AWS_BUCKET: ${{ secrets.AWS_S3_UPLOAD_BUCKET }}
run: |
.github/workflows/upload_s3.sh branch "${{ github.ref_name }}" "$GITHUB_SHA"


@@ -36,20 +36,6 @@ jobs:
- name: Create the artifacts directory
run: rm -rf ./artifacts && mkdir ./artifacts
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-ARM64-macOS
path: cache-binary-ARM64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-X64-macOS
path: cache-binary-X64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-X64-Linux
@@ -57,13 +43,6 @@ jobs:
- name: Persist the cache binary
run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-ARM64-Linux
path: cache-binary-ARM64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-Linux
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2
with:


@@ -1,71 +0,0 @@
name: Release Tags
on:
push:
tags:
- "v*.*.*"
jobs:
build:
uses: ./.github/workflows/build.yaml
release:
needs: build
concurrency: release
runs-on: ubuntu-latest
permissions:
contents: write # In order to upload artifacts to GitHub releases
id-token: write # In order to request a JWT for AWS auth
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Create the artifacts directory
run: rm -rf ./artifacts && mkdir ./artifacts
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-ARM64-macOS
path: cache-binary-ARM64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-X64-macOS
path: cache-binary-X64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-X64-Linux
path: cache-binary-X64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux
- uses: actions/download-artifact@v3
with:
name: magic-nix-cache-ARM64-Linux
path: cache-binary-ARM64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-Linux
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2
with:
role-to-assume: ${{ secrets.AWS_S3_UPLOAD_ROLE }}
aws-region: us-east-2
- name: Publish Release to S3 (Tag)
env:
AWS_BUCKET: ${{ secrets.AWS_S3_UPLOAD_BUCKET }}
run: |
.github/workflows/upload_s3.sh "tag" "$GITHUB_REF_NAME" "$GITHUB_SHA"
- name: Publish Release to GitHub (Tag)
uses: softprops/action-gh-release@v1
with:
fail_on_unmatched_files: true
draft: true
files: |
artifacts/**

Cargo.lock (generated, 50 changed lines)

@@ -1642,6 +1642,18 @@ version = "2.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
 
+[[package]]
+name = "fallible-iterator"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"
+
+[[package]]
+name = "fallible-streaming-iterator"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
+
 [[package]]
 name = "fastcdc"
 version = "3.1.0"
@@ -2544,6 +2556,7 @@ dependencies = [
  "is_ci",
  "netrc-rs",
  "reqwest",
+ "rusqlite",
  "serde",
  "serde_json",
  "sha2",
@@ -2554,6 +2567,7 @@ dependencies = [
  "tokio-util",
  "tower-http",
  "tracing",
+ "tracing-appender",
  "tracing-subscriber",
  "uuid",
 ]
@@ -3410,6 +3424,20 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "rusqlite"
+version = "0.30.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d"
+dependencies = [
+ "bitflags 2.4.2",
+ "fallible-iterator",
+ "fallible-streaming-iterator",
+ "hashlink",
+ "libsqlite3-sys",
+ "smallvec",
+]
+
 [[package]]
 name = "rust_decimal"
 version = "1.34.3"
@@ -4665,6 +4693,18 @@ dependencies = [
  "tracing-core",
 ]
 
+[[package]]
+name = "tracing-appender"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf"
+dependencies = [
+ "crossbeam-channel",
+ "thiserror",
+ "time",
+ "tracing-subscriber",
+]
+
 [[package]]
 name = "tracing-attributes"
 version = "0.1.26"
@@ -4698,12 +4738,12 @@ dependencies = [
 
 [[package]]
 name = "tracing-log"
-version = "0.1.3"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922"
+checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
 dependencies = [
- "lazy_static",
  "log",
+ "once_cell",
  "tracing-core",
 ]
@@ -4719,9 +4759,9 @@ dependencies = [
 
 [[package]]
 name = "tracing-subscriber"
-version = "0.3.17"
+version = "0.3.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77"
+checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
 dependencies = [
  "matchers",
  "nu-ansi-term",


@@ -32,6 +32,8 @@ tempfile = "3.9"
 uuid = { version = "1.4.0", features = ["serde", "v7", "rand", "std"] }
 futures = "0.3"
 async-compression = "0.4"
+tracing-appender = "0.2.3"
+rusqlite = { version = "0.30", features = ["bundled"] }
 
 [dependencies.tokio]
 version = "1.28.0"


@@ -2,6 +2,7 @@
 //!
 //! This API is intended to be used by nix-installer-action.
 
+use attic::nix_store::StorePath;
 use axum::{extract::Extension, routing::post, Json, Router};
 use axum_macros::debug_handler;
 use serde::{Deserialize, Serialize};
@@ -10,28 +11,41 @@ use super::State;
 use crate::error::{Error, Result};
 
 #[derive(Debug, Clone, Serialize)]
-struct WorkflowStartResponse {}
+struct WorkflowStartResponse {
+    num_original_paths: usize,
+}
 
 #[derive(Debug, Clone, Serialize)]
 struct WorkflowFinishResponse {
-    //num_new_paths: usize,
+    num_original_paths: usize,
+    num_final_paths: usize,
+    num_new_paths: usize,
 }
 
 pub fn get_router() -> Router {
     Router::new()
         .route("/api/workflow-start", post(workflow_start))
         .route("/api/workflow-finish", post(workflow_finish))
-        .route("/api/enqueue-paths", post(enqueue_paths))
+        .route("/api/enqueue-paths", post(post_enqueue_paths))
 }
 
 /// Record existing paths.
 #[debug_handler]
-async fn workflow_start(
-    Extension(_state): Extension<State>,
-) -> Result<Json<WorkflowStartResponse>> {
+async fn workflow_start(Extension(state): Extension<State>) -> Result<Json<WorkflowStartResponse>> {
     tracing::info!("Workflow started");
 
-    Ok(Json(WorkflowStartResponse {}))
+    let mut original_paths = state.original_paths.lock().await;
+    *original_paths = crate::util::get_store_paths(&state.store).await?;
+
+    let reply = WorkflowStartResponse {
+        num_original_paths: original_paths.len(),
+    };
+
+    state
+        .metrics
+        .num_original_paths
+        .set(reply.num_original_paths);
+
+    Ok(Json(reply))
 }
 
 /// Push new paths and shut down.
@@ -40,6 +54,23 @@ async fn workflow_finish(
 ) -> Result<Json<WorkflowFinishResponse>> {
     tracing::info!("Workflow finished");
 
+    let original_paths = state.original_paths.lock().await;
+    let final_paths = crate::util::get_store_paths(&state.store).await?;
+    let new_paths = final_paths
+        .difference(&original_paths)
+        .cloned()
+        .map(|path| state.store.follow_store_path(path).map_err(Error::Attic))
+        .collect::<Result<Vec<_>>>()?;
+
+    let num_original_paths = original_paths.len();
+    let num_final_paths = final_paths.len();
+    let num_new_paths = new_paths.len();
+
+    // NOTE(cole-h): If we're substituting from an upstream cache, those paths won't have the
+    // post-build-hook run on it, so we diff the store to ensure we cache everything we can.
+    tracing::info!("Diffing the store and uploading any new paths before we shut down");
+    enqueue_paths(&state, new_paths).await?;
+
     if let Some(gha_cache) = &state.gha_cache {
         tracing::info!("Waiting for GitHub action cache uploads to finish");
         gha_cache.shutdown().await?;
@@ -49,17 +80,32 @@ async fn workflow_finish(
         sender
             .send(())
             .map_err(|_| Error::Internal("Sending shutdown server message".to_owned()))?;
-
-        // Wait for the Attic push workers to finish.
-        if let Some(attic_state) = state.flakehub_state.write().await.take() {
-            tracing::info!("Waiting for FlakeHub cache uploads to finish");
-            attic_state.push_session.wait().await?;
-        }
     }
 
-    let reply = WorkflowFinishResponse {};
+    if let Some(attic_state) = state.flakehub_state.write().await.take() {
+        tracing::info!("Waiting for FlakeHub cache uploads to finish");
+        let paths = attic_state.push_session.wait().await?;
+        tracing::warn!(?paths, "pushed these paths");
+    }
 
-    //state.metrics.num_new_paths.set(num_new_paths);
+    // NOTE(cole-h): see `init_logging`
+    let logfile = std::env::temp_dir().join("magic-nix-cache-tracing.log");
+    let logfile_contents = std::fs::read_to_string(logfile)?;
+    println!("Every log line throughout the lifetime of the program:");
+    println!("\n{logfile_contents}\n");
+
+    let reply = WorkflowFinishResponse {
+        num_original_paths,
+        num_final_paths,
+        num_new_paths,
+    };
+
+    state
+        .metrics
+        .num_original_paths
+        .set(reply.num_original_paths);
+    state.metrics.num_final_paths.set(reply.num_final_paths);
+    state.metrics.num_new_paths.set(reply.num_new_paths);
 
     Ok(Json(reply))
 }
@@ -73,7 +119,8 @@ pub struct EnqueuePathsRequest {
 pub struct EnqueuePathsResponse {}
 
 /// Schedule paths in the local Nix store for uploading.
-async fn enqueue_paths(
+#[tracing::instrument(skip_all)]
+async fn post_enqueue_paths(
     Extension(state): Extension<State>,
     Json(req): Json<EnqueuePathsRequest>,
 ) -> Result<Json<EnqueuePathsResponse>> {
@@ -85,6 +132,12 @@ async fn enqueue_paths(
         .map(|path| state.store.follow_store_path(path).map_err(Error::Attic))
         .collect::<Result<Vec<_>>>()?;
 
+    enqueue_paths(&state, store_paths).await?;
+
+    Ok(Json(EnqueuePathsResponse {}))
+}
+
+async fn enqueue_paths(state: &State, store_paths: Vec<StorePath>) -> Result<()> {
     if let Some(gha_cache) = &state.gha_cache {
         gha_cache
             .enqueue_paths(state.store.clone(), store_paths.clone())
@@ -92,8 +145,9 @@ async fn enqueue_paths(
     }
 
     if let Some(flakehub_state) = &*state.flakehub_state.read().await {
+        tracing::warn!("enqueuing {:?} for flakehub", store_paths);
         crate::flakehub::enqueue_paths(flakehub_state, store_paths).await?;
     }
 
-    Ok(Json(EnqueuePathsResponse {}))
+    Ok(())
 }
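
Stripped of the axum and Nix-store plumbing, the workflow-start/workflow-finish change above is a snapshot-and-diff: record the set of store paths when the workflow starts, snapshot again when it finishes, and enqueue only the difference. A minimal, self-contained sketch of that idea (not part of the diff; snapshot() is a hypothetical stand-in for crate::util::get_store_paths):

use std::collections::HashSet;
use std::path::PathBuf;

// Hypothetical stand-in for crate::util::get_store_paths: anything that
// enumerates the store paths present right now.
fn snapshot() -> HashSet<PathBuf> {
    HashSet::new()
}

fn main() {
    // workflow-start: remember what was already in the store.
    let original_paths = snapshot();

    // ... builds and substitutions from upstream caches happen here ...

    // workflow-finish: anything present now but not at the start is new and
    // gets enqueued for upload, even if the post-build-hook never saw it.
    let final_paths = snapshot();
    let new_paths: Vec<&PathBuf> = final_paths.difference(&original_paths).collect();
    println!("would enqueue {} new paths", new_paths.len());
}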


@@ -1,4 +1,5 @@
 use crate::error::{Error, Result};
+use anyhow::Context;
 use attic::cache::CacheName;
 use attic::nix_store::{NixStore, StorePath};
 use attic_client::push::{PushSession, PushSessionConfig};
@@ -203,6 +204,7 @@ pub async fn enqueue_paths(state: &State, store_paths: Vec<StorePath>) -> Result
 
 /// Refresh the GitHub Actions JWT every 2 minutes (slightly less than half of the default validity
 /// period) to ensure pushing / pulling doesn't stop working.
+#[tracing::instrument(skip_all)]
 async fn refresh_github_actions_jwt_worker(
     netrc_path: std::path::PathBuf,
     mut github_jwt: String,
@@ -214,6 +216,10 @@ async fn refresh_github_actions_jwt_worker(
     // breaks this.
     let next_refresh = std::time::Duration::from_secs(2 * 60);
 
+    // NOTE(cole-h): we sleep until the next refresh at first because we already got a token from
+    // GitHub recently, don't need to try again until we actually might need to get a new one.
+    tokio::time::sleep(next_refresh).await;
+
     // NOTE(cole-h): https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-cloud-providers#requesting-the-jwt-using-environment-variables
     let mut headers = reqwest::header::HeaderMap::new();
     headers.insert(
@@ -264,6 +270,7 @@ async fn refresh_github_actions_jwt_worker(
     }
 }
 
+#[tracing::instrument(skip_all)]
 async fn rewrite_github_actions_token(
     client: &reqwest::Client,
     netrc_path: &Path,
@@ -281,29 +288,86 @@ async fn rewrite_github_actions_token(
         ))
     })?;
 
+    let token_request_url = format!("{runtime_url}&audience=api.flakehub.com");
+    let token_response = client
+        .request(reqwest::Method::GET, &token_request_url)
+        .bearer_auth(runtime_token)
+        .send()
+        .await
+        .with_context(|| format!("sending request to {token_request_url}"))?;
+
+    if let Err(e) = token_response.error_for_status_ref() {
+        tracing::error!(?e, "Got error response when requesting token");
+    }
+
     #[derive(serde::Deserialize)]
     struct TokenResponse {
         value: String,
     }
 
-    let res: TokenResponse = client
-        .request(
-            reqwest::Method::GET,
-            format!("{runtime_url}&audience=api.flakehub.com"),
-        )
-        .bearer_auth(runtime_token)
-        .send()
-        .await?
+    let token_response: TokenResponse = token_response
         .json()
-        .await?;
+        .await
+        .with_context(|| format!("converting response into json"))?;
 
-    let new_github_jwt_string = res.value;
-    let netrc_contents = tokio::fs::read_to_string(netrc_path).await?;
+    let new_github_jwt_string = token_response.value;
+
+    tracing::warn!("netrc_path: {:?}", &netrc_path);
+    tracing::warn!(
+        "metadata(netrc_path): {:?}",
+        tokio::fs::metadata(&netrc_path).await
+    );
+    tracing::warn!(
+        "symlink_metadata(netrc_path): {:?}",
+        tokio::fs::symlink_metadata(&netrc_path).await
+    );
+
+    let netrc_contents = tokio::fs::read_to_string(netrc_path)
+        .await
+        .with_context(|| format!("failed to read {netrc_path:?} to string"))?;
+
+    tracing::warn!("netrc_path: {:?}", &netrc_path);
+    tracing::warn!(
+        "metadata(netrc_path): {:?}",
+        tokio::fs::metadata(&netrc_path).await
+    );
+    tracing::warn!(
+        "symlink_metadata(netrc_path): {:?}",
+        tokio::fs::symlink_metadata(&netrc_path).await
+    );
+
     let new_netrc_contents = netrc_contents.replace(old_github_jwt, &new_github_jwt_string);
 
-    let netrc_path_new = tempfile::NamedTempFile::new()?;
-    tokio::fs::write(&netrc_path_new, new_netrc_contents).await?;
-    tokio::fs::rename(&netrc_path_new, netrc_path).await?;
+    // NOTE(cole-h): create the temporary file right next to the real one so we don't run into
+    // cross-device linking issues when renaming
+    let netrc_path_tmp = netrc_path.with_extension("tmp");
+    tokio::fs::write(&netrc_path_tmp, new_netrc_contents)
+        .await
+        .with_context(|| format!("writing new JWT to {netrc_path_tmp:?}"))?;
+
+    tracing::warn!("netrc_path_tmp: {:?}", &netrc_path_tmp);
+    tracing::warn!(
+        "metadata(netrc_path_tmp): {:?}",
+        tokio::fs::metadata(&netrc_path_tmp).await
+    );
+    tracing::warn!(
+        "symlink_metadata(netrc_path_tmp): {:?}",
+        tokio::fs::symlink_metadata(&netrc_path_tmp).await
+    );
+
+    tokio::fs::rename(&netrc_path_tmp, &netrc_path)
+        .await
+        .with_context(|| format!("renaming {netrc_path_tmp:?} to {netrc_path:?}"))?;
+
+    tracing::warn!("netrc_path_tmp: {:?}", &netrc_path_tmp);
+    tracing::warn!(
+        "metadata(netrc_path_tmp): {:?}",
+        tokio::fs::metadata(&netrc_path_tmp).await
+    );
+    tracing::warn!(
+        "symlink_metadata(netrc_path_tmp): {:?}",
+        tokio::fs::symlink_metadata(&netrc_path_tmp).await
+    );
+
+    tracing::warn!("(separator)");
+    tracing::warn!("(separator)");
+    tracing::warn!("(separator)");
 
     Ok(new_github_jwt_string)
 }
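
The netrc rewrite above drops tempfile::NamedTempFile (which lives in the system temp directory, possibly on a different filesystem) in favour of a sibling .tmp file, because rename(2) cannot move files across devices and fails with EXDEV. A standalone sketch of that write-sibling-then-rename pattern, separate from the diff and with an illustrative path:

use std::io::Write;
use std::path::Path;

// Atomically replace `dest` by writing a sibling ".tmp" file and renaming it
// over the original. Keeping the temporary file next to the destination keeps
// it on the same filesystem, which is what makes the rename safe.
fn replace_file(dest: &Path, contents: &str) -> std::io::Result<()> {
    let tmp = dest.with_extension("tmp");
    let mut file = std::fs::File::create(&tmp)?;
    file.write_all(contents.as_bytes())?;
    file.sync_all()?; // flush to disk before the rename makes it visible
    std::fs::rename(&tmp, dest)
}

fn main() -> std::io::Result<()> {
    // Illustrative path only; the real code targets the user's netrc.
    replace_file(Path::new("/tmp/example-netrc"), "machine example login x password y\n")
}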


@@ -18,9 +18,10 @@ mod error;
 mod flakehub;
 mod gha;
 mod telemetry;
+mod util;
 
 use std::collections::HashSet;
-use std::fs::{self, create_dir_all, OpenOptions};
+use std::fs::{self, create_dir_all};
 use std::io::Write;
 use std::net::SocketAddr;
 use std::os::unix::fs::PermissionsExt;
@@ -35,6 +36,8 @@ use tempfile::NamedTempFile;
 use tokio::process::Command;
 use tokio::sync::{oneshot, Mutex, RwLock};
 use tracing_subscriber::filter::EnvFilter;
+use tracing_subscriber::layer::SubscriberExt;
+use tracing_subscriber::util::SubscriberInitExt;
 
 use gha_cache::Credentials;
@@ -134,10 +137,13 @@ struct StateInner {
     /// FlakeHub cache state.
     flakehub_state: RwLock<Option<flakehub::State>>,
+
+    /// The paths in the Nix store when Magic Nix Cache started.
+    original_paths: Mutex<HashSet<PathBuf>>,
 }
 
 async fn main_cli() -> Result<()> {
-    init_logging();
+    let _guard = init_logging()?;
 
     let args = Args::parse();
@@ -147,10 +153,10 @@ async fn main_cli() -> Result<()> {
         create_dir_all(parent).with_context(|| "Creating parent directories of nix.conf")?;
     }
 
-    let mut nix_conf = OpenOptions::new()
+    let mut nix_conf = std::fs::OpenOptions::new()
         .create(true)
         .append(true)
-        .open(args.nix_conf)
+        .open(&args.nix_conf)
         .with_context(|| "Creating nix.conf")?;
 
     let store = Arc::new(NixStore::connect()?);
@@ -322,6 +328,7 @@ async fn main_cli() -> Result<()> {
         metrics,
         store,
         flakehub_state: RwLock::new(flakehub_state),
+        original_paths: Mutex::new(HashSet::new()),
     });
 
     let app = Router::new()
@@ -437,7 +444,7 @@ async fn main() -> Result<()> {
     }
 }
 
-fn init_logging() {
+fn init_logging() -> Result<tracing_appender::non_blocking::WorkerGuard> {
     let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| {
         #[cfg(debug_assertions)]
         return EnvFilter::new("info")
@@ -448,11 +455,28 @@ fn init_logging() {
         return EnvFilter::new("info");
     });
 
-    tracing_subscriber::fmt()
+    let stderr_layer = tracing_subscriber::fmt::layer()
         .with_writer(std::io::stderr)
-        .pretty()
-        .with_env_filter(filter)
+        .pretty();
+
+    let logfile = std::env::temp_dir().join("magic-nix-cache-tracing.log");
+    let file = std::fs::OpenOptions::new()
+        .create(true)
+        .write(true)
+        .truncate(true)
+        .open(logfile)?;
+    let (nonblocking, guard) = tracing_appender::non_blocking(file);
+    let file_layer = tracing_subscriber::fmt::layer()
+        .with_writer(nonblocking)
+        .pretty();
+
+    tracing_subscriber::registry()
+        .with(filter)
+        .with(stderr_layer)
+        .with(file_layer)
         .init();
+
+    Ok(guard)
 }
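
One detail in the init_logging change above is worth spelling out: tracing_appender::non_blocking returns a WorkerGuard alongside the writer, and buffered lines are only flushed while that guard is alive, which is why the function now returns it and main_cli holds it in _guard for the life of the program. A minimal sketch of the same pattern outside this codebase (not part of the diff; the log path is illustrative):

use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;

fn main() -> std::io::Result<()> {
    let file = std::fs::File::create("/tmp/example-tracing.log")?;
    let (writer, guard) = tracing_appender::non_blocking(file);

    tracing_subscriber::registry()
        .with(tracing_subscriber::fmt::layer().with_writer(writer))
        .init();

    tracing::info!("hello from the non-blocking file writer");

    // The guard must stay alive until logging is done; dropping it flushes
    // anything still buffered in the background worker.
    drop(guard);
    Ok(())
}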
#[cfg(debug_assertions)] #[cfg(debug_assertions)]


@@ -0,0 +1,27 @@
+//! Utilities.
+
+use std::collections::HashSet;
+use std::path::{Path, PathBuf};
+
+use attic::nix_store::NixStore;
+
+use crate::error::Result;
+
+/// Returns the list of store paths that are currently present.
+pub async fn get_store_paths(store: &NixStore) -> Result<HashSet<PathBuf>> {
+    // FIXME(cole-h): update the nix bindings to get the dbdir of the localstore?
+    let db =
+        rusqlite::Connection::open("file:/nix/var/nix/db/db.sqlite?immutable=1").expect("FIXME");
+    let mut stmt = db.prepare("SELECT path FROM ValidPaths").expect("FIXME");
+    let paths = stmt
+        .query_map([], |row| -> std::result::Result<PathBuf, rusqlite::Error> {
+            Ok(PathBuf::from(row.get::<_, String>(0)?))
+        })
+        .expect("FIXME")
+        .into_iter()
+        .map(|r| r.expect("FIXME"))
+        .collect::<HashSet<_>>();
+
+    Ok(paths)
+}
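
The head commit ("wip: use Nix's sqlite db 'directly'") flags this as a possibly unsafe assumption, and the FIXME above leaves every failure as expect("FIXME"). A slightly more defensive sketch of the same query, separate from the diff: it opens the database read-only at the standard location (still an assumption; a non-default store would need the real dbdir) and propagates errors instead of panicking:

use std::collections::HashSet;
use std::path::PathBuf;

use rusqlite::{Connection, OpenFlags};

// Read the set of valid store paths straight out of Nix's own database.
fn get_store_paths() -> rusqlite::Result<HashSet<PathBuf>> {
    let db = Connection::open_with_flags(
        "file:/nix/var/nix/db/db.sqlite?immutable=1",
        OpenFlags::SQLITE_OPEN_READ_ONLY | OpenFlags::SQLITE_OPEN_URI | OpenFlags::SQLITE_OPEN_NO_MUTEX,
    )?;
    let mut stmt = db.prepare("SELECT path FROM ValidPaths")?;
    let paths = stmt
        .query_map([], |row| row.get::<_, String>(0).map(PathBuf::from))?
        .collect::<rusqlite::Result<HashSet<_>>>()?;
    Ok(paths)
}

fn main() -> rusqlite::Result<()> {
    println!("{} valid store paths", get_store_paths()?.len());
    Ok(())
}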