Add support for pushing to Attic (#1)

* Add support for pushing to Attic

* fmt/clippy

* Fix attic dependency

* Pass ssh private key

* Try to inherit secrets

* Fix static build

* Fix default package

* Fix daemonizing

* Fix clippy

* Update nix.conf

* Add --use-attic flag

* --use-attic -> --use-flakehub

* Handle project not existing

* Handle Attic init failure

* Skip .chroot paths

* Update netrc

* Downgrade to Nixpkgs 23.05 to fix static builds

* Use rust 1.70

We need 1.70, but 1.69 is the default in Nixpkgs 23.05.

* Rename stuff

* Use magic-nix-cache-priv

* Hack
Eelco Dolstra authored on 2023-12-14 17:09:09 +01:00, committed by GitHub
parent 369a0a0a5a
commit fd1420febf
17 changed files with 1740 additions and 105 deletions

(GitHub Actions workflow: CI checks and build)

@@ -11,6 +11,10 @@ jobs:
steps:
- uses: actions/checkout@v3
+ - uses: webfactory/ssh-agent@v0.7.0
+ with:
+ ssh-private-key: ${{ secrets.LOL_DETSYS_CI_SSH_PRIVATE_KEY }}
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
@@ -32,6 +36,10 @@ jobs:
steps:
- uses: actions/checkout@v3
+ - uses: webfactory/ssh-agent@v0.7.0
+ with:
+ ssh-private-key: ${{ secrets.LOL_DETSYS_CI_SSH_PRIVATE_KEY }}
- uses: DeterminateSystems/flake-checker-action@main
- uses: DeterminateSystems/nix-installer-action@main
@@ -55,11 +63,15 @@ jobs:
steps:
- uses: actions/checkout@v3
+ - uses: webfactory/ssh-agent@v0.7.0
+ with:
+ ssh-private-key: ${{ secrets.LOL_DETSYS_CI_SSH_PRIVATE_KEY }}
- uses: DeterminateSystems/flake-checker-action@main
- uses: DeterminateSystems/nix-installer-action@main
- - uses: DeterminateSystems/magic-nix-cache-action@main
+ - uses: DeterminateSystems/magic-nix-cache-action-priv@attic-v2
- name: Build package
run: "nix build .# -L --fallback"

(GitHub Actions workflow)

@@ -11,6 +11,10 @@ jobs:
steps:
- uses: actions/checkout@v3
+ - uses: webfactory/ssh-agent@v0.7.0
+ with:
+ ssh-private-key: ${{ secrets.LOL_DETSYS_CI_SSH_PRIVATE_KEY }}
- name: Install Nix
uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main

(GitHub Actions workflow: PR artifact upload)

@@ -11,6 +11,7 @@ on:
jobs:
build:
uses: ./.github/workflows/build.yaml
+ secrets: inherit
release:
needs: build
@@ -19,7 +20,7 @@ jobs:
# Only intra-repo PRs are allowed to have PR artifacts uploaded
# We only want to trigger once the upload once in the case the upload label is added, not when any label is added
if: |
- github.event.pull_request.head.repo.full_name == 'DeterminateSystems/magic-nix-cache'
+ github.event.pull_request.head.repo.full_name == 'DeterminateSystems/magic-nix-cache-priv'
&& (
(github.event.action == 'labeled' && github.event.label.name == 'upload to s3')
|| (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3'))

Cargo.lock (generated, 1075 changed lines)

File diff suppressed because it is too large.

crane.nix

@@ -6,6 +6,7 @@
, rust-bin
, nix-gitignore
, supportedSystems
+ , nix-flake
}:
let
@@ -32,7 +33,7 @@ let
else
import pkgs.path {
inherit system crossSystem;
- overlays = [ ];
+ overlays = [ nix-flake.overlays.default ];
};
rustTargetSpec = rust.toRustTargetSpec pkgsCross.pkgsStatic.stdenv.hostPlatform;
@@ -66,10 +67,13 @@ let
cargoTargets = lib.mapAttrsToList (_: p: p.rustTargetSpec) crossPlatforms;
cargoCrossEnvs = lib.foldl (acc: p: acc // p.env) { } (builtins.attrValues crossPlatforms);
- makeBuildInputs = pkgs: with pkgs; [ ]
+ makeBuildInputs = pkgs:
+ [ pkgs.nix
+ pkgs.boost # needed for clippy
+ ]
++ lib.optionals pkgs.stdenv.isDarwin [
- darwin.apple_sdk.frameworks.Security
- (libiconv.override { enableStatic = true; enableShared = false; })
+ pkgs.darwin.apple_sdk.frameworks.Security
+ (pkgs.libiconv.override { enableStatic = true; enableShared = false; })
];
buildFor = system:
@@ -87,6 +91,8 @@ let
inherit (crateName) pname version;
inherit src;
+ nativeBuildInputs = [ pkgs.pkg-config ];
+ buildInputs = makeBuildInputs pkgs;
cargoExtraArgs = "--target ${crossPlatform.rustTargetSpec}";

flake.lock

@@ -38,6 +38,22 @@
"url": "https://flakehub.com/f/edolstra/flake-compat/1.0.1.tar.gz"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
@@ -74,25 +90,94 @@
"type": "github"
}
},
- "nixpkgs": {
+ "lowdown-src": {
+ "flake": false,
"locked": {
- "lastModified": 1696604326,
- "narHash": "sha256-YXUNI0kLEcI5g8lqGMb0nh67fY9f2YoJsILafh6zlMo=",
- "rev": "87828a0e03d1418e848d3dd3f3014a632e4a4f64",
- "revCount": 533189,
+ "lastModified": 1633514407,
+ "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "type": "github"
+ }
+ },
+ "nix": {
+ "inputs": {
+ "flake-compat": "flake-compat_2",
+ "lowdown-src": "lowdown-src",
+ "nixpkgs": "nixpkgs",
+ "nixpkgs-regression": "nixpkgs-regression"
+ },
+ "locked": {
+ "lastModified": 1701122567,
+ "narHash": "sha256-iA8DqS+W2fWTfR+nNJSvMHqQ+4NpYMRT3b+2zS6JTvE=",
+ "rev": "50f8f1c8bc019a4c0fd098b9ac674b94cfc6af0d",
+ "revCount": 15434,
"type": "tarball",
- "url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.533189%2Brev-87828a0e03d1418e848d3dd3f3014a632e4a4f64/018b0dc8-e84f-7c59-b5d6-16849c3b2074/source.tar.gz"
+ "url": "https://api.flakehub.com/f/pinned/NixOS/nix/2.19.2/018c1be0-1b88-7682-b3bf-948ec82d0a0b/source.tar.gz"
},
"original": {
"type": "tarball",
- "url": "https://flakehub.com/f/NixOS/nixpkgs/0.1.533189.tar.gz"
+ "url": "https://flakehub.com/f/NixOS/nix/2.19.tar.gz"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1698876495,
"narHash": "sha256-nsQo2/mkDUFeAjuu92p0dEqhRvHHiENhkKVIV1y0/Oo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9eb24edd6a0027fed010ccfe300a9734d029983c",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-23.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1701615100,
"narHash": "sha256-7VI84NGBvlCTduw2aHLVB62NvCiZUlALLqBe5v684Aw=",
"rev": "e9f06adb793d1cca5384907b3b8a4071d5d7cb19",
"revCount": 492472,
"type": "tarball",
"url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.2305.492472%2Brev-e9f06adb793d1cca5384907b3b8a4071d5d7cb19/018c3e8e-bc66-7f34-9054-4564bf44b6f8/source.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://flakehub.com/f/NixOS/nixpkgs/0.2305.tar.gz"
}
},
"root": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"nixpkgs": "nixpkgs",
"nix": "nix",
"nixpkgs": "nixpkgs_2",
"rust-overlay": "rust-overlay_2"
}
},

flake.nix

@@ -2,7 +2,7 @@
description = "GitHub Actions-powered Nix binary cache";
inputs = {
- nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.1.533189.tar.gz";
+ nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.2305.tar.gz";
rust-overlay = {
url = "github:oxalica/rust-overlay";
@@ -16,11 +16,13 @@
};
flake-compat.url = "https://flakehub.com/f/edolstra/flake-compat/1.0.1.tar.gz";
+ nix.url = "https://flakehub.com/f/NixOS/nix/2.19.tar.gz";
};
- outputs = { self, nixpkgs, ... }@inputs:
+ outputs = { self, nixpkgs, nix, ... }@inputs:
let
- overlays = [ inputs.rust-overlay.overlays.default ];
+ overlays = [ inputs.rust-overlay.overlays.default nix.overlays.default ];
supportedSystems = [
"aarch64-linux"
"x86_64-linux"
@@ -32,13 +34,35 @@
cranePkgs = pkgs.callPackage ./crane.nix {
inherit supportedSystems;
inherit (inputs) crane;
+ nix-flake = nix;
};
inherit (pkgs) lib;
});
in
{
packages = forEachSupportedSystem ({ pkgs, cranePkgs, ... }: rec {
inherit (cranePkgs) magic-nix-cache;
magic-nix-cache = (pkgs.pkgsStatic.callPackage ./package.nix {
rustPlatform = pkgs.pkgsStatic.rustPackages_1_70.rustPlatform;
nix = pkgs.pkgsStatic.nix.overrideAttrs (old: {
patches = (old.patches or []) ++ [ ./nix.patch ];
});
}).overrideAttrs (old: {
nativeBuildInputs = (old.nativeBuildInputs or []) ++ [
pkgs.nukeReferences
];
# Read by pkg_config crate (do some autodetection in build.rs?)
PKG_CONFIG_ALL_STATIC = "1";
"NIX_CFLAGS_LINK_${pkgs.pkgsStatic.stdenv.cc.suffixSalt}" = "-lc";
RUSTFLAGS = "-C relocation-model=static";
postFixup = (old.postFixup or "") + ''
rm -f $out/nix-support/propagated-build-inputs
nuke-refs $out/bin/magic-nix-cache
'';
});
#inherit (cranePkgs) magic-nix-cache;
default = magic-nix-cache;
});
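
Note: the PKG_CONFIG_ALL_STATIC variable set in the override above is read by the Rust pkg-config crate at build time, and the inline comment suggests moving this detection into build.rs. A minimal sketch of what such a build.rs could look like; probing the nix-store module (whose nix-store.pc is patched by nix.patch below) is an assumption, and no such file exists in this PR:

// build.rs (hypothetical sketch): the pkg-config crate honors
// PKG_CONFIG_ALL_STATIC from the environment, so this probe resolves
// static libraries when the flake sets that variable.
fn main() {
    pkg_config::Config::new()
        .probe("nix-store") // emits cargo:rustc-link-lib/-search directives
        .expect("pkg-config could not locate nix-store");
}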

Cargo.toml

@@ -22,12 +22,20 @@ daemonize = "0.5.0"
is_ci = "1.1.1"
sha2 = { version = "0.10.6", default-features = false }
reqwest = { version = "0.11.17", default-features = false, features = ["blocking", "rustls-tls-native-roots", "trust-dns"] }
+ netrc-rs = "0.1.2"
+ jwt = { version = "0.16" }
+ attic = { git = "ssh://git@github.com/DeterminateSystems/attic-priv", branch = "main" }
+ #attic = { path = "../../attic-priv/attic" }
+ attic-client = { git = "ssh://git@github.com/DeterminateSystems/attic-priv", branch = "main" }
+ #attic-client = { path = "../../attic-priv/client" }
+ indicatif = "0.17"
[dependencies.tokio]
version = "1.28.0"
default-features = false
features = [
"fs",
"macros",
"process",
"rt",
"rt-multi-thread",

api.rs

@@ -36,7 +36,7 @@ async fn workflow_start(Extension(state): Extension<State>) -> Result<Json<Workf
tracing::info!("Workflow started");
let mut original_paths = state.original_paths.lock().await;
- *original_paths = get_store_paths().await?;
+ *original_paths = get_store_paths(&state.store).await?;
Ok(Json(WorkflowStartResponse {
num_original_paths: original_paths.len(),
@@ -50,15 +50,28 @@ async fn workflow_finish(
tracing::info!("Workflow finished");
let original_paths = state.original_paths.lock().await;
- let final_paths = get_store_paths().await?;
+ let final_paths = get_store_paths(&state.store).await?;
let new_paths = final_paths
.difference(&original_paths)
.cloned()
.collect::<Vec<_>>();
tracing::info!("Pushing {} new paths", new_paths.len());
let store_uri = make_store_uri(&state.self_endpoint);
upload_paths(new_paths.clone(), &store_uri).await?;
if state.api.is_some() {
tracing::info!("Pushing {} new paths to GHA cache", new_paths.len());
let store_uri = make_store_uri(&state.self_endpoint);
upload_paths(new_paths.clone(), &store_uri).await?;
}
if let Some(attic_state) = &state.flakehub_state {
tracing::info!("Pushing {} new paths to Attic", new_paths.len());
let new_paths = new_paths
.iter()
.map(|path| state.store.follow_store_path(path).unwrap())
.collect();
crate::flakehub::push(attic_state, state.store.clone(), new_paths).await?;
}
let sender = state.shutdown_sender.lock().await.take().unwrap();
sender.send(()).unwrap();
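
The logic above amounts to a set difference between two snapshots of the store, taken at workflow start and workflow finish; only the paths created in between are pushed. A self-contained sketch with simplified types:

use std::collections::HashSet;
use std::path::PathBuf;

// Paths already present at workflow start are never re-uploaded.
fn paths_to_push(original: &HashSet<PathBuf>, finish: &HashSet<PathBuf>) -> Vec<PathBuf> {
    finish.difference(original).cloned().collect()
}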

binary_cache.rs

@@ -61,9 +61,11 @@ async fn get_narinfo(
return pull_through(&state, &path);
}
- if let Some(url) = state.api.get_file_url(&[&key]).await? {
- state.metrics.narinfos_served.incr();
- return Ok(Redirect::temporary(&url));
+ if let Some(api) = &state.api {
+ if let Some(url) = api.get_file_url(&[&key]).await? {
+ state.metrics.narinfos_served.incr();
+ return Ok(Redirect::temporary(&url));
+ }
}
let mut negative_cache = state.narinfo_nagative_cache.write().await;
@@ -88,13 +90,15 @@ async fn put_narinfo(
return Err(Error::BadRequest);
}
+ let api = state.api.as_ref().ok_or(Error::GHADisabled)?;
let store_path_hash = components[0].to_string();
let key = format!("{}.narinfo", store_path_hash);
- let allocation = state.api.allocate_file_with_random_suffix(&key).await?;
+ let allocation = api.allocate_file_with_random_suffix(&key).await?;
let stream = StreamReader::new(
body.map(|r| r.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))),
);
- state.api.upload_file(allocation, stream).await?;
+ api.upload_file(allocation, stream).await?;
state.metrics.narinfos_uploaded.incr();
state
@@ -107,7 +111,13 @@ async fn put_narinfo(
}
async fn get_nar(Extension(state): Extension<State>, Path(path): Path<String>) -> Result<Redirect> {
- if let Some(url) = state.api.get_file_url(&[&path]).await? {
+ if let Some(url) = state
+ .api
+ .as_ref()
+ .ok_or(Error::GHADisabled)?
+ .get_file_url(&[&path])
+ .await?
+ {
state.metrics.nars_served.incr();
return Ok(Redirect::temporary(&url));
}
@@ -124,11 +134,13 @@ async fn put_nar(
Path(path): Path<String>,
body: BodyStream,
) -> Result<()> {
- let allocation = state.api.allocate_file_with_random_suffix(&path).await?;
+ let api = state.api.as_ref().ok_or(Error::GHADisabled)?;
+ let allocation = api.allocate_file_with_random_suffix(&path).await?;
let stream = StreamReader::new(
body.map(|r| r.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))),
);
- state.api.upload_file(allocation, stream).await?;
+ api.upload_file(allocation, stream).await?;
state.metrics.nars_uploaded.incr();
Ok(())
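
With state.api now an Option<Api>, each handler picks a failure mode: the write endpoints (PUT narinfo/nar) hard-fail when the GHA cache is disabled, while the read endpoints skip the lookup and fall through to the negative cache. The pattern in miniature (Api and Error stubbed so the sketch stands alone):

struct Api;
enum Error {
    GHADisabled,
}

// Used by the PUT handlers above: no GHA cache means a hard error.
fn require_api(api: Option<&Api>) -> Result<&Api, Error> {
    api.ok_or(Error::GHADisabled)
}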

error.rs

@@ -24,6 +24,9 @@ pub enum Error {
#[error("Failed to upload paths")]
FailedToUpload,
+ #[error("GHA cache is disabled")]
+ GHADisabled,
}
impl IntoResponse for Error {

flakehub.rs (new file)

@@ -0,0 +1,278 @@
use crate::error::Result;
use attic::api::v1::cache_config::{CreateCacheRequest, KeypairConfig};
use attic::cache::CacheSliceIdentifier;
use attic::nix_store::{NixStore, StorePath};
use attic_client::{
api::{ApiClient, ApiError},
config::ServerConfig,
push::{PushConfig, Pusher},
};
use serde::Deserialize;
use std::env;
use std::path::Path;
use std::str::FromStr;
use std::sync::Arc;
use tokio::fs::File;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
const JWT_PREFIX: &str = "flakehub1_";
const USER_AGENT: &str = "magic-nix-cache";
pub struct State {
cache: CacheSliceIdentifier,
pub substituter: String,
api: ApiClient,
}
pub async fn init_cache(
flakehub_api_server: &str,
flakehub_api_server_netrc: &Path,
flakehub_cache_server: &str,
) -> Result<State> {
// Parse netrc to get the credentials for api.flakehub.com.
let netrc = {
let mut netrc_file = File::open(flakehub_api_server_netrc).await?;
let mut netrc_contents = String::new();
netrc_file.read_to_string(&mut netrc_contents).await?;
netrc_rs::Netrc::parse(netrc_contents, false).unwrap()
};
let netrc_entry = {
netrc
.machines
.iter()
.find(|machine| {
machine.name.as_ref().unwrap()
== &reqwest::Url::parse(flakehub_api_server)
.unwrap()
.host()
.unwrap()
.to_string()
})
.unwrap()
.to_owned()
};
let flakehub_cache_server_hostname = reqwest::Url::parse(flakehub_cache_server)
.unwrap()
.host()
.unwrap()
.to_string();
// Append an entry for the FlakeHub cache server to netrc.
if !netrc
.machines
.iter()
.any(|machine| machine.name.as_ref().unwrap() == &flakehub_cache_server_hostname)
{
let mut netrc_file = tokio::fs::OpenOptions::new()
.create(false)
.append(true)
.open(flakehub_api_server_netrc)
.await
.unwrap();
netrc_file
.write_all(
format!(
"\nmachine {} password {}\n\n",
flakehub_cache_server_hostname,
netrc_entry.password.as_ref().unwrap(),
)
.as_bytes(),
)
.await
.unwrap();
}
// Get the cache we're supposed to use.
let expected_cache_name = {
let github_repo = env::var("GITHUB_REPOSITORY")
.expect("GITHUB_REPOSITORY environment variable is not set");
let url = format!("{}/project/{}", flakehub_api_server, github_repo,);
let response = reqwest::Client::new()
.get(&url)
.header("User-Agent", USER_AGENT)
.basic_auth(
netrc_entry.login.as_ref().unwrap(),
netrc_entry.password.as_ref(),
)
.send()
.await
.unwrap();
if response.status().is_success() {
#[derive(Deserialize)]
struct ProjectInfo {
organization_uuid_v7: String,
project_uuid_v7: String,
}
let project_info = response.json::<ProjectInfo>().await.unwrap();
let expected_cache_name = format!(
"{}:{}",
project_info.organization_uuid_v7, project_info.project_uuid_v7,
);
tracing::info!("Want to use cache {:?}.", expected_cache_name);
Some(expected_cache_name)
} else {
tracing::error!(
"Failed to get project info from {}: {}",
url,
response.status()
);
None
}
};
// Get a token for creating and pushing to the FlakeHub binary cache.
let (known_caches, token) = {
let url = format!("{}/token/create/cache", flakehub_api_server);
let request = reqwest::Client::new()
.post(&url)
.header("User-Agent", USER_AGENT)
.basic_auth(
netrc_entry.login.as_ref().unwrap(),
netrc_entry.password.as_ref(),
);
let response = request.send().await.unwrap();
if !response.status().is_success() {
panic!(
"Failed to get FlakeHub binary cache creation token from {}: {}",
url,
response.status()
);
}
#[derive(Deserialize)]
struct Response {
token: String,
}
let token = response.json::<Response>().await.unwrap().token;
// Parse the JWT to get the list of caches to which we have access.
let jwt = token.strip_prefix(JWT_PREFIX).unwrap();
let jwt_parsed: jwt::Token<jwt::Header, serde_json::Map<String, serde_json::Value>, _> =
jwt::Token::parse_unverified(jwt).unwrap();
let known_caches = jwt_parsed
.claims()
.get("https://cache.flakehub.com/v1")
.unwrap()
.get("caches")
.unwrap()
.as_object()
.unwrap();
(known_caches.to_owned(), token)
};
// Use the expected cache if we have access to it, otherwise use
// the oldest cache to which we have access.
let cache_name = {
if expected_cache_name
.as_ref()
.map_or(false, |x| known_caches.get(x).is_some())
{
expected_cache_name.unwrap().to_owned()
} else {
let mut keys: Vec<_> = known_caches.keys().collect();
keys.sort();
keys.first()
.expect("FlakeHub did not return any cache for the calling user.")
.to_string()
}
};
let cache = CacheSliceIdentifier::from_str(&cache_name).unwrap();
tracing::info!("Using cache {}.", cache);
// Create the cache.
let api = ApiClient::from_server_config(ServerConfig {
endpoint: flakehub_cache_server.to_owned(),
token: Some(token.to_owned()),
})
.unwrap();
let request = CreateCacheRequest {
keypair: KeypairConfig::Generate,
is_public: false,
priority: 39,
store_dir: "/nix/store".to_owned(),
upstream_cache_key_names: vec!["cache.nixos.org-1".to_owned()], // FIXME: do we want this?
};
if let Err(err) = api.create_cache(&cache, request).await {
match err.downcast_ref::<ApiError>() {
Some(ApiError::Structured(x)) if x.error == "CacheAlreadyExists" => {
tracing::info!("Cache {} already exists.", cache_name);
}
_ => {
panic!("{:?}", err);
}
}
} else {
tracing::info!("Created cache {} on {}.", cache_name, flakehub_cache_server);
}
Ok(State {
cache,
substituter: flakehub_cache_server.to_owned(),
api,
})
}
pub async fn push(state: &State, store: Arc<NixStore>, store_paths: Vec<StorePath>) -> Result<()> {
let cache_config = state.api.get_cache_config(&state.cache).await.unwrap();
let push_config = PushConfig {
num_workers: 5, // FIXME: use number of CPUs?
force_preamble: false,
};
let mp = indicatif::MultiProgress::new();
let pusher = Pusher::new(
store.clone(),
state.api.clone(),
state.cache.to_owned(),
cache_config,
mp,
push_config,
);
let plan = pusher.plan(store_paths, false, false).await.unwrap();
for (_, path_info) in plan.store_path_map {
pusher.queue(path_info).await.unwrap();
}
let results = pusher.wait().await;
for (path, res) in &results {
if let Err(err) = res {
tracing::error!(
"Upload of {} failed: {}",
store.get_full_path(path).display(),
err
);
}
}
tracing::info!(
"Uploaded {} paths.",
results.iter().filter(|(_path, res)| res.is_ok()).count()
);
Ok(())
}
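
The least obvious step in init_cache is deriving the set of usable caches from the token itself: the FlakeHub token is "flakehub1_" plus a JWT, and the caches the holder may use sit under the "https://cache.flakehub.com/v1" claim. That introspection, isolated as a sketch (same jwt crate as above; error handling reduced to expects):

fn caches_from_token(token: &str) -> Vec<String> {
    let jwt = token.strip_prefix("flakehub1_").expect("unexpected token prefix");
    let parsed: jwt::Token<jwt::Header, serde_json::Map<String, serde_json::Value>, _> =
        jwt::Token::parse_unverified(jwt).expect("malformed JWT");
    parsed
        .claims()
        .get("https://cache.flakehub.com/v1")
        .and_then(|claim| claim.get("caches"))
        .and_then(|caches| caches.as_object())
        .map(|caches| caches.keys().cloned().collect())
        .unwrap_or_default()
}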

main.rs

@@ -16,16 +16,19 @@
mod api;
mod binary_cache;
mod error;
+ mod flakehub;
mod telemetry;
mod util;
use std::collections::HashSet;
- use std::fs::{self, File};
+ use std::fs::{self, create_dir_all, File, OpenOptions};
use std::io::Write;
use std::net::SocketAddr;
use std::os::fd::OwnedFd;
- use std::path::PathBuf;
+ use std::path::{Path, PathBuf};
use std::sync::Arc;
+ use ::attic::nix_store::NixStore;
use axum::{extract::Extension, routing::get, Router};
use clap::Parser;
use daemonize::Daemonize;
@@ -84,13 +87,36 @@ struct Args {
/// This is for use in the GitHub Action only.
#[arg(long, hide = true)]
daemon_dir: Option<PathBuf>,
+ /// The FlakeHub API server.
+ #[arg(long)]
+ flakehub_api_server: Option<String>,
+ /// The path of the `netrc` file that contains the FlakeHub JWT token.
+ #[arg(long)]
+ flakehub_api_server_netrc: Option<PathBuf>,
+ /// The FlakeHub binary cache server.
+ #[arg(long)]
+ flakehub_cache_server: Option<String>,
+ /// The location of `nix.conf`.
+ #[arg(long)]
+ nix_conf: PathBuf,
+ /// Whether to use the GHA cache.
+ #[arg(long)]
+ use_gha_cache: bool,
+ /// Whether to use the FlakeHub binary cache.
+ #[arg(long)]
+ use_flakehub: bool,
}
/// The global server state.
#[derive(Debug)]
struct StateInner {
/// The GitHub Actions Cache API.
- api: Api,
+ api: Option<Api>,
/// The upstream cache.
upstream: Option<String>,
@@ -111,6 +137,12 @@ struct StateInner {
/// Metrics for sending to perf at shutdown
metrics: telemetry::TelemetryReport,
+ /// Connection to the local Nix store.
+ store: Arc<NixStore>,
+ /// FlakeHub cache state.
+ flakehub_state: Option<flakehub::State>,
}
fn main() {
@@ -118,17 +150,97 @@ fn main() {
let args = Args::parse();
- let credentials = if let Some(credentials_file) = &args.credentials_file {
- tracing::info!("Loading credentials from {:?}", credentials_file);
- let bytes = fs::read(credentials_file).expect("Failed to read credentials file");
+ create_dir_all(Path::new(&args.nix_conf).parent().unwrap())
+ .expect("Creating parent directories of nix.conf");
- serde_json::from_slice(&bytes).expect("Failed to deserialize credentials file")
+ let mut nix_conf = OpenOptions::new()
+ .create(true)
+ .append(true)
+ .open(args.nix_conf)
+ .expect("Opening nix.conf");
+ let store = Arc::new(NixStore::connect().expect("Connecting to the Nix store"));
+ let flakehub_state = if args.use_flakehub {
+ let flakehub_cache_server = args
+ .flakehub_cache_server
+ .expect("--flakehub-cache-server is required");
+ let flakehub_api_server_netrc = args
+ .flakehub_api_server_netrc
+ .expect("--flakehub-api-server-netrc is required");
+ let rt = Runtime::new().unwrap();
+ match rt.block_on(async {
+ flakehub::init_cache(
+ &args
+ .flakehub_api_server
+ .expect("--flakehub-api-server is required"),
+ &flakehub_api_server_netrc,
+ &flakehub_cache_server,
+ )
+ .await
+ }) {
+ Ok(state) => {
+ nix_conf
+ .write_all(
+ format!(
+ "extra-substituters = {}?trusted=1\nnetrc-file = {}\n",
+ &flakehub_cache_server,
+ flakehub_api_server_netrc.display()
+ )
+ .as_bytes(),
+ )
+ .expect("Writing to nix.conf");
+ tracing::info!("Attic cache is enabled.");
+ Some(state)
+ }
+ Err(err) => {
+ tracing::error!("Attic cache initialization failed: {}", err);
+ None
+ }
+ }
} else {
- tracing::info!("Loading credentials from environment");
- Credentials::load_from_env()
- .expect("Failed to load credentials from environment (see README.md)")
+ tracing::info!("Attic cache is disabled.");
+ None
};
+ let api = if args.use_gha_cache {
+ let credentials = if let Some(credentials_file) = &args.credentials_file {
+ tracing::info!("Loading credentials from {:?}", credentials_file);
+ let bytes = fs::read(credentials_file).expect("Failed to read credentials file");
+ serde_json::from_slice(&bytes).expect("Failed to deserialize credentials file")
+ } else {
+ tracing::info!("Loading credentials from environment");
+ Credentials::load_from_env()
+ .expect("Failed to load credentials from environment (see README.md)")
+ };
+ let mut api = Api::new(credentials).expect("Failed to initialize GitHub Actions Cache API");
+ if let Some(cache_version) = &args.cache_version {
+ api.mutate_version(cache_version.as_bytes());
+ }
+ nix_conf
+ .write_all(format!("extra-substituters = http://{}?trusted=1&compression=zstd&parallel-compression=true&priority=1\n", args.listen).as_bytes())
+ .expect("Writing to nix.conf");
+ tracing::info!("GitHub Action cache is enabled.");
+ Some(api)
+ } else {
+ tracing::info!("GitHub Action cache is disabled.");
+ None
+ };
+ nix_conf
+ .write_all("fallback = true\n".as_bytes())
+ .expect("Writing to nix.conf");
+ drop(nix_conf);
let diagnostic_endpoint = match args.diagnostic_endpoint.as_str() {
"" => {
tracing::info!("Diagnostics disabled.");
@@ -137,12 +249,6 @@ fn main() {
url => Some(url),
};
- let mut api = Api::new(credentials).expect("Failed to initialize GitHub Actions Cache API");
- if let Some(cache_version) = &args.cache_version {
- api.mutate_version(cache_version.as_bytes());
- }
let (shutdown_sender, shutdown_receiver) = oneshot::channel();
let state = Arc::new(StateInner {
@@ -153,6 +259,8 @@ fn main() {
narinfo_nagative_cache: RwLock::new(HashSet::new()),
self_endpoint: args.listen.to_owned(),
metrics: telemetry::TelemetryReport::new(),
+ store,
+ flakehub_state,
});
let app = Router::new()
@@ -187,12 +295,13 @@ fn main() {
.with_graceful_shutdown(async move {
shutdown_receiver.await.ok();
tracing::info!("Shutting down");
+ if let Some(diagnostic_endpoint) = diagnostic_endpoint {
+ state.metrics.send(diagnostic_endpoint).await;
+ }
})
.await;
- if let Some(diagnostic_endpoint) = diagnostic_endpoint {
- state.metrics.send(diagnostic_endpoint);
- }
ret.unwrap()
});
}
@@ -220,7 +329,9 @@ async fn dump_api_stats<B>(
request: axum::http::Request<B>,
next: axum::middleware::Next<B>,
) -> axum::response::Response {
- state.api.dump_stats();
+ if let Some(api) = &state.api {
+ api.dump_stats();
+ }
next.run(request).await
}
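
Both cache backends get wired into Nix the same way: the daemon appends settings to the nix.conf fragment passed via --nix-conf, and the surrounding action points Nix at that file. The essence, as a sketch (function name and paths are illustrative; the URL parameter is taken from the code above):

use std::fs::OpenOptions;
use std::io::Write;
use std::path::Path;

// Append an extra substituter; ?trusted=1 marks it trusted for the daemon.
fn add_substituter(nix_conf: &Path, url: &str) -> std::io::Result<()> {
    let mut f = OpenOptions::new().create(true).append(true).open(nix_conf)?;
    writeln!(f, "extra-substituters = {url}?trusted=1")
}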

telemetry.rs

@@ -1,7 +1,6 @@
use std::env;
use std::time::SystemTime;
- use is_ci;
use sha2::{Digest, Sha256};
/// A telemetry report to measure the effectiveness of the Magic Nix Cache
@@ -34,11 +33,11 @@ pub struct TelemetryReport {
#[derive(Debug, Default, serde::Serialize)]
pub struct Metric(std::sync::atomic::AtomicUsize);
impl Metric {
- pub fn incr(&self) -> () {
+ pub fn incr(&self) {
self.0.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
}
- pub fn set(&self, val: usize) -> () {
+ pub fn set(&self, val: usize) {
self.0.store(val, std::sync::atomic::Ordering::Relaxed);
}
}
@@ -57,7 +56,7 @@ impl TelemetryReport {
}
}
- pub fn send(&self, endpoint: &str) {
+ pub async fn send(&self, endpoint: &str) {
if let Some(start_time) = self.start_time {
self.elapsed_seconds.set(
SystemTime::now()
@@ -70,12 +69,13 @@ impl TelemetryReport {
}
if let Ok(serialized) = serde_json::to_string_pretty(&self) {
- let _ = reqwest::blocking::Client::new()
+ let _ = reqwest::Client::new()
.post(endpoint)
.body(serialized)
.header("Content-Type", "application/json")
.timeout(std::time::Duration::from_millis(3000))
- .send();
+ .send()
+ .await;
}
}
}
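
The send() change exists because the shutdown path now awaits the report inside the tokio runtime, where the blocking reqwest client would panic; the async client with a per-request timeout replaces it. Reduced to a sketch:

// Telemetry must never fail the run, so the result is discarded.
async fn post_report(endpoint: &str, serialized: String) {
    let _ = reqwest::Client::new()
        .post(endpoint)
        .header("Content-Type", "application/json")
        .timeout(std::time::Duration::from_millis(3000))
        .body(serialized)
        .send()
        .await;
}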

util.rs

@@ -3,13 +3,15 @@
use std::collections::HashSet;
use std::path::{Path, PathBuf};
+ use attic::nix_store::NixStore;
use tokio::{fs, process::Command};
use crate::error::{Error, Result};
/// Returns the list of store paths that are currently present.
- pub async fn get_store_paths() -> Result<HashSet<PathBuf>> {
- let store_dir = Path::new("/nix/store");
+ pub async fn get_store_paths(store: &NixStore) -> Result<HashSet<PathBuf>> {
+ // FIXME: use the Nix API.
+ let store_dir = store.store_dir();
let mut listing = fs::read_dir(store_dir).await?;
let mut paths = HashSet::new();
while let Some(entry) = listing.next_entry().await? {
@@ -18,7 +20,7 @@ pub async fn get_store_paths() -> Result<HashSet<PathBuf>> {
if let Some(extension) = file_name.extension() {
match extension.to_str() {
- None | Some("drv") | Some("lock") => {
+ None | Some("drv") | Some("lock") | Some("chroot") => {
// Malformed or not interesting
continue;
}
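
The .chroot addition extends the extension filter that decides which store entries count when snapshotting the store. Isolated as a sketch:

use std::path::Path;

fn is_interesting(file_name: &Path) -> bool {
    match file_name.extension() {
        None => true, // no extension: an ordinary store path
        // non-UTF-8 or known-uninteresting extensions are skipped
        Some(ext) => !matches!(ext.to_str(), None | Some("drv") | Some("lock") | Some("chroot")),
    }
}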

nix.patch (new file, 48 lines)

@@ -0,0 +1,48 @@
diff --git a/mk/libraries.mk b/mk/libraries.mk
index 6541775f329..5118b957608 100644
--- a/mk/libraries.mk
+++ b/mk/libraries.mk
@@ -130,7 +130,15 @@ define build-library
$(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS)
- $(1)_INSTALL_PATH := $$(libdir)/$$($(1)_NAME).a
+ $(1)_INSTALL_PATH := $(DESTDIR)$$($(1)_INSTALL_DIR)/$$($(1)_NAME).a
+
+ $$(eval $$(call create-dir, $$($(1)_INSTALL_DIR)))
+
+ $$($(1)_INSTALL_PATH): $$($(1)_OBJS) | $(DESTDIR)$$($(1)_INSTALL_DIR)/
+ +$$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$^
+ $$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o
+
+ install: $$($(1)_INSTALL_PATH)
endif
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
index 8f28bec6c1d..0d41e3c2cac 100644
--- a/src/libstore/local.mk
+++ b/src/libstore/local.mk
@@ -69,6 +69,13 @@ $(d)/build.cc:
clean-files += $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh
+$(d)/nix-store.pc: $(d)/nix-store.pc.in
+ $(trace-gen) rm -f $@ && ./config.status --quiet --file=$@
+ifeq ($(BUILD_SHARED_LIBS), 1)
+ sed -i 's|@LIBS_PRIVATE@||' $@
+else
+ sed -i 's|@LIBS_PRIVATE@|Libs.private: $(libstore_LDFLAGS) $(libstore_LDFLAGS_PROPAGATED) $(foreach lib, $(libstore_LIBS), $($(lib)_LDFLAGS))|' $@
+endif
$(eval $(call install-file-in, $(d)/nix-store.pc, $(libdir)/pkgconfig, 0644))
$(foreach i, $(wildcard src/libstore/builtins/*.hh), \
diff --git a/src/libstore/nix-store.pc.in b/src/libstore/nix-store.pc.in
index 6d67b1e0380..738991d307b 100644
--- a/src/libstore/nix-store.pc.in
+++ b/src/libstore/nix-store.pc.in
@@ -7,3 +7,4 @@ Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Libs: -L${libdir} -lnixstore -lnixutil
Cflags: -I${includedir}/nix -std=c++2a
+@LIBS_PRIVATE@

package.nix (new file, 41 lines)

@@ -0,0 +1,41 @@
{ lib, stdenv, rustPlatform
, pkg-config
, installShellFiles
, nix
, boost
, darwin
}:
let
ignoredPaths = [ ".github" "target" "book" ];
in rustPlatform.buildRustPackage rec {
pname = "magic-nix-cache";
version = "0.1.0";
src = lib.cleanSourceWith {
filter = name: type: !(type == "directory" && builtins.elem (baseNameOf name) ignoredPaths);
src = lib.cleanSource ./.;
};
nativeBuildInputs = [
pkg-config
installShellFiles
];
buildInputs = [
nix boost
] ++ lib.optionals stdenv.isDarwin (with darwin.apple_sdk.frameworks; [
SystemConfiguration
]);
cargoLock = {
lockFile = ./Cargo.lock;
allowBuiltinFetchGit = true;
};
ATTIC_DISTRIBUTOR = "attic";
# Recursive Nix is not stable yet
doCheck = false;
}