Kind of works

Zhaofeng Li 2023-05-08 03:48:11 -06:00
commit 1ad1349d96
17 changed files with 1173 additions and 0 deletions

5
.envrc Normal file

@@ -0,0 +1,5 @@
if ! has nix_direnv_version || ! nix_direnv_version 2.1.1; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.1.1/direnvrc" "sha256-b6qJ4r34rbE23yWjMqbmu3ia2z4b2wIlZUksBke/ol0="
fi
use_flake

33
.github/workflows/keygen.yaml vendored Normal file

@@ -0,0 +1,33 @@
name: Generate Credentials
on:
- workflow_dispatch
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Nix
uses: DeterminateSystems/nix-installer-action@v2
- name: Expose GitHub Runtime
uses: crazy-max/ghaction-github-runtime@v2
- name: Dump credentials
run: |
if [[ -z "$AGE_PUBLIC_KEY" ]]; then
>&2 echo 'The AGE_PUBLIC_KEY secret must be present.'
>&2 echo 'You can generate one with `age-keygen -o key.txt`.'
exit 1
fi
cat >creds.json <<EOF
{
"ACTIONS_CACHE_URL": "${ACTIONS_CACHE_URL}",
"ACTIONS_RUNTIME_URL": "${ACTIONS_RUNTIME_URL}",
"ACTIONS_RUNTIME_TOKEN": "${ACTIONS_RUNTIME_TOKEN}"
}
EOF
encrypted=$(cat creds.json | nix develop .#keygen --command -- age -r "$AGE_PUBLIC_KEY" | base64 -w0)
echo 'Use the following command to decrypt:'
echo "echo '$encrypted' | base64 -d | age --decrypt -i key.txt >creds.json"
env:
AGE_PUBLIC_KEY: ${{ secrets.AGE_PUBLIC_KEY }}

7
.gitignore vendored Normal file

@@ -0,0 +1,7 @@
.direnv
/target
/Cargo.lock
key.txt
creds.json

14
Cargo.toml Normal file

@@ -0,0 +1,14 @@
[workspace]
members = [
"gha-cache",
"nix-actions-cache",
]
[profile.release]
opt-level = 'z'
strip = true
lto = true
panic = "abort"
incremental = false
codegen-units = 1

26
README.md Normal file

@@ -0,0 +1,26 @@
# nix-actions-cache
`nix-actions-cache` is a minimal Nix Binary Cache server backed by [the GitHub Actions Cache](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows).
It can be compiled into a ~3.5MB static binary for distribution, allowing it to start prefetching NARs used in a previous run even _before_ Nix is installed (not implemented yet).
## Development
This project depends on internal APIs used by the GitHub Actions Cache.
See `gha-cache/README.md` for more details on how to obtain the required tokens.
```
# Run the server with credentials from creds.json
cargo run -- -c creds.json

# Build static release binaries
cargo build --release --target x86_64-unknown-linux-musl
cargo build --release --target aarch64-unknown-linux-musl

# Push a store path to the local server, then substitute it back
nix copy --to 'http://127.0.0.1:3000' $(which bash)
nix-store --store $PWD/test-root --extra-substituters 'http://localhost:3000' --option require-sigs false -r $(which bash)
```
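The `creds.json` passed via `-c` is expected to contain the values dumped by the `keygen` workflow (see `gha-cache/README.md`). A hypothetical example with placeholder values; only `ACTIONS_CACHE_URL` and `ACTIONS_RUNTIME_TOKEN` appear to be consumed, and unknown fields are ignored:
```
{
  "ACTIONS_CACHE_URL": "https://<cache endpoint>/",
  "ACTIONS_RUNTIME_URL": "https://<runtime endpoint>/",
  "ACTIONS_RUNTIME_TOKEN": "<runtime token>"
}
```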
## TODO
- [ ] Make a GitHub Action and dogfood
- [ ] Parallelize upload
- [ ] Make sure that the corresponding NAR exists before answering a `.narinfo` request
- [ ] Keep in-memory cache of what's present
- [ ] Record what's accessed
- [ ] Prefetch previously-accessed NARs

155
flake.lock Normal file

@@ -0,0 +1,155 @@
{
"nodes": {
"crane": {
"inputs": {
"flake-compat": [
"flake-compat"
],
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
],
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1683505101,
"narHash": "sha256-VBU64Jfu2V4sUR5+tuQS9erBRAe/QEYUxdVMcJGMZZs=",
"owner": "ipetkov",
"repo": "crane",
"rev": "7b5bd9e5acb2bb0cfba2d65f34d8568a894cdb6c",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1683408522,
"narHash": "sha256-9kcPh6Uxo17a3kK3XCHhcWiV1Yu1kYj22RHiymUhMkU=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "897876e4c484f1e8f92009fd11b7d988a121a4e7",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay_2"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": [
"crane",
"flake-utils"
],
"nixpkgs": [
"crane",
"nixpkgs"
]
},
"locked": {
"lastModified": 1683080331,
"narHash": "sha256-nGDvJ1DAxZIwdn6ww8IFwzoHb2rqBP4wv/65Wt5vflk=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "d59c3fa0cba8336e115b376c2d9e91053aa59e56",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"rust-overlay_2": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1683512408,
"narHash": "sha256-QMJGp/37En+d5YocJuSU89GL14bBYkIJQ6mqhRfqkkc=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "75b07756c3feb22cf230e75fb064c1b4c725b9bc",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

97
flake.nix Normal file

@@ -0,0 +1,97 @@
{
description = "GitHub Actions-powered Nix binary cache";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-utils.follows = "flake-utils";
};
crane = {
url = "github:ipetkov/crane";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-compat.follows = "flake-compat";
inputs.flake-utils.follows = "flake-utils";
};
flake-compat = {
url = "github:edolstra/flake-compat";
flake = false;
};
};
outputs = { self, nixpkgs, flake-utils, rust-overlay, crane, ... }: let
supportedSystems = flake-utils.lib.defaultSystems;
nightlyVersion = "2023-05-01";
makeCranePkgs = pkgs: let
craneLib = crane.mkLib pkgs;
in pkgs.callPackage ./crane.nix { inherit craneLib; };
in flake-utils.lib.eachSystem supportedSystems (system: let
pkgs = import nixpkgs {
inherit system;
overlays = [
rust-overlay.overlay
];
};
inherit (pkgs) lib;
crossPlatforms = let
makeCrossPlatform = crossSystem: let
pkgsCross = if crossSystem == system then pkgs else import nixpkgs {
inherit system crossSystem;
overlays = [];
};
rustTargetSpec = pkgs.rust.toRustTargetSpec pkgsCross.pkgsStatic.stdenv.hostPlatform;
rustTargetSpecUnderscored = builtins.replaceStrings [ "-" ] [ "_" ] rustTargetSpec;
in {
inherit rustTargetSpec;
cc = "${pkgsCross.stdenv.cc}/bin/${pkgsCross.stdenv.cc.targetPrefix}cc";
cargoLinkerEnv = lib.strings.toUpper "CARGO_TARGET_${rustTargetSpecUnderscored}_LINKER";
cargoCcEnv = "CC_${rustTargetSpecUnderscored}"; # for ring
};
systems = lib.filter (lib.hasInfix "linux") supportedSystems;
in map makeCrossPlatform systems;
rustNightly = pkgs.rust-bin.nightly.${nightlyVersion}.default.override {
extensions = [ "rust-src" "rust-analyzer-preview" ];
targets = map (p: p.rustTargetSpec) crossPlatforms;
};
cargoCrossEnvs = lib.listToAttrs (lib.flatten (map (p: [
{
name = p.cargoCcEnv;
value = p.cc;
}
{
name = p.cargoLinkerEnv;
value = p.cc;
}
]) crossPlatforms));
in {
devShells = {
default = pkgs.mkShell ({
packages = with pkgs; [
bashInteractive
rustNightly
cargo-bloat
cargo-edit
cargo-udeps
age
];
} // cargoCrossEnvs);
keygen = pkgs.mkShellNoCC {
packages = with pkgs; [
age
];
};
};
});
}

31
gha-cache/Cargo.toml Normal file

@@ -0,0 +1,31 @@
[package]
name = "gha-cache"
version = "0.1.0"
edition = "2021"
[dependencies]
async-trait = "0.1.68"
bytes = "1.4.0"
derivative = "2.2.0"
hex = "0.4.3"
rand = "0.8.5"
reqwest = { version = "0.11.17", default-features = false, features = ["json", "rustls-tls-native-roots", "stream"] }
serde = { version = "1.0.162", features = ["derive"] }
serde_json = "1.0.96"
sha2 = "0.10.6"
thiserror = "1.0.40"
[dependencies.tokio]
version = "1.28.0"
features = [
"fs",
"io-util",
"macros",
"process",
"rt",
"rt-multi-thread",
"sync",
]
[dev-dependencies]
anyhow = "1.0.71"

36
gha-cache/README.md Normal file

@@ -0,0 +1,36 @@
# gha-cache
`gha-cache` provides an async API to the GitHub Actions Cache API.
You can upload blobs with `AsyncRead` streams and obtain presigned URLs to download them.
## Introduction
The GitHub Actions Cache (hereinafter GHAC) service stores binary blobs [identified](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#matching-a-cache-key) by the following 3-tuple:
- **Cache Key**: The developer-specified name of the blob.
- **Cache Version**: A string identifying conditions that affect compatibility of the blob. It works like a namespace.
  - The official implementation uses a SHA256 hash of the paths and the compression method, but it can be anything.
  - In this crate, we let the user feed in arbitrary bytes to mutate the hash (see the sketch after this list).
- **Cache Scope**: The branch containing the workflow run that uploaded the blob.
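A minimal sketch of deriving a version namespace from arbitrary bytes. The helper name `make_api` and the inputs fed into the hash are only illustrative, and `Credentials::load_from_env` assumes the usual `ACTIONS_CACHE_URL` / `ACTIONS_RUNTIME_TOKEN` environment variables are set:
```
use gha_cache::{Api, Credentials};

// Sketch: namespace caches by anything that affects compatibility.
fn make_api() -> Result<Api, gha_cache::api::Error> {
    let credentials = Credentials::load_from_env()
        .expect("ACTIONS_CACHE_URL and ACTIONS_RUNTIME_TOKEN must be set");
    let mut api = Api::new(credentials)?;
    // Each call folds more bytes into the version hash; blobs uploaded
    // under different versions never collide, even with the same key.
    api.mutate_version(b"/nix/store");
    api.mutate_version(b"zstd");
    Ok(api)
}
```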
### APIs
Two sets of APIs are in use:
- [GitHub Actions Cache API](https://github.com/actions/toolkit/blob/457303960f03375db6f033e214b9f90d79c3fe5c/packages/cache/src/internal/cacheHttpClient.ts#L38): Private API used by GHAC. This API allows uploading and downloading blobs.
- Endpoint: `$ACTIONS_CACHE_URL`
- Token: `$ACTIONS_RUNTIME_TOKEN`
- [GitHub REST API](https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#delete-github-actions-caches-for-a-repository-using-a-cache-key): Public API. This API allows listing and deleting blobs.
- Endpoint: `$GITHUB_API_URL` / `https://api.github.com`
- Token: `${{ secrets.GITHUB_TOKEN }}`
This crate supports only the former API.
We should contribute support for the latter to [Octocrab](https://github.com/XAMPPRocky/octocrab).
## Quick Start
Since GHAC uses private APIs authenticated with special tokens, we need to obtain them from a workflow run.
The easiest way is with the `keygen` workflow in this repo.
Generate an `age` encryption key with `age-keygen -o key.txt`, and add the public key as a repository secret named `AGE_PUBLIC_KEY`.
Then trigger the `keygen` workflow, which prints a command you can use to decrypt the credentials.
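Once you have a decrypted `creds.json`, a rough end-to-end sketch of using the crate looks like this (assuming `tokio`, `serde_json`, and `anyhow` are available, as elsewhere in this workspace; the key name `hello` is arbitrary):
```
use gha_cache::{Api, Credentials};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // creds.json only needs ACTIONS_CACHE_URL and ACTIONS_RUNTIME_TOKEN;
    // unknown fields such as ACTIONS_RUNTIME_URL are ignored.
    let credentials: Credentials =
        serde_json::from_slice(&tokio::fs::read("creds.json").await?)?;
    let api = Api::new(credentials)?;

    // Reserve a cache entry under a random-suffixed key and upload a blob
    // from any AsyncRead stream (here, an in-memory byte slice).
    let allocation = api.allocate_file_with_random_suffix("hello").await?;
    api.upload_file(allocation, &b"meow"[..]).await?;

    // Look the blob back up by key prefix and get a presigned download URL.
    if let Some(url) = api.get_file_url(&["hello"]).await? {
        println!("download from: {url}");
    }

    Ok(())
}
```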

448
gha-cache/src/api.rs Normal file

@@ -0,0 +1,448 @@
//! GitHub Actions Cache API client.
//!
//! We expose a high-level API that deals with "files."
use async_trait::async_trait;
use bytes::{Bytes, BytesMut};
use rand::{distributions::Alphanumeric, Rng};
use reqwest::{
header::{HeaderMap, HeaderValue, CONTENT_RANGE, CONTENT_TYPE},
Client, StatusCode,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use sha2::{Digest, Sha256};
use thiserror::Error;
use tokio::io::AsyncRead;
use crate::credentials::Credentials;
use crate::util::read_chunk_async;
/// The API version we implement.
///
/// <https://github.com/actions/toolkit/blob/0d44da2b87f9ed48ae889d15c6cc19667aa37ec0/packages/cache/src/internal/cacheHttpClient.ts>
const API_VERSION: &str = "6.0-preview.1";
/// The User-Agent string for the client.
///
/// We want to be polite :)
const USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));
/// The default cache version/namespace.
const DEFAULT_VERSION: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));
/// The chunk size in bytes.
///
/// We greedily read this much from the input stream at a time.
const CHUNK_SIZE: usize = 8 * 1024 * 1024;
type Result<T> = std::result::Result<T, Error>;
/// An API error.
#[derive(Error, Debug)]
pub enum Error {
#[error("Failed to initialize the client")]
InitError(Box<dyn std::error::Error + Send + Sync>),
#[error("Request error")]
RequestError(#[from] reqwest::Error), // TODO: Better errors
#[error("Failed to decode response")]
DecodeError {
status: StatusCode,
bytes: Bytes,
error: serde_json::Error,
},
#[error("API error")]
ApiError {
status: StatusCode,
info: ApiErrorInfo,
},
#[error("I/O error")]
IoError(#[from] std::io::Error),
#[error("Too many collisions")]
TooManyCollisions,
}
#[derive(Debug, Clone)]
pub struct Api {
/// Credentials to access the cache.
credentials: Credentials,
/// The version used for all caches.
///
/// This value should be tied to everything that affects
/// the compatibility of the cached objects.
version: String,
/// The hasher of the version.
version_hasher: Sha256,
/// The HTTP client for authenticated requests.
client: Client,
}
/// A file allocation.
#[derive(Debug, Clone, Copy)]
pub struct FileAllocation(CacheId);
/// The ID of a cache.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(transparent)]
struct CacheId(pub i32);
/// An API error.
#[derive(Debug, Clone)]
pub enum ApiErrorInfo {
/// An error that we couldn't decode.
Unstructured(Bytes),
/// A structured API error.
Structured(StructuredApiError),
}
/// A structured API error.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct StructuredApiError {
/// A human-readable error message.
message: String,
}
/// A cache entry.
///
/// A valid entry looks like:
///
/// ```text
/// ArtifactCacheEntry {
/// cache_key: Some("hello-224".to_string()),
/// scope: Some("refs/heads/main".to_string()),
/// cache_version: Some("gha-cache/0.1.0".to_string()),
/// creation_time: Some("2023-01-01T00:00:00.0000000Z".to_string()),
/// archive_location: Some(
/// "https://[...].blob.core.windows.net/[...]/[...]?sv=2019-07-07&sr=b&sig=[...]".to_string()
/// ),
/// }
/// ```
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
struct ArtifactCacheEntry {
/// The cache key.
#[serde(rename = "cacheKey")]
cache_key: Option<String>,
/// The scope of the cache.
///
/// It appears to be the branch name.
scope: Option<String>,
/// The version of the cache.
#[serde(rename = "cacheVersion")]
cache_version: Option<String>,
/// The creation timestamp.
#[serde(rename = "creationTime")]
creation_time: Option<String>,
/// The archive location.
#[serde(rename = "archiveLocation")]
archive_location: String,
}
#[derive(Debug, Clone, Serialize)]
struct ReserveCacheRequest<'a> {
/// The cache key.
key: &'a str,
/// The cache version.
///
/// This value should be tied to everything that affects
/// the compatibility of the cached objects.
version: &'a str,
/// The size of the cache, in bytes.
#[serde(rename = "cacheSize")]
#[serde(skip_serializing_if = "Option::is_none")]
cache_size: Option<usize>,
}
#[derive(Debug, Clone, Deserialize)]
struct ReserveCacheResponse {
/// The reserved cache ID.
#[serde(rename = "cacheId")]
cache_id: CacheId,
}
#[derive(Debug, Clone, Serialize)]
struct CommitCacheRequest {
size: usize,
}
#[async_trait]
trait ResponseExt {
async fn check(self) -> Result<()>;
async fn check_json<T: DeserializeOwned>(self) -> Result<T>;
}
impl Error {
fn init_error<E>(e: E) -> Self
where
E: std::error::Error + Send + Sync + 'static,
{
Self::InitError(Box::new(e))
}
}
impl Api {
pub fn new(credentials: Credentials) -> Result<Self> {
let mut headers = HeaderMap::new();
let auth_header = {
let mut h = HeaderValue::from_str(&format!("Bearer {}", credentials.runtime_token))
.map_err(Error::init_error)?;
h.set_sensitive(true);
h
};
headers.insert("Authorization", auth_header);
headers.insert(
"Accept",
HeaderValue::from_str(&format!("application/json;api-version={}", API_VERSION))
.map_err(Error::init_error)?,
);
let client = Client::builder()
.user_agent(USER_AGENT)
.default_headers(headers)
.build()
.map_err(Error::init_error)?;
let version_hasher = Sha256::new_with_prefix(DEFAULT_VERSION.as_bytes());
let initial_version = hex::encode(version_hasher.clone().finalize());
Ok(Self {
credentials,
version: initial_version,
version_hasher,
client,
})
}
/// Mutates the cache version/namespace.
pub fn mutate_version(&mut self, data: &[u8]) {
self.version_hasher.update(data);
self.version = hex::encode(self.version_hasher.clone().finalize());
}
// Public
/// Allocates a file.
pub async fn allocate_file(&self, key: &str) -> Result<FileAllocation> {
let reservation = self.reserve_cache(key, None).await?;
Ok(FileAllocation(reservation.cache_id))
}
/// Allocates a file with a random suffix.
///
/// This is a hack to allow for easy "overwriting" without
/// deleting the original cache.
pub async fn allocate_file_with_random_suffix(&self, key: &str) -> Result<FileAllocation> {
for _ in 0..5 {
let nonce: String = rand::thread_rng()
.sample_iter(&Alphanumeric)
.take(4)
.map(char::from)
.collect();
let full_key = format!("{}-{}", key, nonce);
match self.allocate_file(&full_key).await {
Ok(allocation) => {
return Ok(allocation);
}
Err(e) => {
if let Error::ApiError {
info: ApiErrorInfo::Structured(structured),
..
} = &e
{
if structured.message.contains("Cache already exists") {
continue;
}
}
return Err(e);
}
}
}
Err(Error::TooManyCollisions)
}
/// Uploads a file.
pub async fn upload_file<S>(&self, allocation: FileAllocation, mut stream: S) -> Result<()>
where
S: AsyncRead + Unpin + Send,
{
// TODO: Parallelize
let mut offset = 0;
loop {
let buf = BytesMut::with_capacity(CHUNK_SIZE);
let chunk = read_chunk_async(&mut stream, buf).await?;
if chunk.is_empty() {
offset += chunk.len();
break;
}
let chunk_len = chunk.len();
self.client
.patch(self.construct_url(&format!("caches/{}", allocation.0 .0)))
.header(CONTENT_TYPE, "application/octet-stream")
.header(
CONTENT_RANGE,
format!("bytes {}-{}/*", offset, offset + chunk.len() - 1),
)
.body(chunk)
.send()
.await?
.check()
.await?;
offset += chunk_len;
}
self.commit_cache(allocation.0, offset).await?;
Ok(())
}
/// Downloads a file based on a list of key prefixes.
pub async fn get_file_url(&self, keys: &[&str]) -> Result<Option<String>> {
Ok(self
.get_cache_entry(keys)
.await?
.map(|entry| entry.archive_location))
}
// Private
/// Retrieves a cache based on a list of key prefixes.
async fn get_cache_entry(&self, keys: &[&str]) -> Result<Option<ArtifactCacheEntry>> {
let res = self
.client
.get(self.construct_url("cache"))
.query(&[("version", &self.version), ("keys", &keys.join(","))])
.send()
.await?
.check_json()
.await;
match res {
Ok(entry) => Ok(Some(entry)),
Err(Error::DecodeError { status, .. }) if status == StatusCode::NO_CONTENT => Ok(None),
Err(e) => Err(e),
}
}
/// Reserves a new cache.
///
/// The cache key should be unique. A cache cannot be created
/// again if the same (cache_name, cache_version) pair already
/// exists.
async fn reserve_cache(
&self,
key: &str,
cache_size: Option<usize>,
) -> Result<ReserveCacheResponse> {
let req = ReserveCacheRequest {
key,
version: &self.version,
cache_size,
};
let res = self
.client
.post(self.construct_url("caches"))
.json(&req)
.send()
.await?
.check_json()
.await?;
Ok(res)
}
/// Finalizes uploading to a cache.
async fn commit_cache(&self, cache_id: CacheId, size: usize) -> Result<()> {
let req = CommitCacheRequest { size };
self.client
.post(self.construct_url(&format!("caches/{}", cache_id.0)))
.json(&req)
.send()
.await?
.check()
.await?;
Ok(())
}
fn construct_url(&self, resource: &str) -> String {
format!(
"{}/_apis/artifactcache/{}",
self.credentials.cache_url, resource
)
}
}
#[async_trait]
impl ResponseExt for reqwest::Response {
async fn check(self) -> Result<()> {
let status = self.status();
if !status.is_success() {
return Err(handle_error(self).await);
}
Ok(())
}
async fn check_json<T: DeserializeOwned>(self) -> Result<T> {
let status = self.status();
if !status.is_success() {
return Err(handle_error(self).await);
}
// We don't do `Response::json()` directly to preserve
// the original response payload for troubleshooting.
let bytes = self.bytes().await?;
match serde_json::from_slice(&bytes) {
Ok(decoded) => Ok(decoded),
Err(error) => Err(Error::DecodeError {
status,
error,
bytes,
}),
}
}
}
async fn handle_error(res: reqwest::Response) -> Error {
let status = res.status();
let bytes = match res.bytes().await {
Ok(bytes) => bytes,
Err(e) => {
return e.into();
}
};
let info = if let Ok(structured) = serde_json::from_slice(&bytes) {
ApiErrorInfo::Structured(structured)
} else {
ApiErrorInfo::Unstructured(bytes)
};
Error::ApiError { status, info }
}

37
gha-cache/src/credentials.rs Normal file

@@ -0,0 +1,37 @@
//! Access credentials.
use std::env;
use derivative::Derivative;
use serde::{Deserialize, Serialize};
/// Credentials to access the GitHub Actions Cache.
#[derive(Clone, Derivative, Deserialize, Serialize)]
#[derivative(Debug)]
pub struct Credentials {
/// The base URL of the cache.
///
/// This is the `ACTIONS_CACHE_URL` environment variable.
#[serde(alias = "ACTIONS_CACHE_URL")]
pub(crate) cache_url: String,
/// The token.
///
/// This is the `ACTIONS_RUNTIME_TOKEN` environment variable.
#[derivative(Debug = "ignore")]
#[serde(alias = "ACTIONS_RUNTIME_TOKEN")]
pub(crate) runtime_token: String,
}
impl Credentials {
/// Tries to load credentials from the environment.
pub fn load_from_env() -> Option<Self> {
let cache_url = env::var("ACTIONS_CACHE_URL").ok()?;
let runtime_token = env::var("ACTIONS_RUNTIME_TOKEN").ok()?;
Some(Self {
cache_url,
runtime_token,
})
}
}

8
gha-cache/src/lib.rs Normal file

@@ -0,0 +1,8 @@
//! Meow.
pub mod api;
pub mod credentials;
mod util;
pub use api::Api;
pub use credentials::Credentials;

22
gha-cache/src/util.rs Normal file

@@ -0,0 +1,22 @@
//! Utilities.
//!
//! Taken from <https://github.com/zhaofengli/attic>.
use bytes::{Bytes, BytesMut};
use tokio::io::{AsyncRead, AsyncReadExt};
/// Greedily reads from a stream to fill a buffer.
pub async fn read_chunk_async<S: AsyncRead + Unpin + Send>(
stream: &mut S,
mut chunk: BytesMut,
) -> std::io::Result<Bytes> {
while chunk.len() < chunk.capacity() {
let read = stream.read_buf(&mut chunk).await?;
if read == 0 {
break;
}
}
Ok(chunk.freeze())
}

32
nix-actions-cache/Cargo.toml Normal file

@@ -0,0 +1,32 @@
[package]
name = "nix-actions-cache"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
gha-cache = { path = "../gha-cache" }
axum = "0.6.18"
clap = { version = "4.2.7", features = ["derive"] }
tracing = "0.1.37"
tracing-subscriber = "0.3.17"
tower-http = { version = "0.4.0", features = ["trace"] }
serde_json = "1.0.96"
thiserror = "1.0.40"
tokio-stream = "0.1.14"
tokio-util = { version = "0.7.8", features = ["io"] }
rand = "0.8.5"
[dependencies.tokio]
version = "1.28.0"
features = [
"fs",
"io-util",
"macros",
"process",
"rt",
"rt-multi-thread",
"sync",
]

35
nix-actions-cache/src/error.rs Normal file

@@ -0,0 +1,35 @@
//! Errors.
use axum::{
http::StatusCode,
response::{IntoResponse, Response},
};
use thiserror::Error;
use gha_cache::api::Error as ApiError;
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Error, Debug)]
pub enum Error {
#[error("GitHub API error")]
ApiError(#[from] ApiError),
#[error("Not Found")]
NotFound,
#[error("Bad Request")]
BadRequest,
}
impl IntoResponse for Error {
fn into_response(self) -> Response {
let code = match &self {
Self::ApiError(_) => StatusCode::INTERNAL_SERVER_ERROR,
Self::NotFound => StatusCode::NOT_FOUND,
Self::BadRequest => StatusCode::BAD_REQUEST,
};
(code, format!("{}", self)).into_response()
}
}

171
nix-actions-cache/src/main.rs Normal file

@@ -0,0 +1,171 @@
mod error;
use std::io;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use axum::{
extract::{BodyStream, Extension, Path},
response::Redirect,
routing::{get, put},
Router,
};
use clap::Parser;
use tokio::fs;
use tokio_stream::StreamExt;
use tokio_util::io::StreamReader;
use error::{Error, Result};
use gha_cache::{Api, Credentials};
type State = Arc<StateInner>;
/// GitHub Actions-powered Nix binary cache
#[derive(Parser, Debug)]
struct Args {
/// JSON file containing credentials.
///
/// If this is not specified, credentials will be loaded
/// from the environment.
#[arg(short = 'c', long)]
credentials_file: Option<PathBuf>,
/// Address to listen on.
///
/// FIXME: IPv6
#[arg(short = 'l', long, default_value = "127.0.0.1:3000")]
listen: SocketAddr,
}
/// The global server state.
#[derive(Debug)]
struct StateInner {
api: Api,
}
#[tokio::main]
async fn main() {
let args = Args::parse();
tracing_subscriber::fmt::init();
let credentials = if let Some(credentials_file) = &args.credentials_file {
tracing::info!("Loading credentials from {:?}", credentials_file);
let bytes = fs::read(credentials_file)
.await
.expect("Failed to read credentials file");
serde_json::from_slice(&bytes).expect("Failed to deserialize credentials file")
} else {
tracing::info!("Loading credentials from environment");
Credentials::load_from_env()
.expect("Failed to load credentials from environment (see README.md)")
};
let api = Api::new(credentials).expect("Failed to initialize GitHub Actions Cache API");
let state = Arc::new(StateInner { api });
let app = Router::new()
.route("/", get(root))
.route("/nix-cache-info", get(get_nix_cache_info))
// .narinfo
.route("/:path", get(get_narinfo))
.route("/:path", put(put_narinfo))
// .nar
.route("/nar/:path", get(get_nar))
.route("/nar/:path", put(put_nar))
.layer(Extension(state));
#[cfg(debug_assertions)]
let app = app.layer(tower_http::trace::TraceLayer::new_for_http());
tracing::info!("listening on {}", args.listen);
axum::Server::bind(&args.listen)
.serve(app.into_make_service())
.await
.unwrap();
}
async fn root() -> &'static str {
"cache the world 🚀"
}
async fn get_nix_cache_info() -> &'static str {
// TODO: Make StoreDir configurable
r#"WantMassQuery: 1
StoreDir: /nix/store
Priority: 39
"#
}
async fn get_narinfo(
Extension(state): Extension<State>,
Path(path): Path<String>,
) -> Result<Redirect> {
let components: Vec<&str> = path.splitn(2, '.').collect();
if components.len() != 2 {
return Err(Error::NotFound);
}
if components[1] != "narinfo" {
return Err(Error::NotFound);
}
let store_path_hash = components[0].to_string();
let key = format!("{}.narinfo", store_path_hash);
if let Some(url) = state.api.get_file_url(&[&key]).await? {
return Ok(Redirect::temporary(&url));
}
Err(Error::NotFound)
}
async fn put_narinfo(
Extension(state): Extension<State>,
Path(path): Path<String>,
body: BodyStream,
) -> Result<()> {
let components: Vec<&str> = path.splitn(2, '.').collect();
if components.len() != 2 {
return Err(Error::BadRequest);
}
if components[1] != "narinfo" {
return Err(Error::BadRequest);
}
let store_path_hash = components[0].to_string();
let key = format!("{}.narinfo", store_path_hash);
let allocation = state.api.allocate_file_with_random_suffix(&key).await?;
let stream = StreamReader::new(
body.map(|r| r.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))),
);
state.api.upload_file(allocation, stream).await?;
Ok(())
}
async fn get_nar(Extension(state): Extension<State>, Path(path): Path<String>) -> Result<Redirect> {
if let Some(url) = state.api.get_file_url(&[&path]).await? {
return Ok(Redirect::temporary(&url));
}
Err(Error::NotFound)
}
async fn put_nar(
Extension(state): Extension<State>,
Path(path): Path<String>,
body: BodyStream,
) -> Result<()> {
let allocation = state.api.allocate_file_with_random_suffix(&path).await?;
let stream = StreamReader::new(
body.map(|r| r.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))),
);
state.api.upload_file(allocation, stream).await?;
Ok(())
}

16
shell.nix Normal file

@@ -0,0 +1,16 @@
let
lock = builtins.fromJSON (builtins.readFile ./flake.lock);
flake-compat = builtins.fetchTarball {
url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
sha256 = lock.nodes.flake-compat.locked.narHash;
};
flake = import flake-compat {
src = ./.;
};
shell = flake.shellNix.default // {
reproduce = flake.defaultNix.outputs.reproduce.${builtins.currentSystem};
};
in shell