# Compare commits

`optional-n`...`main` (130 commits)
Commits:

d35e6e72df, 6fc832cb76, 78a56de86a, 9fdc760dcb, 90e95ab197, 9f88cc4842, 942b6b0ffe, 80600ec316, 582930b2fc, b29c2cafae, 0dd0d2d0a6, 5a689bfeb3, e9600149c7, 9bdbfd97f2, d347386c3f, a774f04dfb, 7ed9fc9cbb, 8fa4c519ce, 3a905ca44d, 339b12a07b, c5e897a376, 1eeaf990c0, 66c0f4bf8e, f64d3e92bf, 5e805d85e8, 8780a7d721, 90e06e4287, f5504c8285, a5c0301ee8, 92b7440174, ee9c0b9fa5, a0571cc895, 260ce740fc, 22d923e664, 214e869fee, 36897bff90, f3fe26e7c0, 61233c3bb5, 1cd5d316da, c12f7ed42f, 7099e801ae, b1a16d5ac2, 1fad1473c5, 8492a69263, a65ff39edd, ef4bd6fb91, 4d32a8ef4e, e8e38ad4fc, e70bb1e416, 4e2b37be36, 322a99d45e, 003f106338, b9f89bd546, 215dc0d8e9, 2bac50c0ca, 5e7acea3d1, 11b78639f1, 296e9dc1af, f27f314206, 448d84e32f, d1983bbdff, a68e1c4d54, bf844027bc, 65060bc705, 3fd6eeb208, cf183317a5, 647b207575, 625d7717b6, 925be77ec2, 7841b8bbe2, a54a97ff9b, 9f7a4abc4d, 799a0c42e6, 65899a5ad5, 6a5abbf3bb, 24af143b67, 4f25f7b3e6, 955ed68d34, 7c6bd9387c, 6acb043852, 04af54090e, bc76dfa4df, 5b126b691b, 2bcd86656f, f13fa9e9f3, 7894df9177, 949fc954a5, b499ff2a0f, 3ca2a4bf5b, 979ad69132, b5a094c7a2, 91eef4416e, c1924ba94a, b1ec181a0a, fa02e9ad6f, 21a9552b0b, f0b1e69100, e52d545126, edb35e8b94, 11544ed9eb, a4866b9dcd, 08c8cf0275, d9d748267f, 48ec31e71d, 9f46b60a8c, deeb8d1d79, 7c6300cfdc, c41207df35, a6daff9a65, 685fe75327, 3a001d12e5, 0f476bd775, 10cbd94f3c, 594748fe30, c0b8f7b57b, f82811cc9c, 57eb3e75c0, e5d5118022, 2506ee0164, ef40bb2caf, cc01b81323, 2ba4cb13aa, c08b262d2a, 1278a7d98a, 9ac4ce7953, d0a51e7820, 1f386d2aac, 902b81a064, 4ea576ab60, 4c0a2510c1
**Deleted file** (the file name was not captured in this view; the contents are a Cargo configuration for `-Zbuild-std` static builds):

```diff
@@ -1,3 +0,0 @@
-# For -Zbuild-std
-[target.aarch64-unknown-linux-musl]
-rustflags = ["-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"]
```
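The deleted lines configured Cargo's nightly-only `-Zbuild-std` feature to produce a fully static musl binary; the README change later in this comparison retires that workflow in favor of plain glibc targets. A minimal sketch of the kind of build this config supported (an illustration, not a command taken from this PR; it assumes a nightly toolchain):

```shell
# build-std recompiles the standard library, so it needs the rust-src component
rustup component add rust-src --toolchain nightly
cargo +nightly build -Z build-std --release --target aarch64-unknown-linux-musl
```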
**.github/workflows/build.yaml** (vendored, 10 changes):

```diff
@@ -24,16 +24,16 @@ jobs:
             runner: namespace-profile-default-arm64
           - nix-system: x86_64-darwin
             system: X64-macOS
-            runner: macos-12
+            runner: macos-14-large
           - nix-system: aarch64-darwin
             system: ARM64-macOS
             runner: macos-latest-xlarge
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install Nix on ${{ matrix.systems.system }}
         uses: DeterminateSystems/nix-installer-action@main
-      - name: Magic Nix Cache
-        uses: DeterminateSystems/magic-nix-cache-action@main
+      - name: Set up FlakeHub Cache
+        uses: DeterminateSystems/flakehub-cache-action@main

       - name: Build and cache dev shell for ${{ matrix.systems.nix-system }}
         run: |
@@ -45,7 +45,7 @@ jobs:
           nix-store --export $(nix-store -qR ./result) | xz -9 > "${{ env.ARCHIVE_NAME }}"

       - name: Upload magic-nix-cache closure for ${{ matrix.systems.system }}
-        uses: actions/upload-artifact@v3.1.2
+        uses: actions/upload-artifact@v4.6.0
         with:
          # Artifact name
          name: ${{ env.ARTIFACT_KEY }}
```
**.github/workflows/check-and-test.yaml** (vendored, 23 changes):

```diff
@@ -13,18 +13,17 @@ jobs:
       contents: read
       id-token: write
     steps:
-      - uses: actions/checkout@v3
-
-      - name: Install Nix
-        uses: DeterminateSystems/nix-installer-action@main
-      - uses: DeterminateSystems/magic-nix-cache-action@main
+      - uses: actions/checkout@v4

       - name: Check health of flake.lock
         uses: DeterminateSystems/flake-checker-action@main
-        # TODO: re-enable fail mode when we find a way to bump Nixpkgs to 24.05
-        # without breaking the static Rust build
-        #with:
-        #  fail-mode: true
+        with:
+          fail-mode: true
+
+      - name: Install Nix
+        uses: DeterminateSystems/nix-installer-action@main
+
+      - uses: DeterminateSystems/flakehub-cache-action@main

       - name: Check Rust formatting
         run: nix develop --command cargo fmt --check
@@ -53,17 +52,17 @@ jobs:
           - system: ARM64-Linux
             runner: namespace-profile-default-arm64
           - system: X64-macOS
-            runner: macos-12
+            runner: macos-14-large
           - system: ARM64-macOS
             runner: macos-latest-xlarge
     permissions:
       contents: read
       id-token: write
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: Download closure for ${{ matrix.systems.system }}
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4.1.8
         with:
           name: ${{ env.ARTIFACT_KEY }}
           path: ${{ env.ARTIFACT_KEY }}
```
**.github/workflows/flakehub.yaml** (vendored, 2 changes):

```diff
@@ -12,7 +12,7 @@ jobs:
       id-token: "write"
       contents: "read"
     steps:
-      - uses: "actions/checkout@v3"
+      - uses: "actions/checkout@v4"
       - uses: "DeterminateSystems/nix-installer-action@main"
       - uses: "DeterminateSystems/flakehub-push@main"
         with:
```
**.github/workflows/keygen.yaml** (vendored, 4 changes):

```diff
@@ -5,10 +5,10 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - name: Install Nix
        uses: DeterminateSystems/nix-installer-action@main
-      - uses: DeterminateSystems/magic-nix-cache-action@main
+      - uses: DeterminateSystems/flakehub-cache-action@main
      - name: Expose GitHub Runtime
        uses: crazy-max/ghaction-github-runtime@v2
      - name: Dump credentials
```
**.github/workflows/release-branches.yml** (vendored, 10 changes):

```diff
@@ -22,7 +22,7 @@ jobs:
       id-token: write # In order to request a JWT for AWS auth
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@v2
         with:
@@ -32,28 +32,28 @@ jobs:
       - name: Create the artifacts directory
         run: rm -rf ./artifacts && mkdir ./artifacts

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-ARM64-macOS
          path: cache-binary-ARM64-macOS
      - name: Persist the cache binary
        run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-X64-macOS
          path: cache-binary-X64-macOS
      - name: Persist the cache binary
        run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-X64-Linux
          path: cache-binary-X64-Linux
      - name: Persist the cache binary
        run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-ARM64-Linux
          path: cache-binary-ARM64-Linux
```
**.github/workflows/release-prs.yml** (vendored, 10 changes):

```diff
@@ -31,33 +31,33 @@ jobs:
       contents: read
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Create the artifacts directory
         run: rm -rf ./artifacts && mkdir ./artifacts

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-ARM64-macOS
          path: cache-binary-ARM64-macOS
      - name: Persist the cache binary
        run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-X64-macOS
          path: cache-binary-X64-macOS
      - name: Persist the cache binary
        run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-X64-Linux
          path: cache-binary-X64-Linux
      - name: Persist the cache binary
        run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-ARM64-Linux
          path: cache-binary-ARM64-Linux
```
**.github/workflows/release-tags.yml** (vendored, 10 changes):

```diff
@@ -19,33 +19,33 @@ jobs:
       id-token: write # In order to request a JWT for AWS auth
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Create the artifacts directory
         run: rm -rf ./artifacts && mkdir ./artifacts

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-ARM64-macOS
          path: cache-binary-ARM64-macOS
      - name: Persist the cache binary
        run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-X64-macOS
          path: cache-binary-X64-macOS
      - name: Persist the cache binary
        run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-X64-Linux
          path: cache-binary-X64-Linux
      - name: Persist the cache binary
        run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4.1.8
        with:
          name: magic-nix-cache-ARM64-Linux
          path: cache-binary-ARM64-Linux
```
**.github/workflows/update-flake-lock.yaml** (vendored, new file, 20 lines):

```yaml
name: update-flake-lock

on:
  workflow_dispatch: # enable manual triggering
  schedule:
    - cron: "0 0 * * 0" # every Sunday at midnight

jobs:
  lockfile:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: DeterminateSystems/nix-installer-action@main
      - uses: DeterminateSystems/flakehub-cache-action@main
      - uses: DeterminateSystems/update-flake-lock@main
        with:
          pr-title: Update flake.lock
          pr-labels: |
            dependencies
            automated
```
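Besides the weekly cron trigger, `workflow_dispatch` lets the workflow be started by hand. A usage sketch with the GitHub CLI (assumes `gh` is installed and authenticated against this repository; not part of the PR itself):

```shell
# Kick off a lockfile update run manually, then follow its progress
gh workflow run update-flake-lock.yaml
gh run watch
```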
**Cargo.lock** (generated, 3,468 changes): file diff suppressed because it is too large.
**README.md** (15 changes):

````diff
@@ -1,5 +1,12 @@
 # Magic Nix Cache

+> [!WARNING]
+> The [Magic Nix Cache will stop working](https://determinate.systems/posts/magic-nix-cache-free-tier-eol) on **February 1st, 2025** unless you're on [GitHub Enterprise Server](https://github.com/enterprise).
+>
+> You can upgrade to [FlakeHub Cache](https://flakehub.com/cache) and get **one month free** using the coupon code **`FHC`**.
+>
+> For more information, read [this blog post](https://determinate.systems/posts/magic-nix-cache-free-tier-eol/).
+
 Save 30-50%+ of CI time without any effort or cost.
 Use Magic Nix Cache, a totally free and zero-configuration binary cache for Nix on GitHub Actions.

@@ -10,7 +17,7 @@ permissions:
       contents: read
       id-token: write
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: DeterminateSystems/nix-installer-action@main
       - uses: DeterminateSystems/magic-nix-cache-action@main
       - run: nix flake check
@@ -52,7 +59,7 @@ jobs:
       contents: read
       id-token: write
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: DeterminateSystems/nix-installer-action@main
       - uses: DeterminateSystems/magic-nix-cache-action@main
       - run: nix flake check
@@ -84,8 +91,8 @@ For local development, see `gha-cache/README.md` for more details on how to obta

 ```shell
 cargo run -- -c creds.json --upstream https://cache.nixos.org
-cargo build --release --target x86_64-unknown-linux-musl
-cargo build --release --target aarch64-unknown-linux-musl
+cargo build --release --target x86_64-unknown-linux-gnu
+cargo build --release --target aarch64-unknown-linux-gnu
 nix copy --to 'http://127.0.0.1:3000' $(which bash)
 nix-store --store $PWD/test-root --extra-substituters 'http://localhost:3000' --option require-sigs false -r $(which bash)
 ```
````
**crane.nix** (deleted, 122 lines). The entire file is removed; its former contents:

```nix
{ stdenv
, pkgs
, lib
, crane
, rust
, rust-bin
, nix-gitignore
, supportedSystems
, nix-flake
}:

let
  inherit (stdenv.hostPlatform) system;

  nightlyVersion = "2024-03-28";
  rustNightly = (pkgs.rust-bin.nightly.${nightlyVersion}.default.override {
    extensions = [ "rust-src" "rust-analyzer-preview" ];
    targets = cargoTargets;
  }).overrideAttrs (old: {
    # Remove the propagated libiconv since we want to add our static version
    depsTargetTargetPropagated = lib.filter (d: d.pname != "libiconv")
      (lib.flatten (old.depsTargetTargetPropagated or [ ]));
  });

  # For easy cross-compilation in devShells
  # We are just composing the pkgsCross.*.stdenv.cc together
  crossPlatforms =
    let
      makeCrossPlatform = crossSystem:
        let
          pkgsCross =
            if crossSystem == system then pkgs
            else
              import pkgs.path {
                inherit system crossSystem;
                overlays = [ nix-flake.overlays.default ];
              };

          rustTargetSpec = rust.toRustTargetSpec pkgsCross.pkgsStatic.stdenv.hostPlatform;
          rustTargetSpecUnderscored = builtins.replaceStrings [ "-" ] [ "_" ] rustTargetSpec;

          cargoLinkerEnv = lib.strings.toUpper "CARGO_TARGET_${rustTargetSpecUnderscored}_LINKER";
          cargoCcEnv = "CC_${rustTargetSpecUnderscored}"; # for ring

          ccbin = "${pkgsCross.stdenv.cc}/bin/${pkgsCross.stdenv.cc.targetPrefix}cc";
        in
        {
          name = crossSystem;
          value = {
            inherit rustTargetSpec;
            cc = pkgsCross.stdenv.cc;
            pkgs = pkgsCross;
            buildInputs = makeBuildInputs pkgsCross;
            env = {
              "${cargoLinkerEnv}" = ccbin;
              "${cargoCcEnv}" = ccbin;
            };
          };
        };
      systems = lib.filter (s: s == system || lib.hasInfix "linux" s) supportedSystems
        # Cross from aarch64-darwin -> x86_64-darwin doesn't work yet
        # Hopefully the situation will improve with the SDK bumps
        ++ lib.optional (system == "x86_64-darwin") "aarch64-darwin";
    in
    builtins.listToAttrs (map makeCrossPlatform systems);

  cargoTargets = lib.mapAttrsToList (_: p: p.rustTargetSpec) crossPlatforms;
  cargoCrossEnvs = lib.foldl (acc: p: acc // p.env) { } (builtins.attrValues crossPlatforms);

  makeBuildInputs = pkgs:
    [ pkgs.nix
      pkgs.boost # needed for clippy
    ]
    ++ lib.optionals pkgs.stdenv.isDarwin [
      pkgs.darwin.apple_sdk.frameworks.Security
      (pkgs.libiconv.override { enableStatic = true; enableShared = false; })
    ];

  buildFor = system:
    let
      crossPlatform = crossPlatforms.${system};
      inherit (crossPlatform) pkgs;
      craneLib = (crane.mkLib pkgs).overrideToolchain rustNightly;
      crateName = craneLib.crateNameFromCargoToml {
        cargoToml = ./magic-nix-cache/Cargo.toml;
      };

      src = nix-gitignore.gitignoreSource [ ] ./.;

      commonArgs = {
        inherit (crateName) pname version;
        inherit src;

        nativeBuildInputs = [ pkgs.pkg-config ];

        buildInputs = makeBuildInputs pkgs;

        cargoExtraArgs = "--target ${crossPlatform.rustTargetSpec}";

        cargoVendorDir = craneLib.vendorMultipleCargoDeps {
          inherit (craneLib.findCargoFiles src) cargoConfigs;
          cargoLockList = [
            ./Cargo.lock
            "${rustNightly.passthru.availableComponents.rust-src}/lib/rustlib/src/rust/Cargo.lock"
          ];
        };
      } // crossPlatform.env;

      crate = craneLib.buildPackage (commonArgs // {
        cargoArtifacts = craneLib.buildDepsOnly commonArgs;

        # The resulting executable must be standalone
        allowedRequisites = [ ];
      });
    in
    crate;
in
{
  inherit crossPlatforms cargoTargets cargoCrossEnvs rustNightly;

  magic-nix-cache = buildFor system;
}
```
**flake.lock** (233 changes):

```diff
@@ -1,46 +1,27 @@
 {
   "nodes": {
     "crane": {
-      "inputs": {
-        "nixpkgs": [
-          "nixpkgs"
-        ]
-      },
       "locked": {
-        "lastModified": 1714842444,
-        "narHash": "sha256-z4HeSYtEdYxKurrbxCMb8v/I1LYDHR/aFrZtGtgUgHw=",
-        "rev": "c5ee4371eea1728ef04bb09c79577c84d5e67a48",
-        "revCount": 557,
+        "lastModified": 1741479724,
+        "narHash": "sha256-fnyETBKSVRa5abjOiRG/IAzKZq5yX8U6oRrHstPl4VM=",
+        "rev": "60202a2e3597a3d91f5e791aab03f45470a738b5",
+        "revCount": 709,
         "type": "tarball",
-        "url": "https://api.flakehub.com/f/pinned/ipetkov/crane/0.16.6/018f4495-627e-7385-b537-81f1c1d4003b/source.tar.gz"
+        "url": "https://api.flakehub.com/f/pinned/ipetkov/crane/0.20.2/0195784b-915b-7d2d-915d-ab02d1112ef9/source.tar.gz"
       },
       "original": {
         "type": "tarball",
-        "url": "https://flakehub.com/f/ipetkov/crane/0.16.3.tar.gz"
+        "url": "https://flakehub.com/f/ipetkov/crane/%2A"
       }
     },
     "flake-compat": {
-      "locked": {
-        "lastModified": 1696426674,
-        "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
-        "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
-        "revCount": 57,
-        "type": "tarball",
-        "url": "https://api.flakehub.com/f/pinned/edolstra/flake-compat/1.0.1/018afb31-abd1-7bff-a5e4-cff7e18efb7a/source.tar.gz"
-      },
-      "original": {
-        "type": "tarball",
-        "url": "https://flakehub.com/f/edolstra/flake-compat/1.0.1.tar.gz"
-      }
-    },
-    "flake-compat_2": {
       "flake": false,
       "locked": {
-        "lastModified": 1673956053,
-        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+        "lastModified": 1733328505,
+        "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
         "owner": "edolstra",
         "repo": "flake-compat",
-        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+        "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
         "type": "github"
       },
       "original": {
@@ -57,11 +38,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1712014858,
-        "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
+        "lastModified": 1733312601,
+        "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
         "owner": "hercules-ci",
         "repo": "flake-parts",
-        "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
+        "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
         "type": "github"
       },
       "original": {
@@ -70,93 +51,91 @@
         "type": "github"
       }
     },
-    "flake-utils": {
-      "locked": {
-        "lastModified": 1667395993,
-        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
-        "type": "github"
-      },
-      "original": {
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "type": "github"
-      }
-    },
-    "flake-utils_2": {
+    "git-hooks-nix": {
       "inputs": {
-        "systems": "systems"
+        "flake-compat": [
+          "nix"
+        ],
+        "gitignore": [
+          "nix"
+        ],
+        "nixpkgs": [
+          "nix",
+          "nixpkgs"
+        ],
+        "nixpkgs-stable": [
+          "nix",
+          "nixpkgs"
+        ]
       },
       "locked": {
-        "lastModified": 1705309234,
-        "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
+        "lastModified": 1734279981,
+        "narHash": "sha256-NdaCraHPp8iYMWzdXAt5Nv6sA3MUzlCiGiR586TCwo0=",
+        "owner": "cachix",
+        "repo": "git-hooks.nix",
+        "rev": "aa9f40c906904ebd83da78e7f328cd8aeaeae785",
         "type": "github"
       },
       "original": {
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "type": "github"
-      }
-    },
-    "libgit2": {
-      "flake": false,
-      "locked": {
-        "lastModified": 1697646580,
-        "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
-        "owner": "libgit2",
-        "repo": "libgit2",
-        "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
-        "type": "github"
-      },
-      "original": {
-        "owner": "libgit2",
-        "repo": "libgit2",
+        "owner": "cachix",
+        "repo": "git-hooks.nix",
         "type": "github"
       }
     },
     "nix": {
       "inputs": {
-        "flake-compat": "flake-compat_2",
+        "flake-compat": "flake-compat",
         "flake-parts": "flake-parts",
-        "libgit2": "libgit2",
+        "git-hooks-nix": "git-hooks-nix",
         "nixpkgs": "nixpkgs",
-        "nixpkgs-regression": "nixpkgs-regression",
-        "pre-commit-hooks": "pre-commit-hooks"
+        "nixpkgs-23-11": "nixpkgs-23-11",
+        "nixpkgs-regression": "nixpkgs-regression"
       },
       "locked": {
-        "lastModified": 1715246928,
-        "narHash": "sha256-5Q1WkpTWH7fkVfYhHDc5r0A+Vc+K5xB1UhzrLzBCrB8=",
-        "rev": "adba2f19a02eaa74336a06a026d3c37af8020559",
-        "revCount": 17044,
+        "lastModified": 1742824067,
+        "narHash": "sha256-rBPulEBpn4IiqkPsetuh7BRzT2iGCzZYnogTAsbrvhU=",
+        "rev": "9cb662df7442a1e2c4600fb8ecb2ad613ebc5a95",
+        "revCount": 19496,
         "type": "tarball",
-        "url": "https://api.flakehub.com/f/pinned/NixOS/nix/2.22.1/018f61d9-3f9a-7ccf-9bfc-174e3a17ab38/source.tar.gz"
+        "url": "https://api.flakehub.com/f/pinned/NixOS/nix/2.27.1/0195c8c5-1964-7a31-b025-ebf9bfeef991/source.tar.gz"
       },
       "original": {
         "type": "tarball",
-        "url": "https://flakehub.com/f/NixOS/nix/2.20.tar.gz"
+        "url": "https://flakehub.com/f/NixOS/nix/2"
       }
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1709083642,
-        "narHash": "sha256-7kkJQd4rZ+vFrzWu8sTRtta5D1kBG0LSRYAfhtmMlSo=",
+        "lastModified": 1734359947,
+        "narHash": "sha256-1Noao/H+N8nFB4Beoy8fgwrcOQLVm9o4zKW1ODaqK9E=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "b550fe4b4776908ac2a861124307045f8e717c8e",
+        "rev": "48d12d5e70ee91fe8481378e540433a7303dbf6a",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "release-23.11",
+        "ref": "release-24.11",
         "repo": "nixpkgs",
         "type": "github"
       }
     },
+    "nixpkgs-23-11": {
+      "locked": {
+        "lastModified": 1717159533,
+        "narHash": "sha256-oamiKNfr2MS6yH64rUn99mIZjc45nGJlj9eGth/3Xuw=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446",
+        "type": "github"
+      }
+    },
     "nixpkgs-regression": {
       "locked": {
         "lastModified": 1643052045,
@@ -175,93 +154,23 @@
     },
     "nixpkgs_2": {
       "locked": {
-        "lastModified": 1716633019,
-        "narHash": "sha256-xim1b5/HZYbWaZKyI7cn9TJCM6ewNVZnesRr00mXeS4=",
-        "rev": "9d29cd266cebf80234c98dd0b87256b6be0af44e",
-        "revCount": 558675,
+        "lastModified": 1742422364,
+        "narHash": "sha256-mNqIplmEohk5jRkqYqG19GA8MbQ/D4gQSK0Mu4LvfRQ=",
+        "rev": "a84ebe20c6bc2ecbcfb000a50776219f48d134cc",
+        "revCount": 770807,
         "type": "tarball",
-        "url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.2311.558675%2Brev-9d29cd266cebf80234c98dd0b87256b6be0af44e/018fb680-a725-7c9d-825e-aadb0901263e/source.tar.gz"
+        "url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.770807%2Brev-a84ebe20c6bc2ecbcfb000a50776219f48d134cc/0195b626-8c1d-7fb9-9282-563af3d37ab9/source.tar.gz"
       },
       "original": {
         "type": "tarball",
-        "url": "https://flakehub.com/f/NixOS/nixpkgs/0.2311.tar.gz"
+        "url": "https://flakehub.com/f/NixOS/nixpkgs/0.1"
       }
     },
-    "pre-commit-hooks": {
-      "inputs": {
-        "flake-compat": [
-          "nix"
-        ],
-        "flake-utils": "flake-utils",
-        "gitignore": [
-          "nix"
-        ],
-        "nixpkgs": [
-          "nix",
-          "nixpkgs"
-        ],
-        "nixpkgs-stable": [
-          "nix",
-          "nixpkgs"
-        ]
-      },
-      "locked": {
-        "lastModified": 1712897695,
-        "narHash": "sha256-nMirxrGteNAl9sWiOhoN5tIHyjBbVi5e2tgZUgZlK3Y=",
-        "owner": "cachix",
-        "repo": "pre-commit-hooks.nix",
-        "rev": "40e6053ecb65fcbf12863338a6dcefb3f55f1bf8",
-        "type": "github"
-      },
-      "original": {
-        "owner": "cachix",
-        "repo": "pre-commit-hooks.nix",
-        "type": "github"
-      }
-    },
     "root": {
       "inputs": {
         "crane": "crane",
-        "flake-compat": "flake-compat",
         "nix": "nix",
-        "nixpkgs": "nixpkgs_2",
-        "rust-overlay": "rust-overlay"
+        "nixpkgs": "nixpkgs_2"
       }
-    },
-    "rust-overlay": {
-      "inputs": {
-        "flake-utils": "flake-utils_2",
-        "nixpkgs": [
-          "nixpkgs"
-        ]
-      },
-      "locked": {
-        "lastModified": 1716862669,
-        "narHash": "sha256-7oTPM9lcdwiI1cpRC313B+lHawocgpY5F07N+Rbm5Uk=",
-        "owner": "oxalica",
-        "repo": "rust-overlay",
-        "rev": "47b2d15658b37716393b2463a019000dbd6ce4bc",
-        "type": "github"
-      },
-      "original": {
-        "owner": "oxalica",
-        "repo": "rust-overlay",
-        "type": "github"
-      }
-    },
-    "systems": {
-      "locked": {
-        "lastModified": 1681028828,
-        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
-        "owner": "nix-systems",
-        "repo": "default",
-        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-systems",
-        "repo": "default",
-        "type": "github"
-      }
     }
   },
```
**flake.nix** (183 changes):

```diff
@@ -2,46 +2,65 @@
   description = "GitHub Actions-powered Nix binary cache";

   inputs = {
-    nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.2311.tar.gz";
+    nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.1";

-    rust-overlay = {
-      url = "github:oxalica/rust-overlay";
-      inputs.nixpkgs.follows = "nixpkgs";
-    };
+    crane.url = "https://flakehub.com/f/ipetkov/crane/*";

-    crane = {
-      url = "https://flakehub.com/f/ipetkov/crane/0.16.3.tar.gz";
-      inputs.nixpkgs.follows = "nixpkgs";
-    };
-
-    flake-compat.url = "https://flakehub.com/f/edolstra/flake-compat/1.0.1.tar.gz";
-
-    nix.url = "https://flakehub.com/f/NixOS/nix/2.20.tar.gz";
+    nix.url = "https://flakehub.com/f/NixOS/nix/2";
   };

-  outputs = { self, nixpkgs, nix, ... }@inputs:
+  outputs = inputs:
     let
-      overlays = [ inputs.rust-overlay.overlays.default nix.overlays.default ];
       supportedSystems = [
         "aarch64-linux"
         "x86_64-linux"
         "aarch64-darwin"
         "x86_64-darwin"
       ];
-      forEachSupportedSystem = f: nixpkgs.lib.genAttrs supportedSystems (system: f rec {
-        pkgs = import nixpkgs { inherit overlays system; };
-        cranePkgs = pkgs.callPackage ./crane.nix {
-          inherit supportedSystems;
-          inherit (inputs) crane;
-          nix-flake = nix;
+      forEachSupportedSystem = f: inputs.nixpkgs.lib.genAttrs supportedSystems (system: f rec {
+        pkgs = import inputs.nixpkgs {
+          inherit system;
+          overlays = [
+            inputs.self.overlays.default
+          ];
         };
-        inherit (pkgs) lib;
+        inherit system;
       });
     in
     {
-      packages = forEachSupportedSystem ({ pkgs, cranePkgs, ... }: rec {
-        magic-nix-cache = pkgs.callPackage ./package.nix { };
-        #inherit (cranePkgs) magic-nix-cache;
+      overlays.default = final: prev:
+        let
+          craneLib = inputs.crane.mkLib final;
+          crateName = craneLib.crateNameFromCargoToml {
+            cargoToml = ./magic-nix-cache/Cargo.toml;
+          };
+
+          commonArgs = {
+            inherit (crateName) pname version;
+            src = inputs.self;
+
+            nativeBuildInputs = with final; [
+              pkg-config
+            ];
+
+            buildInputs = [
+              inputs.nix.packages.${final.stdenv.system}.default
+              final.boost
+            ];
+          };
+
+          cargoArtifacts = craneLib.buildDepsOnly commonArgs;
+        in
+        {
+          magic-nix-cache = craneLib.buildPackage (commonArgs // {
+            inherit cargoArtifacts;
+          });
+        };
+
+      packages = forEachSupportedSystem ({ pkgs, ... }: rec {
+        magic-nix-cache = pkgs.magic-nix-cache;
         default = magic-nix-cache;
@@ -75,12 +94,18 @@
           createChain 200 startFile;
       });

-      devShells = forEachSupportedSystem ({ pkgs, cranePkgs, lib }: {
+      devShells = forEachSupportedSystem ({ system, pkgs }: {
         default = pkgs.mkShell {
-          inputsFrom = [ cranePkgs.magic-nix-cache ];
           packages = with pkgs; [
+            rustc
+            cargo
+            clippy
+            rustfmt
+            rust-analyzer
+
+            inputs.nix.packages.${stdenv.system}.default # for linking attic
+            boost # for linking attic
             bashInteractive
-            cranePkgs.rustNightly
             pkg-config

             cargo-bloat
@@ -89,109 +114,11 @@
             cargo-watch
             bacon

-            age
-          ] ++ lib.optionals pkgs.stdenv.isDarwin (with pkgs.darwin.apple_sdk.frameworks; [
-            SystemConfiguration
-          ]);
-
-          NIX_CFLAGS_LINK = lib.optionalString pkgs.stdenv.isDarwin "-lc++abi";
-        };
-
-        /*
-        cross = pkgs.mkShell ({
-          inputsFrom = [ cranePkgs.magic-nix-cache ];
-          packages = with pkgs; [
-            bashInteractive
-            cranePkgs.rustNightly
-
-            cargo-bloat
-            cargo-edit
-            cargo-udeps
-            cargo-watch
-
             age
           ];
-          shellHook =
-            let
-              crossSystems = lib.filter (s: s != pkgs.system) (builtins.attrNames cranePkgs.crossPlatforms);
-            in
-            ''
-              # Returns compiler environment variables for a platform
-              #
-              # getTargetFlags "suffixSalt" "nativeBuildInputs" "buildInputs"
-              getTargetFlags() {
-                # Here we only call the setup-hooks of nativeBuildInputs.
-                #
-                # What's off-limits for us:
-                #
-                # - findInputs
-                # - activatePackage
-                # - Other functions in stdenv setup that depend on the private accumulator variables
-                (
-                  suffixSalt="$1"
-                  nativeBuildInputs="$2"
-                  buildInputs="$3"

-                  # Offsets for the nativeBuildInput (e.g., gcc)
+          RUST_SRC_PATH = "${pkgs.rustPlatform.rustcSrc}/library";
-                  hostOffset=-1
-                  targetOffset=0
-
-                  # In stdenv, the hooks are first accumulated before being called.
-                  # Here we call them immediately
-                  addEnvHooks() {
-                    local depHostOffset="$1"
-                    # For simplicity, we only call the hook on buildInputs
-                    for pkg in $buildInputs; do
-                      depTargetOffset=1
-                      $2 $pkg
-                    done
-                  }
-
-                  unset _PATH
-                  unset NIX_CFLAGS_COMPILE
-                  unset NIX_LDFLAGS
-
-                  # For simplicity, we only call the setup-hooks of nativeBuildInputs
-                  for nbi in $nativeBuildInputs; do
-                    addToSearchPath _PATH "$nbi/bin"
-
-                    if [ -e "$nbi/nix-support/setup-hook" ]; then
-                      source "$nbi/nix-support/setup-hook"
-                    fi
-                  done
-
-                  echo "export NIX_CFLAGS_COMPILE_''${suffixSalt}='$NIX_CFLAGS_COMPILE'"
-                  echo "export NIX_LDFLAGS_''${suffixSalt}='$NIX_LDFLAGS'"
-                  echo "export PATH=$PATH''${_PATH+:$_PATH}"
-                )
-              }
-
-              target_flags=$(mktemp)
-              ${lib.concatMapStrings (system: let
-                crossPlatform = cranePkgs.crossPlatforms.${system};
-              in ''
-                getTargetFlags \
-                  "${crossPlatform.cc.suffixSalt}" \
-                  "${crossPlatform.cc} ${crossPlatform.cc.bintools}" \
-                  "${builtins.concatStringsSep " " (crossPlatform.buildInputs ++ crossPlatform.pkgs.stdenv.defaultBuildInputs)}" >$target_flags
-                . $target_flags
-              '') crossSystems}
-              rm $target_flags
-
-              # Suffix flags for current system as well
-              export NIX_CFLAGS_COMPILE_${pkgs.stdenv.cc.suffixSalt}="$NIX_CFLAGS_COMPILE"
-              export NIX_LDFLAGS_${pkgs.stdenv.cc.suffixSalt}="$NIX_LDFLAGS"
-              unset NIX_CFLAGS_COMPILE
-              unset NIX_LDFLAGS
-            '';
-        } // cranePkgs.cargoCrossEnvs);
-
-        keygen = pkgs.mkShellNoCC {
-          packages = with pkgs; [
-            age
-          ];
         };
-        */
       });
     };
 }
```
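With `crane.nix` gone, the package is now exposed through `overlays.default` and re-exported under `packages`. A usage sketch with the standard Nix CLI (an illustration of consuming the restructured flake, not commands from this PR):

```shell
# Build the crane-based package that the overlay defines
nix build .#magic-nix-cache

# Enter the dev shell, which now lists rustc/cargo/clippy/rustfmt directly
nix develop
```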
**Cargo manifest** (file name not captured in this view; the dependency set matches the `gha-cache` crate's `Cargo.toml`):

```diff
@@ -11,12 +11,12 @@ derivative = { version = "2.2.0", default-features = false }
 futures = { version = "0.3.28", default-features = false, features = ["alloc"] }
 hex = "0.4.3"
 rand = { version = "0.8.5", default-features = false, features = ["std", "std_rng"] }
-reqwest = { version = "0.11.17", default-features = false, features = ["json", "rustls-tls-native-roots", "stream", "trust-dns"] }
+reqwest = { version = "0.12.5", default-features = false, features = ["json", "rustls-tls-native-roots", "stream", "trust-dns"] }
 serde = { version = "1.0.162", default-features = false, features = ["derive"] }
 serde_json = { version = "1.0.96", default-features = false }
 sha2 = { version = "0.10.6", default-features = false }
 thiserror = "1.0.40"
-tokio = { version = "1.28.0", default-features = false, features = ["io-util"] }
+tokio = { version = "1.44.2", default-features = false, features = ["io-util"] }
 tracing = { version = "0.1.37", default-features = false }
 unicode-bom = "2.0.2"
```
**API client module** (file name not captured in this view; the contents match the `Api` type in the `gha-cache` crate):

```diff
@@ -48,6 +48,8 @@ const MAX_CONCURRENCY: usize = 4;

 type Result<T> = std::result::Result<T, Error>;

+pub type CircuitBreakerTrippedCallback = Arc<Box<dyn Fn() + Send + Sync>>;
+
 /// An API error.
 #[derive(Error, Debug)]
 pub enum Error {
@@ -75,14 +77,13 @@ pub enum Error {
         info: ApiErrorInfo,
     },

-    #[error("I/O error: {0}")]
-    IoError(#[from] std::io::Error),
+    #[error("I/O error: {0}, context: {1}")]
+    IoError(std::io::Error, String),

     #[error("Too many collisions")]
     TooManyCollisions,
 }

-#[derive(Debug)]
 pub struct Api {
     /// Credentials to access the cache.
     credentials: Credentials,
@@ -104,6 +105,8 @@ pub struct Api {

     circuit_breaker_429_tripped: Arc<AtomicBool>,

+    circuit_breaker_429_tripped_callback: CircuitBreakerTrippedCallback,
+
     /// Backend request statistics.
     #[cfg(debug_assertions)]
     stats: RequestStats,
@@ -116,7 +119,7 @@ pub struct FileAllocation(CacheId);
 /// The ID of a cache.
 #[derive(Debug, Clone, Copy, Serialize, Deserialize)]
 #[serde(transparent)]
-struct CacheId(pub i32);
+struct CacheId(pub i64);

 /// An API error.
 #[derive(Debug, Clone)]
@@ -242,7 +245,10 @@ impl fmt::Display for ApiErrorInfo {
 }

 impl Api {
-    pub fn new(credentials: Credentials) -> Result<Self> {
+    pub fn new(
+        credentials: Credentials,
+        circuit_breaker_429_tripped_callback: CircuitBreakerTrippedCallback,
+    ) -> Result<Self> {
         let mut headers = HeaderMap::new();
         let auth_header = {
             let mut h = HeaderValue::from_str(&format!("Bearer {}", credentials.runtime_token))
@@ -273,6 +279,7 @@ impl Api {
             client,
             concurrency_limit: Arc::new(Semaphore::new(MAX_CONCURRENCY)),
             circuit_breaker_429_tripped: Arc::new(AtomicBool::from(false)),
+            circuit_breaker_429_tripped_callback,
             #[cfg(debug_assertions)]
             stats: Default::default(),
         })
@@ -345,8 +352,9 @@ impl Api {
         let mut futures = Vec::new();
         loop {
             let buf = BytesMut::with_capacity(CHUNK_SIZE);
-            let chunk = read_chunk_async(&mut stream, buf).await?;
+            let chunk = read_chunk_async(&mut stream, buf)
+                .await
+                .map_err(|e| Error::IoError(e, "Reading a chunk during upload".to_string()))?;
             if chunk.is_empty() {
                 offset += chunk.len();
                 break;
@@ -365,10 +373,15 @@ impl Api {
             let client = self.client.clone();
             let concurrency_limit = self.concurrency_limit.clone();
             let circuit_breaker_429_tripped = self.circuit_breaker_429_tripped.clone();
+            let circuit_breaker_429_tripped_callback =
+                self.circuit_breaker_429_tripped_callback.clone();
             let url = self.construct_url(&format!("caches/{}", allocation.0 .0));

             tokio::task::spawn(async move {
-                let permit = concurrency_limit.acquire().await.unwrap();
+                let permit = concurrency_limit
+                    .acquire()
+                    .await
+                    .expect("failed to acquire concurrency semaphore permit");

                 tracing::trace!(
                     "Starting uploading chunk {}-{}",
@@ -398,7 +411,8 @@ impl Api {

                 drop(permit);

-                circuit_breaker_429_tripped.check_result(&r);
+                circuit_breaker_429_tripped
+                    .check_result(&r, &circuit_breaker_429_tripped_callback);

                 r
             })
@@ -410,7 +424,9 @@ impl Api {
         future::join_all(futures)
             .await
             .into_iter()
-            .try_for_each(|join_result| join_result.unwrap())?;
+            .try_for_each(|join_result| {
+                join_result.expect("failed collecting a join result during parallel upload")
+            })?;

         tracing::debug!("Received all chunks for cache {:?}", allocation.0);

@@ -459,7 +475,8 @@ impl Api {
             .check_json()
             .await;

-        self.circuit_breaker_429_tripped.check_result(&res);
+        self.circuit_breaker_429_tripped
+            .check_result(&res, &self.circuit_breaker_429_tripped_callback);

         match res {
             Ok(entry) => Ok(Some(entry)),
@@ -502,7 +519,8 @@ impl Api {
             .check_json()
             .await;

-        self.circuit_breaker_429_tripped.check_result(&res);
+        self.circuit_breaker_429_tripped
+            .check_result(&res, &self.circuit_breaker_429_tripped_callback);

         res
     }
@@ -529,7 +547,8 @@ impl Api {
             .check()
             .await
         {
-            self.circuit_breaker_429_tripped.check_err(&e);
+            self.circuit_breaker_429_tripped
+                .check_err(&e, &self.circuit_breaker_429_tripped_callback);
             return Err(e);
         }

@@ -537,10 +556,13 @@ impl Api {
     }

     fn construct_url(&self, resource: &str) -> String {
-        format!(
-            "{}/_apis/artifactcache/{}",
-            self.credentials.cache_url, resource
-        )
+        let mut url = self.credentials.cache_url.clone();
+        if !url.ends_with('/') {
+            url.push('/');
+        }
+        url.push_str("_apis/artifactcache/");
+        url.push_str(resource);
+        url
     }
 }

@@ -601,25 +623,34 @@ async fn handle_error(res: reqwest::Response) -> Error {
 }

 trait AtomicCircuitBreaker {
-    fn check_err(&self, e: &Error);
-    fn check_result<T>(&self, r: &std::result::Result<T, Error>);
+    fn check_err(&self, e: &Error, callback: &CircuitBreakerTrippedCallback);
+    fn check_result<T>(
+        &self,
+        r: &std::result::Result<T, Error>,
+        callback: &CircuitBreakerTrippedCallback,
+    );
 }

 impl AtomicCircuitBreaker for AtomicBool {
-    fn check_result<T>(&self, r: &std::result::Result<T, Error>) {
+    fn check_result<T>(
+        &self,
+        r: &std::result::Result<T, Error>,
+        callback: &CircuitBreakerTrippedCallback,
+    ) {
         if let Err(ref e) = r {
-            self.check_err(e)
+            self.check_err(e, callback)
         }
     }

-    fn check_err(&self, e: &Error) {
+    fn check_err(&self, e: &Error, callback: &CircuitBreakerTrippedCallback) {
         if let Error::ApiError {
             status: reqwest::StatusCode::TOO_MANY_REQUESTS,
-            info: ref _info,
+            ..
         } = e
         {
             tracing::info!("Disabling GitHub Actions Cache due to 429: Too Many Requests");
             self.store(true, Ordering::Relaxed);
+            callback();
         }
     }
 }
```
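The new `Api::new` signature threads a caller-supplied callback through every circuit-breaker check, so the embedding program can react when GitHub's cache starts returning 429s. A hypothetical call site (not taken from this diff; `credentials` stands in for a `Credentials` value loaded elsewhere):

```rust
use std::sync::Arc;

// Hypothetical: wire a notification into the 429 circuit breaker.
// The callback fires once the breaker trips and uploads are disabled.
let callback: CircuitBreakerTrippedCallback = Arc::new(Box::new(|| {
    eprintln!("GitHub Actions Cache returned 429; disabling further uploads");
}));

let api = Api::new(credentials, callback)?;
```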
**Cargo manifest** (file name not captured in this view; the `gha-cache` path dependency matches the `magic-nix-cache` crate's `Cargo.toml`):

```diff
@@ -7,11 +7,12 @@ license = "Apache-2.0"
 [dependencies]
 gha-cache = { path = "../gha-cache" }

-axum = { version = "0.6.18", default-features = false, features = [
+axum = { version = "0.7.5", default-features = false, features = [
     "json",
     "tokio",
+    "http2",
+    "macros"
 ] }
-axum-macros = "0.3.7"
 clap = { version = "4.2.7", default-features = false, features = [
     "std",
     "derive",
@@ -26,19 +27,20 @@ tracing-subscriber = { version = "0.3.17", default-features = false, features =
     "tracing-log",
     "smallvec",
 ] }
-tower-http = { version = "0.4.0", features = ["trace"] }
+tower-http = { version = "0.5.2", features = ["trace"] }
 serde = { version = "1.0.162", features = ["derive"] }
 serde_json = { version = "1.0.96", default-features = false }
 thiserror = "1.0.40"
-tokio-stream = { version = "0.1.14", default-features = false }
-tokio-util = { version = "0.7.8", features = ["io", "compat"] }
+tokio-stream = { version = "0.1.15", default-features = false }
+tokio-util = { version = "0.7.11", features = ["io", "compat"] }
 daemonize = "0.5.0"
 is_ci = "1.1.1"
 sha2 = { version = "0.10.6", default-features = false }
-reqwest = { version = "0.11.17", default-features = false, features = [
+reqwest = { version = "0.12.5", default-features = false, features = [
     "blocking",
     "rustls-tls-native-roots",
     "trust-dns",
+    "json"
 ] }
 netrc-rs = "0.1.2"
 attic = { git = "https://github.com/DeterminateSystems/attic", branch = "fixups-for-magic-nix-cache" }
@@ -51,8 +53,13 @@ uuid = { version = "1.4.0", features = ["serde", "v7", "rand", "std"] }
 futures = "0.3"
 async-compression = "0.4"
 tracing-appender = "0.2.3"
+http = "1.0"
+http-body-util = "0.1"
+hyper = { version = "1.0.0", features = ["full"] }
+hyper-util = { version = "0.1", features = ["tokio", "server-auto", "http1"] }
+xdg = { version = "2.5.2" }

 [dependencies.tokio]
-version = "1.28.0"
+version = "1.44.2"
 default-features = false
 features = ["fs", "macros", "process", "rt", "rt-multi-thread", "sync"]
```
@@ -4,7 +4,6 @@
 
 use attic::nix_store::StorePath;
 use axum::{extract::Extension, routing::post, Json, Router};
-use axum_macros::debug_handler;
 use serde::{Deserialize, Serialize};
 
 use super::State;

@@ -30,7 +29,6 @@ pub fn get_router() -> Router {
 }
 
 /// Record existing paths.
-#[debug_handler]
 async fn workflow_start(Extension(state): Extension<State>) -> Result<Json<WorkflowStartResponse>> {
     tracing::info!("Workflow started");
     let reply = if let Some(original_paths) = &state.original_paths {

@@ -101,20 +99,27 @@ async fn workflow_finish(
         gha_cache.shutdown().await?;
     }
 
+    if let Some(attic_state) = state.flakehub_state.write().await.take() {
+        tracing::info!("Waiting for FlakeHub cache uploads to finish");
+        let paths = attic_state.push_session.wait().await?;
+
+        let paths = paths.keys().map(|s| s.name()).collect::<Vec<_>>();
+
+        tracing::info!(?paths, "FlakeHub Cache uploads completed");
+    } else {
+        tracing::info!("FlakeHub cache is not enabled, not uploading anything to it");
+    }
+
     if let Some(sender) = state.shutdown_sender.lock().await.take() {
         sender
             .send(())
             .map_err(|_| Error::Internal("Sending shutdown server message".to_owned()))?;
     }
 
-    if let Some(attic_state) = state.flakehub_state.write().await.take() {
-        tracing::info!("Waiting for FlakeHub cache uploads to finish");
-        let _paths = attic_state.push_session.wait().await?;
-    }
-
     // NOTE(cole-h): see `init_logging`
     if let Some(logfile) = &state.logfile {
-        let logfile_contents = std::fs::read_to_string(logfile)?;
+        let logfile_contents = std::fs::read_to_string(logfile)
+            .map_err(|e| crate::error::Error::Io(e, format!("Reading {}", logfile.display())))?;
         println!("Every log line throughout the lifetime of the program:");
         println!("\n{logfile_contents}\n");
     }

@@ -149,7 +154,7 @@ async fn post_enqueue_paths(
     Ok(Json(EnqueuePathsResponse {}))
 }
 
-async fn enqueue_paths(state: &State, store_paths: Vec<StorePath>) -> Result<()> {
+pub async fn enqueue_paths(state: &State, store_paths: Vec<StorePath>) -> Result<()> {
     if let Some(gha_cache) = &state.gha_cache {
         gha_cache
             .enqueue_paths(state.store.clone(), store_paths.clone())

@@ -1,14 +1,12 @@
 //! Binary Cache API.
 
-use std::io;
-
 use axum::{
-    extract::{BodyStream, Extension, Path},
+    extract::{Extension, Path},
     response::Redirect,
     routing::{get, put},
     Router,
 };
-use tokio_stream::StreamExt;
+use futures::StreamExt as _;
 use tokio_util::io::StreamReader;
 
 use super::State;

@@ -79,7 +77,7 @@ async fn get_narinfo(
 async fn put_narinfo(
     Extension(state): Extension<State>,
     Path(path): Path<String>,
-    body: BodyStream,
+    body: axum::body::Body,
 ) -> Result<()> {
     let components: Vec<&str> = path.splitn(2, '.').collect();
 

@@ -96,9 +94,13 @@ async fn put_narinfo(
     let store_path_hash = components[0].to_string();
     let key = format!("{}.narinfo", store_path_hash);
     let allocation = gha_cache.api.allocate_file_with_random_suffix(&key).await?;
 
+    let body_stream = body.into_data_stream();
     let stream = StreamReader::new(
-        body.map(|r| r.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))),
+        body_stream
+            .map(|r| r.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))),
     );
 
     gha_cache.api.upload_file(allocation, stream).await?;
     state.metrics.narinfos_uploaded.incr();
 

@@ -135,7 +137,7 @@ async fn get_nar(Extension(state): Extension<State>, Path(path): Path<String>) -
 async fn put_nar(
     Extension(state): Extension<State>,
     Path(path): Path<String>,
-    body: BodyStream,
+    body: axum::body::Body,
 ) -> Result<()> {
     let gha_cache = state.gha_cache.as_ref().ok_or(Error::GHADisabled)?;
 

@@ -143,9 +145,13 @@ async fn put_nar(
         .api
         .allocate_file_with_random_suffix(&path)
         .await?;
 
+    let body_stream = body.into_data_stream();
     let stream = StreamReader::new(
-        body.map(|r| r.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))),
+        body_stream
+            .map(|r| r.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))),
     );
 
     gha_cache.api.upload_file(allocation, stream).await?;
     state.metrics.nars_uploaded.incr();
 
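These handler changes follow from the axum 0.6 → 0.7 upgrade in `Cargo.toml`: the `BodyStream` extractor is gone, so handlers accept `axum::body::Body` and adapt it to an `AsyncRead` themselves. A minimal standalone sketch of the conversion pattern used in both `put_narinfo` and `put_nar`:

```rust
use axum::body::Body;
use futures::StreamExt as _;
use tokio_util::io::StreamReader;

// Turn an axum 0.7 request body into an AsyncRead suitable for streaming uploads.
fn body_to_reader(body: Body) -> impl tokio::io::AsyncRead {
    StreamReader::new(body.into_data_stream().map(|chunk| {
        chunk.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))
    }))
}
```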
@@ -19,8 +19,8 @@ pub enum Error {
     #[error("Bad Request")]
     BadRequest,
 
-    #[error("I/O error: {0}")]
-    Io(#[from] std::io::Error),
+    #[error("I/O error: {0}. Context: {1}")]
+    Io(std::io::Error, String),
 
     #[error("GHA cache is disabled")]
     GHADisabled,

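Dropping `#[from]` is the point of this change: with no blanket `From<std::io::Error>`, a bare `?` on an I/O result no longer compiles, forcing each call site to attach a human-readable context string. A sketch of the resulting pattern (it assumes the `Error` enum above is in scope):

```rust
// Every I/O error now carries what the program was doing when it failed.
fn read_log(logfile: &std::path::Path) -> Result<String, Error> {
    std::fs::read_to_string(logfile)
        .map_err(|e| Error::Io(e, format!("Reading {}", logfile.display())))
}
```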
@@ -1,5 +1,6 @@
 use crate::env::Environment;
 use crate::error::{Error, Result};
+use crate::DETERMINATE_NETRC_PATH;
 use anyhow::Context;
 use attic::cache::CacheName;
 use attic::nix_store::{NixStore, StorePath};

@@ -9,10 +10,12 @@ use attic_client::{
     config::ServerConfig,
     push::{PushConfig, Pusher},
 };
 
 use reqwest::header::HeaderValue;
 use reqwest::Url;
 use serde::Deserialize;
-use std::path::Path;
+use std::os::unix::fs::MetadataExt;
+use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use tokio::fs::File;
 use tokio::io::{AsyncReadExt, AsyncWriteExt};

@@ -31,64 +34,21 @@ pub struct State {
 pub async fn init_cache(
     environment: Environment,
     flakehub_api_server: &Url,
-    flakehub_api_server_netrc: &Path,
     flakehub_cache_server: &Url,
-    flakehub_flake_name: Option<String>,
+    flakehub_flake_name: &Option<String>,
     store: Arc<NixStore>,
+    auth_method: &super::FlakeHubAuthSource,
 ) -> Result<State> {
     // Parse netrc to get the credentials for api.flakehub.com.
-    let netrc = {
-        let mut netrc_file = File::open(flakehub_api_server_netrc).await.map_err(|e| {
-            Error::Internal(format!(
-                "Failed to open {}: {}",
-                flakehub_api_server_netrc.display(),
-                e
-            ))
-        })?;
-        let mut netrc_contents = String::new();
-        netrc_file
-            .read_to_string(&mut netrc_contents)
-            .await
-            .map_err(|e| {
-                Error::Internal(format!(
-                    "Failed to read {} contents: {}",
-                    flakehub_api_server_netrc.display(),
-                    e
-                ))
-            })?;
-        netrc_rs::Netrc::parse(netrc_contents, false).map_err(Error::Netrc)?
-    };
-
-    let flakehub_netrc_entry = {
-        netrc
-            .machines
-            .iter()
-            .find(|machine| {
-                machine.name.as_ref() == flakehub_api_server.host().map(|x| x.to_string()).as_ref()
-            })
-            .ok_or_else(|| Error::MissingCreds(flakehub_api_server.to_string()))?
-            .to_owned()
-    };
-
-    let flakehub_cache_server_hostname = flakehub_cache_server
-        .host()
-        .ok_or_else(|| Error::BadUrl(flakehub_cache_server.to_owned()))?
-        .to_string();
-
-    let flakehub_login = flakehub_netrc_entry.login.as_ref().ok_or_else(|| {
-        Error::Config(format!(
-            "netrc file does not contain a login for '{}'",
-            flakehub_api_server
-        ))
-    })?;
-
-    let flakehub_password = flakehub_netrc_entry.password.ok_or_else(|| {
-        Error::Config(format!(
-            "netrc file does not contain a password for '{}'",
-            flakehub_api_server
-        ))
-    })?;
+    let netrc_path = auth_method.as_path_buf();
+    let NetrcInfo {
+        netrc,
+        flakehub_cache_server_hostname,
+        flakehub_login,
+        flakehub_password,
+    } = extract_info_from_netrc(&netrc_path, flakehub_api_server, flakehub_cache_server).await?;
 
+    if let super::FlakeHubAuthSource::Netrc(netrc_path) = auth_method {
     // Append an entry for the FlakeHub cache server to netrc.
     if !netrc
         .machines

@@ -98,12 +58,12 @@ pub async fn init_cache(
         let mut netrc_file = tokio::fs::OpenOptions::new()
             .create(false)
             .append(true)
-            .open(flakehub_api_server_netrc)
+            .open(netrc_path)
             .await
             .map_err(|e| {
                 Error::Internal(format!(
                     "Failed to open {} for appending: {}",
-                    flakehub_api_server_netrc.display(),
+                    netrc_path.display(),
                     e
                 ))
             })?;

@@ -120,11 +80,12 @@ pub async fn init_cache(
             .map_err(|e| {
                 Error::Internal(format!(
                     "Failed to write credentials to {}: {}",
-                    flakehub_api_server_netrc.display(),
+                    netrc_path.display(),
                     e
                 ))
             })?;
     }
+    }
 
     let server_config = ServerConfig {
         endpoint: flakehub_cache_server.to_string(),

@@ -137,13 +98,9 @@ pub async fn init_cache(
 
     // Periodically refresh JWT in GitHub Actions environment
     if environment.is_github_actions() {
-        // NOTE(cole-h): This is a workaround -- at the time of writing, GitHub Actions JWTs are only
-        // valid for 5 minutes after being issued. FlakeHub uses these JWTs for authentication, which
-        // means that after those 5 minutes have passed and the token is expired, FlakeHub (and by
-        // extension FlakeHub Cache) will no longer allow requests using this token. However, GitHub
-        // gives us a way to repeatedly request new tokens, so we utilize that and refresh the token
-        // every 2 minutes (less than half of the lifetime of the token).
-        let netrc_path_clone = flakehub_api_server_netrc.to_path_buf();
+        match auth_method {
+            super::FlakeHubAuthSource::Netrc(path) => {
+                let netrc_path_clone = path.to_path_buf();
         let initial_github_jwt_clone = flakehub_password.clone();
         let flakehub_cache_server_clone = flakehub_cache_server.to_string();
         let api_clone = api.clone();

@@ -155,6 +112,27 @@ pub async fn init_cache(
             api_clone,
         ));
     }
+            crate::FlakeHubAuthSource::DeterminateNixd => {
+                let api_clone = api.clone();
+                let netrc_file = PathBuf::from(DETERMINATE_NETRC_PATH);
+                let flakehub_api_server_clone = flakehub_api_server.clone();
+                let flakehub_cache_server_clone = flakehub_cache_server.clone();
+
+                let initial_meta = tokio::fs::metadata(&netrc_file).await.map_err(|e| {
+                    Error::Io(e, format!("getting metadata of {}", netrc_file.display()))
+                })?;
+                let initial_inode = initial_meta.ino();
+
+                tokio::task::spawn(refresh_determinate_token_worker(
+                    netrc_file,
+                    initial_inode,
+                    flakehub_api_server_clone,
+                    flakehub_cache_server_clone,
+                    api_clone,
+                ));
+            }
+        }
+    }
 
     // Get the cache UUID for this project.
     let cache_name = {

@@ -232,6 +210,72 @@ pub async fn init_cache(
     Ok(state)
 }
 
+#[derive(Debug)]
+struct NetrcInfo {
+    netrc: netrc_rs::Netrc,
+    flakehub_cache_server_hostname: String,
+    flakehub_login: String,
+    flakehub_password: String,
+}
+
+#[tracing::instrument]
+async fn extract_info_from_netrc(
+    netrc_path: &Path,
+    flakehub_api_server: &Url,
+    flakehub_cache_server: &Url,
+) -> Result<NetrcInfo> {
+    let netrc = {
+        let mut netrc_file = File::open(netrc_path).await.map_err(|e| {
+            Error::Internal(format!("Failed to open {}: {}", netrc_path.display(), e))
+        })?;
+        let mut netrc_contents = String::new();
+        netrc_file
+            .read_to_string(&mut netrc_contents)
+            .await
+            .map_err(|e| {
+                Error::Internal(format!(
+                    "Failed to read {} contents: {}",
+                    netrc_path.display(),
+                    e
+                ))
+            })?;
+        netrc_rs::Netrc::parse(netrc_contents, false).map_err(Error::Netrc)?
+    };
+
+    let flakehub_netrc_entry = netrc
+        .machines
+        .iter()
+        .find(|machine| {
+            machine.name.as_ref() == flakehub_api_server.host().map(|x| x.to_string()).as_ref()
+        })
+        .ok_or_else(|| Error::MissingCreds(flakehub_api_server.to_string()))?
+        .to_owned();
+
+    let flakehub_cache_server_hostname = flakehub_cache_server
+        .host()
+        .ok_or_else(|| Error::BadUrl(flakehub_cache_server.to_owned()))?
+        .to_string();
+    let flakehub_login = flakehub_netrc_entry.login.ok_or_else(|| {
+        Error::Config(format!(
+            "netrc file does not contain a login for '{}'",
+            flakehub_api_server
+        ))
+    })?;
+    let flakehub_password = flakehub_netrc_entry.password.ok_or_else(|| {
+        Error::Config(format!(
+            "netrc file does not contain a password for '{}'",
+            flakehub_api_server
+        ))
+    })?;
+
+    Ok(NetrcInfo {
+        netrc,
+        flakehub_cache_server_hostname,
+        flakehub_login,
+        flakehub_password,
+    })
+}
+
 pub async fn enqueue_paths(state: &State, store_paths: Vec<StorePath>) -> Result<()> {
     state.push_session.queue_many(store_paths)?;
 

@@ -247,6 +291,13 @@ async fn refresh_github_actions_jwt_worker(
     flakehub_cache_server_clone: String,
     api: Arc<RwLock<ApiClient>>,
 ) -> Result<()> {
+    // NOTE(cole-h): This is a workaround -- at the time of writing, GitHub Actions JWTs are only
+    // valid for 5 minutes after being issued. FlakeHub uses these JWTs for authentication, which
+    // means that after those 5 minutes have passed and the token is expired, FlakeHub (and by
+    // extension FlakeHub Cache) will no longer allow requests using this token. However, GitHub
+    // gives us a way to repeatedly request new tokens, so we utilize that and refresh the token
+    // every 2 minutes (less than half of the lifetime of the token).
+
     // TODO(cole-h): this should probably be half of the token's lifetime ((exp - iat) / 2), but
     // getting this is nontrivial so I'm not going to do it until GitHub changes the lifetime and
     // breaks this.

@@ -365,3 +416,77 @@ async fn rewrite_github_actions_token(
 
     Ok(new_github_jwt_string)
 }
+
+#[tracing::instrument(skip_all)]
+async fn refresh_determinate_token_worker(
+    netrc_file: PathBuf,
+    mut inode: u64,
+    flakehub_api_server: Url,
+    flakehub_cache_server: Url,
+    api_clone: Arc<RwLock<ApiClient>>,
+) {
+    // NOTE(cole-h): This is a workaround -- at the time of writing, determinate-nixd handles the
+    // GitHub Actions JWT refreshing for us, which means we don't know when this will happen. At the
+    // moment, it does it roughly every 2 minutes (less than half of the total lifetime of the
+    // issued token).
+
+    loop {
+        tokio::time::sleep(std::time::Duration::from_secs(3)).await;
+
+        let meta = tokio::fs::metadata(&netrc_file)
+            .await
+            .map_err(|e| Error::Io(e, format!("getting metadata of {}", netrc_file.display())));
+
+        let Ok(meta) = meta else {
+            tracing::error!(e = ?meta);
+            continue;
+        };
+
+        let current_inode = meta.ino();
+
+        if current_inode == inode {
+            tracing::debug!("current inode is the same, file didn't change");
+            continue;
+        }
+
+        tracing::debug!("current inode is different, file changed");
+        inode = current_inode;
+
+        let flakehub_password = match extract_info_from_netrc(
+            &netrc_file,
+            &flakehub_api_server,
+            &flakehub_cache_server,
+        )
+        .await
+        {
+            Ok(NetrcInfo {
+                flakehub_password, ..
+            }) => flakehub_password,
+            Err(e) => {
+                tracing::error!(?e, "Failed to extract auth info from netrc");
+                continue;
+            }
+        };
+
+        let server_config = ServerConfig {
+            endpoint: flakehub_cache_server.to_string(),
+            token: Some(attic_client::config::ServerTokenConfig::Raw {
+                token: flakehub_password,
+            }),
+        };
+
+        let new_api = ApiClient::from_server_config(server_config.clone());
+
+        let Ok(new_api) = new_api else {
+            tracing::error!(e = ?new_api, "Failed to construct new ApiClient");
+            continue;
+        };
+
+        {
+            let mut api_client = api_clone.write().await;
+            *api_client = new_api;
+        }
+
+        tracing::debug!("Stored new token in API client, sleeping for 30s");
+    }
+}
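The new `refresh_determinate_token_worker` polls the netrc's inode rather than its contents. This assumes determinate-nixd replaces the file (write-then-rename) when it refreshes the token, so a changed inode is a cheap, reliable "token rotated" signal. A standalone sketch of that check:

```rust
use std::os::unix::fs::MetadataExt;
use std::path::Path;

// Returns true (and updates `last_inode`) when the file was replaced on disk.
async fn netrc_was_replaced(path: &Path, last_inode: &mut u64) -> std::io::Result<bool> {
    let inode = tokio::fs::metadata(path).await?.ino();
    if inode != *last_inode {
        *last_inode = inode;
        Ok(true)
    } else {
        Ok(false)
    }
}
```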
@@ -37,7 +37,15 @@ impl GhaCache {
         metrics: Arc<telemetry::TelemetryReport>,
         narinfo_negative_cache: Arc<RwLock<HashSet<String>>>,
     ) -> Result<GhaCache> {
-        let mut api = Api::new(credentials)?;
+        let cb_metrics = metrics.clone();
+        let mut api = Api::new(
+            credentials,
+            Arc::new(Box::new(move || {
+                cb_metrics
+                    .tripped_429
+                    .store(true, std::sync::atomic::Ordering::Relaxed);
+            })),
+        )?;
 
         if let Some(cache_version) = &cache_version {
             api.mutate_version(cache_version.as_bytes());

@@ -72,7 +80,9 @@ impl GhaCache {
             self.channel_tx
                 .send(Request::Shutdown)
                 .expect("Cannot send shutdown message");
-            worker_result.await.unwrap()
+            worker_result
+                .await
+                .expect("failed to read result from gha worker")
         } else {
             Ok(())
         }

@@ -189,7 +199,7 @@ async fn upload_path(
 
     let narinfo = path_info_to_nar_info(store.clone(), &path_info, format!("nar/{}", nar_path))
         .to_string()
-        .unwrap();
+        .expect("failed to convert path into to nar info");
 
     tracing::debug!("Uploading '{}'", narinfo_path);
 

@@ -224,7 +234,17 @@ fn path_info_to_nar_info(store: Arc<NixStore>, path_info: &ValidPathInfo, url: S
         references: path_info
             .references
             .iter()
-            .map(|r| r.file_name().unwrap().to_str().unwrap().to_owned())
+            .map(|r| {
+                r.file_name()
+                    .and_then(|n| n.to_str())
+                    .unwrap_or_else(|| {
+                        panic!(
+                            "failed to convert nar_info reference to string: {}",
+                            r.display()
+                        )
+                    })
+                    .to_owned()
+            })
             .collect(),
         system: None,
         deriver: None,

@@ -18,14 +18,14 @@ mod env;
 mod error;
 mod flakehub;
 mod gha;
+mod pbh;
 mod telemetry;
 mod util;
 
 use std::collections::HashSet;
-use std::fs::{self, create_dir_all};
+use std::fs::create_dir_all;
 use std::io::Write;
 use std::net::SocketAddr;
-use std::os::unix::fs::PermissionsExt;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
 

@@ -33,10 +33,9 @@ use ::attic::nix_store::NixStore;
 use anyhow::{anyhow, Context, Result};
 use axum::{extract::Extension, routing::get, Router};
 use clap::Parser;
-use tempfile::NamedTempFile;
+use serde::{Deserialize, Serialize};
 use tokio::fs::File;
 use tokio::io::AsyncWriteExt;
-use tokio::process::Command;
 use tokio::sync::{oneshot, Mutex, RwLock};
 use tracing_subscriber::filter::EnvFilter;
 use tracing_subscriber::layer::SubscriberExt;

@@ -44,18 +43,23 @@ use tracing_subscriber::util::SubscriberInitExt;
 
 use gha_cache::Credentials;
 
+const DETERMINATE_STATE_DIR: &str = "/nix/var/determinate";
+const DETERMINATE_NIXD_SOCKET_NAME: &str = "determinate-nixd.socket";
+const DETERMINATE_NETRC_PATH: &str = "/nix/var/determinate/netrc";
+
+// TODO(colemickens): refactor, move with other UDS stuff (or all PBH stuff) to new file
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(tag = "c", rename_all = "kebab-case")]
+pub struct BuiltPathResponseEventV1 {
+    pub drv: PathBuf,
+    pub outputs: Vec<PathBuf>,
+}
+
 type State = Arc<StateInner>;
 
 /// GitHub Actions-powered Nix binary cache
 #[derive(Parser, Debug)]
 struct Args {
-    /// JSON file containing credentials.
-    ///
-    /// If this is not specified, credentials will be loaded
-    /// from the environment.
-    #[arg(short = 'c', long)]
-    credentials_file: Option<PathBuf>,
-
     /// Address to listen on.
     ///
     /// FIXME: IPv6

@@ -87,31 +91,31 @@ struct Args {
     diagnostic_endpoint: String,
 
     /// The FlakeHub API server.
-    #[arg(long)]
-    flakehub_api_server: Option<reqwest::Url>,
+    #[arg(long, default_value = "https://api.flakehub.com")]
+    flakehub_api_server: reqwest::Url,
 
     /// The path of the `netrc` file that contains the FlakeHub JWT token.
     #[arg(long)]
     flakehub_api_server_netrc: Option<PathBuf>,
 
     /// The FlakeHub binary cache server.
-    #[arg(long)]
-    flakehub_cache_server: Option<reqwest::Url>,
+    #[arg(long, default_value = "https://cache.flakehub.com")]
+    flakehub_cache_server: reqwest::Url,
 
     #[arg(long)]
     flakehub_flake_name: Option<String>,
 
     /// The location of `nix.conf`.
-    #[arg(long)]
+    #[arg(long, default_value_os_t = default_nix_conf())]
     nix_conf: PathBuf,
 
     /// Whether to use the GHA cache.
     #[arg(long)]
-    use_gha_cache: bool,
+    use_gha_cache: Option<Option<CacheTrinary>>,
 
     /// Whether to use the FlakeHub binary cache.
     #[arg(long)]
-    use_flakehub: bool,
+    use_flakehub: Option<Option<CacheTrinary>>,
 
     /// URL to which to post startup notification.
     #[arg(long)]

@@ -126,15 +130,48 @@ struct Args {
     diff_store: bool,
 }
 
+#[derive(Debug, Clone, Copy, PartialEq, clap::ValueEnum)]
+pub enum CacheTrinary {
+    NoPreference,
+    Enabled,
+    Disabled,
+}
+
+impl From<Option<Option<CacheTrinary>>> for CacheTrinary {
+    fn from(b: Option<Option<CacheTrinary>>) -> Self {
+        match b {
+            None => CacheTrinary::NoPreference,
+            Some(None) => CacheTrinary::Enabled,
+            Some(Some(v)) => v,
+        }
+    }
+}
+
+#[derive(PartialEq, Clone, Copy)]
+pub enum Dnixd {
+    Available,
+    Missing,
+}
+
+impl From<bool> for Dnixd {
+    fn from(b: bool) -> Self {
+        if b {
+            Dnixd::Available
+        } else {
+            Dnixd::Missing
+        }
+    }
+}
+
 impl Args {
     fn validate(&self, environment: env::Environment) -> Result<(), error::Error> {
-        if environment.is_gitlab_ci() && self.use_gha_cache {
+        if environment.is_gitlab_ci() && self.github_cache_preference() == CacheTrinary::Enabled {
             return Err(error::Error::Config(String::from(
                 "the --use-gha-cache flag should not be applied in GitLab CI",
             )));
         }
 
-        if environment.is_gitlab_ci() && !self.use_flakehub {
+        if environment.is_gitlab_ci() && self.flakehub_preference() != CacheTrinary::Enabled {
             return Err(error::Error::Config(String::from(
                 "you must set --use-flakehub in GitLab CI",
             )));

@@ -142,6 +179,23 @@ impl Args {
 
         Ok(())
     }
+
+    fn github_cache_preference(&self) -> CacheTrinary {
+        self.use_gha_cache.into()
+    }
+
+    fn flakehub_preference(&self) -> CacheTrinary {
+        self.use_flakehub.into()
+    }
 }
 
+fn default_nix_conf() -> PathBuf {
+    xdg::BaseDirectories::new()
+        .with_context(|| "identifying XDG base directories")
+        .expect(
+            "Could not identify your home directory. Try setting the HOME environment variable.",
+        )
+        .get_config_file("nix/nix.conf")
+}
+
 /// The global server state.
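The switch from `bool` to `Option<Option<CacheTrinary>>` is what makes `--use-gha-cache` and `--use-flakehub` tri-state under clap: an absent flag, a bare flag, and a flag with an explicit value all parse differently. A minimal sketch (clap 4 derive):

```rust
use clap::Parser;

#[derive(Debug, Clone, Copy, PartialEq, clap::ValueEnum)]
enum CacheTrinary {
    NoPreference,
    Enabled,
    Disabled,
}

#[derive(Parser, Debug)]
struct Demo {
    #[arg(long)]
    use_flakehub: Option<Option<CacheTrinary>>,
}

fn main() {
    // no flag                  -> None                  (no preference)
    // --use-flakehub           -> Some(None)            (treated as Enabled)
    // --use-flakehub=disabled  -> Some(Some(Disabled))
    let d = Demo::parse_from(["demo", "--use-flakehub"]);
    assert_eq!(d.use_flakehub, Some(None));
}
```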
@@ -174,6 +228,26 @@ struct StateInner {
     original_paths: Option<Mutex<HashSet<PathBuf>>>,
 }
 
+#[derive(Debug, Clone)]
+pub(crate) enum FlakeHubAuthSource {
+    DeterminateNixd,
+    Netrc(PathBuf),
+}
+
+impl FlakeHubAuthSource {
+    pub(crate) fn as_path_buf(&self) -> PathBuf {
+        match &self {
+            Self::Netrc(path) => path.clone(),
+            Self::DeterminateNixd => {
+                let mut path = PathBuf::from(DETERMINATE_STATE_DIR);
+                path.push("netrc");
+
+                path
+            }
+        }
+    }
+}
+
 async fn main_cli() -> Result<()> {
     let guard = init_logging()?;
     let _tracing_guard = guard.appender_guard;

@@ -185,89 +259,131 @@ async fn main_cli() -> Result<()> {
 
     let metrics = Arc::new(telemetry::TelemetryReport::new());
 
-    if let Some(parent) = Path::new(&args.nix_conf).parent() {
+    let dnixd_uds_socket_dir: &Path = Path::new(&DETERMINATE_STATE_DIR);
+    let dnixd_uds_socket_path = dnixd_uds_socket_dir.join(DETERMINATE_NIXD_SOCKET_NAME);
+    let dnixd_available: Dnixd = dnixd_uds_socket_path.exists().into();
+
+    let nix_conf_path: PathBuf = args.nix_conf.clone();
+
+    // NOTE: we expect this to point to a user nix.conf
+    // we always open/append to it to be able to append the extra-substituter for github-actions cache
+    // but we don't write to it for initializing flakehub_cache unless dnixd is unavailable
+    if let Some(parent) = Path::new(&nix_conf_path).parent() {
         create_dir_all(parent).with_context(|| "Creating parent directories of nix.conf")?;
     }
 
     let mut nix_conf = std::fs::OpenOptions::new()
         .create(true)
         .append(true)
-        .open(&args.nix_conf)
+        .open(&nix_conf_path)
         .with_context(|| "Creating nix.conf")?;
 
+    // always enable fallback, first
+    nix_conf
+        .write_all(b"fallback = true\n")
+        .with_context(|| "Setting fallback in nix.conf")?;
+
     let store = Arc::new(NixStore::connect()?);
 
     let narinfo_negative_cache = Arc::new(RwLock::new(HashSet::new()));
 
-    let flakehub_state = if args.use_flakehub {
-        let flakehub_cache_server = args
-            .flakehub_cache_server
-            .ok_or_else(|| anyhow!("--flakehub-cache-server is required"))?;
-        let flakehub_api_server_netrc = args
-            .flakehub_api_server_netrc
-            .ok_or_else(|| anyhow!("--flakehub-api-server-netrc is required"))?;
-        let flakehub_flake_name = args.flakehub_flake_name;
+    let flakehub_auth_method: Option<FlakeHubAuthSource> = match (
+        args.flakehub_preference(),
+        &args.flakehub_api_server_netrc,
+        dnixd_available,
+    ) {
+        // User has explicitly pyassed --use-flakehub=disabled, so just straight up don't
+        (CacheTrinary::Disabled, _, _) => {
+            tracing::info!("Disabling FlakeHub cache.");
+            None
+        }
+
+        // User has no preference, did not pass a netrc, and determinate-nixd is not available
+        (CacheTrinary::NoPreference, None, Dnixd::Missing) => None,
+
+        // Use it when determinate-nixd is available, and let the user know what's going on
+        (pref, user_netrc_path, Dnixd::Available) => {
+            if pref == CacheTrinary::NoPreference {
+                tracing::info!("Enabling FlakeHub cache because determinate-nixd is available.");
+            }
+
+            if user_netrc_path.is_some() {
+                tracing::info!("Ignoring the user-specified --flakehub-api-server-netrc, in favor of the determinate-nixd netrc");
+            }
+
+            Some(FlakeHubAuthSource::DeterminateNixd)
+        }
+
+        // When determinate-nixd is not available, but the user specified a netrc
+        (_, Some(path), Dnixd::Missing) => {
+            if path.exists() {
+                Some(FlakeHubAuthSource::Netrc(path.to_owned()))
+            } else {
+                tracing::debug!(path = %path.display(), "User-provided netrc does not exist");
+                None
+            }
+        }
+
+        // User explicitly turned on flakehub cache, but we have no netrc and determinate-nixd is not present
+        (CacheTrinary::Enabled, None, Dnixd::Missing) => {
+            return Err(anyhow!(
+                "--flakehub-api-server-netrc is required when determinate-nixd is unavailable"
+            ));
+        }
+    };
+
+    let flakehub_state = if let Some(auth_method) = flakehub_auth_method {
+        let flakehub_cache_server = &args.flakehub_cache_server;
+
+        let flakehub_api_server = &args.flakehub_api_server;
+
+        let flakehub_flake_name = &args.flakehub_flake_name;
 
         match flakehub::init_cache(
             environment,
-            &args
-                .flakehub_api_server
-                .ok_or_else(|| anyhow!("--flakehub-api-server is required"))?,
-            &flakehub_api_server_netrc,
-            &flakehub_cache_server,
+            flakehub_api_server,
+            flakehub_cache_server,
             flakehub_flake_name,
             store.clone(),
+            &auth_method,
         )
         .await
         {
             Ok(state) => {
+                if let FlakeHubAuthSource::Netrc(ref path) = auth_method {
                 nix_conf
                     .write_all(
                         format!(
                             "extra-substituters = {}?trusted=1\nnetrc-file = {}\n",
                             &flakehub_cache_server,
-                            flakehub_api_server_netrc.display()
+                            path.display()
                         )
                         .as_bytes(),
                     )
                     .with_context(|| "Writing to nix.conf")?;
+                }
 
                 tracing::info!("FlakeHub cache is enabled.");
                 Some(state)
             }
             Err(err) => {
-                tracing::debug!("FlakeHub cache initialization failed: {}", err);
+                tracing::error!("FlakeHub cache initialization failed: {}. Unable to authenticate to FlakeHub. Individuals must register at FlakeHub.com; Organizations must create an organization at FlakeHub.com.", err);
+                println!("::error title={{FlakeHub: Unauthenticated}}::{{Unable to authenticate to FlakeHub. Individuals must register at FlakeHub.com; Organizations must create an organization at FlakeHub.com.}}");
                 None
             }
         }
     } else {
-        tracing::info!(
-            "FlakeHub cache is disabled, as the `use-flakehub` setting is set to `false`."
-        );
+        tracing::info!("FlakeHub cache is disabled.");
         None
     };
 
-    let gha_cache = if args.use_gha_cache {
-        let credentials = if let Some(credentials_file) = &args.credentials_file {
-            tracing::info!("Loading credentials from {:?}", credentials_file);
-            let bytes = fs::read(credentials_file).with_context(|| {
-                format!(
-                    "Failed to read credentials file '{}'",
-                    credentials_file.display()
-                )
-            })?;
-
-            serde_json::from_slice(&bytes).with_context(|| {
-                format!(
-                    "Failed to deserialize credentials file '{}'",
-                    credentials_file.display()
-                )
-            })?
-        } else {
-            tracing::info!("Loading credentials from environment");
-            Credentials::load_from_env()
-                .with_context(|| "Failed to load credentials from environment (see README.md)")?
-        };
+    let gha_cache = if (args.github_cache_preference() == CacheTrinary::Enabled)
+        || (args.github_cache_preference() == CacheTrinary::NoPreference
+            && flakehub_state.is_none())
+    {
+        tracing::info!("Loading credentials from environment");
+        let credentials = Credentials::load_from_env()
+            .with_context(|| "Failed to load credentials from environment (see README.md)")?;
 
         let gha_cache = gha::GhaCache::new(
             credentials,
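The `gha_cache` condition above encodes the fallback policy: explicit opt-in always wins, and with no stated preference the GHA cache is used only when the FlakeHub cache failed to come up. As a pure function, a sketch:

```rust
#[derive(PartialEq, Clone, Copy)]
enum CacheTrinary {
    NoPreference,
    Enabled,
    Disabled,
}

// Mirrors the `if` condition above: explicit Enabled, or NoPreference
// with no working FlakeHub cache.
fn gha_cache_enabled(pref: CacheTrinary, flakehub_cache_up: bool) -> bool {
    pref == CacheTrinary::Enabled
        || (pref == CacheTrinary::NoPreference && !flakehub_cache_up)
}

fn main() {
    assert!(gha_cache_enabled(CacheTrinary::NoPreference, false));
    assert!(!gha_cache_enabled(CacheTrinary::NoPreference, true));
    assert!(!gha_cache_enabled(CacheTrinary::Disabled, false));
}
```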
@@ -292,66 +408,6 @@ async fn main_cli() -> Result<()> {
         None
     };
 
-    /* Write the post-build hook script. Note that the shell script
-     * ignores errors, to avoid the Nix build from failing. */
-    let post_build_hook_script = {
-        let mut file = NamedTempFile::with_prefix("magic-nix-cache-build-hook-")
-            .with_context(|| "Creating a temporary file for the post-build hook")?;
-        file.write_all(
-            format!(
-                // NOTE(cole-h): We want to exit 0 even if the hook failed, otherwise it'll fail the
-                // build itself
-                "#! /bin/sh\nRUST_LOG=trace RUST_BACKTRACE=full {} --server {} || :\n",
-                std::env::current_exe()
-                    .with_context(|| "Getting the path of magic-nix-cache")?
-                    .display(),
-                args.listen
-            )
-            .as_bytes(),
-        )
-        .with_context(|| "Writing the post-build hook")?;
-        let path = file
-            .keep()
-            .with_context(|| "Keeping the post-build hook")?
-            .1;
-
-        fs::set_permissions(&path, fs::Permissions::from_mode(0o755))
-            .with_context(|| "Setting permissions on the post-build hook")?;
-
-        /* Copy the script to the Nix store so we know for sure that
-         * it's accessible to the Nix daemon, which might have a
-         * different /tmp from us. */
-        let res = Command::new("nix")
-            .args([
-                "--extra-experimental-features",
-                "nix-command",
-                "store",
-                "add-path",
-                &path.display().to_string(),
-            ])
-            .output()
-            .await?;
-        if res.status.success() {
-            tokio::fs::remove_file(path).await?;
-            PathBuf::from(String::from_utf8_lossy(&res.stdout).trim())
-        } else {
-            path
-        }
-    };
-
-    /* Update nix.conf. */
-    nix_conf
-        .write_all(
-            format!(
-                "fallback = true\npost-build-hook = {}\n",
-                post_build_hook_script.display()
-            )
-            .as_bytes(),
-        )
-        .with_context(|| "Writing to nix.conf")?;
-
-    drop(nix_conf);
-
     let diagnostic_endpoint = match args.diagnostic_endpoint.as_str() {
         "" => {
             tracing::info!("Diagnostics disabled.");
@@ -375,6 +431,16 @@ async fn main_cli() -> Result<()> {
         original_paths,
     });
 
+    if dnixd_available == Dnixd::Available {
+        tracing::info!("Subscribing to Determinate Nixd build events.");
+        crate::pbh::subscribe_uds_post_build_hook(dnixd_uds_socket_path, state.clone()).await?;
+    } else {
+        tracing::info!("Patching nix.conf to use a post-build-hook.");
+        crate::pbh::setup_legacy_post_build_hook(&args.listen, &mut nix_conf).await?;
+    }
+
+    drop(nix_conf);
+
     let app = Router::new()
         .route("/", get(root))
         .merge(api::get_router())
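The `pbh` module added at the end of this diff subscribes to determinate-nixd's `/events` endpoint over the unix socket and reads SSE-style frames. A simplified sketch of the frame handling, using a pared-down event struct (the real one also carries serde tagging attributes):

```rust
use serde::Deserialize;
use std::path::PathBuf;

#[derive(Debug, Deserialize)]
struct BuiltPathResponseEventV1 {
    drv: PathBuf,
    outputs: Vec<PathBuf>,
}

// Frames look like `data: {...json...}`; anything else (comments,
// keep-alives) is ignored, mirroring the handling in pbh.rs below.
fn parse_frame(frame: &[u8]) -> Option<BuiltPathResponseEventV1> {
    let payload = frame.strip_prefix(b"data: ")?;
    serde_json::from_slice(payload).ok()
}

fn main() {
    let frame = br#"data: {"drv":"/nix/store/x.drv","outputs":["/nix/store/y"]}"#;
    let event = parse_frame(frame).expect("valid frame");
    println!("{} output(s) for {}", event.outputs.len(), event.drv.display());
}
```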
@@ -424,14 +490,39 @@ async fn main_cli() -> Result<()> {
 
         tracing::debug!("Startup notification via file at {startup_notification_file_path:?}");
 
-        let mut notification_file = File::create(&startup_notification_file_path).await?;
-        notification_file.write_all(file_contents).await?;
+        if let Some(parent_dir) = startup_notification_file_path.parent() {
+            tokio::fs::create_dir_all(parent_dir)
+                .await
+                .with_context(|| {
+                    format!(
+                        "failed to create parent directory for startup notification file path: {}",
+                        startup_notification_file_path.display()
+                    )
+                })?;
+        }
+        let mut notification_file = File::create(&startup_notification_file_path)
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to create startup notification file to path: {}",
+                    startup_notification_file_path.display()
+                )
+            })?;
+        notification_file
+            .write_all(file_contents)
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to write startup notification file to path: {}",
+                    startup_notification_file_path.display()
+                )
+            })?;
 
         tracing::debug!("Created startup notification file at {startup_notification_file_path:?}");
     }
 
-    let ret = axum::Server::bind(&args.listen)
-        .serve(app.into_make_service())
+    let listener = tokio::net::TcpListener::bind(&args.listen).await?;
+    let ret = axum::serve(listener, app.into_make_service())
         .with_graceful_shutdown(async move {
             shutdown_receiver.await.ok();
             tracing::info!("Shutting down");
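The server startup rewrite is the standard axum 0.6 → 0.7 migration: `axum::Server` (a re-export of hyper 0.14's server) is gone, replaced by an explicit `tokio::net::TcpListener` handed to `axum::serve`. A minimal standalone sketch:

```rust
use axum::{routing::get, Router};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let app = Router::new().route("/", get(|| async { "ok" }));

    // axum 0.7: bind the listener yourself, then hand it to axum::serve().
    let listener = tokio::net::TcpListener::bind("127.0.0.1:3000").await?;
    axum::serve(listener, app.into_make_service()).await
}
```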
@@ -448,58 +539,10 @@ async fn main_cli() -> Result<()> {
     Ok(())
 }
 
-async fn post_build_hook(out_paths: &str) -> Result<()> {
-    #[derive(Parser, Debug)]
-    struct Args {
-        /// `magic-nix-cache` daemon to connect to.
-        #[arg(short = 'l', long, default_value = "127.0.0.1:3000")]
-        server: SocketAddr,
-    }
-
-    let args = Args::parse();
-
-    let store_paths: Vec<_> = out_paths
-        .split_whitespace()
-        .map(|s| s.trim().to_owned())
-        .collect();
-
-    let request = api::EnqueuePathsRequest { store_paths };
-
-    let response = reqwest::Client::new()
-        .post(format!("http://{}/api/enqueue-paths", &args.server))
-        .header(reqwest::header::CONTENT_TYPE, "application/json")
-        .body(
-            serde_json::to_string(&request)
-                .with_context(|| "Decoding the response from the magic-nix-cache server")?,
-        )
-        .send()
-        .await;
-
-    match response {
-        Ok(response) if !response.status().is_success() => Err(anyhow!(
-            "magic-nix-cache server failed to enqueue the push request: {}\n{}",
-            response.status(),
-            response
-                .text()
-                .await
-                .unwrap_or_else(|_| "<no response text>".to_owned()),
-        ))?,
-        Ok(response) => response
-            .json::<api::EnqueuePathsResponse>()
-            .await
-            .with_context(|| "magic-nix-cache-server didn't return a valid response")?,
-        Err(err) => {
-            Err(err).with_context(|| "magic-nix-cache server failed to send the enqueue request")?
-        }
-    };
-
-    Ok(())
-}
-
 #[tokio::main]
 async fn main() -> Result<()> {
     match std::env::var("OUT_PATHS") {
-        Ok(out_paths) => post_build_hook(&out_paths).await,
+        Ok(out_paths) => pbh::handle_legacy_post_build_hook(&out_paths).await,
         Err(_) => main_cli().await,
     }
 }
@@ -517,8 +560,16 @@ fn init_logging() -> Result<LogGuard> {
     let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| {
         #[cfg(debug_assertions)]
         return EnvFilter::new("info")
-            .add_directive("magic_nix_cache=debug".parse().unwrap())
-            .add_directive("gha_cache=debug".parse().unwrap());
+            .add_directive(
+                "magic_nix_cache=debug"
+                    .parse()
+                    .expect("failed to parse magix_nix_cache directive"),
+            )
+            .add_directive(
+                "gha_cache=debug"
+                    .parse()
+                    .expect("failed to parse gha_cahce directive"),
+            );
 
         #[cfg(not(debug_assertions))]
         return EnvFilter::new("info");
@@ -568,10 +619,10 @@ fn init_logging() -> Result<LogGuard> {
 }
 
 #[cfg(debug_assertions)]
-async fn dump_api_stats<B>(
+async fn dump_api_stats(
     Extension(state): Extension<State>,
-    request: axum::http::Request<B>,
+    request: axum::http::Request<axum::body::Body>,
-    next: axum::middleware::Next<B>,
+    next: axum::middleware::Next,
 ) -> axum::response::Response {
     if let Some(gha_cache) = &state.gha_cache {
         gha_cache.api.dump_stats();
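`dump_api_stats` tracks the axum 0.7 middleware signature: the request body type is now concrete and `Next` lost its generic parameter. A minimal sketch of a 0.7-style middleware function:

```rust
use axum::{body::Body, http::Request, middleware::Next, response::Response};

// axum 0.7 middleware: Request is concrete over axum::body::Body and Next
// is no longer generic over the body type (compare the 0.6 version above).
async fn log_requests(request: Request<Body>, next: Next) -> Response {
    tracing::debug!("{} {}", request.method(), request.uri());
    next.run(request).await
}
```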
241
magic-nix-cache/src/pbh.rs
Normal file
241
magic-nix-cache/src/pbh.rs
Normal file
|
@ -0,0 +1,241 @@
|
||||||
|
use std::io::Write as _;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::os::unix::fs::PermissionsExt as _;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use anyhow::anyhow;
|
||||||
|
use anyhow::Context as _;
|
||||||
|
use anyhow::Result;
|
||||||
|
use clap::Parser;
|
||||||
|
use futures::StreamExt as _;
|
||||||
|
use http_body_util::BodyExt as _;
|
||||||
|
use hyper_util::rt::TokioExecutor;
|
||||||
|
use hyper_util::rt::TokioIo;
|
||||||
|
use tempfile::NamedTempFile;
|
||||||
|
use tokio::net::UnixStream;
|
||||||
|
use tokio::process::Command;
|
||||||
|
|
||||||
|
use crate::BuiltPathResponseEventV1;
|
||||||
|
use crate::State;
|
||||||
|
|
||||||
|
pub async fn subscribe_uds_post_build_hook(
|
||||||
|
    dnixd_uds_socket_path: PathBuf,
    state: State,
) -> Result<()> {
    tokio::spawn(async move {
        let dnixd_uds_socket_path = &dnixd_uds_socket_path;
        loop {
            let Ok(socket_conn) = UnixStream::connect(dnixd_uds_socket_path).await else {
                tracing::error!("built-paths: failed to connect to determinate-nixd's socket");
                tokio::time::sleep(std::time::Duration::from_secs(10)).await;
                continue;
            };
            let stream = TokioIo::new(socket_conn);
            let executor: TokioExecutor = TokioExecutor::new();

            let sender_conn = hyper::client::conn::http2::handshake(executor, stream).await;

            let Ok((mut sender, conn)) = sender_conn else {
                tracing::error!("built-paths: failed to http2 handshake");
                continue;
            };

            // NOTE(colemickens): for now we just drop the joinhandle and let it keep running
            let _join_handle = tokio::task::spawn(async move {
                if let Err(err) = conn.await {
                    tracing::error!("Connection failed: {:?}", err);
                }
            });

            let request = http::Request::builder()
                .method(http::Method::GET)
                .uri("http://localhost/events")
                .body(axum::body::Body::empty());
            let Ok(request) = request else {
                tracing::error!("built-paths: failed to create request to subscribe");
                continue;
            };

            let response = sender.send_request(request).await;
            let response = match response {
                Ok(r) => r,
                Err(e) => {
                    tracing::error!("built-paths: failed to send subscription request: {:?}", e);
                    continue;
                }
            };
            let mut data = response.into_data_stream();

            while let Some(event_str) = data.next().await {
                let event_str = match event_str {
                    Ok(event) => event,
                    Err(e) => {
                        tracing::error!("built-paths: error while receiving: {}", e);
                        break;
                    }
                };

                let Some(event_str) = event_str.strip_prefix("data: ".as_bytes()) else {
                    tracing::debug!("built-paths subscription: ignoring non-data frame");
                    continue;
                };
                let Ok(event): core::result::Result<BuiltPathResponseEventV1, _> =
                    serde_json::from_slice(event_str)
                else {
                    tracing::error!(
                        "failed to decode built-path response as BuiltPathResponseEventV1"
                    );
                    continue;
                };

                let maybe_store_paths = event
                    .outputs
                    .iter()
                    .map(|path| {
                        state
                            .store
                            .follow_store_path(path)
                            .map_err(|_| anyhow!("failed to collect store paths"))
                    })
                    .collect::<Result<Vec<_>>>();

                let Ok(store_paths) = maybe_store_paths else {
                    tracing::error!(
                        "built-paths: encountered an error aggregating build store paths"
                    );
                    continue;
                };

                tracing::debug!("about to enqueue paths: {:?}", store_paths);
                if let Err(e) = crate::api::enqueue_paths(&state, store_paths).await {
                    tracing::error!(
                        "built-paths: failed to enqueue paths for drv ({}): {}",
                        event.drv.display(),
                        e
                    );
                    continue;
                }
            }
        }
    });

    Ok(())
}
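
Each frame on determinate-nixd's /events stream is handled SSE-style: only frames starting with `data: ` carry a payload, and the payload is JSON. A minimal sketch of that frame handling in isolation; ExampleEvent is a stand-in for BuiltPathResponseEventV1 (whose real definition lives in determinate-nixd's types), with fields chosen to mirror how the loop above uses the event (event.drv, event.outputs):

use serde::Deserialize;

// Stand-in for BuiltPathResponseEventV1; field names are illustrative only.
#[derive(Debug, Deserialize)]
struct ExampleEvent {
    drv: std::path::PathBuf,
    outputs: Vec<std::path::PathBuf>,
}

// Keep only `data: ` frames and decode their JSON payload; anything else
// (comments, keepalives) is skipped, as in the subscription loop above.
fn decode_frame(frame: &[u8]) -> Option<ExampleEvent> {
    let payload = frame.strip_prefix(b"data: ")?;
    serde_json::from_slice(payload).ok()
}
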
pub async fn setup_legacy_post_build_hook(
    listen: &SocketAddr,
    nix_conf: &mut std::fs::File,
) -> Result<()> {
    /* Write the post-build hook script. Note that the shell script
     * ignores errors, to avoid failing the Nix build. */
    let post_build_hook_script = {
        let mut file = NamedTempFile::with_prefix("magic-nix-cache-build-hook-")
            .with_context(|| "Creating a temporary file for the post-build hook")?;
        file.write_all(
            format!(
                // NOTE(cole-h): We want to exit 0 even if the hook failed, otherwise it'll fail the
                // build itself
                "#! /bin/sh\nRUST_LOG=trace RUST_BACKTRACE=full {} --server {} || :\n",
                std::env::current_exe()
                    .with_context(|| "Getting the path of magic-nix-cache")?
                    .display(),
                listen
            )
            .as_bytes(),
        )
        .with_context(|| "Writing the post-build hook")?;
        let path = file
            .keep()
            .with_context(|| "Keeping the post-build hook")?
            .1;

        std::fs::set_permissions(&path, std::fs::Permissions::from_mode(0o755))
            .with_context(|| "Setting permissions on the post-build hook")?;

        /* Copy the script to the Nix store so we know for sure that
         * it's accessible to the Nix daemon, which might have a
         * different /tmp from us. */
        let res = Command::new("nix")
            .args([
                "--extra-experimental-features",
                "nix-command",
                "store",
                "add-path",
                &path.display().to_string(),
            ])
            .output()
            .await
            .with_context(|| {
                format!(
                    "Running nix to add the post-build-hook to the store from {}",
                    path.display()
                )
            })?;
        if res.status.success() {
            tokio::fs::remove_file(&path).await.with_context(|| {
                format!(
                    "Cleaning up the temporary post-build-hook at {}",
                    path.display()
                )
            })?;
            PathBuf::from(String::from_utf8_lossy(&res.stdout).trim())
        } else {
            path
        }
    };

    /* Update nix.conf. */
    nix_conf
        .write_all(format!("post-build-hook = {}\n", post_build_hook_script.display()).as_bytes())
        .with_context(|| "Writing to nix.conf")?;

    Ok(())
}
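
For concreteness, a sketch of what the format string above produces. The values here are purely illustrative: the real binary path comes from std::env::current_exe() and the listen address from the daemon's configuration.

fn main() {
    // Hypothetical values; neither path nor address is fixed in the real code.
    let exe = "/run/current-system/sw/bin/magic-nix-cache";
    let listen = "127.0.0.1:3000";
    let script = format!(
        "#! /bin/sh\nRUST_LOG=trace RUST_BACKTRACE=full {} --server {} || :\n",
        exe, listen
    );
    // The trailing "|| :" is what makes the hook best-effort: the script
    // always exits 0, so a cache failure never fails the build itself.
    print!("{script}");
}
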

pub async fn handle_legacy_post_build_hook(out_paths: &str) -> Result<()> {
    #[derive(Parser, Debug)]
    struct Args {
        /// `magic-nix-cache` daemon to connect to.
        #[arg(short = 'l', long, default_value = "127.0.0.1:3000")]
        server: SocketAddr,
    }

    let args = Args::parse();

    let store_paths: Vec<_> = out_paths
        .split_whitespace()
        .map(|s| s.trim().to_owned())
        .collect();

    let request = crate::api::EnqueuePathsRequest { store_paths };

    let response = reqwest::Client::new()
        .post(format!("http://{}/api/enqueue-paths", &args.server))
        .header(reqwest::header::CONTENT_TYPE, "application/json")
        .body(
            serde_json::to_string(&request)
                .with_context(|| "Encoding the request for the magic-nix-cache server")?,
        )
        .send()
        .await;

    match response {
        Ok(response) if !response.status().is_success() => Err(anyhow!(
            "magic-nix-cache server failed to enqueue the push request: {}\n{}",
            response.status(),
            response
                .text()
                .await
                .unwrap_or_else(|_| "<no response text>".to_owned()),
        ))?,
        Ok(response) => response
            .json::<crate::api::EnqueuePathsResponse>()
            .await
            .with_context(|| "magic-nix-cache-server didn't return a valid response")?,
        Err(err) => {
            Err(err).with_context(|| "magic-nix-cache server failed to send the enqueue request")?
        }
    };

    Ok(())
}
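
The caller that extracts the out paths is not part of this hunk. A minimal sketch of a hook-side entry point, assuming Nix's documented convention of passing the freshly built store paths, space-separated, in the OUT_PATHS environment variable; main here is hypothetical, not the crate's real dispatch:

// Hypothetical entry point; the real dispatch lives elsewhere in this crate.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Nix sets OUT_PATHS when it invokes a post-build hook.
    let out_paths = std::env::var("OUT_PATHS")?;
    handle_legacy_post_build_hook(&out_paths).await
}
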
@@ -28,6 +28,8 @@ pub struct TelemetryReport {
     pub num_original_paths: Metric,
     pub num_final_paths: Metric,
     pub num_new_paths: Metric,
+
+    pub tripped_429: std::sync::atomic::AtomicBool,
 }

 #[derive(Debug, Default, serde::Serialize)]
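
Because TelemetryReport is shared across tasks, the new flag is an AtomicBool rather than a plain bool, so it can be set without a mutable reference. A minimal usage sketch with a stand-in struct; record_rate_limit is a hypothetical helper, not part of this diff:

use std::sync::atomic::{AtomicBool, Ordering};

// Stand-in struct, reduced to the field added in this hunk.
struct TelemetryReport {
    tripped_429: AtomicBool,
}

fn record_rate_limit(report: &TelemetryReport) {
    // Relaxed suffices: this is a one-way "did we ever see a 429" flag.
    report.tripped_429.store(true, Ordering::Relaxed);
}
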

@@ -11,9 +11,19 @@ use crate::error::Result;
 pub async fn get_store_paths(store: &NixStore) -> Result<HashSet<PathBuf>> {
     // FIXME: use the Nix API.
     let store_dir = store.store_dir();
-    let mut listing = tokio::fs::read_dir(store_dir).await?;
+    let mut listing = tokio::fs::read_dir(store_dir).await.map_err(|e| {
+        crate::error::Error::Io(
+            e,
+            format!("Enumerating store paths in {}", store_dir.display()),
+        )
+    })?;
     let mut paths = HashSet::new();
-    while let Some(entry) = listing.next_entry().await? {
+    while let Some(entry) = listing.next_entry().await.map_err(|e| {
+        crate::error::Error::Io(
+            e,
+            format!("Reading existing store paths from {}", store_dir.display()),
+        )
+    })? {
         let file_name = entry.file_name();
         let file_name = Path::new(&file_name);
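
Both new call sites follow the same shape: map the raw std::io::Error into the crate's Error::Io variant together with a description of the operation that was being attempted. A minimal sketch of the pattern; the Error enum below is a stand-in for crate::error::Error, reduced to the one variant used here:

use std::path::Path;

// Stand-in for crate::error::Error, reduced to the Io variant used above.
#[derive(Debug)]
enum Error {
    Io(std::io::Error, String),
}

// Pairing the error with the attempted operation lets the eventual report
// say something like "Enumerating store paths in /nix/store: ..." instead
// of a bare "No such file or directory".
async fn read_dir_with_context(dir: &Path) -> Result<tokio::fs::ReadDir, Error> {
    tokio::fs::read_dir(dir)
        .await
        .map_err(|e| Error::Io(e, format!("Enumerating store paths in {}", dir.display())))
}
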
48 nix.patch
@@ -1,48 +0,0 @@
diff --git a/mk/libraries.mk b/mk/libraries.mk
index 6541775f329..5118b957608 100644
--- a/mk/libraries.mk
+++ b/mk/libraries.mk
@@ -130,7 +130,15 @@ define build-library

 $(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS)

- $(1)_INSTALL_PATH := $$(libdir)/$$($(1)_NAME).a
+ $(1)_INSTALL_PATH := $(DESTDIR)$$($(1)_INSTALL_DIR)/$$($(1)_NAME).a
+
+ $$(eval $$(call create-dir, $$($(1)_INSTALL_DIR)))
+
+ $$($(1)_INSTALL_PATH): $$($(1)_OBJS) | $(DESTDIR)$$($(1)_INSTALL_DIR)/
+	+$$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$^
+	$$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o
+
+ install: $$($(1)_INSTALL_PATH)

 endif

diff --git a/src/libstore/local.mk b/src/libstore/local.mk
index 8f28bec6c1d..0d41e3c2cac 100644
--- a/src/libstore/local.mk
+++ b/src/libstore/local.mk
@@ -69,6 +69,13 @@ $(d)/build.cc:

 clean-files += $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh

+$(d)/nix-store.pc: $(d)/nix-store.pc.in
+	$(trace-gen) rm -f $@ && ./config.status --quiet --file=$@
+ifeq ($(BUILD_SHARED_LIBS), 1)
+	sed -i 's|@LIBS_PRIVATE@||' $@
+else
+	sed -i 's|@LIBS_PRIVATE@|Libs.private: $(libstore_LDFLAGS) $(libstore_LDFLAGS_PROPAGATED) $(foreach lib, $(libstore_LIBS), $($(lib)_LDFLAGS))|' $@
+endif
 $(eval $(call install-file-in, $(d)/nix-store.pc, $(libdir)/pkgconfig, 0644))

 $(foreach i, $(wildcard src/libstore/builtins/*.hh), \
diff --git a/src/libstore/nix-store.pc.in b/src/libstore/nix-store.pc.in
index 6d67b1e0380..738991d307b 100644
--- a/src/libstore/nix-store.pc.in
+++ b/src/libstore/nix-store.pc.in
@@ -7,3 +7,4 @@ Description: Nix Package Manager
 Version: @PACKAGE_VERSION@
 Libs: -L${libdir} -lnixstore -lnixutil
 Cflags: -I${includedir}/nix -std=c++2a
+@LIBS_PRIVATE@
58 package.nix
@@ -1,58 +0,0 @@
{ lib
, stdenv
, rustPlatform
, pkg-config
, installShellFiles
, nix
, boost
, darwin
, rust-analyzer
, clippy
, rustfmt
}:

let
  ignoredPaths = [ ".github" "target" "book" ];
  version = (builtins.fromTOML (builtins.readFile ./magic-nix-cache/Cargo.toml)).package.version;
in
rustPlatform.buildRustPackage rec {
  pname = "magic-nix-cache";
  inherit version;

  src = lib.cleanSourceWith {
    filter = name: type: !(type == "directory" && builtins.elem (baseNameOf name) ignoredPaths);
    src = lib.cleanSource ./.;
  };

  nativeBuildInputs = [
    pkg-config
    installShellFiles
    rust-analyzer
    clippy
    rustfmt
  ];

  buildInputs = [
    nix
    boost
  ] ++ lib.optionals stdenv.isDarwin (with darwin.apple_sdk.frameworks; [
    SystemConfiguration
  ]);

  cargoLock = {
    lockFile = ./Cargo.lock;
    allowBuiltinFetchGit = true;
  };

  ATTIC_DISTRIBUTOR = "attic";

  # Hack to fix linking on macOS.
  NIX_CFLAGS_LINK = lib.optionalString stdenv.isDarwin "-lc++abi";

  # Recursive Nix is not stable yet
  doCheck = false;

  postFixup = ''
    rm -f $out/nix-support/propagated-build-inputs
  '';
}