Compare commits

...

347 commits
v0.1.2 ... main

Author SHA1 Message Date
Luc Perkins d35e6e72df
Merge pull request #137 from DeterminateSystems/dependabot/cargo/crossbeam-channel-0.5.15
build(deps): bump crossbeam-channel from 0.5.14 to 0.5.15
2025-04-10 12:13:17 -03:00
dependabot[bot] 6fc832cb76
build(deps): bump crossbeam-channel from 0.5.14 to 0.5.15
Bumps [crossbeam-channel](https://github.com/crossbeam-rs/crossbeam) from 0.5.14 to 0.5.15.
- [Release notes](https://github.com/crossbeam-rs/crossbeam/releases)
- [Changelog](https://github.com/crossbeam-rs/crossbeam/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crossbeam-rs/crossbeam/compare/crossbeam-channel-0.5.14...crossbeam-channel-0.5.15)

---
updated-dependencies:
- dependency-name: crossbeam-channel
  dependency-version: 0.5.15
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-10 14:45:15 +00:00
Luc Perkins 78a56de86a
Merge pull request #136 from DeterminateSystems/dependabot/cargo/tokio-1.44.2
build(deps): bump tokio from 1.44.1 to 1.44.2
2025-04-08 16:20:27 -03:00
dependabot[bot] 9fdc760dcb
build(deps): bump tokio from 1.44.1 to 1.44.2
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.44.1 to 1.44.2.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.44.1...tokio-1.44.2)

---
updated-dependencies:
- dependency-name: tokio
  dependency-version: 1.44.2
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-08 02:10:21 +00:00
Graham Christensen 90e95ab197
Merge pull request #134 from DeterminateSystems/update-deps
Update dependencies
2025-04-02 11:55:00 -04:00
Graham Christensen 9f88cc4842 Update dependencies 2025-04-02 11:26:06 -04:00
Luc Perkins 942b6b0ffe
Merge pull request #129 from DeterminateSystems/update-flake-lock
Update flake.lock and add update-flake-lock support
2025-03-26 16:37:30 -03:00
Luc Perkins 80600ec316
Merge remote-tracking branch 'origin/main' into update-flake-lock 2025-03-26 10:48:45 -03:00
Luc Perkins 582930b2fc
Merge pull request #128 from DeterminateSystems/flakehub-cache-action
Switch to flakehub-cache-action
2025-03-26 10:48:17 -03:00
Luc Perkins b29c2cafae
Merge remote-tracking branch 'origin/main' into flakehub-cache-action 2025-03-26 10:07:45 -03:00
Luc Perkins 0dd0d2d0a6
Update flake.lock 2025-03-26 10:07:39 -03:00
Luc Perkins 5a689bfeb3
Fix merge conflicts with main 2025-03-25 11:45:05 -03:00
Cole Mickens e9600149c7
Merge pull request #127 from DeterminateSystems/colemickens/no-auto-create-users-via-api
write github actions error when user is unauthenticated
2025-03-25 06:22:54 -07:00
Cole Mickens 9bdbfd97f2 flake.lock: Update
Flake lock file updates:

• Updated input 'crane':
    'github:ipetkov/crane/19de14aaeb869287647d9461cbd389187d8ecdb7?narHash=sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk%3D' (2025-02-19)
  → 'github:ipetkov/crane/70947c1908108c0c551ddfd73d4f750ff2ea67cd?narHash=sha256-vVOAp9ahvnU%2BfQoKd4SEXB2JG2wbENkpqcwlkIXgUC0%3D' (2025-03-19)
• Updated input 'nix':
    'https://api.flakehub.com/f/pinned/NixOS/nix/2.26.2/0194fbd7-e2ec-7193-93a9-05ae757e79a1/source.tar.gz?narHash=sha256-EOnBPe%2BydQ0/P5ZyWnFekvpyUxMcmh2rnP9yNFi/EqU%3D' (2025-02-12)
  → 'https://api.flakehub.com/f/pinned/NixOS/nix/2.27.1/0195c8c5-1964-7a31-b025-ebf9bfeef991/source.tar.gz?narHash=sha256-rBPulEBpn4IiqkPsetuh7BRzT2iGCzZYnogTAsbrvhU%3D' (2025-03-24)
• Updated input 'nixpkgs':
    'https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.755230%2Brev-73cf49b8ad837ade2de76f87eb53fc85ed5d4680/01951ca9-35fa-70f2-b972-630b0cd93c65/source.tar.gz?narHash=sha256-EO1ygNKZlsAC9avfcwHkKGMsmipUk1Uc0TbrEZpkn64%3D' (2025-02-18)
  → 'https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.770807%2Brev-a84ebe20c6bc2ecbcfb000a50776219f48d134cc/0195b626-8c1d-7fb9-9282-563af3d37ab9/source.tar.gz?narHash=sha256-mNqIplmEohk5jRkqYqG19GA8MbQ/D4gQSK0Mu4LvfRQ%3D' (2025-03-19)
2025-03-25 06:06:08 -07:00
Cole Mickens d347386c3f write github actions error when user is unauthenticated 2025-03-25 06:06:08 -07:00
Luc Perkins a774f04dfb
Switch crane input to FlakeHub 2025-03-24 20:25:46 -03:00
Luc Perkins 7ed9fc9cbb
Update flake.lock and add update-flake-lock support 2025-03-24 20:21:34 -03:00
Luc Perkins 8fa4c519ce
Switch to flakehub-cache-action 2025-03-24 20:14:03 -03:00
Graham Christensen 3a905ca44d
Merge pull request #126 from DeterminateSystems/dependabot/cargo/ring-0.17.13
build(deps): bump ring from 0.17.9 to 0.17.13
2025-03-07 10:59:11 -08:00
dependabot[bot] 339b12a07b
build(deps): bump ring from 0.17.9 to 0.17.13
Bumps [ring](https://github.com/briansmith/ring) from 0.17.9 to 0.17.13.
- [Changelog](https://github.com/briansmith/ring/blob/main/RELEASES.md)
- [Commits](https://github.com/briansmith/ring/commits)

---
updated-dependencies:
- dependency-name: ring
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-07 17:14:54 +00:00
Graham Christensen c5e897a376
Merge pull request #124 from DeterminateSystems/updates
Updates to flakes and crates
2025-02-24 15:04:11 -05:00
Cole Helbling 1eeaf990c0
Merge pull request #125 from emilazy/push-qppkqwrwsowk
Fix build with Darwin and Nix 2.26 after updates
2025-02-24 11:27:00 -08:00
Emily 66c0f4bf8e nix.patch: remove
No longer used and confused me for a second.
2025-02-24 18:42:11 +00:00
Emily f64d3e92bf readme: remove mention of Musl
This confused me a bunch so it’ll probably confuse others!
2025-02-24 18:42:11 +00:00
Emily 5e805d85e8 ci: re‐enable flake checker fail mode
Looks like this should be fine now that Nixpkgs has been updated and
the static build isn’t used any more.
2025-02-24 18:42:11 +00:00
Emily 8780a7d721 flake: use the Nix flake in the development shell too 2025-02-24 18:42:11 +00:00
Emily 90e06e4287 cargo: bump attic for Nix 2.26 2025-02-24 18:42:11 +00:00
Emily f5504c8285 flake: fix Darwin build
None of this stuff is necessary these days. This will produce an
executable linking against a Nix store `libiconv`, but as the build
isn’t static to begin with that should be fine. If a static build
is required in the future, `pkgsStatic` can be used as it is in the
`nix-installer` flake.
2025-02-19 22:25:16 +00:00
Emily a5c0301ee8 flake: use stock Nixpkgs Rust toolchain
This makes it easier to use Nixpkgs’ cross‐compilation machinery
and simplifies the flake.
2025-02-19 22:25:16 +00:00
Emily 92b7440174 flake: update crane 2025-02-19 22:24:35 +00:00
Emily ee9c0b9fa5 flake: drop flake-compat input
Not used by anything.
2025-02-19 22:24:35 +00:00
Emily a0571cc895 flake: remove default from the overlay
This belongs in `packages` instead (where a duplicate is already
present anyway).
2025-02-19 20:07:15 +00:00
Cole Helbling 260ce740fc
Update flake.nix 2025-02-19 10:32:14 -08:00
Graham Christensen 22d923e664 ...flake.nix too 2025-02-19 11:48:48 -05:00
Graham Christensen 214e869fee flake.lock: Update
Flake lock file updates:
2025-02-19 11:48:22 -05:00
Graham Christensen 36897bff90 Cargo update 2025-02-19 11:45:06 -05:00
Graham Christensen f3fe26e7c0 Switch from a nix overlay to the nix input's default package, update flakes 2024-02-19 11:43:40 -05:00
Graham Christensen 61233c3bb5 flake.lock: Update
Flake lock file updates:

• Updated input 'nix':
    'https://api.flakehub.com/f/pinned/NixOS/nix/2.22.1/018f61d9-3f9a-7ccf-9bfc-174e3a17ab38/source.tar.gz?narHash=sha256-5Q1WkpTWH7fkVfYhHDc5r0A%2BVc%2BK5xB1UhzrLzBCrB8%3D' (2024-05-09)
  → 'https://api.flakehub.com/f/pinned/NixOS/nix/2.26.2/0194fbd7-e2ec-7193-93a9-05ae757e79a1/source.tar.gz?narHash=sha256-EOnBPe%2BydQ0/P5ZyWnFekvpyUxMcmh2rnP9yNFi/EqU%3D' (2025-02-12)
• Updated input 'nix/flake-compat':
    'github:edolstra/flake-compat/35bb57c0c8d8b62bbfd284272c928ceb64ddbde9?narHash=sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm%2B504Ch3sNKLd8%3D' (2023-01-17)
  → 'github:edolstra/flake-compat/ff81ac966bb2cae68946d5ed5fc4994f96d0ffec?narHash=sha256-NeCCThCEP3eCl2l/%2B27kNNK7QrwZB1IJCrXfrbv5oqU%3D' (2024-12-04)
• Updated input 'nix/flake-parts':
    'github:hercules-ci/flake-parts/9126214d0a59633752a136528f5f3b9aa8565b7d?narHash=sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm%2BGpZNw%3D' (2024-04-01)
  → 'github:hercules-ci/flake-parts/205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9?narHash=sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c%3D' (2024-12-04)
• Added input 'nix/git-hooks-nix':
    'github:cachix/git-hooks.nix/aa9f40c906904ebd83da78e7f328cd8aeaeae785?narHash=sha256-NdaCraHPp8iYMWzdXAt5Nv6sA3MUzlCiGiR586TCwo0%3D' (2024-12-15)
• Added input 'nix/git-hooks-nix/flake-compat':
    follows 'nix'
• Added input 'nix/git-hooks-nix/gitignore':
    follows 'nix'
• Added input 'nix/git-hooks-nix/nixpkgs':
    follows 'nix/nixpkgs'
• Added input 'nix/git-hooks-nix/nixpkgs-stable':
    follows 'nix/nixpkgs'
• Removed input 'nix/libgit2'
• Updated input 'nix/nixpkgs':
    'github:NixOS/nixpkgs/b550fe4b4776908ac2a861124307045f8e717c8e?narHash=sha256-7kkJQd4rZ%2BvFrzWu8sTRtta5D1kBG0LSRYAfhtmMlSo%3D' (2024-02-28)
  → 'github:NixOS/nixpkgs/48d12d5e70ee91fe8481378e540433a7303dbf6a?narHash=sha256-1Noao/H%2BN8nFB4Beoy8fgwrcOQLVm9o4zKW1ODaqK9E%3D' (2024-12-16)
• Added input 'nix/nixpkgs-23-11':
    'github:NixOS/nixpkgs/a62e6edd6d5e1fa0329b8653c801147986f8d446?narHash=sha256-oamiKNfr2MS6yH64rUn99mIZjc45nGJlj9eGth/3Xuw%3D' (2024-05-31)
• Removed input 'nix/pre-commit-hooks'
• Removed input 'nix/pre-commit-hooks/flake-compat'
• Removed input 'nix/pre-commit-hooks/flake-utils'
• Removed input 'nix/pre-commit-hooks/gitignore'
• Removed input 'nix/pre-commit-hooks/nixpkgs'
• Removed input 'nix/pre-commit-hooks/nixpkgs-stable'
2025-02-19 11:43:22 -05:00
Graham Christensen 1cd5d316da flake.lock: Update
Flake lock file updates:

• Updated input 'fenix':
    'https://api.flakehub.com/f/pinned/nix-community/fenix/0.1.1955%2Brev-60ab4a085ef6ee40f2ef7921ca4061084dd8cf26/01910d03-2462-7e48-b72e-439d1152bd11/source.tar.gz?narHash=sha256-l7/yMehbrL5d4AI8E2hKtNlT50BlUAau4EKTgPg9KcY%3D' (2024-08-01)
  → 'https://api.flakehub.com/f/pinned/nix-community/fenix/0.1.2156%2Brev-de3ea31eb651b663449361f77d9c1e8835290470/0194c095-0041-7b9c-b19e-cf1c4a2adaad/source.tar.gz?narHash=sha256-TC3xA%2B%2BKgprECm/WPsLUd%2Ba77MObZPElCW6eAsjVW1k%3D' (2025-02-01)
• Updated input 'fenix/rust-analyzer-src':
    'github:rust-lang/rust-analyzer/c8e41d95061543715b30880932ec3dc24c42d7ae?narHash=sha256-1na4m2PNH99syz2g/WQ%2BHr3RfY7k4H8NBnmkr5dFDXw%3D' (2024-07-31)
  → 'github:rust-lang/rust-analyzer/3c2aca1e5e9fbabb4e05fc4baa62e807aadc476a?narHash=sha256-1zhfA5NBqin0Z79Se85juvqQteq7uClJMEb7l2pdDUY%3D' (2025-01-30)
• Updated input 'flake-compat':
    'https://api.flakehub.com/f/pinned/edolstra/flake-compat/1.0.1/018afb31-abd1-7bff-a5e4-cff7e18efb7a/source.tar.gz?narHash=sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U%3D' (2023-10-04)
  → 'https://api.flakehub.com/f/pinned/edolstra/flake-compat/1.1.0/01948eb7-9cba-704f-bbf3-3fa956735b52/source.tar.gz?narHash=sha256-NeCCThCEP3eCl2l/%2B27kNNK7QrwZB1IJCrXfrbv5oqU%3D' (2024-12-04)
• Updated input 'nixpkgs':
    'https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.2311.558675%2Brev-9d29cd266cebf80234c98dd0b87256b6be0af44e/018fb680-a725-7c9d-825e-aadb0901263e/source.tar.gz?narHash=sha256-xim1b5/HZYbWaZKyI7cn9TJCM6ewNVZnesRr00mXeS4%3D' (2024-05-25)
  → 'https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.755230%2Brev-73cf49b8ad837ade2de76f87eb53fc85ed5d4680/01951ca9-35fa-70f2-b972-630b0cd93c65/source.tar.gz?narHash=sha256-EO1ygNKZlsAC9avfcwHkKGMsmipUk1Uc0TbrEZpkn64%3D' (2025-02-18)
2025-02-19 11:37:25 -05:00
Cole Helbling c12f7ed42f
Merge pull request #121 from maxheld83/patch-1
notify users about EOL of magic nix cache
2025-02-18 12:49:59 -08:00
Cole Helbling 7099e801ae fixup: README link 2025-02-18 12:05:25 -08:00
Cole Helbling b1a16d5ac2
Merge pull request #122 from DeterminateSystems/fh-583-shorten-file-changed-check
Check if token changed every 3 seconds instead
2025-02-10 07:43:16 -08:00
Cole Helbling 1fad1473c5 Check if token changed every 3 seconds instead 2025-02-10 07:28:34 -08:00
Max Held 8492a69263
notify users about EOL
closes https://github.com/DeterminateSystems/magic-nix-cache/issues/119
2025-01-29 18:44:28 +01:00
Graham Christensen a65ff39edd
Merge pull request #118 from DeterminateSystems/grahamc-patch-2
drop 429 notice
2025-01-16 18:48:03 -05:00
Graham Christensen ef4bd6fb91
drop 429 notice 2025-01-16 18:34:39 -05:00
Cole Helbling 4d32a8ef4e
Merge pull request #117 from DeterminateSystems/fixup-ci
fixup: bump forgotten download-artifact action version
2025-01-16 09:24:02 -08:00
Cole Helbling e8e38ad4fc fixup: bump forgotten download-artifact action version 2025-01-16 09:10:05 -08:00
Cole Helbling e70bb1e416
Merge pull request #116 from DeterminateSystems/cole/fh-543
Suggest FlakeHub Cache when hit by 429
2025-01-16 09:00:06 -08:00
Graham Christensen 4e2b37be36 Set a metric field for when GHA 429's 2025-01-16 08:46:41 -08:00
Cole Helbling 322a99d45e fixup: update upload-artifact, download-artifact action versions 2025-01-16 08:46:41 -08:00
Cole Helbling 003f106338
Update GHA 429 notice wording
Co-authored-by: Graham Christensen <graham@grahamc.com>
2025-01-16 07:32:20 -08:00
Cole Helbling b9f89bd546 Suggest FlakeHub Cache when hit by 429 2025-01-14 13:56:17 -08:00
Cole Helbling 215dc0d8e9
Merge pull request #114 from DeterminateSystems/cole/fh-485
Fixup auth when using determinate-nixd with long-running builds
2024-12-04 13:24:01 -08:00
Cole Helbling 2bac50c0ca Move "workaround" notes closer to the workaround 2024-12-04 13:11:19 -08:00
Cole Helbling 5e7acea3d1 Fixup auth when using determinate-nixd with long-running builds 2024-12-04 13:11:19 -08:00
Cole Helbling 11b78639f1 Cargo.lock: update attic dependencies 2024-12-04 12:44:32 -08:00
Cole Helbling 296e9dc1af
Merge pull request #111 from DeterminateSystems/grahamc-patch-2
Use magic-nix-cache-action@main, oops
2024-11-08 09:48:20 -08:00
Graham Christensen f27f314206
Use magic-nix-cache-action@main, oops 2024-11-08 12:29:41 -05:00
Graham Christensen 448d84e32f
Merge pull request #110 from DeterminateSystems/graham/fh-433-magic-nix-cache-should-disable-github-actions-cache-if
Graham/fh 433 magic nix cache should disable GitHub actions cache when flakehub cache is enabled
2024-11-06 12:05:23 -05:00
Graham Christensen d1983bbdff Don't try to use the netrc if it doesn't exist 2024-11-06 09:47:44 -05:00
Graham Christensen a68e1c4d54 Test the patched action 2024-11-05 22:27:17 -05:00
Graham Christensen bf844027bc Turn off the GitHub actions cache if the user expresses no preference, and flakehub cache is in use 2024-11-05 21:22:38 -05:00
Graham Christensen 65060bc705 Switch the GHA Cache preference to a trinary, but treat it as a straight bool for the moment 2024-11-05 21:19:18 -05:00
Graham Christensen 3fd6eeb208 Make the FlakeHubArg a generic Trinary so we can use it for GHA Cache too 2024-11-05 21:11:26 -05:00
Graham Christensen cf183317a5
Merge pull request #109 from DeterminateSystems/colemickens/shutdown
shutdown: wait for flakehub_cache first
2024-11-05 19:45:48 -05:00
Graham Christensen 647b207575
Update magic-nix-cache/src/api.rs 2024-11-05 19:33:11 -05:00
Cole Mickens 625d7717b6 flakehub logging review comments 2024-11-05 15:08:00 -08:00
Graham Christensen 925be77ec2
Merge pull request #108 from mightyiam/bump-checkout-action
Bump GitHub action actions/checkout from v3 to v4
2024-11-05 18:02:55 -05:00
Cole Mickens 7841b8bbe2 flakehub cache init failure is an error 2024-11-05 15:00:56 -08:00
Cole Mickens a54a97ff9b shutdown: info! print paths after FHC upload 2024-11-05 15:00:56 -08:00
Cole Mickens 9f7a4abc4d shutdown: info! if we don't have flakehub_state at workflow_finish 2024-11-05 14:24:56 -08:00
Cole Mickens 799a0c42e6 shutdown: wait for flakehub_cache first 2024-11-05 13:55:36 -08:00
Shahar "Dawn" Or 65899a5ad5 Bump GitHub action actions/checkout from v3 to v4
The breaking change seems to be something about the default Node.js
runtime: https://github.com/actions/checkout/blob/main/CHANGELOG.md#v400

Also in example in documentation, for the sake of the copy-pasting user
(me).
2024-11-05 21:48:35 +07:00
Graham Christensen 6a5abbf3bb
Merge pull request #105 from cfsnyder/main
Fix compatibility issues with alternative GHA cache implementation
2024-09-25 15:17:09 -04:00
Graham Christensen 24af143b67
Don't fail if flakehub cache wasn't requested and its requirements weren't present (#107)
* Treat the use_flakehub flag as an enum to avoid boolean blindness

* Make the match statement around flakehub cache considerate of users who did not opt in to it

* Update magic-nix-cache/src/main.rs
2024-09-25 19:16:43 +00:00
Cory Snyder 4f25f7b3e6 Fix compatibility issues with alternative GHA cache implementation
Fixes two compatibility issues with the alternative GHA cache server
implementation:

https://github.com/falcondev-oss/github-actions-cache-server

1. This implementation does not support redundant forward slashes
   in URL paths. The change allows magic-nix-cache to work properly
   regardless of whether ACTIONS_CACHE_URL ends in a forward slash or
   not.
2. The cache IDs returned by this implementation can be too big for
   an i32, so the representation of the CacheID type has been updated
   to an i64.

Signed-off-by: Cory Snyder <csnyder@1111systems.com>
2024-09-20 05:12:26 -04:00
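A minimal Rust sketch of the two fixes described above; CacheId and endpoint are illustrative names, not the crate's actual definitions:

    // 1. Build request URLs that work whether or not ACTIONS_CACHE_URL ends
    //    in a trailing slash, avoiding redundant "//" in URL paths.
    fn endpoint(base: &str, path: &str) -> String {
        format!("{}/{}", base.trim_end_matches('/'), path.trim_start_matches('/'))
    }

    // 2. Cache IDs from the alternative server can exceed i32::MAX, so widen
    //    the representation to i64.
    #[allow(dead_code)]
    struct CacheId(i64);

    fn main() {
        assert_eq!(
            endpoint("https://cache.example/", "_apis/artifactcache/caches"),
            endpoint("https://cache.example", "/_apis/artifactcache/caches"),
        );
    }
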
Graham Christensen 955ed68d34
Merge pull request #104 from DeterminateSystems/use-14-large
Update our intel macs to 14-large
2024-09-17 17:46:15 -04:00
Graham Christensen 7c6bd9387c
Merge pull request #103 from DeterminateSystems/fh-cache-under-determinate
Drop the assertion around the netrc under dnixd
2024-09-17 17:45:25 -04:00
Graham Christensen 6acb043852 Update our intel macs to 14-large 2024-09-17 17:28:15 -04:00
Graham Christensen 04af54090e Notify the user with an info if we're ignoring their netrc 2024-09-17 17:23:44 -04:00
Graham Christensen bc76dfa4df Clean up the netrc handling when dnixd is around 2024-09-17 17:17:11 -04:00
Cole Mickens 5b126b691b
Merge pull request #102 from DeterminateSystems/colemickens/fallback
nix.conf: move write for 'fallback', always set it
2024-09-16 12:22:57 -05:00
Cole Mickens 2bcd86656f nix.conf: move write for 'fallback', always set it 2024-09-16 10:08:16 -07:00
Cole Helbling f13fa9e9f3
Merge pull request #98 from DeterminateSystems/dependabot/cargo/quinn-proto-0.11.8
Bump quinn-proto from 0.11.6 to 0.11.8
2024-09-04 08:25:06 -07:00
dependabot[bot] 7894df9177
Bump quinn-proto from 0.11.6 to 0.11.8
Bumps [quinn-proto](https://github.com/quinn-rs/quinn) from 0.11.6 to 0.11.8.
- [Release notes](https://github.com/quinn-rs/quinn/releases)
- [Commits](https://github.com/quinn-rs/quinn/compare/quinn-proto-0.11.6...quinn-proto-0.11.8)

---
updated-dependencies:
- dependency-name: quinn-proto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-09-03 20:51:01 +00:00
Graham Christensen 949fc954a5
Merge pull request #95 from DeterminateSystems/colemickens/defaults
cli: add defaults for flakehub endpoints, nix.conf path
2024-08-30 14:12:31 -04:00
Graham Christensen b499ff2a0f
Merge pull request #96 from DeterminateSystems/grahamc/defaults
What if we used clap's defaults support?
2024-08-30 14:02:39 -04:00
Graham Christensen 3ca2a4bf5b Reimplement using defaults 2024-08-30 13:32:11 -04:00
Cole Mickens 979ad69132 cli: add defaults for flakehub endpoints, nix.conf path 2024-08-30 09:16:53 -07:00
Graham Christensen b5a094c7a2
Merge pull request #89 from DeterminateSystems/colemickens/mnc-netrc-uds
netrc: fixup handling when using dnixd/uds
2024-08-29 23:32:22 -04:00
Graham Christensen 91eef4416e
Merge pull request #91 from DeterminateSystems/why-io-error
Why io error
2024-08-29 23:31:13 -04:00
Graham Christensen c1924ba94a
Merge pull request #92 from DeterminateSystems/colemickens/startup-noti-file
startup notification: create_dir_all parent dir, add context for io ops
2024-08-29 22:52:46 -04:00
Graham Christensen b1ec181a0a
Merge pull request #93 from DeterminateSystems/colemickens/unwraps-to-expect
convert unwraps to expects
2024-08-29 22:42:14 -04:00
Cole Mickens fa02e9ad6f convert unwraps to expects 2024-08-29 14:52:02 -07:00
Cole Mickens 21a9552b0b startup notification: create_dir_all parent dir, add context for io ops 2024-08-29 14:35:21 -07:00
Graham Christensen f0b1e69100 A couple more contexts in the post-build-hook 2024-08-29 16:46:20 -04:00
Graham Christensen e52d545126 Convert the other Io error type to have a string of context 2024-08-29 16:40:21 -04:00
Graham Christensen edb35e8b94 Convert the Io error type to have a string of context 2024-08-29 16:37:49 -04:00
Cole Mickens 11544ed9eb netrc: cli arg is truly optional, defaults to UDS path 2024-08-29 09:34:52 -07:00
Cole Mickens a4866b9dcd
Merge pull request #88 from DeterminateSystems/colemickens/mnc-errors
pbh: a bit extra logging when fail to subscribe to /events
2024-08-29 11:32:31 -05:00
Cole Mickens 08c8cf0275 netrc: w/ dnixd, don't update netrc, require it to be right path 2024-08-29 09:26:09 -07:00
Cole Mickens d9d748267f pbh: a bit extra logging when fail to subscribe to /events 2024-08-29 08:44:54 -07:00
Cole Mickens 48ec31e71d
Merge pull request #87 from DeterminateSystems/colemickens/dnixd-netrc
avoid touching netrc-file when dnixd is available
2024-08-28 15:25:57 -05:00
Cole Mickens 9f46b60a8c avoid touching netrc-file when dnixd is available 2024-08-28 11:29:37 -07:00
Cole Mickens deeb8d1d79
Merge pull request #83 from DeterminateSystems/colemickens/subscribe-uds-built-paths
subscribe to determinate-nixd's uds socket and built-paths endpoint when available
2024-08-15 16:41:31 -05:00
Graham Christensen 7c6300cfdc
Update magic-nix-cache/src/main.rs 2024-08-13 23:39:13 -04:00
Cole Mickens c41207df35 built-paths: feedback around error handling for store path collection 2024-08-13 12:20:11 -07:00
Cole Mickens a6daff9a65 built-paths: switch to using /events from /built-paths 2024-08-12 18:44:15 -07:00
Cole Mickens 685fe75327 built-store-paths: mnc can reconnect across dnixd restarts 2024-08-12 12:35:48 -07:00
Cole Mickens 3a001d12e5 built-store-paths: finish pbh code rearrangement 2024-08-12 12:02:42 -07:00
Cole Mickens 0f476bd775 factor 'legacy' pbh out into separate file 2024-08-09 14:07:48 -07:00
Cole Mickens 10cbd94f3c flake.nix: plz obiwan rust-analyzer, save me 2024-08-09 13:58:31 -07:00
Cole Mickens 594748fe30 built-paths subscription: better heuristic for sipping event frames 2024-08-09 11:57:27 -07:00
Cole Mickens c0b8f7b57b hack: strip data frame prefix 2024-08-09 11:55:52 -07:00
Cole Mickens f82811cc9c hack: skip default keep-alive 2024-08-09 11:55:52 -07:00
Cole Mickens 57eb3e75c0 uds subscription: enqueue_paths 2024-08-09 11:55:52 -07:00
Cole Mickens e5d5118022 uds subscription wip 2024-08-09 11:55:52 -07:00
Cole Mickens 2506ee0164
Merge pull request #82 from DeterminateSystems/colemickens/axum-0.7
tree: upgrade axum, hyper, etc
2024-08-09 11:46:40 -07:00
Cole Mickens ef40bb2caf Merge pull request #86 from DeterminateSystems/crane-fenix
Crane fenix
2024-08-09 11:25:12 -07:00
Cole Mickens cc01b81323 tree: upgrade axum: respond to cole-h feedback 2024-08-09 11:25:12 -07:00
Cole Helbling 2ba4cb13aa Cleanup old package files 2024-08-09 11:12:04 -07:00
Cole Helbling c08b262d2a Use fenix and crane 2024-08-09 11:12:04 -07:00
Cole Helbling 1278a7d98a fixup: time compilation against new Rust 2024-08-09 11:01:33 -07:00
Cole Helbling 9ac4ce7953 Revert "Update rust-overlays"
This reverts commit 4ea576ab60.
2024-08-09 11:01:33 -07:00
Cole Helbling d0a51e7820 Revert "Switch to naersk"
This reverts commit 902b81a064.
2024-08-09 11:01:33 -07:00
Graham Christensen 1f386d2aac
Merge pull request #85 from DeterminateSystems/grahamc/colemickens/axum-0.7
Grahamc/colemickens/axum 0.7
2024-08-09 13:52:04 -04:00
Graham Christensen 902b81a064 Switch to naersk 2024-08-09 13:05:55 -04:00
Graham Christensen 4ea576ab60 Update rust-overlays 2024-08-08 21:09:33 -04:00
Cole Mickens 4c0a2510c1 tree: upgrade axum, hyper, etc 2024-08-08 15:18:18 -07:00
Luc Perkins 97a583df58
Merge pull request #80 from DeterminateSystems/clarify-log-message
Provide more useful disabled message
2024-07-16 10:15:12 -07:00
Luc Perkins 45773e0d63
Revert flake.lock and disable fail mode for Flake Checker 2024-07-16 09:54:22 -07:00
Luc Perkins fce3fefac3
Update inputs 2024-07-15 23:56:31 -07:00
Luc Perkins 3cad69e374
Update flake.lock 2024-07-15 17:45:00 -07:00
Luc Perkins da04019f81
Provide more useful disabled message 2024-07-15 17:42:44 -07:00
Graham Christensen ef5c9ec6ef
Merge pull request #77 from DeterminateSystems/429
Back off on 429
2024-06-13 13:24:09 -04:00
Graham Christensen 51bb9f972d Move the tripping to a separate impl on the atomicbool 2024-06-13 13:07:00 -04:00
Graham Christensen 25359d9b17 nit on error text 2024-06-13 12:57:41 -04:00
Graham Christensen 6996f0029f Don't run the GHA exhaustion test generally 2024-06-12 21:53:11 -04:00
Graham Christensen 3c54557810 Shrink the chain, rebuild every time 2024-06-12 21:20:43 -04:00
Graham Christensen 3d21d10cae lol 2024-06-12 18:03:34 -04:00
Graham Christensen 1e5e79a3c8 lol 2024-06-12 17:55:39 -04:00
Graham Christensen 76db34a53f fixup for older rust 2024-06-12 16:59:38 -04:00
Graham Christensen 56d600d74b I hate this 2024-06-12 16:33:58 -04:00
Graham Christensen 22f76db215 Catch 429s in more places 2024-06-12 16:30:27 -04:00
Graham Christensen 0ad1f17858 ? 2024-06-12 16:01:21 -04:00
Graham Christensen 805d2cfc15 fixup 2024-06-12 15:50:22 -04:00
Graham Christensen 4dd3242a14 cleanup 2024-06-12 15:46:52 -04:00
Graham Christensen 9c7b8e3fc9 Trip a circuit breaker when we get a 429 so we don't keep doing useless work 2024-06-12 15:44:26 -04:00
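The idea in miniature, sketched in Rust with illustrative names: a shared AtomicBool trips on the first 429, and every later upload checks it and bails out early instead of doing useless work:

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;

    #[derive(Clone, Default)]
    struct CircuitBreaker(Arc<AtomicBool>);

    impl CircuitBreaker {
        // Called when a request comes back with HTTP 429.
        fn trip(&self) {
            self.0.store(true, Ordering::Relaxed);
        }
        // Checked before each upload attempt.
        fn is_tripped(&self) -> bool {
            self.0.load(Ordering::Relaxed)
        }
    }

    fn main() {
        let breaker = CircuitBreaker::default();
        breaker.trip(); // a 429 arrived
        assert!(breaker.is_tripped()); // later work is skipped
    }
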
Graham Christensen 6742c6a85e Fixup dev shell on macos 2024-06-12 15:36:07 -04:00
Cole Helbling f9076a8afc
Merge pull request #76 from DeterminateSystems/cole/fh-315-m-n-c-better-tracing
Make source of IO errors more obvious
2024-06-06 09:04:48 -07:00
Cole Helbling 0a64d2c632 Make source of IO errors more obvious 2024-06-06 08:02:47 -07:00
Luc Perkins 06ffb16385
Merge pull request #71 from DeterminateSystems/flakehub-publish
Publish to FlakeHub
2024-05-28 18:45:16 -03:00
Luc Perkins b2d45ec3ed
Merge pull request #74 from DeterminateSystems/bump-nix-version
Bump Nix version in MNC closure
2024-05-28 18:15:56 -03:00
Luc Perkins 66c17fdc04
Bump Nix version in closure 2024-05-28 11:29:26 -03:00
Luc Perkins b440f0a0aa
Merge remote-tracking branch 'origin/main' into flakehub-publish 2024-05-26 17:24:16 -03:00
Luc Perkins 490776f268
Merge pull request #72 from DeterminateSystems/package-version
Tie Nix package version to Cargo.toml version
2024-05-24 13:15:40 -03:00
Luc Perkins 824e740fe8
Move version inference logic into package.nix 2024-05-24 12:59:43 -03:00
Luc Perkins bc92ad7f9f
Use Cargo.toml version in Nix package 2024-05-24 12:28:14 -03:00
Luc Perkins 5d8b7417db
Publish to FlakeHub 2024-05-24 11:50:24 -03:00
Luc Perkins 18f457e56e
Merge pull request #70 from DeterminateSystems/fix-strict-mode-input-name
Fix input name for CI-only strict mode
2024-05-22 18:38:50 -03:00
Luc Perkins 684efd3b98
Remove closure extraction step 2024-05-22 18:25:15 -03:00
Luc Perkins ec4b6cdab4
Switch to main branch of magic-nix-cache-action 2024-05-22 17:59:29 -03:00
Luc Perkins d7d82d6159
Fix input name for CI-only strict mode 2024-05-22 17:57:25 -03:00
Cole Helbling 545d3b7bac
Merge pull request #63 from DeterminateSystems/bring-back-store-diffing
Bring back store diffing
2024-05-22 13:10:02 -07:00
Cole Helbling 6a58908c6b Make store diffing optional 2024-05-22 09:27:04 -07:00
Cole Helbling 67647c9997 Don't skip -source when diffing store 2024-05-22 09:14:47 -07:00
Cole Helbling 5cc7e808dc Bring back store diffing 2024-05-22 09:14:47 -07:00
Cole Helbling 8477facf57
Merge pull request #60 from DeterminateSystems/log-and-cat-after-workflow-finished
Record and print tracing logs in debug mode
2024-05-22 07:58:35 -07:00
Cole Helbling 6f3c6309e4 Record and print tracing logs in debug mode 2024-05-21 12:43:01 -07:00
Luc Perkins 07b8fc311f
Merge pull request #68 from DeterminateSystems/integration-test 2024-05-20 11:00:06 -03:00
Luc Perkins 08033cd09a
Revert "Spawn daemon in separate process (this should fail in CI)"
This reverts commit f92c44ab59.
2024-05-20 08:11:08 -03:00
Luc Perkins 645dabfe82
Add nix build to test 2024-05-20 07:58:47 -03:00
Luc Perkins 23356ead97
Test Action in strict mode 2024-05-20 07:44:54 -03:00
Luc Perkins f92c44ab59
Spawn daemon in separate process (this should fail in CI) 2024-05-20 07:17:22 -03:00
Luc Perkins 49afb020c1
Build only when label is applied 2024-05-18 17:24:45 -03:00
Luc Perkins 66317827ea
Some final cleanup 2024-05-18 17:21:03 -03:00
Luc Perkins 03d4aa5f66
Fix naming issue 2024-05-18 17:07:04 -03:00
Luc Perkins 3708b7cec6
Fix download artifact declaration 2024-05-18 16:52:47 -03:00
Luc Perkins e02976750d
Remove unused env var 2024-05-18 16:47:37 -03:00
Luc Perkins 6eaa23c963
Rework integration test 2024-05-18 16:41:44 -03:00
Luc Perkins 867cfad681
Fix broken dev shell build 2024-05-18 16:23:35 -03:00
Luc Perkins 01e147381b
Streamline build logic 2024-05-18 16:21:53 -03:00
Luc Perkins cce0d218c8
Use matrix strategy 2024-05-17 17:36:46 -03:00
Luc Perkins e85ce91771
Extract closure in prior step 2024-05-17 17:28:37 -03:00
Luc Perkins 6f4ce1d570
Install Nix prior to test 2024-05-17 17:17:18 -03:00
Luc Perkins 1407ae42a2
Make sure build job runs first 2024-05-17 17:14:50 -03:00
Luc Perkins 5b98d04c9e
Test build of mnc using source-binary 2024-05-17 17:14:03 -03:00
Luc Perkins 7474dbd627
Use GITHUB_OUTPUT instead of GITHUB_STATE 2024-05-17 17:04:12 -03:00
Luc Perkins 7fc2455f30
Use GITHUB_STATE mechanism for state 2024-05-17 17:03:09 -03:00
Luc Perkins bb7e2fbfa3
Make nix.conf writable 2024-05-17 17:01:32 -03:00
Luc Perkins 986b5798dd
More setup steps 2024-05-17 16:59:39 -03:00
Luc Perkins d677f3a332
Add integration test workflow 2024-05-17 16:52:30 -03:00
Luc Perkins 3a1558438f
Merge pull request #66 from DeterminateSystems/remove-spawn
Don't run server in tokio::spawn
2024-05-17 14:58:46 -04:00
Luc Perkins 1bb6c86f5d
Remove unused import 2024-05-17 15:49:20 -03:00
Luc Perkins d1c5d5203b
Don't run server in tokio::spawn 2024-05-17 15:41:45 -03:00
Luc Perkins 2e05bd5fff
Merge pull request #65 from DeterminateSystems/file-based-notification 2024-05-17 14:01:15 -04:00
Luc Perkins 5da333f97b
Add missing pkg-config dependency to dev shell 2024-05-17 14:32:40 -03:00
Luc Perkins 736bd0c019
Delete notification file if server fails to start up 2024-05-17 14:22:01 -03:00
Luc Perkins d67f330397
Spawn daemon in separate process 2024-05-17 14:07:12 -03:00
Luc Perkins c0b7181ddc
Allow for both startup options to be None 2024-05-17 13:32:43 -03:00
Luc Perkins cfe5cb78c5
Add file-based notification mechanism 2024-05-17 13:22:58 -03:00
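A sketch of how such a file-based readiness signal can work (the paths and helper names here are hypothetical, not the crate's actual API): the daemon writes a file once it is serving, and deletes it if startup fails so the parent never waits on a dead process:

    use std::fs;
    use std::path::Path;

    fn notify_started(path: &Path) -> std::io::Result<()> {
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent)?; // ensure the parent directory exists
        }
        fs::write(path, b"started")
    }

    fn notify_failed(path: &Path) {
        let _ = fs::remove_file(path); // best-effort cleanup on startup failure
    }

    fn main() -> std::io::Result<()> {
        let path = std::env::temp_dir().join("magic-nix-cache/startup"); // illustrative
        notify_started(&path)?;
        // ...and if binding the server had failed instead:
        notify_failed(&path);
        Ok(())
    }
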
Luc Perkins 8f6369dd2a
Merge pull request #64 from DeterminateSystems/determine-env
Determine running environment
2024-05-16 17:07:48 -04:00
Luc Perkins 763508d326
Use matrix for build.yaml 2024-05-16 15:52:18 -03:00
Luc Perkins ab6bb9c47a
Restore info statement in GHA 2024-05-16 15:05:45 -03:00
Luc Perkins 1eb6003444
Derive Copy for Environment 2024-05-16 15:05:11 -03:00
Luc Perkins c1c6574b30
Check only for GITLAB_CI variable 2024-05-16 15:04:40 -03:00
Luc Perkins 1ee5b1eec8
Provide more ergonomic env var checking
Co-authored-by: Cole Helbling <cole.helbling@determinate.systems>
2024-05-16 15:04:04 -03:00
Luc Perkins 06fb14658c
Reformat use statement
Co-authored-by: Cole Helbling <cole.helbling@determinate.systems>
2024-05-16 15:02:28 -03:00
Luc Perkins a6e08a2a14
Remove info statement when not in GHA 2024-05-16 14:54:44 -03:00
Luc Perkins 8ad3089e93
Remove unnecessary determine func 2024-05-16 14:50:28 -03:00
Luc Perkins 41327e96b5
Address Clippy issues 2024-05-16 14:46:17 -03:00
Luc Perkins 136a3d43d6
Periodically fetch JWT only in GHA 2024-05-16 14:29:20 -03:00
Luc Perkins 90180e31ef
Add logic for determining environment 2024-05-16 13:53:46 -03:00
Cole Helbling a5ade67dac
Merge pull request #62 from DeterminateSystems/lookup-project-by-token
Make flakehub-flake-name truly optional
2024-05-09 14:44:36 -07:00
Cole Helbling 3d9bcd16a4 Make flakehub-flake-name truly optional 2024-05-09 14:29:14 -07:00
Eelco Dolstra 00e22a61b6
Merge pull request #61 from DeterminateSystems/grahamc-patch-1
Ignore post-build-hook errors
2024-05-06 18:28:50 +02:00
Graham Christensen 389a63ce68
Ignore post-build-hook errors 2024-05-06 12:00:24 -04:00
Graham Christensen 00fe42c282
Merge pull request #58 from DeterminateSystems/dependabot/cargo/rustls-0.21.11
Bump rustls from 0.21.10 to 0.21.11
2024-04-24 11:38:57 -04:00
dependabot[bot] d61face7fe
Bump rustls from 0.21.10 to 0.21.11
Bumps [rustls](https://github.com/rustls/rustls) from 0.21.10 to 0.21.11.
- [Release notes](https://github.com/rustls/rustls/releases)
- [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rustls/rustls/compare/v/0.21.10...v/0.21.11)

---
updated-dependencies:
- dependency-name: rustls
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-04-19 19:57:36 +00:00
Cole Helbling a12e8e1700
Merge pull request #57 from DeterminateSystems/cole/fh-268-magic-nix-cache-401s-when-build-takes-too-long
Refresh GitHub Actions JWT in the background
2024-04-19 10:44:09 -07:00
Cole Helbling 0434d467d3 Refresh GitHub Actions JWT in the background
GitHub Actions JWTs are only valid for 5 minutes after being issued.

FlakeHub uses these JWTs for authentication, which means that after
those 5 minutes have passed and the token is expired, FlakeHub (and
by extension FlakeHub Cache) will no longer allow requests using this
token.

However, GitHub gives us a way to repeatedly request new tokens, so we
utilize that and refresh the token every 2 minutes (less than half of
the lifetime of the token).
2024-04-19 08:32:45 -07:00
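A sketch of that refresh loop using tokio (assumed here); fetch_new_jwt is a hypothetical stand-in for the request to GitHub's token endpoint:

    use std::sync::Arc;
    use std::time::Duration;
    use tokio::sync::RwLock;

    // Hypothetical: in reality this would call the URL in
    // ACTIONS_ID_TOKEN_REQUEST_URL with the runtime token.
    async fn fetch_new_jwt() -> String {
        "fresh-token".to_string()
    }

    // Tokens expire after ~5 minutes, so refreshing every 2 minutes keeps
    // the shared token comfortably inside its validity window.
    async fn refresh_jwt_in_background(token: Arc<RwLock<String>>) {
        let mut interval = tokio::time::interval(Duration::from_secs(120));
        loop {
            interval.tick().await;
            *token.write().await = fetch_new_jwt().await;
        }
    }

    #[tokio::main]
    async fn main() {
        let token = Arc::new(RwLock::new(fetch_new_jwt().await));
        tokio::spawn(refresh_jwt_in_background(token.clone()));
        println!("current token: {}", token.read().await);
    }
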
Graham Christensen a59a765f73
Merge pull request #54 from DeterminateSystems/fixup-tags-prs
Fix releasing tags and prs
2024-04-12 10:08:44 -04:00
Eelco Dolstra fd6db08ef0
Merge pull request #52 from DeterminateSystems/build-hook-in-store
Move the post-build hook script to the Nix store
2024-04-12 10:37:39 +02:00
Graham Christensen 930038182b Fix releasing tags and prs 2024-04-11 22:27:58 -04:00
Luc Perkins 415818d147
Merge pull request #48 from DeterminateSystems/fix-dev-shells
Fix dev shell upload to cache
2024-04-11 23:23:35 -03:00
Graham Christensen b64bf3f4e5
Merge pull request #53 from DeterminateSystems/correlation
Use detsys_correlation if it is set
2024-04-11 22:11:18 -04:00
Graham Christensen 2d747212b0 Use detsys_correlation if it is set 2024-04-11 20:15:52 -04:00
Eelco Dolstra 4d66c1f308 Move the post-build hook script to the Nix store
In self-hosted GHA runners on NixOS, the runner has a different /tmp
than the Nix daemon, so the daemon would get "file not found" trying
to execute the post-build hook. As a workaround, move the script to
the Nix store so we can be sure that the daemon can access it.
2024-04-11 18:10:56 +02:00
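One way the workaround could look in Rust, assuming the nix-store CLI is on PATH (a sketch, not the commit's actual code): import the script into the store and hand the daemon the printed store path, which the daemon and the runner both see:

    use std::io::Write;
    use std::process::Command;

    fn install_hook_script(contents: &str) -> std::io::Result<String> {
        let tmp = std::env::temp_dir().join("post-build-hook.sh");
        let mut f = std::fs::File::create(&tmp)?;
        f.write_all(contents.as_bytes())?;

        // nix-store --add copies the file into /nix/store and prints its path.
        let out = Command::new("nix-store").arg("--add").arg(&tmp).output()?;
        Ok(String::from_utf8_lossy(&out.stdout).trim().to_string())
    }

    fn main() -> std::io::Result<()> {
        let hook = install_hook_script("#!/bin/sh\nexit 0\n")?;
        println!("post-build-hook = {hook}");
        Ok(())
    }
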
Graham Christensen 1cff8aeb19
Merge pull request #49 from DeterminateSystems/dependabot/cargo/h2-0.3.26
Bump h2 from 0.3.24 to 0.3.26
2024-04-10 14:00:46 -04:00
Graham Christensen b176ae218a
Merge pull request #51 from DeterminateSystems/dependabot/cargo/whoami-1.5.1
Bump whoami from 1.4.1 to 1.5.1
2024-04-10 14:00:30 -04:00
Graham Christensen bc6dc0cf6c
Merge pull request #50 from DeterminateSystems/dependabot/cargo/mio-0.8.11
Bump mio from 0.8.8 to 0.8.11
2024-04-10 13:26:03 -04:00
dependabot[bot] a27ea631cf
Bump whoami from 1.4.1 to 1.5.1
Bumps [whoami](https://github.com/ardaku/whoami) from 1.4.1 to 1.5.1.
- [Changelog](https://github.com/ardaku/whoami/blob/v1/CHANGELOG.md)
- [Commits](https://github.com/ardaku/whoami/compare/v1.4.1...v1.5.1)

---
updated-dependencies:
- dependency-name: whoami
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-04-10 15:59:51 +00:00
dependabot[bot] 252d4d424b
Bump mio from 0.8.8 to 0.8.11
Bumps [mio](https://github.com/tokio-rs/mio) from 0.8.8 to 0.8.11.
- [Release notes](https://github.com/tokio-rs/mio/releases)
- [Changelog](https://github.com/tokio-rs/mio/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/mio/compare/v0.8.8...v0.8.11)

---
updated-dependencies:
- dependency-name: mio
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-04-10 15:59:41 +00:00
dependabot[bot] 030213d93c
Bump h2 from 0.3.24 to 0.3.26
Bumps [h2](https://github.com/hyperium/h2) from 0.3.24 to 0.3.26.
- [Release notes](https://github.com/hyperium/h2/releases)
- [Changelog](https://github.com/hyperium/h2/blob/v0.3.26/CHANGELOG.md)
- [Commits](https://github.com/hyperium/h2/compare/v0.3.24...v0.3.26)

---
updated-dependencies:
- dependency-name: h2
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-04-10 15:59:33 +00:00
Luc Perkins 974048fcd6
Fix Clippy issues 2024-03-29 17:19:44 -03:00
Luc Perkins 65d2adf419
Remove shells array from matrix 2024-03-29 16:27:34 -03:00
Luc Perkins 898456dffe
Fix dev shell upload to cache 2024-03-29 16:26:26 -03:00
Eelco Dolstra 45bb610359
Merge pull request #47 from DeterminateSystems/cache-dev-shells
Cache dev shell in FlakeHub Cache
2024-03-29 16:01:23 +01:00
Luc Perkins ffddafa4f4
Cache dev shell in FlakeHub Cache 2024-03-28 11:30:13 -03:00
Eelco Dolstra 1a898bd613
Merge pull request #46 from DeterminateSystems/fix-post-build-hook
Fix post-build hook
2024-03-27 16:58:40 +01:00
Eelco Dolstra 150468c70d Fix post-build hook
"exec" needs to come after the environment variables.
2024-03-27 16:32:14 +01:00
Graham Christensen 7bd6ea0e84
Merge pull request #45 from DeterminateSystems/nits
Migrate nix-installer-action@mnc to main
2024-03-13 09:51:46 -04:00
Graham Christensen b619262a4e nix-installer-action@mnc to main 2024-03-13 09:29:55 -04:00
Graham Christensen 5e55d037f8
Merge pull request #44 from DeterminateSystems/flakehub-cache
Flakehub cache
2024-03-11 19:54:20 -04:00
Graham Christensen 6efe2c73c3 Switch to mnc 2024-03-11 13:15:26 -04:00
Graham Christensen 0c2a3b5d4f Drop the special branch 2024-03-11 13:05:54 -04:00
Graham Christensen fa32a1bad7 ? 2024-03-11 12:07:15 -04:00
Graham Christensen 1aca72fd3a Update the readme to include permissions 2024-03-09 14:30:48 -05:00
Graham Christensen 98db0bfe86 Revert "cache branch builds"
This reverts commit 1a5941c243.
2024-03-09 14:12:22 -05:00
Graham Christensen 16f1b8683c Revert "Revert "Revert "Revert "Revert "arm64: pin back to the v3 action to resolve a mysterious build failure"""""
This reverts commit daf7cd422e.
2024-03-09 14:06:17 -05:00
Graham Christensen daf7cd422e Revert "Revert "Revert "Revert "arm64: pin back to the v3 action to resolve a mysterious build failure""""
This reverts commit a2db427eef.
2024-03-09 12:55:24 -05:00
Graham Christensen 570434e14b D'oh: publish the closure for arm64-linux 2024-03-09 12:55:05 -05:00
Graham Christensen a2db427eef Revert "Revert "Revert "arm64: pin back to the v3 action to resolve a mysterious build failure"""
This reverts commit 24e7ebc681.
2024-03-09 12:41:31 -05:00
Graham Christensen 1a5941c243 cache branch builds 2024-03-09 11:44:41 -05:00
Graham Christensen 24e7ebc681 Revert "Revert "arm64: pin back to the v3 action to resolve a mysterious build failure""
This reverts commit e612684c6b.
2024-03-09 11:43:53 -05:00
Graham Christensen e612684c6b Revert "arm64: pin back to the v3 action to resolve a mysterious build failure"
This reverts commit 687c480220.
2024-03-09 11:38:28 -05:00
Graham Christensen 687c480220 arm64: pin back to the v3 action to resolve a mysterious build failure 2024-03-09 11:01:22 -05:00
Graham Christensen 64de95e342 0.2.0 2024-03-09 10:51:56 -05:00
Graham Christensen d32427b6c8 Start caching to FHC, especially mac builds 2024-03-09 10:50:39 -05:00
Graham Christensen 605aa5bc43 Update the readme, release-branches workflow 2024-03-08 09:41:54 -05:00
Graham Christensen ad963d4fe7
Apply suggestions from code review 2024-03-06 13:38:35 -05:00
Graham Christensen bd5e681cb3 add flakehub-cache to the release branches for testing 2024-03-06 12:31:51 -05:00
Graham Christensen e23f5398df fixup: drop -priv 2024-03-06 12:17:56 -05:00
Graham Christensen d0115f624f
Merge branch 'main' into flakehub-cache 2024-03-06 09:38:44 -05:00
Eelco Dolstra fbf0bbed94
Merge pull request #35 from Kiskae/pass-through-429
Pass through HTTP 429 to the nix daemon
2024-03-04 18:04:20 +01:00
Graham Christensen 31b9becad9 Merge remote-tracking branch 'origin/main' into HEAD 2024-03-04 11:44:59 -05:00
Graham Christensen 77af0493d2
Apply suggestions from code review 2024-03-04 11:38:31 -05:00
Eelco Dolstra b2a2acdecc
Merge pull request #21 from DeterminateSystems/eelcodolstra/fh-224-magic-nix-cache-use-post-build-hook-for-the-gha-cache-as
Use post-build hook to trigger GHA cache uploads
2024-03-01 18:20:50 +01:00
Cole Helbling 619a6346c0 Merge remote-tracking branch 'origin/eelcodolstra/fh-224-magic-nix-cache-use-post-build-hook-for-the-gha-cache-as' into merge-against-upstream 2024-03-01 08:40:20 -08:00
Eelco Dolstra 9bf26f0680 Invalidate negative narinfo cache entries 2024-03-01 11:55:59 +01:00
Eelco Dolstra b53876db25 Typo 2024-02-29 22:48:51 +01:00
Eelco Dolstra e99ef6ba61 Log upload 2024-02-29 22:31:07 +01:00
Eelco Dolstra 7a2d7ce296 Simplify dev shell 2024-02-29 21:42:35 +01:00
Eelco Dolstra f16e3c292a Make NAR upload streaming 2024-02-29 21:22:44 +01:00
Eelco Dolstra 334bcc7df9 Compress 2024-02-29 20:45:12 +01:00
Eelco Dolstra 625e95f484 Trigger GHA cache uploads from the post-build hook
Also, a worker task now does the uploads directly rather than having
magic-nix-cache invoke "nix copy" via HTTP to itself.
2024-02-29 16:31:29 +01:00
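A sketch of that architecture with tokio (names illustrative): the post-build hook handler only enqueues store paths on a channel; a long-lived worker drains the queue and performs the uploads itself, with no HTTP round-trip back to the daemon:

    use tokio::sync::mpsc;

    // Hypothetical stand-in for the real GHA cache upload.
    async fn upload_to_gha_cache(path: String) {
        println!("uploading {path}");
    }

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = mpsc::channel::<String>(100);

        // Worker task: uploads directly instead of invoking "nix copy"
        // over HTTP against our own server.
        let worker = tokio::spawn(async move {
            while let Some(path) = rx.recv().await {
                upload_to_gha_cache(path).await;
            }
        });

        // The hook handler just enqueues and returns immediately.
        tx.send("/nix/store/example-path".to_string()).await.unwrap();
        drop(tx); // closing the channel lets the worker drain and exit
        worker.await.unwrap();
    }
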
Eelco Dolstra a560959d65 README 2024-02-29 16:27:19 +01:00
Eelco Dolstra e5513406df Update to latest attic 2024-02-29 16:26:48 +01:00
Cole Helbling 14b3ed8242
Merge pull request #18 from DeterminateSystems/configure-flakehub-flake-name
Allow configuring the FlakeHub flake name
2024-02-28 03:44:18 -08:00
Cole Helbling 8ce3c6cafb fixup: Cargo.toml 2024-02-27 09:57:20 -08:00
Cole Helbling 531387f66f Allow configuring the FlakeHub flake name 2024-02-27 09:47:46 -08:00
Cole Helbling bf8c52586b Merge remote-tracking branch 'upstream/main' into merge-against-upstream 2024-02-27 08:51:49 -08:00
Cole Helbling 3b8363028d just hack in an unsafe way to turn a string into a cache name without validation 2024-02-27 08:49:21 -08:00
Cole Helbling 34956e86bb checkpoint 2024-02-27 08:30:35 -08:00
Eelco Dolstra 308fa515eb
Merge pull request #17 from DeterminateSystems/eelcodolstra/fh-218-clean-up-the-magic-nix-cache-priv-backend-to-be-published
Get rid of unwraps/expects
2024-02-26 17:02:17 +01:00
Graham Christensen f7e335a369
Merge pull request #43 from DeterminateSystems/privacy-policy
Correct privacy policy link
2024-02-25 16:45:31 -05:00
Graham Christensen 448ba42429
Correct privacy policy link 2024-02-25 15:25:09 -05:00
Eelco Dolstra 1f46e11aa7 Cleanup 2024-02-24 10:28:18 +01:00
Eelco Dolstra b41211dc24
Apply suggestions from code review
Co-authored-by: Cole Helbling <cole.helbling@determinate.systems>
2024-02-24 10:12:54 +01:00
Eelco Dolstra 5f981d2f91 Get rid of unwraps/expects
The remaining ones should be genuine "impossible" conditions.

main() now renders the error value using anyhow.
2024-02-23 19:45:38 +01:00
Eelco Dolstra 75b1450fdf
Merge pull request #16 from DeterminateSystems/http-startup-notification
Send startup notification via HTTP
2024-02-23 18:25:04 +01:00
Eelco Dolstra 0607f5efa4 Use reqwest::header::CONTENT_TYPE 2024-02-23 18:18:34 +01:00
Eelco Dolstra f6c21a9184 Send startup notification via HTTP 2024-02-23 15:34:22 +01:00
Eelco Dolstra 0537b74a1e
Merge pull request #15 from DeterminateSystems/hoverbear/fh-169-magic-nix-cache-priv-try-logging-in-to-flakehub-if-it-hasnt
Try logging into FlakeHub if nix-installer didn't do that for us
2024-02-13 22:47:39 +01:00
Ana Hobden 0e4f6af07b FlakeHub not Flakehub 2024-02-13 09:37:11 -08:00
Ana Hobden f1d5b7fdc5 Try logging into FlakeHub if nix-installer didn't do that for us 2024-02-12 14:17:20 -08:00
Eelco Dolstra ee9b236259
Merge pull request #14 from DeterminateSystems/eelcodolstra/fh-177
Fix support for multiple output derivations
2024-02-12 17:05:15 +01:00
Eelco Dolstra 7965e647fe Fix support for multiple output derivations
OUT_PATHS is split by spaces, not newlines.
2024-02-12 15:05:59 +01:00
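The fix in miniature: Nix hands the hook OUT_PATHS as one space-separated string, so splitting on newlines silently fused a multi-output derivation's paths into a single bogus entry. A sketch:

    use std::env;

    fn main() {
        let out_paths = env::var("OUT_PATHS").unwrap_or_default();
        // Split on whitespace, not newlines.
        let paths: Vec<&str> = out_paths.split_whitespace().collect();
        for p in &paths {
            println!("built: {p}");
        }
    }
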
Eelco Dolstra de2b44169c Don't crash if the client sends an invalid store path name 2024-02-12 14:52:35 +01:00
Cole Helbling 9e7cf4e775
Merge pull request #42 from DeterminateSystems/dependabot/cargo/h2-0.3.24
Bump h2 from 0.3.19 to 0.3.24
2024-01-31 08:09:53 -08:00
dependabot[bot] b9f40af341
Bump h2 from 0.3.19 to 0.3.24
Bumps [h2](https://github.com/hyperium/h2) from 0.3.19 to 0.3.24.
- [Release notes](https://github.com/hyperium/h2/releases)
- [Changelog](https://github.com/hyperium/h2/blob/v0.3.24/CHANGELOG.md)
- [Commits](https://github.com/hyperium/h2/compare/v0.3.19...v0.3.24)

---
updated-dependencies:
- dependency-name: h2
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-01-31 15:44:33 +00:00
Cole Helbling 1835ad1c08
Merge pull request #13 from DeterminateSystems/update-deps
Cargo.lock: update h2
2024-01-23 14:51:48 -08:00
Cole Helbling 2617132c3a Cargo.lock: update h2 2024-01-23 14:19:01 -08:00
Eelco Dolstra 53c8320588
Merge pull request #12 from DeterminateSystems/error-handling
Error handling improvements
2024-01-19 17:02:07 +01:00
Cole Helbling f61523931c Don't exit with an error if we can help it
Otherwise, the Nix build will fail.
2024-01-19 07:06:21 -08:00
Cole Helbling 04197c7742 Improve error handling if enqueuing paths failed 2024-01-18 14:27:58 -08:00
Cole Helbling 3cb4c6dbe0 Show full backtrace if build hook panics 2024-01-18 14:20:57 -08:00
Graham Christensen d9f6addd3f
Merge pull request #11 from DeterminateSystems/no-nixos-cache
Don't create the cache
2024-01-18 15:25:31 -05:00
Cole Helbling 28eca6460d Don't create the cache
attic-priv has functionality to getsert caches as they are requested.
2024-01-18 11:06:19 -08:00
Cole Helbling e8efa56401 flake: don't include cross in default devshell 2024-01-18 11:06:00 -08:00
Cole Helbling b08e97efea Don't rely on cache.nixos.org
Users can substitute from it the first time they encounter a path it
has, but otherwise they should re-substitute from us.
2024-01-18 07:38:51 -08:00
Eelco Dolstra ac64bcd221
Merge pull request #10 from DeterminateSystems/eelcodolstra/fh-159-magic-nix-cache-handle-flakehub-errors-more-gracefully
Improve error handling in FlakeHub cache setup
2024-01-11 11:02:41 +01:00
Eelco Dolstra ffccb9bd98 BadURL -> BadUrl 2024-01-10 23:34:15 +01:00
Eelco Dolstra 0d5e889783 Remove use of /cache/token to get a fallback cache
We rely on the GitHub token creating a project on FlakeHub as a side
effect.
2024-01-10 21:31:14 +01:00
Eelco Dolstra 0d9e0c088c Don't use /cache/token anymore except as a fallback 2024-01-10 20:48:48 +01:00
Eelco Dolstra 9781bb8b6e Improve error handling in FlakeHub cache setup
Also update the token endpoint.
2024-01-10 20:19:04 +01:00
Eelco Dolstra e4bb70dba3
Merge pull request #9 from DeterminateSystems/eelcodolstra/fh-158-magic-nix-cache-handle-post-build-hook-failure
Ignore errors from the post-build hook
2024-01-10 18:05:10 +01:00
Eelco Dolstra 6dd1146c24 Use exec 2024-01-10 17:46:55 +01:00
Eelco Dolstra c2542a8016 Ignore errors from the post-build hook
Also, pass the daemon port via the wrapper script rather than an
environment variable.
2024-01-10 16:55:22 +01:00
Eelco Dolstra 0619bb7af3 Shut up resolver warning 2024-01-10 16:27:53 +01:00
Eelco Dolstra a5caffb2e0
Merge pull request #8 from DeterminateSystems/export-closure
Upload Nix closure to S3
2024-01-09 17:18:14 +01:00
Eelco Dolstra bb714ce48c Fix release scripts 2024-01-09 13:52:32 +01:00
Eelco Dolstra 224e365867 Fix macOS build 2024-01-09 13:32:12 +01:00
Eelco Dolstra 894b558a74 Disable magic-nix-cache-action-priv for now 2024-01-09 12:24:19 +01:00
Eelco Dolstra 345fd479ed Upload Nix closure 2024-01-09 12:13:41 +01:00
Eelco Dolstra d73bb0a676 Drop static linking 2024-01-09 12:02:14 +01:00
Eelco Dolstra 16dd05e8f0 Update rustls-webpki dependency
https://github.com/DeterminateSystems/magic-nix-cache-priv/security/dependabot/1
2024-01-09 12:01:46 +01:00
Eelco Dolstra 8115ab51e2 Update to Nixpkgs 23.11 2024-01-09 12:01:33 +01:00
Eelco Dolstra c6a27d60ea
Merge pull request #6 from DeterminateSystems/update-rustix
Update rustix dependency
2024-01-08 17:42:54 +01:00
Eelco Dolstra 3a1064ece3 Update rustix dependency
https://github.com/DeterminateSystems/magic-nix-cache-priv/security/dependabot/2
2024-01-08 17:18:13 +01:00
Cole Helbling 50a138c21d
Merge pull request #7 from DeterminateSystems/fix-branch
Fix magic-nix-cache-action-priv branch
2024-01-08 08:05:20 -08:00
Eelco Dolstra cf983504ea Fix magic-nix-cache-action-priv branch 2024-01-08 16:53:00 +01:00
Graham Christensen 17539505c4
Merge pull request #41 from DeterminateSystems/dep-bump
Update some dependencies
2023-12-19 14:13:25 -05:00
Ana Hobden 6fda83104b Update some dependencies 2023-12-19 10:58:10 -08:00
Graham Christensen 422612fba1
Merge pull request #38 from DeterminateSystems/grahamc-patch-1
Restore the magic nix cache and flake check actions to aarch64 linux
2023-12-19 11:35:01 -05:00
Eelco Dolstra 684aa7a2c4
Merge pull request #3 from DeterminateSystems/async-push
Push to attic from the post-build-hook
2023-12-15 17:01:19 +01:00
Eelco Dolstra 3a111c9404
Merge pull request #4 from DeterminateSystems/cole-h-patch-1
release-branches: inherit secrets
2023-12-14 21:41:57 +01:00
Eelco Dolstra f757190c3b Error handling 2023-12-14 21:35:50 +01:00
Eelco Dolstra 08cc0812bf Formatting 2023-12-14 21:29:19 +01:00
Cole Helbling 53412e544d
release-branches: inherit secrets 2023-12-14 10:24:14 -08:00
Eelco Dolstra 71157983e3 Allow the daemon to notify the parent that it's ready 2023-12-14 17:42:46 +01:00
Eelco Dolstra 5c068ecf75 Update attic dependency 2023-12-14 17:42:46 +01:00
Eelco Dolstra 6bf609975a Push paths to FlakeHub from the post-build-hook
Also get rid of daemonization, it causes too many problems with tokio.
2023-12-14 17:42:46 +01:00
Eelco Dolstra fd1420febf
Add support for pushing to Attic (#1)
* Add support for pushing to Attic

* fmt/clippy

* Fix attic dependency

* Pass ssh private key

* Try to inherit secrets

* Fix static build

* Fix default package

* Fix daemonizing

* Fix clippy

* Update nix.conf

* Add --use-attic flag

* --use-attic -> --use-flakehub

* Handle project not existing

* Handle Attic init failure

* Skip .chroot paths

* Update netrc

* Downgrade to Nixpkgs 23.05 to fix static builds

* Use rust 1.70

We need 1.70, but 1.69 is the default in Nixpkgs 23.05.

* Rename stuff

* Use magic-nix-cache-priv

* Hack
2023-12-14 08:09:09 -08:00
Graham Christensen cb1016de6f
Restore the magic nix cache and flake check actions to aarch64 linux 2023-12-04 15:54:59 -05:00
Kiskae 606006b931 Pass through HTTP 429 to the nix daemon 2023-11-10 10:53:45 +00:00
32 changed files with 7423 additions and 1345 deletions


@@ -1,3 +0,0 @@
# For -Zbuild-std
[target.aarch64-unknown-linux-musl]
rustflags = ["-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"]

.editorconfig Normal file (10 lines added)

@@ -0,0 +1,10 @@
# https://editorconfig.org
root = true
[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

.envrc (6 lines changed)

@@ -1,5 +1 @@
if ! has nix_direnv_version || ! nix_direnv_version 2.1.1; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.1.1/direnvrc" "sha256-b6qJ4r34rbE23yWjMqbmu3ia2z4b2wIlZUksBke/ol0="
fi
use_flake
use flake


@@ -5,92 +5,49 @@ on:
workflow_call:
jobs:
build-artifacts-ARM64-macOS:
concurrency: build-ARM64-macOS
runs-on: macos-latest-xlarge
build-artifacts:
runs-on: ${{ matrix.systems.runner }}
permissions:
contents: read
id-token: write
env:
ARTIFACT_KEY: magic-nix-cache-${{ matrix.systems.system }}
ARCHIVE_NAME: magic-nix-cache.closure.xz
strategy:
matrix:
systems:
- nix-system: x86_64-linux
system: X64-Linux
runner: ubuntu-22.04
- nix-system: aarch64-linux
system: ARM64-Linux
runner: namespace-profile-default-arm64
- nix-system: x86_64-darwin
system: X64-macOS
runner: macos-14-large
- nix-system: aarch64-darwin
system: ARM64-macOS
runner: macos-latest-xlarge
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install Nix on ${{ matrix.systems.system }}
uses: DeterminateSystems/nix-installer-action@main
- name: Set up FlakeHub Cache
uses: DeterminateSystems/flakehub-cache-action@main
- uses: DeterminateSystems/nix-installer-action@main
- name: Build and cache dev shell for ${{ matrix.systems.nix-system }}
run: |
nix build ".#devShells.${{ matrix.systems.nix-system }}.default"
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Build package and create closure for ${{ matrix.systems.system }}
run: |
nix build .# -L --fallback && \
nix-store --export $(nix-store -qR ./result) | xz -9 > "${{ env.ARCHIVE_NAME }}"
- name: Build package
run: "nix build .# -L --fallback"
- name: Upload a Build Artifact
uses: actions/upload-artifact@v3.1.2
- name: Upload magic-nix-cache closure for ${{ matrix.systems.system }}
uses: actions/upload-artifact@v4.6.0
with:
# Artifact name
name: magic-nix-cache-ARM64-macOS
path: result/bin/magic-nix-cache
retention-days: 1
build-artifacts-X64-macOS:
concurrency: build-X64-macOS
runs-on: macos-12
steps:
- uses: actions/checkout@v3
- uses: DeterminateSystems/flake-checker-action@main
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Build package
run: "nix build .# -L --fallback"
- name: Upload a Build Artifact
uses: actions/upload-artifact@v3.1.2
with:
# Artifact name
name: magic-nix-cache-X64-macOS
path: result/bin/magic-nix-cache
retention-days: 1
build-artifacts-X64-Linux:
concurrency: build-X64-Linux
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: DeterminateSystems/flake-checker-action@main
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Build package
run: "nix build .# -L --fallback"
- name: Upload a Build Artifact
uses: actions/upload-artifact@v3.1.2
with:
# Artifact name
name: magic-nix-cache-X64-Linux
path: result/bin/magic-nix-cache
retention-days: 1
build-artifacts-ARM64-Linux:
concurrency: build-ARM64-Linux
runs-on: namespace-profile-default-arm64
steps:
- uses: actions/checkout@v3
# - uses: DeterminateSystems/flake-checker-action@main
- uses: DeterminateSystems/nix-installer-action@main
# - uses: DeterminateSystems/magic-nix-cache-action@main
- name: Build package
run: "nix build .# -L --fallback"
- name: Upload a Build Artifact
uses: actions/upload-artifact@v3.1.2
with:
# Artifact name
name: magic-nix-cache-ARM64-Linux
path: result/bin/magic-nix-cache
name: ${{ env.ARTIFACT_KEY }}
path: ${{ env.ARCHIVE_NAME }}
retention-days: 1

.github/workflows/check-and-test.yaml vendored Normal file (87 lines added)

@@ -0,0 +1,87 @@
name: Run checks and integration test
on:
pull_request:
push:
branches: [main]
jobs:
checks:
name: Nix and Rust checks
runs-on: ubuntu-22.04
permissions:
contents: read
id-token: write
steps:
- uses: actions/checkout@v4
- name: Check health of flake.lock
uses: DeterminateSystems/flake-checker-action@main
with:
fail-mode: true
- name: Install Nix
uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/flakehub-cache-action@main
- name: Check Rust formatting
run: nix develop --command cargo fmt --check
- name: Clippy
run: nix develop --command cargo clippy
build:
name: Build artifacts
needs: checks
uses: ./.github/workflows/build.yaml
secrets: inherit
action-integration-test:
name: Integration test for magic-nix-cache-action
runs-on: ${{ matrix.systems.runner }}
needs: build
env:
ARTIFACT_KEY: magic-nix-cache-${{ matrix.systems.system }}
ARCHIVE_NAME: magic-nix-cache.closure.xz
strategy:
matrix:
systems:
- system: X64-Linux
runner: ubuntu-22.04
- system: ARM64-Linux
runner: namespace-profile-default-arm64
- system: X64-macOS
runner: macos-14-large
- system: ARM64-macOS
runner: macos-latest-xlarge
permissions:
contents: read
id-token: write
steps:
- uses: actions/checkout@v4
- name: Download closure for ${{ matrix.systems.system }}
uses: actions/download-artifact@v4.1.8
with:
name: ${{ env.ARTIFACT_KEY }}
path: ${{ env.ARTIFACT_KEY }}
- name: Install Nix on ${{ matrix.systems.system }}
uses: DeterminateSystems/nix-installer-action@main
- name: Test magic-nix-cache-action@main on ${{ matrix.systems.runner }}
uses: DeterminateSystems/magic-nix-cache-action@main
with:
source-binary: "${{ env.ARTIFACT_KEY }}/${{ env.ARCHIVE_NAME }}"
_internal-strict-mode: true
- name: Run nix to test magic-nix-cache-action
run: |
nix develop --command echo "just testing"
- name: Exhaust our GitHub Actions Cache tokens
# Generally skip this step since it is so intensive
if: ${{ false }}
run: |
date >> README.md
nix build .#veryLongChain -v


@ -1,27 +0,0 @@
name: Rust checks
on:
pull_request:
push:
branches: [main]
jobs:
checks:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- name: Install Nix
uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Check health of flake.lock
uses: DeterminateSystems/flake-checker-action@main
with:
fail-mode: true
- name: Check Rust formatting
run: nix develop --command cargo fmt --check
- name: Clippy
run: nix develop --command cargo clippy

.github/workflows/flakehub.yaml (new file)

@ -0,0 +1,21 @@
name: "Publish every Git push to main to FlakeHub"
on:
push:
branches:
- "main"
jobs:
flakehub-publish:
runs-on: "ubuntu-latest"
permissions:
id-token: "write"
contents: "read"
steps:
- uses: "actions/checkout@v4"
- uses: "DeterminateSystems/nix-installer-action@main"
- uses: "DeterminateSystems/flakehub-push@main"
with:
name: "DeterminateSystems/magic-nix-cache"
rolling: true
visibility: "public"


@ -5,10 +5,10 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install Nix
uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: DeterminateSystems/flakehub-cache-action@main
- name: Expose GitHub Runtime
uses: crazy-max/ghaction-github-runtime@v2
- name: Dump credentials


@ -10,6 +10,7 @@ on:
jobs:
build:
uses: ./.github/workflows/build.yaml
secrets: inherit
release:
needs: build
@ -21,7 +22,7 @@ jobs:
id-token: write # In order to request a JWT for AWS auth
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2
with:
@ -31,33 +32,33 @@ jobs:
- name: Create the artifacts directory
run: rm -rf ./artifacts && mkdir ./artifacts
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-ARM64-macOS
path: cache-binary-ARM64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-macOS/magic-nix-cache ./artifacts/magic-nix-cache-ARM64-macOS
run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-X64-macOS
path: cache-binary-X64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-X64-macOS/magic-nix-cache ./artifacts/magic-nix-cache-X64-macOS
run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-X64-Linux
path: cache-binary-X64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-X64-Linux/magic-nix-cache ./artifacts/magic-nix-cache-X64-Linux
run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-ARM64-Linux
path: cache-binary-ARM64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-Linux/magic-nix-cache ./artifacts/magic-nix-cache-ARM64-Linux
run: cp ./cache-binary-ARM64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-Linux
- name: Publish Release (Branch)
env:


@ -10,12 +10,7 @@ on:
jobs:
build:
uses: ./.github/workflows/build.yaml
release:
needs: build
concurrency: release
# We want to build artifacts only if the `upload to s3` label is applied
# Only intra-repo PRs are allowed to have PR artifacts uploaded
# We only want to trigger the upload once, in the case the upload label is added, not when any label is added
if: |
@ -24,44 +19,50 @@ jobs:
(github.event.action == 'labeled' && github.event.label.name == 'upload to s3')
|| (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3'))
)
uses: ./.github/workflows/build.yaml
secrets: inherit
release:
needs: build
concurrency: release
runs-on: ubuntu-latest
permissions:
id-token: write # In order to request a JWT for AWS auth
contents: read
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Create the artifacts directory
run: rm -rf ./artifacts && mkdir ./artifacts
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-ARM64-macOS
path: cache-binary-ARM64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-macOS/magic-nix-cache ./artifacts/magic-nix-cache-ARM64-macOS
run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-X64-macOS
path: cache-binary-X64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-X64-macOS/magic-nix-cache ./artifacts/magic-nix-cache-X64-macOS
run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-X64-Linux
path: cache-binary-X64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-X64-Linux/magic-nix-cache ./artifacts/magic-nix-cache-X64-Linux
run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-ARM64-Linux
path: cache-binary-ARM64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-Linux/magic-nix-cache ./artifacts/magic-nix-cache-ARM64-Linux
run: cp ./cache-binary-ARM64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-Linux
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2


@ -19,38 +19,38 @@ jobs:
id-token: write # In order to request a JWT for AWS auth
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Create the artifacts directory
run: rm -rf ./artifacts && mkdir ./artifacts
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-ARM64-macOS
path: cache-binary-ARM64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-macOS/magic-nix-cache ./artifacts/magic-nix-cache-ARM64-macOS
run: cp ./cache-binary-ARM64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-macOS
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-X64-macOS
path: cache-binary-X64-macOS
- name: Persist the cache binary
run: cp ./cache-binary-X64-macOS/magic-nix-cache ./artifacts/magic-nix-cache-X64-macOS
run: cp ./cache-binary-X64-macOS/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-macOS
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-X64-Linux
path: cache-binary-X64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-X64-Linux/magic-nix-cache ./artifacts/magic-nix-cache-X64-Linux
run: cp ./cache-binary-X64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-X64-Linux
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4.1.8
with:
name: magic-nix-cache-ARM64-Linux
path: cache-binary-ARM64-Linux
- name: Persist the cache binary
run: cp ./cache-binary-ARM64-Linux/magic-nix-cache ./artifacts/magic-nix-cache-ARM64-Linux
run: cp ./cache-binary-ARM64-Linux/magic-nix-cache.closure.xz ./artifacts/magic-nix-cache-ARM64-Linux
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v2


@ -0,0 +1,20 @@
name: update-flake-lock
on:
workflow_dispatch: # enable manual triggering
schedule:
- cron: "0 0 * * 0" # every Sunday at midnight
jobs:
lockfile:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/flakehub-cache-action@main
- uses: DeterminateSystems/update-flake-lock@main
with:
pr-title: Update flake.lock
pr-labels: |
dependencies
automated

Cargo.lock (generated)

File diff suppressed because it is too large


@ -3,6 +3,7 @@ members = [
"gha-cache",
"magic-nix-cache",
]
resolver = "2"
[profile.release]
opt-level = 'z'


@ -1,12 +1,26 @@
# Magic Nix Cache
> [!WARNING]
> The [Magic Nix Cache will stop working](https://determinate.systems/posts/magic-nix-cache-free-tier-eol) on **February 1st, 2025** unless you're on [GitHub Enterprise Server](https://github.com/enterprise).
>
> You can upgrade to [FlakeHub Cache](https://flakehub.com/cache) and get **one month free** using the coupon code **`FHC`**.
>
> For more information, read [this blog post](https://determinate.systems/posts/magic-nix-cache-free-tier-eol/).
Save 30-50%+ of CI time without any effort or cost.
Use Magic Nix Cache, a totally free and zero-configuration binary cache for Nix on GitHub Actions.
Add our [GitHub Action][action] to your workflow after installing Nix, like this:
```yaml
- uses: DeterminateSystems/magic-nix-cache-action@main
permissions:
contents: read
id-token: write
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- run: nix flake check
```
See [Usage](#usage) for a detailed example.
@ -41,8 +55,11 @@ on:
jobs:
check:
runs-on: ubuntu-22.04
permissions:
contents: read
id-token: write
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- run: nix flake check
@ -74,8 +91,8 @@ For local development, see `gha-cache/README.md` for more details on how to obta
```shell
cargo run -- -c creds.json --upstream https://cache.nixos.org
cargo build --release --target x86_64-unknown-linux-musl
cargo build --release --target aarch64-unknown-linux-musl
cargo build --release --target x86_64-unknown-linux-gnu
cargo build --release --target aarch64-unknown-linux-gnu
nix copy --to 'http://127.0.0.1:3000' $(which bash)
nix-store --store $PWD/test-root --extra-substituters 'http://localhost:3000' --option require-sigs false -r $(which bash)
```
@ -119,7 +136,7 @@ You can read the full privacy policy for [Determinate Systems][detsys], the crea
[action]: https://github.com/DeterminateSystems/magic-nix-cache-action/
[installer]: https://github.com/DeterminateSystems/nix-installer/
[ghacache]: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows
[privacy]: https://determinate.systems/privacy
[privacy]: https://determinate.systems/policies/privacy
[telemetry]: https://github.com/DeterminateSystems/magic-nix-cache/blob/main/magic-nix-cache/src/telemetry.rs
[semantics]: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
[z2ncache]: https://zero-to-nix.com/concepts/caching#binary-caches
@ -127,4 +144,3 @@ You can read the full privacy policy for [Determinate Systems][detsys], the crea
[attic]: https://github.com/zhaofengli/attic
[colmena]: https://github.com/zhaofengli/colmena
[z2n]: https://zero-to-nix.com

crane.nix (deleted)

@ -1,116 +0,0 @@
{ stdenv
, pkgs
, lib
, crane
, rust
, rust-bin
, nix-gitignore
, supportedSystems
}:
let
inherit (stdenv.hostPlatform) system;
nightlyVersion = "2023-05-01";
rustNightly = (pkgs.rust-bin.nightly.${nightlyVersion}.default.override {
extensions = [ "rust-src" "rust-analyzer-preview" ];
targets = cargoTargets;
}).overrideAttrs (old: {
# Remove the propagated libiconv since we want to add our static version
depsTargetTargetPropagated = lib.filter (d: d.pname != "libiconv")
(lib.flatten (old.depsTargetTargetPropagated or [ ]));
});
# For easy cross-compilation in devShells
# We are just composing the pkgsCross.*.stdenv.cc together
crossPlatforms =
let
makeCrossPlatform = crossSystem:
let
pkgsCross =
if crossSystem == system then pkgs
else
import pkgs.path {
inherit system crossSystem;
overlays = [ ];
};
rustTargetSpec = rust.toRustTargetSpec pkgsCross.pkgsStatic.stdenv.hostPlatform;
rustTargetSpecUnderscored = builtins.replaceStrings [ "-" ] [ "_" ] rustTargetSpec;
cargoLinkerEnv = lib.strings.toUpper "CARGO_TARGET_${rustTargetSpecUnderscored}_LINKER";
cargoCcEnv = "CC_${rustTargetSpecUnderscored}"; # for ring
ccbin = "${pkgsCross.stdenv.cc}/bin/${pkgsCross.stdenv.cc.targetPrefix}cc";
in
{
name = crossSystem;
value = {
inherit rustTargetSpec;
cc = pkgsCross.stdenv.cc;
pkgs = pkgsCross;
buildInputs = makeBuildInputs pkgsCross;
env = {
"${cargoLinkerEnv}" = ccbin;
"${cargoCcEnv}" = ccbin;
};
};
};
systems = lib.filter (s: s == system || lib.hasInfix "linux" s) supportedSystems
# Cross from aarch64-darwin -> x86_64-darwin doesn't work yet
# Hopefully the situation will improve with the SDK bumps
++ lib.optional (system == "x86_64-darwin") "aarch64-darwin";
in
builtins.listToAttrs (map makeCrossPlatform systems);
cargoTargets = lib.mapAttrsToList (_: p: p.rustTargetSpec) crossPlatforms;
cargoCrossEnvs = lib.foldl (acc: p: acc // p.env) { } (builtins.attrValues crossPlatforms);
makeBuildInputs = pkgs: with pkgs; [ ]
++ lib.optionals pkgs.stdenv.isDarwin [
darwin.apple_sdk.frameworks.Security
(libiconv.override { enableStatic = true; enableShared = false; })
];
buildFor = system:
let
crossPlatform = crossPlatforms.${system};
inherit (crossPlatform) pkgs;
craneLib = (crane.mkLib pkgs).overrideToolchain rustNightly;
crateName = craneLib.crateNameFromCargoToml {
cargoToml = ./magic-nix-cache/Cargo.toml;
};
src = nix-gitignore.gitignoreSource [ ] ./.;
commonArgs = {
inherit (crateName) pname version;
inherit src;
buildInputs = makeBuildInputs pkgs;
cargoExtraArgs = "--target ${crossPlatform.rustTargetSpec}";
cargoVendorDir = craneLib.vendorMultipleCargoDeps {
inherit (craneLib.findCargoFiles src) cargoConfigs;
cargoLockList = [
./Cargo.lock
"${rustNightly.passthru.availableComponents.rust-src}/lib/rustlib/src/rust/Cargo.lock"
];
};
} // crossPlatform.env;
crate = craneLib.buildPackage (commonArgs // {
cargoArtifacts = craneLib.buildDepsOnly commonArgs;
# The resulting executable must be standalone
allowedRequisites = [ ];
});
in
crate;
in
{
inherit crossPlatforms cargoTargets cargoCrossEnvs rustNightly;
magic-nix-cache = buildFor system;
}

flake.lock

@ -1,175 +1,176 @@
{
"nodes": {
"crane": {
"inputs": {
"flake-compat": [
"flake-compat"
],
"flake-utils": "flake-utils",
"nixpkgs": [
"nixpkgs"
],
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1695511445,
"narHash": "sha256-mnE14re43v3/Jc50Jv0BKPMtEk7FEtDSligP6B5HwlI=",
"rev": "3de322e06fc88ada5e3589dc8a375b73e749f512",
"revCount": 411,
"lastModified": 1741479724,
"narHash": "sha256-fnyETBKSVRa5abjOiRG/IAzKZq5yX8U6oRrHstPl4VM=",
"rev": "60202a2e3597a3d91f5e791aab03f45470a738b5",
"revCount": 709,
"type": "tarball",
"url": "https://api.flakehub.com/f/pinned/ipetkov/crane/0.14.1/018ac45c-ff5e-7076-b956-d478a0336516/source.tar.gz"
"url": "https://api.flakehub.com/f/pinned/ipetkov/crane/0.20.2/0195784b-915b-7d2d-915d-ab02d1112ef9/source.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://flakehub.com/f/ipetkov/crane/0.14.1.tar.gz"
"url": "https://flakehub.com/f/ipetkov/crane/%2A"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"revCount": 57,
"type": "tarball",
"url": "https://api.flakehub.com/f/pinned/edolstra/flake-compat/1.0.1/018afb31-abd1-7bff-a5e4-cff7e18efb7a/source.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://flakehub.com/f/edolstra/flake-compat/1.0.1.tar.gz"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1685518550,
"narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils_2": {
"flake-parts": {
"inputs": {
"systems": "systems_2"
"nixpkgs-lib": [
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"lastModified": 1733312601,
"narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"git-hooks-nix": {
"inputs": {
"flake-compat": [
"nix"
],
"gitignore": [
"nix"
],
"nixpkgs": [
"nix",
"nixpkgs"
],
"nixpkgs-stable": [
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1734279981,
"narHash": "sha256-NdaCraHPp8iYMWzdXAt5Nv6sA3MUzlCiGiR586TCwo0=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "aa9f40c906904ebd83da78e7f328cd8aeaeae785",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"git-hooks-nix": "git-hooks-nix",
"nixpkgs": "nixpkgs",
"nixpkgs-23-11": "nixpkgs-23-11",
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1742824067,
"narHash": "sha256-rBPulEBpn4IiqkPsetuh7BRzT2iGCzZYnogTAsbrvhU=",
"rev": "9cb662df7442a1e2c4600fb8ecb2ad613ebc5a95",
"revCount": 19496,
"type": "tarball",
"url": "https://api.flakehub.com/f/pinned/NixOS/nix/2.27.1/0195c8c5-1964-7a31-b025-ebf9bfeef991/source.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://flakehub.com/f/NixOS/nix/2"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1696604326,
"narHash": "sha256-YXUNI0kLEcI5g8lqGMb0nh67fY9f2YoJsILafh6zlMo=",
"rev": "87828a0e03d1418e848d3dd3f3014a632e4a4f64",
"revCount": 533189,
"lastModified": 1734359947,
"narHash": "sha256-1Noao/H+N8nFB4Beoy8fgwrcOQLVm9o4zKW1ODaqK9E=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "48d12d5e70ee91fe8481378e540433a7303dbf6a",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-23-11": {
"locked": {
"lastModified": 1717159533,
"narHash": "sha256-oamiKNfr2MS6yH64rUn99mIZjc45nGJlj9eGth/3Xuw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446",
"type": "github"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1742422364,
"narHash": "sha256-mNqIplmEohk5jRkqYqG19GA8MbQ/D4gQSK0Mu4LvfRQ=",
"rev": "a84ebe20c6bc2ecbcfb000a50776219f48d134cc",
"revCount": 770807,
"type": "tarball",
"url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.533189%2Brev-87828a0e03d1418e848d3dd3f3014a632e4a4f64/018b0dc8-e84f-7c59-b5d6-16849c3b2074/source.tar.gz"
"url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.770807%2Brev-a84ebe20c6bc2ecbcfb000a50776219f48d134cc/0195b626-8c1d-7fb9-9282-563af3d37ab9/source.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://flakehub.com/f/NixOS/nixpkgs/0.1.533189.tar.gz"
"url": "https://flakehub.com/f/NixOS/nixpkgs/0.1"
}
},
"root": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay_2"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": [
"crane",
"flake-utils"
],
"nixpkgs": [
"crane",
"nixpkgs"
]
},
"locked": {
"lastModified": 1685759304,
"narHash": "sha256-I3YBH6MS3G5kGzNuc1G0f9uYfTcNY9NYoRc3QsykLk4=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "c535b4f3327910c96dcf21851bbdd074d0760290",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"rust-overlay_2": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1687400833,
"narHash": "sha256-rVENiSupjAE8o1+ZXNRIqewUzM2brm+aeme8MUrwl0U=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "fc0a266e836c079a9131108f4334e5af219dbb93",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
"nix": "nix",
"nixpkgs": "nixpkgs_2"
}
}
},

flake.nix

@ -2,139 +2,122 @@
description = "GitHub Actions-powered Nix binary cache";
inputs = {
nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.1.533189.tar.gz";
nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.1";
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs.nixpkgs.follows = "nixpkgs";
};
crane.url = "https://flakehub.com/f/ipetkov/crane/*";
crane = {
url = "https://flakehub.com/f/ipetkov/crane/0.14.1.tar.gz";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-compat.follows = "flake-compat";
};
flake-compat.url = "https://flakehub.com/f/edolstra/flake-compat/1.0.1.tar.gz";
nix.url = "https://flakehub.com/f/NixOS/nix/2";
};
outputs = { self, nixpkgs, ... }@inputs:
outputs = inputs:
let
overlays = [ inputs.rust-overlay.overlays.default ];
supportedSystems = [
"aarch64-linux"
"x86_64-linux"
"aarch64-darwin"
"x86_64-darwin"
];
forEachSupportedSystem = f: nixpkgs.lib.genAttrs supportedSystems (system: f rec {
pkgs = import nixpkgs { inherit overlays system; };
cranePkgs = pkgs.callPackage ./crane.nix {
inherit supportedSystems;
inherit (inputs) crane;
forEachSupportedSystem = f: inputs.nixpkgs.lib.genAttrs supportedSystems (system: f rec {
pkgs = import inputs.nixpkgs {
inherit system;
overlays = [
inputs.self.overlays.default
];
};
inherit (pkgs) lib;
inherit system;
});
in
{
packages = forEachSupportedSystem ({ pkgs, cranePkgs, ... }: rec {
inherit (cranePkgs) magic-nix-cache;
overlays.default = final: prev:
let
craneLib = inputs.crane.mkLib final;
crateName = craneLib.crateNameFromCargoToml {
cargoToml = ./magic-nix-cache/Cargo.toml;
};
commonArgs = {
inherit (crateName) pname version;
src = inputs.self;
nativeBuildInputs = with final; [
pkg-config
];
buildInputs = [
inputs.nix.packages.${final.stdenv.system}.default
final.boost
];
};
cargoArtifacts = craneLib.buildDepsOnly commonArgs;
in
{
magic-nix-cache = craneLib.buildPackage (commonArgs // {
inherit cargoArtifacts;
});
};
packages = forEachSupportedSystem ({ pkgs, ... }: rec {
magic-nix-cache = pkgs.magic-nix-cache;
default = magic-nix-cache;
veryLongChain =
let
ctx = ./README.md;
# Derivation that copies the README contents into a file, seeding the chain
startFile =
pkgs.stdenv.mkDerivation {
name = "start-file";
buildCommand = ''
cat ${ctx} > $out
'';
};
# Recursive function to create a chain of derivations
createChain = n: startFile:
pkgs.stdenv.mkDerivation {
name = "chain-${toString n}";
src =
if n == 0 then
startFile
else createChain (n - 1) startFile;
buildCommand = ''
echo $src > $out
'';
};
in
# Starting point of the chain
createChain 200 startFile;
});
devShells = forEachSupportedSystem ({ pkgs, cranePkgs, lib }: {
default = pkgs.mkShell ({
inputsFrom = [ cranePkgs.magic-nix-cache ];
devShells = forEachSupportedSystem ({ system, pkgs }: {
default = pkgs.mkShell {
packages = with pkgs; [
rustc
cargo
clippy
rustfmt
rust-analyzer
inputs.nix.packages.${stdenv.system}.default # for linking attic
boost # for linking attic
bashInteractive
cranePkgs.rustNightly
pkg-config
cargo-bloat
cargo-edit
cargo-udeps
cargo-watch
bacon
age
];
shellHook =
let
crossSystems = lib.filter (s: s != pkgs.system) (builtins.attrNames cranePkgs.crossPlatforms);
in
''
# Returns compiler environment variables for a platform
#
# getTargetFlags "suffixSalt" "nativeBuildInputs" "buildInputs"
getTargetFlags() {
# Here we only call the setup-hooks of nativeBuildInputs.
#
# What's off-limits for us:
#
# - findInputs
# - activatePackage
# - Other functions in stdenv setup that depend on the private accumulator variables
(
suffixSalt="$1"
nativeBuildInputs="$2"
buildInputs="$3"
# Offsets for the nativeBuildInput (e.g., gcc)
hostOffset=-1
targetOffset=0
# In stdenv, the hooks are first accumulated before being called.
# Here we call them immediately
addEnvHooks() {
local depHostOffset="$1"
# For simplicity, we only call the hook on buildInputs
for pkg in $buildInputs; do
depTargetOffset=1
$2 $pkg
done
}
unset _PATH
unset NIX_CFLAGS_COMPILE
unset NIX_LDFLAGS
# For simplicity, we only call the setup-hooks of nativeBuildInputs
for nbi in $nativeBuildInputs; do
addToSearchPath _PATH "$nbi/bin"
if [ -e "$nbi/nix-support/setup-hook" ]; then
source "$nbi/nix-support/setup-hook"
fi
done
echo "export NIX_CFLAGS_COMPILE_''${suffixSalt}='$NIX_CFLAGS_COMPILE'"
echo "export NIX_LDFLAGS_''${suffixSalt}='$NIX_LDFLAGS'"
echo "export PATH=$PATH''${_PATH+:$_PATH}"
)
}
target_flags=$(mktemp)
${lib.concatMapStrings (system: let
crossPlatform = cranePkgs.crossPlatforms.${system};
in ''
getTargetFlags \
"${crossPlatform.cc.suffixSalt}" \
"${crossPlatform.cc} ${crossPlatform.cc.bintools}" \
"${builtins.concatStringsSep " " (crossPlatform.buildInputs ++ crossPlatform.pkgs.stdenv.defaultBuildInputs)}" >$target_flags
. $target_flags
'') crossSystems}
rm $target_flags
# Suffix flags for current system as well
export NIX_CFLAGS_COMPILE_${pkgs.stdenv.cc.suffixSalt}="$NIX_CFLAGS_COMPILE"
export NIX_LDFLAGS_${pkgs.stdenv.cc.suffixSalt}="$NIX_LDFLAGS"
unset NIX_CFLAGS_COMPILE
unset NIX_LDFLAGS
'';
} // cranePkgs.cargoCrossEnvs);
keygen = pkgs.mkShellNoCC {
packages = with pkgs; [
age
];
RUST_SRC_PATH = "${pkgs.rustPlatform.rustcSrc}/library";
};
});
};

gha-cache/Cargo.toml

@ -11,12 +11,12 @@ derivative = { version = "2.2.0", default-features = false }
futures = { version = "0.3.28", default-features = false, features = ["alloc"] }
hex = "0.4.3"
rand = { version = "0.8.5", default-features = false, features = ["std", "std_rng"] }
reqwest = { version = "0.11.17", default-features = false, features = ["json", "rustls-tls-native-roots", "stream", "trust-dns"] }
reqwest = { version = "0.12.5", default-features = false, features = ["json", "rustls-tls-native-roots", "stream", "trust-dns"] }
serde = { version = "1.0.162", default-features = false, features = ["derive"] }
serde_json = { version = "1.0.96", default-features = false }
sha2 = { version = "0.10.6", default-features = false }
thiserror = "1.0.40"
tokio = { version = "1.28.0", default-features = false, features = ["io-util"] }
tokio = { version = "1.44.2", default-features = false, features = ["io-util"] }
tracing = { version = "0.1.37", default-features = false }
unicode-bom = "2.0.2"

gha-cache/README.md

@ -32,5 +32,5 @@ We should contribute support for the latter to [Octocrab](https://github.com/XAM
Since GHAC uses private APIs that use special tokens for authentication, we need to get them from a workflow run.
The easiest way is with the `keygen` workflow in this repo.
Generate an `age` encryption key with `age-keygen -o key.txt`, and add the Public Key as a repository secret named `AGE_PUBLIC_KEY`.
Generate an `age` encryption key with `nix shell nixpkgs#age --command age-keygen -o key.txt`, and add the Public Key as a repository secret named `AGE_PUBLIC_KEY`.
Then, trigger the `keygen` workflow which will print out a command that will let you decrypt the credentials.

gha-cache/src/api.rs

@ -4,7 +4,8 @@
use std::fmt;
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use async_trait::async_trait;
@ -47,12 +48,19 @@ const MAX_CONCURRENCY: usize = 4;
type Result<T> = std::result::Result<T, Error>;
pub type CircuitBreakerTrippedCallback = Arc<Box<dyn Fn() + Send + Sync>>;
/// An API error.
#[derive(Error, Debug)]
pub enum Error {
#[error("Failed to initialize the client: {0}")]
InitError(Box<dyn std::error::Error + Send + Sync>),
#[error(
"GitHub Actions Cache throttled Magic Nix Cache. Not trying to use it again on this run."
)]
CircuitBreakerTripped,
#[error("Request error: {0}")]
RequestError(#[from] reqwest::Error), // TODO: Better errors
@ -69,14 +77,13 @@ pub enum Error {
info: ApiErrorInfo,
},
#[error("I/O error: {0}")]
IoError(#[from] std::io::Error),
#[error("I/O error: {0}, context: {1}")]
IoError(std::io::Error, String),
#[error("Too many collisions")]
TooManyCollisions,
}
#[derive(Debug)]
pub struct Api {
/// Credentials to access the cache.
credentials: Credentials,
@ -96,6 +103,10 @@ pub struct Api {
/// The concurrent upload limit.
concurrency_limit: Arc<Semaphore>,
circuit_breaker_429_tripped: Arc<AtomicBool>,
circuit_breaker_429_tripped_callback: CircuitBreakerTrippedCallback,
/// Backend request statistics.
#[cfg(debug_assertions)]
stats: RequestStats,
@ -108,7 +119,7 @@ pub struct FileAllocation(CacheId);
/// The ID of a cache.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(transparent)]
struct CacheId(pub i32);
struct CacheId(pub i64);
/// An API error.
#[derive(Debug, Clone)]
@ -234,7 +245,10 @@ impl fmt::Display for ApiErrorInfo {
}
impl Api {
pub fn new(credentials: Credentials) -> Result<Self> {
pub fn new(
credentials: Credentials,
circuit_breaker_429_tripped_callback: CircuitBreakerTrippedCallback,
) -> Result<Self> {
let mut headers = HeaderMap::new();
let auth_header = {
let mut h = HeaderValue::from_str(&format!("Bearer {}", credentials.runtime_token))
@ -264,11 +278,17 @@ impl Api {
version_hasher,
client,
concurrency_limit: Arc::new(Semaphore::new(MAX_CONCURRENCY)),
circuit_breaker_429_tripped: Arc::new(AtomicBool::from(false)),
circuit_breaker_429_tripped_callback,
#[cfg(debug_assertions)]
stats: Default::default(),
})
}
pub fn circuit_breaker_tripped(&self) -> bool {
self.circuit_breaker_429_tripped.load(Ordering::Relaxed)
}
/// Mutates the cache version/namespace.
pub fn mutate_version(&mut self, data: &[u8]) {
self.version_hasher.update(data);
@ -319,17 +339,22 @@ impl Api {
Err(Error::TooManyCollisions)
}
/// Uploads a file.
pub async fn upload_file<S>(&self, allocation: FileAllocation, mut stream: S) -> Result<()>
/// Uploads a file. Returns the size of the file.
pub async fn upload_file<S>(&self, allocation: FileAllocation, mut stream: S) -> Result<usize>
where
S: AsyncRead + Unpin + Send,
{
if self.circuit_breaker_tripped() {
return Err(Error::CircuitBreakerTripped);
}
let mut offset = 0;
let mut futures = Vec::new();
loop {
let buf = BytesMut::with_capacity(CHUNK_SIZE);
let chunk = read_chunk_async(&mut stream, buf).await?;
let chunk = read_chunk_async(&mut stream, buf)
.await
.map_err(|e| Error::IoError(e, "Reading a chunk during upload".to_string()))?;
if chunk.is_empty() {
offset += chunk.len();
break;
@ -347,10 +372,16 @@ impl Api {
futures.push({
let client = self.client.clone();
let concurrency_limit = self.concurrency_limit.clone();
let circuit_breaker_429_tripped = self.circuit_breaker_429_tripped.clone();
let circuit_breaker_429_tripped_callback =
self.circuit_breaker_429_tripped_callback.clone();
let url = self.construct_url(&format!("caches/{}", allocation.0 .0));
tokio::task::spawn(async move {
let permit = concurrency_limit.acquire().await.unwrap();
let permit = concurrency_limit
.acquire()
.await
.expect("failed to acquire concurrency semaphore permit");
tracing::trace!(
"Starting uploading chunk {}-{}",
@ -380,6 +411,9 @@ impl Api {
drop(permit);
circuit_breaker_429_tripped
.check_result(&r, &circuit_breaker_429_tripped_callback);
r
})
});
@ -390,17 +424,23 @@ impl Api {
future::join_all(futures)
.await
.into_iter()
.try_for_each(|join_result| join_result.unwrap())?;
.try_for_each(|join_result| {
join_result.expect("failed collecting a join result during parallel upload")
})?;
tracing::debug!("Received all chunks for cache {:?}", allocation.0);
self.commit_cache(allocation.0, offset).await?;
Ok(())
Ok(offset)
}
/// Downloads a file based on a list of key prefixes.
pub async fn get_file_url(&self, keys: &[&str]) -> Result<Option<String>> {
if self.circuit_breaker_tripped() {
return Err(Error::CircuitBreakerTripped);
}
Ok(self
.get_cache_entry(keys)
.await?
@ -419,6 +459,10 @@ impl Api {
/// Retrieves a cache based on a list of key prefixes.
async fn get_cache_entry(&self, keys: &[&str]) -> Result<Option<ArtifactCacheEntry>> {
if self.circuit_breaker_tripped() {
return Err(Error::CircuitBreakerTripped);
}
#[cfg(debug_assertions)]
self.stats.get.fetch_add(1, Ordering::SeqCst);
@ -431,6 +475,9 @@ impl Api {
.check_json()
.await;
self.circuit_breaker_429_tripped
.check_result(&res, &self.circuit_breaker_429_tripped_callback);
match res {
Ok(entry) => Ok(Some(entry)),
Err(Error::DecodeError { status, .. }) if status == StatusCode::NO_CONTENT => Ok(None),
@ -448,6 +495,10 @@ impl Api {
key: &str,
cache_size: Option<usize>,
) -> Result<ReserveCacheResponse> {
if self.circuit_breaker_tripped() {
return Err(Error::CircuitBreakerTripped);
}
tracing::debug!("Reserving cache for {}", key);
let req = ReserveCacheRequest {
@ -466,13 +517,20 @@ impl Api {
.send()
.await?
.check_json()
.await?;
.await;
Ok(res)
self.circuit_breaker_429_tripped
.check_result(&res, &self.circuit_breaker_429_tripped_callback);
res
}
/// Finalizes uploading to a cache.
async fn commit_cache(&self, cache_id: CacheId, size: usize) -> Result<()> {
if self.circuit_breaker_tripped() {
return Err(Error::CircuitBreakerTripped);
}
tracing::debug!("Commiting cache {:?}", cache_id);
let req = CommitCacheRequest { size };
@ -480,22 +538,31 @@ impl Api {
#[cfg(debug_assertions)]
self.stats.post.fetch_add(1, Ordering::SeqCst);
self.client
if let Err(e) = self
.client
.post(self.construct_url(&format!("caches/{}", cache_id.0)))
.json(&req)
.send()
.await?
.check()
.await?;
.await
{
self.circuit_breaker_429_tripped
.check_err(&e, &self.circuit_breaker_429_tripped_callback);
return Err(e);
}
Ok(())
}
fn construct_url(&self, resource: &str) -> String {
format!(
"{}/_apis/artifactcache/{}",
self.credentials.cache_url, resource
)
let mut url = self.credentials.cache_url.clone();
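// Join the API path onto the base URL with exactly one '/' separator,
// whether or not `cache_url` already ends with a slash.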
if !url.ends_with('/') {
url.push('/');
}
url.push_str("_apis/artifactcache/");
url.push_str(resource);
url
}
}
@ -554,3 +621,36 @@ async fn handle_error(res: reqwest::Response) -> Error {
Error::ApiError { status, info }
}
trait AtomicCircuitBreaker {
fn check_err(&self, e: &Error, callback: &CircuitBreakerTrippedCallback);
fn check_result<T>(
&self,
r: &std::result::Result<T, Error>,
callback: &CircuitBreakerTrippedCallback,
);
}
impl AtomicCircuitBreaker for AtomicBool {
fn check_result<T>(
&self,
r: &std::result::Result<T, Error>,
callback: &CircuitBreakerTrippedCallback,
) {
if let Err(ref e) = r {
self.check_err(e, callback)
}
}
fn check_err(&self, e: &Error, callback: &CircuitBreakerTrippedCallback) {
if let Error::ApiError {
status: reqwest::StatusCode::TOO_MANY_REQUESTS,
..
} = e
{
tracing::info!("Disabling GitHub Actions Cache due to 429: Too Many Requests");
self.store(true, Ordering::Relaxed);
callback();
}
}
}
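
For context, a minimal caller-side sketch of the new two-argument constructor follows. It is not part of this diff, and it assumes `Credentials` is re-exported at the crate root; the callback type is the `CircuitBreakerTrippedCallback` alias defined above.

```rust
use std::sync::Arc;

use gha_cache::api::{Api, CircuitBreakerTrippedCallback, Error};
use gha_cache::Credentials;

// Sketch: build an `Api` whose callback runs when GHAC returns a 429;
// the breaker then makes subsequent calls fail fast with
// `Error::CircuitBreakerTripped`.
fn build_api(credentials: Credentials) -> Result<Api, Error> {
    let on_tripped: CircuitBreakerTrippedCallback = Arc::new(Box::new(|| {
        eprintln!("GitHub Actions Cache throttled us; skipping it for the rest of this run");
    }));
    Api::new(credentials, on_tripped)
}
```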

magic-nix-cache/Cargo.toml

@ -1,35 +1,65 @@
[package]
name = "magic-nix-cache"
version = "0.1.2"
version = "0.2.0"
edition = "2021"
license = "Apache-2.0"
[dependencies]
gha-cache = { path = "../gha-cache" }
axum = { version = "0.6.18", default-features = false, features = ["json", "tokio"] }
axum-macros = "0.3.7"
clap = { version = "4.2.7", default-features = false, features = ["std", "derive", "error-context", "wrap_help"] }
axum = { version = "0.7.5", default-features = false, features = [
"json",
"tokio",
"http2",
"macros"
] }
clap = { version = "4.2.7", default-features = false, features = [
"std",
"derive",
"error-context",
"wrap_help",
] }
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", default-features = false, features = ["ansi", "env-filter", "fmt", "tracing-log", "smallvec"] }
tower-http = { version = "0.4.0", features = ["trace"] }
tracing-subscriber = { version = "0.3.17", default-features = false, features = [
"ansi",
"env-filter",
"fmt",
"tracing-log",
"smallvec",
] }
tower-http = { version = "0.5.2", features = ["trace"] }
serde = { version = "1.0.162", features = ["derive"] }
serde_json = { version = "1.0.96", default-features = false }
thiserror = "1.0.40"
tokio-stream = { version = "0.1.14", default-features = false }
tokio-util = { version = "0.7.8", features = ["io"] }
tokio-stream = { version = "0.1.15", default-features = false }
tokio-util = { version = "0.7.11", features = ["io", "compat"] }
daemonize = "0.5.0"
is_ci = "1.1.1"
sha2 = { version = "0.10.6", default-features = false }
reqwest = { version = "0.11.17", default-features = false, features = ["blocking", "rustls-tls-native-roots", "trust-dns"] }
reqwest = { version = "0.12.5", default-features = false, features = [
"blocking",
"rustls-tls-native-roots",
"trust-dns",
"json"
] }
netrc-rs = "0.1.2"
attic = { git = "https://github.com/DeterminateSystems/attic", branch = "fixups-for-magic-nix-cache" }
attic-client = { git = "https://github.com/DeterminateSystems/attic", branch = "fixups-for-magic-nix-cache" }
attic-server = { git = "https://github.com/DeterminateSystems/attic", branch = "fixups-for-magic-nix-cache" }
indicatif = "0.17"
anyhow = "1.0.71"
tempfile = "3.9"
uuid = { version = "1.4.0", features = ["serde", "v7", "rand", "std"] }
futures = "0.3"
async-compression = "0.4"
tracing-appender = "0.2.3"
http = "1.0"
http-body-util = "0.1"
hyper = { version = "1.0.0", features = ["full"] }
hyper-util = { version = "0.1", features = ["tokio", "server-auto", "http1"] }
xdg = { version = "2.5.2" }
[dependencies.tokio]
version = "1.28.0"
version = "1.44.2"
default-features = false
features = [
"fs",
"process",
"rt",
"rt-multi-thread",
"sync",
]
features = ["fs", "macros", "process", "rt", "rt-multi-thread", "sync"]

magic-nix-cache/src/api.rs

@ -2,45 +2,53 @@
//!
//! This API is intended to be used by nix-installer-action.
use std::net::SocketAddr;
use axum::{extract::Extension, http::uri::Uri, routing::post, Json, Router};
use axum_macros::debug_handler;
use serde::Serialize;
use attic::nix_store::StorePath;
use axum::{extract::Extension, routing::post, Json, Router};
use serde::{Deserialize, Serialize};
use super::State;
use crate::error::Result;
use crate::util::{get_store_paths, upload_paths};
use crate::error::{Error, Result};
#[derive(Debug, Clone, Serialize)]
struct WorkflowStartResponse {
num_original_paths: usize,
num_original_paths: Option<usize>,
}
#[derive(Debug, Clone, Serialize)]
struct WorkflowFinishResponse {
num_original_paths: usize,
num_final_paths: usize,
num_new_paths: usize,
num_original_paths: Option<usize>,
num_final_paths: Option<usize>,
num_new_paths: Option<usize>,
}
pub fn get_router() -> Router {
Router::new()
.route("/api/workflow-start", post(workflow_start))
.route("/api/workflow-finish", post(workflow_finish))
.route("/api/enqueue-paths", post(post_enqueue_paths))
}
/// Record existing paths.
#[debug_handler]
async fn workflow_start(Extension(state): Extension<State>) -> Result<Json<WorkflowStartResponse>> {
tracing::info!("Workflow started");
let reply = if let Some(original_paths) = &state.original_paths {
let mut original_paths = original_paths.lock().await;
*original_paths = crate::util::get_store_paths(&state.store).await?;
let mut original_paths = state.original_paths.lock().await;
*original_paths = get_store_paths().await?;
let reply = WorkflowStartResponse {
num_original_paths: Some(original_paths.len()),
};
Ok(Json(WorkflowStartResponse {
num_original_paths: original_paths.len(),
}))
state.metrics.num_original_paths.set(original_paths.len());
reply
} else {
WorkflowStartResponse {
num_original_paths: None,
}
};
Ok(Json(reply))
}
/// Push new paths and shut down.
@ -49,42 +57,113 @@ async fn workflow_finish(
) -> Result<Json<WorkflowFinishResponse>> {
tracing::info!("Workflow finished");
let original_paths = state.original_paths.lock().await;
let final_paths = get_store_paths().await?;
let new_paths = final_paths
.difference(&original_paths)
.cloned()
.collect::<Vec<_>>();
let response = if let Some(original_paths) = &state.original_paths {
let original_paths = original_paths.lock().await;
let final_paths = crate::util::get_store_paths(&state.store).await?;
let new_paths = final_paths
.difference(&original_paths)
.cloned()
.map(|path| state.store.follow_store_path(path).map_err(Error::Attic))
.collect::<Result<Vec<_>>>()?;
tracing::info!("Pushing {} new paths", new_paths.len());
let store_uri = make_store_uri(&state.self_endpoint);
upload_paths(new_paths.clone(), &store_uri).await?;
let num_original_paths = original_paths.len();
let num_final_paths = final_paths.len();
let num_new_paths = new_paths.len();
let sender = state.shutdown_sender.lock().await.take().unwrap();
sender.send(()).unwrap();
let reply = WorkflowFinishResponse {
num_original_paths: Some(num_original_paths),
num_final_paths: Some(num_final_paths),
num_new_paths: Some(num_new_paths),
};
let reply = WorkflowFinishResponse {
num_original_paths: original_paths.len(),
num_final_paths: final_paths.len(),
num_new_paths: new_paths.len(),
state.metrics.num_original_paths.set(num_original_paths);
state.metrics.num_final_paths.set(num_final_paths);
state.metrics.num_new_paths.set(num_new_paths);
// NOTE(cole-h): If we're substituting from an upstream cache, those paths won't have the
// post-build-hook run on them, so we diff the store to ensure we cache everything we can.
tracing::info!("Diffing the store and uploading any new paths before we shut down");
enqueue_paths(&state, new_paths).await?;
reply
} else {
WorkflowFinishResponse {
num_original_paths: None,
num_final_paths: None,
num_new_paths: None,
}
};
state
.metrics
.num_original_paths
.set(reply.num_original_paths);
state.metrics.num_final_paths.set(reply.num_final_paths);
state.metrics.num_new_paths.set(reply.num_new_paths);
if let Some(gha_cache) = &state.gha_cache {
tracing::info!("Waiting for GitHub action cache uploads to finish");
gha_cache.shutdown().await?;
}
Ok(Json(reply))
if let Some(attic_state) = state.flakehub_state.write().await.take() {
tracing::info!("Waiting for FlakeHub cache uploads to finish");
let paths = attic_state.push_session.wait().await?;
let paths = paths.keys().map(|s| s.name()).collect::<Vec<_>>();
tracing::info!(?paths, "FlakeHub Cache uploads completed");
} else {
tracing::info!("FlakeHub cache is not enabled, not uploading anything to it");
}
if let Some(sender) = state.shutdown_sender.lock().await.take() {
sender
.send(())
.map_err(|_| Error::Internal("Sending shutdown server message".to_owned()))?;
}
// NOTE(cole-h): see `init_logging`
if let Some(logfile) = &state.logfile {
let logfile_contents = std::fs::read_to_string(logfile)
.map_err(|e| crate::error::Error::Io(e, format!("Reading {}", logfile.display())))?;
println!("Every log line throughout the lifetime of the program:");
println!("\n{logfile_contents}\n");
}
Ok(Json(response))
}
fn make_store_uri(self_endpoint: &SocketAddr) -> String {
Uri::builder()
.scheme("http")
.authority(self_endpoint.to_string())
.path_and_query("/?compression=zstd&parallel-compression=true")
.build()
.unwrap()
.to_string()
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnqueuePathsRequest {
pub store_paths: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnqueuePathsResponse {}
/// Schedule paths in the local Nix store for uploading.
#[tracing::instrument(skip_all)]
async fn post_enqueue_paths(
Extension(state): Extension<State>,
Json(req): Json<EnqueuePathsRequest>,
) -> Result<Json<EnqueuePathsResponse>> {
tracing::info!("Enqueueing {:?}", req.store_paths);
let store_paths = req
.store_paths
.iter()
.map(|path| state.store.follow_store_path(path).map_err(Error::Attic))
.collect::<Result<Vec<_>>>()?;
enqueue_paths(&state, store_paths).await?;
Ok(Json(EnqueuePathsResponse {}))
}
pub async fn enqueue_paths(state: &State, store_paths: Vec<StorePath>) -> Result<()> {
if let Some(gha_cache) = &state.gha_cache {
gha_cache
.enqueue_paths(state.store.clone(), store_paths.clone())
.await?;
}
if let Some(flakehub_state) = &*state.flakehub_state.read().await {
crate::flakehub::enqueue_paths(flakehub_state, store_paths).await?;
}
Ok(())
}
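
As an illustration of the wire format, a local client can drive the new endpoint with a plain JSON POST. This sketch is not from the diff: the listen address is an assumption (the daemon's bind address is configurable) and the store path is a placeholder.

```rust
use serde_json::json;

// Sketch: ask the running daemon to enqueue a store path for upload.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    reqwest::Client::new()
        // Assumed bind address; point this at wherever the daemon listens.
        .post("http://127.0.0.1:37515/api/enqueue-paths")
        // Shape matches `EnqueuePathsRequest { store_paths: Vec<String> }` above.
        .json(&json!({
            "store_paths": ["/nix/store/00000000000000000000000000000000-example-1.0"]
        }))
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```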

magic-nix-cache/src/binary_cache.rs

@ -1,14 +1,12 @@
//! Binary Cache API.
use std::io;
use axum::{
extract::{BodyStream, Extension, Path},
extract::{Extension, Path},
response::Redirect,
routing::{get, put},
Router,
};
use tokio_stream::StreamExt;
use futures::StreamExt as _;
use tokio_util::io::StreamReader;
use super::State;
@ -51,7 +49,7 @@ async fn get_narinfo(
let key = format!("{}.narinfo", store_path_hash);
if state
.narinfo_nagative_cache
.narinfo_negative_cache
.read()
.await
.contains(&store_path_hash)
@ -61,22 +59,25 @@ async fn get_narinfo(
return pull_through(&state, &path);
}
if let Some(url) = state.api.get_file_url(&[&key]).await? {
state.metrics.narinfos_served.incr();
return Ok(Redirect::temporary(&url));
if let Some(gha_cache) = &state.gha_cache {
if let Some(url) = gha_cache.api.get_file_url(&[&key]).await? {
state.metrics.narinfos_served.incr();
return Ok(Redirect::temporary(&url));
}
}
let mut negative_cache = state.narinfo_nagative_cache.write().await;
let mut negative_cache = state.narinfo_negative_cache.write().await;
negative_cache.insert(store_path_hash);
state.metrics.narinfos_sent_upstream.incr();
state.metrics.narinfos_negative_cache_misses.incr();
pull_through(&state, &path)
}
async fn put_narinfo(
Extension(state): Extension<State>,
Path(path): Path<String>,
body: BodyStream,
body: axum::body::Body,
) -> Result<()> {
let components: Vec<&str> = path.splitn(2, '.').collect();
@ -88,17 +89,23 @@ async fn put_narinfo(
return Err(Error::BadRequest);
}
let gha_cache = state.gha_cache.as_ref().ok_or(Error::GHADisabled)?;
let store_path_hash = components[0].to_string();
let key = format!("{}.narinfo", store_path_hash);
let allocation = state.api.allocate_file_with_random_suffix(&key).await?;
let allocation = gha_cache.api.allocate_file_with_random_suffix(&key).await?;
let body_stream = body.into_data_stream();
let stream = StreamReader::new(
body.map(|r| r.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))),
body_stream
.map(|r| r.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))),
);
state.api.upload_file(allocation, stream).await?;
gha_cache.api.upload_file(allocation, stream).await?;
state.metrics.narinfos_uploaded.incr();
state
.narinfo_nagative_cache
.narinfo_negative_cache
.write()
.await
.remove(&store_path_hash);
@ -107,7 +114,14 @@ async fn put_narinfo(
}
async fn get_nar(Extension(state): Extension<State>, Path(path): Path<String>) -> Result<Redirect> {
if let Some(url) = state.api.get_file_url(&[&path]).await? {
if let Some(url) = state
.gha_cache
.as_ref()
.ok_or(Error::GHADisabled)?
.api
.get_file_url(&[&path])
.await?
{
state.metrics.nars_served.incr();
return Ok(Redirect::temporary(&url));
}
@ -119,16 +133,26 @@ async fn get_nar(Extension(state): Extension<State>, Path(path): Path<String>) -
Err(Error::NotFound)
}
}
async fn put_nar(
Extension(state): Extension<State>,
Path(path): Path<String>,
body: BodyStream,
body: axum::body::Body,
) -> Result<()> {
let allocation = state.api.allocate_file_with_random_suffix(&path).await?;
let gha_cache = state.gha_cache.as_ref().ok_or(Error::GHADisabled)?;
let allocation = gha_cache
.api
.allocate_file_with_random_suffix(&path)
.await?;
let body_stream = body.into_data_stream();
let stream = StreamReader::new(
body.map(|r| r.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))),
body_stream
.map(|r| r.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))),
);
state.api.upload_file(allocation, stream).await?;
gha_cache.api.upload_file(allocation, stream).await?;
state.metrics.nars_uploaded.incr();
Ok(())

magic-nix-cache/src/env.rs (new file)

@ -0,0 +1,50 @@
use std::fmt::{self, Display};
#[derive(Clone, Copy)]
pub enum Environment {
GitHubActions,
GitLabCI,
Other,
}
impl Environment {
pub fn determine() -> Self {
if env_var_is_true("GITHUB_ACTIONS") {
return Environment::GitHubActions;
}
if env_var_is_true("GITLAB_CI") {
return Environment::GitLabCI;
}
Environment::Other
}
pub fn is_github_actions(&self) -> bool {
matches!(self, Self::GitHubActions)
}
pub fn is_gitlab_ci(&self) -> bool {
matches!(self, Self::GitLabCI)
}
}
impl Display for Environment {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use Environment::*;
write!(
f,
"{}",
match self {
GitHubActions => "GitHub Actions",
GitLabCI => "GitLab CI",
Other => "an unspecified environment",
}
)
}
}
fn env_var_is_true(e: &str) -> bool {
std::env::var(e).is_ok_and(|v| v == "true")
}
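
A short usage sketch of the detector follows; the caller code is assumed, not part of this diff.

```rust
// Sketch: branch on the detected CI environment at startup.
fn main() {
    let environment = Environment::determine();
    // `Display` renders the human-readable names defined above.
    println!("Detected {environment}");
    if environment.is_github_actions() {
        // e.g. enable the GitHub Actions Cache backend
    } else if environment.is_gitlab_ci() {
        // e.g. rely on FlakeHub cache alone
    }
}
```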

magic-nix-cache/src/error.rs

@ -19,16 +19,47 @@ pub enum Error {
#[error("Bad Request")]
BadRequest,
#[error("I/O error: {0}")]
Io(#[from] std::io::Error),
#[error("I/O error: {0}. Context: {1}")]
Io(std::io::Error, String),
#[error("Failed to upload paths")]
FailedToUpload,
#[error("GHA cache is disabled")]
GHADisabled,
#[error("FlakeHub cache error: {0}")]
FlakeHub(#[from] anyhow::Error),
#[error("FlakeHub HTTP error: {0}")]
FlakeHubHttp(#[from] reqwest::Error),
#[error("Got HTTP response {0} getting the cache name from FlakeHub: {1}")]
GetCacheName(reqwest::StatusCode, String),
#[error("netrc parse error: {0}")]
Netrc(netrc_rs::Error),
#[error("Cannot find netrc credentials for {0}")]
MissingCreds(String),
#[error("Attic error: {0}")]
Attic(#[from] attic::AtticError),
#[error("Bad URL")]
BadUrl(reqwest::Url),
#[error("Configuration error: {0}")]
Config(String),
#[error("Internal error: {0}")]
Internal(String),
}
impl IntoResponse for Error {
fn into_response(self) -> Response {
let code = match &self {
Self::Api(gha_cache::api::Error::ApiError {
status: StatusCode::TOO_MANY_REQUESTS,
..
}) => StatusCode::TOO_MANY_REQUESTS,
// HACK: HTTP 418 makes Nix throw a visible error but not retry
Self::Api(_) => StatusCode::IM_A_TEAPOT,
Self::NotFound => StatusCode::NOT_FOUND,

magic-nix-cache/src/flakehub.rs (new file)

@ -0,0 +1,492 @@
use crate::env::Environment;
use crate::error::{Error, Result};
use crate::DETERMINATE_NETRC_PATH;
use anyhow::Context;
use attic::cache::CacheName;
use attic::nix_store::{NixStore, StorePath};
use attic_client::push::{PushSession, PushSessionConfig};
use attic_client::{
api::ApiClient,
config::ServerConfig,
push::{PushConfig, Pusher},
};
use reqwest::header::HeaderValue;
use reqwest::Url;
use serde::Deserialize;
use std::os::unix::fs::MetadataExt;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::fs::File;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::sync::RwLock;
use uuid::Uuid;
const USER_AGENT: &str = "magic-nix-cache";
pub struct State {
#[allow(dead_code)]
pub substituter: Url,
pub push_session: PushSession,
}
pub async fn init_cache(
environment: Environment,
flakehub_api_server: &Url,
flakehub_cache_server: &Url,
flakehub_flake_name: &Option<String>,
store: Arc<NixStore>,
auth_method: &super::FlakeHubAuthSource,
) -> Result<State> {
// Parse netrc to get the credentials for api.flakehub.com.
let netrc_path = auth_method.as_path_buf();
let NetrcInfo {
netrc,
flakehub_cache_server_hostname,
flakehub_login,
flakehub_password,
} = extract_info_from_netrc(&netrc_path, flakehub_api_server, flakehub_cache_server).await?;
if let super::FlakeHubAuthSource::Netrc(netrc_path) = auth_method {
// Append an entry for the FlakeHub cache server to netrc.
if !netrc
.machines
.iter()
.any(|machine| machine.name.as_ref() == Some(&flakehub_cache_server_hostname))
{
let mut netrc_file = tokio::fs::OpenOptions::new()
.create(false)
.append(true)
.open(netrc_path)
.await
.map_err(|e| {
Error::Internal(format!(
"Failed to open {} for appending: {}",
netrc_path.display(),
e
))
})?;
netrc_file
.write_all(
format!(
"\nmachine {} login {} password {}\n\n",
flakehub_cache_server_hostname, flakehub_login, flakehub_password,
)
.as_bytes(),
)
.await
.map_err(|e| {
Error::Internal(format!(
"Failed to write credentials to {}: {}",
netrc_path.display(),
e
))
})?;
}
}
let server_config = ServerConfig {
endpoint: flakehub_cache_server.to_string(),
token: Some(attic_client::config::ServerTokenConfig::Raw {
token: flakehub_password.clone(),
}),
};
let api_inner = ApiClient::from_server_config(server_config)?;
let api = Arc::new(RwLock::new(api_inner));
// Periodically refresh JWT in GitHub Actions environment
if environment.is_github_actions() {
match auth_method {
super::FlakeHubAuthSource::Netrc(path) => {
let netrc_path_clone = path.to_path_buf();
let initial_github_jwt_clone = flakehub_password.clone();
let flakehub_cache_server_clone = flakehub_cache_server.to_string();
let api_clone = api.clone();
tokio::task::spawn(refresh_github_actions_jwt_worker(
netrc_path_clone,
initial_github_jwt_clone,
flakehub_cache_server_clone,
api_clone,
));
}
crate::FlakeHubAuthSource::DeterminateNixd => {
let api_clone = api.clone();
let netrc_file = PathBuf::from(DETERMINATE_NETRC_PATH);
let flakehub_api_server_clone = flakehub_api_server.clone();
let flakehub_cache_server_clone = flakehub_cache_server.clone();
let initial_meta = tokio::fs::metadata(&netrc_file).await.map_err(|e| {
Error::Io(e, format!("getting metadata of {}", netrc_file.display()))
})?;
let initial_inode = initial_meta.ino();
tokio::task::spawn(refresh_determinate_token_worker(
netrc_file,
initial_inode,
flakehub_api_server_clone,
flakehub_cache_server_clone,
api_clone,
));
}
}
}
// Get the cache UUID for this project.
let cache_name = {
let mut url = flakehub_api_server
.join("project")
.map_err(|_| Error::Config(format!("bad URL '{}'", flakehub_api_server)))?;
if let Some(flakehub_flake_name) = flakehub_flake_name {
if !flakehub_flake_name.is_empty() {
url = flakehub_api_server
.join(&format!("project/{}", flakehub_flake_name))
.map_err(|_| Error::Config(format!("bad URL '{}'", flakehub_api_server)))?;
}
}
let response = reqwest::Client::new()
.get(url.to_owned())
.header("User-Agent", USER_AGENT)
.basic_auth(flakehub_login, Some(&flakehub_password))
.send()
.await?;
if !response.status().is_success() {
return Err(Error::GetCacheName(
response.status(),
response.text().await?,
));
}
#[derive(Deserialize)]
struct ProjectInfo {
organization_uuid_v7: Uuid,
project_uuid_v7: Uuid,
}
let project_info = response.json::<ProjectInfo>().await?;
format!(
"{}:{}",
project_info.organization_uuid_v7, project_info.project_uuid_v7,
)
};
tracing::info!("Using cache {:?}", cache_name);
let cache = unsafe { CacheName::new_unchecked(cache_name) };
let cache_config = api.read().await.get_cache_config(&cache).await?;
let push_config = PushConfig {
num_workers: 5, // FIXME: use number of CPUs?
force_preamble: false,
};
let mp = indicatif::MultiProgress::new();
let push_session = Pusher::new(
store.clone(),
api.clone(),
cache.to_owned(),
cache_config,
mp,
push_config,
)
.into_push_session(PushSessionConfig {
no_closure: false,
ignore_upstream_cache_filter: false,
});
let state = State {
substituter: flakehub_cache_server.to_owned(),
push_session,
};
Ok(state)
}
#[derive(Debug)]
struct NetrcInfo {
netrc: netrc_rs::Netrc,
flakehub_cache_server_hostname: String,
flakehub_login: String,
flakehub_password: String,
}
#[tracing::instrument]
async fn extract_info_from_netrc(
netrc_path: &Path,
flakehub_api_server: &Url,
flakehub_cache_server: &Url,
) -> Result<NetrcInfo> {
let netrc = {
let mut netrc_file = File::open(netrc_path).await.map_err(|e| {
Error::Internal(format!("Failed to open {}: {}", netrc_path.display(), e))
})?;
let mut netrc_contents = String::new();
netrc_file
.read_to_string(&mut netrc_contents)
.await
.map_err(|e| {
Error::Internal(format!(
"Failed to read {} contents: {}",
netrc_path.display(),
e
))
})?;
netrc_rs::Netrc::parse(netrc_contents, false).map_err(Error::Netrc)?
};
let flakehub_netrc_entry = netrc
.machines
.iter()
.find(|machine| {
machine.name.as_ref() == flakehub_api_server.host().map(|x| x.to_string()).as_ref()
})
.ok_or_else(|| Error::MissingCreds(flakehub_api_server.to_string()))?
.to_owned();
let flakehub_cache_server_hostname = flakehub_cache_server
.host()
.ok_or_else(|| Error::BadUrl(flakehub_cache_server.to_owned()))?
.to_string();
let flakehub_login = flakehub_netrc_entry.login.ok_or_else(|| {
Error::Config(format!(
"netrc file does not contain a login for '{}'",
flakehub_api_server
))
})?;
let flakehub_password = flakehub_netrc_entry.password.ok_or_else(|| {
Error::Config(format!(
"netrc file does not contain a password for '{}'",
flakehub_api_server
))
})?;
Ok(NetrcInfo {
netrc,
flakehub_cache_server_hostname,
flakehub_login,
flakehub_password,
})
}
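A netrc that satisfies extract_info_from_netrc pairs each server hostname with a login and password. A minimal sketch against the default api.flakehub.com / cache.flakehub.com servers (the credentials are placeholders; only the API server's entry is consulted by the lookup above, while the cache server contributes just its hostname):

machine api.flakehub.com login <login> password <jwt>
machine cache.flakehub.com login <login> password <jwt>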
pub async fn enqueue_paths(state: &State, store_paths: Vec<StorePath>) -> Result<()> {
state.push_session.queue_many(store_paths)?;
Ok(())
}
/// Refresh the GitHub Actions JWT every 2 minutes (slightly less than half of the default validity
/// period) to ensure pushing / pulling doesn't stop working.
#[tracing::instrument(skip_all)]
async fn refresh_github_actions_jwt_worker(
netrc_path: std::path::PathBuf,
mut github_jwt: String,
flakehub_cache_server_clone: String,
api: Arc<RwLock<ApiClient>>,
) -> Result<()> {
// NOTE(cole-h): This is a workaround -- at the time of writing, GitHub Actions JWTs are only
// valid for 5 minutes after being issued. FlakeHub uses these JWTs for authentication, which
// means that after those 5 minutes have passed and the token is expired, FlakeHub (and by
// extension FlakeHub Cache) will no longer allow requests using this token. However, GitHub
// gives us a way to repeatedly request new tokens, so we utilize that and refresh the token
// every 2 minutes (less than half of the lifetime of the token).
// TODO(cole-h): this should probably be half of the token's lifetime ((exp - iat) / 2), but
// getting this is nontrivial so I'm not going to do it until GitHub changes the lifetime and
// breaks this.
let next_refresh = std::time::Duration::from_secs(2 * 60);
// NOTE(cole-h): we sleep until the next refresh at first because we already got a token from
// GitHub recently, so we don't need to try again until we might actually need a new one.
tokio::time::sleep(next_refresh).await;
// NOTE(cole-h): https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-cloud-providers#requesting-the-jwt-using-environment-variables
let mut headers = reqwest::header::HeaderMap::new();
headers.insert(
reqwest::header::ACCEPT,
HeaderValue::from_static("application/json;api-version=2.0"),
);
headers.insert(
reqwest::header::CONTENT_TYPE,
HeaderValue::from_static("application/json"),
);
let github_client = reqwest::Client::builder()
.user_agent(USER_AGENT)
.default_headers(headers)
.build()?;
loop {
match rewrite_github_actions_token(&github_client, &netrc_path, &github_jwt).await {
Ok(new_github_jwt) => {
github_jwt = new_github_jwt;
let server_config = ServerConfig {
endpoint: flakehub_cache_server_clone.clone(),
token: Some(attic_client::config::ServerTokenConfig::Raw {
token: github_jwt.clone(),
}),
};
let new_api = ApiClient::from_server_config(server_config)?;
{
let mut api_client = api.write().await;
*api_client = new_api;
}
tracing::debug!(
"Stored new token in netrc and API client, sleeping for {next_refresh:?}"
);
tokio::time::sleep(next_refresh).await;
}
Err(e) => {
tracing::error!(
?e,
"Failed to get a new JWT from GitHub, trying again in 10 seconds"
);
tokio::time::sleep(std::time::Duration::from_secs(10)).await;
}
}
}
}
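A minimal sketch of the TODO above -- deriving the refresh interval from the token itself as (exp - iat) / 2 -- assuming the base64 and serde_json crates were added as dependencies; this is illustrative and not part of the crate:

fn refresh_interval_from_jwt(jwt: &str) -> Option<std::time::Duration> {
    use base64::Engine as _;
    // Claims live in the second of the three dot-separated base64url segments.
    let payload = base64::engine::general_purpose::URL_SAFE_NO_PAD
        .decode(jwt.split('.').nth(1)?)
        .ok()?;
    let claims: serde_json::Value = serde_json::from_slice(&payload).ok()?;
    let exp = claims.get("exp")?.as_u64()?;
    let iat = claims.get("iat")?.as_u64()?;
    // Half the token lifetime, per the TODO in refresh_github_actions_jwt_worker.
    Some(std::time::Duration::from_secs(exp.saturating_sub(iat) / 2))
}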
#[tracing::instrument(skip_all)]
async fn rewrite_github_actions_token(
client: &reqwest::Client,
netrc_path: &Path,
old_github_jwt: &str,
) -> Result<String> {
// NOTE(cole-h): https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-cloud-providers#requesting-the-jwt-using-environment-variables
let runtime_token = std::env::var("ACTIONS_ID_TOKEN_REQUEST_TOKEN").map_err(|e| {
Error::Internal(format!(
"ACTIONS_ID_TOKEN_REQUEST_TOKEN was invalid unicode: {e}"
))
})?;
let runtime_url = std::env::var("ACTIONS_ID_TOKEN_REQUEST_URL").map_err(|e| {
Error::Internal(format!(
"ACTIONS_ID_TOKEN_REQUEST_URL was invalid unicode: {e}"
))
})?;
let token_request_url = format!("{runtime_url}&audience=api.flakehub.com");
let token_response = client
.request(reqwest::Method::GET, &token_request_url)
.bearer_auth(runtime_token)
.send()
.await
.with_context(|| format!("sending request to {token_request_url}"))?;
if let Err(e) = token_response.error_for_status_ref() {
tracing::error!(?e, "Got error response when requesting token");
return Err(e)?;
}
#[derive(serde::Deserialize)]
struct TokenResponse {
value: String,
}
let token_response: TokenResponse = token_response
.json()
.await
.with_context(|| "converting response into json")?;
let new_github_jwt_string = token_response.value;
let netrc_contents = tokio::fs::read_to_string(netrc_path)
.await
.with_context(|| format!("failed to read {netrc_path:?} to string"))?;
let new_netrc_contents = netrc_contents.replace(old_github_jwt, &new_github_jwt_string);
// NOTE(cole-h): create the temporary file right next to the real one so we don't run into
// cross-device linking issues when renaming
let netrc_path_tmp = netrc_path.with_extension("tmp");
tokio::fs::write(&netrc_path_tmp, new_netrc_contents)
.await
.with_context(|| format!("writing new JWT to {netrc_path_tmp:?}"))?;
tokio::fs::rename(&netrc_path_tmp, &netrc_path)
.await
.with_context(|| format!("renaming {netrc_path_tmp:?} to {netrc_path:?}"))?;
Ok(new_github_jwt_string)
}
#[tracing::instrument(skip_all)]
async fn refresh_determinate_token_worker(
netrc_file: PathBuf,
mut inode: u64,
flakehub_api_server: Url,
flakehub_cache_server: Url,
api_clone: Arc<RwLock<ApiClient>>,
) {
// NOTE(cole-h): This is a workaround -- at the time of writing, determinate-nixd handles the
// GitHub Actions JWT refreshing for us, which means we don't know when this will happen. At the
// moment, it does it roughly every 2 minutes (less than half of the total lifetime of the
// issued token).
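// Why compare inodes: rewrite_github_actions_token above swaps the netrc in via a
// temp-file-plus-rename, and determinate-nixd presumably replaces its netrc the
// same way; a rename keeps the path but changes the inode, so an inode change is
// a cheap signal that the file was replaced, without reading its contents.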
loop {
tokio::time::sleep(std::time::Duration::from_secs(3)).await;
let meta = tokio::fs::metadata(&netrc_file)
.await
.map_err(|e| Error::Io(e, format!("getting metadata of {}", netrc_file.display())));
let Ok(meta) = meta else {
tracing::error!(e = ?meta);
continue;
};
let current_inode = meta.ino();
if current_inode == inode {
tracing::debug!("current inode is the same, file didn't change");
continue;
}
tracing::debug!("current inode is different, file changed");
inode = current_inode;
let flakehub_password = match extract_info_from_netrc(
&netrc_file,
&flakehub_api_server,
&flakehub_cache_server,
)
.await
{
Ok(NetrcInfo {
flakehub_password, ..
}) => flakehub_password,
Err(e) => {
tracing::error!(?e, "Failed to extract auth info from netrc");
continue;
}
};
let server_config = ServerConfig {
endpoint: flakehub_cache_server.to_string(),
token: Some(attic_client::config::ServerTokenConfig::Raw {
token: flakehub_password,
}),
};
let new_api = ApiClient::from_server_config(server_config.clone());
let Ok(new_api) = new_api else {
tracing::error!(e = ?new_api, "Failed to construct new ApiClient");
continue;
};
{
let mut api_client = api_clone.write().await;
*api_client = new_api;
}
tracing::debug!("Stored new token in API client, sleeping for 30s");
}
}

magic-nix-cache/src/gha.rs Normal file
@@ -0,0 +1,254 @@
use std::{collections::HashSet, sync::Arc};
use crate::error::{Error, Result};
use crate::telemetry;
use async_compression::tokio::bufread::ZstdEncoder;
use attic::nix_store::{NixStore, StorePath, ValidPathInfo};
use attic_server::narinfo::{Compression, NarInfo};
use futures::stream::TryStreamExt;
use gha_cache::{Api, Credentials};
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
RwLock,
};
use tokio_util::compat::FuturesAsyncReadCompatExt;
pub struct GhaCache {
/// The GitHub Actions Cache API.
pub api: Arc<Api>,
/// The future from the completion of the worker.
worker_result: RwLock<Option<tokio::task::JoinHandle<Result<()>>>>,
channel_tx: UnboundedSender<Request>,
}
#[derive(Debug)]
enum Request {
Shutdown,
Upload(StorePath),
}
impl GhaCache {
pub fn new(
credentials: Credentials,
cache_version: Option<String>,
store: Arc<NixStore>,
metrics: Arc<telemetry::TelemetryReport>,
narinfo_negative_cache: Arc<RwLock<HashSet<String>>>,
) -> Result<GhaCache> {
let cb_metrics = metrics.clone();
let mut api = Api::new(
credentials,
Arc::new(Box::new(move || {
cb_metrics
.tripped_429
.store(true, std::sync::atomic::Ordering::Relaxed);
})),
)?;
if let Some(cache_version) = &cache_version {
api.mutate_version(cache_version.as_bytes());
}
let (channel_tx, channel_rx) = unbounded_channel();
let api = Arc::new(api);
let api2 = api.clone();
let worker_result = tokio::task::spawn(async move {
worker(
&api2,
store,
channel_rx,
metrics,
narinfo_negative_cache.clone(),
)
.await
});
Ok(GhaCache {
api,
worker_result: RwLock::new(Some(worker_result)),
channel_tx,
})
}
pub async fn shutdown(&self) -> Result<()> {
if let Some(worker_result) = self.worker_result.write().await.take() {
self.channel_tx
.send(Request::Shutdown)
.expect("Cannot send shutdown message");
worker_result
.await
.expect("failed to read result from gha worker")
} else {
Ok(())
}
}
pub async fn enqueue_paths(
&self,
store: Arc<NixStore>,
store_paths: Vec<StorePath>,
) -> Result<()> {
// FIXME: make sending the closure optional. We might want to
// only send the paths that have been built by the user, under
// the assumption that everything else is already in a binary
// cache.
// FIXME: compute_fs_closure_multi doesn't return a
// toposort, though it doesn't really matter for the GHA
// cache.
let closure = store
.compute_fs_closure_multi(store_paths, false, false, false)
.await?;
for p in closure {
self.channel_tx
.send(Request::Upload(p))
.map_err(|_| Error::Internal("Cannot send upload message".to_owned()))?;
}
Ok(())
}
}
async fn worker(
api: &Api,
store: Arc<NixStore>,
mut channel_rx: UnboundedReceiver<Request>,
metrics: Arc<telemetry::TelemetryReport>,
narinfo_negative_cache: Arc<RwLock<HashSet<String>>>,
) -> Result<()> {
let mut done = HashSet::new();
while let Some(req) = channel_rx.recv().await {
match req {
Request::Shutdown => {
break;
}
Request::Upload(path) => {
if api.circuit_breaker_tripped() {
tracing::trace!("GitHub Actions gave us a 429, so we're done.",);
continue;
}
if !done.insert(path.clone()) {
continue;
}
if let Err(err) = upload_path(
api,
store.clone(),
&path,
metrics.clone(),
narinfo_negative_cache.clone(),
)
.await
{
tracing::error!(
"Upload of path '{}' failed: {}",
store.get_full_path(&path).display(),
err
);
}
}
}
}
Ok(())
}
async fn upload_path(
api: &Api,
store: Arc<NixStore>,
path: &StorePath,
metrics: Arc<telemetry::TelemetryReport>,
narinfo_negative_cache: Arc<RwLock<HashSet<String>>>,
) -> Result<()> {
let path_info = store.query_path_info(path.clone()).await?;
// Upload the NAR.
let nar_path = format!("{}.nar.zstd", path_info.nar_hash.to_base32());
let nar_allocation = api.allocate_file_with_random_suffix(&nar_path).await?;
let nar_stream = store.nar_from_path(path.clone());
let nar_reader = nar_stream
.map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))
.into_async_read();
let nar_compressor = ZstdEncoder::new(nar_reader.compat());
let compressed_nar_size = api.upload_file(nar_allocation, nar_compressor).await?;
metrics.nars_uploaded.incr();
tracing::debug!(
"Uploaded '{}' (size {} -> {})",
nar_path,
path_info.nar_size,
compressed_nar_size
);
// Upload the narinfo.
let narinfo_path = format!("{}.narinfo", path.to_hash().as_str());
let narinfo_allocation = api.allocate_file_with_random_suffix(&narinfo_path).await?;
let narinfo = path_info_to_nar_info(store.clone(), &path_info, format!("nar/{}", nar_path))
.to_string()
.expect("failed to convert path into to nar info");
tracing::debug!("Uploading '{}'", narinfo_path);
api.upload_file(narinfo_allocation, narinfo.as_bytes())
.await?;
metrics.narinfos_uploaded.incr();
narinfo_negative_cache
.write()
.await
.remove(&path.to_hash().to_string());
tracing::info!(
"Uploaded '{}' to the GitHub Action Cache",
store.get_full_path(path).display()
);
Ok(())
}
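For reference, the narinfo built by path_info_to_nar_info below follows the standard Nix binary-cache narinfo format; a representative upload would look roughly like this (hashes and sizes are hypothetical placeholders):

StorePath: /nix/store/<hash>-hello-2.12.1
URL: nar/<nar-hash-base32>.nar.zstd
Compression: zstd
NarHash: sha256:<nar-hash-base32>
NarSize: <nar-size>
References: <hash>-hello-2.12.1 <hash>-glibc-2.38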
// FIXME: move to attic.
fn path_info_to_nar_info(store: Arc<NixStore>, path_info: &ValidPathInfo, url: String) -> NarInfo {
NarInfo {
store_path: store.get_full_path(&path_info.path),
url,
compression: Compression::Zstd,
file_hash: None,
file_size: None,
nar_hash: path_info.nar_hash.clone(),
nar_size: path_info.nar_size as usize,
references: path_info
.references
.iter()
.map(|r| {
r.file_name()
.and_then(|n| n.to_str())
.unwrap_or_else(|| {
panic!(
"failed to convert nar_info reference to string: {}",
r.display()
)
})
.to_owned()
})
.collect(),
system: None,
deriver: None,
signature: None,
ca: path_info.ca.clone(),
}
}

magic-nix-cache/src/main.rs
@@ -2,7 +2,6 @@
asm_sub_register,
deprecated,
missing_abi,
unsafe_code,
unused_macros,
unused_must_use,
unused_unsafe
@@ -15,40 +14,52 @@
mod api;
mod binary_cache;
mod env;
mod error;
mod flakehub;
mod gha;
mod pbh;
mod telemetry;
mod util;
use std::collections::HashSet;
use std::fs::{self, File};
use std::fs::create_dir_all;
use std::io::Write;
use std::net::SocketAddr;
use std::os::fd::OwnedFd;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use ::attic::nix_store::NixStore;
use anyhow::{anyhow, Context, Result};
use axum::{extract::Extension, routing::get, Router};
use clap::Parser;
use daemonize::Daemonize;
use tokio::{
runtime::Runtime,
sync::{oneshot, Mutex, RwLock},
};
use serde::{Deserialize, Serialize};
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio::sync::{oneshot, Mutex, RwLock};
use tracing_subscriber::filter::EnvFilter;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use gha_cache::{Api, Credentials};
use gha_cache::Credentials;
const DETERMINATE_STATE_DIR: &str = "/nix/var/determinate";
const DETERMINATE_NIXD_SOCKET_NAME: &str = "determinate-nixd.socket";
const DETERMINATE_NETRC_PATH: &str = "/nix/var/determinate/netrc";
// TODO(colemickens): refactor, move with other UDS stuff (or all PBH stuff) to new file
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "c", rename_all = "kebab-case")]
pub struct BuiltPathResponseEventV1 {
pub drv: PathBuf,
pub outputs: Vec<PathBuf>,
}
type State = Arc<StateInner>;
/// GitHub Actions-powered Nix binary cache
#[derive(Parser, Debug)]
struct Args {
/// JSON file containing credentials.
///
/// If this is not specified, credentials will be loaded
/// from the environment.
#[arg(short = 'c', long)]
credentials_file: Option<PathBuf>,
/// Address to listen on.
///
/// FIXME: IPv6
@@ -79,18 +90,118 @@ struct Args {
)]
diagnostic_endpoint: String,
/// Daemonize the server.
///
/// This is for use in the GitHub Action only.
#[arg(long, hide = true)]
daemon_dir: Option<PathBuf>,
/// The FlakeHub API server.
#[arg(long, default_value = "https://api.flakehub.com")]
flakehub_api_server: reqwest::Url,
/// The path of the `netrc` file that contains the FlakeHub JWT token.
#[arg(long)]
flakehub_api_server_netrc: Option<PathBuf>,
/// The FlakeHub binary cache server.
#[arg(long, default_value = "https://cache.flakehub.com")]
flakehub_cache_server: reqwest::Url,
#[arg(long)]
flakehub_flake_name: Option<String>,
/// The location of `nix.conf`.
#[arg(long, default_value_os_t = default_nix_conf())]
nix_conf: PathBuf,
/// Whether to use the GHA cache.
#[arg(long)]
use_gha_cache: Option<Option<CacheTrinary>>,
/// Whether to use the FlakeHub binary cache.
#[arg(long)]
use_flakehub: Option<Option<CacheTrinary>>,
/// URL to which to post startup notification.
#[arg(long)]
startup_notification_url: Option<reqwest::Url>,
/// File to write to when indicating startup.
#[arg(long)]
startup_notification_file: Option<PathBuf>,
/// Whether or not to diff the store before and after Magic Nix Cache runs
#[arg(long, default_value_t = false)]
diff_store: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, clap::ValueEnum)]
pub enum CacheTrinary {
NoPreference,
Enabled,
Disabled,
}
impl From<Option<Option<CacheTrinary>>> for CacheTrinary {
fn from(b: Option<Option<CacheTrinary>>) -> Self {
match b {
None => CacheTrinary::NoPreference,
Some(None) => CacheTrinary::Enabled,
Some(Some(v)) => v,
}
}
}
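This doubly-wrapped Option is what turns each cache flag into a tri-state; under the From impl above the mapping works out as follows (illustrative; the =value spelling is the unambiguous clap form for a flag with an optional value):

// (flag omitted)           => None                  => CacheTrinary::NoPreference
// --use-gha-cache          => Some(None)            => CacheTrinary::Enabled
// --use-gha-cache=disabled => Some(Some(Disabled))  => CacheTrinary::Disabled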
#[derive(PartialEq, Clone, Copy)]
pub enum Dnixd {
Available,
Missing,
}
impl From<bool> for Dnixd {
fn from(b: bool) -> Self {
if b {
Dnixd::Available
} else {
Dnixd::Missing
}
}
}
impl Args {
fn validate(&self, environment: env::Environment) -> Result<(), error::Error> {
if environment.is_gitlab_ci() && self.github_cache_preference() == CacheTrinary::Enabled {
return Err(error::Error::Config(String::from(
"the --use-gha-cache flag should not be applied in GitLab CI",
)));
}
if environment.is_gitlab_ci() && self.flakehub_preference() != CacheTrinary::Enabled {
return Err(error::Error::Config(String::from(
"you must set --use-flakehub in GitLab CI",
)));
}
Ok(())
}
fn github_cache_preference(&self) -> CacheTrinary {
self.use_gha_cache.into()
}
fn flakehub_preference(&self) -> CacheTrinary {
self.use_flakehub.into()
}
}
fn default_nix_conf() -> PathBuf {
xdg::BaseDirectories::new()
.with_context(|| "identifying XDG base directories")
.expect(
"Could not identify your home directory. Try setting the HOME environment variable.",
)
.get_config_file("nix/nix.conf")
}
/// The global server state.
#[derive(Debug)]
struct StateInner {
/// The GitHub Actions Cache API.
api: Api,
/// State for uploading to the GHA cache.
gha_cache: Option<gha::GhaCache>,
/// The upstream cache.
upstream: Option<String>,
@@ -98,35 +209,203 @@ struct StateInner {
/// The sender half of the oneshot channel to trigger a shutdown.
shutdown_sender: Mutex<Option<oneshot::Sender<()>>>,
/// List of store paths originally present.
original_paths: Mutex<HashSet<PathBuf>>,
/// Set of store path hashes that are not present in GHAC.
narinfo_nagative_cache: RwLock<HashSet<String>>,
/// Endpoint of ourselves.
///
/// This is used by our Action API to invoke `nix copy` to upload new paths.
self_endpoint: SocketAddr,
narinfo_negative_cache: Arc<RwLock<HashSet<String>>>,
/// Metrics for sending to perf at shutdown
metrics: telemetry::TelemetryReport,
metrics: Arc<telemetry::TelemetryReport>,
/// Connection to the local Nix store.
store: Arc<NixStore>,
/// FlakeHub cache state.
flakehub_state: RwLock<Option<flakehub::State>>,
/// Where all of tracing will log to when GitHub Actions is run in debug mode
logfile: Option<PathBuf>,
/// The paths in the Nix store when Magic Nix Cache started, if store diffing is enabled.
original_paths: Option<Mutex<HashSet<PathBuf>>>,
}
fn main() {
init_logging();
#[derive(Debug, Clone)]
pub(crate) enum FlakeHubAuthSource {
DeterminateNixd,
Netrc(PathBuf),
}
impl FlakeHubAuthSource {
pub(crate) fn as_path_buf(&self) -> PathBuf {
match &self {
Self::Netrc(path) => path.clone(),
Self::DeterminateNixd => {
let mut path = PathBuf::from(DETERMINATE_STATE_DIR);
path.push("netrc");
path
}
}
}
}
async fn main_cli() -> Result<()> {
let guard = init_logging()?;
let _tracing_guard = guard.appender_guard;
let args = Args::parse();
let environment = env::Environment::determine();
tracing::debug!("Running in {}", environment.to_string());
args.validate(environment)?;
let credentials = if let Some(credentials_file) = &args.credentials_file {
tracing::info!("Loading credentials from {:?}", credentials_file);
let bytes = fs::read(credentials_file).expect("Failed to read credentials file");
let metrics = Arc::new(telemetry::TelemetryReport::new());
serde_json::from_slice(&bytes).expect("Failed to deserialize credentials file")
let dnixd_uds_socket_dir: &Path = Path::new(&DETERMINATE_STATE_DIR);
let dnixd_uds_socket_path = dnixd_uds_socket_dir.join(DETERMINATE_NIXD_SOCKET_NAME);
let dnixd_available: Dnixd = dnixd_uds_socket_path.exists().into();
let nix_conf_path: PathBuf = args.nix_conf.clone();
// NOTE: we expect this to point to a user nix.conf.
// We always open it for appending so that we can add the extra-substituter for the
// GitHub Actions cache, but we only write the FlakeHub cache settings to it when
// dnixd is unavailable.
if let Some(parent) = Path::new(&nix_conf_path).parent() {
create_dir_all(parent).with_context(|| "Creating parent directories of nix.conf")?;
}
let mut nix_conf = std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(&nix_conf_path)
.with_context(|| "Creating nix.conf")?;
// always enable fallback, first
nix_conf
.write_all(b"fallback = true\n")
.with_context(|| "Setting fallback in nix.conf")?;
let store = Arc::new(NixStore::connect()?);
let narinfo_negative_cache = Arc::new(RwLock::new(HashSet::new()));
let flakehub_auth_method: Option<FlakeHubAuthSource> = match (
args.flakehub_preference(),
&args.flakehub_api_server_netrc,
dnixd_available,
) {
// User has explicitly passed --use-flakehub=disabled, so just straight up don't
(CacheTrinary::Disabled, _, _) => {
tracing::info!("Disabling FlakeHub cache.");
None
}
// User has no preference, did not pass a netrc, and determinate-nixd is not available
(CacheTrinary::NoPreference, None, Dnixd::Missing) => None,
// Use it when determinate-nixd is available, and let the user know what's going on
(pref, user_netrc_path, Dnixd::Available) => {
if pref == CacheTrinary::NoPreference {
tracing::info!("Enabling FlakeHub cache because determinate-nixd is available.");
}
if user_netrc_path.is_some() {
tracing::info!("Ignoring the user-specified --flakehub-api-server-netrc, in favor of the determinate-nixd netrc");
}
Some(FlakeHubAuthSource::DeterminateNixd)
}
// When determinate-nixd is not available, but the user specified a netrc
(_, Some(path), Dnixd::Missing) => {
if path.exists() {
Some(FlakeHubAuthSource::Netrc(path.to_owned()))
} else {
tracing::debug!(path = %path.display(), "User-provided netrc does not exist");
None
}
}
// User explicitly turned on flakehub cache, but we have no netrc and determinate-nixd is not present
(CacheTrinary::Enabled, None, Dnixd::Missing) => {
return Err(anyhow!(
"--flakehub-api-server-netrc is required when determinate-nixd is unavailable"
));
}
};
let flakehub_state = if let Some(auth_method) = flakehub_auth_method {
let flakehub_cache_server = &args.flakehub_cache_server;
let flakehub_api_server = &args.flakehub_api_server;
let flakehub_flake_name = &args.flakehub_flake_name;
match flakehub::init_cache(
environment,
flakehub_api_server,
flakehub_cache_server,
flakehub_flake_name,
store.clone(),
&auth_method,
)
.await
{
Ok(state) => {
if let FlakeHubAuthSource::Netrc(ref path) = auth_method {
nix_conf
.write_all(
format!(
"extra-substituters = {}?trusted=1\nnetrc-file = {}\n",
&flakehub_cache_server,
path.display()
)
.as_bytes(),
)
.with_context(|| "Writing to nix.conf")?;
}
tracing::info!("FlakeHub cache is enabled.");
Some(state)
}
Err(err) => {
tracing::error!("FlakeHub cache initialization failed: {}. Unable to authenticate to FlakeHub. Individuals must register at FlakeHub.com; Organizations must create an organization at FlakeHub.com.", err);
println!("::error title={{FlakeHub: Unauthenticated}}::{{Unable to authenticate to FlakeHub. Individuals must register at FlakeHub.com; Organizations must create an organization at FlakeHub.com.}}");
None
}
}
} else {
tracing::info!("FlakeHub cache is disabled.");
None
};
let gha_cache = if (args.github_cache_preference() == CacheTrinary::Enabled)
|| (args.github_cache_preference() == CacheTrinary::NoPreference
&& flakehub_state.is_none())
{
tracing::info!("Loading credentials from environment");
Credentials::load_from_env()
.expect("Failed to load credentials from environment (see README.md)")
let credentials = Credentials::load_from_env()
.with_context(|| "Failed to load credentials from environment (see README.md)")?;
let gha_cache = gha::GhaCache::new(
credentials,
args.cache_version,
store.clone(),
metrics.clone(),
narinfo_negative_cache.clone(),
)
.with_context(|| "Failed to initialize GitHub Actions Cache API")?;
nix_conf
.write_all(format!("extra-substituters = http://{}?trusted=1&compression=zstd&parallel-compression=true&priority=1\n", args.listen).as_bytes())
.with_context(|| "Writing to nix.conf")?;
tracing::info!("Native GitHub Action cache is enabled.");
Some(gha_cache)
} else {
if environment.is_github_actions() {
tracing::info!("Native GitHub Action cache is disabled.");
}
None
};
let diagnostic_endpoint = match args.diagnostic_endpoint.as_str() {
@@ -137,24 +416,31 @@ fn main() {
url => Some(url),
};
let mut api = Api::new(credentials).expect("Failed to initialize GitHub Actions Cache API");
if let Some(cache_version) = &args.cache_version {
api.mutate_version(cache_version.as_bytes());
}
let (shutdown_sender, shutdown_receiver) = oneshot::channel();
let original_paths = args.diff_store.then_some(Mutex::new(HashSet::new()));
let state = Arc::new(StateInner {
api,
gha_cache,
upstream: args.upstream.clone(),
shutdown_sender: Mutex::new(Some(shutdown_sender)),
original_paths: Mutex::new(HashSet::new()),
narinfo_nagative_cache: RwLock::new(HashSet::new()),
self_endpoint: args.listen.to_owned(),
metrics: telemetry::TelemetryReport::new(),
narinfo_negative_cache,
metrics,
store,
flakehub_state: RwLock::new(flakehub_state),
logfile: guard.logfile,
original_paths,
});
if dnixd_available == Dnixd::Available {
tracing::info!("Subscribing to Determinate Nixd build events.");
crate::pbh::subscribe_uds_post_build_hook(dnixd_uds_socket_path, state.clone()).await?;
} else {
tracing::info!("Patching nix.conf to use a post-build-hook.");
crate::pbh::setup_legacy_post_build_hook(&args.listen, &mut nix_conf).await?;
}
drop(nix_conf);
let app = Router::new()
.route("/", get(root))
.merge(api::get_router())
@@ -167,60 +453,180 @@
let app = app.layer(Extension(state.clone()));
if args.daemon_dir.is_some() {
let dir = args.daemon_dir.as_ref().unwrap();
let logfile: OwnedFd = File::create(dir.join("daemon.log")).unwrap().into();
let daemon = Daemonize::new()
.pid_file(dir.join("daemon.pid"))
.stdout(File::from(logfile.try_clone().unwrap()))
.stderr(File::from(logfile));
tracing::info!("Listening on {}", args.listen);
tracing::info!("Forking into the background");
daemon.start().expect("Failed to fork into the background");
// Notify of startup via HTTP
if let Some(startup_notification_url) = args.startup_notification_url {
tracing::debug!("Startup notification via HTTP POST to {startup_notification_url}");
let response = reqwest::Client::new()
.post(startup_notification_url)
.header(reqwest::header::CONTENT_TYPE, "application/json")
.body("{}")
.send()
.await;
match response {
Ok(response) => {
if !response.status().is_success() {
Err(anyhow!(
"Startup notification returned an error: {}\n{}",
response.status(),
response
.text()
.await
.unwrap_or_else(|_| "<no response text>".to_owned())
))?;
}
}
err @ Err(_) => {
err.with_context(|| "Startup notification failed")?;
}
}
}
let rt = Runtime::new().unwrap();
rt.block_on(async move {
tracing::info!("Listening on {}", args.listen);
let ret = axum::Server::bind(&args.listen)
.serve(app.into_make_service())
.with_graceful_shutdown(async move {
shutdown_receiver.await.ok();
tracing::info!("Shutting down");
})
.await;
// Notify of startup by writing "1" to the specified file
if let Some(startup_notification_file_path) = args.startup_notification_file {
let file_contents: &[u8] = b"1";
if let Some(diagnostic_endpoint) = diagnostic_endpoint {
state.metrics.send(diagnostic_endpoint);
tracing::debug!("Startup notification via file at {startup_notification_file_path:?}");
if let Some(parent_dir) = startup_notification_file_path.parent() {
tokio::fs::create_dir_all(parent_dir)
.await
.with_context(|| {
format!(
"failed to create parent directory for startup notification file path: {}",
startup_notification_file_path.display()
)
})?;
}
ret.unwrap()
});
let mut notification_file = File::create(&startup_notification_file_path)
.await
.with_context(|| {
format!(
"failed to create startup notification file to path: {}",
startup_notification_file_path.display()
)
})?;
notification_file
.write_all(file_contents)
.await
.with_context(|| {
format!(
"failed to write startup notification file to path: {}",
startup_notification_file_path.display()
)
})?;
tracing::debug!("Created startup notification file at {startup_notification_file_path:?}");
}
let listener = tokio::net::TcpListener::bind(&args.listen).await?;
let ret = axum::serve(listener, app.into_make_service())
.with_graceful_shutdown(async move {
shutdown_receiver.await.ok();
tracing::info!("Shutting down");
})
.await;
// Notify diagnostics endpoint
if let Some(diagnostic_endpoint) = diagnostic_endpoint {
state.metrics.send(diagnostic_endpoint).await;
}
ret?;
Ok(())
}
fn init_logging() {
#[tokio::main]
async fn main() -> Result<()> {
match std::env::var("OUT_PATHS") {
Ok(out_paths) => pbh::handle_legacy_post_build_hook(&out_paths).await,
Err(_) => main_cli().await,
}
}
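The dispatch above leans on how Nix invokes post-build hooks: the daemon runs the configured hook with OUT_PATHS set to a space-separated list of the build's output paths, so the same binary doubles as the hook client. Roughly (store paths hypothetical):

// As invoked by the Nix daemon:
//   OUT_PATHS="/nix/store/<hash>-foo /nix/store/<hash>-bar" <post-build-hook>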
pub(crate) fn debug_logfile() -> PathBuf {
std::env::temp_dir().join("magic-nix-cache-tracing.log")
}
pub struct LogGuard {
appender_guard: Option<tracing_appender::non_blocking::WorkerGuard>,
logfile: Option<PathBuf>,
}
fn init_logging() -> Result<LogGuard> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| {
#[cfg(debug_assertions)]
return EnvFilter::new("info")
.add_directive("magic_nix_cache=debug".parse().unwrap())
.add_directive("gha_cache=debug".parse().unwrap());
.add_directive(
"magic_nix_cache=debug"
.parse()
.expect("failed to parse magix_nix_cache directive"),
)
.add_directive(
"gha_cache=debug"
.parse()
.expect("failed to parse gha_cahce directive"),
);
#[cfg(not(debug_assertions))]
return EnvFilter::new("info");
});
tracing_subscriber::fmt()
.pretty()
.with_env_filter(filter)
let stderr_layer = tracing_subscriber::fmt::layer()
.with_writer(std::io::stderr)
.pretty();
let (guard, file_layer) = match std::env::var("RUNNER_DEBUG") {
Ok(val) if val == "1" => {
let logfile = debug_logfile();
let file = std::fs::OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.open(&logfile)?;
let (nonblocking, guard) = tracing_appender::non_blocking(file);
let file_layer = tracing_subscriber::fmt::layer()
.with_writer(nonblocking)
.pretty();
(
LogGuard {
appender_guard: Some(guard),
logfile: Some(logfile),
},
Some(file_layer),
)
}
_ => (
LogGuard {
appender_guard: None,
logfile: None,
},
None,
),
};
tracing_subscriber::registry()
.with(filter)
.with(stderr_layer)
.with(file_layer)
.init();
Ok(guard)
}
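A usage note on the branch above: re-running a GitHub Actions job with debug logging enabled sets RUNNER_DEBUG=1, so init_logging then mirrors the pretty tracing output into the temp-dir logfile computed by debug_logfile, in addition to stderr.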
#[cfg(debug_assertions)]
async fn dump_api_stats<B>(
async fn dump_api_stats(
Extension(state): Extension<State>,
request: axum::http::Request<B>,
next: axum::middleware::Next<B>,
request: axum::http::Request<axum::body::Body>,
next: axum::middleware::Next,
) -> axum::response::Response {
state.api.dump_stats();
if let Some(gha_cache) = &state.gha_cache {
gha_cache.api.dump_stats();
}
next.run(request).await
}

magic-nix-cache/src/pbh.rs Normal file
@@ -0,0 +1,241 @@
use std::io::Write as _;
use std::net::SocketAddr;
use std::os::unix::fs::PermissionsExt as _;
use std::path::PathBuf;
use anyhow::anyhow;
use anyhow::Context as _;
use anyhow::Result;
use clap::Parser;
use futures::StreamExt as _;
use http_body_util::BodyExt as _;
use hyper_util::rt::TokioExecutor;
use hyper_util::rt::TokioIo;
use tempfile::NamedTempFile;
use tokio::net::UnixStream;
use tokio::process::Command;
use crate::BuiltPathResponseEventV1;
use crate::State;
pub async fn subscribe_uds_post_build_hook(
dnixd_uds_socket_path: PathBuf,
state: State,
) -> Result<()> {
tokio::spawn(async move {
let dnixd_uds_socket_path = &dnixd_uds_socket_path;
loop {
let Ok(socket_conn) = UnixStream::connect(dnixd_uds_socket_path).await else {
tracing::error!("built-paths: failed to connect to determinate-nixd's socket");
tokio::time::sleep(std::time::Duration::from_secs(10)).await;
continue;
};
let stream = TokioIo::new(socket_conn);
let executor: TokioExecutor = TokioExecutor::new();
let sender_conn = hyper::client::conn::http2::handshake(executor, stream).await;
let Ok((mut sender, conn)) = sender_conn else {
tracing::error!("built-paths: failed to http2 handshake");
continue;
};
// NOTE(colemickens): for now we just drop the joinhandle and let it keep running
let _join_handle = tokio::task::spawn(async move {
if let Err(err) = conn.await {
tracing::error!("Connection failed: {:?}", err);
}
});
let request = http::Request::builder()
.method(http::Method::GET)
.uri("http://localhost/events")
.body(axum::body::Body::empty());
let Ok(request) = request else {
tracing::error!("built-paths: failed to create request to subscribe");
continue;
};
let response = sender.send_request(request).await;
let response = match response {
Ok(r) => r,
Err(e) => {
tracing::error!("buit-paths: failed to send subscription request: {:?}", e);
continue;
}
};
let mut data = response.into_data_stream();
while let Some(event_str) = data.next().await {
let event_str = match event_str {
Ok(event) => event,
Err(e) => {
tracing::error!("built-paths: error while receiving: {}", e);
break;
}
};
let Some(event_str) = event_str.strip_prefix("data: ".as_bytes()) else {
tracing::debug!("built-paths subscription: ignoring non-data frame");
continue;
};
let Ok(event): core::result::Result<BuiltPathResponseEventV1, _> =
serde_json::from_slice(event_str)
else {
tracing::error!(
"failed to decode built-path response as BuiltPathResponseEventV1"
);
continue;
};
let maybe_store_paths = event
.outputs
.iter()
.map(|path| {
state
.store
.follow_store_path(path)
.map_err(|_| anyhow!("failed to collect store paths"))
})
.collect::<Result<Vec<_>>>();
let Ok(store_paths) = maybe_store_paths else {
tracing::error!(
"built-paths: encountered an error aggregating build store paths"
);
continue;
};
tracing::debug!("about to enqueue paths: {:?}", store_paths);
if let Err(e) = crate::api::enqueue_paths(&state, store_paths).await {
tracing::error!(
"built-paths: failed to enqueue paths for drv ({}): {}",
event.drv.display(),
e
);
continue;
}
}
}
});
Ok(())
}
pub async fn setup_legacy_post_build_hook(
listen: &SocketAddr,
nix_conf: &mut std::fs::File,
) -> Result<()> {
/* Write the post-build hook script. Note that the shell script
* ignores errors, to avoid the Nix build from failing. */
let post_build_hook_script = {
let mut file = NamedTempFile::with_prefix("magic-nix-cache-build-hook-")
.with_context(|| "Creating a temporary file for the post-build hook")?;
file.write_all(
format!(
// NOTE(cole-h): We want to exit 0 even if the hook failed, otherwise it'll fail the
// build itself
"#! /bin/sh\nRUST_LOG=trace RUST_BACKTRACE=full {} --server {} || :\n",
std::env::current_exe()
.with_context(|| "Getting the path of magic-nix-cache")?
.display(),
listen
)
.as_bytes(),
)
.with_context(|| "Writing the post-build hook")?;
let path = file
.keep()
.with_context(|| "Keeping the post-build hook")?
.1;
std::fs::set_permissions(&path, std::fs::Permissions::from_mode(0o755))
.with_context(|| "Setting permissions on the post-build hook")?;
/* Copy the script to the Nix store so we know for sure that
* it's accessible to the Nix daemon, which might have a
* different /tmp from us. */
let res = Command::new("nix")
.args([
"--extra-experimental-features",
"nix-command",
"store",
"add-path",
&path.display().to_string(),
])
.output()
.await
.with_context(|| {
format!(
"Running nix to add the post-build-hook to the store from {}",
path.display()
)
})?;
if res.status.success() {
tokio::fs::remove_file(&path).await.with_context(|| {
format!(
"Cleaning up the temporary post-build-hook at {}",
path.display()
)
})?;
PathBuf::from(String::from_utf8_lossy(&res.stdout).trim())
} else {
path
}
};
/* Update nix.conf. */
nix_conf
.write_all(format!("post-build-hook = {}\n", post_build_hook_script.display()).as_bytes())
.with_context(|| "Writing to nix.conf")?;
Ok(())
}
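Concretely, the function above leaves nix.conf pointing at a tiny store-resident wrapper script. With a listen address of 127.0.0.1:3000 (matching the hook-side default below), the generated script and the appended nix.conf line would read roughly as follows (the store hash and binary path are hypothetical placeholders):

#! /bin/sh
RUST_LOG=trace RUST_BACKTRACE=full /path/to/magic-nix-cache --server 127.0.0.1:3000 || :

post-build-hook = /nix/store/<hash>-magic-nix-cache-build-hook-<suffix>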
pub async fn handle_legacy_post_build_hook(out_paths: &str) -> Result<()> {
#[derive(Parser, Debug)]
struct Args {
/// `magic-nix-cache` daemon to connect to.
#[arg(short = 'l', long, default_value = "127.0.0.1:3000")]
server: SocketAddr,
}
let args = Args::parse();
let store_paths: Vec<_> = out_paths
.split_whitespace()
.map(|s| s.trim().to_owned())
.collect();
let request = crate::api::EnqueuePathsRequest { store_paths };
let response = reqwest::Client::new()
.post(format!("http://{}/api/enqueue-paths", &args.server))
.header(reqwest::header::CONTENT_TYPE, "application/json")
.body(
serde_json::to_string(&request)
.with_context(|| "Decoding the response from the magic-nix-cache server")?,
)
.send()
.await;
match response {
Ok(response) if !response.status().is_success() => Err(anyhow!(
"magic-nix-cache server failed to enqueue the push request: {}\n{}",
response.status(),
response
.text()
.await
.unwrap_or_else(|_| "<no response text>".to_owned()),
))?,
Ok(response) => response
.json::<crate::api::EnqueuePathsResponse>()
.await
.with_context(|| "magic-nix-cache-server didn't return a valid response")?,
Err(err) => {
Err(err).with_context(|| "magic-nix-cache server failed to send the enqueue request")?
}
};
Ok(())
}

magic-nix-cache/src/telemetry.rs
@@ -1,7 +1,6 @@
use std::env;
use std::time::SystemTime;
use is_ci;
use sha2::{Digest, Sha256};
/// A telemetry report to measure the effectiveness of the Magic Nix Cache
@@ -29,16 +28,18 @@ pub struct TelemetryReport {
pub num_original_paths: Metric,
pub num_final_paths: Metric,
pub num_new_paths: Metric,
pub tripped_429: std::sync::atomic::AtomicBool,
}
#[derive(Debug, Default, serde::Serialize)]
pub struct Metric(std::sync::atomic::AtomicUsize);
impl Metric {
pub fn incr(&self) -> () {
pub fn incr(&self) {
self.0.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
}
pub fn set(&self, val: usize) -> () {
pub fn set(&self, val: usize) {
self.0.store(val, std::sync::atomic::Ordering::Relaxed);
}
}
@@ -46,7 +47,9 @@ impl Metric {
impl TelemetryReport {
pub fn new() -> TelemetryReport {
TelemetryReport {
distinct_id: calculate_opaque_id().ok(),
distinct_id: env::var("DETSYS_CORRELATION")
.ok()
.or_else(|| calculate_opaque_id().ok()),
version: env!("CARGO_PKG_VERSION").to_string(),
is_ci: is_ci::cached(),
@@ -57,7 +60,7 @@ impl TelemetryReport {
}
}
pub fn send(&self, endpoint: &str) {
pub async fn send(&self, endpoint: &str) {
if let Some(start_time) = self.start_time {
self.elapsed_seconds.set(
SystemTime::now()
@@ -70,12 +73,13 @@ impl TelemetryReport {
}
if let Ok(serialized) = serde_json::to_string_pretty(&self) {
let _ = reqwest::blocking::Client::new()
let _ = reqwest::Client::new()
.post(endpoint)
.body(serialized)
.header("Content-Type", "application/json")
.timeout(std::time::Duration::from_millis(3000))
.send();
.send()
.await;
}
}
}

magic-nix-cache/src/util.rs
@@ -3,23 +3,36 @@
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use tokio::{fs, process::Command};
use attic::nix_store::NixStore;
use crate::error::{Error, Result};
use crate::error::Result;
/// Returns the list of store paths that are currently present.
pub async fn get_store_paths() -> Result<HashSet<PathBuf>> {
let store_dir = Path::new("/nix/store");
let mut listing = fs::read_dir(store_dir).await?;
pub async fn get_store_paths(store: &NixStore) -> Result<HashSet<PathBuf>> {
// FIXME: use the Nix API.
let store_dir = store.store_dir();
let mut listing = tokio::fs::read_dir(store_dir).await.map_err(|e| {
crate::error::Error::Io(
e,
format!("Enumerating store paths in {}", store_dir.display()),
)
})?;
let mut paths = HashSet::new();
while let Some(entry) = listing.next_entry().await? {
while let Some(entry) = listing.next_entry().await.map_err(|e| {
crate::error::Error::Io(
e,
format!("Reading existing store paths from {}", store_dir.display()),
)
})? {
let file_name = entry.file_name();
let file_name = Path::new(&file_name);
if let Some(extension) = file_name.extension() {
match extension.to_str() {
None | Some("drv") | Some("lock") => {
// Malformed or not interesting
None | Some("drv") | Some("chroot") => {
tracing::debug!(
"skipping file with weird or uninteresting extension {extension:?}"
);
continue;
}
_ => {}
@@ -27,13 +40,8 @@ pub async fn get_store_paths() -> Result<HashSet<PathBuf>> {
}
if let Some(s) = file_name.to_str() {
// Let's not push any sources
if s.ends_with("-source") {
continue;
}
// Special paths (so far only `.links`)
if s.starts_with('.') {
if s == ".links" {
continue;
}
}
@@ -42,44 +50,3 @@ pub async fn get_store_paths() -> Result<HashSet<PathBuf>> {
}
Ok(paths)
}
/// Uploads a list of store paths to a store URI.
pub async fn upload_paths(mut paths: Vec<PathBuf>, store_uri: &str) -> Result<()> {
// When the daemon started Nix may not have been installed
let env_path = Command::new("sh")
.args(["-lc", "echo $PATH"])
.output()
.await?
.stdout;
let env_path = String::from_utf8(env_path).expect("PATH contains invalid UTF-8");
while !paths.is_empty() {
let mut batch = Vec::new();
let mut total_len = 0;
while !paths.is_empty() && total_len < 1024 * 1024 {
let p = paths.pop().unwrap();
total_len += p.as_os_str().len() + 1;
batch.push(p);
}
tracing::debug!("{} paths in this batch", batch.len());
let status = Command::new("nix")
.args(["--extra-experimental-features", "nix-command"])
.args(["copy", "--to", store_uri])
.args(&batch)
.env("PATH", &env_path)
.status()
.await?;
if status.success() {
tracing::debug!("Uploaded batch");
} else {
tracing::error!("Failed to upload batch: {:?}", status);
return Err(Error::FailedToUpload);
}
}
Ok(())
}