Merge pull request #77 from DeterminateSystems/429

Back off on 429
This commit was authored by Graham Christensen on 2024-06-13 13:24:09 -04:00 and committed by GitHub.
commit ef5c9ec6ef
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 123 additions and 7 deletions

View file

@ -78,3 +78,9 @@ jobs:
- name: Run nix to test magic-nix-cache-action - name: Run nix to test magic-nix-cache-action
run: | run: |
nix develop --command echo "just testing" nix develop --command echo "just testing"
# Builds .#veryLongChain (a long chain of trivial derivations) to hammer the
# GitHub Actions Cache API — presumably to provoke 429 throttling so the
# client's back-off/circuit-breaker path can be exercised end to end.
- name: Exhaust our GitHub Actions Cache tokens
  # Generally skip this step since it is so intensive; change to true to run it.
  if: ${{ false }}
  run: |
    # Touch README.md (the chain's source context) so every run rebuilds
    # instead of hitting previously cached outputs.
    date >> README.md
    nix build .#veryLongChain -v

View file

@ -43,6 +43,36 @@
magic-nix-cache = pkgs.callPackage ./package.nix { }; magic-nix-cache = pkgs.callPackage ./package.nix { };
#inherit (cranePkgs) magic-nix-cache; #inherit (cranePkgs) magic-nix-cache;
default = magic-nix-cache; default = magic-nix-cache;
# A deliberately deep chain of 201 derivations (start-file + chain-0..chain-200),
# used by CI to generate many cache uploads in one run (see the
# "Exhaust our GitHub Actions Cache tokens" workflow step).
veryLongChain =
  let
    ctx = ./README.md;
    # Derivation that copies README.md to $out; since README.md is mutated by
    # the CI step before building, this changes on every run and invalidates
    # the whole chain.
    startFile =
      pkgs.stdenv.mkDerivation {
        name = "start-file";
        buildCommand = ''
          cat ${ctx} > $out
        '';
      };
    # Recursive function to create a chain of derivations: chain-n depends on
    # chain-(n-1), bottoming out at startFile when n == 0.
    createChain = n: startFile:
      pkgs.stdenv.mkDerivation {
        name = "chain-${toString n}";
        src =
          if n == 0 then
            startFile
          else createChain (n - 1) startFile;
        buildCommand = ''
          echo $src > $out
        '';
      };
  in
  # Starting point of the chain
  createChain 200 startFile;
}); });
devShells = forEachSupportedSystem ({ pkgs, cranePkgs, lib }: { devShells = forEachSupportedSystem ({ pkgs, cranePkgs, lib }: {
@ -56,10 +86,15 @@
cargo-bloat cargo-bloat
cargo-edit cargo-edit
cargo-udeps cargo-udeps
cargo-watch
bacon bacon
age age
]; ] ++ lib.optionals pkgs.stdenv.isDarwin (with pkgs.darwin.apple_sdk.frameworks; [
SystemConfiguration
]);
NIX_CFLAGS_LINK = lib.optionalString pkgs.stdenv.isDarwin "-lc++abi";
}; };
/* /*

View file

@ -4,7 +4,8 @@
use std::fmt; use std::fmt;
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::AtomicUsize;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; use std::sync::Arc;
use async_trait::async_trait; use async_trait::async_trait;
@ -53,6 +54,11 @@ pub enum Error {
#[error("Failed to initialize the client: {0}")] #[error("Failed to initialize the client: {0}")]
InitError(Box<dyn std::error::Error + Send + Sync>), InitError(Box<dyn std::error::Error + Send + Sync>),
#[error(
"GitHub Actions Cache throttled Magic Nix Cache. Not trying to use it again on this run."
)]
CircuitBreakerTripped,
#[error("Request error: {0}")] #[error("Request error: {0}")]
RequestError(#[from] reqwest::Error), // TODO: Better errors RequestError(#[from] reqwest::Error), // TODO: Better errors
@ -96,6 +102,8 @@ pub struct Api {
/// The concurrent upload limit. /// The concurrent upload limit.
concurrency_limit: Arc<Semaphore>, concurrency_limit: Arc<Semaphore>,
circuit_breaker_429_tripped: Arc<AtomicBool>,
/// Backend request statistics. /// Backend request statistics.
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
stats: RequestStats, stats: RequestStats,
@ -264,11 +272,16 @@ impl Api {
version_hasher, version_hasher,
client, client,
concurrency_limit: Arc::new(Semaphore::new(MAX_CONCURRENCY)), concurrency_limit: Arc::new(Semaphore::new(MAX_CONCURRENCY)),
circuit_breaker_429_tripped: Arc::new(AtomicBool::from(false)),
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
stats: Default::default(), stats: Default::default(),
}) })
} }
/// Reports whether the 429 circuit breaker has tripped, i.e. a previous
/// request was throttled by the GitHub Actions Cache service. Once true,
/// the API methods short-circuit with `Error::CircuitBreakerTripped`
/// instead of issuing further requests for the rest of this run.
pub fn circuit_breaker_tripped(&self) -> bool {
    self.circuit_breaker_429_tripped.load(Ordering::Relaxed)
}
/// Mutates the cache version/namespace. /// Mutates the cache version/namespace.
pub fn mutate_version(&mut self, data: &[u8]) { pub fn mutate_version(&mut self, data: &[u8]) {
self.version_hasher.update(data); self.version_hasher.update(data);
@ -324,6 +337,10 @@ impl Api {
where where
S: AsyncRead + Unpin + Send, S: AsyncRead + Unpin + Send,
{ {
if self.circuit_breaker_tripped() {
return Err(Error::CircuitBreakerTripped);
}
let mut offset = 0; let mut offset = 0;
let mut futures = Vec::new(); let mut futures = Vec::new();
loop { loop {
@ -347,6 +364,7 @@ impl Api {
futures.push({ futures.push({
let client = self.client.clone(); let client = self.client.clone();
let concurrency_limit = self.concurrency_limit.clone(); let concurrency_limit = self.concurrency_limit.clone();
let circuit_breaker_429_tripped = self.circuit_breaker_429_tripped.clone();
let url = self.construct_url(&format!("caches/{}", allocation.0 .0)); let url = self.construct_url(&format!("caches/{}", allocation.0 .0));
tokio::task::spawn(async move { tokio::task::spawn(async move {
@ -380,6 +398,8 @@ impl Api {
drop(permit); drop(permit);
circuit_breaker_429_tripped.check_result(&r);
r r
}) })
}); });
@ -401,6 +421,10 @@ impl Api {
/// Downloads a file based on a list of key prefixes. /// Downloads a file based on a list of key prefixes.
pub async fn get_file_url(&self, keys: &[&str]) -> Result<Option<String>> { pub async fn get_file_url(&self, keys: &[&str]) -> Result<Option<String>> {
if self.circuit_breaker_tripped() {
return Err(Error::CircuitBreakerTripped);
}
Ok(self Ok(self
.get_cache_entry(keys) .get_cache_entry(keys)
.await? .await?
@ -419,6 +443,10 @@ impl Api {
/// Retrieves a cache based on a list of key prefixes. /// Retrieves a cache based on a list of key prefixes.
async fn get_cache_entry(&self, keys: &[&str]) -> Result<Option<ArtifactCacheEntry>> { async fn get_cache_entry(&self, keys: &[&str]) -> Result<Option<ArtifactCacheEntry>> {
if self.circuit_breaker_tripped() {
return Err(Error::CircuitBreakerTripped);
}
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
self.stats.get.fetch_add(1, Ordering::SeqCst); self.stats.get.fetch_add(1, Ordering::SeqCst);
@ -431,6 +459,8 @@ impl Api {
.check_json() .check_json()
.await; .await;
self.circuit_breaker_429_tripped.check_result(&res);
match res { match res {
Ok(entry) => Ok(Some(entry)), Ok(entry) => Ok(Some(entry)),
Err(Error::DecodeError { status, .. }) if status == StatusCode::NO_CONTENT => Ok(None), Err(Error::DecodeError { status, .. }) if status == StatusCode::NO_CONTENT => Ok(None),
@ -448,6 +478,10 @@ impl Api {
key: &str, key: &str,
cache_size: Option<usize>, cache_size: Option<usize>,
) -> Result<ReserveCacheResponse> { ) -> Result<ReserveCacheResponse> {
if self.circuit_breaker_tripped() {
return Err(Error::CircuitBreakerTripped);
}
tracing::debug!("Reserving cache for {}", key); tracing::debug!("Reserving cache for {}", key);
let req = ReserveCacheRequest { let req = ReserveCacheRequest {
@ -466,13 +500,19 @@ impl Api {
.send() .send()
.await? .await?
.check_json() .check_json()
.await?; .await;
Ok(res) self.circuit_breaker_429_tripped.check_result(&res);
res
} }
/// Finalizes uploading to a cache. /// Finalizes uploading to a cache.
async fn commit_cache(&self, cache_id: CacheId, size: usize) -> Result<()> { async fn commit_cache(&self, cache_id: CacheId, size: usize) -> Result<()> {
if self.circuit_breaker_tripped() {
return Err(Error::CircuitBreakerTripped);
}
tracing::debug!("Commiting cache {:?}", cache_id); tracing::debug!("Commiting cache {:?}", cache_id);
let req = CommitCacheRequest { size }; let req = CommitCacheRequest { size };
@ -480,13 +520,18 @@ impl Api {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
self.stats.post.fetch_add(1, Ordering::SeqCst); self.stats.post.fetch_add(1, Ordering::SeqCst);
self.client if let Err(e) = self
.client
.post(self.construct_url(&format!("caches/{}", cache_id.0))) .post(self.construct_url(&format!("caches/{}", cache_id.0)))
.json(&req) .json(&req)
.send() .send()
.await? .await?
.check() .check()
.await?; .await
{
self.circuit_breaker_429_tripped.check_err(&e);
return Err(e);
}
Ok(()) Ok(())
} }
@ -554,3 +599,27 @@ async fn handle_error(res: reqwest::Response) -> Error {
Error::ApiError { status, info } Error::ApiError { status, info }
} }
/// Trip-only circuit-breaker behavior layered onto an atomic flag:
/// inspect an API error (or a `Result`) and latch the flag to `true`
/// when GitHub Actions Cache answers 429 Too Many Requests. The flag is
/// never reset — throttling disables the cache for the rest of the run.
trait AtomicCircuitBreaker {
    /// Trips the breaker if `e` is an `ApiError` with HTTP status 429.
    fn check_err(&self, e: &Error);
    /// Convenience wrapper: applies `check_err` to the `Err` arm of `r`, if any.
    fn check_result<T>(&self, r: &std::result::Result<T, Error>);
}

impl AtomicCircuitBreaker for AtomicBool {
    fn check_result<T>(&self, r: &std::result::Result<T, Error>) {
        if let Err(ref e) = r {
            self.check_err(e)
        }
    }

    fn check_err(&self, e: &Error) {
        // Only a 429 ApiError trips the breaker; every other error variant
        // (request errors, decode errors, other statuses) is left untouched.
        if let Error::ApiError {
            status: reqwest::StatusCode::TOO_MANY_REQUESTS,
            info: ref _info,
        } = e
        {
            tracing::info!("Disabling GitHub Actions Cache due to 429: Too Many Requests");
            // Relaxed ordering: the flag is an advisory one-way latch read by
            // `circuit_breaker_tripped()` — NOTE(review): looks like no other
            // memory ordering depends on it; confirm.
            self.store(true, Ordering::Relaxed);
        }
    }
}

View file

@ -345,7 +345,7 @@ async fn rewrite_github_actions_token(
let token_response: TokenResponse = token_response let token_response: TokenResponse = token_response
.json() .json()
.await .await
.with_context(|| format!("converting response into json"))?; .with_context(|| "converting response into json")?;
let new_github_jwt_string = token_response.value; let new_github_jwt_string = token_response.value;
let netrc_contents = tokio::fs::read_to_string(netrc_path) let netrc_contents = tokio::fs::read_to_string(netrc_path)

View file

@ -119,6 +119,11 @@ async fn worker(
break; break;
} }
Request::Upload(path) => { Request::Upload(path) => {
if api.circuit_breaker_tripped() {
tracing::trace!("GitHub Actions gave us a 429, so we're done.",);
continue;
}
if !done.insert(path.clone()) { if !done.insert(path.clone()) {
continue; continue;
} }
@ -190,6 +195,7 @@ async fn upload_path(
api.upload_file(narinfo_allocation, narinfo.as_bytes()) api.upload_file(narinfo_allocation, narinfo.as_bytes())
.await?; .await?;
metrics.narinfos_uploaded.incr(); metrics.narinfos_uploaded.incr();
narinfo_negative_cache narinfo_negative_cache