Add simple request statistics to debug builds

Zhaofeng Li 2023-05-08 12:59:57 -06:00
parent 9cc43d0e32
commit a6873c91f5
4 changed files with 55 additions and 4 deletions

Cargo.lock

@@ -415,6 +415,7 @@ dependencies = [
"sha2",
"thiserror",
"tokio",
"tracing",
]
[[package]]

Cargo.toml

@@ -14,6 +14,7 @@ serde = { version = "1.0.162", features = ["derive"] }
serde_json = "1.0.96"
sha2 = "0.10.6"
thiserror = "1.0.40"
tracing = "0.1.37"
[dependencies.tokio]
version = "1.28.0"


@@ -3,6 +3,8 @@
//! We expose a high-level API that deals with "files."
use std::fmt;
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
use async_trait::async_trait;
use bytes::{Bytes, BytesMut};
@@ -68,7 +70,7 @@ pub enum Error {
TooManyCollisions,
}
#[derive(Debug, Clone)]
#[derive(Debug)]
pub struct Api {
/// Credentials to access the cache.
credentials: Credentials,
@@ -84,6 +86,10 @@ pub struct Api {
/// The HTTP client for authenticated requests.
client: Client,
/// Backend request statistics.
#[cfg(debug_assertions)]
stats: RequestStats,
}
/// A file allocation.
@@ -182,6 +188,14 @@ struct CommitCacheRequest {
size: usize,
}
#[cfg(debug_assertions)]
#[derive(Default, Debug)]
struct RequestStats {
get: AtomicUsize,
post: AtomicUsize,
patch: AtomicUsize,
}
#[async_trait]
trait ResponseExt {
async fn check(self) -> Result<()>;
@@ -240,6 +254,8 @@ impl Api {
version: initial_version,
version_hasher,
client,
#[cfg(debug_assertions)]
stats: Default::default(),
})
}
@@ -311,6 +327,9 @@ impl Api {
let chunk_len = chunk.len();
#[cfg(debug_assertions)]
self.stats.patch.fetch_add(1, Ordering::SeqCst);
self.client
.patch(self.construct_url(&format!("caches/{}", allocation.0 .0)))
.header(CONTENT_TYPE, "application/octet-stream")
@@ -340,10 +359,21 @@ impl Api {
.map(|entry| entry.archive_location))
}
/// Dumps statistics.
///
/// This is for debugging only.
pub fn dump_stats(&self) {
#[cfg(debug_assertions)]
tracing::debug!("Request stats: {:#?}", self.stats);
}
// Private
/// Retrieves a cache based on a list of key prefixes.
async fn get_cache_entry(&self, keys: &[&str]) -> Result<Option<ArtifactCacheEntry>> {
#[cfg(debug_assertions)]
self.stats.get.fetch_add(1, Ordering::SeqCst);
let res = self
.client
.get(self.construct_url("cache"))
@@ -376,6 +406,9 @@ impl Api {
cache_size,
};
#[cfg(debug_assertions)]
self.stats.post.fetch_add(1, Ordering::SeqCst);
let res = self
.client
.post(self.construct_url("caches"))
@@ -392,6 +425,9 @@ impl Api {
async fn commit_cache(&self, cache_id: CacheId, size: usize) -> Result<()> {
let req = CommitCacheRequest { size };
#[cfg(debug_assertions)]
self.stats.post.fetch_add(1, Ordering::SeqCst);
self.client
.post(self.construct_url(&format!("caches/{}", cache_id.0)))
.json(&req)

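As a side note, the mechanism this file gains is just a set of per-method `AtomicUsize` counters that exist only in debug builds, are bumped with `fetch_add` from `&self` methods, and are dumped via their `Debug` representation. Below is a minimal, self-contained sketch of that pattern under those assumptions; `Backend` and its `get` method are hypothetical stand-ins for the real `Api` client, and `println!` stands in for `tracing::debug!`.

```rust
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};

/// Per-method request counters, present only in debug builds.
#[cfg(debug_assertions)]
#[derive(Default, Debug)]
struct RequestStats {
    get: AtomicUsize,
    post: AtomicUsize,
    patch: AtomicUsize,
}

/// Hypothetical stand-in for the real `Api` client.
struct Backend {
    #[cfg(debug_assertions)]
    stats: RequestStats,
}

impl Backend {
    fn new() -> Self {
        Self {
            #[cfg(debug_assertions)]
            stats: RequestStats::default(),
        }
    }

    /// A request method: atomics allow counting through `&self` without locks.
    fn get(&self, _url: &str) {
        #[cfg(debug_assertions)]
        self.stats.get.fetch_add(1, Ordering::SeqCst);
        // ... perform the actual GET here ...
    }

    /// Dumps statistics; compiles to a no-op in release builds.
    fn dump_stats(&self) {
        #[cfg(debug_assertions)]
        println!("Request stats: {:#?}", self.stats);
    }
}

fn main() {
    let backend = Backend::new();
    backend.get("https://example.com/cache");
    backend.dump_stats();
}
```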

@@ -86,11 +86,14 @@ async fn main() {
.route("/:path", put(put_narinfo))
// .nar
.route("/nar/:path", get(get_nar))
.route("/nar/:path", put(put_nar))
.layer(Extension(state));
.route("/nar/:path", put(put_nar));
#[cfg(debug_assertions)]
let app = app.layer(tower_http::trace::TraceLayer::new_for_http());
let app = app
.layer(tower_http::trace::TraceLayer::new_for_http())
.layer(axum::middleware::from_fn(dump_api_stats));
let app = app.layer(Extension(state));
tracing::info!("listening on {}", args.listen);
axum::Server::bind(&args.listen)
@@ -99,6 +102,16 @@ async fn main() {
.unwrap();
}
#[cfg(debug_assertions)]
async fn dump_api_stats<B>(
Extension(state): Extension<State>,
request: axum::http::Request<B>,
next: axum::middleware::Next<B>,
) -> axum::response::Response {
state.api.dump_stats();
next.run(request).await
}
async fn root() -> &'static str {
"cache the world 🚀"
}
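For reference, here is a rough sketch of the debug-only middleware wiring used above, assuming axum 0.6 and tower-http (matching the `Next<B>` and `from_fn` signatures in the diff). The stats-dumping middleware is attached only in debug builds, and the `Extension` layer is added last (outermost) so the shared state is already in the request extensions when the middleware's extractor runs. `AppState`, `handler`, and `build_app` are hypothetical names, and the snippet assumes the axum, tower-http, and tracing crates are available.

```rust
use std::sync::Arc;

use axum::{
    extract::Extension,
    http::Request,
    middleware::{self, Next},
    response::Response,
    routing::get,
    Router,
};

/// Hypothetical application state; the real one holds the `Api` client.
#[derive(Clone)]
struct AppState;

impl AppState {
    fn dump_stats(&self) {
        tracing::debug!("request stats would be dumped here");
    }
}

/// Logs the current counters before each request is handled.
async fn dump_stats_middleware<B>(
    Extension(state): Extension<Arc<AppState>>,
    request: Request<B>,
    next: Next<B>,
) -> Response {
    state.dump_stats();
    next.run(request).await
}

async fn handler() -> &'static str {
    "ok"
}

fn build_app(state: Arc<AppState>) -> Router {
    let app = Router::new().route("/", get(handler));

    // Attach request tracing and the stats middleware only in debug builds.
    #[cfg(debug_assertions)]
    let app = app
        .layer(tower_http::trace::TraceLayer::new_for_http())
        .layer(middleware::from_fn(dump_stats_middleware));

    // The `Extension` layer is outermost so it runs first and the
    // middleware's `Extension<Arc<AppState>>` extractor can find the state.
    app.layer(Extension(state))
}
```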